import pandas as pd
import numpy as np
import datetime
import time
import math
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import black_litterman
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.black_litterman import BlackLittermanModel
from statsmodels.tsa.arima_model import ARIMA
def filter(init, source, asset_arr=[1, 2, 3, 4], geo_arr=[7, 2, 3, 5, 4, 6, 1], score=3):
# Filter according to user's rank
asset_class = ["Equity", "Fixed Income",
"Mixed Allocation", "Money Market"]
geo_class = ["Africa& Middle West Region", "Asian Pacific Region", "European Region", "Greater China",
"International", "Latin American Region", "U.S."]
fund_num = init.shape[0]
filter_re = []
for i in range(0, fund_num):
asset_tmp = init['Asset Class'][i]
geo_tmp = init['Geographical Focus'][i]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and (geo_tmp == geo_class[geo_arr[0] - 1] or geo_tmp == geo_class[geo_arr[1] - 1] or geo_tmp == geo_class[geo_arr[2] - 1] or geo_tmp == geo_class[geo_arr[3] - 1])):
filter_re.append(init['ISIN'][i])
    # If the number of filtered funds is smaller than 100 (configurable), broaden the geographic filter
fund_filted_min = 100
for i in range(4, 7):
if (len(filter_re) < fund_filted_min):
for j in range(0, fund_num):
asset_tmp = init['Asset Class'][j]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and geo_class[geo_arr[i] - 1] == init['Geographical Focus'][j]):
filter_re.append(init['ISIN'][j])
else:
break
# data: names after filter + their risks
data = pd.DataFrame()
data.insert(loc=0, column='name', value=[])
data.insert(loc=1, column='risk', value=[])
for i in range(0, len(filter_re)):
col_index = source.columns.get_loc(filter_re[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
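        # daily simple returns; the volatility below is annualized with sqrt(252) trading days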
returns = np.diff(price) / price[:-1]
ann_risk = np.std(returns) * math.sqrt(252)
len_data = len(data)
data.loc[len_data, 'name'] = filter_re[i]
data.loc[len_data, 'risk'] = ann_risk
# Sort according to their risks
data_sort = data.sort_values(
axis=0, ascending=True, by='risk').reset_index(drop=True)
'''
print("\n---risk---")
print(data_sort)
print()
'''
# get corresponding funds according to scores
len_index = int(np.floor(len(data_sort['name']) / 5))
fil_name = []
if (score == 5):
for i in range(len_index * 4, len(data_sort['name'])):
fil_name.append(data_sort.loc[i, 'name'])
else:
for i in range(len_index * (score - 1), len_index * score):
fil_name.append(data_sort.loc[i, 'name'])
### result: name + returns
result = pd.DataFrame()
result.insert(loc=0, column='name', value=[])
result.insert(loc=1, column='returns', value=[])
for i in range(0, len(fil_name)):
col_index = source.columns.get_loc(fil_name[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
returns = np.diff(price) / price[:-1]
rets_add_one = returns + 1
cum_rets = rets_add_one.cumprod() - 1
len_data = len(result)
result.loc[len_data, 'name'] = fil_name[i]
result.loc[len_data, 'returns'] = cum_rets[len(cum_rets) - 1]
# Sort according to their returns
result_sort = result.sort_values(
axis=0, ascending=False, by='returns').reset_index(drop=True)
'''
print("\n---return---")
print(result_sort)
print()
'''
# name_final: 5 names
name_final = []
for i in range(0, 5):
name_final.append(result_sort.loc[i, 'name'])
# price_five: 5 names + their prices
price_five = pd.DataFrame()
for i in range(0, len(name_final)):
price_five.insert(loc=i * 2, column=i, value=[])
price_five.insert(loc=i * 2 + 1, column=name_final[i], value=[])
for i in range(0, len(name_final)):
col_index = source.columns.get_loc(name_final[i])
date = source.iloc[:, col_index]
price = source.iloc[:, col_index + 1]
price_five.iloc[:, i * 2 + 1] = price
price_five.iloc[:, i * 2] = date
# combine
tmp = pd.DataFrame()
tmp.insert(loc=0, column='date', value=[])
tmp.insert(loc=1, column=name_final[0], value=[])
tmp['date'] = price_five.iloc[:, 0]
tmp[name_final[0]] = price_five.iloc[:, 1]
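    # outer-join the remaining four funds on 'date' so funds with different trading calendars stay aligned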
for i in range(1, 5):
price_five.rename(columns={i: 'date'}, inplace=True)
tmp = pd.merge(
tmp, price_five.iloc[:, 2 * i:2 * i + 2], on='date', how='outer')
tmp = tmp.sort_values(axis=0, ascending=True,
by='date').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :]
tmp = tmp.dropna(how='all')
data_date_tmp = list(tmp['date']).copy()
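    # encode the 'Y/m/d' date strings as a rough ordinal (y*365 + m*30 + d), used only for sorting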
for i in range(0, len(data_date_tmp)):
        if not isinstance(data_date_tmp[i], str):
            break  # stop at the first non-string (NaN) date entry
        parsed = datetime.datetime.strptime(data_date_tmp[i], "%Y/%m/%d")
        y = parsed.year
        m = parsed.month
        d = parsed.day
        data_date_tmp[i] = y * 365 + m * 30 + d
tmp['trans'] = data_date_tmp
tmp = tmp.sort_values(axis=0, ascending=True,
by='trans').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :6]
filter1 = tmp.set_index('date')
return filter1
# filter1.to_csv("filter1.csv")
# print(filter1)
# df1 = pd.DataFrame({'d': ['2018/1/1', np.nan,'2019/8/3'], 'd1': [1,2,np.nan]})
# df2 = pd.DataFrame({'d': ['2018/1/1', '2019/1/3'], 'd2': [1,3]})
# df=pd.merge(df1,df2, on='d', how='outer')
# df=df.sort_values(axis=0, ascending=True, by='d').reset_index(drop=True)
# print(df)
def seven(data):
#data = pd.read_csv("filter1.csv",header = 0,index_col=[0])
#print(data)
data_fund = pd.read_csv("newfund.csv", header=0, encoding="UTF-8")
fund_1 = data_fund.iloc[:, 7:9]
fund_2 = pd.concat([data_fund.iloc[:, 0], data_fund.iloc[:, 4]], axis=1)
max_date = data.shape[0]
fund_tmp = np.array([range(0, max_date+1000), range(0, max_date+1000)])
fund_tmp = fund_tmp.transpose()
fund_tmp = fund_tmp * -1.0
date_tmp = np.array([range(0, max_date+1000), range(0, max_date+1000)])
date_tmp = date_tmp.transpose()
date_tmp = date_tmp * -1.0
'''
fund_list_tmp = [[]for i in range(2)]
for i in range(0,-max_date,-1):
fund_list_tmp[0].append(i)
for i in range(0,-max_date,-1):
fund_list_tmp[1].append(i)
date_list_tmp = [[]for i in range(2)]
for i in range(0,-max_date,-1):
date_list_tmp[0].append(i)
for i in range(0,-max_date,-1):
date_list_tmp[1].append(i)
'''
fund_1_date = 1
fund_2_date = 1
while (type(fund_1.iloc[fund_1_date, 0]) == type("aha") or (not np.isnan(fund_1.iloc[fund_1_date, 0]))):
fund_1_date = fund_1_date+1
while (type(fund_2.iloc[fund_2_date, 0]) == type("aha") or (not np.isnan(fund_2.iloc[fund_2_date, 0]))):
fund_2_date = fund_2_date+1
if (fund_2_date == fund_2.shape[0]):
            break
data_date_tmp = list(data.index).copy()
for i in range(0, len(data_date_tmp)):
tmp = datetime.datetime.strptime(data_date_tmp[i], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
data_date_tmp[i] = y*365+m*30+d
'''
#print(fund_1.iloc[1564,0])
#print(type(fund_1.iloc[1564,0]))
#print(np.isnan(int(fund_1.iloc[1564,0])))
'''
fund_tmp[0:fund_1_date, 0] = fund_1.iloc[0:fund_1_date, 1]
fund_tmp[0:fund_2_date, 1] = fund_2.iloc[0:fund_2_date, 1]
for i in range(0, fund_1_date):
tmp = datetime.datetime.strptime(fund_1.iloc[i, 0], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
date_tmp[i, 0] = y*365+m*30+d
for i in range(0, fund_2_date):
tmp = datetime.datetime.strptime(fund_2.iloc[i, 0], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
date_tmp[i, 1] = y*365+m*30+d
# print(fund_tmp[-100:,:])
# print(date_tmp[-100:,:])
# print(max_date)
i = 0
while i <= max_date - 1:
if date_tmp[i, 0] < 0:
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
fund_1_date = fund_1_date + 1
elif date_tmp[i, 0] > data_date_tmp[i]:
if i < max_date - 1:
# print(date_tmp[i,0],data_date_tmp[i])
# print(i,fund_1_date)
# print(fund_1_date)
fund_tmp[(i+1):(fund_1_date+1),
0] = fund_tmp[(i):(fund_1_date), 0]
date_tmp[(i+1):(fund_1_date+1),
0] = date_tmp[(i):(fund_1_date), 0]
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
elif i == max_date - 1:
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
fund_1_date = fund_1_date + 1
elif date_tmp[i, 0] < data_date_tmp[i]:
# print(date_tmp[i,0],data_date_tmp[i])
# print(i,fund_1_date)
# print(fund_1_date)
fund_tmp[(i):(fund_1_date), 0] = fund_tmp[(i+1):(fund_1_date+1), 0]
date_tmp[(i):(fund_1_date), 0] = date_tmp[(i+1):(fund_1_date+1), 0]
fund_tmp[fund_1_date-1, 0] = -1000000 # NaN
date_tmp[fund_1_date-1, 0] = -1000000 # NaN
fund_1_date = fund_1_date - 1
i = i - 1
i = i + 1
i = 0
while i <= max_date - 1:
if date_tmp[i, 1] < 0:
date_tmp[i, 1] = data_date_tmp[i]
fund_tmp[i, 1] = -1000000 # NaN
fund_2_date = fund_2_date + 1
elif date_tmp[i, 1] > data_date_tmp[i]:
if i < max_date - 1:
# print(date_tmp[i,1],data_date_tmp[i])
# print(i,fund_2_date)
# print(fund_1_date)
fund_tmp[(i+1):(fund_2_date+1),
1] = fund_tmp[(i):(fund_2_date), 1]
date_tmp[(i+1):(fund_2_date+1),
1] = date_tmp[(i):(fund_2_date), 1]
                date_tmp[i, 1] = data_date_tmp[i]
                fund_tmp[i, 1] = -1000000 # NaN
            elif i == max_date - 1:
                date_tmp[i, 1] = data_date_tmp[i]
                fund_tmp[i, 1] = -1000000 # NaN
fund_2_date = fund_2_date + 1
elif date_tmp[i, 1] < data_date_tmp[i]:
fund_tmp[(i):(fund_2_date), 1] = fund_tmp[(i+1):(fund_2_date+1), 1]
date_tmp[(i):(fund_2_date), 1] = date_tmp[(i+1):(fund_2_date+1), 1]
fund_tmp[fund_2_date-1, 1] = -1000000 # NaN
date_tmp[fund_2_date-1, 1] = -1000000 # NaN
fund_2_date = fund_2_date - 1
i = i - 1
i = i + 1
fund_tmp = fund_tmp[:max_date, :]
# print(fund_tmp[-60:,:])
    fund_new = pd.DataFrame(
        fund_tmp, columns=["沪深300", "标普500"], index=data.index)  # CSI 300, S&P 500
data = pd.concat([data, fund_new], axis=1)
# print(fund_new.iloc[-60:,:])
# print(data.iloc[-60:,:])
return data
def fund_completion(data):
data = seven(data)
date = data.shape[0]
fund_num = data.shape[1]
# print(rows,fund_num)
for j in range(0, fund_num):
for i in range(0, date):
if (data.iloc[i, j] == -1000000 or np.isnan(data.iloc[i, j])):
if i != date - 1:
i_tmp = i + 1
                    while (i_tmp <= date - 1 and (data.iloc[i_tmp, j] == -1000000 or np.isnan(data.iloc[i_tmp, j]))):
i_tmp = i_tmp + 1
if i == 0:
data.iloc[i, j] = data.iloc[i_tmp, j]
                    elif i_tmp >= date - 1:
                        data.iloc[i, j] = data.iloc[i - 1, j]
else:
data.iloc[i, j] = (
data.iloc[i-1, j] + data.iloc[i_tmp, j]) / 2
else:
data.iloc[i, j] = data.iloc[i-1, j]
return data
def weights(data):
#df = pd.read_csv("complete_new.csv",parse_dates=[0], index_col=0,infer_datetime_format=True)
df = data
fund = df.iloc[0:, 0:5]
#print(fund.columns)
mu = expected_returns.mean_historical_return(fund)
# print(mu)
cov_matrix = risk_models.sample_cov(fund)
# print(S)
'''
# Method 1: Markowitz Mean-Variance Model
ef = EfficientFrontier(mu, cov_matrix, weight_bounds=(0.05, 0.4))
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
# print(cleaned_weights)
# ef.save_weights_to_file("weights.csv") # saves to file
ef.portfolio_performance(verbose=True)
weights = pd.DataFrame(cleaned_weights.values(),
index=cleaned_weights.keys(), columns=["weights"])
weights_T = pd.DataFrame(
weights.values.T, index=weights.columns, columns=weights.index)
# print(weights_T)
'''
# Method 2: Black-litterman Model
# Calculate Prior Return
spy_prices = df.iloc[0:, 6]
risk_pre = black_litterman.market_implied_risk_aversion(spy_prices)
mcaps = dict(zip(fund.columns,[1.0,1.0,1.0,1.0,1.0]))
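    # all market caps are set to 1.0, so the market-implied prior weights the five funds equally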
prior = black_litterman.market_implied_prior_returns(mcaps, risk_pre, cov_matrix)
print(mu)
print(prior)
# Generate Absolute View by ARIMA
    view_generated = [0.0, 0.0, 0.0, 0.0, 0.0]
    for fund_index in range(0, 5):
        model = ARIMA(df.iloc[:, fund_index], order=(5, 1, 0))
        model_fit = model.fit(disp=-1, maxiter=1000)
        view_generated[fund_index] = (model_fit.forecast()[0] / df.iloc[-1, fund_index]) - 1
#print("Predicted = {}" .format(model_fit.forecast()))
#print("Last = {}".format(df.iloc[-1, fund_index]))
viewdict = dict(zip(fund.columns,view_generated))
print(viewdict)
#print(mu)
bl_model = BlackLittermanModel(cov_matrix, absolute_views = viewdict, pi = prior)
rets = bl_model.bl_returns()
#print(rets)
# Generate Efficient Frontier and Optimize the model
ef = EfficientFrontier(rets, cov_matrix)
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
ef.portfolio_performance(verbose=True)
# Output
df = df.append(bl_model.weights, sort=False)
# print(df[-10:])
data_index = pd.DataFrame(df.index, index=df.index)
return_data = pd.concat([data_index, df], axis=1)
# print(return_data)
return return_data
def RoboAdvisor(init, source, asset_arr=[1, 2, 3, 4], geo_arr=[5, 2, 3, 7, 4, 6, 1], score=5):
#init = pd.read_csv("filter.csv")
#source = pd.read_csv("bloomberg.csv", low_memory=False)
level_1 = filter(init, source, asset_arr, geo_arr, score)
'''
print("\n---Level 1---")
print(level_1)
print()
'''
level_2 = fund_completion(level_1)
'''
print("\n---Level 2---")
print(level_2)
print()
'''
level_3 = weights(level_2)
'''
print("\n---Level 3---")
print(level_3)
print()
'''
    return level_3
init = pd.read_csv("filter.csv")
source = pd.read_csv("bloomberg.csv", low_memory=False)
RoboAdvisor(init, source)
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import List
from typing import Optional
import numpy as np
import onnx
from tqdm import tqdm
from nncf.common.utils.logger import logger as nncf_logger
from openvino.tools.accuracy_checker.config import ConfigReader
from openvino.tools.accuracy_checker.argparser import build_arguments_parser
from openvino.tools.accuracy_checker.dataset import Dataset
from openvino.tools.accuracy_checker.evaluators import ModelEvaluator
import nncf.experimental.post_training.api.dataset as ptq_api_dataset
from nncf.experimental.onnx.engine import ONNXEngine
from nncf.experimental.onnx.samplers import create_onnx_sampler
from time import time
import pandas as pd
class OpenVINOAccuracyCheckerDataset(ptq_api_dataset.Dataset):
def __init__(self, evaluator: ModelEvaluator, batch_size, shuffle):
super().__init__(batch_size, shuffle)
self.model_evaluator = evaluator
def __getitem__(self, item):
_, batch_annotation, batch_input, _ = self.model_evaluator.dataset[item]
filled_inputs, _, _ = self.model_evaluator._get_batch_input(
batch_annotation, batch_input)
assert len(filled_inputs) == 1
dummy_target = 0
for _, v in filled_inputs[0].items():
return np.squeeze(v, axis=0), dummy_target
raise RuntimeError("filled_inputs has no value.")
def __len__(self):
return len(self.model_evaluator.dataset)
def run(onnx_model_path: str, output_file_path: str, dataset: Dataset,
ignored_scopes: Optional[List[str]] = None, evaluate: Optional[bool] = False):
num_init_samples = len(dataset)
nncf_logger.info("Post-Training Quantization Parameters:")
nncf_logger.info(f" number of samples: {num_init_samples}")
nncf_logger.info(f" ignored_scopes: {ignored_scopes}")
onnx.checker.check_model(onnx_model_path)
original_model = onnx.load(onnx_model_path)
nncf_logger.info(f"The model is loaded from {onnx_model_path}")
onnx.checker.check_model(original_model)
engine = ONNXEngine()
sampler = create_onnx_sampler(dataset, range(len(dataset)))
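    # route onnxruntime inference through the OpenVINO execution provider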
engine.rt_session_options['providers'] = ["OpenVINOExecutionProvider"]
engine.set_model(original_model)
engine.set_sampler(sampler)
elapsed_times = []
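    # wall-clock latency of each inference, in milliseconds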
for input_data, _ in tqdm(sampler):
start_time = time()
engine.infer(input_data)
elapsed_times += [1000.0 * (time() - start_time)]
elapsed_times = np.array(elapsed_times)
model_name, _ = os.path.splitext(os.path.basename(onnx_model_path))
df = pd.DataFrame({
"model_name": [model_name],
"latency_mean": [np.mean(elapsed_times)],
"latency_std": [np.std(elapsed_times)]
})
if os.path.exists(output_file_path):
df.to_csv(output_file_path, header=False, mode="a", index=False)
else:
df.to_csv(output_file_path, header=True, mode="w", index=False)
if __name__ == '__main__':
parser = build_arguments_parser()
parser.add_argument("--output-file-path", "-o",
                        help="File path of the CSV to which benchmark latency results are written", type=str)
args = parser.parse_args()
config, mode = ConfigReader.merge(args)
assert mode == "models"
for config_entry in config[mode]:
model_evaluator = ModelEvaluator.from_configs(config_entry)
assert "datasets" in config_entry
assert len(config_entry["datasets"]
) == 1, "Config should have one dataset."
dataset_config = config_entry["datasets"][0]
assert "launchers" in config_entry
assert len(config_entry["launchers"]) == 1
run(onnx_model_path=str(config_entry["launchers"][0]["model"]),
output_file_path=args.output_file_path,
dataset=OpenVINOAccuracyCheckerDataset(model_evaluator, batch_size=1, shuffle=True))
# python 3.9 (>=3.7)
# encoding=UTF-8
# author: xyb
#---------------------------------------------------------------
# Purpose: trend / dispersion analysis of time-series data
#
import sys
import os
import json
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller as ADF
from util import *
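# timeseries_plot() and ts_info() used below are provided by the local util module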
def peakvalley(ts_analyze):  # peaks and valleys of the data
    diff1 = ts_analyze.diff(1)
    num = np.size(diff1)
    # print(np.size(diff1))
    peakvalley = 0
    index_peakvalley = []
    for i in range(num - 1):
        mult = diff1[i] * diff1[i + 1]
        if mult < 0:
            peakvalley = peakvalley + 1
            index_peakvalley.append(i)
    print('--number of peaks and valleys: %s' % (peakvalley))
    return index_peakvalley
def trend_features(df_analyze, valuename, trend_features_inputdata, DPlot_dir, Dplot):
    # decision criteria          : trend_features_inputdata
    # time series                : df_analyze
    # path of the series in iotdb: valuename
    # debug output directory     : DPlot_dir
    # debug output switch        : Dplot
    print('==>>>data point: %s' % (valuename))
    # filtering criterion for the oscillation check
    rolmean_window4vibrate = trend_features_inputdata['rolmean_window4vibrate']  # type:int; window length (in samples) of the moving-average denoising, must not exceed the sample count; used for the oscillation check, keep it small
    # filtering criterion for the monotonicity check
    rolmean_window4monotonicity = trend_features_inputdata[
        'rolmean_window4monotonicity']  # type:int; window length (in samples) of the moving-average denoising, must not exceed the sample count; used for the monotonicity check, may be somewhat larger
    monotonicity_peakvalleys = trend_features_inputdata['monotonicity_peakvalleys']  # type:int; maximum number of peaks/valleys a monotonic series may keep after windowed filtering (1 means strictly monotonic)
    ADF_pvalue = trend_features_inputdata['ADF_pvalue']  # type:float; p-value threshold of the ADF test
    ADF_pvalue_tf = 'unknown'
    S04_std_lower = trend_features_inputdata['S04_std_lower']  # type:float; standard-deviation upper bound used to decide "stationary and constant"
    S12_vibrate_range = trend_features_inputdata['S12_vibrate_range']  # type:float; lower bound of the interquartile range (relative to the mean) used to flag oscillation
    S12_vibrate_rate = trend_features_inputdata['S12_vibrate_rate']  # type:float; oscillation: minimum ratio of peaks/valleys to total samples (after small waves are filtered out)
    S11_drop_range = trend_features_inputdata['S11_drop_range']  # type:float; oscillating decline: |start - end| / |start| must exceed this value
    S11_vibrate_rate = trend_features_inputdata['S11_vibrate_rate']  # type:float; oscillating decline: minimum ratio of peaks/valleys to total samples (after small waves are filtered out)
    S10_rise_range = trend_features_inputdata['S10_rise_range']  # type:float; oscillating rise: |start - end| / |start| must exceed this value
    S10_vibrate_rate = trend_features_inputdata['S10_vibrate_rate']  # type:float; oscillating rise: minimum ratio of peaks/valleys to total samples (after small waves are filtered out)
    S07_drop_range = trend_features_inputdata['S07_drop_range']  # type:float; fast monotonic decline: |start - end| / |start| must exceed this value
    S05_drop_range = trend_features_inputdata['S05_drop_range']  # type:float; slow monotonic decline: |start - end| / |start| must stay below this value
    S01_rise_range = trend_features_inputdata['S01_rise_range']  # type:float; fast monotonic rise: |start - end| / |start| must exceed this value
    S03_rise_range = trend_features_inputdata['S03_rise_range']  # type:float; slow monotonic rise: |start - end| / |start| must stay below this value
    S08_location_range = trend_features_inputdata['S08_location_range']  # type:floatlist; admissible relative position of the single peak
    S09_location_range = trend_features_inputdata['S09_location_range']  # type:floatlist; admissible relative position of the single trough
    ts_numeric = pd.to_numeric(df_analyze)
    print('==>>>time series used for the trend analysis:')
    # tsinfo = ts_info(ts_numeric)
    if Dplot == 'yes': timeseries_plot(ts_numeric, 'g', valuename + '_oir', pathsave=DPlot_dir)
    s_tf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ADF_pvalue_tf = 'unknown'
    # filter according to the oscillation filtering criterion
    # pre-processing: moving-average denoising removes tiny wiggles that would distort the estimate of the oscillation frequency
    if np.size(df_analyze) < int(rolmean_window4vibrate):
        print("Warning: the 'oscillation' smoothing window (in samples) is larger than the number of samples read in!\nSkipping the moving-average denoising")
        rolmean_window4vibrate = 1
    ts_numeric_rolmean = ts_numeric.rolling(window=int(rolmean_window4vibrate)).mean()
    # reset the index so positional lookups (ts_analyze[0], diff1[i]) keep working after dropna()
    ts_numeric_rolmean = ts_numeric_rolmean.dropna(inplace=False).reset_index(drop=True)
    ts_analyze = ts_numeric_rolmean
    if Dplot == 'yes': timeseries_plot(ts_analyze, 'g', valuename + '_rollmean_vibrate', pathsave=DPlot_dir)
    print('stationarity test (ADF test) of the series:')
    test_result = ADF(ts_analyze)
    p_value = test_result[1]
    print('p-value:', p_value)
    if p_value >= ADF_pvalue:
        print('--the original series is non-stationary')
        ADF_pvalue_tf = 'unstationary'
    else:
        print('--the original series is stationary')
        ADF_pvalue_tf = 'stationary'
    # decision branch: stationary, constant series
    if ADF_pvalue_tf == 'stationary':
        print('standard-deviation check:')
        std_value = np.std(ts_analyze, ddof=1)
        if std_value >= S04_std_lower:
            print('--standard deviation: %f, above the lower bound: %f' % (std_value, S04_std_lower))
        if std_value < S04_std_lower:
            print('--standard deviation: %f, below the lower bound: %f' % (std_value, S04_std_lower))
            print('--the original series is stationary and constant')
            s_tf[4-1] = 1  # failure trend feature: stationary constant, s_04=1
    # decision branch: stationary oscillating series (not the complement of "stationary constant", but the two must not overlap)
    if ADF_pvalue_tf == 'stationary' and s_tf[4-1] == 0:
        print('oscillation check:')
        mean_value = np.mean(ts_analyze)
        lower_q = np.quantile(ts_analyze, 0.25, interpolation='lower')  # lower quartile
        higher_q = np.quantile(ts_analyze, 0.75, interpolation='higher')  # upper quartile
        int_r = higher_q - lower_q  # interquartile range
        if int_r / abs(mean_value) >= S12_vibrate_range:
            print('after filtering with the oscillation criterion:')
            num_peakvalley = np.size(peakvalley(ts_analyze))  # count the peaks and valleys
            vibrate_rate = num_peakvalley / np.size(ts_analyze)  # oscillation rate (after small waves are filtered out)
            if vibrate_rate >= S12_vibrate_rate:
                print('--the original series is a stationary oscillating series')
                s_tf[12-1] = 1  # failure trend feature: stationary oscillation, s_12=1
    # decision branch: volatility (number of peaks/valleys, range between start and end)
    if ADF_pvalue_tf == 'unstationary':
        print('after filtering with the oscillation criterion:')
        index_peakvalley = peakvalley(ts_analyze)
        num_peakvalley = np.size(index_peakvalley)  # count the peaks and valleys
        vibrate_rate = num_peakvalley / np.size(ts_analyze)  # oscillation rate (after small waves are filtered out)
        if vibrate_rate >= S11_vibrate_rate:
            drop_range = ts_analyze[0] - ts_analyze[np.size(ts_analyze) - 1]
            if drop_range > 0 and abs(drop_range / ts_analyze[0]) >= S11_drop_range:
                print('--the original series is an oscillating declining series')
                s_tf[11-1] = 1  # failure trend feature: oscillating decline, s_11=1
        if vibrate_rate >= S10_vibrate_rate:
            rise_range = ts_analyze[0] - ts_analyze[np.size(ts_analyze) - 1]
            if rise_range < 0 and abs(rise_range / ts_analyze[0]) >= S10_rise_range:
                print('--the original series is an oscillating rising series')
                s_tf[10-1] = 1  # failure trend feature: oscillating rise, s_10=1
    # decision branch: monotonicity
    if ADF_pvalue_tf == 'unstationary' and s_tf[10-1] == 0 and s_tf[11-1] == 0:
        # filter according to the monotonicity filtering criterion
        if rolmean_window4monotonicity >= np.size(ts_numeric):
            print('Error: the "monotonicity" smoothing window (in samples) is larger than the number of samples read in!')
            # print('Warning: ... skipping the moving-average denoising')
            # rolmean_window4monotonicity = 1
            sys.exit(1)
        ts_numeric_rolmean = ts_numeric.rolling(window=int(rolmean_window4monotonicity)).mean()
        # reset the index here as well, for the same positional-lookup reason as above
        ts_numeric_rolmean = ts_numeric_rolmean.dropna(inplace=False).reset_index(drop=True)
        ts_analyze = ts_numeric_rolmean
        if Dplot == 'yes': timeseries_plot(ts_analyze, 'g', valuename + '_rollmean_monotonicity', pathsave=DPlot_dir)
        print('after filtering with the monotonicity criterion:')
        index_peakvalley = peakvalley(ts_analyze)  # indices of the peaks and valleys
        if np.size(index_peakvalley) <= monotonicity_peakvalleys:  # the trend is (approximately) monotonic
            change_range = ts_analyze[0] - ts_analyze[np.size(ts_analyze) - 1]
            if change_range > 0:  # monotonically decreasing
                print("computed slope:")
                print(abs(change_range / ts_analyze[0]))
                print(S05_drop_range)
                # print(change_range)
                # print(ts_analyze[0])
                # print(ts_analyze[np.size(ts_analyze) - 1])
                if abs(change_range / ts_analyze[0]) >= S07_drop_range:
                    print('--the original series declines monotonically and rapidly')
                    s_tf[7-1] = 1  # failure trend feature: fast monotonic decline, s_07=1
                elif abs(change_range / ts_analyze[0]) < S05_drop_range:
                    print('--the original series declines monotonically and slowly')
                    s_tf[5-1] = 1  # failure trend feature: slow monotonic decline, s_05=1
                else:
                    print('--the original series declines monotonically')
                    s_tf[6-1] = 1  # failure trend feature: monotonic decline, s_06=1
            if change_range < 0:  # monotonically increasing
                if abs(change_range / ts_analyze[0]) >= S01_rise_range:
                    print('--the original series rises monotonically and rapidly')
                    s_tf[1-1] = 1  # failure trend feature: fast monotonic rise, s_01=1
                elif abs(change_range / ts_analyze[0]) < S03_rise_range:
                    print('--the original series rises monotonically and slowly')
                    s_tf[3-1] = 1  # failure trend feature: slow monotonic rise, s_03=1
                else:
                    print('--the original series rises monotonically')
                    s_tf[2-1] = 1  # failure trend feature: monotonic rise, s_02=1
        if np.size(index_peakvalley) <= monotonicity_peakvalleys and max(s_tf) == 0:  # the trend is (approximately) a single trough or a single peak
            # in the first version this was decided from the position of the unique peak/valley:
            # index = index_peakvalley[0]
            # location = index / np.size(ts_analyze)
            # in the second version it is decided from the maximum and minimum instead, relaxing the requirement on the number of peaks/valleys
            vals = ts_analyze.values.tolist()
            max_val = max(vals)
            min_val = min(vals)
            min_index = vals.index(min_val)
            max_index = vals.index(max_val)
            max_location = max_index / np.size(ts_analyze)
            min_location = min_index / np.size(ts_analyze)
            if ts_analyze[max_index] <= ts_analyze[0] or ts_analyze[max_index] <= ts_analyze[np.size(ts_analyze) - 1]:
                if min_location <= S09_location_range[1] and min_location >= S09_location_range[0]:  # single trough
                    print('--the original series is single-trough (decline then rise)')
                    s_tf[9-1] = 1  # failure trend feature: single trough (decline then rise), s_09=1
            if ts_analyze[min_index] >= ts_analyze[0] or ts_analyze[min_index] >= ts_analyze[np.size(ts_analyze) - 1]:  # single peak
                if max_location <= S08_location_range[1] and max_location >= S08_location_range[0]:  # single peak
                    print('--the original series is single-peak (rise then decline)')
                    s_tf[8-1] = 1  # failure trend feature: single peak (rise then decline), s_08=1
    print('trend symptom vector:', s_tf)
    return s_tf
def threshold_features(df_analyze, valuename, threshold_features_inputdata, DPlot_dir, Dplot):
    # decision criteria          : threshold_features_inputdata
    # time series                : df_analyze
    # path of the series in iotdb: valuename
    # debug output directory     : DPlot_dir
    # debug output switch        : Dplot
    print('==>>>data point: %s' % (valuename))
    T03_range = threshold_features_inputdata['T03_range']  # high-high-high
    T02_range = threshold_features_inputdata['T02_range']  # high-high
    T01_range = threshold_features_inputdata['T01_range']  # high
    T04_range = threshold_features_inputdata['T04_range']  # low
    T05_range = threshold_features_inputdata['T05_range']  # low-low
    T06_range = threshold_features_inputdata['T06_range']  # low-low-low
    # values that fall in none of these ranges are normal
    T_used = threshold_features_inputdata['T_used']
    t_tf = [0, 0, 0, 0, 0, 0]
    ts_numeric = pd.to_numeric(df_analyze)
    print('==>>>time series used for the threshold analysis:')
    tsinfo = ts_info(ts_numeric)
    # check that the threshold intervals are ordered consistently
    ranking = []
    if not T_used[3-1] == 0:
        ranking.append(T03_range[1])
        ranking.append(T03_range[0])
    if not T_used[2-1] == 0:
        ranking.append(T02_range[1])
        ranking.append(T02_range[0])
    if not T_used[1-1] == 0:
        ranking.append(T01_range[1])
        ranking.append(T01_range[0])
    if not T_used[4-1] == 0:
        ranking.append(T04_range[1])
        ranking.append(T04_range[0])
    if not T_used[5-1] == 0:
        ranking.append(T05_range[1])
        ranking.append(T05_range[0])
    if not T_used[6-1] == 0:
        ranking.append(T06_range[1])
        ranking.append(T06_range[0])
    for i in range(np.size(ranking) - 1):
        if ranking[i] < ranking[i+1]:
            print('Error: check that the failure-threshold intervals respect the required ordering! (T3>T2>T1>T4>T5>T6)')
            sys.exit(1)
    mean_value = np.mean(df_analyze)
    if not T_used[3-1] == 0:
        if mean_value >= T03_range[0] and mean_value <= T03_range[1]:
            t_tf[3-1] = 1
    if not T_used[2-1] == 0:
        if mean_value >= T02_range[0] and mean_value <= T02_range[1]:
            t_tf[2-1] = 1
    if not T_used[1-1] == 0:
        if mean_value >= T01_range[0] and mean_value <= T01_range[1]:
            t_tf[1-1] = 1
    if not T_used[4-1] == 0:
        if mean_value >= T04_range[0] and mean_value <= T04_range[1]:
            t_tf[4-1] = 1
    if not T_used[5-1] == 0:
        if mean_value >= T05_range[0] and mean_value <= T05_range[1]:
            t_tf[5-1] = 1
    if not T_used[6-1] == 0:
        if mean_value >= T06_range[0] and mean_value <= T06_range[1]:
            t_tf[6-1] = 1
    print('threshold symptom vector:', t_tf)
    return t_tf
if __name__ == '__main__':
    jsonfile = {
        'rolmean_window4vibrate': 20,  # type:int; moving-average denoising window (in samples) for the oscillation check; must not exceed the sample count, keep it small
        'rolmean_window4monotonicity': 50,  # type:int; moving-average denoising window (in samples) for the monotonicity check; may be somewhat larger
        'monotonicity_peakvalleys': 20,  # type:int; maximum number of peaks/valleys a monotonic series may keep after windowed filtering (1 means strictly monotonic)
        'ADF_pvalue': 0.05,  # type:float; p-value threshold of the ADF test
        'S04': {'std_lower': 0.10},  # type:float; standard-deviation upper bound for "stationary and constant"
        'S12': {'vibrate_range': 0.05, 'vibrate_rate': 0.05},  # type:float; oscillation: IQR lower bound (relative to the mean) and minimum peak/valley ratio (after small waves are filtered out)
        'S11': {'drop_range': 0.01, 'vibrate_rate': 0.09},  # type:float; oscillating decline: minimum relative drop and minimum peak/valley ratio (after small waves are filtered out)
        'S10': {'rise_range': 0.01, 'vibrate_rate': 0.05},  # type:float; oscillating rise: minimum relative rise and minimum peak/valley ratio (after small waves are filtered out)
        'S01': {'rise_range': 0.05},  # type:float; fast monotonic rise: minimum relative rise
        'S02': {},
        'S03': {'rise_range': 0.01},  # type:float; slow monotonic rise: maximum relative rise
        'S05': {'drop_range': 0.04},  # type:float; slow monotonic decline: maximum relative drop
        'S06': {},
        'S07': {'drop_range': 0.05},  # type:float; fast monotonic decline: minimum relative drop
        'S08': {'range': [0.01, 0.99]},  # type:floatlist; admissible relative position of the single peak
        'S09': {'range': [0.01, 0.99]},  # type:floatlist; admissible relative position of the single trough
    }
    with open("trend.json", "w") as f:
        json.dump(jsonfile, f)
        print("written to file...")
    # jsonfile = {  # for FQ1RCP604MP
    #     'T03': {'lower': 10000001, 'upper': 10000002, },  # type:float; high-high-high, use a huge default for the upper bound
    #     'T02': {'lower': 10000000, 'upper': 10000001, },  # type:float; high-high
    #     'T01': {'lower': 100, 'upper': 10000000, },  # type:float; high
    #     'T04': {'lower': -1000001, 'upper': -1000000, },  # type:float; low
    #     'T05': {'lower': -1000003, 'upper': -1000002, },  # type:float; low-low
    #     'T06': {'lower': -1000005, 'upper': -1000004, },  # type:float; low-low-low, use a tiny default for the lower bound
    #     'T_used': [1, 1, 1, 1, 1, 1]  # type:int; give a non-zero value to each symptom channel in use
    # }
    jsonfile = {  # for 1APA136MT_1
        'T03': {'lower': 1000003, 'upper': 1000004, },  # type:float; high-high-high, use a huge default for the upper bound
        'T02': {'lower': 1000001, 'upper': 1000002, },  # type:float; high-high
        'T01': {'lower': 100, 'upper': 1000000, },  # type:float; high
        'T04': {'lower': 4.7, 'upper': 5, },  # type:float; low
        'T05': {'lower': 4.5, 'upper': 4.7, },  # type:float; low-low
        'T06': {'lower': -1000005, 'upper': 4.5, },  # type:float; low-low-low, use a tiny default for the lower bound
        'T_used': [1, 0, 0, 0, 0, 0]  # type:int; give a non-zero value to each symptom channel in use
    }
    with open("threshold.json", "w") as f:
        json.dump(jsonfile, f)
        print("written to file...")
"json.dump",
"numpy.size",
"statsmodels.tsa.stattools.adfuller",
"numpy.quantile",
"numpy.std",
"numpy.mean",
"os._exit",
"pandas.to_numeric"
] | [((360, 374), 'numpy.size', 'np.size', (['diff1'], {}), '(diff1)\n', (367, 374), True, 'import numpy as np\n'), ((2967, 2992), 'pandas.to_numeric', 'pd.to_numeric', (['df_analyze'], {}), '(df_analyze)\n', (2980, 2992), True, 'import pandas as pd\n'), ((3802, 3817), 'statsmodels.tsa.stattools.adfuller', 'ADF', (['ts_analyze'], {}), '(ts_analyze)\n', (3805, 3817), True, 'from statsmodels.tsa.stattools import adfuller as ADF\n'), ((10566, 10591), 'pandas.to_numeric', 'pd.to_numeric', (['df_analyze'], {}), '(df_analyze)\n', (10579, 10591), True, 'import pandas as pd\n'), ((11493, 11512), 'numpy.mean', 'np.mean', (['df_analyze'], {}), '(df_analyze)\n', (11500, 11512), True, 'import numpy as np\n'), ((3296, 3315), 'numpy.size', 'np.size', (['df_analyze'], {}), '(df_analyze)\n', (3303, 3315), True, 'import numpy as np\n'), ((4155, 4181), 'numpy.std', 'np.std', (['ts_analyze'], {'ddof': '(1)'}), '(ts_analyze, ddof=1)\n', (4161, 4181), True, 'import numpy as np\n'), ((4689, 4708), 'numpy.mean', 'np.mean', (['ts_analyze'], {}), '(ts_analyze)\n', (4696, 4708), True, 'import numpy as np\n'), ((4727, 4779), 'numpy.quantile', 'np.quantile', (['ts_analyze', '(0.25)'], {'interpolation': '"""lower"""'}), "(ts_analyze, 0.25, interpolation='lower')\n", (4738, 4779), True, 'import numpy as np\n'), ((4808, 4861), 'numpy.quantile', 'np.quantile', (['ts_analyze', '(0.75)'], {'interpolation': '"""higher"""'}), "(ts_analyze, 0.75, interpolation='higher')\n", (4819, 4861), True, 'import numpy as np\n'), ((5485, 5510), 'numpy.size', 'np.size', (['index_peakvalley'], {}), '(index_peakvalley)\n', (5492, 5510), True, 'import numpy as np\n'), ((13984, 14006), 'json.dump', 'json.dump', (['jsonfile', 'f'], {}), '(jsonfile, f)\n', (13993, 14006), False, 'import json\n'), ((15235, 15257), 'json.dump', 'json.dump', (['jsonfile', 'f'], {}), '(jsonfile, f)\n', (15244, 15257), False, 'import json\n'), ((5562, 5581), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (5569, 5581), True, 'import numpy as np\n'), ((6366, 6385), 'numpy.size', 'np.size', (['ts_numeric'], {}), '(ts_numeric)\n', (6373, 6385), True, 'import numpy as np\n'), ((6590, 6600), 'os._exit', 'os._exit', ([], {}), '()\n', (6598, 6600), False, 'import os\n'), ((7030, 7055), 'numpy.size', 'np.size', (['index_peakvalley'], {}), '(index_peakvalley)\n', (7037, 7055), True, 'import numpy as np\n'), ((11326, 11342), 'numpy.size', 'np.size', (['ranking'], {}), '(ranking)\n', (11333, 11342), True, 'import numpy as np\n'), ((11465, 11475), 'os._exit', 'os._exit', ([], {}), '()\n', (11473, 11475), False, 'import os\n'), ((5124, 5143), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (5131, 5143), True, 'import numpy as np\n'), ((8488, 8513), 'numpy.size', 'np.size', (['index_peakvalley'], {}), '(index_peakvalley)\n', (8495, 8513), True, 'import numpy as np\n'), ((8986, 9005), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (8993, 9005), True, 'import numpy as np\n'), ((9045, 9064), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (9052, 9064), True, 'import numpy as np\n'), ((5697, 5716), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (5704, 5716), True, 'import numpy as np\n'), ((5998, 6017), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (6005, 6017), True, 'import numpy as np\n'), ((7153, 7172), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (7160, 7172), True, 'import numpy as np\n'), ((9158, 9177), 'numpy.size', 'np.size', (['ts_analyze'], {}), 
'(ts_analyze)\n', (9165, 9177), True, 'import numpy as np\n'), ((9492, 9511), 'numpy.size', 'np.size', (['ts_analyze'], {}), '(ts_analyze)\n', (9499, 9511), True, 'import numpy as np\n')] |
import sys
import threading
import numpy as np
from Orange.data import Table
from Orange.widgets import widget
from AnyQt.QtCore import pyqtSlot
from orangewidget.utils.signals import Input, Output
from streaming.widgets.fixation_detector import FixationDetector
class FixationsWidget(widget.OWWidget):
"""
Calculate real-time fixations using gaze coordinate data
"""
name = "Fixations"
description = "Calculate fixations from gaze data stream"
icon = "icons/Tabulate.svg"
priority = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# variables
self.lock = threading.Lock()
# ui
self.want_main_area = False
self.want_control_area = False
self.want_basic_layout = False
self.want_message_bar = False
class Inputs:
"""
Inputs
"""
gaze_data = Input("Gaze Data", Table)
class Outputs:
"""
Outputs
"""
fixation_data = Output("Fixation Data", Table)
@Inputs.gaze_data
def set_gaze_data(self, values: Table):
variable_cols = ['t', *map(str, list(values.domain.variables)[1:])]
        gaze_x: np.ndarray = ...
        gaze_y: np.ndarray = ...
found = 0
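        # average the x/y gaze coordinates over however many eyes are present in the stream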
t = np.array(values.X[:, 0])
if 'gaze.0_x' in variable_cols and 'gaze.0_y' in variable_cols:
gaze_x = np.array(values.X[:, variable_cols.index('gaze.0_x')]) + (gaze_x if found > 0 else 0)
gaze_y = np.array(values.X[:, variable_cols.index('gaze.0_y')]) + (gaze_y if found > 0 else 0)
found += 1
        if 'gaze.1_x' in variable_cols and 'gaze.1_y' in variable_cols:
            gaze_x = np.array(values.X[:, variable_cols.index('gaze.1_x')]) + (gaze_x if found > 0 else 0)
            gaze_y = np.array(values.X[:, variable_cols.index('gaze.1_y')]) + (gaze_y if found > 0 else 0)
            found += 1  # count this eye so the average below divides by the right number
if found > 0:
gaze_x /= found
gaze_y /= found
eqn = FixationDetector(job=[t, gaze_x, gaze_y], parent=self)
eqn.output.connect(self.send_output)
eqn.start()
@pyqtSlot(Table)
def send_output(self, output: Table):
self.Outputs.fixation_data.send(output)
if __name__ == "__main__":
from AnyQt.QtWidgets import QApplication
app = QApplication(sys.argv)
ow = FixationsWidget()
ow.show()
ow.raise_()
ow.handleNewSignals()
app.exec_()
sys.exit()
#!/usr/bin/env python3
import argparse
import datetime
import os
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
# import pandas as pd
PING_TIME_REGEX = re.compile(r'^\[(\d+\.\d+)].*time=(\d+\.?\d*) ms$')
PING_STATS_REGEX = re.compile(r'^(\d+) packets transmitted, (\d+) received,')
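# Example lines these regexes are meant to match (assuming `ping -D` style output):
#   [1617840000.123456] 64 bytes from 8.8.8.8: icmp_seq=1 ttl=118 time=12.3 ms
#   5 packets transmitted, 5 received, 0% packet loss, time 4005ms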
def _parse_ping_latencies(log_content, min_datetime):
datetimes = []
latencies = []
for line in log_content.split('\n'):
match = PING_TIME_REGEX.match(line)
if not match:
continue
timestamp = float(match.groups()[0])
latency_ms = float(match.groups()[1])
ping_dt = datetime.datetime.fromtimestamp(timestamp)
if ping_dt >= min_datetime:
datetimes.append(ping_dt)
latencies.append(latency_ms)
return (datetimes, latencies)
def _parse_packet_loss(log_content, min_datetime):
sent = 0
received = 0
last_datetime = None
for line in log_content.split('\n'):
match = PING_TIME_REGEX.match(line)
if match:
            timestamp = float(match.groups()[0])
            last_datetime = datetime.datetime.fromtimestamp(timestamp)
continue
if not last_datetime or last_datetime < min_datetime:
continue
match = PING_STATS_REGEX.match(line)
if not match:
continue
sent += int(match.groups()[0])
received += int(match.groups()[1])
return (sent, received)
def compute_percentile(values, percentile):
    assert 0 <= percentile <= 1
    return sorted(values)[min(int(percentile * len(values)), len(values) - 1)]
# pylint: disable=too-many-locals
def main():
parser = argparse.ArgumentParser(description='Analyze ping times log.')
parser.add_argument('--ping-log-files',
help='Comma delimited list of ping log files',
required=True)
parser.add_argument('--min-datetime',
help='Minimum datetime of data to include',
required=False)
parser.add_argument('--smoothing-kernel-size',
type=int,
help='Dimension of smoothing kernel',
default=10)
args = parser.parse_args()
min_datetime = datetime.datetime(1970, 1, 1)
if args.min_datetime:
min_datetime = datetime.datetime.strptime(args.min_datetime,
'%Y-%m-%d %H:%M:%S')
for path in args.ping_log_files.split(','):
path = os.path.expanduser(path)
print('Parsing file: {}'.format(path))
if not os.path.exists(path):
sys.exit('File not found: {}'.format(path))
with open(path) as f:
log_content = f.read()
label = os.path.basename(path)
sent, received = _parse_packet_loss(log_content, min_datetime)
lost = sent - received
print('Packet loss: {:.1f}% ({}/{})'.format(100 * lost / sent, lost,
sent))
datetimes, latencies = _parse_ping_latencies(log_content, min_datetime)
        percentile_latencies = [(p, compute_percentile(latencies, p / 100))
                                for p in [50, 90, 99]]
print('Latency percentiles: {}'.format(', '.join(
'{}%: {:6.2f}'.format(l[0], l[1]) for l in percentile_latencies)))
smoothing_filter = scipy.signal.windows.hann(args.smoothing_kernel_size)
smoothing_filter /= sum(smoothing_filter)
latencies = np.convolve(latencies, smoothing_filter, mode='same')
plt.plot(datetimes, latencies, label=label)
plt.legend()
plt.show()
if __name__ == '__main__':
main()
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Finite Differences - Grid-Staggering Elastic 1D</div>
# </div>
# </div>
# </div>
# <p style="width:20%;float:right;padding-left:50px">
# <img src=../../share/images/book.jpg>
# <span style="font-size:smaller">
# </span>
# </p>
#
#
# ---
#
# This notebook is part of the supplementary material
# to [Computational Seismology: A Practical Introduction](https://global.oup.com/academic/product/computational-seismology-9780198717416?cc=de&lang=en&#),
# Oxford University Press, 2016.
#
#
# ##### Authors:
# * <NAME> ([@ashimrijal](https://github.com/ashimrijal))
# * <NAME> ([@heinerigel](https://github.com/heinerigel))
# This exercise covers the following aspects:
#
# * Solving velocity-stress formulation of 1D wave equation with finite difference method
# * Understanding the grid-staggering in connection with finite difference solution to the elastic wave equation
# ---
# ## Basic Equations
# ** Please refer to the sections 4.6.2 and 4.6.3 from the book.**
#
# The 1D wave equation (velocity-stress formulation) as a coupled system of two first-order partial differential equations
#
# $$
# \rho \partial_t v = \partial_x \sigma + f
# $$
# $$
# \partial_t \sigma = \mu \partial_x v
# $$
#
# where,
#
# $ \sigma $ is the stress,
#
# $ \rho $ is the density,
#
# $ v $ is the velocity,
#
# $ \mu $ is the shear modulus, and
#
# $ f $ is the source.
#
# Grid staggering is a concept used in connection with finite-difference solutions to the elastic wave equation.
# The discrete velocity and stress are defined on a regularly spaced grid in space and time. Partial derivatives are then replaced with centered finite-difference approximations to the first derivative. However, these approximations are not defined at the grid points of a function but in between the grid points.
# In grid staggering, the following computational scheme is used:
#
# $$
# \frac{v_i^{j+ \tfrac{1}{2}} - v_i^{j- \tfrac{1}{2}} }{dt} \ = \ \frac{1}{\rho_i}\frac{\sigma_{i + \tfrac{1}{2}}^j - \sigma_{i - \tfrac{1}{2}}^j }{dx} + \frac{f_i^j}{\rho_i} \
# $$
#
# $$
# \frac{\sigma_{i+\tfrac{1}{2}}^{j+1} - \sigma_{i+\tfrac{1}{2}}^j }{dt} \ = \ \mu_{i+\tfrac{1}{2}} \frac{v_{i + 1}^{j +\tfrac{1}{2}} - v_i^{j + \tfrac{1}{2}} }{dx}
# $$
#
# The extrapolation scheme becomes
#
# $$
# v_i^{j+ \tfrac{1}{2}} \ = \ \frac{dt}{\rho_i} \frac{\sigma_{i + \tfrac{1}{2}}^j - \sigma_{i - \tfrac{1}{2}}^j }{dx} \ + \ v_i^{j- \tfrac{1}{2}} + \frac{dt}{\rho_i} \ f_i^j \
# $$
#
# $$
# \sigma_{i+\tfrac{1}{2}}^{j+1} \ = \ dt \ \mu_{i+\tfrac{1}{2}} \frac{v_{i + 1}^{j +\tfrac{1}{2}} - v_i^{j + \tfrac{1}{2}} }{dx} \ + \ \sigma_{i+\tfrac{1}{2}}^j \ \
# $$
#
#
# **Note that in the code below we do not deal with the index fractions.**
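#
# As used in the code below, the time step follows from the Courant (CFL) stability criterion, scaled by the stability limit $\epsilon \le 1$:
#
# $$
# dt \ = \ \epsilon \ \frac{dx}{c_0}
# $$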
# ### Exercise
# First, understand the code below and run the simulation.
#
# Then, improve the result by using a 4-point operator for the first derivative.
#
# **Message: Once you are familiar with all the code below, you can go to the Cell tab on the toolbar and click Run All.**
#
# + {"code_folding": [0]}
# Configuration step (Please run it before the simulation code!)
import numpy as np
import matplotlib
# Show Plot in The Notebook
matplotlib.use("nbagg")
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.facecolor'] = 'w' # remove grey background
# + {"code_folding": []}
# Initialization of parameters
# Simple finite difference solver
# Elastic wave equation
# 1-D regular staggered grid
# Basic parameters
nt = 1300 # number of time steps
nx = 1000 # number of grid points in x
c0 = 4500 # velocity (m/sec) (shear wave)
eps = 0.8 # stability limit
isnap = 2 # snapshot frequency
isx = round(nx/2) # source location
f0 = 1/15 # frequency (Hz)
xmax = 1000000. # maximum range (m)
rho0 = 2500. # density (kg/m**3)
mu0 = rho0*c0**2. # shear modulus (Pa)
nop = 4 # length of the difference operator: either 2 or 4 (the loops below implement the 4-point operator)
dx = xmax / (nx-1) # calculate space increment (m)
x = (np.arange(nx)*dx) # initialize space coordinates
dt = eps * dx/c0 # calculate time step from stability criterion(s)
# Source time function
t = (np.arange(0,nt) * dt) # initialize time axis
T0 = 1. / f0 # period
a = 4. / T0 # half-width (so called sigma)
t0 = T0 / dt # source onset (in time steps)
tmp = np.zeros(nt)
for it in range(nt):
    t_src = (it - t0) * dt # time relative to source onset
    tmp[it] = -2 * a * t_src * np.exp(-(a * t_src) ** 2) # derivative of a Gaussian
src = np.zeros(nt) # source
src[0:len(tmp)] = tmp
lam = c0 * T0 # wavelength
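# A quick sanity check on the discretization (our addition, not part of the
# original notebook): the Courant number c0*dt/dx equals eps by construction
# and must stay below 1, and the dominant wavelength should be sampled by a
# sufficient number of grid points (roughly 10 or more).
courant = c0 * dt / dx
points_per_wavelength = lam / dx
print("Courant number =", courant, ", points per wavelength =", points_per_wavelength)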
# + {"code_folding": []}
# Extrapolation scheme and the plots
# Initialization of plot
# Initialization of fields
v = np.zeros(nx) # velocity
dv = np.zeros(nx) # velocity derivative
s = np.zeros(nx) # stress
ds = np.zeros(nx) # stress derivative
mu = np.zeros(nx) + mu0 # shear modulus
rho = np.zeros(nx) + rho0 # density
# Print setup parameters
print("rho =", rho0, ", f_dom =", f0, ", stability limit =", eps, ", n_lambda", (lam/dx))
# Initialize the plot
title = "FD Elastic 1D staggered grid"
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
line1 = ax1.plot(x, v, color = "red", lw = 1.5)
line2 = ax2.plot(x, s, color = "blue", lw = 1.5)
ax1.set_ylabel('velocity (m/s)')
ax2.set_xlabel('x (m)')
ax2.set_ylabel('stress (Pa)')
plt.ion()
plt.show()
# Begin extrapolation and update the plot
for it in range (nt):
    # Stress derivative (4-point staggered operator: 1.125 = 9/8, 0.0416666 ~ 1/24)
    for i in range(2, nx - 2):
        ds[i] = (0.0416666 * s[i - 1] - 1.125 * s[i] + 1.125 * s[i + 1] - 0.0416666 * s[i + 2]) / dx
# Velocity extrapolation
v = v + dt * ds / rho
# Add source term at isx
v[isx] = v[isx] + dt * src[it] / (dt * rho[isx])
    # Velocity derivative (same 4-point operator, shifted by half a grid point)
    for i in range(2, nx - 2):
        dv[i] = (0.0416666 * v[i - 2] - 1.125 * v[i - 1] + 1.125 * v[i] - 0.0416666 * v[i + 1]) / dx
# Stress extrapolation
s = s + dt * mu * dv
# Updating the plots
if not it % isnap:
for l in line1:
l.remove()
del l
for l in line2:
l.remove()
del l
line1 = ax1.plot(x, v, color = "red", lw = 1.5)
line2 = ax2.plot(x, s, color = "blue", lw = 1.5)
ax1.set_title(title + ", time step: %i" % (it))
plt.gcf().canvas.draw()
plt.ioff()
plt.show()
# -
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"numpy.zeros",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.gcf"
] | [((4009, 4032), 'matplotlib.use', 'matplotlib.use', (['"""nbagg"""'], {}), "('nbagg')\n", (4023, 4032), False, 'import matplotlib\n'), ((5721, 5733), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5729, 5733), True, 'import numpy as np\n'), ((5882, 5894), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5890, 5894), True, 'import numpy as np\n'), ((6152, 6164), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (6160, 6164), True, 'import numpy as np\n'), ((6235, 6247), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (6243, 6247), True, 'import numpy as np\n'), ((6317, 6329), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (6325, 6329), True, 'import numpy as np\n'), ((6654, 6681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6664, 6681), True, 'import matplotlib.pyplot as plt\n'), ((6927, 6936), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (6934, 6936), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6947), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6945, 6947), True, 'import matplotlib.pyplot as plt\n'), ((7945, 7955), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (7953, 7955), True, 'import matplotlib.pyplot as plt\n'), ((7956, 7966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7964, 7966), True, 'import matplotlib.pyplot as plt\n'), ((5263, 5276), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (5272, 5276), True, 'import numpy as np\n'), ((5479, 5495), 'numpy.arange', 'np.arange', (['(0)', 'nt'], {}), '(0, nt)\n', (5488, 5495), True, 'import numpy as np\n'), ((5805, 5826), 'numpy.exp', 'np.exp', (['(-(a * t) ** 2)'], {}), '(-(a * t) ** 2)\n', (5811, 5826), True, 'import numpy as np\n'), ((7920, 7929), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7927, 7929), True, 'import matplotlib.pyplot as plt\n')] |
import pandas as pd
import numpy as np
from river_dl.postproc_utils import fmt_preds_obs
from river_dl.loss_functions import rmse, nse, kge
def filter_negative_preds(y_true, y_pred):
"""
filters out negative predictions and prints a warning if there are >5% of predictions as negative
:param y_true: [array-like] observed y_dataset values
:param y_pred: [array-like] predicted y_dataset values
:return: [array-like] filtered data
"""
# print a warning if there are a lot of negatives
n_negative = len(y_pred[y_pred < 0])
perc_negative = n_negative / len(y_pred)
if perc_negative > 0.05:
        print(
            f"Warning: more than 5% of predictions were negative: "
            f"{n_negative} of {len(y_pred)}"
        )
# filter out negative predictions
y_true = np.where(y_pred < 0, np.nan, y_true)
y_pred = np.where(y_pred < 0, np.nan, y_pred)
return y_true, y_pred
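# For example (hypothetical values, illustration only):
# filter_negative_preds(np.array([1.0, 2.0]), np.array([0.5, -0.1]))
# returns (array([ 1., nan]), array([0.5, nan])) and prints the warning,
# since half of the predictions are negative.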
def rmse_logged(y_true, y_pred):
"""
compute the rmse of the logged data
:param y_true: [array-like] observed y_dataset values
:param y_pred: [array-like] predicted y_dataset values
:return: [float] the rmse of the logged data
"""
y_true, y_pred = filter_negative_preds(y_true, y_pred)
return rmse(np.log(y_true), np.log(y_pred))
def nse_logged(y_true, y_pred):
"""
    compute the nse of the logged data
:param y_true: [array-like] observed y_dataset values
:param y_pred: [array-like] predicted y_dataset values
:return: [float] the nse of the logged data
"""
y_true, y_pred = filter_negative_preds(y_true, y_pred)
return nse(np.log(y_true), np.log(y_pred))
def filter_by_percentile(y_true, y_pred, percentile, less_than=True):
"""
    filter an array by a percentile of the observations. The data greater than
    the percentile (or less than it, if `less_than=False`) will be changed to NaN
:param y_true: [array-like] observed y_dataset values
:param y_pred: [array-like] predicted y_dataset values
:param percentile: [number] percentile number 0-100
:param less_than: [bool] whether you want the data *less than* the
percentile. If False, the data greater than the percentile will remain.
:return: [array-like] filtered data
"""
percentile_val = np.nanpercentile(y_true, percentile)
if less_than:
y_true_filt = np.where(y_true < percentile_val, y_true, np.nan)
y_pred_filt = np.where(y_true < percentile_val, y_pred, np.nan)
else:
y_true_filt = np.where(y_true > percentile_val, y_true, np.nan)
y_pred_filt = np.where(y_true > percentile_val, y_pred, np.nan)
return y_true_filt, y_pred_filt
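# For example (hypothetical values): with y_true = [1., 2., 3., 4.] and
# percentile=50, np.nanpercentile gives 2.5, so the default less_than=True
# keeps the lower half: y_true becomes [1., 2., nan, nan], and y_pred is
# masked at the same positions.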
def percentile_metric(y_true, y_pred, metric, percentile, less_than=True):
"""
compute an evaluation metric for a specified percentile of the observations
:param y_true: [array-like] observed y_dataset values
:param y_pred: [array-like] predicted y_dataset values
:param metric: [function] metric function
:param percentile: [number] percentile number 0-100
:param less_than: [bool] whether you want the data *less than* the
percentile. If False, the data greater than the percentile will remain.
"""
y_true_filt, y_pred_filt = filter_by_percentile(
y_true, y_pred, percentile, less_than
)
return metric(y_true_filt, y_pred_filt)
def calc_metrics(df):
"""
calculate metrics (e.g., rmse and nse)
:param df:[pd dataframe] dataframe of observations and predictions for
one reach. dataframe must have columns "obs" and "pred"
:return: [pd Series] various evaluation metrics (e.g., rmse and nse)
"""
obs = df["obs"].values
pred = df["pred"].values
if len(obs) > 10:
        metrics = {
            "rmse": rmse(obs, pred).numpy(),
            "nse": nse(obs, pred).numpy(),
            "rmse_top10": percentile_metric(
                obs, pred, rmse, 90, less_than=False
            ).numpy(),
            "rmse_bot10": percentile_metric(
                obs, pred, rmse, 10, less_than=True
            ).numpy(),
            "rmse_logged": rmse_logged(obs, pred).numpy(),
            "nse_top10": percentile_metric(
                obs, pred, nse, 90, less_than=False
            ).numpy(),
            "nse_bot10": percentile_metric(
                obs, pred, nse, 10, less_than=True
            ).numpy(),
            "nse_logged": nse_logged(obs, pred).numpy(),
            "kge": kge(obs, pred).numpy(),
        }
else:
metrics = {
"rmse": np.nan,
"nse": np.nan,
"rmse_top10": np.nan,
"rmse_bot10": np.nan,
"rmse_logged": np.nan,
"nse_top10": np.nan,
"nse_bot10": np.nan,
"nse_logged": np.nan,
"kge": np.nan,
}
return pd.Series(metrics)
def partition_metrics(
pred_file,
obs_file,
partition,
spatial_idx_name="seg_id_nat",
time_idx_name="date",
group=None,
outfile=None
):
"""
calculate metrics for a certain group (or no group at all) for a given
partition and variable
:param pred_file: [str] path to predictions feather file
:param obs_file: [str] path to observations zarr file
:param partition: [str] data partition for which metrics are calculated
:param spatial_idx_name: [str] name of column that is used for spatial
index (e.g., 'seg_id_nat')
:param time_idx_name: [str] name of column that is used for temporal index
(usually 'time')
:param group: [str or list] which group the metrics should be computed for.
Currently only supports 'seg_id_nat' (segment-wise metrics), 'month'
(month-wise metrics), ['seg_id_nat', 'month'] (metrics broken out by segment
and month), and None (everything is left together)
:param outfile: [str] file where the metrics should be written
:return: [pd dataframe] the condensed metrics
"""
var_data = fmt_preds_obs(pred_file, obs_file, spatial_idx_name,
time_idx_name)
var_metrics_list = []
for data_var, data in var_data.items():
data.reset_index(inplace=True)
if not group:
metrics = calc_metrics(data)
# need to convert to dataframe and transpose so it looks like the
# others
metrics = pd.DataFrame(metrics).T
elif group == "seg_id_nat":
metrics = data.groupby(spatial_idx_name).apply(calc_metrics).reset_index()
elif group == "month":
metrics = (
data.groupby(
data[time_idx_name].dt.month)
.apply(calc_metrics)
.reset_index()
)
elif group == ["seg_id_nat", "month"]:
metrics = (
data.groupby(
[data[time_idx_name].dt.month,
spatial_idx_name])
.apply(calc_metrics)
.reset_index()
)
else:
raise ValueError("group value not valid")
metrics["variable"] = data_var
metrics["partition"] = partition
var_metrics_list.append(metrics)
var_metrics = pd.concat(var_metrics_list)
if outfile:
var_metrics.to_csv(outfile, header=True, index=False)
return var_metrics
def combined_metrics(
obs_file,
pred_trn=None,
pred_val=None,
pred_tst=None,
spatial_idx_name="seg_id_nat",
time_idx_name="date",
group=None,
outfile=None,
):
"""
calculate the metrics for flow and temp and training and test sets for a
given grouping
:param obs_file: [str] path to observations zarr file
:param pred_trn: [str] path to training prediction feather file
:param pred_val: [str] path to validation prediction feather file
:param pred_tst: [str] path to testing prediction feather file
:param spatial_idx_name: [str] name of column that is used for spatial
index (e.g., 'seg_id_nat')
:param time_idx_name: [str] name of column that is used for temporal index
(usually 'time')
:param group: [str or list] which group the metrics should be computed for.
Currently only supports 'seg_id_nat' (segment-wise metrics), 'month'
(month-wise metrics), ['seg_id_nat', 'month'] (metrics broken out by segment
and month), and None (everything is left together)
:param outfile: [str] csv file where the metrics should be written
:return: combined metrics
"""
df_all = []
if pred_trn:
trn_metrics = partition_metrics(pred_file=pred_trn,
obs_file=obs_file,
partition="trn",
spatial_idx_name=spatial_idx_name,
time_idx_name=time_idx_name,
group=group)
df_all.extend([trn_metrics])
if pred_val:
val_metrics = partition_metrics(pred_file=pred_val,
obs_file=obs_file,
partition="val",
spatial_idx_name=spatial_idx_name,
time_idx_name=time_idx_name,
group=group)
df_all.extend([val_metrics])
if pred_tst:
tst_metrics = partition_metrics(pred_file=pred_tst,
obs_file=obs_file,
partition="tst",
spatial_idx_name=spatial_idx_name,
time_idx_name=time_idx_name,
group=group)
df_all.extend([tst_metrics])
df_all = pd.concat(df_all, axis=0)
if outfile:
df_all.to_csv(outfile, index=False)
return df_all
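# A minimal usage sketch (hypothetical values, not part of the module): build a
# small obs/pred dataframe and compute the metric series for it. It assumes the
# TensorFlow-backed loss functions imported above are available, so it is
# guarded behind __main__.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _demo = pd.DataFrame({"obs": _rng.random(20) + 0.1,
                          "pred": _rng.random(20) + 0.1})
    print(calc_metrics(_demo))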
| [
"pandas.DataFrame",
"numpy.nanpercentile",
"river_dl.postproc_utils.fmt_preds_obs",
"numpy.log",
"river_dl.loss_functions.nse",
"numpy.where",
"river_dl.loss_functions.kge",
"pandas.Series",
"river_dl.loss_functions.rmse",
"pandas.concat"
] | [((813, 849), 'numpy.where', 'np.where', (['(y_pred < 0)', 'np.nan', 'y_true'], {}), '(y_pred < 0, np.nan, y_true)\n', (821, 849), True, 'import numpy as np\n'), ((863, 899), 'numpy.where', 'np.where', (['(y_pred < 0)', 'np.nan', 'y_pred'], {}), '(y_pred < 0, np.nan, y_pred)\n', (871, 899), True, 'import numpy as np\n'), ((2261, 2297), 'numpy.nanpercentile', 'np.nanpercentile', (['y_true', 'percentile'], {}), '(y_true, percentile)\n', (2277, 2297), True, 'import numpy as np\n'), ((5092, 5110), 'pandas.Series', 'pd.Series', (['metrics'], {}), '(metrics)\n', (5101, 5110), True, 'import pandas as pd\n'), ((6253, 6320), 'river_dl.postproc_utils.fmt_preds_obs', 'fmt_preds_obs', (['pred_file', 'obs_file', 'spatial_idx_name', 'time_idx_name'], {}), '(pred_file, obs_file, spatial_idx_name, time_idx_name)\n', (6266, 6320), False, 'from river_dl.postproc_utils import fmt_preds_obs\n'), ((10056, 10081), 'pandas.concat', 'pd.concat', (['df_all'], {'axis': '(0)'}), '(df_all, axis=0)\n', (10065, 10081), True, 'import pandas as pd\n'), ((1258, 1272), 'numpy.log', 'np.log', (['y_true'], {}), '(y_true)\n', (1264, 1272), True, 'import numpy as np\n'), ((1274, 1288), 'numpy.log', 'np.log', (['y_pred'], {}), '(y_pred)\n', (1280, 1288), True, 'import numpy as np\n'), ((1619, 1633), 'numpy.log', 'np.log', (['y_true'], {}), '(y_true)\n', (1625, 1633), True, 'import numpy as np\n'), ((1635, 1649), 'numpy.log', 'np.log', (['y_pred'], {}), '(y_pred)\n', (1641, 1649), True, 'import numpy as np\n'), ((2338, 2387), 'numpy.where', 'np.where', (['(y_true < percentile_val)', 'y_true', 'np.nan'], {}), '(y_true < percentile_val, y_true, np.nan)\n', (2346, 2387), True, 'import numpy as np\n'), ((2410, 2459), 'numpy.where', 'np.where', (['(y_true < percentile_val)', 'y_pred', 'np.nan'], {}), '(y_true < percentile_val, y_pred, np.nan)\n', (2418, 2459), True, 'import numpy as np\n'), ((2492, 2541), 'numpy.where', 'np.where', (['(y_true > percentile_val)', 'y_true', 'np.nan'], {}), '(y_true > percentile_val, y_true, np.nan)\n', (2500, 2541), True, 'import numpy as np\n'), ((2564, 2613), 'numpy.where', 'np.where', (['(y_true > percentile_val)', 'y_pred', 'np.nan'], {}), '(y_true > percentile_val, y_pred, np.nan)\n', (2572, 2613), True, 'import numpy as np\n'), ((7445, 7472), 'pandas.concat', 'pd.concat', (['var_metrics_list'], {}), '(var_metrics_list)\n', (7454, 7472), True, 'import pandas as pd\n'), ((6644, 6665), 'pandas.DataFrame', 'pd.DataFrame', (['metrics'], {}), '(metrics)\n', (6656, 6665), True, 'import pandas as pd\n'), ((3747, 3762), 'river_dl.loss_functions.rmse', 'rmse', (['obs', 'pred'], {}), '(obs, pred)\n', (3751, 3762), False, 'from river_dl.loss_functions import rmse, nse, kge\n'), ((3791, 3805), 'river_dl.loss_functions.nse', 'nse', (['obs', 'pred'], {}), '(obs, pred)\n', (3794, 3805), False, 'from river_dl.loss_functions import rmse, nse, kge\n'), ((4428, 4442), 'river_dl.loss_functions.kge', 'kge', (['obs', 'pred'], {}), '(obs, pred)\n', (4431, 4442), False, 'from river_dl.loss_functions import rmse, nse, kge\n')] |
import os
import numpy as np
import datetime
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
from skimage.io import imread
from skimage.filters import threshold_otsu
from sklearn.externals import joblib  # deprecated; on modern scikit-learn use `import joblib` instead
############################ prediction ########################################
current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, 'models/svc/svc.pkl')
plate_dir = os.path.join(current_dir, 'license_plate/detected/')
total_dir = os.path.join(current_dir, 'license_plate/result/')
model = joblib.load(model_dir)
############################ prediction end ########################################
############################ localization ######################################
#####################################function definition##########################################
def extract_text(car_image):
print(car_image.shape)
gray_car_image = car_image * 255
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(gray_car_image, cmap="gray")
threshold_value = threshold_otsu(gray_car_image)
binary_car_image = gray_car_image > threshold_value
ax2.imshow(binary_car_image, cmap="gray")
plt.show()
############################ localization end######################################
############################ cca ######################################
label_image = measure.label(binary_car_image)
fig, (ax1) = plt.subplots(1)
    ax1.imshow(gray_car_image, cmap="gray")
plate_like_objects = []
plate_objects_cordinates = []
# regionprops creates a list of properties of all the labelled regions
for region in regionprops(label_image):
#if region.area < 3400 or region.area > 10000:
# if region.area < 19000 or region.area > 23000:
        if region.area < 2000:
            # a region this small is unlikely to be a license plate
            continue
# the bounding box coordinates
minRow, minCol, maxRow, maxCol = region.bbox
plate_like_objects.append(binary_car_image[minRow:maxRow,
minCol:maxCol])
plate_objects_cordinates.append((minRow, minCol,
maxRow, maxCol))
rectBorder = patches.Rectangle((minCol, minRow), maxCol-minCol, maxRow-minRow, edgecolor="red", linewidth=2, fill=False)
ax1.add_patch(rectBorder)
# let's draw a red rectangle over those regions
plt.show()
############################ cca end ##########################################
############################ segmentation ##########################################
# print("plate_like_objects",plate_like_objects)
    if not plate_like_objects:
        print("No plate-like region detected")
        return None
license_plate = np.invert(plate_like_objects[0])
labelled_plate = measure.label(license_plate)
fig, ax1 = plt.subplots(1)
ax1.imshow(license_plate, cmap="gray")
character_dimensions = (0.35*license_plate.shape[0], 0.9*license_plate.shape[0], 0.01*license_plate.shape[1], 0.9*license_plate.shape[1])
min_height, max_height, min_width, max_width = character_dimensions
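    # i.e. a candidate character must span 35-90% of the plate height and
    # 1-90% of the plate width (a size heuristic that rejects noise blobs)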
characters = []
counter=0
column_list = []
for regions in regionprops(labelled_plate):
y0, x0, y1, x1 = regions.bbox
region_height = y1 - y0
# print("region_height",region_height)
region_width = x1 - x0
# print("region_width",region_width)
if region_height > min_height and region_height < max_height and region_width > min_width and region_width < max_width:
roi = license_plate[y0:y1, x0:x1]
rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor="red",
linewidth=2, fill=False)
ax1.add_patch(rect_border)
# resize the characters to 20X20 and then append each character into the characters list
resized_char = resize(roi, (20, 20))
characters.append(resized_char)
# this is just to keep track of the arrangement of the characters
column_list.append(x0)
plt.show()
############################ segmentation end ##########################################
############################ prediction ########################################
classification_result = []
for each_character in characters:
        # reshape each 20x20 character image into a 1D feature vector
        each_character = each_character.reshape(1, -1)
result = model.predict(each_character)
classification_result.append(result)
# print(classification_result)
plate_string = ''
for eachPredict in classification_result:
plate_string += eachPredict[0]
# print(plate_string)
# it's possible the characters are wrongly arranged
# since that's a possibility, the column_list will be
# used to sort the letters in the right order
column_list_copy = column_list[:]
column_list.sort()
rightplate_string = ''
for each in column_list:
rightplate_string += plate_string[column_list_copy.index(each)]
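    # e.g. if plate_string == "BA12" came from character columns [40, 10, 90, 60],
    # sorting the columns to [10, 40, 60, 90] reorders the string to "AB21"
    # (hypothetical numbers, for illustration only)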
print(rightplate_string)
    time = datetime.datetime.now()
# print(time)
# print(car_image)
file = open('plates.txt','a')
file.write( str(time) +" " + rightplate_string +"\n" )
file.close()
# os.rename(car_image,rightplate_string)
############################ prediction ########################################
# car_dir = os.path.join(current_dir,'license_plate/result')
path, dirs, files = next(os.walk(total_dir))
file_count = len(files)
print("file_count",file_count)
file = open('plates.txt','a')
file.write( "\n" )
file.close()
for each_number in range(1,file_count+1):
# for each_number in range(1,17):
try:
car_image = imread( plate_dir+ "vehicle" + str(each_number) + 'plate.png', as_grey=True )
p=extract_text(car_image)
print(str(each_number))
    except Exception:
        # skip plates that are missing or fail to process
        pass
# print("car_image",car_image)
# if(car_image):
# pass
# else:
# break
# car_image = imread("input_img\plate1.png", as_grey=True)
# p=extract_text(car_image)
| [
"matplotlib.pyplot.show",
"skimage.filters.threshold_otsu",
"numpy.invert",
"matplotlib.patches.Rectangle",
"os.path.realpath",
"os.walk",
"matplotlib.pyplot.subplots",
"datetime.datetime.now",
"skimage.measure.label",
"skimage.transform.resize",
"sklearn.externals.joblib.load",
"os.path.join"... | [((511, 558), 'os.path.join', 'os.path.join', (['current_dir', '"""models/svc/svc.pkl"""'], {}), "(current_dir, 'models/svc/svc.pkl')\n", (523, 558), False, 'import os\n'), ((571, 623), 'os.path.join', 'os.path.join', (['current_dir', '"""license_plate/detected/"""'], {}), "(current_dir, 'license_plate/detected/')\n", (583, 623), False, 'import os\n'), ((636, 686), 'os.path.join', 'os.path.join', (['current_dir', '"""license_plate/result/"""'], {}), "(current_dir, 'license_plate/result/')\n", (648, 686), False, 'import os\n'), ((696, 718), 'sklearn.externals.joblib.load', 'joblib.load', (['model_dir'], {}), '(model_dir)\n', (707, 718), False, 'from sklearn.externals import joblib\n'), ((471, 497), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (487, 497), False, 'import os\n'), ((1107, 1125), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1119, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1222), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['gray_car_image'], {}), '(gray_car_image)\n', (1206, 1222), False, 'from skimage.filters import threshold_otsu\n'), ((1329, 1339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1337, 1339), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1556), 'skimage.measure.label', 'measure.label', (['binary_car_image'], {}), '(binary_car_image)\n', (1538, 1556), False, 'from skimage import measure\n'), ((1574, 1589), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1586, 1589), True, 'import matplotlib.pyplot as plt\n'), ((1792, 1816), 'skimage.measure.regionprops', 'regionprops', (['label_image'], {}), '(label_image)\n', (1803, 1816), False, 'from skimage.measure import regionprops\n'), ((2613, 2623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2621, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2973, 3005), 'numpy.invert', 'np.invert', (['plate_like_objects[0]'], {}), '(plate_like_objects[0])\n', (2982, 3005), True, 'import numpy as np\n'), ((3028, 3056), 'skimage.measure.label', 'measure.label', (['license_plate'], {}), '(license_plate)\n', (3041, 3056), False, 'from skimage import measure\n'), ((3073, 3088), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (3085, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3449), 'skimage.measure.regionprops', 'regionprops', (['labelled_plate'], {}), '(labelled_plate)\n', (3433, 3449), False, 'from skimage.measure import regionprops\n'), ((4331, 4341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4339, 4341), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5363), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5361, 5363), False, 'import datetime\n'), ((5745, 5763), 'os.walk', 'os.walk', (['total_dir'], {}), '(total_dir)\n', (5752, 5763), False, 'import os\n'), ((2410, 2525), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(minCol, minRow)', '(maxCol - minCol)', '(maxRow - minRow)'], {'edgecolor': '"""red"""', 'linewidth': '(2)', 'fill': '(False)'}), "((minCol, minRow), maxCol - minCol, maxRow - minRow,\n edgecolor='red', linewidth=2, fill=False)\n", (2427, 2525), True, 'import matplotlib.patches as patches\n'), ((3847, 3938), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x0, y0)', '(x1 - x0)', '(y1 - y0)'], {'edgecolor': '"""red"""', 'linewidth': '(2)', 'fill': '(False)'}), "((x0, y0), x1 - x0, y1 - y0, edgecolor='red', linewidth=2,\n fill=False)\n", (3864, 3938), True, 'import 
matplotlib.patches as patches\n'), ((4146, 4167), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (4152, 4167), False, 'from skimage.transform import resize\n')] |
import sys
import numpy as np
b: np.bool_
u8: np.uint64
i8: np.int64
f8: np.float64
c8: np.complex64
c16: np.complex128
m: np.timedelta64
U: np.str_
S: np.bytes_
reveal_type(c8.real) # E: {float32}
reveal_type(c8.imag) # E: {float32}
reveal_type(c8.real.real) # E: {float32}
reveal_type(c8.real.imag) # E: {float32}
reveal_type(c8.itemsize) # E: int
reveal_type(c8.shape) # E: Tuple[]
reveal_type(c8.strides) # E: Tuple[]
reveal_type(c8.ndim) # E: Literal[0]
reveal_type(c8.size) # E: Literal[1]
reveal_type(c8.squeeze()) # E: {complex64}
reveal_type(c8.byteswap()) # E: {complex64}
reveal_type(c8.transpose()) # E: {complex64}
reveal_type(c8.dtype) # E: numpy.dtype[{complex64}]
reveal_type(c8.real) # E: {float32}
reveal_type(c16.imag) # E: {float64}
reveal_type(np.unicode_('foo')) # E: numpy.str_
reveal_type(np.str0('foo')) # E: numpy.str_
# Aliases
reveal_type(np.unicode_()) # E: numpy.str_
reveal_type(np.str0()) # E: numpy.str_
reveal_type(np.bool8()) # E: numpy.bool_
reveal_type(np.bytes0()) # E: numpy.bytes_
reveal_type(np.string_()) # E: numpy.bytes_
reveal_type(np.object0()) # E: numpy.object_
reveal_type(np.void0(0)) # E: numpy.void
reveal_type(np.byte()) # E: {byte}
reveal_type(np.short()) # E: {short}
reveal_type(np.intc()) # E: {intc}
reveal_type(np.intp()) # E: {intp}
reveal_type(np.int0()) # E: {intp}
reveal_type(np.int_()) # E: {int_}
reveal_type(np.longlong()) # E: {longlong}
reveal_type(np.ubyte()) # E: {ubyte}
reveal_type(np.ushort()) # E: {ushort}
reveal_type(np.uintc()) # E: {uintc}
reveal_type(np.uintp()) # E: {uintp}
reveal_type(np.uint0()) # E: {uintp}
reveal_type(np.uint()) # E: {uint}
reveal_type(np.ulonglong()) # E: {ulonglong}
reveal_type(np.half()) # E: {half}
reveal_type(np.single()) # E: {single}
reveal_type(np.double()) # E: {double}
reveal_type(np.float_()) # E: {double}
reveal_type(np.longdouble()) # E: {longdouble}
reveal_type(np.longfloat()) # E: {longdouble}
reveal_type(np.csingle()) # E: {csingle}
reveal_type(np.singlecomplex()) # E: {csingle}
reveal_type(np.cdouble()) # E: {cdouble}
reveal_type(np.complex_()) # E: {cdouble}
reveal_type(np.cfloat()) # E: {cdouble}
reveal_type(np.clongdouble()) # E: {clongdouble}
reveal_type(np.clongfloat()) # E: {clongdouble}
reveal_type(np.longcomplex()) # E: {clongdouble}
reveal_type(b.item()) # E: bool
reveal_type(i8.item()) # E: int
reveal_type(u8.item()) # E: int
reveal_type(f8.item()) # E: float
reveal_type(c16.item()) # E: complex
reveal_type(U.item()) # E: str
reveal_type(S.item()) # E: bytes
reveal_type(b.tolist()) # E: bool
reveal_type(i8.tolist()) # E: int
reveal_type(u8.tolist()) # E: int
reveal_type(f8.tolist()) # E: float
reveal_type(c16.tolist()) # E: complex
reveal_type(U.tolist()) # E: str
reveal_type(S.tolist()) # E: bytes
reveal_type(b.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(i8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(u8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
reveal_type(f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(c16.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
reveal_type(U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(S.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
reveal_type(b.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(i8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(u8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
reveal_type(f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(c16.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
reveal_type(U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(S.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
reveal_type(b.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(i8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(u8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
reveal_type(i8.astype(float)) # E: Any
reveal_type(i8.astype(np.float64)) # E: {float64}
reveal_type(i8.view()) # E: {int64}
reveal_type(i8.view(np.float64)) # E: {float64}
reveal_type(i8.view(float)) # E: Any
reveal_type(i8.view(np.float64, np.ndarray)) # E: {float64}
reveal_type(i8.getfield(float)) # E: Any
reveal_type(i8.getfield(np.float64)) # E: {float64}
reveal_type(i8.getfield(np.float64, 8)) # E: {float64}
reveal_type(f8.as_integer_ratio()) # E: Tuple[builtins.int, builtins.int]
reveal_type(f8.is_integer()) # E: bool
reveal_type(f8.__trunc__()) # E: int
reveal_type(f8.__getformat__("float")) # E: str
reveal_type(f8.hex()) # E: str
reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64}
reveal_type(f8.__getnewargs__()) # E: Tuple[builtins.float]
reveal_type(c16.__getnewargs__()) # E: Tuple[builtins.float, builtins.float]
reveal_type(i8.numerator) # E: {int64}
reveal_type(i8.denominator) # E: Literal[1]
reveal_type(u8.numerator) # E: {uint64}
reveal_type(u8.denominator) # E: Literal[1]
reveal_type(m.numerator) # E: numpy.timedelta64
reveal_type(m.denominator) # E: Literal[1]
reveal_type(round(i8)) # E: int
reveal_type(round(i8, 3)) # E: {int64}
reveal_type(round(u8)) # E: int
reveal_type(round(u8, 3)) # E: {uint64}
reveal_type(round(f8)) # E: int
reveal_type(round(f8, 3)) # E: {float64}
if sys.version_info >= (3, 9):
reveal_type(f8.__ceil__()) # E: int
reveal_type(f8.__floor__()) # E: int
| [
"numpy.double",
"numpy.ubyte",
"numpy.short",
"numpy.longfloat",
"numpy.longcomplex",
"numpy.bytes0",
"numpy.int_",
"numpy.clongdouble",
"numpy.object0",
"numpy.float_",
"numpy.clongfloat",
"numpy.cdouble",
"numpy.csingle",
"numpy.void0",
"numpy.longdouble",
"numpy.int0",
"numpy.unic... | [((789, 807), 'numpy.unicode_', 'np.unicode_', (['"""foo"""'], {}), "('foo')\n", (800, 807), True, 'import numpy as np\n'), ((838, 852), 'numpy.str0', 'np.str0', (['"""foo"""'], {}), "('foo')\n", (845, 852), True, 'import numpy as np\n'), ((894, 907), 'numpy.unicode_', 'np.unicode_', ([], {}), '()\n', (905, 907), True, 'import numpy as np\n'), ((938, 947), 'numpy.str0', 'np.str0', ([], {}), '()\n', (945, 947), True, 'import numpy as np\n'), ((978, 988), 'numpy.bool8', 'np.bool8', ([], {}), '()\n', (986, 988), True, 'import numpy as np\n'), ((1020, 1031), 'numpy.bytes0', 'np.bytes0', ([], {}), '()\n', (1029, 1031), True, 'import numpy as np\n'), ((1064, 1076), 'numpy.string_', 'np.string_', ([], {}), '()\n', (1074, 1076), True, 'import numpy as np\n'), ((1109, 1121), 'numpy.object0', 'np.object0', ([], {}), '()\n', (1119, 1121), True, 'import numpy as np\n'), ((1155, 1166), 'numpy.void0', 'np.void0', (['(0)'], {}), '(0)\n', (1163, 1166), True, 'import numpy as np\n'), ((1198, 1207), 'numpy.byte', 'np.byte', ([], {}), '()\n', (1205, 1207), True, 'import numpy as np\n'), ((1234, 1244), 'numpy.short', 'np.short', ([], {}), '()\n', (1242, 1244), True, 'import numpy as np\n'), ((1272, 1281), 'numpy.intc', 'np.intc', ([], {}), '()\n', (1279, 1281), True, 'import numpy as np\n'), ((1308, 1317), 'numpy.intp', 'np.intp', ([], {}), '()\n', (1315, 1317), True, 'import numpy as np\n'), ((1344, 1353), 'numpy.int0', 'np.int0', ([], {}), '()\n', (1351, 1353), True, 'import numpy as np\n'), ((1380, 1389), 'numpy.int_', 'np.int_', ([], {}), '()\n', (1387, 1389), True, 'import numpy as np\n'), ((1416, 1429), 'numpy.longlong', 'np.longlong', ([], {}), '()\n', (1427, 1429), True, 'import numpy as np\n'), ((1461, 1471), 'numpy.ubyte', 'np.ubyte', ([], {}), '()\n', (1469, 1471), True, 'import numpy as np\n'), ((1499, 1510), 'numpy.ushort', 'np.ushort', ([], {}), '()\n', (1508, 1510), True, 'import numpy as np\n'), ((1539, 1549), 'numpy.uintc', 'np.uintc', ([], {}), '()\n', (1547, 1549), True, 'import numpy as np\n'), ((1577, 1587), 'numpy.uintp', 'np.uintp', ([], {}), '()\n', (1585, 1587), True, 'import numpy as np\n'), ((1615, 1625), 'numpy.uint0', 'np.uint0', ([], {}), '()\n', (1623, 1625), True, 'import numpy as np\n'), ((1653, 1662), 'numpy.uint', 'np.uint', ([], {}), '()\n', (1660, 1662), True, 'import numpy as np\n'), ((1689, 1703), 'numpy.ulonglong', 'np.ulonglong', ([], {}), '()\n', (1701, 1703), True, 'import numpy as np\n'), ((1736, 1745), 'numpy.half', 'np.half', ([], {}), '()\n', (1743, 1745), True, 'import numpy as np\n'), ((1772, 1783), 'numpy.single', 'np.single', ([], {}), '()\n', (1781, 1783), True, 'import numpy as np\n'), ((1812, 1823), 'numpy.double', 'np.double', ([], {}), '()\n', (1821, 1823), True, 'import numpy as np\n'), ((1852, 1863), 'numpy.float_', 'np.float_', ([], {}), '()\n', (1861, 1863), True, 'import numpy as np\n'), ((1892, 1907), 'numpy.longdouble', 'np.longdouble', ([], {}), '()\n', (1905, 1907), True, 'import numpy as np\n'), ((1940, 1954), 'numpy.longfloat', 'np.longfloat', ([], {}), '()\n', (1952, 1954), True, 'import numpy as np\n'), ((1988, 2000), 'numpy.csingle', 'np.csingle', ([], {}), '()\n', (1998, 2000), True, 'import numpy as np\n'), ((2030, 2048), 'numpy.singlecomplex', 'np.singlecomplex', ([], {}), '()\n', (2046, 2048), True, 'import numpy as np\n'), ((2078, 2090), 'numpy.cdouble', 'np.cdouble', ([], {}), '()\n', (2088, 2090), True, 'import numpy as np\n'), ((2120, 2133), 'numpy.complex_', 'np.complex_', ([], {}), '()\n', (2131, 2133), True, 
'import numpy as np\n'), ((2163, 2174), 'numpy.cfloat', 'np.cfloat', ([], {}), '()\n', (2172, 2174), True, 'import numpy as np\n'), ((2204, 2220), 'numpy.clongdouble', 'np.clongdouble', ([], {}), '()\n', (2218, 2220), True, 'import numpy as np\n'), ((2254, 2269), 'numpy.clongfloat', 'np.clongfloat', ([], {}), '()\n', (2267, 2269), True, 'import numpy as np\n'), ((2303, 2319), 'numpy.longcomplex', 'np.longcomplex', ([], {}), '()\n', (2317, 2319), True, 'import numpy as np\n'), ((5093, 5123), 'numpy.float64.fromhex', 'np.float64.fromhex', (['"""0x0.0p+0"""'], {}), "('0x0.0p+0')\n", (5111, 5123), True, 'import numpy as np\n')] |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
# https://github.com/scikit-hep/awkward-1.0/issues/459#issuecomment-694941328
#
# So the rules would be,
# * if arrays have different `__array__` or `__record__` parameters, they are not equal;
# * if they otherwise have different parameters, the types can be equal, but merging
# (concatenation, option-simplify, or union-simplify) removes parameters other than
# `__array__` and `__record__`.
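# A compact illustration of the first rule (a sketch only; the helper name is
# ours, but it uses the same v2 API exercised by the tests below):
def _illustrate_array_parameter_rule():
    plain = ak._v2.highlevel.Array([0.0, 1.1, 2.2])
    tagged = ak._v2.operations.structure.with_parameter(plain, "__array__", "zoinks")
    # arrays with different "__array__" parameters cannot be merged into one
    # content type, so concatenation falls back to a union of the two types
    assert isinstance(
        ak._v2.operations.structure.concatenate([plain, tagged]).layout,
        ak._v2.contents.UnionArray,
    )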
def test_0459_types():
plain_plain = ak._v2.highlevel.Array([0.0, 1.1, 2.2, 3.3, 4.4])
array_plain = ak._v2.operations.structure.with_parameter(
plain_plain, "__array__", "zoinks"
)
plain_isdoc = ak._v2.operations.structure.with_parameter(
plain_plain, "__doc__", "This is a zoink."
)
array_isdoc = ak._v2.operations.structure.with_parameter(
array_plain, "__doc__", "This is a zoink."
)
assert ak._v2.operations.describe.parameters(plain_plain) == {}
assert ak._v2.operations.describe.parameters(array_plain) == {"__array__": "zoinks"}
assert ak._v2.operations.describe.parameters(plain_isdoc) == {
"__doc__": "This is a zoink."
}
assert ak._v2.operations.describe.parameters(array_isdoc) == {
"__array__": "zoinks",
"__doc__": "This is a zoink.",
}
assert ak._v2.operations.describe.type(
plain_plain
) == ak._v2.operations.describe.type(plain_plain)
assert ak._v2.operations.describe.type(
array_plain
) == ak._v2.operations.describe.type(array_plain)
assert ak._v2.operations.describe.type(
plain_isdoc
) == ak._v2.operations.describe.type(plain_isdoc)
assert ak._v2.operations.describe.type(
array_isdoc
) == ak._v2.operations.describe.type(array_isdoc)
assert ak._v2.operations.describe.type(
plain_plain
) != ak._v2.operations.describe.type(array_plain)
assert ak._v2.operations.describe.type(
array_plain
) != ak._v2.operations.describe.type(plain_plain)
assert ak._v2.operations.describe.type(
plain_plain
) == ak._v2.operations.describe.type(plain_isdoc)
assert ak._v2.operations.describe.type(
plain_isdoc
) == ak._v2.operations.describe.type(plain_plain)
assert ak._v2.operations.describe.type(
array_plain
) == ak._v2.operations.describe.type(array_isdoc)
assert ak._v2.operations.describe.type(
array_isdoc
) == ak._v2.operations.describe.type(array_plain)
assert ak._v2.operations.describe.type(
plain_isdoc
) != ak._v2.operations.describe.type(array_isdoc)
assert ak._v2.operations.describe.type(
array_isdoc
) != ak._v2.operations.describe.type(plain_isdoc)
assert array_plain.layout.parameters == {"__array__": "zoinks"}
assert (
ak._v2.operations.structure.without_parameters(array_plain).layout.parameters
== {}
)
assert plain_isdoc.layout.parameters == {"__doc__": "This is a zoink."}
assert (
ak._v2.operations.structure.without_parameters(plain_isdoc).layout.parameters
== {}
)
assert array_isdoc.layout.parameters == {
"__array__": "zoinks",
"__doc__": "This is a zoink.",
}
assert (
ak._v2.operations.structure.without_parameters(array_isdoc).layout.parameters
== {}
)
def test_0459():
plain_plain = ak._v2.highlevel.Array([0.0, 1.1, 2.2, 3.3, 4.4])
array_plain = ak._v2.operations.structure.with_parameter(
plain_plain, "__array__", "zoinks"
)
plain_isdoc = ak._v2.operations.structure.with_parameter(
plain_plain, "__doc__", "This is a zoink."
)
array_isdoc = ak._v2.operations.structure.with_parameter(
array_plain, "__doc__", "This is a zoink."
)
assert ak._v2.operations.describe.parameters(plain_plain) == {}
assert ak._v2.operations.describe.parameters(array_plain) == {"__array__": "zoinks"}
assert ak._v2.operations.describe.parameters(plain_isdoc) == {
"__doc__": "This is a zoink."
}
assert ak._v2.operations.describe.parameters(array_isdoc) == {
"__array__": "zoinks",
"__doc__": "This is a zoink.",
}
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_plain, plain_plain])
)
== {}
)
assert ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_plain, array_plain])
) == {"__array__": "zoinks"}
assert ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_isdoc, plain_isdoc])
) == {"__doc__": "This is a zoink."}
assert ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_isdoc, array_isdoc])
) == {
"__array__": "zoinks",
"__doc__": "This is a zoink.",
}
assert isinstance(
ak._v2.operations.structure.concatenate([plain_plain, plain_plain]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_plain, array_plain]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([plain_isdoc, plain_isdoc]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_isdoc, array_isdoc]).layout,
ak._v2.contents.NumpyArray,
)
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_plain, array_plain])
)
== {}
)
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_isdoc, array_isdoc])
)
== {}
)
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_plain, plain_plain])
)
== {}
)
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_isdoc, plain_isdoc])
)
== {}
)
assert isinstance(
ak._v2.operations.structure.concatenate([plain_plain, array_plain]).layout,
ak._v2.contents.UnionArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([plain_isdoc, array_isdoc]).layout,
ak._v2.contents.UnionArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_plain, plain_plain]).layout,
ak._v2.contents.UnionArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_isdoc, plain_isdoc]).layout,
ak._v2.contents.UnionArray,
)
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_plain, plain_isdoc])
)
== {}
)
assert ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_plain, array_isdoc])
) == {"__array__": "zoinks"}
assert (
ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([plain_isdoc, plain_plain])
)
== {}
)
assert ak._v2.operations.describe.parameters(
ak._v2.operations.structure.concatenate([array_isdoc, array_plain])
) == {"__array__": "zoinks"}
assert isinstance(
ak._v2.operations.structure.concatenate([plain_plain, plain_isdoc]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_plain, array_isdoc]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([plain_isdoc, plain_plain]).layout,
ak._v2.contents.NumpyArray,
)
assert isinstance(
ak._v2.operations.structure.concatenate([array_isdoc, array_plain]).layout,
ak._v2.contents.NumpyArray,
)
def test_0522():
content1 = ak._v2.highlevel.Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
content2 = ak._v2.highlevel.Array([0, 100, 200, 300, 400]).layout
tags = ak._v2.index.Index8(np.array([0, 0, 0, 1, 1, 0, 0, 1, 1, 1], np.int8))
index = ak._v2.index.Index64(np.array([0, 1, 2, 0, 1, 3, 4, 2, 3, 4], np.int64))
unionarray = ak._v2.highlevel.Array(
ak._v2.contents.UnionArray(tags, index, [content1, content2])
)
assert unionarray.tolist() == [0.0, 1.1, 2.2, 0, 100, 3.3, 4.4, 200, 300, 400]
assert (unionarray + 10).tolist() == [
10.0,
11.1,
12.2,
10,
110,
13.3,
14.4,
210,
310,
410,
]
assert (10 + unionarray).tolist() == [
10.0,
11.1,
12.2,
10,
110,
13.3,
14.4,
210,
310,
410,
]
assert (unionarray + range(0, 100, 10)).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (range(0, 100, 10) + unionarray).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (unionarray + np.arange(0, 100, 10)).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (np.arange(0, 100, 10) + unionarray).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (unionarray + ak._v2.highlevel.Array(np.arange(0, 100, 10))).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (ak._v2.highlevel.Array(np.arange(0, 100, 10)) + unionarray).tolist() == [
0.0,
11.1,
22.2,
30,
140,
53.3,
64.4,
270,
380,
490,
]
assert (unionarray + unionarray).tolist() == [
0.0,
2.2,
4.4,
0,
200,
6.6,
8.8,
400,
600,
800,
]
| [
"awkward._v2.operations.describe.parameters",
"awkward._v2.highlevel.Array",
"awkward._v2.operations.structure.with_parameter",
"awkward._v2.operations.describe.type",
"awkward._v2.operations.structure.concatenate",
"numpy.array",
"awkward._v2.contents.UnionArray",
"numpy.arange",
"awkward._v2.opera... | [((641, 690), 'awkward._v2.highlevel.Array', 'ak._v2.highlevel.Array', (['[0.0, 1.1, 2.2, 3.3, 4.4]'], {}), '([0.0, 1.1, 2.2, 3.3, 4.4])\n', (663, 690), True, 'import awkward as ak\n'), ((709, 787), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['plain_plain', '"""__array__"""', '"""zoinks"""'], {}), "(plain_plain, '__array__', 'zoinks')\n", (751, 787), True, 'import awkward as ak\n'), ((820, 910), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['plain_plain', '"""__doc__"""', '"""This is a zoink."""'], {}), "(plain_plain, '__doc__',\n 'This is a zoink.')\n", (862, 910), True, 'import awkward as ak\n'), ((939, 1029), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['array_plain', '"""__doc__"""', '"""This is a zoink."""'], {}), "(array_plain, '__doc__',\n 'This is a zoink.')\n", (981, 1029), True, 'import awkward as ak\n'), ((3534, 3583), 'awkward._v2.highlevel.Array', 'ak._v2.highlevel.Array', (['[0.0, 1.1, 2.2, 3.3, 4.4]'], {}), '([0.0, 1.1, 2.2, 3.3, 4.4])\n', (3556, 3583), True, 'import awkward as ak\n'), ((3602, 3680), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['plain_plain', '"""__array__"""', '"""zoinks"""'], {}), "(plain_plain, '__array__', 'zoinks')\n", (3644, 3680), True, 'import awkward as ak\n'), ((3713, 3803), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['plain_plain', '"""__doc__"""', '"""This is a zoink."""'], {}), "(plain_plain, '__doc__',\n 'This is a zoink.')\n", (3755, 3803), True, 'import awkward as ak\n'), ((3832, 3922), 'awkward._v2.operations.structure.with_parameter', 'ak._v2.operations.structure.with_parameter', (['array_plain', '"""__doc__"""', '"""This is a zoink."""'], {}), "(array_plain, '__doc__',\n 'This is a zoink.')\n", (3874, 3922), True, 'import awkward as ak\n'), ((1052, 1102), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['plain_plain'], {}), '(plain_plain)\n', (1089, 1102), True, 'import awkward as ak\n'), ((1120, 1170), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['array_plain'], {}), '(array_plain)\n', (1157, 1170), True, 'import awkward as ak\n'), ((1209, 1259), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['plain_isdoc'], {}), '(plain_isdoc)\n', (1246, 1259), True, 'import awkward as ak\n'), ((1320, 1370), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['array_isdoc'], {}), '(array_isdoc)\n', (1357, 1370), True, 'import awkward as ak\n'), ((1464, 1508), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (1495, 1508), True, 'import awkward as ak\n'), ((1526, 1570), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (1557, 1570), True, 'import awkward as ak\n'), ((1582, 1626), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (1613, 1626), True, 'import awkward as ak\n'), ((1644, 1688), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (1675, 1688), True, 'import awkward as ak\n'), ((1700, 1744), 'awkward._v2.operations.describe.type', 
'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (1731, 1744), True, 'import awkward as ak\n'), ((1762, 1806), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (1793, 1806), True, 'import awkward as ak\n'), ((1818, 1862), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (1849, 1862), True, 'import awkward as ak\n'), ((1880, 1924), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (1911, 1924), True, 'import awkward as ak\n'), ((1937, 1981), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (1968, 1981), True, 'import awkward as ak\n'), ((1999, 2043), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (2030, 2043), True, 'import awkward as ak\n'), ((2055, 2099), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (2086, 2099), True, 'import awkward as ak\n'), ((2117, 2161), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (2148, 2161), True, 'import awkward as ak\n'), ((2174, 2218), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (2205, 2218), True, 'import awkward as ak\n'), ((2236, 2280), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (2267, 2280), True, 'import awkward as ak\n'), ((2292, 2336), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (2323, 2336), True, 'import awkward as ak\n'), ((2354, 2398), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_plain'], {}), '(plain_plain)\n', (2385, 2398), True, 'import awkward as ak\n'), ((2411, 2455), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (2442, 2455), True, 'import awkward as ak\n'), ((2473, 2517), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (2504, 2517), True, 'import awkward as ak\n'), ((2529, 2573), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (2560, 2573), True, 'import awkward as ak\n'), ((2591, 2635), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_plain'], {}), '(array_plain)\n', (2622, 2635), True, 'import awkward as ak\n'), ((2648, 2692), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (2679, 2692), True, 'import awkward as ak\n'), ((2710, 2754), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (2741, 2754), True, 'import awkward as ak\n'), ((2766, 2810), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['array_isdoc'], {}), '(array_isdoc)\n', (2797, 2810), True, 'import awkward as ak\n'), ((2828, 2872), 'awkward._v2.operations.describe.type', 'ak._v2.operations.describe.type', (['plain_isdoc'], {}), '(plain_isdoc)\n', (2859, 2872), True, 'import awkward as ak\n'), ((3945, 3995), 
'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['plain_plain'], {}), '(plain_plain)\n', (3982, 3995), True, 'import awkward as ak\n'), ((4013, 4063), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['array_plain'], {}), '(array_plain)\n', (4050, 4063), True, 'import awkward as ak\n'), ((4102, 4152), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['plain_isdoc'], {}), '(plain_isdoc)\n', (4139, 4152), True, 'import awkward as ak\n'), ((4213, 4263), 'awkward._v2.operations.describe.parameters', 'ak._v2.operations.describe.parameters', (['array_isdoc'], {}), '(array_isdoc)\n', (4250, 4263), True, 'import awkward as ak\n'), ((8220, 8269), 'awkward._v2.highlevel.Array', 'ak._v2.highlevel.Array', (['[0.0, 1.1, 2.2, 3.3, 4.4]'], {}), '([0.0, 1.1, 2.2, 3.3, 4.4])\n', (8242, 8269), True, 'import awkward as ak\n'), ((8292, 8339), 'awkward._v2.highlevel.Array', 'ak._v2.highlevel.Array', (['[0, 100, 200, 300, 400]'], {}), '([0, 100, 200, 300, 400])\n', (8314, 8339), True, 'import awkward as ak\n'), ((8378, 8427), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 0, 0, 1, 1, 1]', 'np.int8'], {}), '([0, 0, 0, 1, 1, 0, 0, 1, 1, 1], np.int8)\n', (8386, 8427), True, 'import numpy as np\n'), ((8462, 8512), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 3, 4, 2, 3, 4]', 'np.int64'], {}), '([0, 1, 2, 0, 1, 3, 4, 2, 3, 4], np.int64)\n', (8470, 8512), True, 'import numpy as np\n'), ((8563, 8624), 'awkward._v2.contents.UnionArray', 'ak._v2.contents.UnionArray', (['tags', 'index', '[content1, content2]'], {}), '(tags, index, [content1, content2])\n', (8589, 8624), True, 'import awkward as ak\n'), ((4418, 4485), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, plain_plain]'], {}), '([plain_plain, plain_plain])\n', (4457, 4485), True, 'import awkward as ak\n'), ((4574, 4641), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, array_plain]'], {}), '([array_plain, array_plain])\n', (4613, 4641), True, 'import awkward as ak\n'), ((4733, 4800), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, plain_isdoc]'], {}), '([plain_isdoc, plain_isdoc])\n', (4772, 4800), True, 'import awkward as ak\n'), ((4900, 4967), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, array_isdoc]'], {}), '([array_isdoc, array_isdoc])\n', (4939, 4967), True, 'import awkward as ak\n'), ((5087, 5154), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, plain_plain]'], {}), '([plain_plain, plain_plain])\n', (5126, 5154), True, 'import awkward as ak\n'), ((5236, 5303), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, array_plain]'], {}), '([array_plain, array_plain])\n', (5275, 5303), True, 'import awkward as ak\n'), ((5385, 5452), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, plain_isdoc]'], {}), '([plain_isdoc, plain_isdoc])\n', (5424, 5452), True, 'import awkward as ak\n'), ((5534, 5601), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, array_isdoc]'], {}), '([array_isdoc, array_isdoc])\n', (5573, 5601), True, 'import awkward as ak\n'), ((5725, 5792), 
'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, array_plain]'], {}), '([plain_plain, array_plain])\n', (5764, 5792), True, 'import awkward as ak\n'), ((5895, 5962), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, array_isdoc]'], {}), '([plain_isdoc, array_isdoc])\n', (5934, 5962), True, 'import awkward as ak\n'), ((6065, 6132), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, plain_plain]'], {}), '([array_plain, plain_plain])\n', (6104, 6132), True, 'import awkward as ak\n'), ((6235, 6302), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, plain_isdoc]'], {}), '([array_isdoc, plain_isdoc])\n', (6274, 6302), True, 'import awkward as ak\n'), ((6365, 6432), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, array_plain]'], {}), '([plain_plain, array_plain])\n', (6404, 6432), True, 'import awkward as ak\n'), ((6514, 6581), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, array_isdoc]'], {}), '([plain_isdoc, array_isdoc])\n', (6553, 6581), True, 'import awkward as ak\n'), ((6663, 6730), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, plain_plain]'], {}), '([array_plain, plain_plain])\n', (6702, 6730), True, 'import awkward as ak\n'), ((6812, 6879), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, plain_isdoc]'], {}), '([array_isdoc, plain_isdoc])\n', (6851, 6879), True, 'import awkward as ak\n'), ((7003, 7070), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, plain_isdoc]'], {}), '([plain_plain, plain_isdoc])\n', (7042, 7070), True, 'import awkward as ak\n'), ((7159, 7226), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, array_isdoc]'], {}), '([array_plain, array_isdoc])\n', (7198, 7226), True, 'import awkward as ak\n'), ((7332, 7399), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, plain_plain]'], {}), '([plain_isdoc, plain_plain])\n', (7371, 7399), True, 'import awkward as ak\n'), ((7488, 7555), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, array_plain]'], {}), '([array_isdoc, array_plain])\n', (7527, 7555), True, 'import awkward as ak\n'), ((7621, 7688), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_plain, plain_isdoc]'], {}), '([plain_plain, plain_isdoc])\n', (7660, 7688), True, 'import awkward as ak\n'), ((7770, 7837), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_plain, array_isdoc]'], {}), '([array_plain, array_isdoc])\n', (7809, 7837), True, 'import awkward as ak\n'), ((7919, 7986), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[plain_isdoc, plain_plain]'], {}), '([plain_isdoc, plain_plain])\n', (7958, 7986), True, 'import awkward as ak\n'), ((8068, 8135), 'awkward._v2.operations.structure.concatenate', 'ak._v2.operations.structure.concatenate', (['[array_isdoc, array_plain]'], {}), '([array_isdoc, array_plain])\n', (8107, 8135), 
True, 'import awkward as ak\n'), ((2963, 3022), 'awkward._v2.operations.structure.without_parameters', 'ak._v2.operations.structure.without_parameters', (['array_plain'], {}), '(array_plain)\n', (3009, 3022), True, 'import awkward as ak\n'), ((3158, 3217), 'awkward._v2.operations.structure.without_parameters', 'ak._v2.operations.structure.without_parameters', (['plain_isdoc'], {}), '(plain_isdoc)\n', (3204, 3217), True, 'import awkward as ak\n'), ((3399, 3458), 'awkward._v2.operations.structure.without_parameters', 'ak._v2.operations.structure.without_parameters', (['array_isdoc'], {}), '(array_isdoc)\n', (3445, 3458), True, 'import awkward as ak\n'), ((9502, 9523), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (9511, 9523), True, 'import numpy as np\n'), ((9690, 9711), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (9699, 9711), True, 'import numpy as np\n'), ((9928, 9949), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (9937, 9949), True, 'import numpy as np\n'), ((10140, 10161), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (10149, 10161), True, 'import numpy as np\n')] |
from copy import deepcopy
from scipy import linalg as spla
import numpy as np
import pycufsm.analysis
import pycufsm.cfsm
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.cm import jet
import pycufsm.helpers as helpers
import math
import mpl_toolkits.mplot3d as Ax3D
import plotly
### Helper to set up Plotly rendering on Google Colab.
### Plotly is an interactive plotting library that lets you render plots/figures in the browser.
def configure_plotly_browser_state():
    import IPython
    from IPython.display import display  # `display` is only predefined inside notebooks
    display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
<script>
requirejs.config({
paths: {
base: '/static/base',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
},
});
</script>
'''))
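# Hedged usage sketch (not from the original source): on Colab this helper must
# be re-run in every cell that renders a Plotly figure, e.g.
#   configure_plotly_browser_state()
#   fig = disp3D(...)  # any function below that returns a plotly figure
#   fig.show()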
##Cross section
def crossect(node, elem, springs, constraint, flag):
nodeflag = flag[0]
elemflag = flag[1]
matflag = flag[2]
stressflag = flag[3]
stresspicflag = flag[4]
coordflag = flag[5]
constraintsflag = flag[6]
springsflag = flag[7]
originflag = flag[8]
patches = []
    if len(flag) > 9:  # flag[9] (propaxisflag) exists once flag has 10 entries
        propaxisflag = flag[9]
    else:
        propaxisflag = 0
if stresspicflag == 1:
scale = 1
maxstress = max(np.abs(node[:, 7]))
stress = np.append(
node[:, 0].reshape((len(node), 1)), (node[:, 7]/maxstress).reshape((len(node), 1)),
axis=1
)
maxi = np.max(np.abs(node[:, 1:3]))
maxoffset = scale*np.max(maxi)/10
stresscord = np.zeros((len(node), 3))
for i in range(len(stress)):
stresscord[i, 0:3] = [
node[i, 0], node[i, 1] + maxoffset*stress[i, 1], node[i, 2] - maxoffset*stress[i, 1]
]
#Plot the nodes
fig, ax1 = plt.subplots(constrained_layout=True, figsize=(6, 6))
plt.plot(node[:, 1], node[:, 2], 'bo', markersize=2)
#Plot the elements
for i in range((len(elem))):
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
zi = node[nodei, 2]
xj = node[nodej, 1]
zj = node[nodej, 2]
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]*1
points = np.array([[xi - np.sin(theta)*t/2, zi + np.cos(theta)*t/2],
[xj - np.sin(theta)*t/2, zj + np.cos(theta)*t/2],
[xj + np.sin(theta)*t/2, zj - np.cos(theta)*t/2],
[xi + np.sin(theta)*t/2, zi - np.cos(theta)*t/2]])
plt.plot([xi, xj], [zi, zj], 'bo', markersize=0.5)
polygon = Polygon(points, True, ec='b', fc=(1, 1, 0, 1), lw=0.5)
ax1.add_artist(polygon)
#patches.append(polygon)
if stresspicflag == 1:
#get the stresses
sxi = stresscord[nodei, 1]
szi = stresscord[nodei, 2]
sxj = stresscord[nodej, 1]
szj = stresscord[nodej, 2]
#plot the stress in pseudo 3D
if node[nodei, 7] >= 0:
plt.plot([xi, sxi], [zi, szi], 'r')
else:
plt.plot([xi, sxi], [zi, szi], 'b')
if node[nodej, 7] >= 0:
plt.plot([xj, sxj], [zj, szj], 'r')
else:
plt.plot([xj, sxj], [zj, szj], 'b')
plt.plot([sxi, sxj], [szi, szj], 'k')
if stressflag == 1:
plt.text(sxi, szi, str(round(node[nodei, 7], 2)))
plt.text(sxj, szj, str(round(node[nodej, 7], 2)))
#plot the element labels if wanted
if elemflag == 1:
plt.text((xi + xj)/2, (zi + zj)/2, str(elem[i, 0] + 1), fontsize=8)
#plot the materials labels if wanted
if matflag == 1:
plt.text((xi + xj)/2 + 10, (zi + zj)/2 + 10, str(elem[i, 4]), fontsize=8)
        #Plot the stress distribution in 3D if wanted
#####___#####
####Patches of cross section
p = PatchCollection(patches, cmap=jet, alpha=0.4)
#colors = np.zeros(len(patches))
#p.set_array(np.array(colors))
#plt.add_collection(p)
#plt.xlim((x_min - 25, x_max + 25))
#plt.ylim((y_min - 25, y_max + 25))
#Plot the node labels if wanted
if nodeflag == 1:
for z in range(len(node)):
plt.text(node[z, 1], node[z, 2], str(node[z, 0] + 1))
#Plot the stress at the node if wanted
if stressflag == 1 and stresspicflag == 0:
for z in range(len(node)):
plt.text(node[z, 1], node[z, 2], str(round(node[z, 7], 2)))
#Plot the origin point
if originflag == 1:
plt.plot(
0,
0,
'ko',
)
xmax = np.max(np.max(node[:, 1]))
zmax = np.max(np.max([node[:, 2]]))
ax_len = min(xmax, zmax)
plt.plot([0, 0.2*ax_len], [0, 0], 'k')
plt.text(0.22*ax_len, 0, 'x_o')
plt.plot([0, 0], [0, 0.2*ax_len], 'k')
plt.text(0, 0.22*ax_len, 'z_o')
if constraintsflag == 1:
for i in range(len(node)):
dofx = node[i, 3]
dofz = node[i, 4]
dofy = node[i, 5]
dofq = node[i, 6]
if min([dofx, dofz, dofy, dofq]) == 0:
                plt.plot(node[i, 1], node[i, 2], 's')  # square marker; 'sq' is MATLAB syntax and raises in matplotlib
if len(constraint) == 0:
print('No constraints')
else:
for i in range(len(constraint)):
nodee = constraint[i, 0]
nodek = constraint[i, 3]
plt.plot(node[nodee, 1], node[nodee, 2], 'xg')
plt.plot(node[nodek, 1], node[nodek, 2], 'hg')
#Plot the springs if wanted
####SPRINGS AND CONSTRAINTS REMAINING
springsscale = 0.05*np.max(np.max(np.abs(node[:, 1:3])))
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('off')
# plt.savefig('Validation/'+address+'/CS.png')
plt.show()
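# Hedged usage sketch (hypothetical data, not from the original source): draw a
# two-element L-shaped strip with node and element labels only. Column layouts
# assumed from the code above:
#   node: [id, x, z, dof_x, dof_z, dof_y, dof_q, stress]
#   elem: [id, node_i, node_j, thickness, material]
def _demo_crossect():
    demo_node = np.array([[0, 0.0, 0.0, 1, 1, 1, 1, 1.0],
                          [1, 50.0, 0.0, 1, 1, 1, 1, 1.0],
                          [2, 50.0, 100.0, 1, 1, 1, 1, 1.0]])
    demo_elem = np.array([[0, 0, 1, 2.0, 0],
                          [1, 1, 2, 2.0, 0]])
    # flag = [node, elem, mat, stress, stresspic, coord, constraints, springs, origin, propaxis]
    demo_flag = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    crossect(demo_node, demo_elem, springs=[], constraint=[], flag=demo_flag)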
#Cross section displacement function
def dispshap(undef, node, elem, mode, scalem, springs, m_a, BC, SurfPos):
#Determining Scaling Factor for the displaced shape
dispmax = np.max(np.abs(mode))
membersize = np.max(np.max(node[:, 1:2])) - np.min(np.min(node[:, 1:2]))
scale = scalem*membersize/dispmax/10
#Generate and Plot
fig, ax = plt.subplots(constrained_layout=True, figsize=(6, 6))
nnnodes = len(node)
patches = []
defpatches = []
x_max = -np.inf
y_max = -np.inf
x_min = np.inf
y_min = np.inf
defpoints = []
if undef == 1:
for i in range(len(elem)):
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
xj = node[nodej, 1]
zi = node[nodei, 2]
zj = node[nodej, 2]
#PLOT undeformed geometry
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]
points = np.array([[xi - np.sin(theta)*t/2, zi + np.cos(theta)*t/2],
[xj - np.sin(theta)*t/2, zj + np.cos(theta)*t/2],
[xj + np.sin(theta)*t/2, zj - np.cos(theta)*t/2],
[xi + np.sin(theta)*t/2, zi - np.cos(theta)*t/2]])
#Plot axis limits
x_max = max(x_max, np.max(points[:, 0]))
y_max = max(y_max, np.max(points[:, 1]))
x_min = min(x_min, np.min(points[:, 0]))
y_min = min(y_min, np.min(points[:, 1]))
#points = np.random.rand(5 ,2)
polygon = Polygon(points, True, ec='b', fc='y', lw=0.5)
ax.add_artist(polygon)
plt.plot([xi, xj], [zi, zj], 'bo', markersize=2)
#p = PatchCollection(patches, cmap =jet, alpha=0.4)
# colors = np.zeros(len(patches))
# p.set_array(np.array(colors))
#ax.add_collection(p)
#plt.xlim((x_min - 25, x_max + 25))
#plt.ylim((y_min - 25, y_max + 25))
nnodes = len(node)
for i in range(len(elem)):
#Get Element Geometry
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
xj = node[nodej, 1]
zi = node[nodei, 2]
zj = node[nodej, 2]
#Determine the global element displacements
#dbar is the nodal displacements for the element in global
#coordinates dbar=[u1 v1 u2 v2 w1 o1 w2 o2]
dbar = np.zeros((8, 1))
dbarm = np.zeros((8, 1))
dlbarm = np.zeros((3, 9))
totalm = len(m_a)
for z in range(len(m_a)):
dbar[0:2, 0] = mode[4*nnodes*z + 2*(nodei + 1) - 2:4*nnodes*z + 2*(nodei + 1)]
dbar[2:4, 0] = mode[4*nnodes*z + 2*(nodej + 1) - 2:4*nnodes*z + 2*(nodej + 1)]
dbar[4:6, 0] = mode[4*nnodes*z + 2*nnodes + 2*(nodei + 1) - 2:4*nnodes*z + 2*nnodes
+ 2*(nodei + 1)]
dbar[6:8, 0] = mode[4*nnodes*z + 2*nnodes + 2*(nodej + 1) - 2:4*nnodes*z + 2*nnodes
+ 2*(nodej + 1)]
#Transform dbar into local coordinates
phi = np.arctan2(-(zj - zi), (xj - xi))
d = helpers.gammait(phi, dbar)
#Determine additional displacements in each element
links = 10
b = np.sqrt((xj - xi)**2 + (zj - zi)**2)
dl = helpers.shapef(links, d, b)
#Transform additional displacements into global coordinates
dlbar = helpers.gammait2(phi, dl)
cutloc = 1/SurfPos
if BC.startswith('S-S'):
dbarm = dbar*np.sin(m_a[z]*np.pi/cutloc) + dbarm
dlbarm = dlbar*np.sin(m_a[z]*np.pi/cutloc) + dlbarm
elif BC.startswith('C-C'):
dbarm = dbar*np.sin(m_a[z]*np.pi/cutloc)*np.sin(np.pi/cutloc) + dbarm
dlbarm = dlbar*np.sin(m_a[z]*np.pi/cutloc)*np.sin(np.pi/cutloc) + dlbarm
elif BC.startswith('S-C') or BC.startswith('C-S'):
dbarm = dbar*(
np.sin((m_a[z] + 1)*np.pi/cutloc) + (m_a[z] + 1)*np.sin(np.pi/cutloc)/m_a[z]
) + dbarm
dlbarm = dlbar*(
np.sin((m_a[z] + 1)*np.pi/cutloc) + (m_a[z] + 1)*np.sin(np.pi/cutloc)/m_a[z]
) + dlbarm
elif BC.startswith('F-C') or BC.startswith('C-F'):
dbarm = dbar*(1 - np.cos((m_a[z] - 1/2)*np.pi/cutloc)) + dbarm
dlbarm = dlbar*(1 - np.cos((m_a[z] - 1/2)*np.pi/cutloc)) + dlbarm
elif BC.startswith('G-C') or BC.startswith('C-G'):
                dbarm = dbar*(np.sin((m_a[z] - 1/2)*np.pi/cutloc)*np.sin(np.pi/cutloc/2)) + dbarm  # np.pi (bare pi was undefined)
                dlbarm = dlbar*(np.sin((m_a[z] - 1/2)*np.pi/cutloc)*np.sin(np.pi/cutloc/2)) + dlbarm
            #Create a vector of undisplaced coordinates "undisp"
undisp = np.zeros((2, links + 1))
# undisp[:, 0] = np.transpose([xi, zi])
# undisp[:, links] = np.transpose([xj, zj])
for j in range(0, links + 1):
undisp[:, j] = np.transpose([xi + (xj - xi)*(j)/links, zi + (zj - zi)*(j)/links])
            #create a vector of displaced coordinates "disp"
disp = np.zeros((2, links + 1))
disp[:, 0] = np.transpose([xi + scale*dbarm[0], zi + scale*dbarm[4]])
disp[:, links] = np.transpose([xj + scale*dbarm[2], zj + scale*dbarm[6]])
disp[0, 1:links] = undisp[0, 1:links] + scale*dlbarm[0, :]
disp[1, 1:links] = undisp[1, 1:links] + scale*dlbarm[2, :]
#The angle of each link
thetalinks = np.arctan2(
disp[1, 1:links + 1] - disp[1, 0:links], disp[0, 1:links + 1] - disp[0, 0:links]
)
thetalinks = np.append(thetalinks, thetalinks[links - 1])
#Plot the deformed geometry
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]
            #Deformed geometry with appropriate thickness
dispout = np.array([[disp[0, :] + np.sin(thetalinks)*t/2],
[disp[1, :] - np.cos(thetalinks)*t/2]]).T
dispin = np.array([[disp[0, :] - np.sin(thetalinks)*t/2],
[disp[1, :] + np.cos(thetalinks)*t/2]]).T
dispout = dispout.reshape((11, 2))
dispin = dispin.reshape((11, 2))
for j in range(links):
defpoints = np.array([[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]],
[dispin[j + 1, 0], dispin[j + 1, 1]],
[dispout[j + 1, 0], dispout[j + 1, 1]]])
polygon = Polygon(defpoints, True, ec='r', fc='r', lw=0.5)
#defpatches = defpatches.append(polygon)
ax.add_artist(polygon)
plt.plot([disp[0, 0], disp[0, links]], [disp[1, 0], disp[1, links]], 'bo', markersize=2)
dp = PatchCollection(defpatches, cmap=jet, alpha=0.4)
# dcolors = 100*np.random.rand(len(patches))
# dp.set_array(np.array(dcolors))
# ax.add_collection(dp)
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('off')
return disp
# if(figure == 1):
# plt.savefig('Validation/'+address+'/local.png')
# if(figure == 2):
# plt.savefig('Validation/'+address+'/distortional.png')
# if(figure == 3):
# plt.savefig('Validation/'+address+'/global.png')
# if(figure == 4):
# plt.savefig('Validation/'+address+'/global1.png')
# plt.show()
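# Hedged usage sketch (hypothetical data, not from the original source): overlay
# a deformed shape on the same strip as _demo_crossect above. For each
# longitudinal term in m_a, `mode` stacks 2*nnodes (u, v) entries followed by
# 2*nnodes (w, theta) entries, so its length is 4*nnodes*len(m_a).
def _demo_dispshap():
    demo_node = np.array([[0, 0.0, 0.0, 1, 1, 1, 1, 1.0],
                          [1, 50.0, 0.0, 1, 1, 1, 1, 1.0],
                          [2, 50.0, 100.0, 1, 1, 1, 1, 1.0]])
    demo_elem = np.array([[0, 0, 1, 2.0, 0],
                          [1, 1, 2, 2.0, 0]])
    demo_mode = 0.1*np.ones(4*len(demo_node))  # a single longitudinal term, m_a = [1]
    dispshap(undef=1, node=demo_node, elem=demo_elem, mode=demo_mode,
             scalem=1, springs=[], m_a=[1], BC='S-S', SurfPos=0.5)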
def thecurve3(
curvecell, clas, filedisplay, minopt, logopt, clasopt, xmin, xmax, ymin, ymax, modedisplay,
fileindex, modeindex, picpoint
):
curve = curvecell
marker = '.x+*sdv^<'
color1 = 'bgky'
fig, ax2 = plt.subplots(constrained_layout=True, figsize=(6, 6))
for i in range(len(filedisplay)):
mark = ['b', marker[(filedisplay[i]) % 10]]
mark2 = [marker[(filedisplay[i] % 10)], ':']
if logopt == 1:
for j in range(len(modedisplay)):
ax2.semilogx(
curve[:, modedisplay[j] - 1, 0],
curve[:, modedisplay[j] - 1, 1],
color=color1[(j%4)],
marker=mark[1]
)
# ax2.semilogx(curve_sign[:,0], curve_sign[:,1], 'k')
else:
for j in range(len(modedisplay)):
ax2.plot(
curve[:, modedisplay[j] - 1, 0],
curve[:, modedisplay[j] - 1, 1],
color=mark[0],
marker=mark[1]
)
# ax2.plot(curve_sign[:,0], curve_sign[:,1], 'k')
cr = 0
handl = []
if minopt == 1:
for j in range(len(modedisplay)):
for m in range(len(curve[:, 1, 1]) - 2):
load1 = curve[m, modedisplay[j]-1, 1]
load2 = curve[m + 1, modedisplay[j]-1, 1]
load3 = curve[m + 2, modedisplay[j]-1, 1]
if load2 < load1 and load2 <= load3:
cr = cr + 1
                    mstring = "{0:.2f}, {1:.2f}".format(
                        curve[m + 1, modedisplay[j]-1, 0], curve[m + 1, modedisplay[j]-1, 1]
                    )
                    ax2.text(
                        curve[m + 1, modedisplay[j]-1, 0],
                        curve[m + 1, modedisplay[j]-1, 1] - (ymax - ymin)/20,
                        mstring, color='r'
                    )
print(curve[m + 1, modedisplay[j]-1, 0], curve[m + 1, modedisplay[j]-1, 1])
# ax2.text(picpoint[0], picpoint[1],
# "{0:.2f}, {0:.2f}".format(curve[m + 1, j, 0], curve[m + 1, j, 1], color = 'r' )
# )
plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))
plt.xlabel('length')
plt.ylabel('load factor')
plt.title('Buckling curve')
plt.show()
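# Hedged usage sketch (hypothetical data, not from the original source):
# `curvecell` is indexed as curve[length_step, mode - 1, 0 or 1], holding
# (half-wavelength, load factor) pairs. Two modes are supplied because the
# minimum search reads curve[:, 1, 1].
def _demo_thecurve3():
    lengths = np.logspace(1, 3, 30)
    demo_curve = np.zeros((30, 2, 2))
    demo_curve[:, 0, 0] = lengths
    demo_curve[:, 0, 1] = 1 + (np.log10(lengths) - 2)**2  # minimum near length 100
    demo_curve[:, 1, 0] = lengths
    demo_curve[:, 1, 1] = 2 + (np.log10(lengths) - 2)**2
    thecurve3(demo_curve, clas=[], filedisplay=[0], minopt=1, logopt=1,
              clasopt=0, xmin=10, xmax=1000, ymin=0, ymax=4,
              modedisplay=[1, 2], fileindex=0, modeindex=0, picpoint=[100, 1])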
#set the callback of curve
def template_pic(node, elem, geom, sect):
    # If center is not 1, then outer dimensions and inner radii came in, and
    # these need to be corrected to all-centerline for the use of this template:
    # if center != 1:
    #     depth, b_1, l_1, b_2, l_2, rad, thick = preprocess.template_out_to_in(sect)
fig, ax1 = plt.subplots(constrained_layout=True, figsize=(6, 6))
plt.plot(node[:, 1], node[:, 2], 'bo', markersize=2)
#Plot the elements
for i in range((len(elem))):
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
zi = node[nodei, 2]
xj = node[nodej, 1]
zj = node[nodej, 2]
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]*1
points = np.array([[xi - np.sin(theta)*t/2, zi + np.cos(theta)*t/2],
[xj - np.sin(theta)*t/2, zj + np.cos(theta)*t/2],
[xj + np.sin(theta)*t/2, zj - np.cos(theta)*t/2],
[xi + np.sin(theta)*t/2, zi - np.cos(theta)*t/2]])
plt.plot([xi, xj], [zi, zj], 'bo', markersize=0.5)
polygon = Polygon(points, True, ec='b', fc=(1, 1, 0, 1), lw=0.5)
ax1.add_artist(polygon)
    xgeom = []
    zgeom = []
for i in range(len(geom)):
xgeom.append(geom[i]['x'])
zgeom.append(geom[i]['y'])
if sect['type'] == 'Z':
flip_b2 = -1
else:
flip_b2 = 1
    plt.plot(sect['r_1'] + sect['b'], sect['r_2'], 'g.')
    plt.plot(sect['r_1'], sect['r_1'], 'g.')
    plt.plot(flip_b2*sect['r_3'], sect['r_1'] + sect['d'], 'g.')
    plt.plot(flip_b2*(sect['r_3'] + sect['b2']), sect['r_1'] + sect['d'] + sect['r_3'] - sect['r_4'], 'g.')
if sect['r_1'] == 0 and sect['r_2'] == 0 and sect['r_3'] == 0 and sect['r_4'] == 0:
if sect['l_1'] == 0 and sect['l_2'] == 0:
plt.text((xgeom[0]+xgeom[1])/2,(zgeom[0]+zgeom[1])/2,'b_1='+ str(sect['b']),size = 12)
plt.text((xgeom[1]+xgeom[2])/2,(zgeom[2]+zgeom[3])/2, 'h='+str(sect['d']), size = 12)
plt.text((xgeom[3]+xgeom[4])/2,(zgeom[3]+zgeom[4])/2, 'b_2='+str(sect['b2']), size = 12)
else:
plt.text((xgeom[0]+xgeom[1])/2,(zgeom[0]+zgeom[1])/2,'d_1='+ str(sect['l_1']),size = 12)
plt.text((xgeom[1]+xgeom[2])/2,(zgeom[1]+zgeom[2])/2,'b_1='+ str(sect['b']), size = 12)
plt.text((xgeom[2]+xgeom[3])/2,(zgeom[2]+zgeom[3])/2, 'h='+str(sect['d']), size = 12)
plt.text((xgeom[3]+xgeom[4])/2,(zgeom[3]+zgeom[4])/2, 'b_2='+str(sect['b2']), size = 12)
plt.text((xgeom[4]+xgeom[5])/2,(zgeom[4]+zgeom[5])/2, 'd_2='+str(sect['l_2']), size = 12)
plt.text(sect['r_1'] + sect['b'], sect['r_2'], 'theta_1', size = 12)
plt.text(flip_b2*(sect['r_3']+sect['b2']), sect['r_1']+sect['d']+sect['r_3'] -sect['r_4'], 'theta_2', size = 12)
else:
if sect['l_1'] == 0 and sect['l_2'] == 0:
plt.text((xgeom[0]+xgeom[1])/2,(zgeom[0]+zgeom[1])/2,'b_1='+ str(sect['b']),size = 12)
plt.text((xgeom[2]+xgeom[3])/2,(zgeom[2]+zgeom[3])/2, 'h='+str(sect['d']), size = 12)
plt.text((xgeom[4]+xgeom[5])/2,(zgeom[4]+zgeom[5])/2, 'b_2='+str(sect['b2']), size = 12)
plt.text(sect['r_1'], sect['r_1'], 'r_1', size = 12)
plt.text(flip_b2*sect['r_3'], sect['r_1']+sect['d'], 'r_3', size = 12)
else:
plt.text((xgeom[0]+xgeom[1])/2+sect['t'],(zgeom[0]+zgeom[1])/2,'d_1='+ str(sect['l_1']),size = 12)
plt.text((xgeom[2]+xgeom[3])/2,(zgeom[2]+zgeom[3])/2 + sect['t'],'b_1='+ str(sect['b']), size = 12)
plt.text((xgeom[4]+xgeom[5])/2 + sect['t'],(zgeom[4]+zgeom[5])/2, 'h='+str(sect['d']), size = 12)
plt.text((xgeom[6]+xgeom[7])/2,(zgeom[6]+zgeom[7])/2+sect['t'], 'b_2='+str(sect['b2']), size = 12)
plt.text((xgeom[8]+xgeom[9])/2 + sect['t'],(zgeom[8]+zgeom[9])/2, 'd_2='+str(sect['l_2']), size = 12)
plt.text(sect['r_1'] + sect['b'], sect['r_2'], 'r_2, theta_1', size = 12)
plt.text(flip_b2*sect['r_3'], sect['r_1']+sect['d'], 'r_3', size = 12)
plt.text(sect['r_1'], sect['r_1'], 'r_1', size = 12)
plt.text(flip_b2*(sect['r_3']+sect['b2']),(sect['r_1']+sect['d']+sect['r_3']-sect['r_4']), 'r_4', size = 12)
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('off')
# plt.savefig('Validation/'+address+'/CS.png')
plt.show()
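# Hedged usage note (inferred from the code above, not from the original
# source): `sect` must provide the template dimensions {'type', 'b', 'b2', 'd',
# 'l_1', 'l_2', 't', 'r_1', 'r_2', 'r_3', 'r_4'} and `geom` is the ordered list
# of corner points [{'x': ..., 'y': ...}, ...]; consecutive midpoints of that
# list receive the dimension labels.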
def disp3D(undef, node, elem, mode, scalem, springs, m_a, BC, length, elevation, angle):
dispmax = np.max(np.abs(mode))
membersize = np.max(np.max(node[:, 1:2])) - np.min(np.min(node[:, 1:2]))
scale = scalem*membersize/dispmax/10
#Generate and Plot
# fig = Axes3D.figure(figsize=plt.figaspect(0.5)*1.5) #Adjusts the aspect ratio and enlarges the figure (text does not enlarge)
# ax = fig.gca(projection='3d')
# fig, ax = plt.subplots(constrained_layout=True, figsize=(6, 6))
    import matplotlib.pyplot as pltt
    import plotly.express as px
x1, y1, z1 = np.array([0, 0]), np.array([0, 0]), np.array([0, 0])
fig1 = px.line_3d(x=x1, y=y1, z=z1)
# ax = pltt.axes(projection = '3d')
# ax.set_box_aspect((5, length, 3))
# ax.set_aspect([7, 2*length, 14])
nnnodes = len(node)
patches = []
defpatches = []
x_max = -np.inf
y_max = -np.inf
x_min = np.inf
y_min = np.inf
defpoints = []
size1 = np.max([5, int(length/3)])
SurfPos = np.linspace(0.00001, 1, size1)
if undef == 1:
for i in range(len(elem)):
for z in range(len(SurfPos)):
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
xj = node[nodej, 1]
zi = node[nodei, 2]
zj = node[nodej, 2]
#PLOT undeformed geometry
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]
points = np.array([[xi - np.sin(theta)*t/2, zi + np.cos(theta)*t/2],
[xj - np.sin(theta)*t/2, zj + np.cos(theta)*t/2],
[xj + np.sin(theta)*t/2, zj - np.cos(theta)*t/2],
[xi + np.sin(theta)*t/2, zi - np.cos(theta)*t/2]])
#Plot axis limits
x_max = max(x_max, np.max(points[:, 0]))
y_max = max(y_max, np.max(points[:, 1]))
x_min = min(x_min, np.min(points[:, 0]))
y_min = min(y_min, np.min(points[:, 1]))
#points = np.random.rand(5 ,2)
# polygon = Polygon(points, True, ec='b', fc='y', lw=0.5)
# ax.plot( [xi, xj], [SurfPos[z]*length, SurfPos[z]*length],[zi, zj], 'b')
fig1.add_scatter3d(x=np.array([xi, xj]), y=np.array([SurfPos[z]*length, SurfPos[z]*length]),
z=np.array([zi, zj]), marker = dict(size = 0.1, color = 'darkblue'), line = dict(color = 'darkblue'))
if z!=len(SurfPos)-1:
fig1.add_scatter3d(x=np.array([xi, xi]), y=np.array([SurfPos[z]*length, SurfPos[z+1]*length]),
z=np.array([zi, zi]), marker = dict(size = 0.1, color = 'darkblue'), line = dict(color = 'darkblue'))
fig1.add_scatter3d(x=np.array([xj, xj]), y=np.array([SurfPos[z]*length, SurfPos[z+1]*length]),
z=np.array([zj, zj]), marker = dict(size = 0.1, color = 'darkblue'), line = dict(color = 'darkblue'))
# ax.plot([xi, xi], [SurfPos[z]*length, SurfPos[z+1]*length], [zi, zi], 'b')
# ax.plot([xj, xj], [SurfPos[z]*length, SurfPos[z+1]*length], [zj, zj], 'b')
# ax.add_artist(polygon)
# plt.plot([xi, xj], [zi, zj], 'bo', markersize=2)
#p = PatchCollection(patches, cmap =jet, alpha=0.4)
# colors = np.zeros(len(patches))
# p.set_array(np.array(colors))
#ax.add_collection(p)
#plt.xlim((x_min - 25, x_max + 25))
#plt.ylim((y_min - 25, y_max + 25))
nnodes = len(node)
for i in range(len(elem)):
#Get Element Geometry
nodei = int(elem[i, 1])
nodej = int(elem[i, 2])
xi = node[nodei, 1]
xj = node[nodej, 1]
zi = node[nodei, 2]
zj = node[nodej, 2]
#Determine the global element displacements
#dbar is the nodal displacements for the element in global
#coordinates dbar=[u1 v1 u2 v2 w1 o1 w2 o2]
for k in range(len(SurfPos)):
dbar = np.zeros((8, 1))
dbarm = np.zeros((8, 1))
dlbarm = np.zeros((3, 9))
totalm = len(m_a)
for z in range(len(m_a)):
dbar[0:2, 0] = mode[4*nnodes*z + 2*(nodei + 1) - 2:4*nnodes*z + 2*(nodei + 1)]
dbar[2:4, 0] = mode[4*nnodes*z + 2*(nodej + 1) - 2:4*nnodes*z + 2*(nodej + 1)]
dbar[4:6, 0] = mode[4*nnodes*z + 2*nnodes + 2*(nodei + 1) - 2:4*nnodes*z + 2*nnodes
+ 2*(nodei + 1)]
dbar[6:8, 0] = mode[4*nnodes*z + 2*nnodes + 2*(nodej + 1) - 2:4*nnodes*z + 2*nnodes
+ 2*(nodej + 1)]
#Transform dbar into local coordinates
phi = np.arctan2(-(zj - zi), (xj - xi))
d = helpers.gammait(phi, dbar)
#Determine additional displacements in each element
links = 10
b = np.sqrt((xj - xi)**2 + (zj - zi)**2)
dl = helpers.shapef(links, d, b)
#Transform additional displacements into global coordinates
dlbar = helpers.gammait2(phi, dl)
cutloc = 1/SurfPos[k]
if BC.startswith('S-S'):
dbarm = dbar*np.sin(m_a[z]*np.pi/cutloc) + dbarm
dlbarm = dlbar*np.sin(m_a[z]*np.pi/cutloc) + dlbarm
elif BC.startswith('C-C'):
dbarm = dbar*np.sin(m_a[z]*np.pi/cutloc)*np.sin(np.pi/cutloc) + dbarm
dlbarm = dlbar*np.sin(m_a[z]*np.pi/cutloc)*np.sin(np.pi/cutloc) + dlbarm
elif BC.startswith('S-C') or BC.startswith('C-S'):
dbarm = dbar*(
np.sin((m_a[z] + 1)*np.pi/cutloc) + (m_a[z] + 1)*np.sin(np.pi/cutloc)/m_a[z]
) + dbarm
dlbarm = dlbar*(
np.sin((m_a[z] + 1)*np.pi/cutloc) + (m_a[z] + 1)*np.sin(np.pi/cutloc)/m_a[z]
) + dlbarm
elif BC.startswith('F-C') or BC.startswith('C-F'):
dbarm = dbar*(1 - np.cos((m_a[z] - 1/2)*np.pi/cutloc)) + dbarm
dlbarm = dlbar*(1 - np.cos((m_a[z] - 1/2)*np.pi/cutloc)) + dlbarm
elif BC.startswith('G-C') or BC.startswith('C-G'):
                    dbarm = dbar*(np.sin((m_a[z] - 1/2)*np.pi/cutloc)*np.sin(np.pi/cutloc/2)) + dbarm  # np.pi (bare pi was undefined)
                    dlbarm = dlbar*(np.sin((m_a[z] - 1/2)*np.pi/cutloc)*np.sin(np.pi/cutloc/2)) + dlbarm
                #Create a vector of undisplaced coordinates "undisp"
undisp = np.zeros((2, links + 1))
# undisp[:, 0] = np.transpose([xi, zi])
# undisp[:, links] = np.transpose([xj, zj])
for j in range(0, links + 1):
undisp[:, j] = np.transpose([xi + (xj - xi)*(j)/links, zi + (zj - zi)*(j)/links])
                #create a vector of displaced coordinates "disp"
disp = np.zeros((2, links + 1))
disp[:, 0] = np.transpose([xi + scale*dbarm[0], zi + scale*dbarm[4]])
disp[:, links] = np.transpose([xj + scale*dbarm[2], zj + scale*dbarm[6]])
disp[0, 1:links] = undisp[0, 1:links] + scale*dlbarm[0, :]
disp[1, 1:links] = undisp[1, 1:links] + scale*dlbarm[2, :]
#The angle of each link
thetalinks = np.arctan2(
disp[1, 1:links + 1] - disp[1, 0:links], disp[0, 1:links + 1] - disp[0, 0:links]
)
thetalinks = np.append(thetalinks, thetalinks[links - 1])
#Plot the deformed geometry
theta = np.arctan2((zj - zi), (xj - xi))
t = elem[i, 3]
                #Deformed geometry with appropriate thickness
dispout = np.array([[disp[0, :] + np.sin(thetalinks)*t/2],
[disp[1, :] - np.cos(thetalinks)*t/2]]).T
dispin = np.array([[disp[0, :] - np.sin(thetalinks)*t/2],
[disp[1, :] + np.cos(thetalinks)*t/2]]).T
dispout = dispout.reshape((11, 2))
dispin = dispin.reshape((11, 2))
for j in range(links):
defpoints = np.array([[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]],
[dispin[j + 1, 0], dispin[j + 1, 1]],
[dispout[j + 1, 0], dispout[j + 1, 1]]])
# polygon = Polygon(defpoints, True, ec='r', fc='r', lw=0.5)
# #defpatches = defpatches.append(polygon)
# ax.add_artist(polygon)
# ax.plot([disp[0, j], disp[0, j+1]],[SurfPos[k]*length, SurfPos[k]*length], [disp[1, j], disp[1, j+1]], 'r')
fig1.add_scatter3d(x=np.array([disp[0, j], disp[0, j+1]]), y=np.array([SurfPos[k]*length, SurfPos[k]*length]),
z=np.array([disp[1, j], disp[1, j+1]]), marker = dict(size = 0.1, color = 'darkred'), line = dict(color = 'red'))
if k!=0:
# ax.plot([disp1[0, j], disp[0, j]],[SurfPos[k-1]*length, SurfPos[k]*length], [disp1[1, j], disp[1, j]], 'r')
# ax.plot([disp1[0, j+1], disp[0, j+1]], [SurfPos[k-1]*length, SurfPos[k]*length],[disp1[1, j+1], disp[1, j+1]], 'r')
fig1.add_scatter3d(x=np.array([disp1[0, j], disp[0, j]]), y=np.array([SurfPos[k-1]*length, SurfPos[k]*length]),
z=np.array([disp1[1, j], disp[1, j]]), marker = dict(size = 0.1, color = 'darkred'), line = dict(color = 'red'))
fig1.add_scatter3d(x=np.array([disp1[0, j+1], disp[0, j+1]]), y=np.array([SurfPos[k-1]*length, SurfPos[k]*length]),
z=np.array([disp1[1, j+1], disp[1, j+1]]), marker = dict(size = 0.1, color = 'darkred'), line = dict(color = 'red'))
# ax.plot([disp[0, :]], [disp[:, 1]], [SurfPos[k]*length for a in range(8)])
disp1 = deepcopy(disp)
# dp = PatchCollection(defpatches, cmap=jet, alpha=0.4)
# dcolors = 100*np.random.rand(len(patches))
# dp.set_array(np.array(dcolors))
# ax.add_collection(dp)
# plt.gca().set_aspect('equal', adjustable='box')
# plt.axis('off')
#
# ax.view_init(elev = elevation, azim=angle)
# for ii in xrange(0,360,1):
# ax.view_init(elev=10., azim=ii)
# savefig("movie%d.png" % ii)
# print(type(ax))
fig1.update_layout(showlegend=False)
# fig1.update_layout(xaxis = {'showgrid': True},
# yaxis = {'showgrid':True})
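    # Hedged usage note: render the returned figure with fig1.show(); on Colab
    # call configure_plotly_browser_state() first (see the helper at the top).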
return fig1 | [
"matplotlib.pyplot.title",
"numpy.arctan2",
"numpy.abs",
"pycufsm.helpers.gammait2",
"IPython.core.display.HTML",
"matplotlib.patches.Polygon",
"numpy.sin",
"matplotlib.pyplot.gca",
"numpy.round",
"pycufsm.helpers.shapef",
"numpy.transpose",
"plotly.express.line_3d",
"numpy.append",
"numpy... | [((1976, 2029), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)', 'figsize': '(6, 6)'}), '(constrained_layout=True, figsize=(6, 6))\n', (1988, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2087), 'matplotlib.pyplot.plot', 'plt.plot', (['node[:, 1]', 'node[:, 2]', '"""bo"""'], {'markersize': '(2)'}), "(node[:, 1], node[:, 2], 'bo', markersize=2)\n", (2043, 2087), True, 'import matplotlib.pyplot as plt\n'), ((4157, 4202), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'cmap': 'jet', 'alpha': '(0.4)'}), '(patches, cmap=jet, alpha=0.4)\n', (4172, 4202), False, 'from matplotlib.collections import PatchCollection\n'), ((6023, 6038), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6031, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6104, 6106), True, 'import matplotlib.pyplot as plt\n'), ((6512, 6565), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)', 'figsize': '(6, 6)'}), '(constrained_layout=True, figsize=(6, 6))\n', (6524, 6565), True, 'import matplotlib.pyplot as plt\n'), ((13029, 13077), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['defpatches'], {'cmap': 'jet', 'alpha': '(0.4)'}), '(defpatches, cmap=jet, alpha=0.4)\n', (13044, 13077), False, 'from matplotlib.collections import PatchCollection\n'), ((13254, 13269), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (13262, 13269), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13941), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)', 'figsize': '(6, 6)'}), '(constrained_layout=True, figsize=(6, 6))\n', (13900, 13941), True, 'import matplotlib.pyplot as plt\n'), ((16037, 16059), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin, xmax)'], {}), '((xmin, xmax))\n', (16045, 16059), True, 'import matplotlib.pyplot as plt\n'), ((16065, 16087), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (16073, 16087), True, 'import matplotlib.pyplot as plt\n'), ((16093, 16113), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""length"""'], {}), "('length')\n", (16103, 16113), True, 'import matplotlib.pyplot as plt\n'), ((16119, 16144), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""load factor"""'], {}), "('load factor')\n", (16129, 16144), True, 'import matplotlib.pyplot as plt\n'), ((16150, 16177), 'matplotlib.pyplot.title', 'plt.title', (['"""Buckling curve"""'], {}), "('Buckling curve')\n", (16159, 16177), True, 'import matplotlib.pyplot as plt\n'), ((16183, 16193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16191, 16193), True, 'import matplotlib.pyplot as plt\n'), ((16565, 16618), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)', 'figsize': '(6, 6)'}), '(constrained_layout=True, figsize=(6, 6))\n', (16577, 16618), True, 'import matplotlib.pyplot as plt\n'), ((16624, 16676), 'matplotlib.pyplot.plot', 'plt.plot', (['node[:, 1]', 'node[:, 2]', '"""bo"""'], {'markersize': '(2)'}), "(node[:, 1], node[:, 2], 'bo', markersize=2)\n", (16632, 16676), True, 'import matplotlib.pyplot as plt\n'), ((17694, 17746), 'matplotlib.pyplot.plot', 'plt.plot', (["(sect['r_1'] + sect['b'])", "sect['r_2']", '"""g."""'], {}), "(sect['r_1'] + sect['b'], sect['r_2'], 'g.')\n", (17702, 17746), True, 'import matplotlib.pyplot as plt\n'), ((17748, 17788), 'matplotlib.pyplot.plot', 'plt.plot', (["sect['r_1']", 
"sect['r_1']", '"""g."""'], {}), "(sect['r_1'], sect['r_1'], 'g.')\n", (17756, 17788), True, 'import matplotlib.pyplot as plt\n'), ((17792, 17854), 'matplotlib.pyplot.plot', 'plt.plot', (["(flip_b2 * sect['r_3'])", "(sect['r_1'] + sect['d'])", '"""g."""'], {}), "(flip_b2 * sect['r_3'], sect['r_1'] + sect['d'], 'g.')\n", (17800, 17854), True, 'import matplotlib.pyplot as plt\n'), ((17856, 17965), 'matplotlib.pyplot.plot', 'plt.plot', (["(flip_b2 * (sect['r_3'] + sect['b2']))", "(sect['r_1'] + sect['d'] + sect['r_3'] - sect['r_4'])", '"""g."""'], {}), "(flip_b2 * (sect['r_3'] + sect['b2']), sect['r_1'] + sect['d'] +\n sect['r_3'] - sect['r_4'], 'g.')\n", (17864, 17965), True, 'import matplotlib.pyplot as plt\n'), ((20635, 20650), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20643, 20650), True, 'import matplotlib.pyplot as plt\n'), ((20708, 20718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20716, 20718), True, 'import matplotlib.pyplot as plt\n'), ((21423, 21451), 'plotly.express.line_3d', 'px.line_3d', ([], {'x': 'x1', 'y': 'y1', 'z': 'z1'}), '(x=x1, y=y1, z=z1)\n', (21433, 21451), True, 'import plotly.express as px\n'), ((21796, 21824), 'numpy.linspace', 'np.linspace', (['(1e-05)', '(1)', 'size1'], {}), '(1e-05, 1, size1)\n', (21807, 21824), True, 'import numpy as np\n'), ((598, 943), 'IPython.core.display.HTML', 'IPython.core.display.HTML', (['"""\n <script src="/static/components/requirejs/require.js"></script>\n <script>\n requirejs.config({\n paths: {\n base: \'/static/base\',\n plotly: \'https://cdn.plot.ly/plotly-1.5.1.min.js?noext\',\n },\n });\n </script>\n """'], {}), '(\n """\n <script src="/static/components/requirejs/require.js"></script>\n <script>\n requirejs.config({\n paths: {\n base: \'/static/base\',\n plotly: \'https://cdn.plot.ly/plotly-1.5.1.min.js?noext\',\n },\n });\n </script>\n """\n )\n', (623, 943), False, 'import IPython\n'), ((2345, 2373), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (2355, 2373), True, 'import numpy as np\n'), ((2726, 2776), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi, xj]', '[zi, zj]', '"""bo"""'], {'markersize': '(0.5)'}), "([xi, xj], [zi, zj], 'bo', markersize=0.5)\n", (2734, 2776), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2850), 'matplotlib.patches.Polygon', 'Polygon', (['points', '(True)'], {'ec': '"""b"""', 'fc': '(1, 1, 0, 1)', 'lw': '(0.5)'}), "(points, True, ec='b', fc=(1, 1, 0, 1), lw=0.5)\n", (2803, 2850), False, 'from matplotlib.patches import Polygon\n'), ((4813, 4833), 'matplotlib.pyplot.plot', 'plt.plot', (['(0)', '(0)', '"""ko"""'], {}), "(0, 0, 'ko')\n", (4821, 4833), True, 'import matplotlib.pyplot as plt\n'), ((5016, 5056), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0.2 * ax_len]', '[0, 0]', '"""k"""'], {}), "([0, 0.2 * ax_len], [0, 0], 'k')\n", (5024, 5056), True, 'import matplotlib.pyplot as plt\n'), ((5064, 5097), 'matplotlib.pyplot.text', 'plt.text', (['(0.22 * ax_len)', '(0)', '"""x_o"""'], {}), "(0.22 * ax_len, 0, 'x_o')\n", (5072, 5097), True, 'import matplotlib.pyplot as plt\n'), ((5105, 5145), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[0, 0.2 * ax_len]', '"""k"""'], {}), "([0, 0], [0, 0.2 * ax_len], 'k')\n", (5113, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5153, 5186), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0.22 * ax_len)', '"""z_o"""'], {}), "(0, 0.22 * ax_len, 'z_o')\n", (5161, 5186), True, 'import matplotlib.pyplot as plt\n'), ((6339, 6351), 'numpy.abs', 'np.abs', (['mode'], {}), 
'(mode)\n', (6345, 6351), True, 'import numpy as np\n'), ((8603, 8619), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {}), '((8, 1))\n', (8611, 8619), True, 'import numpy as np\n'), ((8637, 8653), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {}), '((8, 1))\n', (8645, 8653), True, 'import numpy as np\n'), ((8672, 8688), 'numpy.zeros', 'np.zeros', (['(3, 9)'], {}), '((3, 9))\n', (8680, 8688), True, 'import numpy as np\n'), ((11056, 11080), 'numpy.zeros', 'np.zeros', (['(2, links + 1)'], {}), '((2, links + 1))\n', (11064, 11080), True, 'import numpy as np\n'), ((11391, 11415), 'numpy.zeros', 'np.zeros', (['(2, links + 1)'], {}), '((2, links + 1))\n', (11399, 11415), True, 'import numpy as np\n'), ((11438, 11498), 'numpy.transpose', 'np.transpose', (['[xi + scale * dbarm[0], zi + scale * dbarm[4]]'], {}), '([xi + scale * dbarm[0], zi + scale * dbarm[4]])\n', (11450, 11498), True, 'import numpy as np\n'), ((11521, 11581), 'numpy.transpose', 'np.transpose', (['[xj + scale * dbarm[2], zj + scale * dbarm[6]]'], {}), '([xj + scale * dbarm[2], zj + scale * dbarm[6]])\n', (11533, 11581), True, 'import numpy as np\n'), ((11769, 11865), 'numpy.arctan2', 'np.arctan2', (['(disp[1, 1:links + 1] - disp[1, 0:links])', '(disp[0, 1:links + 1] - disp[0, 0:links])'], {}), '(disp[1, 1:links + 1] - disp[1, 0:links], disp[0, 1:links + 1] -\n disp[0, 0:links])\n', (11779, 11865), True, 'import numpy as np\n'), ((11908, 11952), 'numpy.append', 'np.append', (['thetalinks', 'thetalinks[links - 1]'], {}), '(thetalinks, thetalinks[links - 1])\n', (11917, 11952), True, 'import numpy as np\n'), ((12007, 12035), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (12017, 12035), True, 'import numpy as np\n'), ((12930, 13022), 'matplotlib.pyplot.plot', 'plt.plot', (['[disp[0, 0], disp[0, links]]', '[disp[1, 0], disp[1, links]]', '"""bo"""'], {'markersize': '(2)'}), "([disp[0, 0], disp[0, links]], [disp[1, 0], disp[1, links]], 'bo',\n markersize=2)\n", (12938, 13022), True, 'import matplotlib.pyplot as plt\n'), ((16934, 16962), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (16944, 16962), True, 'import numpy as np\n'), ((17315, 17365), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi, xj]', '[zi, zj]', '"""bo"""'], {'markersize': '(0.5)'}), "([xi, xj], [zi, zj], 'bo', markersize=0.5)\n", (17323, 17365), True, 'import matplotlib.pyplot as plt\n'), ((17385, 17439), 'matplotlib.patches.Polygon', 'Polygon', (['points', '(True)'], {'ec': '"""b"""', 'fc': '(1, 1, 0, 1)', 'lw': '(0.5)'}), "(points, True, ec='b', fc=(1, 1, 0, 1), lw=0.5)\n", (17392, 17439), False, 'from matplotlib.patches import Polygon\n'), ((20837, 20849), 'numpy.abs', 'np.abs', (['mode'], {}), '(mode)\n', (20843, 20849), True, 'import numpy as np\n'), ((21358, 21374), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (21366, 21374), True, 'import numpy as np\n'), ((21376, 21392), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (21384, 21392), True, 'import numpy as np\n'), ((21394, 21410), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (21402, 21410), True, 'import numpy as np\n'), ((1436, 1454), 'numpy.abs', 'np.abs', (['node[:, 7]'], {}), '(node[:, 7])\n', (1442, 1454), True, 'import numpy as np\n'), ((1636, 1656), 'numpy.abs', 'np.abs', (['node[:, 1:3]'], {}), '(node[:, 1:3])\n', (1642, 1656), True, 'import numpy as np\n'), ((3521, 3558), 'matplotlib.pyplot.plot', 'plt.plot', (['[sxi, sxj]', '[szi, szj]', '"""k"""'], {}), "([sxi, sxj], [szi, szj], 'k')\n", (3529, 
3558), True, 'import matplotlib.pyplot as plt\n'), ((4908, 4926), 'numpy.max', 'np.max', (['node[:, 1]'], {}), '(node[:, 1])\n', (4914, 4926), True, 'import numpy as np\n'), ((4951, 4971), 'numpy.max', 'np.max', (['[node[:, 2]]'], {}), '([node[:, 2]])\n', (4957, 4971), True, 'import numpy as np\n'), ((5970, 5979), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5977, 5979), True, 'import matplotlib.pyplot as plt\n'), ((6378, 6398), 'numpy.max', 'np.max', (['node[:, 1:2]'], {}), '(node[:, 1:2])\n', (6384, 6398), True, 'import numpy as np\n'), ((6409, 6429), 'numpy.min', 'np.min', (['node[:, 1:2]'], {}), '(node[:, 1:2])\n', (6415, 6429), True, 'import numpy as np\n'), ((7054, 7082), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (7064, 7082), True, 'import numpy as np\n'), ((7758, 7803), 'matplotlib.patches.Polygon', 'Polygon', (['points', '(True)'], {'ec': '"""b"""', 'fc': '"""y"""', 'lw': '(0.5)'}), "(points, True, ec='b', fc='y', lw=0.5)\n", (7765, 7803), False, 'from matplotlib.patches import Polygon\n'), ((7853, 7901), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi, xj]', '[zi, zj]', '"""bo"""'], {'markersize': '(2)'}), "([xi, xj], [zi, zj], 'bo', markersize=2)\n", (7861, 7901), True, 'import matplotlib.pyplot as plt\n'), ((9300, 9331), 'numpy.arctan2', 'np.arctan2', (['(-(zj - zi))', '(xj - xi)'], {}), '(-(zj - zi), xj - xi)\n', (9310, 9331), True, 'import numpy as np\n'), ((9351, 9377), 'pycufsm.helpers.gammait', 'helpers.gammait', (['phi', 'dbar'], {}), '(phi, dbar)\n', (9366, 9377), True, 'import pycufsm.helpers as helpers\n'), ((9484, 9524), 'numpy.sqrt', 'np.sqrt', (['((xj - xi) ** 2 + (zj - zi) ** 2)'], {}), '((xj - xi) ** 2 + (zj - zi) ** 2)\n', (9491, 9524), True, 'import numpy as np\n'), ((9539, 9566), 'pycufsm.helpers.shapef', 'helpers.shapef', (['links', 'd', 'b'], {}), '(links, d, b)\n', (9553, 9566), True, 'import pycufsm.helpers as helpers\n'), ((9661, 9686), 'pycufsm.helpers.gammait2', 'helpers.gammait2', (['phi', 'dl'], {}), '(phi, dl)\n', (9677, 9686), True, 'import pycufsm.helpers as helpers\n'), ((11250, 11320), 'numpy.transpose', 'np.transpose', (['[xi + (xj - xi) * j / links, zi + (zj - zi) * j / links]'], {}), '([xi + (xj - xi) * j / links, zi + (zj - zi) * j / links])\n', (11262, 11320), True, 'import numpy as np\n'), ((12538, 12697), 'numpy.array', 'np.array', (['[[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]], [dispin[j + \n 1, 0], dispin[j + 1, 1]], [dispout[j + 1, 0], dispout[j + 1, 1]]]'], {}), '([[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]], [\n dispin[j + 1, 0], dispin[j + 1, 1]], [dispout[j + 1, 0], dispout[j + 1,\n 1]]])\n', (12546, 12697), True, 'import numpy as np\n'), ((12782, 12830), 'matplotlib.patches.Polygon', 'Polygon', (['defpoints', '(True)'], {'ec': '"""r"""', 'fc': '"""r"""', 'lw': '(0.5)'}), "(defpoints, True, ec='r', fc='r', lw=0.5)\n", (12789, 12830), False, 'from matplotlib.patches import Polygon\n'), ((13201, 13210), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13208, 13210), True, 'import matplotlib.pyplot as plt\n'), ((18928, 18994), 'matplotlib.pyplot.text', 'plt.text', (["(sect['r_1'] + sect['b'])", "sect['r_2']", '"""theta_1"""'], {'size': '(12)'}), "(sect['r_1'] + sect['b'], sect['r_2'], 'theta_1', size=12)\n", (18936, 18994), True, 'import matplotlib.pyplot as plt\n'), ((19010, 19133), 'matplotlib.pyplot.text', 'plt.text', (["(flip_b2 * (sect['r_3'] + sect['b2']))", "(sect['r_1'] + sect['d'] + sect['r_3'] - sect['r_4'])", '"""theta_2"""'], 
{'size': '(12)'}), "(flip_b2 * (sect['r_3'] + sect['b2']), sect['r_1'] + sect['d'] +\n sect['r_3'] - sect['r_4'], 'theta_2', size=12)\n", (19018, 19133), True, 'import matplotlib.pyplot as plt\n'), ((19499, 19549), 'matplotlib.pyplot.text', 'plt.text', (["sect['r_1']", "sect['r_1']", '"""r_1"""'], {'size': '(12)'}), "(sect['r_1'], sect['r_1'], 'r_1', size=12)\n", (19507, 19549), True, 'import matplotlib.pyplot as plt\n'), ((19565, 19637), 'matplotlib.pyplot.text', 'plt.text', (["(flip_b2 * sect['r_3'])", "(sect['r_1'] + sect['d'])", '"""r_3"""'], {'size': '(12)'}), "(flip_b2 * sect['r_3'], sect['r_1'] + sect['d'], 'r_3', size=12)\n", (19573, 19637), True, 'import matplotlib.pyplot as plt\n'), ((20227, 20298), 'matplotlib.pyplot.text', 'plt.text', (["(sect['r_1'] + sect['b'])", "sect['r_2']", '"""r_2, theta_1"""'], {'size': '(12)'}), "(sect['r_1'] + sect['b'], sect['r_2'], 'r_2, theta_1', size=12)\n", (20235, 20298), True, 'import matplotlib.pyplot as plt\n'), ((20314, 20386), 'matplotlib.pyplot.text', 'plt.text', (["(flip_b2 * sect['r_3'])", "(sect['r_1'] + sect['d'])", '"""r_3"""'], {'size': '(12)'}), "(flip_b2 * sect['r_3'], sect['r_1'] + sect['d'], 'r_3', size=12)\n", (20322, 20386), True, 'import matplotlib.pyplot as plt\n'), ((20401, 20451), 'matplotlib.pyplot.text', 'plt.text', (["sect['r_1']", "sect['r_1']", '"""r_1"""'], {'size': '(12)'}), "(sect['r_1'], sect['r_1'], 'r_1', size=12)\n", (20409, 20451), True, 'import matplotlib.pyplot as plt\n'), ((20467, 20586), 'matplotlib.pyplot.text', 'plt.text', (["(flip_b2 * (sect['r_3'] + sect['b2']))", "(sect['r_1'] + sect['d'] + sect['r_3'] - sect['r_4'])", '"""r_4"""'], {'size': '(12)'}), "(flip_b2 * (sect['r_3'] + sect['b2']), sect['r_1'] + sect['d'] +\n sect['r_3'] - sect['r_4'], 'r_4', size=12)\n", (20475, 20586), True, 'import matplotlib.pyplot as plt\n'), ((20582, 20591), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20589, 20591), True, 'import matplotlib.pyplot as plt\n'), ((20876, 20896), 'numpy.max', 'np.max', (['node[:, 1:2]'], {}), '(node[:, 1:2])\n', (20882, 20896), True, 'import numpy as np\n'), ((20907, 20927), 'numpy.min', 'np.min', (['node[:, 1:2]'], {}), '(node[:, 1:2])\n', (20913, 20927), True, 'import numpy as np\n'), ((24898, 24914), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {}), '((8, 1))\n', (24906, 24914), True, 'import numpy as np\n'), ((24936, 24952), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {}), '((8, 1))\n', (24944, 24952), True, 'import numpy as np\n'), ((24975, 24991), 'numpy.zeros', 'np.zeros', (['(3, 9)'], {}), '((3, 9))\n', (24983, 24991), True, 'import numpy as np\n'), ((27512, 27536), 'numpy.zeros', 'np.zeros', (['(2, links + 1)'], {}), '((2, links + 1))\n', (27520, 27536), True, 'import numpy as np\n'), ((27871, 27895), 'numpy.zeros', 'np.zeros', (['(2, links + 1)'], {}), '((2, links + 1))\n', (27879, 27895), True, 'import numpy as np\n'), ((27922, 27982), 'numpy.transpose', 'np.transpose', (['[xi + scale * dbarm[0], zi + scale * dbarm[4]]'], {}), '([xi + scale * dbarm[0], zi + scale * dbarm[4]])\n', (27934, 27982), True, 'import numpy as np\n'), ((28009, 28069), 'numpy.transpose', 'np.transpose', (['[xj + scale * dbarm[2], zj + scale * dbarm[6]]'], {}), '([xj + scale * dbarm[2], zj + scale * dbarm[6]])\n', (28021, 28069), True, 'import numpy as np\n'), ((28273, 28369), 'numpy.arctan2', 'np.arctan2', (['(disp[1, 1:links + 1] - disp[1, 0:links])', '(disp[0, 1:links + 1] - disp[0, 0:links])'], {}), '(disp[1, 1:links + 1] - disp[1, 0:links], disp[0, 1:links + 1] -\n disp[0, 0:links])\n', (28283, 
28369), True, 'import numpy as np\n'), ((28424, 28468), 'numpy.append', 'np.append', (['thetalinks', 'thetalinks[links - 1]'], {}), '(thetalinks, thetalinks[links - 1])\n', (28433, 28468), True, 'import numpy as np\n'), ((28531, 28559), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (28541, 28559), True, 'import numpy as np\n'), ((30867, 30881), 'copy.deepcopy', 'deepcopy', (['disp'], {}), '(disp)\n', (30875, 30881), False, 'from copy import deepcopy\n'), ((1685, 1697), 'numpy.max', 'np.max', (['maxi'], {}), '(maxi)\n', (1691, 1697), True, 'import numpy as np\n'), ((3238, 3273), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi, sxi]', '[zi, szi]', '"""r"""'], {}), "([xi, sxi], [zi, szi], 'r')\n", (3246, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3310, 3345), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi, sxi]', '[zi, szi]', '"""b"""'], {}), "([xi, sxi], [zi, szi], 'b')\n", (3318, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3435), 'matplotlib.pyplot.plot', 'plt.plot', (['[xj, sxj]', '[zj, szj]', '"""r"""'], {}), "([xj, sxj], [zj, szj], 'r')\n", (3408, 3435), True, 'import matplotlib.pyplot as plt\n'), ((3472, 3507), 'matplotlib.pyplot.plot', 'plt.plot', (['[xj, sxj]', '[zj, szj]', '"""b"""'], {}), "([xj, sxj], [zj, szj], 'b')\n", (3480, 3507), True, 'import matplotlib.pyplot as plt\n'), ((5444, 5482), 'matplotlib.pyplot.plot', 'plt.plot', (['node[i, 1]', 'node[i, 2]', '"""sq"""'], {}), "(node[i, 1], node[i, 2], 'sq')\n", (5452, 5482), True, 'import matplotlib.pyplot as plt\n'), ((5716, 5762), 'matplotlib.pyplot.plot', 'plt.plot', (['node[nodee, 1]', 'node[nodee, 2]', '"""xg"""'], {}), "(node[nodee, 1], node[nodee, 2], 'xg')\n", (5724, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5780, 5826), 'matplotlib.pyplot.plot', 'plt.plot', (['node[nodek, 1]', 'node[nodek, 2]', '"""hg"""'], {}), "(node[nodek, 1], node[nodek, 2], 'hg')\n", (5788, 5826), True, 'import matplotlib.pyplot as plt\n'), ((5942, 5962), 'numpy.abs', 'np.abs', (['node[:, 1:3]'], {}), '(node[:, 1:3])\n', (5948, 5962), True, 'import numpy as np\n'), ((7507, 7527), 'numpy.max', 'np.max', (['points[:, 0]'], {}), '(points[:, 0])\n', (7513, 7527), True, 'import numpy as np\n'), ((7561, 7581), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (7567, 7581), True, 'import numpy as np\n'), ((7615, 7635), 'numpy.min', 'np.min', (['points[:, 0]'], {}), '(points[:, 0])\n', (7621, 7635), True, 'import numpy as np\n'), ((7669, 7689), 'numpy.min', 'np.min', (['points[:, 1]'], {}), '(points[:, 1])\n', (7675, 7689), True, 'import numpy as np\n'), ((22224, 22252), 'numpy.arctan2', 'np.arctan2', (['(zj - zi)', '(xj - xi)'], {}), '(zj - zi, xj - xi)\n', (22234, 22252), True, 'import numpy as np\n'), ((25643, 25674), 'numpy.arctan2', 'np.arctan2', (['(-(zj - zi))', '(xj - xi)'], {}), '(-(zj - zi), xj - xi)\n', (25653, 25674), True, 'import numpy as np\n'), ((25698, 25724), 'pycufsm.helpers.gammait', 'helpers.gammait', (['phi', 'dbar'], {}), '(phi, dbar)\n', (25713, 25724), True, 'import pycufsm.helpers as helpers\n'), ((25843, 25883), 'numpy.sqrt', 'np.sqrt', (['((xj - xi) ** 2 + (zj - zi) ** 2)'], {}), '((xj - xi) ** 2 + (zj - zi) ** 2)\n', (25850, 25883), True, 'import numpy as np\n'), ((25902, 25929), 'pycufsm.helpers.shapef', 'helpers.shapef', (['links', 'd', 'b'], {}), '(links, d, b)\n', (25916, 25929), True, 'import pycufsm.helpers as helpers\n'), ((26032, 26057), 'pycufsm.helpers.gammait2', 'helpers.gammait2', (['phi', 'dl'], {}), '(phi, dl)\n', (26048, 
26057), True, 'import pycufsm.helpers as helpers\n'), ((27722, 27792), 'numpy.transpose', 'np.transpose', (['[xi + (xj - xi) * j / links, zi + (zj - zi) * j / links]'], {}), '([xi + (xj - xi) * j / links, zi + (zj - zi) * j / links])\n', (27734, 27792), True, 'import numpy as np\n'), ((29099, 29258), 'numpy.array', 'np.array', (['[[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]], [dispin[j + \n 1, 0], dispin[j + 1, 1]], [dispout[j + 1, 0], dispout[j + 1, 1]]]'], {}), '([[dispout[j, 0], dispout[j, 1]], [dispin[j, 0], dispin[j, 1]], [\n dispin[j + 1, 0], dispin[j + 1, 1]], [dispout[j + 1, 0], dispout[j + 1,\n 1]]])\n', (29107, 29258), True, 'import numpy as np\n'), ((22696, 22716), 'numpy.max', 'np.max', (['points[:, 0]'], {}), '(points[:, 0])\n', (22702, 22716), True, 'import numpy as np\n'), ((22754, 22774), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (22760, 22774), True, 'import numpy as np\n'), ((22812, 22832), 'numpy.min', 'np.min', (['points[:, 0]'], {}), '(points[:, 0])\n', (22818, 22832), True, 'import numpy as np\n'), ((22870, 22890), 'numpy.min', 'np.min', (['points[:, 1]'], {}), '(points[:, 1])\n', (22876, 22890), True, 'import numpy as np\n'), ((9787, 9818), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (9793, 9818), True, 'import numpy as np\n'), ((9855, 9886), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (9861, 9886), True, 'import numpy as np\n'), ((23145, 23163), 'numpy.array', 'np.array', (['[xi, xj]'], {}), '([xi, xj])\n', (23153, 23163), True, 'import numpy as np\n'), ((23167, 23219), 'numpy.array', 'np.array', (['[SurfPos[z] * length, SurfPos[z] * length]'], {}), '([SurfPos[z] * length, SurfPos[z] * length])\n', (23175, 23219), True, 'import numpy as np\n'), ((23237, 23255), 'numpy.array', 'np.array', (['[zi, zj]'], {}), '([zi, zj])\n', (23245, 23255), True, 'import numpy as np\n'), ((29669, 29707), 'numpy.array', 'np.array', (['[disp[0, j], disp[0, j + 1]]'], {}), '([disp[0, j], disp[0, j + 1]])\n', (29677, 29707), True, 'import numpy as np\n'), ((29709, 29761), 'numpy.array', 'np.array', (['[SurfPos[k] * length, SurfPos[k] * length]'], {}), '([SurfPos[k] * length, SurfPos[k] * length])\n', (29717, 29761), True, 'import numpy as np\n'), ((29779, 29817), 'numpy.array', 'np.array', (['[disp[1, j], disp[1, j + 1]]'], {}), '([disp[1, j], disp[1, j + 1]])\n', (29787, 29817), True, 'import numpy as np\n'), ((9990, 10012), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (9996, 10012), True, 'import numpy as np\n'), ((10079, 10101), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (10085, 10101), True, 'import numpy as np\n'), ((23418, 23436), 'numpy.array', 'np.array', (['[xi, xi]'], {}), '([xi, xi])\n', (23426, 23436), True, 'import numpy as np\n'), ((23440, 23496), 'numpy.array', 'np.array', (['[SurfPos[z] * length, SurfPos[z + 1] * length]'], {}), '([SurfPos[z] * length, SurfPos[z + 1] * length])\n', (23448, 23496), True, 'import numpy as np\n'), ((23512, 23530), 'numpy.array', 'np.array', (['[zi, zi]'], {}), '([zi, zi])\n', (23520, 23530), True, 'import numpy as np\n'), ((23654, 23672), 'numpy.array', 'np.array', (['[xj, xj]'], {}), '([xj, xj])\n', (23662, 23672), True, 'import numpy as np\n'), ((23676, 23732), 'numpy.array', 'np.array', (['[SurfPos[z] * length, SurfPos[z + 1] * length]'], {}), '([SurfPos[z] * length, SurfPos[z + 1] * length])\n', (23684, 23732), True, 'import numpy as np\n'), 
((23748, 23766), 'numpy.array', 'np.array', (['[zj, zj]'], {}), '([zj, zj])\n', (23756, 23766), True, 'import numpy as np\n'), ((26173, 26204), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (26179, 26204), True, 'import numpy as np\n'), ((26245, 26276), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (26251, 26276), True, 'import numpy as np\n'), ((30248, 30283), 'numpy.array', 'np.array', (['[disp1[0, j], disp[0, j]]'], {}), '([disp1[0, j], disp[0, j]])\n', (30256, 30283), True, 'import numpy as np\n'), ((30287, 30343), 'numpy.array', 'np.array', (['[SurfPos[k - 1] * length, SurfPos[k] * length]'], {}), '([SurfPos[k - 1] * length, SurfPos[k] * length])\n', (30295, 30343), True, 'import numpy as np\n'), ((30366, 30401), 'numpy.array', 'np.array', (['[disp1[1, j], disp[1, j]]'], {}), '([disp1[1, j], disp[1, j]])\n', (30374, 30401), True, 'import numpy as np\n'), ((30519, 30562), 'numpy.array', 'np.array', (['[disp1[0, j + 1], disp[0, j + 1]]'], {}), '([disp1[0, j + 1], disp[0, j + 1]])\n', (30527, 30562), True, 'import numpy as np\n'), ((30562, 30618), 'numpy.array', 'np.array', (['[SurfPos[k - 1] * length, SurfPos[k] * length]'], {}), '([SurfPos[k - 1] * length, SurfPos[k] * length])\n', (30570, 30618), True, 'import numpy as np\n'), ((30641, 30684), 'numpy.array', 'np.array', (['[disp1[1, j + 1], disp[1, j + 1]]'], {}), '([disp1[1, j + 1], disp[1, j + 1]])\n', (30649, 30684), True, 'import numpy as np\n'), ((2438, 2451), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2444, 2451), True, 'import numpy as np\n'), ((2462, 2475), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2468, 2475), True, 'import numpy as np\n'), ((2516, 2529), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2522, 2529), True, 'import numpy as np\n'), ((2540, 2553), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2546, 2553), True, 'import numpy as np\n'), ((2594, 2607), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2600, 2607), True, 'import numpy as np\n'), ((2618, 2631), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2624, 2631), True, 'import numpy as np\n'), ((2672, 2685), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2678, 2685), True, 'import numpy as np\n'), ((2696, 2709), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2702, 2709), True, 'import numpy as np\n'), ((9962, 9993), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (9968, 9993), True, 'import numpy as np\n'), ((10051, 10082), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (10057, 10082), True, 'import numpy as np\n'), ((17027, 17040), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (17033, 17040), True, 'import numpy as np\n'), ((17051, 17064), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (17057, 17064), True, 'import numpy as np\n'), ((17105, 17118), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (17111, 17118), True, 'import numpy as np\n'), ((17129, 17142), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (17135, 17142), True, 'import numpy as np\n'), ((17183, 17196), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (17189, 17196), True, 'import numpy as np\n'), ((17207, 17220), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (17213, 17220), True, 'import numpy as np\n'), ((17261, 17274), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (17267, 17274), True, 'import numpy as np\n'), 
((17285, 17298), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (17291, 17298), True, 'import numpy as np\n'), ((26388, 26410), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (26394, 26410), True, 'import numpy as np\n'), ((26481, 26503), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (26487, 26503), True, 'import numpy as np\n'), ((7153, 7166), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7159, 7166), True, 'import numpy as np\n'), ((7177, 7190), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7183, 7190), True, 'import numpy as np\n'), ((7235, 7248), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7241, 7248), True, 'import numpy as np\n'), ((7259, 7272), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7265, 7272), True, 'import numpy as np\n'), ((7317, 7330), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7323, 7330), True, 'import numpy as np\n'), ((7341, 7354), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7347, 7354), True, 'import numpy as np\n'), ((7399, 7412), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7405, 7412), True, 'import numpy as np\n'), ((7423, 7436), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7429, 7436), True, 'import numpy as np\n'), ((10226, 10263), 'numpy.sin', 'np.sin', (['((m_a[z] + 1) * np.pi / cutloc)'], {}), '((m_a[z] + 1) * np.pi / cutloc)\n', (10232, 10263), True, 'import numpy as np\n'), ((10385, 10422), 'numpy.sin', 'np.sin', (['((m_a[z] + 1) * np.pi / cutloc)'], {}), '((m_a[z] + 1) * np.pi / cutloc)\n', (10391, 10422), True, 'import numpy as np\n'), ((12162, 12180), 'numpy.sin', 'np.sin', (['thetalinks'], {}), '(thetalinks)\n', (12168, 12180), True, 'import numpy as np\n'), ((12230, 12248), 'numpy.cos', 'np.cos', (['thetalinks'], {}), '(thetalinks)\n', (12236, 12248), True, 'import numpy as np\n'), ((12300, 12318), 'numpy.sin', 'np.sin', (['thetalinks'], {}), '(thetalinks)\n', (12306, 12318), True, 'import numpy as np\n'), ((12367, 12385), 'numpy.cos', 'np.cos', (['thetalinks'], {}), '(thetalinks)\n', (12373, 12385), True, 'import numpy as np\n'), ((15703, 15751), 'numpy.round', 'np.round', (['curve[m + 1, modedisplay[j] - 1, 1]', '(2)'], {}), '(curve[m + 1, modedisplay[j] - 1, 1], 2)\n', (15711, 15751), True, 'import numpy as np\n'), ((26360, 26391), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (26366, 26391), True, 'import numpy as np\n'), ((26453, 26484), 'numpy.sin', 'np.sin', (['(m_a[z] * np.pi / cutloc)'], {}), '(m_a[z] * np.pi / cutloc)\n', (26459, 26484), True, 'import numpy as np\n'), ((10589, 10630), 'numpy.cos', 'np.cos', (['((m_a[z] - 1 / 2) * np.pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * np.pi / cutloc)\n', (10595, 10630), True, 'import numpy as np\n'), ((10671, 10712), 'numpy.cos', 'np.cos', (['((m_a[z] - 1 / 2) * np.pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * np.pi / cutloc)\n', (10677, 10712), True, 'import numpy as np\n'), ((15645, 15693), 'numpy.round', 'np.round', (['curve[m + 1, modedisplay[j] - 1, 0]', '(2)'], {}), '(curve[m + 1, modedisplay[j] - 1, 0], 2)\n', (15653, 15693), True, 'import numpy as np\n'), ((22331, 22344), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (22337, 22344), True, 'import numpy as np\n'), ((22355, 22368), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (22361, 22368), True, 'import numpy as np\n'), ((22414, 22427), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (22420, 22427), True, 'import numpy as np\n'), ((22438, 22451), 
'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (22444, 22451), True, 'import numpy as np\n'), ((22497, 22510), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (22503, 22510), True, 'import numpy as np\n'), ((22521, 22534), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (22527, 22534), True, 'import numpy as np\n'), ((22580, 22593), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (22586, 22593), True, 'import numpy as np\n'), ((22604, 22617), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (22610, 22617), True, 'import numpy as np\n'), ((26640, 26677), 'numpy.sin', 'np.sin', (['((m_a[z] + 1) * np.pi / cutloc)'], {}), '((m_a[z] + 1) * np.pi / cutloc)\n', (26646, 26677), True, 'import numpy as np\n'), ((26811, 26848), 'numpy.sin', 'np.sin', (['((m_a[z] + 1) * np.pi / cutloc)'], {}), '((m_a[z] + 1) * np.pi / cutloc)\n', (26817, 26848), True, 'import numpy as np\n'), ((28698, 28716), 'numpy.sin', 'np.sin', (['thetalinks'], {}), '(thetalinks)\n', (28704, 28716), True, 'import numpy as np\n'), ((28770, 28788), 'numpy.cos', 'np.cos', (['thetalinks'], {}), '(thetalinks)\n', (28776, 28788), True, 'import numpy as np\n'), ((28844, 28862), 'numpy.sin', 'np.sin', (['thetalinks'], {}), '(thetalinks)\n', (28850, 28862), True, 'import numpy as np\n'), ((28912, 28930), 'numpy.cos', 'np.cos', (['thetalinks'], {}), '(thetalinks)\n', (28918, 28930), True, 'import numpy as np\n'), ((10275, 10297), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (10281, 10297), True, 'import numpy as np\n'), ((10434, 10456), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (10440, 10456), True, 'import numpy as np\n'), ((10812, 10850), 'numpy.sin', 'np.sin', (['((m_a[z] - 1 / 2) * pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * pi / cutloc)\n', (10818, 10850), True, 'import numpy as np\n'), ((10845, 10871), 'numpy.sin', 'np.sin', (['(np.pi / cutloc / 2)'], {}), '(np.pi / cutloc / 2)\n', (10851, 10871), True, 'import numpy as np\n'), ((10910, 10948), 'numpy.sin', 'np.sin', (['((m_a[z] - 1 / 2) * pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * pi / cutloc)\n', (10916, 10948), True, 'import numpy as np\n'), ((10943, 10969), 'numpy.sin', 'np.sin', (['(np.pi / cutloc / 2)'], {}), '(np.pi / cutloc / 2)\n', (10949, 10969), True, 'import numpy as np\n'), ((27027, 27068), 'numpy.cos', 'np.cos', (['((m_a[z] - 1 / 2) * np.pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * np.pi / cutloc)\n', (27033, 27068), True, 'import numpy as np\n'), ((27113, 27154), 'numpy.cos', 'np.cos', (['((m_a[z] - 1 / 2) * np.pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * np.pi / cutloc)\n', (27119, 27154), True, 'import numpy as np\n'), ((26689, 26711), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (26695, 26711), True, 'import numpy as np\n'), ((26860, 26882), 'numpy.sin', 'np.sin', (['(np.pi / cutloc)'], {}), '(np.pi / cutloc)\n', (26866, 26882), True, 'import numpy as np\n'), ((27262, 27300), 'numpy.sin', 'np.sin', (['((m_a[z] - 1 / 2) * pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * pi / cutloc)\n', (27268, 27300), True, 'import numpy as np\n'), ((27295, 27318), 'numpy.sin', 'np.sin', (['(pi / cutloc / 2)'], {}), '(pi / cutloc / 2)\n', (27301, 27318), True, 'import numpy as np\n'), ((27361, 27399), 'numpy.sin', 'np.sin', (['((m_a[z] - 1 / 2) * pi / cutloc)'], {}), '((m_a[z] - 1 / 2) * pi / cutloc)\n', (27367, 27399), True, 'import numpy as np\n'), ((27394, 27417), 'numpy.sin', 'np.sin', (['(pi / cutloc / 2)'], {}), '(pi / cutloc / 2)\n', (27400, 27417), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) 2019-2021 <NAME>.
#
# Distributed under the MIT License.
# See LICENSE for more info.
import numpy as np
from . import utils
from qtpy.QtCore import QDirIterator
from qtpy.QtCore import QFileInfo
from qtpy.QtCore import QMutex
from qtpy.QtCore import QMutexLocker
from .elementfactory import ElementFactory
from .elements.element import Element
from .elements.elementcollection import ElementCollection
from .elements.blockelement import BlockElement
from .elements.pointelement import PointElement
from .elements.lineelement import LineElement
from .elements.meshelement import MeshElement
from .elements.nullelement import NullElement
from .elements.tubeelement import TubeElement
from .parsers.parser import Parser
from .parsers.parsercollection import ParserCollection
from .parsers.dxfparser import DXFParser
from .parsers.offparser import OFFParser
from .parsers.h5mparser import H5MParser
from .parsers.h5pparser import H5PParser
from .parsers.csvparser import CSVParser
from .parsers.gslibparser import GSLibParser
class Model:
def __init__(self):
self._mutex = QMutex()
self.parser_collection = ParserCollection()
self.element_collection = ElementCollection()
self.factory = ElementFactory()
self.add_parser('dxf', DXFParser())
self.add_parser('off', OFFParser())
self.add_parser('h5m', H5MParser())
self.add_parser('h5p', H5PParser())
self.add_parser('csv', CSVParser())
self.add_parser('out', GSLibParser())
@property
def last_id(self) -> int:
return self.element_collection.last_id
"""
Utilities
"""
def add_parser(self, extension: str, handler: Parser) -> None:
self.parser_collection.add(extension, handler)
def get_parser(self, extension: str) -> Parser:
return self.parser_collection.get(extension)
@staticmethod
def get_paths_from_directory(path: str) -> list:
it = QDirIterator(path, QDirIterator.Subdirectories)
path_list = []
while it.hasNext():
next_path = it.next()
if QFileInfo(next_path).isFile():
path_list.append(next_path)
return sorted(path_list)
"""
Register methods
"""
def register_element(self, element):
# In a multi-threaded application, we can't risk assigning
# the same ID to different elements, so we use a mutex here.
with QMutexLocker(self._mutex):
self.element_collection.add(element)
return element
def register_element_by_path(self, path: str, generator, *args, **kwargs):
ext = path.split('.')[-1]
info = self.get_parser(ext).load_file(path, *args, **kwargs)
data = info.get('data', {})
properties = info.get('properties', {})
metadata = info.get('metadata', {})
# Prioritize kwargs over file properties (useful in CLI)
kwargs['data'] = kwargs.get('data', data)
for k, v in properties.items():
kwargs[k] = kwargs.get(k, v)
for k, v in metadata.items():
kwargs[k] = kwargs.get(k, v)
return self.register_element(generator(*args, **kwargs))
"""
Load methods by arguments
"""
def null(self, *args, **kwargs) -> NullElement:
# A NullElement won't be registered
return self.factory.null(*args, **kwargs)
def mesh(self, *args, **kwargs) -> MeshElement:
return self.register_element(self.factory.mesh(*args, **kwargs))
def blocks(self, *args, **kwargs) -> BlockElement:
return self.register_element(self.factory.blocks(*args, **kwargs))
def points(self, *args, **kwargs) -> PointElement:
return self.register_element(self.factory.points(*args, **kwargs))
def lines(self, *args, **kwargs) -> LineElement:
return self.register_element(self.factory.lines(*args, **kwargs))
def tubes(self, *args, **kwargs) -> TubeElement:
return self.register_element(self.factory.tubes(*args, **kwargs))
def text(self, *args, **kwargs) -> NullElement:
return self.register_element(self.factory.null(*args, **kwargs))
"""
Load methods by path
"""
def load_mesh(self, path: str, *args, **kwargs) -> MeshElement:
return self.register_element_by_path(path, self.factory.mesh, hint='mesh', *args, **kwargs)
def load_blocks(self, path: str, *args, **kwargs) -> BlockElement:
return self.register_element_by_path(path, self.factory.blocks, hint='blocks', *args, **kwargs)
def load_points(self, path: str, *args, **kwargs) -> PointElement:
return self.register_element_by_path(path, self.factory.points, hint='points', *args, **kwargs)
def load_lines(self, path: str, *args, **kwargs) -> LineElement:
return self.register_element_by_path(path, self.factory.lines, hint='lines', *args, **kwargs)
def load_tubes(self, path: str, *args, **kwargs) -> TubeElement:
return self.register_element_by_path(path, self.factory.tubes, hint='tubes', *args, **kwargs)
"""
Element handling
"""
def get(self, _id: int) -> Element:
return self.element_collection.get(_id)
def delete(self, _id: int) -> None:
self.element_collection.delete(_id)
def clear(self) -> None:
self.element_collection.clear()
"""
Element exporting
"""
def export(self, path: str, _id: int) -> None:
ext = path.split('.')[-1]
element = self.get(_id)
data = element.data
properties = element.properties
self.get_parser(ext).save_file(path=path, data=data, properties=properties)
"""
Adapter for viewer's utilities (slice/distance)
"""
@staticmethod
def slice_elements(origin: np.ndarray, normal: np.ndarray, elements: list, name: str) -> list:
"""
Returns a list of dicts, where each dict is the element ID and its sliced data (vertices or indices)
:param origin: Origin of the plane
:param normal: Normal of the plane
:param elements: A list of elements ready to be sliced
:param name: The name of the key in the return dictionary ('vertices' or 'indices')
:return: list[dict]
"""
result = []
for element in elements:
data = element.slice_with_plane(origin, normal)
if len(data) > 0:
result.append({
'element_id': element.id,
name: data,
})
return result
@staticmethod
def slice_meshes(origin: np.ndarray, normal: np.ndarray, meshes: list) -> list:
"""
Returns a list of dicts, where each dict is the mesh ID and its sliced vertices
"""
return Model.slice_elements(origin, normal, meshes, 'vertices')
@staticmethod
def slice_blocks(origin: np.ndarray, normal: np.ndarray, block_list: list) -> list:
"""
Returns a list of dicts, where each dict is the block ID and its sliced indices
"""
return Model.slice_elements(origin, normal, block_list, 'indices')
@staticmethod
def slice_points(origin: np.ndarray, normal: np.ndarray, point_list: list) -> list:
"""
Returns a list of dicts, where each dict is the point ID and its sliced indices
"""
return Model.slice_elements(origin, normal, point_list, 'indices')
@staticmethod
def measure_from_rays(origin_list: list, ray_list: list, meshes: list) -> dict:
"""
Returns a dict with the following structure:
{
'point_a': list(float) or None,
        'point_b': list(float) or None,
'distance': float or None
}
"""
points_A = []
points_B = []
closest_A = None
closest_B = None
# Detect intersections
for mesh in meshes:
int_A = mesh.intersect_with_ray(origin_list[0], ray_list[0])
int_B = mesh.intersect_with_ray(origin_list[1], ray_list[1])
# Discard non-intersections
if int_A.size > 0:
points_A.append(utils.closest_point_to(origin_list[0], int_A))
if int_B.size > 0:
points_B.append(utils.closest_point_to(origin_list[1], int_B))
# Get closest points for each origin
if len(points_A) > 0:
points_A = np.vstack(points_A)
closest_A = utils.closest_point_to(origin_list[0], points_A)
if len(points_B) > 0:
points_B = np.vstack(points_B)
closest_B = utils.closest_point_to(origin_list[1], points_B)
# Calculate distance if possible
try:
distance = utils.magnitude(closest_B - closest_A)
except TypeError:
distance = None
return {
'point_a': closest_A,
'point_b': closest_B,
'distance': distance,
}
@staticmethod
def intersect_meshes(origin: np.ndarray, ray: np.ndarray, meshes: list) -> list:
attributes_list = []
for mesh in meshes:
intersections = mesh.intersect_with_ray(origin, ray)
closest_point = utils.closest_point_to(origin, intersections)
if closest_point is not None:
attributes = {**mesh.attributes,
'intersections': intersections,
'closest_point': closest_point,
}
attributes_list.append(attributes)
def sort_by_distance(x: dict) -> float:
return utils.magnitude(x.get('closest_point') - origin)
return sorted(attributes_list, key=sort_by_distance)
@staticmethod
def intersect_lines(origin: np.ndarray, ray: np.ndarray, lines: list) -> list:
attributes_list = []
for line in lines:
intersections = line.intersect_with_ray(origin, ray)
closest_point = utils.closest_point_to(origin, intersections)
if closest_point is not None:
attributes = {**line.attributes,
'intersections': intersections,
'closest_point': closest_point,
}
attributes_list.append(attributes)
return attributes_list
| [
"qtpy.QtCore.QDirIterator",
"qtpy.QtCore.QMutexLocker",
"qtpy.QtCore.QFileInfo",
"qtpy.QtCore.QMutex",
"numpy.vstack"
] | [((1132, 1140), 'qtpy.QtCore.QMutex', 'QMutex', ([], {}), '()\n', (1138, 1140), False, 'from qtpy.QtCore import QMutex\n'), ((1990, 2037), 'qtpy.QtCore.QDirIterator', 'QDirIterator', (['path', 'QDirIterator.Subdirectories'], {}), '(path, QDirIterator.Subdirectories)\n', (2002, 2037), False, 'from qtpy.QtCore import QDirIterator\n'), ((2476, 2501), 'qtpy.QtCore.QMutexLocker', 'QMutexLocker', (['self._mutex'], {}), '(self._mutex)\n', (2488, 2501), False, 'from qtpy.QtCore import QMutexLocker\n'), ((8442, 8461), 'numpy.vstack', 'np.vstack', (['points_A'], {}), '(points_A)\n', (8451, 8461), True, 'import numpy as np\n'), ((8589, 8608), 'numpy.vstack', 'np.vstack', (['points_B'], {}), '(points_B)\n', (8598, 8608), True, 'import numpy as np\n'), ((2139, 2159), 'qtpy.QtCore.QFileInfo', 'QFileInfo', (['next_path'], {}), '(next_path)\n', (2148, 2159), False, 'from qtpy.QtCore import QFileInfo\n')] |
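# The measure_from_rays logic above leans on two helpers from the package's
# utils module that are not shown here. A minimal numpy sketch of how they are
# assumed to behave (closest_point_to picks the candidate nearest an origin,
# magnitude is the Euclidean norm), wired into the same distance computation:
import numpy as np

def closest_point_to(origin, points):
    # points: (N, 3) array of candidate intersections
    distances = np.linalg.norm(points - origin, axis=1)
    return points[np.argmin(distances)]

def magnitude(vector):
    return float(np.linalg.norm(vector))

origin_a = np.array([0.0, 0.0, 0.0])
origin_b = np.array([10.0, 0.0, 0.0])
hits_a = np.array([[1.0, 0.0, 0.0], [5.0, 0.0, 0.0]])  # intersections of ray A
hits_b = np.array([[9.0, 0.0, 0.0]])                    # intersections of ray B
closest_a = closest_point_to(origin_a, hits_a)        # -> [1. 0. 0.]
closest_b = closest_point_to(origin_b, hits_b)        # -> [9. 0. 0.]
print(magnitude(closest_b - closest_a))               # -> 8.0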
# Contributors: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
import OpenGL.GL as GL
import OpenGL.GLU as GLU
import OpenGL.GLUT as GLUT
import sys
import numpy as np
from pydart2.gui.opengl.scene import OpenGLScene
from pydart2.gui.glut.window import *
class StaticGLUTWindow(GLUTWindow):
def close(self):
GLUT.glutDestroyWindow(self.window)
GLUT.glutMainLoopEvent()
def drawGL(self, ):
self.scene.render(self.sim)
GLUT.glutSwapBuffers()
def runSingleStep(self):
GLUT.glutPostRedisplay()
GLUT.glutMainLoopEvent()
def getGrayscale(self, _width, _height):
        # Get the current frame as a compressed grayscale image,
        # intended for end-to-end learning. Do not call this in other
        # contexts: unlike getFrame(), the rows come straight from
        # glReadPixels (bottom-up) and are not flipped here.
from PIL import Image
data = GL.glReadPixels(0, 0,
_width, _height,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE)
img = Image.frombytes("RGBA", (_width, _height), data).convert('L')
        # PIL's getdata() yields pixels row by row, i.e. height * width values.
        img = np.array(img.getdata(), dtype=np.uint8)
        return img.reshape(_height, _width)
def getFrame(self):
self.runSingleStep()
data = GL.glReadPixels(0, 0,
self.window_size[0], self.window_size[1],
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE)
img = np.frombuffer(data, dtype=np.uint8)
return img.reshape(self.window_size[1], self.window_size[0], 4)[::-1,:,0:3]
def mykeyboard(self, key, x, y):
keycode = ord(key)
key = key.decode('utf-8')
# print("key = [%s] = [%d]" % (key, ord(key)))
# n = sim.num_frames()
if keycode == 27:
self.close()
return
self.keyPressed(key, x, y)
def run(self, _width=None, _height=None, _show_window=True):
# Init glut
self._show_window = _show_window
GLUT.glutInit(())
GLUT.glutInitDisplayMode(GLUT.GLUT_RGBA |
GLUT.GLUT_DOUBLE |
GLUT.GLUT_ALPHA |
GLUT.GLUT_DEPTH)
if _width is not None and _height is not None:
            GLUT.glutInitWindowSize(_width, _height)
            # self.resizeGL(_width, _height)  # this line crashes my program ??
else:
GLUT.glutInitWindowSize(*self.window_size)
GLUT.glutInitWindowPosition(0, 0)
self.window = GLUT.glutCreateWindow(self.title)
if not _show_window:
GLUT.glutHideWindow()
GLUT.glutDisplayFunc(self.drawGL)
GLUT.glutReshapeFunc(self.resizeGL)
GLUT.glutKeyboardFunc(self.mykeyboard)
GLUT.glutMouseFunc(self.mouseFunc)
GLUT.glutMotionFunc(self.motionFunc)
self.initGL(*self.window_size)
| [
"OpenGL.GLUT.glutKeyboardFunc",
"OpenGL.GLUT.glutDisplayFunc",
"OpenGL.GLUT.glutMainLoopEvent",
"OpenGL.GLUT.glutInitWindowPosition",
"OpenGL.GLUT.glutMouseFunc",
"numpy.frombuffer",
"OpenGL.GLUT.glutPostRedisplay",
"OpenGL.GL.glReadPixels",
"OpenGL.GLUT.glutInitDisplayMode",
"OpenGL.GLUT.glutResh... | [((314, 349), 'OpenGL.GLUT.glutDestroyWindow', 'GLUT.glutDestroyWindow', (['self.window'], {}), '(self.window)\n', (336, 349), True, 'import OpenGL.GLUT as GLUT\n'), ((358, 382), 'OpenGL.GLUT.glutMainLoopEvent', 'GLUT.glutMainLoopEvent', ([], {}), '()\n', (380, 382), True, 'import OpenGL.GLUT as GLUT\n'), ((452, 474), 'OpenGL.GLUT.glutSwapBuffers', 'GLUT.glutSwapBuffers', ([], {}), '()\n', (472, 474), True, 'import OpenGL.GLUT as GLUT\n'), ((513, 537), 'OpenGL.GLUT.glutPostRedisplay', 'GLUT.glutPostRedisplay', ([], {}), '()\n', (535, 537), True, 'import OpenGL.GLUT as GLUT\n'), ((546, 570), 'OpenGL.GLUT.glutMainLoopEvent', 'GLUT.glutMainLoopEvent', ([], {}), '()\n', (568, 570), True, 'import OpenGL.GLUT as GLUT\n'), ((822, 893), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', '_width', '_height', 'GL.GL_RGBA', 'GL.GL_UNSIGNED_BYTE'], {}), '(0, 0, _width, _height, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE)\n', (837, 893), True, 'import OpenGL.GL as GL\n'), ((1230, 1330), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', 'self.window_size[0]', 'self.window_size[1]', 'GL.GL_RGBA', 'GL.GL_UNSIGNED_BYTE'], {}), '(0, 0, self.window_size[0], self.window_size[1], GL.GL_RGBA,\n GL.GL_UNSIGNED_BYTE)\n', (1245, 1330), True, 'import OpenGL.GL as GL\n'), ((1434, 1469), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (1447, 1469), True, 'import numpy as np\n'), ((1980, 1997), 'OpenGL.GLUT.glutInit', 'GLUT.glutInit', (['()'], {}), '(())\n', (1993, 1997), True, 'import OpenGL.GLUT as GLUT\n'), ((2006, 2106), 'OpenGL.GLUT.glutInitDisplayMode', 'GLUT.glutInitDisplayMode', (['(GLUT.GLUT_RGBA | GLUT.GLUT_DOUBLE | GLUT.GLUT_ALPHA | GLUT.GLUT_DEPTH)'], {}), '(GLUT.GLUT_RGBA | GLUT.GLUT_DOUBLE | GLUT.\n GLUT_ALPHA | GLUT.GLUT_DEPTH)\n', (2030, 2106), True, 'import OpenGL.GLUT as GLUT\n'), ((2463, 2496), 'OpenGL.GLUT.glutInitWindowPosition', 'GLUT.glutInitWindowPosition', (['(0)', '(0)'], {}), '(0, 0)\n', (2490, 2496), True, 'import OpenGL.GLUT as GLUT\n'), ((2519, 2552), 'OpenGL.GLUT.glutCreateWindow', 'GLUT.glutCreateWindow', (['self.title'], {}), '(self.title)\n', (2540, 2552), True, 'import OpenGL.GLUT as GLUT\n'), ((2625, 2658), 'OpenGL.GLUT.glutDisplayFunc', 'GLUT.glutDisplayFunc', (['self.drawGL'], {}), '(self.drawGL)\n', (2645, 2658), True, 'import OpenGL.GLUT as GLUT\n'), ((2667, 2702), 'OpenGL.GLUT.glutReshapeFunc', 'GLUT.glutReshapeFunc', (['self.resizeGL'], {}), '(self.resizeGL)\n', (2687, 2702), True, 'import OpenGL.GLUT as GLUT\n'), ((2711, 2749), 'OpenGL.GLUT.glutKeyboardFunc', 'GLUT.glutKeyboardFunc', (['self.mykeyboard'], {}), '(self.mykeyboard)\n', (2732, 2749), True, 'import OpenGL.GLUT as GLUT\n'), ((2758, 2792), 'OpenGL.GLUT.glutMouseFunc', 'GLUT.glutMouseFunc', (['self.mouseFunc'], {}), '(self.mouseFunc)\n', (2776, 2792), True, 'import OpenGL.GLUT as GLUT\n'), ((2801, 2837), 'OpenGL.GLUT.glutMotionFunc', 'GLUT.glutMotionFunc', (['self.motionFunc'], {}), '(self.motionFunc)\n', (2820, 2837), True, 'import OpenGL.GLUT as GLUT\n'), ((2268, 2308), 'OpenGL.GLUT.glutInitWindowSize', 'GLUT.glutInitWindowSize', (['_width', '_height'], {}), '(_width, _height)\n', (2291, 2308), True, 'import OpenGL.GLUT as GLUT\n'), ((2412, 2454), 'OpenGL.GLUT.glutInitWindowSize', 'GLUT.glutInitWindowSize', (['*self.window_size'], {}), '(*self.window_size)\n', (2435, 2454), True, 'import OpenGL.GLUT as GLUT\n'), ((2594, 2615), 'OpenGL.GLUT.glutHideWindow', 'GLUT.glutHideWindow', ([], {}), '()\n', (2613, 2615), True, 'import 
OpenGL.GLUT as GLUT\n'), ((1001, 1049), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGBA"""', '(_width, _height)', 'data'], {}), "('RGBA', (_width, _height), data)\n", (1016, 1049), False, 'from PIL import Image\n')] |
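# getFrame above packs three steps into one expression: reinterpret the raw
# glReadPixels bytes, flip the bottom-up OpenGL row order, and drop the alpha
# channel. A self-contained sketch of the same buffer handling on synthetic data:
import numpy as np

width, height = 4, 3
# Pretend this is what glReadPixels returned: height * width RGBA pixels,
# bottom row first (OpenGL's origin is the lower-left corner).
data = bytes(range(width * height * 4))

img = np.frombuffer(data, dtype=np.uint8)
rgba = img.reshape(height, width, 4)   # rows are still bottom-up here
rgb = rgba[::-1, :, 0:3]                # flip vertically, keep only RGB
assert rgb.shape == (height, width, 3)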
"""
A Converter converts between:
examples (each one a dict with keys like "filename" and "label")
arrays (numpy arrays input to or output from a network)
Dataset augmentation can be accomplished with a Converter that returns a
different array each time to_array is called with the same example
"""
import os
import numpy as np
import random
import imutil
# TODO: Configure this
DATA_DIR = '/mnt/nfs/data'
# Converters can be used like a function, on a single example or a batch
class Converter(object):
def __call__(self, inputs):
if isinstance(inputs, np.ndarray):
return [self.from_array(e) for e in inputs]
elif isinstance(inputs, list):
return np.array([self.to_array(e) for e in inputs])
else:
return self.to_array(inputs)
# Crops, resizes, normalizes, performs any desired augmentations
# Outputs images as eg. 32x32x3 np.array or eg. 3x32x32 torch.FloatTensor
class ImageConverter(Converter):
def __init__(self,
dataset,
image_size=32,
crop_to_bounding_box=True,
random_horizontal_flip=False,
delete_background=False,
torch=True,
normalize=True,
**kwargs):
width, height = image_size, image_size
self.img_shape = (width, height)
self.bounding_box = crop_to_bounding_box
self.data_dir = dataset.data_dir
self.random_horizontal_flip = random_horizontal_flip
self.torch = torch
self.normalize = normalize
self.delete_background = delete_background
def to_array(self, example):
filename = os.path.expanduser(example['filename'])
if not filename.startswith('/'):
filename = os.path.join(DATA_DIR, filename)
box = example.get('box') if self.bounding_box else None
img = imutil.decode_jpg(filename,
resize_to=self.img_shape,
crop_to_box=box)
if self.delete_background:
seg_filename = os.path.expanduser(example['segmentation'])
segmentation = imutil.decode_jpg(seg_filename,
resize_to=self.img_shape,
crop_to_box=box)
foreground_mask = np.mean(segmentation, axis=-1) / 255.
img = img * np.expand_dims(foreground_mask, axis=-1)
if self.random_horizontal_flip and random.getrandbits(1):
img = np.flip(img, axis=1)
if self.torch:
img = img.transpose((2,0,1))
if self.normalize:
img *= 1.0 / 255
return img
def from_array(self, array):
return array
class SkyRTSConverter(Converter):
def __init__(self,
dataset,
**kwargs):
self.data_dir = dataset.data_dir
def to_array(self, example):
        curr_frame = self.filename_to_pixels(self.get_filename(example, 'filename'))
        # 'next' would shadow the builtin, so use a distinct name
        next_frame = self.filename_to_pixels(self.get_filename(example, 'next_filename'))
        return curr_frame, next_frame
def get_filename(self, example, key):
filename = os.path.expanduser(example[key])
if not filename.startswith('/'):
filename = os.path.join(DATA_DIR, filename)
return filename
def filename_to_pixels(self, filename):
# Input is a PNG composed of 6 40x40 monochrome images
# It encodes frames of a game, similar to the SC2 API
# From top-left to bottom-right, maps represent:
# Health, Agent, Small Towers, Big Towers, Friends, Enemies
img = imutil.decode_jpg(filename, resize_to=None)
assert img.shape == (40*3, 40*2, 3)
# Pytorch convnets require BCHW inputs
channels = np.zeros((6, 40, 40))
channels[0] = img[0:40, 0:40, 0]
channels[1] = img[0:40, 40:80, 0]
channels[2] = img[40:80, 0:40, 0]
channels[3] = img[40:80, 40:80, 0]
channels[4] = img[80:120, 0:40, 0]
channels[5] = img[80:120, 40:80, 0]
# Normalize to [0, 1]
return channels / 255.0
def from_array(self, array):
return array
# LabelConverter extracts the class labels from DatasetFile examples
# Each example can have only one class
class LabelConverter(Converter):
def __init__(self, dataset, label_key="label", **kwargs):
self.label_key = label_key
self.labels = get_labels(dataset, label_key)
self.num_classes = len(self.labels)
self.idx = {self.labels[i]: i for i in range(self.num_classes)}
print("LabelConverter: labels are {}".format(self.labels))
def to_array(self, example):
return self.idx[example[self.label_key]]
def from_array(self, array):
return self.labels[np.argmax(array)]
# FlexibleLabelConverter extracts class labels including partial and negative labels
# Each example now has a label for each class:
# 1 (X belongs to class Y)
# -1 (X does not belong to class Y)
# 0 (X might or might not belong to Y)
class FlexibleLabelConverter(Converter):
def __init__(self, dataset, label_key="label", negative_key="label_n", **kwargs):
self.label_key = label_key
self.negative_key = negative_key
self.labels = sorted(list(set(get_labels(dataset, label_key) + get_labels(dataset, negative_key))))
self.num_classes = len(self.labels)
self.idx = {self.labels[i]: i for i in range(self.num_classes)}
print("FlexibleLabelConverter: labels are {}".format(self.labels))
def to_array(self, example):
array = np.zeros(self.num_classes)
if self.label_key in example:
array[:] = -1 # Negative labels
idx = self.idx[example[self.label_key]]
array[idx] = 1 # Positive label
if self.negative_key in example:
idx = self.idx[example[self.negative_key]]
array[idx] = -1
return array
def from_array(self, array):
return self.labels[np.argmax(array)]
# QValueConverter extracts action-value pairs from Dataset files
# A network performs regression to a Q-value for each possible action
# The label consists of a ground truth value for one action (set to 1 in the mask)
# Other actions (set to 0 in the mask) should be ignored in the loss
class QValueConverter(Converter):
def __init__(self, dataset, action_key='action', value_key='value', **kwargs):
self.action_key = action_key
self.value_key = value_key
self.actions = sorted(list(set(get_labels(dataset, action_key))))
self.num_classes = len(self.actions)
print("QValueConverter: actions are {}".format(self.actions))
values = set(get_labels(dataset, value_key))
self.min_val = float(min(values))
self.max_val = float(max(values))
print('Q value range: from {} to {}'.format(self.min_val, self.max_val))
if self.min_val == self.max_val:
print('Warning: No Q value range')
self.max_val += 1.0
def to_array(self, example):
qvals = np.zeros(self.num_classes)
mask = np.zeros(self.num_classes)
qvals[example[self.action_key] - 1] = (example[self.value_key] - self.min_val) / (self.max_val - self.min_val)
mask[example[self.action_key] - 1] = 1
return qvals, mask
def get_labels(dataset, label_key):
unique_labels = set()
for example in dataset.examples:
if label_key in example:
unique_labels.add(example[label_key])
return sorted(list(unique_labels))
# AttributeConverter extracts boolean attributes from DatasetFile examples
# An example might have many attributes. Each attribute is True or False.
class AttributeConverter(Converter):
def __init__(self, dataset, **kwargs):
unique_attributes = set()
for example in dataset.examples:
for key in example:
if key.startswith('is_') or key.startswith('has_'):
unique_attributes.add(key)
self.attributes = sorted(list(unique_attributes))
self.num_attributes = len(self.attributes)
self.idx = {self.attributes[i]: i for i in range(self.num_attributes)}
def to_array(self, example):
attrs = np.zeros(self.num_attributes)
for i, attr in enumerate(self.attributes):
# Attributes not present on an example are set to False
attrs[i] = float(example.get(attr, False))
return attrs
def from_array(self, array):
return ",".join(self.attributes[i] for i in range(self.attributes) if array[i > .5])
| [
"numpy.flip",
"os.path.join",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.mean",
"random.getrandbits",
"imutil.decode_jpg",
"os.path.expanduser"
] | [((1647, 1686), 'os.path.expanduser', 'os.path.expanduser', (["example['filename']"], {}), "(example['filename'])\n", (1665, 1686), False, 'import os\n'), ((1862, 1932), 'imutil.decode_jpg', 'imutil.decode_jpg', (['filename'], {'resize_to': 'self.img_shape', 'crop_to_box': 'box'}), '(filename, resize_to=self.img_shape, crop_to_box=box)\n', (1879, 1932), False, 'import imutil\n'), ((3074, 3106), 'os.path.expanduser', 'os.path.expanduser', (['example[key]'], {}), '(example[key])\n', (3092, 3106), False, 'import os\n'), ((3537, 3580), 'imutil.decode_jpg', 'imutil.decode_jpg', (['filename'], {'resize_to': 'None'}), '(filename, resize_to=None)\n', (3554, 3580), False, 'import imutil\n'), ((3691, 3712), 'numpy.zeros', 'np.zeros', (['(6, 40, 40)'], {}), '((6, 40, 40))\n', (3699, 3712), True, 'import numpy as np\n'), ((5519, 5545), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (5527, 5545), True, 'import numpy as np\n'), ((7005, 7031), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (7013, 7031), True, 'import numpy as np\n'), ((7047, 7073), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (7055, 7073), True, 'import numpy as np\n'), ((8181, 8210), 'numpy.zeros', 'np.zeros', (['self.num_attributes'], {}), '(self.num_attributes)\n', (8189, 8210), True, 'import numpy as np\n'), ((1751, 1783), 'os.path.join', 'os.path.join', (['DATA_DIR', 'filename'], {}), '(DATA_DIR, filename)\n', (1763, 1783), False, 'import os\n'), ((2027, 2070), 'os.path.expanduser', 'os.path.expanduser', (["example['segmentation']"], {}), "(example['segmentation'])\n", (2045, 2070), False, 'import os\n'), ((2098, 2172), 'imutil.decode_jpg', 'imutil.decode_jpg', (['seg_filename'], {'resize_to': 'self.img_shape', 'crop_to_box': 'box'}), '(seg_filename, resize_to=self.img_shape, crop_to_box=box)\n', (2115, 2172), False, 'import imutil\n'), ((2389, 2410), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (2407, 2410), False, 'import random\n'), ((2430, 2450), 'numpy.flip', 'np.flip', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (2437, 2450), True, 'import numpy as np\n'), ((3171, 3203), 'os.path.join', 'os.path.join', (['DATA_DIR', 'filename'], {}), '(DATA_DIR, filename)\n', (3183, 3203), False, 'import os\n'), ((4705, 4721), 'numpy.argmax', 'np.argmax', (['array'], {}), '(array)\n', (4714, 4721), True, 'import numpy as np\n'), ((5932, 5948), 'numpy.argmax', 'np.argmax', (['array'], {}), '(array)\n', (5941, 5948), True, 'import numpy as np\n'), ((2243, 2273), 'numpy.mean', 'np.mean', (['segmentation'], {'axis': '(-1)'}), '(segmentation, axis=-1)\n', (2250, 2273), True, 'import numpy as np\n'), ((2305, 2345), 'numpy.expand_dims', 'np.expand_dims', (['foreground_mask'], {'axis': '(-1)'}), '(foreground_mask, axis=-1)\n', (2319, 2345), True, 'import numpy as np\n')] |
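# The __call__ dispatch at the top of the module is what lets every Converter
# be applied to a single example, a list of examples, or an array of network
# outputs. A toy converter (hypothetical "label" key and label set) shows all
# three paths:
import numpy as np

LABELS = ["cat", "dog"]

class ToyLabelConverter:
    def __call__(self, inputs):
        # Same dispatch as Converter.__call__ above.
        if isinstance(inputs, np.ndarray):
            return [self.from_array(e) for e in inputs]
        elif isinstance(inputs, list):
            return np.array([self.to_array(e) for e in inputs])
        return self.to_array(inputs)

    def to_array(self, example):
        return LABELS.index(example["label"])

    def from_array(self, array):
        return LABELS[int(np.argmax(array))]

convert = ToyLabelConverter()
print(convert([{"label": "cat"}, {"label": "dog"}]))  # -> [0 1]
print(convert(np.array([[0.1, 0.9]])))               # -> ['dog']
print(convert({"label": "dog"}))                      # -> 1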
import numpy as np
def precompute_BM(img, kHW, NHW, nHW, tauMatch):
"""
:search for similar patches
:param img: input image
:param kHW: length of side of patch
:param NHW: how many patches are stacked
:param nHW: length of side of search area
:param tauMatch: threshold determine whether two patches are similar
:return ri_rj_N__ni_nj: The top N most similar patches to the referred patch
:return threshold_count: according to tauMatch how many patches are similar to the referred one
"""
img = img.astype(np.float64)
height, width = img.shape
Ns = 2 * nHW + 1
threshold = tauMatch * kHW * kHW
sum_table = np.ones((Ns, Ns, height, width)) * 2 * threshold # di, dj, ph, pw
row_add_mat, column_add_mat = get_add_patch_matrix(height, width, nHW, kHW)
diff_margin = np.pad(np.ones((height - 2 * nHW, width - 2 * nHW)), nHW, 'constant', constant_values=0.)
sum_margin = (1 - diff_margin) * 2 * threshold
for di in range(-nHW, nHW + 1):
for dj in range(-nHW, nHW + 1):
t_img = translation_2d_mat(img, right=-dj, down=-di)
diff_table_2 = (img - t_img) * (img - t_img) * diff_margin
sum_diff_2 = row_add_mat @ diff_table_2 @ column_add_mat
sum_table[di + nHW, dj + nHW] = np.maximum(sum_diff_2, sum_margin) # sum_table (2n+1, 2n+1, height, width)
sum_table = sum_table.reshape((Ns * Ns, height * width)) # di_dj, ph_pw
sum_table_T = sum_table.transpose((1, 0)) # ph_pw__di_dj
argsort = np.argpartition(sum_table_T, range(NHW))[:, :NHW]
argsort[:, 0] = (Ns * Ns - 1) // 2
argsort_di = argsort // Ns - nHW
argsort_dj = argsort % Ns - nHW
near_pi = argsort_di.reshape((height, width, -1)) + np.arange(height)[:, np.newaxis, np.newaxis]
near_pj = argsort_dj.reshape((height, width, -1)) + np.arange(width)[np.newaxis, :, np.newaxis]
ri_rj_N__ni_nj = np.concatenate((near_pi[:, :, :, np.newaxis], near_pj[:, :, :, np.newaxis]), axis=-1)
sum_filter = np.where(sum_table_T < threshold, 1, 0)
threshold_count = np.sum(sum_filter, axis=1)
threshold_count = closest_power_of_2(threshold_count, max_=NHW)
threshold_count = threshold_count.reshape((height, width))
return ri_rj_N__ni_nj, threshold_count
def get_add_patch_matrix(h, w, nHW, kHW):
row_add = np.eye(h - 2 * nHW)
row_add = np.pad(row_add, nHW, 'constant')
row_add_mat = row_add.copy()
for k in range(1, kHW):
row_add_mat += translation_2d_mat(row_add, right=k, down=0)
column_add = np.eye(w - 2 * nHW)
column_add = np.pad(column_add, nHW, 'constant')
column_add_mat = column_add.copy()
for k in range(1, kHW):
column_add_mat += translation_2d_mat(column_add, right=0, down=k)
return row_add_mat, column_add_mat
def translation_2d_mat(mat, right, down):
mat = np.roll(mat, right, axis=1)
mat = np.roll(mat, down, axis=0)
return mat
def closest_power_of_2(M, max_):
    """Round every entry of M down to the nearest power of two, capped at max_.

    E.g. closest_power_of_2(np.array([1, 2, 3, 5, 9]), max_=8) gives [1, 2, 2, 4, 8].
    """
    M = np.where(max_ < M, max_, M)
    while max_ > 1:
        M = np.where((max_ // 2 < M) * (M < max_), max_ // 2, M)
        max_ //= 2
    return M
if __name__ == '__main__':
import os
import cv2
from utils import add_gaussian_noise, symetrize
# <hyper parameter>
# ref_i, ref_j = 196, 142
ref_i, ref_j = 164, 135
# ref_i, ref_j = 271, 206
kHW = 8
NHW = 3
nHW = 16
tauMatch = 2500
# <hyper parameter \>
im = cv2.imread('test_data/image/Cameraman.png', cv2.IMREAD_GRAYSCALE)
im = im[100:, :]
ref_i, ref_j = 64, 135
im_noisy = add_gaussian_noise(im, 10, seed=1)
img_noisy_p = symetrize(im_noisy, nHW)
near_pij, threshold_count = precompute_BM(img_noisy_p, kHW=kHW, NHW=NHW, nHW=nHW, tauMatch=tauMatch)
im = cv2.cvtColor(img_noisy_p, cv2.COLOR_GRAY2RGB)
# <draw search area>
points_list = [(ref_j - nHW, ref_i - nHW), (ref_j + nHW, ref_i - nHW), (ref_j - nHW, ref_i + nHW),
(ref_j + nHW, ref_i + nHW)]
for point in points_list:
cv2.circle(im, point, 0, (0, 0, 255), 1)
# <draw search area \>
# <draw reference patch>
cv2.rectangle(im, (ref_j, ref_i), (ref_j + kHW, ref_i + kHW), color=(255, 0, 0), thickness=1)
# <draw reference patch \>
# <draw similar patches>
count = threshold_count[ref_i, ref_j]
for i, Pnear in enumerate(near_pij[ref_i, ref_j]):
if i == 0:
continue
if i > count:
break
y, x = Pnear
cv2.rectangle(im, (x, y), (x + kHW, y + kHW), color=(0, 255, 0), thickness=1)
# <draw similar patches \>
# cv2.imshow('im', im)
# cv2.waitKey()
cv2.imwrite('BM_real_im_test.png', im)
| [
"numpy.pad",
"cv2.circle",
"numpy.sum",
"utils.add_gaussian_noise",
"numpy.maximum",
"numpy.roll",
"cv2.cvtColor",
"utils.symetrize",
"cv2.imwrite",
"numpy.ones",
"cv2.imread",
"numpy.where",
"numpy.arange",
"cv2.rectangle",
"numpy.eye",
"numpy.concatenate"
] | [((1915, 2004), 'numpy.concatenate', 'np.concatenate', (['(near_pi[:, :, :, np.newaxis], near_pj[:, :, :, np.newaxis])'], {'axis': '(-1)'}), '((near_pi[:, :, :, np.newaxis], near_pj[:, :, :, np.newaxis]),\n axis=-1)\n', (1929, 2004), True, 'import numpy as np\n'), ((2019, 2058), 'numpy.where', 'np.where', (['(sum_table_T < threshold)', '(1)', '(0)'], {}), '(sum_table_T < threshold, 1, 0)\n', (2027, 2058), True, 'import numpy as np\n'), ((2081, 2107), 'numpy.sum', 'np.sum', (['sum_filter'], {'axis': '(1)'}), '(sum_filter, axis=1)\n', (2087, 2107), True, 'import numpy as np\n'), ((2341, 2360), 'numpy.eye', 'np.eye', (['(h - 2 * nHW)'], {}), '(h - 2 * nHW)\n', (2347, 2360), True, 'import numpy as np\n'), ((2375, 2407), 'numpy.pad', 'np.pad', (['row_add', 'nHW', '"""constant"""'], {}), "(row_add, nHW, 'constant')\n", (2381, 2407), True, 'import numpy as np\n'), ((2555, 2574), 'numpy.eye', 'np.eye', (['(w - 2 * nHW)'], {}), '(w - 2 * nHW)\n', (2561, 2574), True, 'import numpy as np\n'), ((2592, 2627), 'numpy.pad', 'np.pad', (['column_add', 'nHW', '"""constant"""'], {}), "(column_add, nHW, 'constant')\n", (2598, 2627), True, 'import numpy as np\n'), ((2863, 2890), 'numpy.roll', 'np.roll', (['mat', 'right'], {'axis': '(1)'}), '(mat, right, axis=1)\n', (2870, 2890), True, 'import numpy as np\n'), ((2901, 2927), 'numpy.roll', 'np.roll', (['mat', 'down'], {'axis': '(0)'}), '(mat, down, axis=0)\n', (2908, 2927), True, 'import numpy as np\n'), ((2986, 3013), 'numpy.where', 'np.where', (['(max_ < M)', 'max_', 'M'], {}), '(max_ < M, max_, M)\n', (2994, 3013), True, 'import numpy as np\n'), ((3448, 3513), 'cv2.imread', 'cv2.imread', (['"""test_data/image/Cameraman.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('test_data/image/Cameraman.png', cv2.IMREAD_GRAYSCALE)\n", (3458, 3513), False, 'import cv2\n'), ((3577, 3611), 'utils.add_gaussian_noise', 'add_gaussian_noise', (['im', '(10)'], {'seed': '(1)'}), '(im, 10, seed=1)\n', (3595, 3611), False, 'from utils import add_gaussian_noise, symetrize\n'), ((3631, 3655), 'utils.symetrize', 'symetrize', (['im_noisy', 'nHW'], {}), '(im_noisy, nHW)\n', (3640, 3655), False, 'from utils import add_gaussian_noise, symetrize\n'), ((3771, 3816), 'cv2.cvtColor', 'cv2.cvtColor', (['img_noisy_p', 'cv2.COLOR_GRAY2RGB'], {}), '(img_noisy_p, cv2.COLOR_GRAY2RGB)\n', (3783, 3816), False, 'import cv2\n'), ((4132, 4229), 'cv2.rectangle', 'cv2.rectangle', (['im', '(ref_j, ref_i)', '(ref_j + kHW, ref_i + kHW)'], {'color': '(255, 0, 0)', 'thickness': '(1)'}), '(im, (ref_j, ref_i), (ref_j + kHW, ref_i + kHW), color=(255, 0,\n 0), thickness=1)\n', (4145, 4229), False, 'import cv2\n'), ((4654, 4692), 'cv2.imwrite', 'cv2.imwrite', (['"""BM_real_im_test.png"""', 'im'], {}), "('BM_real_im_test.png', im)\n", (4665, 4692), False, 'import cv2\n'), ((840, 884), 'numpy.ones', 'np.ones', (['(height - 2 * nHW, width - 2 * nHW)'], {}), '((height - 2 * nHW, width - 2 * nHW))\n', (847, 884), True, 'import numpy as np\n'), ((3046, 3098), 'numpy.where', 'np.where', (['((max_ // 2 < M) * (M < max_))', '(max_ // 2)', 'M'], {}), '((max_ // 2 < M) * (M < max_), max_ // 2, M)\n', (3054, 3098), True, 'import numpy as np\n'), ((4030, 4070), 'cv2.circle', 'cv2.circle', (['im', 'point', '(0)', '(0, 0, 255)', '(1)'], {}), '(im, point, 0, (0, 0, 255), 1)\n', (4040, 4070), False, 'import cv2\n'), ((4493, 4570), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + kHW, y + kHW)'], {'color': '(0, 255, 0)', 'thickness': '(1)'}), '(im, (x, y), (x + kHW, y + kHW), color=(0, 255, 0), thickness=1)\n', (4506, 4570), 
False, 'import cv2\n'), ((668, 700), 'numpy.ones', 'np.ones', (['(Ns, Ns, height, width)'], {}), '((Ns, Ns, height, width))\n', (675, 700), True, 'import numpy as np\n'), ((1301, 1335), 'numpy.maximum', 'np.maximum', (['sum_diff_2', 'sum_margin'], {}), '(sum_diff_2, sum_margin)\n', (1311, 1335), True, 'import numpy as np\n'), ((1749, 1766), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (1758, 1766), True, 'import numpy as np\n'), ((1850, 1866), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (1859, 1866), True, 'import numpy as np\n')] |
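# The core trick in precompute_BM is that row_add_mat @ D @ column_add_mat
# turns a per-pixel squared-difference table D into sums over kHW x kHW
# patches, so each candidate offset is scored with two matrix products.
# A small check of that identity at an interior pixel (sizes are arbitrary):
import numpy as np

def translation_2d_mat(mat, right, down):
    mat = np.roll(mat, right, axis=1)
    return np.roll(mat, down, axis=0)

h = w = 12; nHW = 3; kHW = 2
row_add = np.pad(np.eye(h - 2 * nHW), nHW, 'constant')
row_add_mat = sum(translation_2d_mat(row_add, right=k, down=0) for k in range(kHW))
column_add = np.pad(np.eye(w - 2 * nHW), nHW, 'constant')
column_add_mat = sum(translation_2d_mat(column_add, right=0, down=k) for k in range(kHW))

D = np.random.rand(h, w)  # stands in for the squared-difference table
S = row_add_mat @ D @ column_add_mat
i, j = 5, 6          # any interior pixel
assert np.isclose(S[i, j], D[i:i + kHW, j:j + kHW].sum())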
import numpy as np
from hfo import MOVE_TO, DRIBBLE_TO, KICK_TO, NOOP, DRIBBLE, PASS, MOVE, \
GO_TO_BALL
from plastic_agent.hfo_env.actions.base import BaseActions
from plastic_agent.hfo_env.game_interface import GameInterface
from plastic_agent.hfo_env.features.plastic import PlasticFeatures
from plastic_agent.utils import get_angle, get_opposite_vector
class PlasticActions(BaseActions):
name = "plasticActions"
# ACTIONS:
ACTIONS_WITHOUT_BALL = ["NOOP", "MOVE_TO_BALL", "MOVE_TO_GOAL",
"MOVE_TO_NEAR_TEAM", "MOVE_FROM_NEAR_TEAM",
"MOVE_TO_NEAR_OP", "MOVE_FROM_NEAR_OP"]
ACTIONS_WITH_BALL = ["SHOOT", "SHORT_DRIBBLE", "LONG_DRIBBLE"]
NUM_SHORT_DRIBBLE_STEPS = 4
NUM_LONG_DRIBBLE_STEPS = 15
NUM_GO_TO_BALL_STEPS = 10
NUM_MOVE_STEPS = 4
NUM_NOOP_STEPS = 4
NUM_STOP_STEPS = 1
def __init__(self, num_team: int, features: PlasticFeatures,
game_interface: GameInterface):
super().__init__(num_team, features, game_interface)
def best_shoot_ball(self):
""" Tries to shoot, if it fail, kicks to goal randomly """
# Get best shoot angle:
angles = []
goalie_coord = np.array([self.features.opponents[0].x_pos,
self.features.opponents[0].y_pos])
player_coord = np.array(self.features.get_pos_tuple())
for goal_pos in self.shoot_possible_coord:
angles.append(get_angle(goalie=goalie_coord, player=player_coord,
point=goal_pos))
idx = int(np.argmax(np.array(angles)))
best_shoot_coord = self.shoot_possible_coord[idx]
# Action parameters:
hfo_action = (KICK_TO, best_shoot_coord[0], best_shoot_coord[1], 2.3)
# Step game:
status, obs = self.game_interface.step(hfo_action)
# Update self.features:
self.features.update_features(obs)
return status, obs
    def dribble_action(self, num_rep: int = 1, long: bool = False):
        # Note: `long` is currently unused; short and long dribbles differ
        # only in the number of steps passed as num_rep.
        status = 0
observation = []
attempts = 0
while attempts <= num_rep and self.game_interface.in_game():
if not self.features.has_ball():
action = GO_TO_BALL
else:
action = DRIBBLE
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
attempts += 1
while not self.features.has_ball() and self.game_interface.in_game():
action = GO_TO_BALL
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
return status, observation
def move_to_nearest_teammate(self, num_rep: int = 1):
t_coord: np.ndarray = self.features.get_teammate_coord()
status = 0
observation = []
attempts = 0
while self.game_interface.in_game() and attempts < num_rep:
action = (MOVE_TO, t_coord[0], t_coord[1])
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
dist_to_teammate = self.features.t_coord - self.features.a_coord
if abs(np.linalg.norm(dist_to_teammate)) <= 0.2:
break
attempts += 1
return status, observation
def move_away_from_nearest_teammate(self, num_rep: int = 1):
a_coord: np.ndarray = self.features.get_agent_coord()
t_coord: np.ndarray = self.features.get_teammate_coord()
op_vector = get_opposite_vector(a_coord, t_coord)
# Coordinates:
x_pos = a_coord[0] + op_vector[0]
y_pos = a_coord[1] + op_vector[1]
if abs(x_pos) > 0.8:
x_pos = 0.8 if x_pos > 0 else -0.8
if abs(y_pos) > 0.8:
y_pos = 0.8 if y_pos > 0 else -0.8
status = 0
observation = []
attempts = 0
while self.game_interface.in_game() and attempts < num_rep:
action = (MOVE_TO, x_pos, y_pos)
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
attempts += 1
return status, observation
def move_to_nearest_opponent(self, num_rep: int = 1):
status = 0
observation = []
attempts = 0
while self.game_interface.in_game() and attempts < num_rep:
action = (MOVE_TO, self.features.near_op_coord[0],
self.features.near_op_coord[1])
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
if abs(np.linalg.norm(self.features.near_op_coord -
self.features.a_coord)) <= 0.2:
break
attempts += 1
return status, observation
def move_away_from_nearest_opponent(self, num_rep: int = 1):
op_vector = get_opposite_vector(self.features.a_coord,
self.features.near_op_coord)
# Coordinates:
x_pos = self.features.a_coord[0] + op_vector[0]
y_pos = self.features.a_coord[1] + op_vector[1]
if abs(x_pos) > 0.8:
x_pos = 0.8 if x_pos > 0 else -0.8
if abs(y_pos) > 0.8:
y_pos = 0.8 if y_pos > 0 else -0.8
status = 0
observation = []
attempts = 0
while self.game_interface.in_game() and attempts < num_rep:
action = (MOVE_TO, x_pos, y_pos)
status, observation = self.game_interface.step(action)
self.features.update_features(observation)
attempts += 1
return status, observation
def pass_ball(self, teammate_id: int):
""" Tries to use the PASS action, if it fails, Kicks in the direction
of the teammate"""
uniform = self.features.teammates[teammate_id].uniform_num
status = 0
obs = []
attempts = 0
while self.game_interface.in_game() and self.features.has_ball():
if attempts >= 2:
x_pos = self.features.t_coord[0]
y_pos = self.features.t_coord[1]
hfo_action = (KICK_TO, x_pos, y_pos, 1.7)
status, obs = self.game_interface.step(hfo_action)
self.features.update_features(obs)
break
else:
hfo_action = (PASS, uniform)
status, obs = self.game_interface.step(hfo_action)
self.features.update_features(obs)
attempts += 1
return status, obs
def execute_action(self, action_idx: int, verbose: bool = False) -> \
(int, bool, bool):
""" Receiving the idx of the action, the agent executes it and
returns the game status """
# Check action_idx:
if action_idx < 0 or action_idx >= self.num_actions:
raise ValueError(f"[Actions] action_idx invalid {action_idx}")
action_name = self.actions[action_idx]
correct_action = True
passed_ball_succ = False
if self.features.has_ball():
if verbose:
if action_name in self.ACTIONS_WITH_BALL:
print(f"[Correct Action] {action_name};")
else:
print(f"[Wrong Action] {action_name};")
if action_name == "SHOOT":
status, _ = self.best_shoot_ball()
elif action_name == "SHORT_DRIBBLE":
status, _ = self.dribble_action(self.NUM_SHORT_DRIBBLE_STEPS)
elif action_name == "LONG_DRIBBLE":
status, _ = self.dribble_action(self.NUM_LONG_DRIBBLE_STEPS,
long=True)
elif "PASS" in action_name:
_, teammate_id = action_name.split("PASS")
status, _ = self.pass_ball(int(teammate_id))
passed_ball_succ = True
else:
correct_action = False
status, _ = self.do_nothing(self.NUM_STOP_STEPS)
else:
if verbose:
if action_name in self.ACTIONS_WITHOUT_BALL:
print(f"[Correct Action] {action_name};")
else:
print(f"[Wrong Action] {action_name};")
if action_name == "NOOP":
status, observation = self.do_nothing(self.NUM_NOOP_STEPS)
elif action_name == "MOVE_TO_BALL":
status, observation = self.move_to_ball(self.NUM_GO_TO_BALL_STEPS)
elif action_name == "MOVE_TO_GOAL":
status, observation = self.move_to_goal(self.NUM_MOVE_STEPS)
elif action_name == "MOVE_TO_NEAR_TEAM":
status, observation = self.move_to_nearest_teammate(
self.NUM_MOVE_STEPS)
elif action_name == "MOVE_FROM_NEAR_TEAM":
status, observation = self.move_away_from_nearest_teammate(
self.NUM_MOVE_STEPS)
elif action_name == "MOVE_TO_NEAR_OP":
status, observation = self.move_to_nearest_opponent(
self.NUM_MOVE_STEPS)
elif action_name == "MOVE_FROM_NEAR_OP":
status, observation = self.move_away_from_nearest_opponent(
self.NUM_MOVE_STEPS)
else:
correct_action = False
status, _ = self.do_nothing(self.NUM_STOP_STEPS)
return status, correct_action, passed_ball_succ
| [
"plastic_agent.utils.get_opposite_vector",
"numpy.linalg.norm",
"numpy.array",
"plastic_agent.utils.get_angle"
] | [((1239, 1317), 'numpy.array', 'np.array', (['[self.features.opponents[0].x_pos, self.features.opponents[0].y_pos]'], {}), '([self.features.opponents[0].x_pos, self.features.opponents[0].y_pos])\n', (1247, 1317), True, 'import numpy as np\n'), ((3660, 3697), 'plastic_agent.utils.get_opposite_vector', 'get_opposite_vector', (['a_coord', 't_coord'], {}), '(a_coord, t_coord)\n', (3679, 3697), False, 'from plastic_agent.utils import get_angle, get_opposite_vector\n'), ((5092, 5163), 'plastic_agent.utils.get_opposite_vector', 'get_opposite_vector', (['self.features.a_coord', 'self.features.near_op_coord'], {}), '(self.features.a_coord, self.features.near_op_coord)\n', (5111, 5163), False, 'from plastic_agent.utils import get_angle, get_opposite_vector\n'), ((1491, 1558), 'plastic_agent.utils.get_angle', 'get_angle', ([], {'goalie': 'goalie_coord', 'player': 'player_coord', 'point': 'goal_pos'}), '(goalie=goalie_coord, player=player_coord, point=goal_pos)\n', (1500, 1558), False, 'from plastic_agent.utils import get_angle, get_opposite_vector\n'), ((1624, 1640), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (1632, 1640), True, 'import numpy as np\n'), ((3322, 3354), 'numpy.linalg.norm', 'np.linalg.norm', (['dist_to_teammate'], {}), '(dist_to_teammate)\n', (3336, 3354), True, 'import numpy as np\n'), ((4795, 4862), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.features.near_op_coord - self.features.a_coord)'], {}), '(self.features.near_op_coord - self.features.a_coord)\n', (4809, 4862), True, 'import numpy as np\n')] |
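# get_opposite_vector comes from plastic_agent.utils and is not shown; from
# its use above it returns a displacement pointing away from the given point.
# A hypothetical stand-in (the real step size is an assumption) together with
# the exact clamping used in move_away_from_nearest_opponent:
import numpy as np

def get_opposite_vector(a_coord, op_coord, step=0.4):
    # Unit vector from the opponent towards the agent, scaled by `step`.
    direction = a_coord - op_coord
    norm = np.linalg.norm(direction)
    if norm == 0.0:
        return np.zeros_like(a_coord)
    return step * direction / norm

a_coord = np.array([0.7, 0.2])
op_coord = np.array([0.5, 0.2])
x_pos, y_pos = a_coord + get_opposite_vector(a_coord, op_coord)
if abs(x_pos) > 0.8:
    x_pos = 0.8 if x_pos > 0 else -0.8
if abs(y_pos) > 0.8:
    y_pos = 0.8 if y_pos > 0 else -0.8
print(x_pos, y_pos)  # -> 0.8 0.2 (x of 1.1 clamped to the pitch boundary)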
"""Test cases for the plot module."""
from typing import Any, Callable
from unittest import mock
import numpy as np
from kernreg.config import TEST_RESOURCES
from kernreg.smooth import Result
from kernreg.utils import get_example_data
import kernreg.visualize as plot_module
@mock.patch(f"{__name__}.plot_module.plt")
def test_plot(mock_plt: Any, change_test_dir: Callable) -> None:
"""Uses mock object to call plot() function."""
motorcycle = get_example_data()
x, y = motorcycle["time"], motorcycle["accel"]
gridpoints = np.linspace(min(x), max(x), 101)
curvest = np.genfromtxt(TEST_RESOURCES / "mcycle_expect_user_bw.csv")
bandwidth = 3.3
rslt = Result(gridpoints=gridpoints, curvest=curvest, bandwidth=bandwidth)
plot_module.plot(x, y, rslt)
plot_module.plot(np.asarray(x), np.asarray(y), rslt)
plot_module.plot(np.asarray(x), np.asarray(y), rslt, save_as="curvefit.png")
assert mock_plt.figure.call_count == 3
| [
"kernreg.utils.get_example_data",
"numpy.asarray",
"kernreg.smooth.Result",
"numpy.genfromtxt",
"unittest.mock.patch",
"kernreg.visualize.plot"
] | [((280, 321), 'unittest.mock.patch', 'mock.patch', (['f"""{__name__}.plot_module.plt"""'], {}), "(f'{__name__}.plot_module.plt')\n", (290, 321), False, 'from unittest import mock\n'), ((456, 474), 'kernreg.utils.get_example_data', 'get_example_data', ([], {}), '()\n', (472, 474), False, 'from kernreg.utils import get_example_data\n'), ((591, 650), 'numpy.genfromtxt', 'np.genfromtxt', (["(TEST_RESOURCES / 'mcycle_expect_user_bw.csv')"], {}), "(TEST_RESOURCES / 'mcycle_expect_user_bw.csv')\n", (604, 650), True, 'import numpy as np\n'), ((682, 749), 'kernreg.smooth.Result', 'Result', ([], {'gridpoints': 'gridpoints', 'curvest': 'curvest', 'bandwidth': 'bandwidth'}), '(gridpoints=gridpoints, curvest=curvest, bandwidth=bandwidth)\n', (688, 749), False, 'from kernreg.smooth import Result\n'), ((755, 783), 'kernreg.visualize.plot', 'plot_module.plot', (['x', 'y', 'rslt'], {}), '(x, y, rslt)\n', (771, 783), True, 'import kernreg.visualize as plot_module\n'), ((805, 818), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (815, 818), True, 'import numpy as np\n'), ((820, 833), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (830, 833), True, 'import numpy as np\n'), ((862, 875), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (872, 875), True, 'import numpy as np\n'), ((877, 890), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (887, 890), True, 'import numpy as np\n')] |
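# The test above works because mock.patch swaps the plt object inside
# kernreg.visualize for a MagicMock, so plot() draws nothing and the test can
# simply count figure() calls. The same pattern in miniature, with a
# hypothetical Plotter standing in for matplotlib.pyplot:
from unittest import mock

class Plotter:
    def figure(self):
        raise RuntimeError("should never run while patched")

plt = Plotter()

def plot_three_times():
    for _ in range(3):
        plt.figure()

with mock.patch(f"{__name__}.plt") as mock_plt:
    plot_three_times()
assert mock_plt.figure.call_count == 3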
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import re
import sys
import flatbuffers
import numpy as np
import nnef_tools.io.tensorflow.tflite_fb as tflite_fb
from nnef_tools.conversion.tensorflow import tflite_to_tf_py, tf_py_to_tflite
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
# See this: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs
OUTPUT_FILE_IDENTIFIER = "TFL3"
OUTPUT_SCHEMA_VERSION = 3
_BuiltinOptionsClasses = [
None,
tflite_fb.Conv2DOptions,
tflite_fb.DepthwiseConv2DOptions,
tflite_fb.ConcatEmbeddingsOptions,
tflite_fb.LSHProjectionOptions,
tflite_fb.Pool2DOptions,
tflite_fb.SVDFOptions,
tflite_fb.RNNOptions,
tflite_fb.FullyConnectedOptions,
tflite_fb.SoftmaxOptions,
tflite_fb.ConcatenationOptions,
tflite_fb.AddOptions,
tflite_fb.L2NormOptions,
tflite_fb.LocalResponseNormalizationOptions,
tflite_fb.LSTMOptions,
tflite_fb.ResizeBilinearOptions,
tflite_fb.CallOptions,
tflite_fb.ReshapeOptions,
tflite_fb.SkipGramOptions,
tflite_fb.SpaceToDepthOptions,
tflite_fb.EmbeddingLookupSparseOptions,
tflite_fb.MulOptions,
tflite_fb.PadOptions,
tflite_fb.GatherOptions,
tflite_fb.BatchToSpaceNDOptions,
tflite_fb.SpaceToBatchNDOptions,
tflite_fb.TransposeOptions,
tflite_fb.ReducerOptions,
tflite_fb.SubOptions,
tflite_fb.DivOptions,
tflite_fb.SqueezeOptions,
tflite_fb.SequenceRNNOptions,
tflite_fb.StridedSliceOptions,
tflite_fb.ExpOptions,
tflite_fb.TopKV2Options,
tflite_fb.SplitOptions,
tflite_fb.LogSoftmaxOptions,
tflite_fb.CastOptions,
tflite_fb.DequantizeOptions,
tflite_fb.MaximumMinimumOptions,
tflite_fb.ArgMaxOptions,
tflite_fb.LessOptions,
tflite_fb.NegOptions,
tflite_fb.PadV2Options,
tflite_fb.GreaterOptions,
tflite_fb.GreaterEqualOptions,
tflite_fb.LessEqualOptions,
tflite_fb.SelectOptions,
tflite_fb.SliceOptions,
tflite_fb.TransposeConvOptions,
tflite_fb.SparseToDenseOptions,
tflite_fb.TileOptions,
tflite_fb.ExpandDimsOptions,
tflite_fb.EqualOptions,
tflite_fb.NotEqualOptions,
tflite_fb.ShapeOptions,
tflite_fb.PowOptions,
tflite_fb.ArgMinOptions,
tflite_fb.FakeQuantOptions,
tflite_fb.PackOptions,
tflite_fb.LogicalOrOptions,
tflite_fb.OneHotOptions,
tflite_fb.LogicalAndOptions,
tflite_fb.LogicalNotOptions,
tflite_fb.UnpackOptions,
tflite_fb.FloorDivOptions,
tflite_fb.SquareOptions,
tflite_fb.ZerosLikeOptions,
tflite_fb.FillOptions,
tflite_fb.BidirectionalSequenceLSTMOptions,
tflite_fb.BidirectionalSequenceRNNOptions,
tflite_fb.UnidirectionalSequenceLSTMOptions,
tflite_fb.FloorModOptions,
tflite_fb.RangeOptions,
tflite_fb.ResizeNearestNeighborOptions,
tflite_fb.LeakyReluOptions,
tflite_fb.SquaredDifferenceOptions,
tflite_fb.MirrorPadOptions,
tflite_fb.AbsOptions,
tflite_fb.SplitVOptions,
tflite_fb.UniqueOptions,
tflite_fb.ReverseV2Options,
tflite_fb.AddNOptions,
tflite_fb.GatherNdOptions,
tflite_fb.CosOptions,
tflite_fb.WhereOptions,
tflite_fb.RankOptions,
tflite_fb.ReverseSequenceOptions,
tflite_fb.MatrixDiagOptions,
tflite_fb.QuantizeOptions,
tflite_fb.MatrixSetDiagOptions,
]
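# The list above is indexed by the BuiltinOptions union type stored on each
# operator, so resolving an operator's options is a table lookup followed by
# the usual flatbuffers union dance. A sketch, assuming the standard
# flatbuffers-generated accessors (BuiltinOptionsType / BuiltinOptions) on
# tflite_fb.Operator:
def _get_builtin_options(op):
    options_type = op.BuiltinOptionsType()   # value of the union type enum
    options_class = _BuiltinOptionsClasses[options_type]
    if options_class is None:
        return None
    union_table = op.BuiltinOptions()        # a flatbuffers.table.Table
    options = options_class()
    options.Init(union_table.Bytes, union_table.Pos)
    return options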
_BuiltinOptionsByOperator = {
tflite_fb.BuiltinOperator.ADD: tflite_fb.BuiltinOptions.AddOptions,
tflite_fb.BuiltinOperator.AVERAGE_POOL_2D: tflite_fb.BuiltinOptions.Pool2DOptions,
tflite_fb.BuiltinOperator.CONCATENATION: tflite_fb.BuiltinOptions.ConcatenationOptions,
tflite_fb.BuiltinOperator.CONV_2D: tflite_fb.BuiltinOptions.Conv2DOptions,
tflite_fb.BuiltinOperator.DEPTHWISE_CONV_2D: tflite_fb.BuiltinOptions.DepthwiseConv2DOptions,
tflite_fb.BuiltinOperator.DEQUANTIZE: tflite_fb.BuiltinOptions.DequantizeOptions,
tflite_fb.BuiltinOperator.EMBEDDING_LOOKUP: None,
tflite_fb.BuiltinOperator.FLOOR: None,
tflite_fb.BuiltinOperator.FULLY_CONNECTED: tflite_fb.BuiltinOptions.FullyConnectedOptions,
tflite_fb.BuiltinOperator.HASHTABLE_LOOKUP: None,
tflite_fb.BuiltinOperator.L2_NORMALIZATION: tflite_fb.BuiltinOptions.L2NormOptions,
tflite_fb.BuiltinOperator.L2_POOL_2D: tflite_fb.BuiltinOptions.Pool2DOptions,
tflite_fb.BuiltinOperator.LOCAL_RESPONSE_NORMALIZATION: tflite_fb.BuiltinOptions.LocalResponseNormalizationOptions,
tflite_fb.BuiltinOperator.LOGISTIC: None,
tflite_fb.BuiltinOperator.LSH_PROJECTION: None,
tflite_fb.BuiltinOperator.LSTM: tflite_fb.BuiltinOptions.LSTMOptions,
tflite_fb.BuiltinOperator.MAX_POOL_2D: tflite_fb.BuiltinOptions.Pool2DOptions,
tflite_fb.BuiltinOperator.MUL: tflite_fb.BuiltinOptions.MulOptions,
tflite_fb.BuiltinOperator.RELU: None,
tflite_fb.BuiltinOperator.RELU_N1_TO_1: None,
tflite_fb.BuiltinOperator.RELU6: None,
tflite_fb.BuiltinOperator.RESHAPE: tflite_fb.BuiltinOptions.ReshapeOptions,
tflite_fb.BuiltinOperator.RESIZE_BILINEAR: tflite_fb.BuiltinOptions.ResizeBilinearOptions,
tflite_fb.BuiltinOperator.RNN: tflite_fb.BuiltinOptions.RNNOptions,
tflite_fb.BuiltinOperator.SOFTMAX: tflite_fb.BuiltinOptions.SoftmaxOptions,
tflite_fb.BuiltinOperator.SPACE_TO_DEPTH: tflite_fb.BuiltinOptions.SpaceToDepthOptions,
tflite_fb.BuiltinOperator.SVDF: tflite_fb.BuiltinOptions.SVDFOptions,
tflite_fb.BuiltinOperator.TANH: None,
tflite_fb.BuiltinOperator.CONCAT_EMBEDDINGS: tflite_fb.BuiltinOptions.ConcatEmbeddingsOptions,
tflite_fb.BuiltinOperator.SKIP_GRAM: tflite_fb.BuiltinOptions.SkipGramOptions,
tflite_fb.BuiltinOperator.CALL: tflite_fb.BuiltinOptions.CallOptions,
tflite_fb.BuiltinOperator.CUSTOM: None,
tflite_fb.BuiltinOperator.EMBEDDING_LOOKUP_SPARSE: tflite_fb.BuiltinOptions.EmbeddingLookupSparseOptions,
tflite_fb.BuiltinOperator.PAD: tflite_fb.BuiltinOptions.PadOptions,
tflite_fb.BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN: None,
tflite_fb.BuiltinOperator.GATHER: tflite_fb.BuiltinOptions.GatherOptions,
tflite_fb.BuiltinOperator.BATCH_TO_SPACE_ND: tflite_fb.BuiltinOptions.BatchToSpaceNDOptions,
tflite_fb.BuiltinOperator.SPACE_TO_BATCH_ND: tflite_fb.BuiltinOptions.SpaceToBatchNDOptions,
tflite_fb.BuiltinOperator.TRANSPOSE: tflite_fb.BuiltinOptions.TransposeOptions,
tflite_fb.BuiltinOperator.MEAN: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.SUB: tflite_fb.BuiltinOptions.SubOptions,
tflite_fb.BuiltinOperator.DIV: tflite_fb.BuiltinOptions.DivOptions,
tflite_fb.BuiltinOperator.SQUEEZE: tflite_fb.BuiltinOptions.SqueezeOptions,
tflite_fb.BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_LSTM: tflite_fb.BuiltinOptions.UnidirectionalSequenceLSTMOptions,
tflite_fb.BuiltinOperator.STRIDED_SLICE: tflite_fb.BuiltinOptions.StridedSliceOptions,
tflite_fb.BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN: tflite_fb.BuiltinOptions.BidirectionalSequenceRNNOptions,
tflite_fb.BuiltinOperator.EXP: tflite_fb.BuiltinOptions.ExpOptions,
tflite_fb.BuiltinOperator.TOPK_V2: tflite_fb.BuiltinOptions.TopKV2Options,
tflite_fb.BuiltinOperator.SPLIT: tflite_fb.BuiltinOptions.SplitOptions,
tflite_fb.BuiltinOperator.LOG_SOFTMAX: tflite_fb.BuiltinOptions.LogSoftmaxOptions,
tflite_fb.BuiltinOperator.DELEGATE: None,
tflite_fb.BuiltinOperator.BIDIRECTIONAL_SEQUENCE_LSTM: tflite_fb.BuiltinOptions.BidirectionalSequenceLSTMOptions,
tflite_fb.BuiltinOperator.CAST: tflite_fb.BuiltinOptions.CastOptions,
tflite_fb.BuiltinOperator.PRELU: None,
tflite_fb.BuiltinOperator.MAXIMUM: tflite_fb.BuiltinOptions.MaximumMinimumOptions,
tflite_fb.BuiltinOperator.ARG_MAX: tflite_fb.BuiltinOptions.ArgMaxOptions,
tflite_fb.BuiltinOperator.MINIMUM: tflite_fb.BuiltinOptions.MaximumMinimumOptions,
tflite_fb.BuiltinOperator.LESS: tflite_fb.BuiltinOptions.LessOptions,
tflite_fb.BuiltinOperator.NEG: tflite_fb.BuiltinOptions.NegOptions,
tflite_fb.BuiltinOperator.PADV2: tflite_fb.BuiltinOptions.PadV2Options,
tflite_fb.BuiltinOperator.GREATER: tflite_fb.BuiltinOptions.GreaterOptions,
tflite_fb.BuiltinOperator.GREATER_EQUAL: tflite_fb.BuiltinOptions.GreaterEqualOptions,
tflite_fb.BuiltinOperator.LESS_EQUAL: tflite_fb.BuiltinOptions.LessEqualOptions,
tflite_fb.BuiltinOperator.SELECT: tflite_fb.BuiltinOptions.SelectOptions,
tflite_fb.BuiltinOperator.SLICE: tflite_fb.BuiltinOptions.SliceOptions,
tflite_fb.BuiltinOperator.SIN: None,
tflite_fb.BuiltinOperator.TRANSPOSE_CONV: tflite_fb.BuiltinOptions.TransposeConvOptions,
tflite_fb.BuiltinOperator.SPARSE_TO_DENSE: tflite_fb.BuiltinOptions.SparseToDenseOptions,
tflite_fb.BuiltinOperator.TILE: tflite_fb.BuiltinOptions.TileOptions,
tflite_fb.BuiltinOperator.EXPAND_DIMS: tflite_fb.BuiltinOptions.ExpandDimsOptions,
tflite_fb.BuiltinOperator.EQUAL: tflite_fb.BuiltinOptions.EqualOptions,
tflite_fb.BuiltinOperator.NOT_EQUAL: tflite_fb.BuiltinOptions.NotEqualOptions,
tflite_fb.BuiltinOperator.LOG: None,
tflite_fb.BuiltinOperator.SUM: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.SQRT: None,
tflite_fb.BuiltinOperator.RSQRT: None,
tflite_fb.BuiltinOperator.SHAPE: tflite_fb.BuiltinOptions.ShapeOptions,
tflite_fb.BuiltinOperator.POW: tflite_fb.BuiltinOptions.PowOptions,
tflite_fb.BuiltinOperator.ARG_MIN: tflite_fb.BuiltinOptions.ArgMinOptions,
tflite_fb.BuiltinOperator.FAKE_QUANT: tflite_fb.BuiltinOptions.FakeQuantOptions,
tflite_fb.BuiltinOperator.REDUCE_PROD: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.REDUCE_MAX: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.PACK: tflite_fb.BuiltinOptions.PackOptions,
tflite_fb.BuiltinOperator.LOGICAL_OR: tflite_fb.BuiltinOptions.LogicalOrOptions,
tflite_fb.BuiltinOperator.ONE_HOT: tflite_fb.BuiltinOptions.OneHotOptions,
tflite_fb.BuiltinOperator.LOGICAL_AND: tflite_fb.BuiltinOptions.LogicalAndOptions,
tflite_fb.BuiltinOperator.LOGICAL_NOT: tflite_fb.BuiltinOptions.LogicalNotOptions,
tflite_fb.BuiltinOperator.UNPACK: tflite_fb.BuiltinOptions.UnpackOptions,
tflite_fb.BuiltinOperator.REDUCE_MIN: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.FLOOR_DIV: tflite_fb.BuiltinOptions.FloorDivOptions,
tflite_fb.BuiltinOperator.REDUCE_ANY: tflite_fb.BuiltinOptions.ReducerOptions,
tflite_fb.BuiltinOperator.SQUARE: tflite_fb.BuiltinOptions.SquareOptions,
tflite_fb.BuiltinOperator.ZEROS_LIKE: tflite_fb.BuiltinOptions.ZerosLikeOptions,
tflite_fb.BuiltinOperator.FILL: tflite_fb.BuiltinOptions.FillOptions,
tflite_fb.BuiltinOperator.FLOOR_MOD: tflite_fb.BuiltinOptions.FloorModOptions,
tflite_fb.BuiltinOperator.RANGE: tflite_fb.BuiltinOptions.RangeOptions,
tflite_fb.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR: tflite_fb.BuiltinOptions.ResizeNearestNeighborOptions,
tflite_fb.BuiltinOperator.LEAKY_RELU: tflite_fb.BuiltinOptions.LeakyReluOptions,
tflite_fb.BuiltinOperator.SQUARED_DIFFERENCE: tflite_fb.BuiltinOptions.SquaredDifferenceOptions,
tflite_fb.BuiltinOperator.MIRROR_PAD: tflite_fb.BuiltinOptions.MirrorPadOptions,
tflite_fb.BuiltinOperator.ABS: tflite_fb.BuiltinOptions.AbsOptions,
tflite_fb.BuiltinOperator.SPLIT_V: tflite_fb.BuiltinOptions.SplitVOptions,
tflite_fb.BuiltinOperator.UNIQUE: tflite_fb.BuiltinOptions.UniqueOptions,
tflite_fb.BuiltinOperator.CEIL: None,
tflite_fb.BuiltinOperator.REVERSE_V2: tflite_fb.BuiltinOptions.ReverseV2Options,
tflite_fb.BuiltinOperator.ADD_N: tflite_fb.BuiltinOptions.AddNOptions,
tflite_fb.BuiltinOperator.GATHER_ND: tflite_fb.BuiltinOptions.GatherNdOptions,
tflite_fb.BuiltinOperator.COS: tflite_fb.BuiltinOptions.CosOptions,
tflite_fb.BuiltinOperator.WHERE: tflite_fb.BuiltinOptions.WhereOptions,
tflite_fb.BuiltinOperator.RANK: tflite_fb.BuiltinOptions.RankOptions,
tflite_fb.BuiltinOperator.ELU: None,
tflite_fb.BuiltinOperator.REVERSE_SEQUENCE: tflite_fb.BuiltinOptions.ReverseSequenceOptions,
tflite_fb.BuiltinOperator.MATRIX_DIAG: tflite_fb.BuiltinOptions.MatrixDiagOptions,
tflite_fb.BuiltinOperator.QUANTIZE: tflite_fb.BuiltinOptions.QuantizeOptions,
tflite_fb.BuiltinOperator.MATRIX_SET_DIAG: tflite_fb.BuiltinOptions.MatrixSetDiagOptions,
}
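# Illustrative lookup sketch: the table above maps an operator to its builtin
# options *type*, and _BuiltinOptionsClasses (defined earlier in this module)
# maps that type to the generated options *class*, e.g.:
#   options_type = _BuiltinOptionsByOperator[tflite_fb.BuiltinOperator.CONV_2D]
#   options_class = _BuiltinOptionsClasses[options_type]  # -> tflite_fb.Conv2DOptions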
def _enumerate_options_getters(optionsClass):
return {_camel_to_snake(name): func for name, func in optionsClass.__dict__.items()
if not name.startswith('_')
and name != 'Init' and not name.startswith('GetRootAs')
and not name.endswith('AsNumpy') and not name.endswith('Length')
and not isinstance(func, classmethod)}
def _enumerate_options_length_getters(optionsClass):
return {_camel_to_snake(name[:-6]): func for name, func in optionsClass.__dict__.items()
if not name.startswith('_')
and not name.startswith('GetRootAs') and name.endswith('Length')}
def _enumerate_options_adders(optionsClass):
className = optionsClass.__name__
prefix = className + 'Add'
optionsModule = sys.modules[optionsClass.__module__]
return {_camel_to_snake(name[len(prefix):]): func for name, func in optionsModule.__dict__.items()
if name.startswith(prefix)}
def _enumerate_options_vector_starters(optionsClass):
className = optionsClass.__name__
prefix, suffix = className + 'Start', 'Vector'
optionsModule = sys.modules[optionsClass.__module__]
return {_camel_to_snake(name[len(prefix):-len(suffix)]): func for name, func in optionsModule.__dict__.items()
if name.startswith(prefix) and name.endswith(suffix)}
def _get_options_starter_ender(optionsClass):
className = optionsClass.__name__
optionsModule = sys.modules[optionsClass.__module__]
moduleDict = optionsModule.__dict__
return moduleDict[className + 'Start'], moduleDict[className + 'End']
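# Illustrative sketch of what the reflection helpers above yield, assuming the
# usual flatbuffers-python code generation (accessor methods on the options
# class, module-level <Class>Add<Field>/<Class>Start<Field>Vector functions):
#   _enumerate_options_getters(tflite_fb.Conv2DOptions)
#       -> {'padding': Padding, 'stride_w': StrideW, ...}
#   _enumerate_options_adders(tflite_fb.Conv2DOptions)
#       -> {'padding': Conv2DOptionsAddPadding, 'stride_w': Conv2DOptionsAddStrideW, ...}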
def _enumerate_attributes(optionsClass, optionsObject):
getters = _enumerate_options_getters(optionsClass)
length_getters = _enumerate_options_length_getters(optionsClass)
attribs = {}
for name, getter in getters.items():
length_getter = length_getters.get(name)
value = getter(optionsObject) if length_getter is None else \
[getter(optionsObject, i) for i in range(length_getter(optionsObject))]
attribs[name] = _substitute_enum_value_with_name(name, value, optionsClass)
return attribs
def _substitute_enum_value_with_name(key, value, optionsClass):
cls, map = _OptionEnumNameByValueMaps.get(key, (None, None))
return map[value] if map is not None and (cls is None or cls == optionsClass) else value
def _substitute_enum_name_with_value(key, name, optionsClass):
cls, map = _OptionEnumValueByNameMaps.get(key, (None, None))
return map[name] if map is not None and (cls is None or cls == optionsClass) else name
def _generate_enum_value_by_name(enumClass):
return {name: value for name, value in enumClass.__dict__.items() if not name.startswith('_')}
def _generate_enum_name_by_value(enumClass):
return {value: name for name, value in enumClass.__dict__.items() if not name.startswith('_')}
_OptionEnumNameByValueMaps = {
'padding': (None, _generate_enum_name_by_value(tflite_fb.Padding)),
'fused_activation_function': (None, _generate_enum_name_by_value(tflite_fb.ActivationFunctionType)),
'weights_format': (
tflite_fb.FullyConnectedOptions, _generate_enum_name_by_value(tflite_fb.FullyConnectedOptionsWeightsFormat)),
'type': (tflite_fb.LSHProjectionOptions, _generate_enum_name_by_value(tflite_fb.LSHProjectionType)),
'kernel_type': (tflite_fb.LSTMOptions, _generate_enum_name_by_value(tflite_fb.LSTMKernelType)),
'combiner': (tflite_fb.EmbeddingLookupSparseOptions, _generate_enum_name_by_value(tflite_fb.CombinerType)),
}
_OptionEnumValueByNameMaps = {
'padding': (None, _generate_enum_value_by_name(tflite_fb.Padding)),
'fused_activation_function': (None, _generate_enum_value_by_name(tflite_fb.ActivationFunctionType)),
'weights_format': (
tflite_fb.FullyConnectedOptions, _generate_enum_value_by_name(tflite_fb.FullyConnectedOptionsWeightsFormat)),
'type': (tflite_fb.LSHProjectionOptions, _generate_enum_value_by_name(tflite_fb.LSHProjectionType)),
'kernel_type': (tflite_fb.LSTMOptions, _generate_enum_value_by_name(tflite_fb.LSTMKernelType)),
'combiner': (tflite_fb.EmbeddingLookupSparseOptions, _generate_enum_value_by_name(tflite_fb.CombinerType)),
}
_TensorTypeNameByValue = _generate_enum_name_by_value(tflite_fb.TensorType)
_TensorTypeValueByName = _generate_enum_value_by_name(tflite_fb.TensorType)
_BuiltinOperatorNameByValue = _generate_enum_name_by_value(tflite_fb.BuiltinOperator)
_BuiltinOperatorValueByName = _generate_enum_value_by_name(tflite_fb.BuiltinOperator)
_regex1 = re.compile('(.)([A-Z][a-z]+)')
_regex2 = re.compile('([a-z0-9])([A-Z])')
def _camel_to_snake(s):
subbed = _regex1.sub(r'\1_\2', s)
return _regex2.sub(r'\1_\2', subbed).lower()
def _snake_to_camel(s):
return ''.join(c for c in s.title() if c != '_')
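# Quick sanity examples for the conversions above:
#   _camel_to_snake('FusedActivationFunction')  -> 'fused_activation_function'
#   _camel_to_snake('StrideW')                  -> 'stride_w'
#   _snake_to_camel('stride_w')                 -> 'StrideW'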
def _get_quantization(tensor):
quant = tensor.Quantization()
if quant.MinLength() == 0:
min = None
elif quant.MinLength() == 1:
min = float(quant.Min(0))
else:
min = quant.MinAsNumpy()
if quant.MaxLength() == 0:
max = None
elif quant.MaxLength() == 1:
max = float(quant.Max(0))
else:
max = quant.MaxAsNumpy()
if quant.ScaleLength() == 0:
scale = None
elif quant.ScaleLength() == 1:
scale = float(quant.Scale(0))
else:
scale = quant.ScaleAsNumpy()
if quant.ZeroPointLength() == 0:
zero_point = None
elif quant.ZeroPointLength() == 1:
zero_point = int(quant.ZeroPoint(0))
else:
zero_point = quant.ZeroPointAsNumpy()
if all(x is None for x in [min, max, scale, zero_point]):
return None
else:
return TFTensor.Quantization(min, max, scale, zero_point)
def _get_data_as_ndarray(buffer, dtype, shape):
return buffer.DataAsNumpy().view(dtype).reshape(shape) if buffer.DataLength() != 0 else None
_TensorDtypeAsNumpy = [
np.float32,
np.float16,
np.int32,
np.uint8,
np.int64,
    np.str_,   # np.str (an alias of builtin str) was removed in NumPy 1.24
    np.bool_,  # np.bool was likewise removed in NumPy 1.24
np.int16,
np.complex64,
]
_NumpyDtypeAsTFLite = {
np.float32: tflite_fb.TensorType.FLOAT32,
np.float16: tflite_fb.TensorType.FLOAT16,
np.int32: tflite_fb.TensorType.INT32,
np.uint8: tflite_fb.TensorType.UINT8,
np.int64: tflite_fb.TensorType.INT64,
    np.str_: tflite_fb.TensorType.STRING,
    np.bool_: tflite_fb.TensorType.BOOL,
np.int16: tflite_fb.TensorType.INT16,
np.complex64: tflite_fb.TensorType.COMPLEX64,
}
def _CreateNumpyVector(builder, x):
if not isinstance(x, np.ndarray):
raise TypeError("non-numpy-ndarray passed to CreateNumpyVector")
if x.dtype.kind not in ['b', 'i', 'u', 'f']:
raise TypeError("numpy-ndarray holds elements of unsupported datatype")
if x.ndim > 1:
raise TypeError("multidimensional-ndarray passed to CreateNumpyVector")
builder.StartVector(x.itemsize, x.size, x.dtype.alignment)
# Ensure little endian byte ordering
if x.dtype.str[0] != "<":
x = x.byteswap()
length = x.itemsize * x.size
builder.head -= length
builder.Bytes[builder.head: builder.head + length] = x.tobytes()
return builder.EndVector(x.size)
def _build_buffer(builder, bytes):
data = _CreateNumpyVector(builder, bytes)
tflite_fb.BufferStart(builder)
tflite_fb.BufferAddData(builder, data)
return tflite_fb.BufferEnd(builder)
def _build_tensor(builder, tensor, buffer_index):
name = builder.CreateString(tensor.name)
type = _TensorTypeValueByName[tensor.dtype]
tflite_fb.TensorStartShapeVector(builder, len(tensor.shape))
for s in reversed(tensor.shape):
builder.PrependInt32(s)
shape = builder.EndVector(len(tensor.shape))
buffer = buffer_index if tensor.data is not None else 0
quant = _build_quantization(builder, tensor.quantization, tensor.dtype)
tflite_fb.TensorStart(builder)
tflite_fb.TensorAddName(builder, name)
tflite_fb.TensorAddShape(builder, shape)
tflite_fb.TensorAddType(builder, type)
tflite_fb.TensorAddBuffer(builder, buffer)
if quant is not None:
tflite_fb.TensorAddQuantization(builder, quant)
return tflite_fb.TensorEnd(builder)
def _ensure_numpy_array(x, dtype):
if isinstance(x, np.ndarray):
assert x.dtype == dtype
return x
else:
return np.array(x, dtype=dtype)
_custom_op_type_key = "__custom_op_type"
_custom_op_options_key = "custom"
def _builtin_code_and_custom_code(operator):
builtin_code = _BuiltinOperatorValueByName.get(operator.name, tflite_fb.BuiltinOperator.CUSTOM)
custom_code = None
if builtin_code == tflite_fb.BuiltinOperator.CUSTOM:
assert _custom_op_type_key in operator.attribs, \
"CUSTOM op name must be set as an attribute with the key '{}'".format(_custom_op_type_key)
custom_code = operator.attribs[_custom_op_type_key]
return (builtin_code, custom_code)
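# Sketch of the attribute convention implied by the two keys above for CUSTOM
# ops (the op name and byte values below are placeholders):
#   operation.attribs = {
#       _custom_op_type_key: 'MyCustomOp',   # becomes the OperatorCode custom code
#       _custom_op_options_key: [1, 2, 3],   # raw custom-options byte values
#   }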
def _build_quantization(builder, quant, dtype):
if quant is None or quant.all_zero():
return None
min = _CreateNumpyVector(builder, _ensure_numpy_array(quant.min, dtype=np.float32))
max = _CreateNumpyVector(builder, _ensure_numpy_array(quant.max, dtype=np.float32))
scale = _CreateNumpyVector(builder, _ensure_numpy_array(quant.scale, dtype=np.float32))
zero_point = _CreateNumpyVector(builder, _ensure_numpy_array(quant.zero_point, dtype=np.int64))
if dtype == "INT32":
tflite_fb.QuantizationParametersStart(builder)
tflite_fb.QuantizationParametersAddScale(builder, scale)
tflite_fb.QuantizationParametersAddZeroPoint(builder, zero_point)
return tflite_fb.QuantizationParametersEnd(builder)
else:
tflite_fb.QuantizationParametersStart(builder)
tflite_fb.QuantizationParametersAddMin(builder, min)
tflite_fb.QuantizationParametersAddMax(builder, max)
tflite_fb.QuantizationParametersAddScale(builder, scale)
tflite_fb.QuantizationParametersAddZeroPoint(builder, zero_point)
return tflite_fb.QuantizationParametersEnd(builder)
def _build_operator_code(builder, builtinCode, customCode):
customCode_hndl = None
if builtinCode == tflite_fb.BuiltinOperator.CUSTOM:
customCode_hndl = builder.CreateString(customCode)
tflite_fb.OperatorCodeStart(builder)
tflite_fb.OperatorCodeAddBuiltinCode(builder, builtinCode)
if customCode_hndl:
tflite_fb.OperatorCodeAddCustomCode(builder, customCode_hndl)
return tflite_fb.OperatorCodeEnd(builder)
def _build_operator_options(builder, attribs, optionsClass):
starter, ender = _get_options_starter_ender(optionsClass)
adders = _enumerate_options_adders(optionsClass)
vector_starters = _enumerate_options_vector_starters(optionsClass)
vector_values = {}
for name, vector_starter in vector_starters.items():
value = attribs[name]
assert isinstance(value, list) and (len(value) == 0 or isinstance(value[0], int))
vector_starter(builder, len(value))
for i in reversed(value):
builder.PrependInt32(i)
vector_values[name] = builder.EndVector(len(value))
starter(builder)
for name, adder in adders.items():
if name == 'fused_activation_function' and name not in attribs:
value = 'NONE'
else:
value = attribs[name]
value = vector_values.get(name, value)
value = _substitute_enum_name_with_value(name, value, optionsClass)
adder(builder, value)
return ender(builder)
def _build_operator_custom_options(builder, attribs):
assert _custom_op_options_key in attribs, \
"'{}' must be set as an attribute in a CUSTOM op to build custom options".format(_custom_op_options_key)
custom = attribs[_custom_op_options_key]
tflite_fb.OperatorStartCustomOptionsVector(builder, len(custom))
for b in reversed(custom):
builder.PrependUint8(b)
return builder.EndVector(len(custom))
def _build_operator(builder, operation, op_code_index, tensor_index):
inputs = [tensor_index[tensor] for tensor in operation.inputs]
tflite_fb.OperatorStartInputsVector(builder, len(inputs))
for input in reversed(inputs):
builder.PrependInt32(input)
inputs = builder.EndVector(len(inputs))
outputs = [tensor_index[tensor] for tensor in operation.outputs]
tflite_fb.OperatorStartOutputsVector(builder, len(outputs))
for output in reversed(outputs):
builder.PrependInt32(output)
outputs = builder.EndVector(len(outputs))
attribs = {name: value for name, value in operation.attribs.items()}
builtin_code, custom_code = _builtin_code_and_custom_code(operation)
optionsType = _BuiltinOptionsByOperator[builtin_code]
if optionsType is None:
optionsType = 0
optionsClass = _BuiltinOptionsClasses[optionsType]
if optionsClass is not None:
options = _build_operator_options(builder, attribs, optionsClass)
else:
options = None
if custom_code and _custom_op_options_key in attribs:
custom_options = _build_operator_custom_options(builder, attribs)
else:
custom_options = None
tflite_fb.OperatorStart(builder)
tflite_fb.OperatorAddOpcodeIndex(builder, op_code_index[(builtin_code, custom_code)])
tflite_fb.OperatorAddInputs(builder, inputs)
tflite_fb.OperatorAddOutputs(builder, outputs)
tflite_fb.OperatorAddBuiltinOptionsType(builder, optionsType)
if options:
tflite_fb.OperatorAddBuiltinOptions(builder, options)
if custom_options:
tflite_fb.OperatorAddCustomOptions(builder, custom_options)
return tflite_fb.OperatorEnd(builder)
def read_tflite_graph_from_flatbuffers(filename):
with open(filename, 'rb') as file:
bytes = bytearray(file.read())
model = tflite_fb.Model.GetRootAsModel(bytes, 0)
if model.SubgraphsLength() != 1:
raise NotImplementedError('graphs with multiple sub-graphs are not supported')
subgraph = model.Subgraphs(0)
name = subgraph.Name()
graph = TFGraph(name.decode() if name is not None else None)
tensors = []
for i in range(subgraph.TensorsLength()):
tensor = subgraph.Tensors(i)
name = tensor.Name().decode()
shape = [tensor.Shape(i) for i in range(tensor.ShapeLength())]
dtype = _TensorTypeNameByValue[tensor.Type()]
buffer = model.Buffers(tensor.Buffer())
data = _get_data_as_ndarray(buffer, _TensorDtypeAsNumpy[tensor.Type()], shape)
quant = _get_quantization(tensor)
label = name if data is not None else None
tensors.append(TFTensor(graph,
utils.anystr_to_str(name),
shape,
dtype,
data,
utils.anystr_to_str(label) if label is not None else None,
quant))
for i in range(subgraph.OperatorsLength()):
operator = subgraph.Operators(i)
operatorCode = model.OperatorCodes(operator.OpcodeIndex())
name = _BuiltinOperatorNameByValue[operatorCode.BuiltinCode()]
options = operator.BuiltinOptions()
optionsClass = _BuiltinOptionsClasses[operator.BuiltinOptionsType()]
inputs = [tensors[operator.Inputs(i)] for i in range(operator.InputsLength()) if operator.Inputs(i) != -1]
outputs = [tensors[operator.Outputs(i)] for i in range(operator.OutputsLength()) if operator.Outputs(i) != -1]
if optionsClass is not None:
optionsObject = optionsClass()
optionsObject.Init(options.Bytes, options.Pos)
attribs = _enumerate_attributes(optionsClass, optionsObject)
else:
attribs = {}
if operatorCode.BuiltinCode() == tflite_fb.BuiltinOperator.CUSTOM:
assert _custom_op_type_key not in attribs, \
"'{}' shall not be set as an attribute".format(_custom_op_type_key)
attribs[_custom_op_type_key] = operatorCode.CustomCode().decode('ascii')
assert _custom_op_options_key not in attribs, \
"'{}' shall not be set as an attribute".format(_custom_op_options_key)
attribs[_custom_op_options_key] = operator.CustomOptionsAsNumpy().tolist()
TFOperation(graph, name, inputs, outputs, attribs)
inputs = []
for i in range(subgraph.InputsLength()):
tensor_index = subgraph.Inputs(i)
inputs.append(tensors[tensor_index])
outputs = []
for i in range(subgraph.OutputsLength()):
tensor_index = subgraph.Outputs(i)
outputs.append(tensors[tensor_index])
graph.inputs = inputs
graph.outputs = outputs
return graph
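# Minimal read-side usage sketch (the file name is a placeholder; attribute
# names follow the TFOperation constructor used above):
#   graph = read_tflite_graph_from_flatbuffers('model.tflite')
#   for op in graph.operations:
#       print(op.name, len(op.inputs), len(op.outputs))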
# https://github.com/google/flatbuffers/issues/4814
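# (Recent flatbuffers releases accept a file identifier directly in
# Builder.Finish; this shim targets versions that predate that support.)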
def FinishWithFileIdentifier(builder, rootTable, fid):
from flatbuffers import number_types as N
from flatbuffers import encode
if fid is None or len(fid) != 4:
raise Exception('fid must be 4 chars')
flags = N.Uint8Flags
prepSize = 4
builder.Prep(builder.minalign, prepSize + len(fid))
for i in range(3, -1, -1):
builder.head = builder.head - flags.bytewidth
encode.Write(flags.packer_type, builder.Bytes, builder.Head(), ord(fid[i]))
return builder.Finish(rootTable)
def write_tflite_graph_to_flatbuffers(graph, filename):
graph.sort()
builder = flatbuffers.Builder(0)
tflite_fb.BufferStartDataVector(builder, 0)
data = builder.EndVector(0)
tflite_fb.BufferStart(builder)
tflite_fb.BufferAddData(builder, data)
buffer = tflite_fb.BufferEnd(builder)
buffers = [buffer]
for tensor in graph.tensors:
if tensor.data is not None:
tensor_data = tensor.data
if isinstance(tensor_data, (list, tuple)):
tensor_data = np.array(tensor_data, dtype=_TensorDtypeAsNumpy[_TensorTypeValueByName[tensor.dtype]])
bytes = tensor_data.reshape([-1]).view(np.uint8)
buffers.append(_build_buffer(builder, bytes))
    # metadata buffer
metadata_index = len(buffers)
buffers.append(_build_buffer(builder, np.frombuffer(b'1.14.0', dtype=np.uint8)))
tflite_fb.ModelStartBuffersVector(builder, len(buffers))
for buffer in reversed(buffers):
builder.PrependUOffsetTRelative(buffer)
buffers = builder.EndVector(len(buffers))
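    # tensor buffer indices start at 1: index 0 is the empty sentinel buffer
    # created above for tensors that carry no data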
buffer_index = 1
tensors = []
tensor_index = {}
for tensor in graph.tensors:
tensor_index[tensor] = len(tensors)
tensors.append(_build_tensor(builder, tensor, buffer_index))
if tensor.data is not None:
buffer_index += 1
tflite_fb.SubGraphStartTensorsVector(builder, len(tensors))
for tensor in reversed(tensors):
builder.PrependUOffsetTRelative(tensor)
tensors = builder.EndVector(len(tensors))
op_codes = []
op_code_index = {}
for operation in graph.operations:
builtin_and_custom_codes = _builtin_code_and_custom_code(operation)
if builtin_and_custom_codes not in op_code_index:
op_code_index[builtin_and_custom_codes] = len(op_codes)
op_codes.append(_build_operator_code(builder, *builtin_and_custom_codes))
tflite_fb.ModelStartOperatorCodesVector(builder, len(op_codes))
for op_code in reversed(op_codes):
builder.PrependUOffsetTRelative(op_code)
op_codes = builder.EndVector(len(op_codes))
operators = []
for operation in graph.operations:
operators.append(_build_operator(builder, operation, op_code_index, tensor_index))
tflite_fb.SubGraphStartOperatorsVector(builder, len(operators))
for operator in reversed(operators):
builder.PrependUOffsetTRelative(operator)
operators = builder.EndVector(len(operators))
name = builder.CreateString(graph.name) if graph.name is not None else None
inputs = graph.inputs
tflite_fb.SubGraphStartInputsVector(builder, len(inputs))
for input in reversed(inputs):
builder.PrependInt32(tensor_index[input])
inputs = builder.EndVector(len(inputs))
outputs = graph.outputs
    tflite_fb.SubGraphStartOutputsVector(builder, len(outputs))
for output in reversed(outputs):
builder.PrependInt32(tensor_index[output])
outputs = builder.EndVector(len(outputs))
tflite_fb.SubGraphStart(builder)
if name is not None:
tflite_fb.SubGraphAddName(builder, name)
tflite_fb.SubGraphAddTensors(builder, tensors)
tflite_fb.SubGraphAddOperators(builder, operators)
tflite_fb.SubGraphAddInputs(builder, inputs)
tflite_fb.SubGraphAddOutputs(builder, outputs)
subgraph = tflite_fb.SubGraphEnd(builder)
tflite_fb.ModelStartSubgraphsVector(builder, 1)
builder.PrependUOffsetTRelative(subgraph)
subgraphs = builder.EndVector(1)
metadata_name = builder.CreateString("min_runtime_version")
tflite_fb.MetadataStart(builder)
tflite_fb.MetadataAddName(builder, metadata_name)
tflite_fb.MetadataAddBuffer(builder, metadata_index)
metadata = tflite_fb.MetadataEnd(builder)
tflite_fb.ModelStartMetadataVector(builder, 1)
builder.PrependUOffsetTRelative(metadata)
metadata_vector = builder.EndVector(1)
tflite_fb.ModelStart(builder)
tflite_fb.ModelAddVersion(builder, OUTPUT_SCHEMA_VERSION)
tflite_fb.ModelAddBuffers(builder, buffers)
tflite_fb.ModelAddOperatorCodes(builder, op_codes)
tflite_fb.ModelAddSubgraphs(builder, subgraphs)
tflite_fb.ModelAddMetadata(builder, metadata_vector)
model = tflite_fb.ModelEnd(builder)
FinishWithFileIdentifier(builder, model, OUTPUT_FILE_IDENTIFIER)
bytes = builder.Output()
with open(filename, 'wb') as file:
file.write(bytes)
class Reader(object):
def __init__(self, convert_to_tf_py=False):
self._convert_to_tf_py = convert_to_tf_py
def __call__(self, filename):
g = read_tflite_graph_from_flatbuffers(filename)
if self._convert_to_tf_py:
tflite_to_tf_py.convert(g)
return g
class Writer(object):
def __init__(self, convert_from_tf_py=False):
self._convert_from_tf_py = convert_from_tf_py
def __call__(self, graph, filename):
if self._convert_from_tf_py:
tf_py_to_tflite.convert(graph)
return write_tflite_graph_to_flatbuffers(graph, filename)
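# Round-trip usage sketch for the wrappers above (file names are placeholders):
#   graph = Reader()('model.tflite')
#   Writer()(graph, 'model_out.tflite')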
| [
"nnef_tools.io.tensorflow.tflite_fb.TensorStart",
"nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersStart",
"flatbuffers.Builder",
"nnef_tools.io.tensorflow.tflite_fb.BufferStart",
"nnef_tools.io.tensorflow.tflite_fb.TensorAddBuffer",
"nnef_tools.io.tensorflow.tflite_fb.OperatorAddOutputs",
"nne... | [((17523, 17553), 're.compile', 're.compile', (['"""(.)([A-Z][a-z]+)"""'], {}), "('(.)([A-Z][a-z]+)')\n", (17533, 17553), False, 'import re\n'), ((17564, 17595), 're.compile', 're.compile', (['"""([a-z0-9])([A-Z])"""'], {}), "('([a-z0-9])([A-Z])')\n", (17574, 17595), False, 'import re\n'), ((20234, 20264), 'nnef_tools.io.tensorflow.tflite_fb.BufferStart', 'tflite_fb.BufferStart', (['builder'], {}), '(builder)\n', (20255, 20264), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20269, 20307), 'nnef_tools.io.tensorflow.tflite_fb.BufferAddData', 'tflite_fb.BufferAddData', (['builder', 'data'], {}), '(builder, data)\n', (20292, 20307), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20319, 20347), 'nnef_tools.io.tensorflow.tflite_fb.BufferEnd', 'tflite_fb.BufferEnd', (['builder'], {}), '(builder)\n', (20338, 20347), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20820, 20850), 'nnef_tools.io.tensorflow.tflite_fb.TensorStart', 'tflite_fb.TensorStart', (['builder'], {}), '(builder)\n', (20841, 20850), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20855, 20893), 'nnef_tools.io.tensorflow.tflite_fb.TensorAddName', 'tflite_fb.TensorAddName', (['builder', 'name'], {}), '(builder, name)\n', (20878, 20893), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20898, 20938), 'nnef_tools.io.tensorflow.tflite_fb.TensorAddShape', 'tflite_fb.TensorAddShape', (['builder', 'shape'], {}), '(builder, shape)\n', (20922, 20938), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20943, 20981), 'nnef_tools.io.tensorflow.tflite_fb.TensorAddType', 'tflite_fb.TensorAddType', (['builder', 'type'], {}), '(builder, type)\n', (20966, 20981), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((20986, 21028), 'nnef_tools.io.tensorflow.tflite_fb.TensorAddBuffer', 'tflite_fb.TensorAddBuffer', (['builder', 'buffer'], {}), '(builder, buffer)\n', (21011, 21028), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((21122, 21150), 'nnef_tools.io.tensorflow.tflite_fb.TensorEnd', 'tflite_fb.TensorEnd', (['builder'], {}), '(builder)\n', (21141, 21150), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((23236, 23272), 'nnef_tools.io.tensorflow.tflite_fb.OperatorCodeStart', 'tflite_fb.OperatorCodeStart', (['builder'], {}), '(builder)\n', (23263, 23272), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((23277, 23335), 'nnef_tools.io.tensorflow.tflite_fb.OperatorCodeAddBuiltinCode', 'tflite_fb.OperatorCodeAddBuiltinCode', (['builder', 'builtinCode'], {}), '(builder, builtinCode)\n', (23313, 23335), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((23441, 23475), 'nnef_tools.io.tensorflow.tflite_fb.OperatorCodeEnd', 'tflite_fb.OperatorCodeEnd', (['builder'], {}), '(builder)\n', (23466, 23475), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26134, 26166), 'nnef_tools.io.tensorflow.tflite_fb.OperatorStart', 'tflite_fb.OperatorStart', (['builder'], {}), '(builder)\n', (26157, 26166), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26171, 26258), 'nnef_tools.io.tensorflow.tflite_fb.OperatorAddOpcodeIndex', 'tflite_fb.OperatorAddOpcodeIndex', (['builder', 'op_code_index[builtin_code, custom_code]'], {}), '(builder, op_code_index[builtin_code,\n custom_code])\n', (26203, 26258), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26261, 26305), 
'nnef_tools.io.tensorflow.tflite_fb.OperatorAddInputs', 'tflite_fb.OperatorAddInputs', (['builder', 'inputs'], {}), '(builder, inputs)\n', (26288, 26305), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26310, 26356), 'nnef_tools.io.tensorflow.tflite_fb.OperatorAddOutputs', 'tflite_fb.OperatorAddOutputs', (['builder', 'outputs'], {}), '(builder, outputs)\n', (26338, 26356), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26361, 26422), 'nnef_tools.io.tensorflow.tflite_fb.OperatorAddBuiltinOptionsType', 'tflite_fb.OperatorAddBuiltinOptionsType', (['builder', 'optionsType'], {}), '(builder, optionsType)\n', (26400, 26422), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26606, 26636), 'nnef_tools.io.tensorflow.tflite_fb.OperatorEnd', 'tflite_fb.OperatorEnd', (['builder'], {}), '(builder)\n', (26627, 26636), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26780, 26820), 'nnef_tools.io.tensorflow.tflite_fb.Model.GetRootAsModel', 'tflite_fb.Model.GetRootAsModel', (['bytes', '(0)'], {}), '(bytes, 0)\n', (26810, 26820), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((30388, 30410), 'flatbuffers.Builder', 'flatbuffers.Builder', (['(0)'], {}), '(0)\n', (30407, 30410), False, 'import flatbuffers\n'), ((30416, 30459), 'nnef_tools.io.tensorflow.tflite_fb.BufferStartDataVector', 'tflite_fb.BufferStartDataVector', (['builder', '(0)'], {}), '(builder, 0)\n', (30447, 30459), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((30496, 30526), 'nnef_tools.io.tensorflow.tflite_fb.BufferStart', 'tflite_fb.BufferStart', (['builder'], {}), '(builder)\n', (30517, 30526), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((30531, 30569), 'nnef_tools.io.tensorflow.tflite_fb.BufferAddData', 'tflite_fb.BufferAddData', (['builder', 'data'], {}), '(builder, data)\n', (30554, 30569), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((30583, 30611), 'nnef_tools.io.tensorflow.tflite_fb.BufferEnd', 'tflite_fb.BufferEnd', (['builder'], {}), '(builder)\n', (30602, 30611), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33302, 33334), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphStart', 'tflite_fb.SubGraphStart', (['builder'], {}), '(builder)\n', (33325, 33334), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33413, 33459), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphAddTensors', 'tflite_fb.SubGraphAddTensors', (['builder', 'tensors'], {}), '(builder, tensors)\n', (33441, 33459), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33464, 33514), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphAddOperators', 'tflite_fb.SubGraphAddOperators', (['builder', 'operators'], {}), '(builder, operators)\n', (33494, 33514), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33519, 33563), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphAddInputs', 'tflite_fb.SubGraphAddInputs', (['builder', 'inputs'], {}), '(builder, inputs)\n', (33546, 33563), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33568, 33614), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphAddOutputs', 'tflite_fb.SubGraphAddOutputs', (['builder', 'outputs'], {}), '(builder, outputs)\n', (33596, 33614), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33630, 33660), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphEnd', 'tflite_fb.SubGraphEnd', (['builder'], {}), '(builder)\n', (33651, 33660), True, 'import 
nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33666, 33713), 'nnef_tools.io.tensorflow.tflite_fb.ModelStartSubgraphsVector', 'tflite_fb.ModelStartSubgraphsVector', (['builder', '(1)'], {}), '(builder, 1)\n', (33701, 33713), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33866, 33898), 'nnef_tools.io.tensorflow.tflite_fb.MetadataStart', 'tflite_fb.MetadataStart', (['builder'], {}), '(builder)\n', (33889, 33898), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33903, 33952), 'nnef_tools.io.tensorflow.tflite_fb.MetadataAddName', 'tflite_fb.MetadataAddName', (['builder', 'metadata_name'], {}), '(builder, metadata_name)\n', (33928, 33952), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33957, 34009), 'nnef_tools.io.tensorflow.tflite_fb.MetadataAddBuffer', 'tflite_fb.MetadataAddBuffer', (['builder', 'metadata_index'], {}), '(builder, metadata_index)\n', (33984, 34009), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34025, 34055), 'nnef_tools.io.tensorflow.tflite_fb.MetadataEnd', 'tflite_fb.MetadataEnd', (['builder'], {}), '(builder)\n', (34046, 34055), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34060, 34106), 'nnef_tools.io.tensorflow.tflite_fb.ModelStartMetadataVector', 'tflite_fb.ModelStartMetadataVector', (['builder', '(1)'], {}), '(builder, 1)\n', (34094, 34106), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34201, 34230), 'nnef_tools.io.tensorflow.tflite_fb.ModelStart', 'tflite_fb.ModelStart', (['builder'], {}), '(builder)\n', (34221, 34230), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34235, 34292), 'nnef_tools.io.tensorflow.tflite_fb.ModelAddVersion', 'tflite_fb.ModelAddVersion', (['builder', 'OUTPUT_SCHEMA_VERSION'], {}), '(builder, OUTPUT_SCHEMA_VERSION)\n', (34260, 34292), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34297, 34340), 'nnef_tools.io.tensorflow.tflite_fb.ModelAddBuffers', 'tflite_fb.ModelAddBuffers', (['builder', 'buffers'], {}), '(builder, buffers)\n', (34322, 34340), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34345, 34395), 'nnef_tools.io.tensorflow.tflite_fb.ModelAddOperatorCodes', 'tflite_fb.ModelAddOperatorCodes', (['builder', 'op_codes'], {}), '(builder, op_codes)\n', (34376, 34395), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34400, 34447), 'nnef_tools.io.tensorflow.tflite_fb.ModelAddSubgraphs', 'tflite_fb.ModelAddSubgraphs', (['builder', 'subgraphs'], {}), '(builder, subgraphs)\n', (34427, 34447), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34452, 34504), 'nnef_tools.io.tensorflow.tflite_fb.ModelAddMetadata', 'tflite_fb.ModelAddMetadata', (['builder', 'metadata_vector'], {}), '(builder, metadata_vector)\n', (34478, 34504), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((34517, 34544), 'nnef_tools.io.tensorflow.tflite_fb.ModelEnd', 'tflite_fb.ModelEnd', (['builder'], {}), '(builder)\n', (34535, 34544), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((21063, 21110), 'nnef_tools.io.tensorflow.tflite_fb.TensorAddQuantization', 'tflite_fb.TensorAddQuantization', (['builder', 'quant'], {}), '(builder, quant)\n', (21094, 21110), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((21296, 21320), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (21304, 21320), True, 'import numpy as np\n'), ((22396, 22442), 
'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersStart', 'tflite_fb.QuantizationParametersStart', (['builder'], {}), '(builder)\n', (22433, 22442), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22451, 22507), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddScale', 'tflite_fb.QuantizationParametersAddScale', (['builder', 'scale'], {}), '(builder, scale)\n', (22491, 22507), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22516, 22581), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddZeroPoint', 'tflite_fb.QuantizationParametersAddZeroPoint', (['builder', 'zero_point'], {}), '(builder, zero_point)\n', (22560, 22581), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22597, 22641), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersEnd', 'tflite_fb.QuantizationParametersEnd', (['builder'], {}), '(builder)\n', (22632, 22641), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22660, 22706), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersStart', 'tflite_fb.QuantizationParametersStart', (['builder'], {}), '(builder)\n', (22697, 22706), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22715, 22767), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddMin', 'tflite_fb.QuantizationParametersAddMin', (['builder', 'min'], {}), '(builder, min)\n', (22753, 22767), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22776, 22828), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddMax', 'tflite_fb.QuantizationParametersAddMax', (['builder', 'max'], {}), '(builder, max)\n', (22814, 22828), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22837, 22893), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddScale', 'tflite_fb.QuantizationParametersAddScale', (['builder', 'scale'], {}), '(builder, scale)\n', (22877, 22893), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22902, 22967), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersAddZeroPoint', 'tflite_fb.QuantizationParametersAddZeroPoint', (['builder', 'zero_point'], {}), '(builder, zero_point)\n', (22946, 22967), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((22983, 23027), 'nnef_tools.io.tensorflow.tflite_fb.QuantizationParametersEnd', 'tflite_fb.QuantizationParametersEnd', (['builder'], {}), '(builder)\n', (23018, 23027), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((23368, 23429), 'nnef_tools.io.tensorflow.tflite_fb.OperatorCodeAddCustomCode', 'tflite_fb.OperatorCodeAddCustomCode', (['builder', 'customCode_hndl'], {}), '(builder, customCode_hndl)\n', (23403, 23429), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26448, 26501), 'nnef_tools.io.tensorflow.tflite_fb.OperatorAddBuiltinOptions', 'tflite_fb.OperatorAddBuiltinOptions', (['builder', 'options'], {}), '(builder, options)\n', (26483, 26501), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((26534, 26593), 'nnef_tools.io.tensorflow.tflite_fb.OperatorAddCustomOptions', 'tflite_fb.OperatorAddCustomOptions', (['builder', 'custom_options'], {}), '(builder, custom_options)\n', (26568, 26593), True, 'import nnef_tools.io.tensorflow.tflite_fb as tflite_fb\n'), ((33368, 33408), 'nnef_tools.io.tensorflow.tflite_fb.SubGraphAddName', 'tflite_fb.SubGraphAddName', (['builder', 'name'], {}), '(builder, name)\n', (33393, 33408), True, 'import nnef_tools.io.tensorflow.tflite_fb as 
tflite_fb\n'), ((31132, 31172), 'numpy.frombuffer', 'np.frombuffer', (["b'1.14.0'"], {'dtype': 'np.uint8'}), "(b'1.14.0', dtype=np.uint8)\n", (31145, 31172), True, 'import numpy as np\n'), ((34974, 35000), 'nnef_tools.conversion.tensorflow.tflite_to_tf_py.convert', 'tflite_to_tf_py.convert', (['g'], {}), '(g)\n', (34997, 35000), False, 'from nnef_tools.conversion.tensorflow import tflite_to_tf_py, tf_py_to_tflite\n'), ((35238, 35268), 'nnef_tools.conversion.tensorflow.tf_py_to_tflite.convert', 'tf_py_to_tflite.convert', (['graph'], {}), '(graph)\n', (35261, 35268), False, 'from nnef_tools.conversion.tensorflow import tflite_to_tf_py, tf_py_to_tflite\n'), ((27637, 27662), 'nnef_tools.core.utils.anystr_to_str', 'utils.anystr_to_str', (['name'], {}), '(name)\n', (27656, 27662), False, 'from nnef_tools.core import utils\n'), ((30828, 30919), 'numpy.array', 'np.array', (['tensor_data'], {'dtype': '_TensorDtypeAsNumpy[_TensorTypeValueByName[tensor.dtype]]'}), '(tensor_data, dtype=_TensorDtypeAsNumpy[_TensorTypeValueByName[\n tensor.dtype]])\n', (30836, 30919), True, 'import numpy as np\n'), ((27812, 27838), 'nnef_tools.core.utils.anystr_to_str', 'utils.anystr_to_str', (['label'], {}), '(label)\n', (27831, 27838), False, 'from nnef_tools.core import utils\n')] |
import numpy as np
import itertools
def draw(n, k, draws):
sample = np.random.choice(
a=range(n), replace=False,
size=draws
).tolist()
return [k if i in sample else float('NAN')
for i in range(n)
]
def generate_string_data(z, a, k, draws=1):
data = np.array([
a * np.sin(z),
a * np.cos(z),
z
]).T
return list(zip(draw(len(z), float(k), draws), data))
def generate_springs(a, draws=1, *linspaces):
tmp = [generate_string_data(z, a, idx, draws)
for idx, z in enumerate(linspaces)
]
    # builtin map() takes positional arguments only; the original keyword form
    # (func=..., iter1=...) raises TypeError
    tmp3 = map(
        lambda x: (x[0], *x[1]),
        enumerate(itertools.chain(*tmp))
    )
return list(tmp3)
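# Example usage (illustrative): two helix "springs" of 100 samples each, with
# one labelled point drawn at random per spring. Each element of the result is
# (global index, label-or-NaN, ndarray of [x, y, z]).
#   z = np.linspace(0, 4 * np.pi, 100)
#   springs = generate_springs(1.0, 1, z, z + np.pi)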
| [
"numpy.sin",
"itertools.chain",
"numpy.cos"
] | [((678, 699), 'itertools.chain', 'itertools.chain', (['*tmp'], {}), '(*tmp)\n', (693, 699), False, 'import itertools\n'), ((332, 341), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (338, 341), True, 'import numpy as np\n'), ((355, 364), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (361, 364), True, 'import numpy as np\n')] |
"""Executable examples for using the pcap APIs.
This module has a rudimentary command line interface. For usage, run::
$ python -m ouster.sdk.examples.pcap -h
"""
import os
import argparse
from contextlib import closing
import numpy as np
from ouster import client, pcap
from .colormaps import normalize
def pcap_3d_one_scan(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0) -> None:
"""Render one scan from a pcap file in the Open3D viewer.
Args:
source: PacketSource from pcap
metadata: associated SensorInfo for PacketSource
        num: scan number in a given pcap file (starts from *0*)
"""
try:
import open3d as o3d # type: ignore
except ModuleNotFoundError:
print(
"This example requires open3d, which may not be available on all "
"platforms. Try running `pip3 install open3d` first.")
exit(1)
from more_itertools import nth
# get single scan by index
scan = nth(client.Scans(source), num)
if not scan:
print(f"ERROR: Scan # {num} in not present in pcap file")
exit(1)
# [doc-stag-open3d-one-scan]
# compute point cloud using client.SensorInfo and client.LidarScan
xyz = client.XYZLut(metadata)(scan)
# create point cloud and coordinate axes geometries
cloud = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz.reshape((-1, 3)))) # type: ignore
axes = o3d.geometry.TriangleMesh.create_coordinate_frame(1.0) # type: ignore
# [doc-etag-open3d-one-scan]
# initialize visualizer and rendering options
vis = o3d.visualization.Visualizer() # type: ignore
vis.create_window()
vis.add_geometry(cloud)
vis.add_geometry(axes)
ropt = vis.get_render_option()
ropt.point_size = 1.0
ropt.background_color = np.asarray([0, 0, 0])
# initialize camera settings
ctr = vis.get_view_control()
ctr.set_zoom(0.1)
ctr.set_lookat([0, 0, 0])
ctr.set_up([1, 0, 0])
# run visualizer main loop
print("Press Q or Excape to exit")
vis.run()
vis.destroy_window()
def pcap_display_xyz_points(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0) -> None:
"""Plot point cloud using matplotlib."""
import matplotlib.pyplot as plt # type: ignore
# [doc-stag-pcap-plot-xyz-points]
from more_itertools import nth
scan = nth(client.Scans(source), num)
if not scan:
print(f"ERROR: Scan # {num} in not present in pcap file")
exit(1)
# set up figure
plt.figure()
ax = plt.axes(projection='3d')
r = 6
ax.set_xlim3d([-r, r])
ax.set_ylim3d([-r, r])
ax.set_zlim3d([-r, r])
plt.title("3D Points XYZ for scan")
# transform data to 3d points and graph
xyzlut = client.XYZLut(metadata)
xyz = xyzlut(scan)
key = scan.field(client.ChanField.SIGNAL)
[x, y, z] = [c.flatten() for c in np.dsplit(xyz, 3)]
ax.scatter(x, y, z, c=normalize(key.flatten()), s=0.2)
plt.show()
# [doc-etag-pcap-plot-xyz-points]
def pcap_to_csv(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0,
csv_dir: str = ".",
csv_base: str = "pcap_out",
csv_ext: str = "csv") -> None:
"""Write scans from a pcap to csv files (one per lidar scan).
The number of saved lines per csv file is always H x W, which corresponds to
a full 2D image representation of a lidar scan.
Each line in a csv file is:
TIMESTAMP, RANGE (mm), SIGNAL, NEAR_IR, REFLECTIVITY, X (mm), Y (mm), Z (mm)
If ``csv_ext`` ends in ``.gz``, the file is automatically saved in
compressed gzip format. :func:`.numpy.loadtxt` can be used to read gzipped
files transparently back to :class:`.numpy.ndarray`.
Args:
source: PacketSource from pcap
metadata: associated SensorInfo for PacketSource
num: number of scans to save from pcap to csv files
csv_dir: path to the directory where csv files will be saved
csv_base: string to use as the base of the filename for pcap output
csv_ext: file extension to use, "csv" by default
"""
# ensure that base csv_dir exists
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
field_names = 'TIMESTAMP (ns), RANGE (mm), SIGNAL, NEAR_IR, REFLECTIVITY, X (mm), Y (mm), Z (mm)'
field_fmts = ['%d', '%d', '%d', '%d', '%d', '%d', '%d', '%d']
# [doc-stag-pcap-to-csv]
from itertools import islice
# precompute xyzlut to save computation in a loop
xyzlut = client.XYZLut(metadata)
# create an iterator of LidarScans from pcap and bound it if num is specified
scans = iter(client.Scans(source))
if num:
scans = islice(scans, num)
for idx, scan in enumerate(scans):
# copy per-column timestamps for each channel
timestamps = np.tile(scan.timestamp, (scan.h, 1))
# grab channel data
fields_values = [scan.field(ch) for ch in scan.fields]
# use integer mm to avoid loss of precision casting timestamps
xyz = (xyzlut(scan) * 1000).astype(np.int64)
# get all data as one H x W x 8 int64 array for savetxt()
frame = np.dstack((timestamps, *fields_values, xyz))
# not necessary, but output points in "image" vs. staggered order
frame = client.destagger(metadata, frame)
# write csv out to file
csv_path = os.path.join(csv_dir, f'{csv_base}_{idx:06d}.{csv_ext}')
print(f'write frame #{idx}, to file: {csv_path}')
header = '\n'.join([f'frame num: {idx}', field_names])
np.savetxt(csv_path,
frame.reshape(-1, frame.shape[2]),
fmt=field_fmts,
delimiter=',',
header=header)
# [doc-etag-pcap-to-csv]
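# Reading a gzipped output back (illustrative; assumes the function above was
# called with csv_ext="csv.gz", giving files like 'pcap_out_000000.csv.gz'):
#   frame = np.loadtxt('pcap_out_000000.csv.gz', delimiter=',')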
def pcap_to_las(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0,
las_dir: str = ".",
las_base: str = "las_out",
las_ext: str = "las") -> None:
"Write scans from a pcap to las files (one per lidar scan)."
from itertools import islice
import laspy # type: ignore
# precompute xyzlut to save computation in a loop
xyzlut = client.XYZLut(metadata)
# create an iterator of LidarScans from pcap and bound it if num is specified
scans = iter(client.Scans(source))
if num:
scans = islice(scans, num)
for idx, scan in enumerate(scans):
xyz = xyzlut(scan)
las = laspy.create()
las.x = xyz[:, :, 0].flatten()
las.y = xyz[:, :, 1].flatten()
las.z = xyz[:, :, 2].flatten()
las_path = os.path.join(las_dir, f'{las_base}_{idx:06d}.{las_ext}')
print(f'write frame #{idx} to file: {las_path}')
las.write(las_path)
def pcap_to_pcd(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0,
pcd_dir: str = ".",
pcd_base: str = "pcd_out",
pcd_ext: str = "pcd") -> None:
"Write scans from a pcap to pcd files (one per lidar scan)."
from itertools import islice
try:
import open3d as o3d # type: ignore
except ModuleNotFoundError:
print(
"This example requires open3d, which may not be available on all "
"platforms. Try running `pip3 install open3d` first.")
exit(1)
if not os.path.exists(pcd_dir):
os.makedirs(pcd_dir)
# precompute xyzlut to save computation in a loop
xyzlut = client.XYZLut(metadata)
# create an iterator of LidarScans from pcap and bound it if num is specified
scans = iter(client.Scans(source))
if num:
scans = islice(scans, num)
for idx, scan in enumerate(scans):
xyz = xyzlut(scan)
pcd = o3d.geometry.PointCloud() # type: ignore
pcd.points = o3d.utility.Vector3dVector(xyz.reshape(-1, 3)) # type: ignore
pcd_path = os.path.join(pcd_dir, f'{pcd_base}_{idx:06d}.{pcd_ext}')
print(f'write frame #{idx} to file: {pcd_path}')
o3d.io.write_point_cloud(pcd_path, pcd) # type: ignore
def pcap_query_scan(source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0) -> None:
"""
Example: Query available fields in LidarScan
Args:
source: PacketSource from pcap
metadata: associated SensorInfo for PacketSource
        num: scan number in a given pcap file (starts from *0*)
"""
scans = iter(client.Scans(source))
# [doc-stag-pcap-query-scan]
scan = next(scans)
print("Available fields and corresponding dtype in LidarScan")
for field in scan.fields:
print('{0:15} {1}'.format(str(field), scan.field(field).dtype))
# [doc-etag-pcap-query-scan]
def pcap_read_packets(
source: client.PacketSource,
metadata: client.SensorInfo,
num: int = 0 # not used in this example
) -> None:
"""Basic read packets example from pcap file. """
# [doc-stag-pcap-read-packets]
for packet in source:
if isinstance(packet, client.LidarPacket):
# Now we can process the LidarPacket. In this case, we access
# the measurement ids, timestamps, and ranges
measurement_ids = packet.measurement_id
timestamps = packet.timestamp
ranges = packet.field(client.ChanField.RANGE)
            print(f'  measurement ids = {measurement_ids.shape}')
print(f' timestamps = {timestamps.shape}')
print(f' ranges = {ranges.shape}')
elif isinstance(packet, client.ImuPacket):
# and access ImuPacket content
print(f' acceleration = {packet.accel}')
print(f' angular_velocity = {packet.angular_vel}')
# [doc-etag-pcap-read-packets]
def main():
"""Pcap examples runner."""
examples = {
"open3d-one-scan": pcap_3d_one_scan,
"plot-xyz-points": pcap_display_xyz_points,
"pcap-to-csv": pcap_to_csv,
"pcap-to-las": pcap_to_las,
"pcap-to-pcd": pcap_to_pcd,
"query-scan": pcap_query_scan,
"read-packets": pcap_read_packets,
}
description = "Ouster Python SDK Pcap examples. The EXAMPLE must be one of:\n " + str.join(
'\n ', examples.keys())
parser = argparse.ArgumentParser(
description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('pcap_path', metavar='PCAP', help='path to pcap file')
parser.add_argument('metadata_path',
metavar='METADATA',
help='path to metadata json')
parser.add_argument('example',
metavar='EXAMPLE',
choices=examples.keys(),
help='name of the example to run')
parser.add_argument('--scan-num',
type=int,
default=1,
help='index of scan to use')
args = parser.parse_args()
try:
example = examples[args.example]
except KeyError:
print(f"No such example: {args.example}")
print(description)
exit(1)
if not args.metadata_path or not os.path.exists(args.metadata_path):
print(f"Metadata file does not exist: {args.metadata_path}")
exit(1)
print(f'example: {args.example}')
with open(args.metadata_path, 'r') as f:
metadata = client.SensorInfo(f.read())
source = pcap.Pcap(args.pcap_path, metadata)
with closing(source):
example(source, metadata, args.scan_num) # type: ignore
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"matplotlib.pyplot.axes",
"open3d.geometry.PointCloud",
"ouster.client.Scans",
"matplotlib.pyplot.figure",
"numpy.tile",
"os.path.join",
"numpy.dsplit",
"os.path.exists",
"open3d.io.write_point_cloud",
"laspy.create",
"ouster.client.XYZLu... | [((1485, 1539), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', (['(1.0)'], {}), '(1.0)\n', (1534, 1539), True, 'import open3d as o3d\n'), ((1651, 1681), 'open3d.visualization.Visualizer', 'o3d.visualization.Visualizer', ([], {}), '()\n', (1679, 1681), True, 'import open3d as o3d\n'), ((1867, 1888), 'numpy.asarray', 'np.asarray', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1877, 1888), True, 'import numpy as np\n'), ((2648, 2660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2658, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2695), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2678, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2792, 2827), 'matplotlib.pyplot.title', 'plt.title', (['"""3D Points XYZ for scan"""'], {}), "('3D Points XYZ for scan')\n", (2801, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2909), 'ouster.client.XYZLut', 'client.XYZLut', (['metadata'], {}), '(metadata)\n', (2899, 2909), False, 'from ouster import client, pcap\n'), ((3101, 3111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3109, 3111), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4729), 'ouster.client.XYZLut', 'client.XYZLut', (['metadata'], {}), '(metadata)\n', (4719, 4729), False, 'from ouster import client, pcap\n'), ((6417, 6440), 'ouster.client.XYZLut', 'client.XYZLut', (['metadata'], {}), '(metadata)\n', (6430, 6440), False, 'from ouster import client, pcap\n'), ((7732, 7755), 'ouster.client.XYZLut', 'client.XYZLut', (['metadata'], {}), '(metadata)\n', (7745, 7755), False, 'from ouster import client, pcap\n'), ((10537, 10637), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=description, formatter_class=argparse.\n RawTextHelpFormatter)\n', (10560, 10637), False, 'import argparse\n'), ((11707, 11742), 'ouster.pcap.Pcap', 'pcap.Pcap', (['args.pcap_path', 'metadata'], {}), '(args.pcap_path, metadata)\n', (11716, 11742), False, 'from ouster import client, pcap\n'), ((1043, 1063), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (1055, 1063), False, 'from ouster import client, pcap\n'), ((1285, 1308), 'ouster.client.XYZLut', 'client.XYZLut', (['metadata'], {}), '(metadata)\n', (1298, 1308), False, 'from ouster import client, pcap\n'), ((2497, 2517), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (2509, 2517), False, 'from ouster import client, pcap\n'), ((4353, 4376), 'os.path.exists', 'os.path.exists', (['csv_dir'], {}), '(csv_dir)\n', (4367, 4376), False, 'import os\n'), ((4386, 4406), 'os.makedirs', 'os.makedirs', (['csv_dir'], {}), '(csv_dir)\n', (4397, 4406), False, 'import os\n'), ((4830, 4850), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (4842, 4850), False, 'from ouster import client, pcap\n'), ((4880, 4898), 'itertools.islice', 'islice', (['scans', 'num'], {}), '(scans, num)\n', (4886, 4898), False, 'from itertools import islice\n'), ((5015, 5051), 'numpy.tile', 'np.tile', (['scan.timestamp', '(scan.h, 1)'], {}), '(scan.timestamp, (scan.h, 1))\n', (5022, 5051), True, 'import numpy as np\n'), ((5352, 5396), 'numpy.dstack', 'np.dstack', (['(timestamps, *fields_values, xyz)'], {}), '((timestamps, *fields_values, xyz))\n', (5361, 5396), True, 'import numpy as np\n'), ((5488, 5521), 'ouster.client.destagger', 'client.destagger', 
(['metadata', 'frame'], {}), '(metadata, frame)\n', (5504, 5521), False, 'from ouster import client, pcap\n'), ((5574, 5630), 'os.path.join', 'os.path.join', (['csv_dir', 'f"""{csv_base}_{idx:06d}.{csv_ext}"""'], {}), "(csv_dir, f'{csv_base}_{idx:06d}.{csv_ext}')\n", (5586, 5630), False, 'import os\n'), ((6541, 6561), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (6553, 6561), False, 'from ouster import client, pcap\n'), ((6591, 6609), 'itertools.islice', 'islice', (['scans', 'num'], {}), '(scans, num)\n', (6597, 6609), False, 'from itertools import islice\n'), ((6693, 6707), 'laspy.create', 'laspy.create', ([], {}), '()\n', (6705, 6707), False, 'import laspy\n'), ((6845, 6901), 'os.path.join', 'os.path.join', (['las_dir', 'f"""{las_base}_{idx:06d}.{las_ext}"""'], {}), "(las_dir, f'{las_base}_{idx:06d}.{las_ext}')\n", (6857, 6901), False, 'import os\n'), ((7610, 7633), 'os.path.exists', 'os.path.exists', (['pcd_dir'], {}), '(pcd_dir)\n', (7624, 7633), False, 'import os\n'), ((7643, 7663), 'os.makedirs', 'os.makedirs', (['pcd_dir'], {}), '(pcd_dir)\n', (7654, 7663), False, 'import os\n'), ((7856, 7876), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (7868, 7876), False, 'from ouster import client, pcap\n'), ((7906, 7924), 'itertools.islice', 'islice', (['scans', 'num'], {}), '(scans, num)\n', (7912, 7924), False, 'from itertools import islice\n'), ((8008, 8033), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (8031, 8033), True, 'import open3d as o3d\n'), ((8155, 8211), 'os.path.join', 'os.path.join', (['pcd_dir', 'f"""{pcd_base}_{idx:06d}.{pcd_ext}"""'], {}), "(pcd_dir, f'{pcd_base}_{idx:06d}.{pcd_ext}')\n", (8167, 8211), False, 'import os\n'), ((8278, 8317), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['pcd_path', 'pcd'], {}), '(pcd_path, pcd)\n', (8302, 8317), True, 'import open3d as o3d\n'), ((8729, 8749), 'ouster.client.Scans', 'client.Scans', (['source'], {}), '(source)\n', (8741, 8749), False, 'from ouster import client, pcap\n'), ((11753, 11768), 'contextlib.closing', 'closing', (['source'], {}), '(source)\n', (11760, 11768), False, 'from contextlib import closing\n'), ((3019, 3036), 'numpy.dsplit', 'np.dsplit', (['xyz', '(3)'], {}), '(xyz, 3)\n', (3028, 3036), True, 'import numpy as np\n'), ((11441, 11475), 'os.path.exists', 'os.path.exists', (['args.metadata_path'], {}), '(args.metadata_path)\n', (11455, 11475), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Produces fake instrument data for testing.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import pandas as pds
import xarray as xr
import numpy as np
import pysat
platform = 'pysat'
name = 'testing2D_xarray'
pandas_format = False
def init(self):
self.new_thing=True
def load(fnames, tag=None, sat_id=None):
# create an artificial satellite data set
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
date = pysat.datetime(yr,month,day)
# the scalar divisor below is used to reduce the number of time samples
# covered by the simulation per day: the higher the number, the lower
# the number of samples (86400/scalar)
scalar = 1
num = 86400//scalar
num_array = np.arange(num)*scalar
# seed DataFrame with UT array
index = pds.date_range(date, date+pds.DateOffset(seconds=num-1), freq='S')
data = xr.Dataset({'uts': (('time'), index)}, coords={'time':index})
# need to create simple orbits here; the first orbit starts
# at 2009-01-01 00:00 UT, with 14.84 orbits per day
# figure out how far in time from the root start
# use that info to create a signal that is continuous from that start
# going to presume there are 5820 seconds per orbit (97 minute period)
time_delta = date - pysat.datetime(2009,1,1)
# root start
uts_root = np.mod(time_delta.total_seconds(), 5820)
# mlt runs 0-24 each orbit.
mlt = np.mod(uts_root+np.arange(num)*scalar, 5820)*(24./5820.)
data['mlt'] = (('time'), mlt)
# do slt, 20 second offset from mlt
uts_root = np.mod(time_delta.total_seconds()+20, 5820)
data['slt'] = (('time'), np.mod(uts_root+np.arange(num)*scalar, 5820)*(24./5820.))
# create a fake longitude that resets every 6240 seconds: the satellite
# moves at 360/5820 deg/s while Earth rotates at 360/86400 deg/s, so it
# takes extra time to sweep a full revolution in longitude
long_uts_root = np.mod(time_delta.total_seconds(), 6240)
longitude = np.mod(long_uts_root+num_array, 6240)*(360./6240.)
data['longitude'] = (('time'), longitude)
# create latitude signal for testing polar orbits
latitude = 90.*np.cos(np.mod(uts_root+num_array, 5820)*(2.*np.pi/5820.))
data['latitude'] = (('time'), latitude)
# create some fake data to support testing of averaging routines
mlt_int = data['mlt'].astype(int)
long_int = (data['longitude']/15.).astype(int)
data['dummy1'] = (('time'), mlt_int)
data['dummy2'] = (('time'), long_int)
data['dummy3'] = (('time'), mlt_int + long_int*1000.)
data['dummy4'] = (('time'), num_array)
# create altitude 'profile' at each location
data['profiles'] = (('time', 'altitude'), data['dummy3'].values[:, np.newaxis]*np.ones((num, 15)))
data.coords['altitude'] = ('altitude', np.arange(15))
# profiles that could have different altitude values
data['variable_profiles'] = (('time', 'z'), data['dummy3'].values[:, np.newaxis]*np.ones((num, 15)))
data.coords['altitude2'] = (('time', 'z'), np.arange(15)[np.newaxis, :]*np.ones((num, 15)))
# basic image simulation
data['images'] = (('time', 'x', 'y'), data['dummy3'].values[:, np.newaxis, np.newaxis]*np.ones((num, 17, 17)))
data.coords['latitude'] = (('time', 'x', 'y'), np.arange(17)[np.newaxis, np.newaxis, :]*np.ones((num, 17, 17)))
data.coords['longitude'] = (('time', 'x', 'y'), np.arange(17)[np.newaxis, np.newaxis, :]*np.ones((num, 17, 17)))
return data, meta.copy()
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a fake list of files spanning a year"""
index = pds.date_range(pysat.datetime(2008,1,1), pysat.datetime(2010,12,31))
names = [ data_path+date.strftime('%Y-%m-%d')+'.nofile' for date in index]
return pysat.Series(names, index=index)
def download(date_array, tag, sat_id, data_path=None, user=None, password=None):
pass
# create very limited metadata
meta = pysat.Meta()
meta['uts'] = {'units':'s', 'long_name':'Universal Time'}
meta['mlt'] = {'units':'hours', 'long_name':'Magnetic Local Time'}
meta['slt'] = {'units':'hours', 'long_name':'Solar Local Time'}
meta['longitude'] = {'units':'degrees', 'long_name':'Longitude'}
meta['latitude'] = {'units':'degrees', 'long_name':'Latitude'}
series_profile_meta = pysat.Meta()
series_profile_meta['series_profiles'] = {'units':'', 'long_name':'series'}
meta['series_profiles'] = {'meta':series_profile_meta, 'units':'', 'long_name':'series'}
profile_meta = pysat.Meta()
profile_meta['density'] = {'units':'', 'long_name':'profiles'}
profile_meta['dummy_str'] = {'units':'', 'long_name':'profiles'}
profile_meta['dummy_ustr'] = {'units':'', 'long_name':'profiles'}
meta['profiles'] = {'meta':profile_meta, 'units':'', 'long_name':'profiles'}
alt_profile_meta = pysat.Meta()
alt_profile_meta['density'] = {'units':'', 'long_name':'profiles'}
alt_profile_meta['fraction'] = {'units':'', 'long_name':'profiles'}
meta['alt_profiles'] = {'meta':alt_profile_meta, 'units':'', 'long_name':'profiles'}
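# --- illustrative sketch (added; not part of the original module) ---
# The mlt/slt signals built in load() above are sawtooth phases: seconds
# since a root epoch, wrapped at the 5820 s orbit period and rescaled to
# 0-24 "hours". A minimal standalone reproduction of that construction:
def _orbit_phase_example():
    period = 5820  # seconds per orbit (97 minute period), as assumed above
    t = np.arange(0, 2 * period, 60)  # two orbits at 1-minute cadence
    mlt = np.mod(t, period) * (24. / period)
    assert mlt.min() >= 0. and mlt.max() < 24.
    return mlt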
| [
"pysat.datetime",
"pysat.Meta",
"numpy.ones",
"numpy.mod",
"xarray.Dataset",
"pysat.Series",
"numpy.arange",
"pandas.DateOffset",
"os.path.split"
] | [((4053, 4065), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (4063, 4065), False, 'import pysat\n'), ((4405, 4417), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (4415, 4417), False, 'import pysat\n'), ((4598, 4610), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (4608, 4610), False, 'import pysat\n'), ((4901, 4913), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (4911, 4913), False, 'import pysat\n'), ((589, 619), 'pysat.datetime', 'pysat.datetime', (['yr', 'month', 'day'], {}), '(yr, month, day)\n', (603, 619), False, 'import pysat\n'), ((1005, 1065), 'xarray.Dataset', 'xr.Dataset', (["{'uts': ('time', index)}"], {'coords': "{'time': index}"}), "({'uts': ('time', index)}, coords={'time': index})\n", (1015, 1065), True, 'import xarray as xr\n'), ((3889, 3921), 'pysat.Series', 'pysat.Series', (['names'], {'index': 'index'}), '(names, index=index)\n', (3901, 3921), False, 'import pysat\n'), ((858, 872), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (867, 872), True, 'import numpy as np\n'), ((1408, 1434), 'pysat.datetime', 'pysat.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (1422, 1434), False, 'import pysat\n'), ((2074, 2113), 'numpy.mod', 'np.mod', (['(long_uts_root + num_array)', '(6240)'], {}), '(long_uts_root + num_array, 6240)\n', (2080, 2113), True, 'import numpy as np\n'), ((2897, 2910), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (2906, 2910), True, 'import numpy as np\n'), ((3744, 3770), 'pysat.datetime', 'pysat.datetime', (['(2008)', '(1)', '(1)'], {}), '(2008, 1, 1)\n', (3758, 3770), False, 'import pysat\n'), ((3770, 3798), 'pysat.datetime', 'pysat.datetime', (['(2010)', '(12)', '(31)'], {}), '(2010, 12, 31)\n', (3784, 3798), False, 'import pysat\n'), ((953, 984), 'pandas.DateOffset', 'pds.DateOffset', ([], {'seconds': '(num - 1)'}), '(seconds=num - 1)\n', (967, 984), True, 'import pandas as pds\n'), ((2834, 2852), 'numpy.ones', 'np.ones', (['(num, 15)'], {}), '((num, 15))\n', (2841, 2852), True, 'import numpy as np\n'), ((3055, 3073), 'numpy.ones', 'np.ones', (['(num, 15)'], {}), '((num, 15))\n', (3062, 3073), True, 'import numpy as np\n'), ((3151, 3169), 'numpy.ones', 'np.ones', (['(num, 15)'], {}), '((num, 15))\n', (3158, 3169), True, 'import numpy as np\n'), ((3296, 3318), 'numpy.ones', 'np.ones', (['(num, 17, 17)'], {}), '((num, 17, 17))\n', (3303, 3318), True, 'import numpy as np\n'), ((3412, 3434), 'numpy.ones', 'np.ones', (['(num, 17, 17)'], {}), '((num, 17, 17))\n', (3419, 3434), True, 'import numpy as np\n'), ((3529, 3551), 'numpy.ones', 'np.ones', (['(num, 17, 17)'], {}), '((num, 17, 17))\n', (3536, 3551), True, 'import numpy as np\n'), ((460, 484), 'os.path.split', 'os.path.split', (['fnames[0]'], {}), '(fnames[0])\n', (473, 484), False, 'import os\n'), ((2252, 2286), 'numpy.mod', 'np.mod', (['(uts_root + num_array)', '(5820)'], {}), '(uts_root + num_array, 5820)\n', (2258, 2286), True, 'import numpy as np\n'), ((3122, 3135), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (3131, 3135), True, 'import numpy as np\n'), ((3371, 3384), 'numpy.arange', 'np.arange', (['(17)'], {}), '(17)\n', (3380, 3384), True, 'import numpy as np\n'), ((3488, 3501), 'numpy.arange', 'np.arange', (['(17)'], {}), '(17)\n', (3497, 3501), True, 'import numpy as np\n'), ((1564, 1578), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (1573, 1578), True, 'import numpy as np\n'), ((1783, 1797), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (1792, 1797), True, 'import numpy as np\n')] |
"""
Data augmentation algorithms.
Each algorithm works on the HandwrittenData class and has to be applied like
this:
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = HandwrittenData(raw_data_id=2953, raw_data_json=data_json)
>>> multiplication_queue = [Multiply(10),
... Rotate(-30, 30, 5)]
>>> x = [f(a) for f in multiplication_queue]
"""
# Core Library modules
import math
import sys
from copy import deepcopy
# Third party modules
import numpy
# Local modules
from . import handwritten_data, utils
def get_data_multiplication_queue(model_description_multiply):
"""Get features from a list of dictionaries
>>> l = [{'Multiply': [{'nr': 1}]}, \
{'Rotate': [{'minimum':-30}, {'maximum': 30}, {'num': 5}]}]
>>> get_data_multiplication_queue(l)
[Multiply (1 times), Rotate (-30.00, 30.00, 5.00)]
"""
return utils.get_objectlist(
model_description_multiply,
config_key="data_multiplication",
module=sys.modules[__name__],
)
# Only data multiplication classes follow
# Every class must have a __str__, __repr__, __call__ and get_dimension function
# where
# * __call__ must take exactly one argument of type HandwrittenData
# * __call__ must return a list of HandwrittenData objects
# Local features
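# A minimal reference implementation of that contract (an added sketch, not
# part of the original module): it simply returns the recording unchanged.
class Identity:
    """No-op data multiplication algorithm illustrating the interface."""

    def __repr__(self):
        return "Identity"

    def __str__(self):
        return repr(self)

    def __call__(self, hwr_obj):
        assert isinstance(hwr_obj, handwritten_data.HandwrittenData)
        return [hwr_obj]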
class Multiply:
"""Copy the data n times."""
def __init__(self, nr=1):
self.nr = nr
def __repr__(self):
return "Multiply (%i times)" % self.nr
def __str__(self):
return repr(self)
def __call__(self, hwr_obj):
assert isinstance(
hwr_obj, handwritten_data.HandwrittenData
), "handwritten data is not of type HandwrittenData, but of %r" % type(hwr_obj)
new_training_set = []
for _ in range(self.nr):
new_training_set.append(hwr_obj)
training_set = new_training_set
return training_set
class Rotate:
"""Add rotational variants of the recording."""
def __init__(self, minimum=-30.0, maximum=30.0, num=5):
self.min = minimum
self.max = maximum
self.num = num
def __repr__(self):
return f"Rotate ({self.min:0.2f}, {self.max:0.2f}, {self.num:0.2f})"
def __str__(self):
return repr(self)
def __call__(self, hwr_obj):
assert isinstance(
hwr_obj, handwritten_data.HandwrittenData
), "handwritten data is not of type HandwrittenData, but of %r" % type(hwr_obj)
new_training_set = []
xc, yc = hwr_obj.get_center_of_mass()
pointlist = hwr_obj.get_pointlist()
for rotation in numpy.linspace(self.min, self.max, self.num):
new_pointlist = []
# Rotate pointlist around center of mass (xc, yc)
for line in pointlist:
new_line = []
for point in line:
# Rotate (x, y) around the center of mass (xc, yc):
# xnew = xc + cos(rot)*(x - xc) - sin(rot)*(y - yc)
# ynew = yc + sin(rot)*(x - xc) + cos(rot)*(y - yc)
x, y = point["x"], point["y"]
cos = math.cos(math.radians(rotation))
sin = math.sin(math.radians(rotation))
xnew = cos * (x - xc) - sin * (y - yc) + xc
ynew = sin * (x - xc) + cos * (y - yc) + yc
new_line.append({"x": xnew, "y": ynew, "time": point["time"]})
new_pointlist.append(new_line)
# create the new handwritten data object
hwd_tmp = deepcopy(hwr_obj)
hwd_tmp.set_pointlist(new_pointlist)
new_training_set.append(hwd_tmp)
training_set = new_training_set
return training_set
| [
"math.radians",
"copy.deepcopy",
"numpy.linspace"
] | [((2680, 2724), 'numpy.linspace', 'numpy.linspace', (['self.min', 'self.max', 'self.num'], {}), '(self.min, self.max, self.num)\n', (2694, 2724), False, 'import numpy\n'), ((3661, 3678), 'copy.deepcopy', 'deepcopy', (['hwr_obj'], {}), '(hwr_obj)\n', (3669, 3678), False, 'from copy import deepcopy\n'), ((3246, 3268), 'math.radians', 'math.radians', (['rotation'], {}), '(rotation)\n', (3258, 3268), False, 'import math\n'), ((3305, 3327), 'math.radians', 'math.radians', (['rotation'], {}), '(rotation)\n', (3317, 3327), False, 'import math\n')] |
from copy import copy
import numpy as np
import scipy.sparse
import pandas as pd
from latbin.lattice import *
from latbin.matching import MatchingIndexer
class KernelWeightedMatchingInterpolator(object):
def __init__(self, x, y, x_scale, weighting_kernel=None, match_tolerance=6.0):
if weighting_kernel is None:
weighting_kernel = lambda x: np.exp(-0.5*x**2)#/(1.0+(x/0.1)**2)
self.weighting_kernel = weighting_kernel
self.x_scale = x_scale
self.m_indexer = MatchingIndexer(x/self.x_scale, tolerance=match_tolerance)
self.y = y
def __call__(self, x_interp, estimate_variance=False):
x_interp = x_interp/self.x_scale
dmat = self.m_indexer.distance_matrix(x_interp)
dmat.data = self.weighting_kernel(dmat.data)
weighted_data = dmat*self.y
weight_sums = dmat*np.ones(len(self.y))
if len(weighted_data.shape) == 1:
y_interp = weighted_data/weight_sums
else:
y_interp = weighted_data/weight_sums.reshape((-1, 1))
if not estimate_variance:
return y_interp
else:
sq_diff_sums = np.zeros(y_interp.shape)
dmat_sort = dmat.tocsc().sorted_indices()
indices = dmat_sort.indices
indptr = dmat_sort.indptr
for col_idx in range(len(indptr)-1):
lbi = indptr[col_idx]
ubi = indptr[col_idx+1]
row_indices = indices[lbi:ubi]
data_weight = dmat_sort.data[lbi:ubi]
if len(row_indices) > 0:
for row_index, dweight in zip(row_indices, data_weight):
delta_y_sq = (self.y[col_idx] - y_interp[row_index])**2
sq_diff_sums[row_index] += delta_y_sq*dweight
estimated_var = sq_diff_sums/weight_sums
return y_interp, estimated_var
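# --- usage sketch (added; not part of the original module) ---
# The constructor and __call__ signatures above suggest usage like the
# following; the sample data is made up, and this assumes latbin's
# MatchingIndexer behaves as the calls above imply.
def _interpolator_example():
    rng = np.random.RandomState(0)
    x = rng.uniform(0.0, 10.0, size=(200, 2))  # known sample locations
    y = np.sin(x[:, 0]) + 0.1 * x[:, 1]       # known sample values
    interp = KernelWeightedMatchingInterpolator(x, y, x_scale=np.ones(2))
    x_new = rng.uniform(1.0, 9.0, size=(20, 2))
    y_new, y_var = interp(x_new, estimate_variance=True)
    return y_new, y_var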
| [
"latbin.matching.MatchingIndexer",
"numpy.zeros",
"numpy.exp"
] | [((514, 574), 'latbin.matching.MatchingIndexer', 'MatchingIndexer', (['(x / self.x_scale)'], {'tolerance': 'match_tolerance'}), '(x / self.x_scale, tolerance=match_tolerance)\n', (529, 574), False, 'from latbin.matching import MatchingIndexer\n'), ((1191, 1215), 'numpy.zeros', 'np.zeros', (['y_interp.shape'], {}), '(y_interp.shape)\n', (1199, 1215), True, 'import numpy as np\n'), ((373, 394), 'numpy.exp', 'np.exp', (['(-0.5 * x ** 2)'], {}), '(-0.5 * x ** 2)\n', (379, 394), True, 'import numpy as np\n')] |
from panda3d.egg import *
from panda3d.core import *
from obj2egg import ObjMaterial
from copy import deepcopy
import numpy as np
import cv2
import copy
from direct.gui.OnscreenImage import OnscreenImage
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import getCameraFromInfo
def calcDistance(point_1, point_2):
return pow(pow(point_1[0] - point_2[0], 2) + pow(point_1[1] - point_2[1], 2), 0.5)
def calcLineDim(line, lineWidth = -1):
if abs(line[0][0] - line[1][0]) > abs(line[0][1] - line[1][1]):
if lineWidth < 0 or abs(line[0][1] - line[1][1]) <= lineWidth:
return 0
pass
elif abs(line[0][0] - line[1][0]) < abs(line[0][1] - line[1][1]):
if lineWidth < 0 or abs(line[0][0] - line[1][0]) <= lineWidth:
return 1
else:
return -1
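# (added note) calcLineDim classifies a two-point line segment by its
# dominant axis, e.g.
#   calcLineDim([(0, 0), (10, 1)]) -> 0   (horizontal-dominant)
#   calcLineDim([(0, 0), (1, 10)]) -> 1   (vertical-dominant)
# It returns -1 only for the exact-diagonal case, and falls through to an
# implicit None whenever the optional lineWidth check fails.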
class PlaneScene():
def __init__(self, index):
#self.depth = cv2.imread('dump/' + str(index) + '_depth_pred.png').astype(np.float32) / 255 * 10
self.depth = np.load('dump/' + str(index) + '_depth.npy')
#cv2.imwrite('dump/alpha_0.5.png', np.zeros(self.depth[:, :, 0].shape).astype(np.uint8))
self.segmentation = np.load('dump/' + str(index) + '_segmentation.npy')
width = 640
height = 480
self.depth = cv2.resize(self.depth, (width, height))
self.segmentation = cv2.resize(self.segmentation, (width, height), interpolation=cv2.INTER_NEAREST)
self.planes = np.load('dump/' + str(index) + '_planes.npy')
self.numPlanes = self.planes.shape[0]
self.imageTexture = ObjMaterial()
self.imageTexture.name = 'image'
self.imageTexture.put('map_Kd', 'dump/' + str(index) + '_image.png')
self.width = self.depth.shape[1]
self.height = self.depth.shape[0]
self.info = np.load('dump/' + str(index) + '_info.npy')
self.camera = getCameraFromInfo(self.info)
self.scene_index = index
self.calcHorizontalPlanes()
return
def addRectangle(self, parent):
planesGroup = EggGroup('planes')
parent.addChild(planesGroup)
vp = EggVertexPool('plane_vertex')
parent.addChild(vp)
p0 = Point3D(-10, 1, 0)
p1 = Point3D(-10, 10, 0)
p2 = Point3D(10, 1, 0)
p3 = Point3D(10, 10, 0)
# p0 = Point3D(-10, , 0)
# p1 = Point3D(-10, 100, 0)
# p3 = Point3D(10, 100, 0)
# p2 = Point3D(10, 90, 0)
planeGroup = EggGroup('plane')
planesGroup.addChild(planeGroup)
poly = EggPolygon()
planeGroup.addChild(poly)
vertex = EggVertex()
vertex.setPos(p0)
vertex.setUv(Point2D(0, 0))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(p1)
vertex.setUv(Point2D(0, 1))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(p2)
vertex.setUv(Point2D(1, 1))
poly.addVertex(vp.addVertex(vertex))
poly = EggPolygon()
planeGroup.addChild(poly)
vertex = EggVertex()
vertex.setPos(p1)
vertex.setUv(Point2D(0, 1))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(p2)
vertex.setUv(Point2D(1, 1))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(p3)
vertex.setUv(Point2D(1, 0))
poly.addVertex(vp.addVertex(vertex))
# vertex = EggVertex()
# vertex.setPos(p2)
# vertex.setUv(Point2D(1, 1))
# poly.addVertex(vp.addVertex(vertex))
return
def generatePlanes(self, parent):
planesGroup = EggGroup('planes')
parent.addChild(planesGroup)
vp = EggVertexPool('plane_vertex')
parent.addChild(vp)
for planeIndex in range(self.numPlanes):
mask = (self.segmentation == planeIndex).astype(np.uint8) * 255
cv2.imwrite('dump/mask_' + str(planeIndex) + '.png', mask)
contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
plane = self.planes[planeIndex]
planeD = np.linalg.norm(plane)
planeNormal = plane / planeD
for contour in contours:
planeGroup = EggGroup('plane')
planesGroup.addChild(planeGroup)
poly = EggPolygon()
planeGroup.addChild(poly)
poly.setTexture(self.imageTexture.getEggTexture())
poly.setMaterial(self.imageTexture.getEggMaterial())
contour = contour.astype(np.float32)
u = (contour[:, 0, 0].astype(np.float32) / self.width * self.info[16] - self.camera['cx']) / self.camera['fx']
v = -(contour[:, 0, 1].astype(np.float32) / self.height * self.info[17] - self.camera['cy']) / self.camera['fy']
ranges = np.stack([u, np.ones(u.shape), v], axis=1)
depth = planeD / np.dot(ranges, planeNormal)
XYZ = ranges * np.expand_dims(depth, -1)
#print(contour)
#print(XYZ)
#exit(1)
for vertexIndex, uv in enumerate(contour):
vertex = EggVertex()
X, Y, Z = XYZ[vertexIndex]
vertex.setPos(Point3D(X, Y, Z))
u, v = uv[0]
vertex.setUv(Point2D(u / self.width, 1 - v / self.height))
poly.addVertex(vp.addVertex(vertex))
continue
continue
continue
return
def generateRectangle(self, parent):
planesGroup = EggGroup('planes')
parent.addChild(planesGroup)
vp = EggVertexPool('plane_vertex')
parent.addChild(vp)
poly = EggPolygon()
planesGroup.addChild(poly)
w = 0.5
p0 = Point3D(-w / 2, 0, -w / 2)
p1 = Point3D(-w / 2, 0, w / 2)
p2 = Point3D(w / 2, 0, w / 2)
p3 = Point3D(w / 2, 0, -w / 2)
poly.setTexture(self.plateTexture.getEggTexture())
poly.setMaterial(self.plateTexture.getEggMaterial())
vertex = EggVertex()
vertex.setPos(Point3D(0, 1, 0))
vertex.setUv(Point2D(0, 0))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(Point3D(0, 1, 1))
vertex.setUv(Point2D(0, 1))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(Point3D(1, 1, 1))
vertex.setUv(Point2D(1, 1))
poly.addVertex(vp.addVertex(vertex))
vertex = EggVertex()
vertex.setPos(Point3D(1, 1, 0))
vertex.setUv(Point2D(1, 0))
poly.addVertex(vp.addVertex(vertex))
return
def addCollisionPolygons(self, scene):
polygons = scene.findAllMatches("**/plane")
mesh = BulletTriangleMesh()
for polygon in polygons:
#cNode = scene.attachNewNode(CollisionNode('plane_solid'))
#cNode.node().addSolid(CollisionPolygon(polygon))
#polygon.setCollideMask(BitMask32.bit(1))
node = polygon.node()
print(node.getNumGeoms())
for i in range(node.getNumGeoms()):
geom = node.getGeom(i)
mesh.addGeom(geom)
continue
continue
def test(self, scene):
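# (added note) This helper appears unfinished: 'incomingNode' and
# 'relativeTo' below are never defined in this scope (and 'groundMask' is
# unused), so calling it raises NameError; it looks like pasted reference
# code for converting GeomNodes into CollisionPolygons.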
groundMask=BitMask32(0b1)
parent = NodePath('cGeomConversionParent')
for c in incomingNode.findAllMatches('**/+GeomNode'):
if relativeTo:
xform=c.getMat(relativeTo).xformPoint
else:
xform=c.getMat().xformPoint
gni = 0
geomNode = c.node()
for g in range(geomNode.getNumGeoms()):
geom = geomNode.getGeom(g).decompose()
vdata = geom.getVertexData()
vreader = GeomVertexReader(vdata, 'vertex')
cChild = CollisionNode('cGeom-%s-gni%i' % (c.getName(), gni))
gni += 1
for p in range(geom.getNumPrimitives()):
prim = geom.getPrimitive(p)
for p2 in range(prim.getNumPrimitives()):
s = prim.getPrimitiveStart(p2)
e = prim.getPrimitiveEnd(p2)
v = []
for vi in range (s, e):
vreader.setRow(prim.getVertex(vi))
v.append (xform(vreader.getData3f()))
colPoly = CollisionPolygon(*v)
cChild.addSolid(colPoly)
n=parent.attachNewNode (cChild)
#n.show()
return parent
def generateEggModel(self):
data = EggData()
model = EggGroup('model')
data.addChild(model)
self.generatePlanes(model)
#self.generateRectangle(model)
data.writeEgg(Filename("dump/plane.egg"))
scene = NodePath(loadEggData(data))
#self.addCollisionPolygons(scene)
return scene
def getPlaneTriangles(self):
from skimage import measure
planeTriangles = []
planeNormals = []
horizontalPlaneTriangles = []
for planeIndex in range(self.numPlanes):
mask = (self.segmentation == planeIndex).astype(np.uint8) * 255
mask_ori = mask.copy()
#contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
masks = measure.label(mask.astype(np.int32), background=0)
contours = []
for maskIndex in range(masks.min() + 1, masks.max() + 1):
mask = masks == maskIndex
contour_mask = mask - np.logical_and(np.logical_and(np.roll(mask, shift=1, axis=0), np.roll(mask, shift=-1, axis=0)), np.logical_and(np.roll(mask, shift=1, axis=1), np.roll(mask, shift=-1, axis=1)))
contour_v, contour_u = contour_mask.nonzero()
contours.append(np.stack([contour_u, contour_v], axis=1))
continue
plane = self.planes[planeIndex]
planeD = np.linalg.norm(plane)
planeNormal = plane / np.maximum(planeD, 1e-4)
# cv2.imwrite('test/mask.png', mask_ori)
# #print(len(contours))
# mask_ori = np.stack([mask_ori, mask_ori, mask_ori], 2)
# count = 0
# for contour in contours:
# count += contour.shape[0]
# for uv in contour:
# #uv = uv[0]
# mask_ori[uv[1]][uv[0]] = np.array([255, 0, 0])
# continue
# continue
# cv2.imwrite('test/mask_contour.png', mask_ori)
# if planeIndex == 1:
# exit(1)
indices = np.arange(self.width * self.height).astype(np.float32)
us = indices % self.width
us = us / self.width * self.info[16] - self.camera['cx']
vs = indices // self.width # floor division: integer row index
vs = -(vs / self.height * self.info[17] - self.camera['cy'])
ranges = np.stack([us / self.camera['fx'], np.ones(us.shape), vs / self.camera['fy']], axis=1)
#print(ranges)
#print(np.dot(ranges, planeNormal).shape)
#print(np.dot(ranges, planeNormal))
#print(ranges)
#exit(1)
depth = planeD / np.tensordot(ranges, planeNormal, axes=([1], [0]))
XYZ = ranges * np.expand_dims(depth, -1)
XYZ = XYZ.reshape((self.height, self.width, 3))
for contour in contours:
contour = contour.astype(np.float32)[::20]
if contour.shape[0] < 3:
continue
rect = (0, 0, self.width, self.height)
subdiv = cv2.Subdiv2D(rect)
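# (added note) cv2.Subdiv2D builds a Delaunay triangulation of the points
# inserted below; getTriangleList() returns each triangle as six packed
# floats (x0, y0, x1, y1, x2, y2), which is why the loop further down
# reads the vertices two coordinates at a time.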
for point in contour:
subdiv.insert((point[0], point[1]))
continue
triangleList = subdiv.getTriangleList()
#print(contour)
#print(triangleList)
#exit(1)
for triangle2D in triangleList:
triangle = []
for vertexIndex in range(3):
x = int(triangle2D[vertexIndex * 2 + 0])
y = int(triangle2D[vertexIndex * 2 + 1])
#print(x, y)
if x < 0 or x >= self.width or y < 0 or y >= self.height:
continue
triangle.append(XYZ[y][x])
continue
if len(triangle) == 3:
#print(triangle)
if np.dot(np.cross(planeNormal, triangle[1] - triangle[0]), triangle[2] - triangle[0]) > 0:
triangle = [triangle[0], triangle[2], triangle[1]]
pass
if planeIndex in self.horizontalPlanes:
horizontalPlaneTriangles.append(triangle)
else:
planeTriangles.append(triangle)
pass
#planeNormals.append(planeNormal)
pass
continue
continue
planeTriangles = np.array(planeTriangles)
#planeNormals = np.array(planeNormals)
np.save('dump/' + str(self.scene_index) + '_plane_triangles.npy', planeTriangles)
#np.save('dump/' + str(self.scene_index) + '_plane_normals.npy', planeNormals)
return planeTriangles, horizontalPlaneTriangles, self.gravityDirection
def getPlaneGeometries(self):
if os.path.exists('dump/' + str(self.scene_index) + '_plane_triangles.npy'):
print('loading')
planeTriangles = np.load('dump/' + str(self.scene_index) + '_plane_triangles.npy')
planeNormals = np.load('dump/' + str(self.scene_index) + '_plane_normals.npy')
return planeTriangles, planeNormals
pass
planeNormals = []
planeTriangles = []
for planeIndex in range(self.numPlanes):
mask = (self.segmentation == planeIndex).astype(np.uint8) * 255
#mask_ori = mask.copy()
#contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
plane = self.planes[planeIndex]
planeD = np.linalg.norm(plane)
planeNormal = plane / np.maximum(planeD, 1e-4)
#cv2.imwrite('test/mask.png', mask)
#v, u = mask.nonzero()
u = np.arange(self.width * self.height) % self.width
v = np.arange(self.width * self.height) // self.width # floor division: integer row indices
u = u.astype(np.float32) / self.width * self.info[16] - self.camera['cx']
v = -(v.astype(np.float32) / self.height * self.info[17] - self.camera['cy'])
ranges = np.stack([u / self.camera['fx'], np.ones(u.shape), v / self.camera['fy']], axis=1)
depth = planeD / np.dot(ranges, planeNormal)
XYZ = ranges * np.expand_dims(depth, -1)
XYZ = XYZ.reshape((self.height, self.width, 3))
triangles = []
for pixel in mask.reshape(-1).nonzero()[0]:
x = pixel % self.width
y = pixel // self.width # floor division: the row index must be an integer
for neighbors in [((x - 1, y), (x, y - 1)), ((x - 1, y), (x, y + 1)), ((x + 1, y), (x, y - 1)), ((x + 1, y), (x, y + 1))]:
valid = True
for neighbor in neighbors:
if neighbor[0] < 0 or neighbor[0] >= self.width or neighbor[1] < 0 or neighbor[1] >= self.height or mask[neighbor[1]][neighbor[0]] == False:
valid = False
break
continue
if valid:
triangle = [XYZ[y][x]]
for neighbor in neighbors:
triangle.append(XYZ[neighbor[1], neighbor[0]])
continue
triangles.append(triangle)
pass
continue
continue
planeTriangles.append(triangles)
planeNormals.append(planeNormal)
continue
planeTriangles = np.array(planeTriangles)
#planeNormals = np.array(planeNormals)
#np.save('dump/' + str(self.scene_index) + '_plane_triangles.npy', planeTriangles)
#np.save('dump/' + str(self.scene_index) + '_plane_normals.npy', planeNormals)
return planeTriangles, planeNormals
def calcHorizontalPlanes(self):
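# (added note) Strategy: flip the unit plane normals into a common
# hemisphere, k-means them into three dominant directions, and take the
# cluster whose centroid has the largest |z| component as "horizontal";
# that centroid (forced to point downward) is the gravity direction.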
from sklearn.cluster import KMeans
planesD = np.linalg.norm(self.planes, axis=-1, keepdims=True)
normals = self.planes / np.maximum(planesD, 1e-4)
normals[normals[:, 1] < 0] *= -1
kmeans = KMeans(n_clusters=3).fit(normals)
dominantNormals = kmeans.cluster_centers_
dominantNormals = dominantNormals / np.maximum(np.linalg.norm(dominantNormals, axis=-1, keepdims=True), 1e-4)
planeClusters = kmeans.predict(normals)
horizontalNormalIndex = np.argmax(np.abs(dominantNormals[:, 2]))
self.gravityDirection = dominantNormals[horizontalNormalIndex]
self.horizontalPlanes = (planeClusters == horizontalNormalIndex).nonzero()[0]
if self.gravityDirection[2] > 0:
self.gravityDirection *= -1
pass
print(self.horizontalPlanes)
print(self.gravityDirection)
return
def getHorizontalPlanes(self):
return self.gravityDirection, self.horizontalPlanes
def getHolePos(self):
floorPlaneIndex = 2
closePoint = np.array([0., 1.22, -0.2])
plane = self.planes[floorPlaneIndex]
planeD = np.linalg.norm(plane)
planeNormal = plane / planeD
distance = planeD - np.dot(planeNormal, closePoint)
distance *= 0.99
holePos = closePoint + planeNormal * distance
H = P = R = 0
H = -90 + np.rad2deg(np.arctan2(planeNormal[1], planeNormal[0]))
#P = 90 - np.rad2deg(np.arccos(np.abs(planeNormal[2])))
P = -90 + np.rad2deg(np.arccos(np.abs(planeNormal[2])))
#print(H, P, R)
return holePos, np.array([H, P, R])
def getPortalPos(self):
wallPlaneIndex = 1
closePoint_1 = np.array([0.5, 1.35, -0.5])
closePoint_2 = np.array([-0.4, 1, 0.19])
plane = self.planes[wallPlaneIndex]
planeD = np.linalg.norm(plane)
planeNormal = plane / planeD
distance = planeD - np.dot(planeNormal, closePoint_1)
distance *= 0.95
portalPos_1 = closePoint_1 + planeNormal * distance
distance = planeD - np.dot(planeNormal, closePoint_2)
distance *= 0.95
portalPos_2 = closePoint_2 + planeNormal * distance
H = P = R = 0
H = -90 + np.rad2deg(np.arctan2(planeNormal[1], planeNormal[0]))
#P = 90 - np.rad2deg(np.arccos(np.abs(planeNormal[2])))
P = -90 + np.rad2deg(np.arccos(-np.abs(planeNormal[2])))
#print(H, P, R)
return portalPos_1, np.array([H, P, R]), portalPos_2, np.array([H, P, R]), planeNormal
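# --- illustrative sketch (added; not part of the original module) ---
# The recurring depth computation above intersects a pixel's viewing ray
# with a plane: for unit normal n and offset d (plane: n . p = d), a ray
# direction r hits the plane at (d / (n . r)) * r. A standalone version
# with made-up intrinsics:
def _backproject_pixel_example(u=320.0, v=240.0):
    fx = fy = 500.0        # hypothetical focal lengths
    cx, cy = 320.0, 240.0  # hypothetical principal point
    ray = np.array([(u - cx) / fx, 1.0, -(v - cy) / fy])  # same axis convention as above
    plane = np.array([0.0, 2.0, 0.0])  # plane stored as normal * offset, as in self.planes
    d = np.linalg.norm(plane)
    n = plane / d
    depth = d / np.dot(ray, n)
    return ray * depth  # 3D point on the plane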
| [
"numpy.maximum",
"numpy.abs",
"numpy.arctan2",
"numpy.ones",
"numpy.linalg.norm",
"numpy.arange",
"os.path.abspath",
"sklearn.cluster.KMeans",
"cv2.resize",
"cv2.Subdiv2D",
"numpy.stack",
"numpy.roll",
"numpy.tensordot",
"numpy.cross",
"numpy.dot",
"numpy.expand_dims",
"numpy.array",... | [((1265, 1304), 'cv2.resize', 'cv2.resize', (['self.depth', '(width, height)'], {}), '(self.depth, (width, height))\n', (1275, 1304), False, 'import cv2\n'), ((1329, 1408), 'cv2.resize', 'cv2.resize', (['self.segmentation', '(width, height)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(self.segmentation, (width, height), interpolation=cv2.INTER_NEAREST)\n', (1339, 1408), False, 'import cv2\n'), ((1540, 1553), 'obj2egg.ObjMaterial', 'ObjMaterial', ([], {}), '()\n', (1551, 1553), False, 'from obj2egg import ObjMaterial\n'), ((1817, 1845), 'utils.getCameraFromInfo', 'getCameraFromInfo', (['self.info'], {}), '(self.info)\n', (1834, 1845), False, 'from utils import getCameraFromInfo\n'), ((12039, 12063), 'numpy.array', 'np.array', (['planeTriangles'], {}), '(planeTriangles)\n', (12047, 12063), True, 'import numpy as np\n'), ((14663, 14687), 'numpy.array', 'np.array', (['planeTriangles'], {}), '(planeTriangles)\n', (14671, 14687), True, 'import numpy as np\n'), ((15037, 15088), 'numpy.linalg.norm', 'np.linalg.norm', (['self.planes'], {'axis': '(-1)', 'keepdims': '(True)'}), '(self.planes, axis=-1, keepdims=True)\n', (15051, 15088), True, 'import numpy as np\n'), ((15989, 16016), 'numpy.array', 'np.array', (['[0.0, 1.22, -0.2]'], {}), '([0.0, 1.22, -0.2])\n', (15997, 16016), True, 'import numpy as np\n'), ((16070, 16091), 'numpy.linalg.norm', 'np.linalg.norm', (['plane'], {}), '(plane)\n', (16084, 16091), True, 'import numpy as np\n'), ((16590, 16617), 'numpy.array', 'np.array', (['[0.5, 1.35, -0.5]'], {}), '([0.5, 1.35, -0.5])\n', (16598, 16617), True, 'import numpy as np\n'), ((16637, 16662), 'numpy.array', 'np.array', (['[-0.4, 1, 0.19]'], {}), '([-0.4, 1, 0.19])\n', (16645, 16662), True, 'import numpy as np\n'), ((16716, 16737), 'numpy.linalg.norm', 'np.linalg.norm', (['plane'], {}), '(plane)\n', (16730, 16737), True, 'import numpy as np\n'), ((274, 299), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (289, 299), False, 'import os\n'), ((3766, 3828), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3782, 3828), False, 'import cv2\n'), ((3882, 3903), 'numpy.linalg.norm', 'np.linalg.norm', (['plane'], {}), '(plane)\n', (3896, 3903), True, 'import numpy as np\n'), ((9393, 9414), 'numpy.linalg.norm', 'np.linalg.norm', (['plane'], {}), '(plane)\n', (9407, 9414), True, 'import numpy as np\n'), ((13052, 13073), 'numpy.linalg.norm', 'np.linalg.norm', (['plane'], {}), '(plane)\n', (13066, 13073), True, 'import numpy as np\n'), ((15117, 15144), 'numpy.maximum', 'np.maximum', (['planesD', '(0.0001)'], {}), '(planesD, 0.0001)\n', (15127, 15144), True, 'import numpy as np\n'), ((15485, 15514), 'numpy.abs', 'np.abs', (['dominantNormals[:, 2]'], {}), '(dominantNormals[:, 2])\n', (15491, 15514), True, 'import numpy as np\n'), ((16149, 16180), 'numpy.dot', 'np.dot', (['planeNormal', 'closePoint'], {}), '(planeNormal, closePoint)\n', (16155, 16180), True, 'import numpy as np\n'), ((16500, 16519), 'numpy.array', 'np.array', (['[H, P, R]'], {}), '([H, P, R])\n', (16508, 16519), True, 'import numpy as np\n'), ((16800, 16833), 'numpy.dot', 'np.dot', (['planeNormal', 'closePoint_1'], {}), '(planeNormal, closePoint_1)\n', (16806, 16833), True, 'import numpy as np\n'), ((16936, 16969), 'numpy.dot', 'np.dot', (['planeNormal', 'closePoint_2'], {}), '(planeNormal, closePoint_2)\n', (16942, 16969), True, 'import numpy as np\n'), ((17304, 17323), 'numpy.array', 'np.array', 
(['[H, P, R]'], {}), '([H, P, R])\n', (17312, 17323), True, 'import numpy as np\n'), ((17338, 17357), 'numpy.array', 'np.array', (['[H, P, R]'], {}), '([H, P, R])\n', (17346, 17357), True, 'import numpy as np\n'), ((9443, 9469), 'numpy.maximum', 'np.maximum', (['planeD', '(0.0001)'], {}), '(planeD, 0.0001)\n', (9453, 9469), True, 'import numpy as np\n'), ((10493, 10543), 'numpy.tensordot', 'np.tensordot', (['ranges', 'planeNormal'], {'axes': '([1], [0])'}), '(ranges, planeNormal, axes=([1], [0]))\n', (10505, 10543), True, 'import numpy as np\n'), ((10565, 10590), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (10579, 10590), True, 'import numpy as np\n'), ((10843, 10861), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (10855, 10861), False, 'import cv2\n'), ((13102, 13128), 'numpy.maximum', 'np.maximum', (['planeD', '(0.0001)'], {}), '(planeD, 0.0001)\n', (13112, 13128), True, 'import numpy as np\n'), ((13209, 13244), 'numpy.arange', 'np.arange', (['(self.width * self.height)'], {}), '(self.width * self.height)\n', (13218, 13244), True, 'import numpy as np\n'), ((13268, 13303), 'numpy.arange', 'np.arange', (['(self.width * self.height)'], {}), '(self.width * self.height)\n', (13277, 13303), True, 'import numpy as np\n'), ((13602, 13629), 'numpy.dot', 'np.dot', (['ranges', 'planeNormal'], {}), '(ranges, planeNormal)\n', (13608, 13629), True, 'import numpy as np\n'), ((13651, 13676), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (13665, 13676), True, 'import numpy as np\n'), ((15203, 15223), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (15209, 15223), False, 'from sklearn.cluster import KMeans\n'), ((15334, 15389), 'numpy.linalg.norm', 'np.linalg.norm', (['dominantNormals'], {'axis': '(-1)', 'keepdims': '(True)'}), '(dominantNormals, axis=-1, keepdims=True)\n', (15348, 15389), True, 'import numpy as np\n'), ((16296, 16338), 'numpy.arctan2', 'np.arctan2', (['planeNormal[1]', 'planeNormal[0]'], {}), '(planeNormal[1], planeNormal[0])\n', (16306, 16338), True, 'import numpy as np\n'), ((17095, 17137), 'numpy.arctan2', 'np.arctan2', (['planeNormal[1]', 'planeNormal[0]'], {}), '(planeNormal[1], planeNormal[0])\n', (17105, 17137), True, 'import numpy as np\n'), ((4604, 4631), 'numpy.dot', 'np.dot', (['ranges', 'planeNormal'], {}), '(ranges, planeNormal)\n', (4610, 4631), True, 'import numpy as np\n'), ((4655, 4680), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (4669, 4680), True, 'import numpy as np\n'), ((9263, 9303), 'numpy.stack', 'np.stack', (['[contour_u, contour_v]'], {'axis': '(1)'}), '([contour_u, contour_v], axis=1)\n', (9271, 9303), True, 'import numpy as np\n'), ((9967, 10002), 'numpy.arange', 'np.arange', (['(self.width * self.height)'], {}), '(self.width * self.height)\n', (9976, 10002), True, 'import numpy as np\n'), ((10271, 10288), 'numpy.ones', 'np.ones', (['us.shape'], {}), '(us.shape)\n', (10278, 10288), True, 'import numpy as np\n'), ((13529, 13545), 'numpy.ones', 'np.ones', (['u.shape'], {}), '(u.shape)\n', (13536, 13545), True, 'import numpy as np\n'), ((16435, 16457), 'numpy.abs', 'np.abs', (['planeNormal[2]'], {}), '(planeNormal[2])\n', (16441, 16457), True, 'import numpy as np\n'), ((4549, 4565), 'numpy.ones', 'np.ones', (['u.shape'], {}), '(u.shape)\n', (4556, 4565), True, 'import numpy as np\n'), ((17235, 17257), 'numpy.abs', 'np.abs', (['planeNormal[2]'], {}), '(planeNormal[2])\n', (17241, 17257), True, 'import 
numpy as np\n'), ((9038, 9068), 'numpy.roll', 'np.roll', (['mask'], {'shift': '(1)', 'axis': '(0)'}), '(mask, shift=1, axis=0)\n', (9045, 9068), True, 'import numpy as np\n'), ((9070, 9101), 'numpy.roll', 'np.roll', (['mask'], {'shift': '(-1)', 'axis': '(0)'}), '(mask, shift=-1, axis=0)\n', (9077, 9101), True, 'import numpy as np\n'), ((9119, 9149), 'numpy.roll', 'np.roll', (['mask'], {'shift': '(1)', 'axis': '(1)'}), '(mask, shift=1, axis=1)\n', (9126, 9149), True, 'import numpy as np\n'), ((9151, 9182), 'numpy.roll', 'np.roll', (['mask'], {'shift': '(-1)', 'axis': '(1)'}), '(mask, shift=-1, axis=1)\n', (9158, 9182), True, 'import numpy as np\n'), ((11564, 11612), 'numpy.cross', 'np.cross', (['planeNormal', '(triangle[1] - triangle[0])'], {}), '(planeNormal, triangle[1] - triangle[0])\n', (11572, 11612), True, 'import numpy as np\n')] |
# This script contains the BRIANModel class
# Calling makeNeuron_Ca() on a BRIANModel object will create a biophysical neuron
# Multiple other functions allow for plotting, animating, ...
from __future__ import division
#folder with parameters, equations and morphology
import os, sys
mod_path = os.path.abspath(os.path.join('..','Model'))
sys.path.append(mod_path)
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
import matplotlib.colors as colorz
import matplotlib.cm as clrm
import brian2 as br2
from brian2 import uF, cm, um, ohm, ms, siemens, mV, nA, us,psiemens
# This is the 3D plotting toolkit
from mpl_toolkits.mplot3d import Axes3D
#import parameters and equations for neuron
from oo_Parameters import *
from oo_equations import *
from MorphologyData import *
from Visualisation_functions import *
from oo_initScripts import set_init_nrn,set_init_syn
br2.start_scope()
br2.defaultclock.dt = defaultclock.dt
class BRIANModel(object):
"""
Neuron object in brian2
"""
def __init__(self, swc_model):
"""
Parameters
----------
swc_model: a char
path of the file containing the neuron model in .swc format
"""
# Brian morphology
self.morpho = br2.Morphology.from_file(swc_model)
morpho = self.morpho
# Store compartment numbers
self.segment,self.segment_swc = get_swc(swc_model)
# Initialise a dictionary for distances to the soma per compartment
self.distances = {}
# Initialise a dictionary for lines to plot the neuron
self.lines = {}
# Add the first section as soma
self.sections = {morpho.type: [self.morpho[0], 0, 0]}
# Set a name and distances for the soma
self.sections['soma'][0].name = 'soma'
self.sections['soma'][0].f_x = self.morpho[0].x/meter
self.sections['soma'][0].f_y = self.morpho[0].y/meter
self.sections['soma'][0].f_z = self.morpho[0].z/meter
self.sections['soma'][0].dist = 0
self.distances['soma'] = [0.]
# Initialize the dendrite numbering
dend_b = 0
# Register soma's children in a sections dictionary
for sec in morpho.children:
# Create an attribute "name" for all children of the soma
if str(sec.type) == "dend":
sec.name = sec.type[:4]+"_"+str(dend_b)
dend_b += 1
else:
sec.name = sec.type
# store final coordinates of the parent (=soma) segment
sec.f_x = self.morpho[0].x[0]/meter
sec.f_y = self.morpho[0].y[0]/meter
sec.f_z = self.morpho[0].z[0]/meter
sec.dist = self.distances['soma'][0]
# add distances to the parent
self.distances = calc_dist(self.distances, sec)
# get the coordinates for all compartments in this section
xn = sec.x/meter
yn = sec.y/meter
zn = sec.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (they correspond to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[sec.name] = [sec,line_num,line_num]
# Initialize the level value
level = [sec for sec in morpho.children]
while level != []:
for i, sec in enumerate(level):
for j, child in enumerate(sec.children):
# Create an attribute "name" for all children of sec
name = sec.name + str(j)
child.name = name
# Store parent final coordinates
child.f_x = sec.x[-1]/meter
child.f_y = sec.y[-1]/meter
child.f_z = sec.z[-1]/meter
# Store distances to the soma
child.dist = self.distances[sec.name][-1]
self.distances = calc_dist(self.distances, child)
# Get the coordinates for all compartments in this section
xn = child.x/meter
yn = child.y/meter
zn = child.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (corresponds to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[name] = [child, line_num,line_num]
level = [sec.children for sec in level]
# Flatten the list at this level
level = [sublist for sl in level for sublist in sl]
################################################################################
# THE FUNCTION BELOW CAN BE CALLED TO CREATE A BIOPHYSICAL NEURON
################################################################################
def makeNeuron_Ca(self,morphodata):
"""return spatial neuron"""
# Set Biophysics
neuron = self.biophysics(morphodata)
return neuron
def biophysics(self,morpho_data):
"""Inserting biophysics"""
neuron = br2.SpatialNeuron(morphology=self.morpho, model=eqs, \
Cm=Capacit, Ri=R_axial, threshold = "v/mV>0", refractory = "v/mV > -10",
threshold_location = 0, reset = 's_trace += x_reset*(taux/ms)',method='heun') #
# define the different parts of the neuron
N_soma = neuron[morpho_data['soma'][0]:morpho_data['soma'][-1]+1]
N_axon = neuron[morpho_data['axon'][0]:morpho_data['axon'][-1]+1]
N_basal = neuron[morpho_data['basal'][0]:morpho_data['basal'][-1]+1]
N_apical = neuron[morpho_data['apical'][0]:morpho_data['apical'][-1]+1]
Theta_low = morpho_data['thetalow']*mV
# insert leak conductance
neuron.gLeak = g_leak
# noise
neuron.noise_sigma = 0*pA # noise standard deviation (zero outside the soma)
neuron.noise_avg = 0*pA # noise mean (zero outside the soma)
N_soma.noise_sigma = noise_std # somatic noise standard deviation
N_soma.noise_avg = noise_mean # somatic noise mean
####################
# ACTIVE CHANNELS
####################
# Na channels soma, axon, apical dendrites
N_soma.gNav = somaNa
N_axon.gNav = axonNa
N_apical.gNav = apicalNa
neuron.thi1 = thi1_all
N_axon.thi1 = thi1_axn
neuron.thi2 = thi2_all
N_axon.thi2 = thi2_axn
#Kv channels
N_soma.gKv = somagKv
N_basal.gKv = dendgKv
N_apical.gKv = dendgKv
N_axon.gKv = axongKv
#Ca channels soma
N_soma.gCav = ratio_ca*somaCa
N_soma.gIt = (1-ratio_ca)*somaCa
#Ka channels soma
N_soma.gKa_prox = somaKap
#Ka channels dendrites, Na channels basal dendrites, Ca channels dendrites, axon initial segment
for sec in self.sections:
secNr = self.sections[sec][2]
seclen = len(self.sections[sec][0].x)
#BASAL
if secNr in morpho_data['basal']:
# decreasing Na channels
gNa_diff = 0.5*np.array(self.distances[sec][:])*psiemens/um**2
neuron[secNr:secNr+seclen].gNav = np.multiply(basalNa - gNa_diff,basalNa - gNa_diff>0 )
# increasing Ka channels
gKa_diff = 0.7*np.array(self.distances[sec][:])*psiemens/um**2
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
# Ca channels
neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)
#spines
addSpines = np.array(self.distances[sec][:]) > spinedist
noSpines = np.array(self.distances[sec][:]) <= spinedist
neuron[secNr:secNr+seclen].gLeak = noSpines*g_leak + addSpines*g_leak_dend
neuron[secNr:secNr+seclen].Cm = noSpines*Capacit + addSpines*Capacit_dend
#APICAL
if secNr in morpho_data['apical']:
#ratio of Ka channels
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*apicalKa
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*apicalKa
# Ca channels
neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)
#spines
addSpines = np.array(self.distances[sec][:]) > spinedist
noSpines = np.array(self.distances[sec][:]) <= spinedist
neuron[secNr:secNr+seclen].gLeak = noSpines*g_leak + addSpines*g_leak_dend
neuron[secNr:secNr+seclen].Cm = noSpines*Capacit + addSpines*Capacit_dend
#AXON
if secNr in morpho_data['axon']:
#KL current
addKL = np.array(self.distances[sec][:]) > 35
neuron[secNr:secNr+seclen].gKL = addKL*axongL
neuron[1:6].gKv = np.array([40.,100.,500.,500.,500.])*psiemens/um**2
neuron[1:6].gKL = 1*np.array([20.,35.,125.,250.,0])*psiemens/um**2
neuron[1:6].gNav = 3*np.array([8000.,7000.,5000.,5000.,5000.])*psiemens/um**2
neuron[1:3].gCav = somaCa*ratio_ca
neuron[1:3].gIt = somaCa*(1.-ratio_ca)
# neuron[582:593].gCav = 10*psiemens/um**2
# neuron[582:593].gNav = 100*psiemens/um**2
# SET INITIAL VALUES
set_init_nrn(neuron,Theta_low)
return neuron
################################################################################
# The functions below are mainly for visualization of the neuron morphology
################################################################################
def print_dist(self, sec_number):
''' return the distance to the soma and the diameter of a given section'''
for sec in self.sections:
for ii in range(len(self.sections[sec][0].x)):
if (self.sections[sec][2]+ii == sec_number):
# print( 'Section '+ str(sec_number)+ ', part of: '+str(sec))
# print( 'Distance to soma: '+ str(self.distances[sec][ii]))
# print( 'Diameter: '+ str(self.sections[sec][0].diameter[ii]*1.e6))
sectiondistance = self.distances[sec][ii]
sectiondiameter = self.sections[sec][0].diameter[ii]*1.e6
return [sectiondistance,sectiondiameter]
def save_dist_vs_nr(self,maxNr):
''' save the distance and diameter as a function of section number'''
dist_nr = np.zeros(maxNr)
diam_nr = np.zeros(maxNr)
print('saving distances')
for sec in self.sections:
for ii in range(len(self.sections[sec][0].x)):
if self.sections[sec][2]+ii < maxNr:
dist_nr[self.sections[sec][2]+ii] = self.distances[sec][ii]
diam_nr[self.sections[sec][2]+ii] = self.sections[sec][0].diameter[ii]*1.e6
return dist_nr,diam_nr
def calc_distCompartments(self,sec_range,distances):
''' calculate the terminal ends of dendrites '''
term_vec = [0]
dist_vec = distances[sec_range]
for jj in range(len(dist_vec)-1):
if np.abs(dist_vec[jj+1]-dist_vec[jj])>20:
term_vec = np.append(term_vec,np.array([sec_range[0]+jj]),axis=0)
return term_vec,dist_vec
def show_shape3d(self, fov=400, fig=None, ax=None):
"""Show a 3D plot of the morphology"""
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
# Set fov
ax.set_xlim(-fov, fov)
ax.set_ylim(-fov, fov)
ax.set_zlim(-fov, fov)
ax.set_aspect("equal")
# data = self.sections["soma"][1]
for sec in self.sections.values():
self.lines = add_line3d(ax, self.lines, sec[0])
# cmap = plt.get_cmap('Spectral')
# cmap = plt.get_cmap('summer')
## print np.max(data)
# cNorm = colorz.Normalize(vmin=0, vmax=1)
# scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
# for sec in self.sections:
# for ii in range(len(self.lines[sec])):
## print ii
# vsec = self.sections[sec][1][ii]
# cval = scalarMap.to_rgba(vsec)
# self.lines[sec][ii].set_color(cval)
def show_segm(self, fov=500, fig=None, ax=None, segm_range = 0,colorv = 'r', segm_range2 = [0],colorv2 = 'k'):
"""Show a 2D plot of the morphology, highlight sections in range 'segm_range' """
if fig is None:
fig, ax = plt.subplots()
# Set fov
ax.set_xlim(-240, 110)
ax.set_ylim(-370, 200)
ax.set_aspect("equal")
#add lines
for sec in self.sections.values():
self.lines = add_line(ax, self.lines, sec[0])
# change colors
for sec in self.sections:
for ii in range(len(self.sections[sec][0].x)):
if (self.sections[sec][2]+ii in segm_range):
cval = colorv
self.lines[sec][ii].set_linewidth(3)
elif (self.sections[sec][2]+ii in segm_range2):
cval = colorv2
self.lines[sec][ii].set_linewidth(3)
else:
cval = 'black'
self.lines[sec][ii].set_color(cval)
# savefig('./'+'Neuron'+'.eps', format='eps', dpi=1000)
# fig.suptitle(str(int(distMin))+'um to '+str(int(distMax))+' um')
def show_segm_byName(self, fov=500, fig=None, ax=None, segmName='soma'):
"""Show a 2D plot of the morphology, highlight section with name 'segmName' """
if fig is None:
fig, ax = plt.subplots()
# Set fov
ax.set_xlim(-fov, fov)
ax.set_ylim(-fov, fov)
ax.set_aspect("equal")
for sec in self.sections.values():
self.lines = add_line(ax, self.lines, sec[0])
for sec in self.sections:
for ii in range(len(self.sections[sec][0].x)):
if (str(sec) == segmName):
cval = 'red'
print (self.sections[sec][2]+ii)
self.lines[sec][ii].set_linewidth(3)
else:
cval = 'black'
self.lines[sec][ii].set_color(cval)
def show_shape(self, fovx=200,fovy=200, fig=None, ax=None):
"""Show a 2D plot of the morphology"""
if fig is None:
fig, ax = plt.subplots()
# Set fov
ax.set_xlim(-fovx, fovx)
ax.set_ylim(-fovy, fovy)
ax.set_aspect("equal")
for sec in self.sections.values():
self.lines = add_line(ax, self.lines, sec[0])
# cmap = plt.get_cmap('spectral')
# print np.max(data)
# cNorm = colorz.Normalize(vmin=0, vmax=1)
# scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
# for sec in self.sections:
# for ii in range(len(self.lines[sec])):
## print ii
# vsec = self.sections[sec][1][ii]
# cval = scalarMap.to_rgba(vsec)
# self.lines[sec][ii].set_color(cval)
def animate3d(self):
""" Make an animation (3D) """
try:
nf = len(self.sections["soma"][1][0])
print( 'nf: '+str(nf))
except TypeError:
print( "running simulation first")
self.run()
nf = len(self.sections["soma"][1][0])
data = self.sections["soma"][1][0]
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
ax.set_xlim(-250, 250)
ax.set_ylim(-250, 250)
ax.set_zlim(-250, 250)
ax.set_aspect('equal')
self.show_shape3d(fig=fig, ax=ax)
cmap = plt.get_cmap('afmhot')
cNorm = colorz.Normalize(vmin=-75*mV, vmax=-0*mV)
scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
def anim3d(i):
for sec in self.sections:
for ii in range(len(self.lines[sec])):
# print ii
vsec = self.sections[sec][1][ii][i]
cval = scalarMap.to_rgba(vsec)
self.lines[sec][ii].set_color(cval)
return self.lines,
# call the animator.
anim3d = animation.FuncAnimation(fig, anim3d, interval=20,
frames=nf)
mywriter = animation.FFMpegWriter()
anim3d.save('basic_animation.avi', fps=30,writer=mywriter)
def animate(self,filename='animation.avi'):
""" Make an animation (2D) """
try:
nf = len(self.sections["soma"][1][0])
print( 'nf: '+str(nf))
except TypeError:
print( "running simulation first")
self.run()
nf = len(self.sections["soma"][1][0])
data = np.zeros([1,nf])
for x in self.sections.values():
data = np.append(data,np.array(x[1]),axis=0)
data = data[1:,:]
fig, ax = plt.subplots()
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 500)
ax.set_aspect('equal')
plt.axis('off')
self.show_shape(fig=fig, ax=ax)
cmap = plt.get_cmap('afmhot')
#print(np.amin(data))
cNorm = colorz.Normalize(vmin=-0.07, vmax= 0 ) #np.amin(data) np.amax(data))
scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
def anim(i):
for sec in self.sections:
for ii in range(len(self.lines[sec])):
# print ii
vsec = self.sections[sec][1][ii][i]
cval = scalarMap.to_rgba(vsec)
self.lines[sec][ii].set_color(cval)
return self.lines,
# call the animator.
anim = animation.FuncAnimation(fig, anim, interval=2,
frames=nf)
mywriter = animation.FFMpegWriter(fps=33)
anim.save(filename, fps=33,writer=mywriter)
def show_property(self, var_to_show):
"""Show a 2D plot of the morphology, highlight the distribution of a parameter 'var_to_show' """
data = var_to_show
fig, ax = plt.subplots()
ax.set_xlim(-200, 150)
ax.set_ylim(-200, 150)
ax.set_aspect('equal')
# ax.set_axis_bgcolor((0.93,0.93,0.93))
self.show_shape(fig=fig, ax=ax)
minmin = np.amin(data)
maxmax = np.amax(data)
# cmap = plt.get_cmap('seismic')
# cNorm = colorz.Normalize(vmin=minmin, vmax= maxmax ) # np.amin(data) np.amax(data)
# scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
# scalarMap.set_array([minmin,maxmax])
orig_cmap = clrm.coolwarm #coolwarm, seismic,bwr,rainbow, jet
shiftc = 1 - maxmax/(maxmax + abs(minmin))
newcmap = shiftedColorMap(orig_cmap, start=0, midpoint=shiftc,
stop=1.0, name='shiftedcmap',
somspike=sspike,nmdaspike=nspike)
cNorm = colorz.Normalize(vmin=minmin, vmax= maxmax )
scalarMap = clrm.ScalarMappable(norm=cNorm,cmap=newcmap)
scalarMap.set_array([minmin,maxmax])
cbar = plt.colorbar(scalarMap,ticks = np.arange(minmin,maxmax+1))
lblvar = list(range(sspikemin,sspikemin+sspike))+[' ']+list(range(nspikemin+nspike,nspikemin,-1))
cbar.ax.set_yticklabels(lblvar)
for sec in self.sections:
sec_nr = self.sections[sec][2]
for ii in range(len(self.lines[sec])):
vsec = var_to_show[sec_nr+ii]
cval = scalarMap.to_rgba(vsec)
self.lines[sec][ii].set_color(cval)
# title('Location-dependence evoking spikes',fontsize=25)
# text(-350,150,'NMDA spike',color='r',fontsize=20)
# text(-350,100,'Somatic spike',color='b',fontsize=20)
# axis('off')
def show_nrn_cmap(self, var_to_show):
"""Show a 2D plot of the morphology, highlight the distribution of a parameter 'var_to_show' """
data = var_to_show
fig, ax = plt.subplots()
ax.set_xlim(-250, 150)
ax.set_ylim(-250, 250)
ax.set_aspect('equal')
self.show_shape(fig=fig, ax=ax)
# cmap = plt.get_cmap('afmhot')
cmap = plt.get_cmap('coolwarm') #coolwarm, seismic,bwr,rainbow
minmin = np.amin(data)
maxmax = np.amax(data)
# maxmax = np.amax(data)+15.
cNorm = colorz.Normalize(vmin=minmin, vmax= maxmax ) # np.amin(data) np.amax(data)
# cNorm = colorz.Normalize(vmin=np.amin(data), vmax= np.amax(data) ) #
scalarMap = clrm.ScalarMappable(norm=cNorm, cmap=cmap)
scalarMap.set_array([minmin,maxmax])
plt.colorbar(scalarMap)
def v_record(self, neuron):
"""Set a monitor for the voltage of all segments"""
return br2.StateMonitor(neuron, 'v', record=True)
def run(self,morphodata):
"""run """
# Set Biophysics
neuron = self.biophysics(morphodata)
neuron.run_regularly('Mgblock = 1./(1.+ exp(-0.062*vu2)/3.57)',dt=br2.defaultclock.dt)
# Record in every section
# monitor = self.v_record(neuron)
monitor = br2.StateMonitor(neuron, 'v', record=True, dt = 20*defaultclock.dt)
morph_data = morphodata
axo = len(morph_data['axon'])
bsl = list(morph_data['basal'])
apc = list(morph_data['apical'])
# dc = distal_compartments_Branco_eff
# pc = proximal_compartments_Branco
dc = distal_compartments_Acker_eff
pc = proximal_compartments_Acker
# nrComp = 10 #len(bsl)
#####################################################
# Input Neuron
#####################################################
Theta_low = morph_data['thetalow']*br2.mV
V_rest = 0.*br2.mV
V_thresh = 0.5*br2.mV
NrInGroups = 4
Nr_clust_dist = NrInGroups#5 # Nr of clustered ensembles distally
Nr_clust_prox = 0#5 # Nr of clustered ensembles proximally
Nr_scattered = 0#5 # Nr of scattered ensembles
Ens_size = 20 # Ensemble size
GrpSize = Ens_size*NrInGroups
NrEnsembles = (Nr_clust_dist+Nr_clust_prox+Nr_scattered)
NrGroups = int(NrEnsembles/NrInGroups)
NrIn = NrEnsembles*Ens_size # nr of input neurons
init_weight = .2 # initial weight
signal_rate = 30.*br2.Hz # activation rate of synapses
t_stim = 50*br2.ms # stimulation length
buffertime = 50*br2.ms # resting time between stimulations
reps = 1 # nr of activations of each ensemble
# Equations input neuron
eqs_in = '''
dv/dt = (V_rest-v)/ms: volt
v2 = rand()<rate_v*dt :1 (constant over dt)
rate_v :Hz
ds_trace/dt = -s_trace/taux :1
'''
#####################################################
# Create neurons
#####################################################
N_input = br2.NeuronGroup(NrIn, eqs_in, threshold='v+v2*2*V_thresh>V_thresh',
reset='v=V_rest;s_trace+=x_reset*(taux/ms)', method='linear')#
Syn_1 = br2.Synapses(N_input,neuron,
model= eq_1_nonPlast,
on_pre = eq_2_nonPlast,
method='heun'
)
# distally clustered ensembles
c_ndx = np.floor(np.random.rand(Nr_clust_dist)*len(dc))
for cc in range(Nr_clust_dist):
Syn_1.connect(i=range(cc*Ens_size,(cc+1)*Ens_size),
j=neuron[dc[int(c_ndx[cc])]:dc[int(c_ndx[cc])]+1])
# proximally clustered ensembles
c_ndx2 = np.floor(np.random.rand(Nr_clust_prox)*len(pc))
for cc in range(Nr_clust_prox):
Syn_1.connect(i=range(cc*Ens_size+Nr_clust_dist*Ens_size,(cc+1)*Ens_size+Nr_clust_dist*Ens_size),
j=neuron[pc[int(c_ndx2[cc])]:pc[int(c_ndx2[cc])]+1])
# distributed ensembles
rand_post_comp = np.floor(np.random.rand(Ens_size*Nr_scattered)*(len(bsl)-axo-1)+axo+1)
for pp in range(Ens_size*(Nr_clust_dist+Nr_clust_prox),NrIn):
Syn_1.connect(i=pp,j=neuron[int(rand_post_comp[pp-Ens_size*(Nr_clust_dist+Nr_clust_prox)]):
int(rand_post_comp[pp-Ens_size*(Nr_clust_dist+Nr_clust_prox)]+1)])
# Initialize the model
neuron.v = EL
neuron.I = 0.*br2.nA
# neuron.I[0] = 0.2*nA
set_init_syn(Syn_1,init_weight)
set_init_nrn(neuron,Theta_low)
N_input.v = V_rest
#####################################################
# Run
#####################################################
print('Simulating ...')
for tt in range(reps*NrGroups):
print(tt)
N_input.rate_v[np.mod(tt,NrGroups)*GrpSize:np.mod(tt,NrGroups)*GrpSize+GrpSize] = signal_rate
br2.run(t_stim)
N_input.rate_v = np.zeros(NrIn)
br2.run(buffertime)
print('Simulation Finished!')
#store data in sec[1]
for sec in self.sections.values():
kk = sec[2]
sec[1] = monitor.v[kk:kk+len(sec[0].x)]
# sec[1] = monitor.gKL[kk:kk+len(sec[0].x)]
plt.figure()
plt.plot(monitor.t/br2.ms,monitor.v[0]/br2.mV)
return monitor, neuron
if __name__ == "__main__":
# test_MDL = '../0. Model/Branco2010_Morpho.swc'
# morphodata = BrancoData
# distal_compartments = distal_compartments_Branco_eff
# proximal_compartments = proximal_compartments_Branco
test_MDL = '../0. Model/Acker2008.swc'
morphodata = AckerData
test_model = BRIANModel(test_MDL)
M, nrn = test_model.run(morphodata)
test_model.animate(filename='animation_A.avi')
# test_model.show_shape(fovx=200,fovy=500)
# test_model.show_segm(segm_range=[dist_c,dist_c2],colorv='r',segm_range2=[prox_c],colorv2='b')
| [
"numpy.abs",
"numpy.amin",
"brian2.run",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.arange",
"brian2.SpatialNeuron",
"os.path.join",
"sys.path.append",
"brian2.start_scope",
"numpy.multiply",
"matplotlib.colors.Normalize",
"numpy.random.rand",
"matplotlib.cm.S... | [((345, 370), 'sys.path.append', 'sys.path.append', (['mod_path'], {}), '(mod_path)\n', (360, 370), False, 'import os, sys\n'), ((964, 981), 'brian2.start_scope', 'br2.start_scope', ([], {}), '()\n', (979, 981), True, 'import brian2 as br2\n'), ((317, 344), 'os.path.join', 'os.path.join', (['""".."""', '"""Model"""'], {}), "('..', 'Model')\n", (329, 344), False, 'import os, sys\n'), ((1338, 1373), 'brian2.Morphology.from_file', 'br2.Morphology.from_file', (['swc_model'], {}), '(swc_model)\n', (1362, 1373), True, 'import brian2 as br2\n'), ((5658, 5862), 'brian2.SpatialNeuron', 'br2.SpatialNeuron', ([], {'morphology': 'self.morpho', 'model': 'eqs', 'Cm': 'Capacit', 'Ri': 'R_axial', 'threshold': '"""v/mV>0"""', 'refractory': '"""v/mV > -10"""', 'threshold_location': '(0)', 'reset': '"""s_trace += x_reset*(taux/ms)"""', 'method': '"""heun"""'}), "(morphology=self.morpho, model=eqs, Cm=Capacit, Ri=R_axial,\n threshold='v/mV>0', refractory='v/mV > -10', threshold_location=0,\n reset='s_trace += x_reset*(taux/ms)', method='heun')\n", (5675, 5862), True, 'import brian2 as br2\n'), ((11223, 11254), 'oo_initScripts.set_init_nrn', 'set_init_nrn', (['neuron', 'Theta_low'], {}), '(neuron, Theta_low)\n', (11235, 11254), False, 'from oo_initScripts import set_init_nrn, set_init_syn\n'), ((12431, 12446), 'numpy.zeros', 'np.zeros', (['maxNr'], {}), '(maxNr)\n', (12439, 12446), True, 'import numpy as np\n'), ((12465, 12480), 'numpy.zeros', 'np.zeros', (['maxNr'], {}), '(maxNr)\n', (12473, 12480), True, 'import numpy as np\n'), ((17659, 17671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17669, 17671), True, 'import matplotlib.pyplot as plt\n'), ((17904, 17926), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""afmhot"""'], {}), "('afmhot')\n", (17916, 17926), True, 'import matplotlib.pyplot as plt\n'), ((17943, 17988), 'matplotlib.colors.Normalize', 'colorz.Normalize', ([], {'vmin': '(-75 * mV)', 'vmax': '(-0 * mV)'}), '(vmin=-75 * mV, vmax=-0 * mV)\n', (17959, 17988), True, 'import matplotlib.colors as colorz\n'), ((18005, 18047), 'matplotlib.cm.ScalarMappable', 'clrm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (18024, 18047), True, 'import matplotlib.cm as clrm\n'), ((18436, 18496), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'anim3d'], {'interval': '(20)', 'frames': 'nf'}), '(fig, anim3d, interval=20, frames=nf)\n', (18459, 18496), False, 'from matplotlib import animation\n'), ((18555, 18579), 'matplotlib.animation.FFMpegWriter', 'animation.FFMpegWriter', ([], {}), '()\n', (18577, 18579), False, 'from matplotlib import animation\n'), ((19003, 19020), 'numpy.zeros', 'np.zeros', (['[1, nf]'], {}), '([1, nf])\n', (19011, 19020), True, 'import numpy as np\n'), ((19162, 19176), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19174, 19176), True, 'import matplotlib.pyplot as plt\n'), ((19278, 19293), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19286, 19293), True, 'import matplotlib.pyplot as plt\n'), ((19349, 19371), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""afmhot"""'], {}), "('afmhot')\n", (19361, 19371), True, 'import matplotlib.pyplot as plt\n'), ((19418, 19454), 'matplotlib.colors.Normalize', 'colorz.Normalize', ([], {'vmin': '(-0.07)', 'vmax': '(0)'}), '(vmin=-0.07, vmax=0)\n', (19434, 19454), True, 'import matplotlib.colors as colorz\n'), ((19507, 19549), 'matplotlib.cm.ScalarMappable', 'clrm.ScalarMappable', ([], {'norm': 'cNorm', 
'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (19526, 19549), True, 'import matplotlib.cm as clrm\n'), ((19933, 19990), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'anim'], {'interval': '(2)', 'frames': 'nf'}), '(fig, anim, interval=2, frames=nf)\n', (19956, 19990), False, 'from matplotlib import animation\n'), ((20049, 20079), 'matplotlib.animation.FFMpegWriter', 'animation.FFMpegWriter', ([], {'fps': '(33)'}), '(fps=33)\n', (20071, 20079), False, 'from matplotlib import animation\n'), ((20358, 20372), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (20370, 20372), True, 'import matplotlib.pyplot as plt\n'), ((20580, 20593), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (20587, 20593), True, 'import numpy as np\n'), ((20611, 20624), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (20618, 20624), True, 'import numpy as np\n'), ((21250, 21292), 'matplotlib.colors.Normalize', 'colorz.Normalize', ([], {'vmin': 'minmin', 'vmax': 'maxmax'}), '(vmin=minmin, vmax=maxmax)\n', (21266, 21292), True, 'import matplotlib.colors as colorz\n'), ((21315, 21360), 'matplotlib.cm.ScalarMappable', 'clrm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'newcmap'}), '(norm=cNorm, cmap=newcmap)\n', (21334, 21360), True, 'import matplotlib.cm as clrm\n'), ((22344, 22358), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22356, 22358), True, 'import matplotlib.pyplot as plt\n'), ((22556, 22580), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (22568, 22580), True, 'import matplotlib.pyplot as plt\n'), ((22638, 22651), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (22645, 22651), True, 'import numpy as np\n'), ((22669, 22682), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (22676, 22682), True, 'import numpy as np\n'), ((22735, 22777), 'matplotlib.colors.Normalize', 'colorz.Normalize', ([], {'vmin': 'minmin', 'vmax': 'maxmax'}), '(vmin=minmin, vmax=maxmax)\n', (22751, 22777), True, 'import matplotlib.colors as colorz\n'), ((22939, 22981), 'matplotlib.cm.ScalarMappable', 'clrm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (22958, 22981), True, 'import matplotlib.cm as clrm\n'), ((23035, 23058), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['scalarMap'], {}), '(scalarMap)\n', (23047, 23058), True, 'import matplotlib.pyplot as plt\n'), ((23172, 23214), 'brian2.StateMonitor', 'br2.StateMonitor', (['neuron', '"""v"""'], {'record': '(True)'}), "(neuron, 'v', record=True)\n", (23188, 23214), True, 'import brian2 as br2\n'), ((23540, 23607), 'brian2.StateMonitor', 'br2.StateMonitor', (['neuron', '"""v"""'], {'record': '(True)', 'dt': '(20 * defaultclock.dt)'}), "(neuron, 'v', record=True, dt=20 * defaultclock.dt)\n", (23556, 23607), True, 'import brian2 as br2\n'), ((25434, 25568), 'brian2.NeuronGroup', 'br2.NeuronGroup', (['NrIn', 'eqs_in'], {'threshold': '"""v+v2*2*V_thresh>V_thresh"""', 'reset': '"""v=V_rest;s_trace+=x_reset*(taux/ms)"""', 'method': '"""linear"""'}), "(NrIn, eqs_in, threshold='v+v2*2*V_thresh>V_thresh', reset=\n 'v=V_rest;s_trace+=x_reset*(taux/ms)', method='linear')\n", (25449, 25568), True, 'import brian2 as br2\n'), ((25629, 25720), 'brian2.Synapses', 'br2.Synapses', (['N_input', 'neuron'], {'model': 'eq_1_nonPlast', 'on_pre': 'eq_2_nonPlast', 'method': '"""heun"""'}), "(N_input, neuron, model=eq_1_nonPlast, on_pre=eq_2_nonPlast,\n method='heun')\n", (25641, 25720), True, 'import brian2 as br2\n'), ((27052, 27084), 
'oo_initScripts.set_init_syn', 'set_init_syn', (['Syn_1', 'init_weight'], {}), '(Syn_1, init_weight)\n', (27064, 27084), False, 'from oo_initScripts import set_init_nrn, set_init_syn\n'), ((27092, 27123), 'oo_initScripts.set_init_nrn', 'set_init_nrn', (['neuron', 'Theta_low'], {}), '(neuron, Theta_low)\n', (27104, 27123), False, 'from oo_initScripts import set_init_nrn, set_init_syn\n'), ((27877, 27889), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27887, 27889), True, 'import matplotlib.pyplot as plt\n'), ((27898, 27949), 'matplotlib.pyplot.plot', 'plt.plot', (['(monitor.t / br2.ms)', '(monitor.v[0] / br2.mV)'], {}), '(monitor.t / br2.ms, monitor.v[0] / br2.mV)\n', (27906, 27949), True, 'import matplotlib.pyplot as plt\n'), ((13433, 13445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13443, 13445), True, 'import matplotlib.pyplot as plt\n'), ((14572, 14586), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14584, 14586), True, 'import matplotlib.pyplot as plt\n'), ((15751, 15765), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15763, 15765), True, 'import matplotlib.pyplot as plt\n'), ((16583, 16597), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16595, 16597), True, 'import matplotlib.pyplot as plt\n'), ((27511, 27526), 'brian2.run', 'br2.run', (['t_stim'], {}), '(t_stim)\n', (27518, 27526), True, 'import brian2 as br2\n'), ((27556, 27570), 'numpy.zeros', 'np.zeros', (['NrIn'], {}), '(NrIn)\n', (27564, 27570), True, 'import numpy as np\n'), ((27583, 27602), 'brian2.run', 'br2.run', (['buffertime'], {}), '(buffertime)\n', (27590, 27602), True, 'import brian2 as br2\n'), ((7883, 7938), 'numpy.multiply', 'np.multiply', (['(basalNa - gNa_diff)', '(basalNa - gNa_diff > 0)'], {}), '(basalNa - gNa_diff, basalNa - gNa_diff > 0)\n', (7894, 7938), True, 'import numpy as np\n'), ((13123, 13162), 'numpy.abs', 'np.abs', (['(dist_vec[jj + 1] - dist_vec[jj])'], {}), '(dist_vec[jj + 1] - dist_vec[jj])\n', (13129, 13162), True, 'import numpy as np\n'), ((19095, 19109), 'numpy.array', 'np.array', (['x[1]'], {}), '(x[1])\n', (19103, 19109), True, 'import numpy as np\n'), ((21460, 21489), 'numpy.arange', 'np.arange', (['minmin', '(maxmax + 1)'], {}), '(minmin, maxmax + 1)\n', (21469, 21489), True, 'import numpy as np\n'), ((25929, 25958), 'numpy.random.rand', 'np.random.rand', (['Nr_clust_dist'], {}), '(Nr_clust_dist)\n', (25943, 25958), True, 'import numpy as np\n'), ((26228, 26257), 'numpy.random.rand', 'np.random.rand', (['Nr_clust_prox'], {}), '(Nr_clust_prox)\n', (26242, 26257), True, 'import numpy as np\n'), ((8274, 8329), 'numpy.multiply', 'np.multiply', (['(basalKa + gKa_diff)', '(basalKa + gKa_diff > 0)'], {}), '(basalKa + gKa_diff, basalKa + gKa_diff > 0)\n', (8285, 8329), True, 'import numpy as np\n'), ((8395, 8450), 'numpy.multiply', 'np.multiply', (['(basalKa + gKa_diff)', '(basalKa + gKa_diff > 0)'], {}), '(basalKa + gKa_diff, basalKa + gKa_diff > 0)\n', (8406, 8450), True, 'import numpy as np\n'), ((8914, 8946), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8922, 8946), True, 'import numpy as np\n'), ((8986, 9018), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8994, 9018), True, 'import numpy as np\n'), ((10090, 10122), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (10098, 10122), True, 'import numpy as np\n'), ((10162, 10194), 'numpy.array', 'np.array', 
(['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (10170, 10194), True, 'import numpy as np\n'), ((10560, 10592), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (10568, 10592), True, 'import numpy as np\n'), ((13209, 13238), 'numpy.array', 'np.array', (['[sec_range[0] + jj]'], {}), '([sec_range[0] + jj])\n', (13217, 13238), True, 'import numpy as np\n'), ((10705, 10749), 'numpy.array', 'np.array', (['[40.0, 100.0, 500.0, 500.0, 500.0]'], {}), '([40.0, 100.0, 500.0, 500.0, 500.0])\n', (10713, 10749), True, 'import numpy as np\n'), ((26571, 26610), 'numpy.random.rand', 'np.random.rand', (['(Ens_size * Nr_scattered)'], {}), '(Ens_size * Nr_scattered)\n', (26585, 26610), True, 'import numpy as np\n'), ((27420, 27440), 'numpy.mod', 'np.mod', (['tt', 'NrGroups'], {}), '(tt, NrGroups)\n', (27426, 27440), True, 'import numpy as np\n'), ((7785, 7817), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (7793, 7817), True, 'import numpy as np\n'), ((8027, 8059), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8035, 8059), True, 'import numpy as np\n'), ((8128, 8160), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8136, 8160), True, 'import numpy as np\n'), ((8563, 8595), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8571, 8595), True, 'import numpy as np\n'), ((8619, 8651), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8627, 8651), True, 'import numpy as np\n'), ((8728, 8760), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8736, 8760), True, 'import numpy as np\n'), ((8789, 8821), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8797, 8821), True, 'import numpy as np\n'), ((9411, 9443), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9419, 9443), True, 'import numpy as np\n'), ((9756, 9788), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9764, 9788), True, 'import numpy as np\n'), ((9812, 9844), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9820, 9844), True, 'import numpy as np\n'), ((9921, 9953), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9929, 9953), True, 'import numpy as np\n'), ((9982, 10014), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9990, 10014), True, 'import numpy as np\n'), ((10794, 10833), 'numpy.array', 'np.array', (['[20.0, 35.0, 125.0, 250.0, 0]'], {}), '([20.0, 35.0, 125.0, 250.0, 0])\n', (10802, 10833), True, 'import numpy as np\n'), ((10879, 10929), 'numpy.array', 'np.array', (['[8000.0, 7000.0, 5000.0, 5000.0, 5000.0]'], {}), '([8000.0, 7000.0, 5000.0, 5000.0, 5000.0])\n', (10887, 10929), True, 'import numpy as np\n'), ((27448, 27468), 'numpy.mod', 'np.mod', (['tt', 'NrGroups'], {}), '(tt, NrGroups)\n', (27454, 27468), True, 'import numpy as np\n'), ((8176, 8208), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (8184, 8208), True, 'import numpy as np\n'), ((9459, 9491), 'numpy.array', 'np.array', (['self.distances[sec][:]'], {}), '(self.distances[sec][:])\n', (9467, 9491), True, 'import numpy as np\n')] |
import logging
import os
import pickle
import sys
import textwrap
from pathlib import Path
import numpy as np
import pandas as pd
from datarobot_drum.drum.artifact_predictors.keras_predictor import KerasPredictor
from datarobot_drum.drum.artifact_predictors.pmml_predictor import PMMLPredictor
from datarobot_drum.drum.artifact_predictors.sklearn_predictor import SKLearnPredictor
from datarobot_drum.drum.artifact_predictors.torch_predictor import PyTorchPredictor
from datarobot_drum.drum.artifact_predictors.xgboost_predictor import XGBoostPredictor
from datarobot_drum.drum.common import (
CUSTOM_FILE_NAME,
CustomHooks,
LOGGER_NAME_PREFIX,
NEGATIVE_CLASS_LABEL_ARG_KEYWORD,
POSITIVE_CLASS_LABEL_ARG_KEYWORD,
REGRESSION_PRED_COLUMN,
reroute_stdout_to_stderr,
)
from datarobot_drum.drum.custom_fit_wrapper import MAGIC_MARKER
from datarobot_drum.drum.exceptions import DrumCommonException
RUNNING_LANG_MSG = "Running environment language: Python."
class PythonModelAdapter:
def __init__(self, model_dir):
self._logger = logging.getLogger(LOGGER_NAME_PREFIX + "." + self.__class__.__name__)
# Get all the artifact predictors we have
# let `SKLearnPredictor` be the last item, as we iterate through this list to find the
# predictor for the given model artifact (based on the instance type of the estimator) it might
# overlap with other predictors especially the ones with `sklearn.pipeline`
self._artifact_predictors = [
KerasPredictor(),
XGBoostPredictor(),
PyTorchPredictor(),
PMMLPredictor(),
SKLearnPredictor(),
]
self._predictor_to_use = None
self._custom_hooks = {}
self._model = None
self._model_dir = model_dir
for hook in CustomHooks.ALL:
self._custom_hooks[hook] = None
def load_custom_hooks(self):
custom_file_paths = list(Path(self._model_dir).rglob("{}.py".format(CUSTOM_FILE_NAME)))
assert len(custom_file_paths) <= 1
if len(custom_file_paths) == 0:
print("No {}.py file detected in {}".format(CUSTOM_FILE_NAME, self._model_dir))
return
custom_file_path = custom_file_paths[0]
print("Detected {} .. trying to load hooks".format(custom_file_path))
sys.path.insert(0, os.path.dirname(custom_file_path))
try:
custom_module = __import__(CUSTOM_FILE_NAME)
for hook in CustomHooks.ALL:
self._custom_hooks[hook] = getattr(custom_module, hook, None)
if self._custom_hooks.get(CustomHooks.INIT):
# noinspection PyCallingNonCallable
self._custom_hooks[CustomHooks.INIT](code_dir=self._model_dir)
self._logger.debug("Hooks loaded: {}".format(self._custom_hooks))
except ImportError as e:
self._logger.error("Could not load hooks: {}".format(e))
raise DrumCommonException(
"\n\n{}\n"
"Failed loading hooks from [{}] : {}".format(RUNNING_LANG_MSG, custom_file_path, e)
)
def load_model_from_artifact(self):
"""
Load the serialized model from its artifact.
Returns
-------
Any
The deserialized model
Raises
------
DrumCommonException if model loading failed.
"""
if self._custom_hooks[CustomHooks.LOAD_MODEL]:
self._model = self._load_model_via_hook()
else:
model_artifact_file = self._detect_model_artifact_file()
self._model = self._load_via_predictors(model_artifact_file)
# If a score hook is not given we need to find a predictor that can handle this model
if not self._custom_hooks[CustomHooks.SCORE]:
self._find_predictor_to_use()
return self._model
def _load_model_via_hook(self):
self._logger.debug("Load model hook will be used to load the model")
# noinspection PyCallingNonCallable
try:
model = self._custom_hooks[CustomHooks.LOAD_MODEL](self._model_dir)
except Exception as exc:
raise type(exc)(
"Model loading hook failed to load model: {}".format(exc)
).with_traceback(sys.exc_info()[2]) from None
if not model:
raise DrumCommonException("Model loading hook failed to load model")
self._logger.debug("Model was successfully loaded by load hook")
return model
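# For reference, a minimal custom.py load_model hook that this method would
# invoke (the artifact name is hypothetical; the only contract shown by the
# code above is load_model(code_dir) -> model):
#
#     import os
#     import pickle
#
#     def load_model(code_dir):
#         with open(os.path.join(code_dir, "model.pkl"), "rb") as f:
#             return pickle.load(f)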
def _detect_model_artifact_file(self):
# No model was loaded - so there is no local hook - so we are using our artifact predictors
all_supported_extensions = set(p.artifact_extension for p in self._artifact_predictors)
self._logger.debug("Supported suffixes: {}".format(all_supported_extensions))
model_artifact_file = None
for filename in os.listdir(self._model_dir):
path = os.path.join(self._model_dir, filename)
if os.path.isdir(path):
continue
if any(filename.endswith(extension) for extension in all_supported_extensions):
if model_artifact_file:
raise DrumCommonException(
"\n\n{}\n"
"Multiple serialized model files found. Remove extra artifacts "
"or overwrite custom.load_model()".format(RUNNING_LANG_MSG)
)
model_artifact_file = path
if not model_artifact_file:
files_list = os.listdir(self._model_dir)
files_list_str = " | ".join(files_list)
raise DrumCommonException(
"\n\n{}\n"
"Could not find model artifact file in: {} supported by default predictors.\n"
"They support filenames with the following extensions {}.\n"
"If your artifact is not supported by default predictor, implement custom.load_model hook.\n"
"List of files got here are: {}".format(
RUNNING_LANG_MSG,
self._model_dir,
list(all_supported_extensions),
files_list_str,
)
)
self._logger.debug("model_artifact_file: {}".format(model_artifact_file))
return model_artifact_file
def _load_via_predictors(self, model_artifact_file):
model = None
pred_that_support_artifact = []
for pred in self._artifact_predictors:
if pred.is_artifact_supported(model_artifact_file):
pred_that_support_artifact.append(pred)
if pred.can_load_artifact(model_artifact_file):
try:
model = pred.load_model_from_artifact(model_artifact_file)
except Exception as exc:
raise type(exc)(
"Could not load model from artifact file: {}".format(exc)
).with_traceback(sys.exc_info()[2]) from None
break
if not model:
if len(pred_that_support_artifact) > 0:
framework_err = """
The following frameworks support this model artifact
but could not load the model. Check if requirements are missing
"""
for pred in pred_that_support_artifact:
framework_err += "Framework: {}, requirements: {}".format(
pred.name, pred.framework_requirements()
)
raise DrumCommonException(textwrap.dedent(framework_err))
else:
raise DrumCommonException(
"\n\n{}\n"
"Could not load model from artifact file {}."
" No builtin support for this model was detected".format(
RUNNING_LANG_MSG, model_artifact_file
)
)
self._model = model
return model
def _find_predictor_to_use(self):
self._predictor_to_use = None
for pred in self._artifact_predictors:
if pred.can_use_model(self._model):
self._predictor_to_use = pred
break
if not self._predictor_to_use and not self._custom_hooks[CustomHooks.SCORE]:
raise DrumCommonException(
"\n\n{}\n"
"Could not find any framework to handle loaded model and a {} "
"hook is not provided".format(RUNNING_LANG_MSG, CustomHooks.SCORE)
)
self._logger.debug("Predictor to use: {}".format(self._predictor_to_use.name))
@staticmethod
def _validate_data(to_validate, hook):
if not isinstance(to_validate, (pd.DataFrame, np.ndarray)):
raise ValueError(
"{} must return a DataFrame; but received {}".format(hook, type(to_validate))
)
def _validate_predictions(self, to_validate, positive_class_label, negative_class_label):
self._validate_data(to_validate, "Predictions")
columns_to_validate = set(list(to_validate.columns))
if positive_class_label and negative_class_label:
if columns_to_validate != {positive_class_label, negative_class_label}:
raise ValueError(
"Expected predictions to have columns {}, but encountered {}".format(
{positive_class_label, negative_class_label}, columns_to_validate
)
)
try:
added_probs = [
a + b
for a, b in zip(
to_validate[positive_class_label], to_validate[negative_class_label]
)
]
np.testing.assert_almost_equal(added_probs, 1)
except AssertionError:
raise ValueError("Your prediction probabilities do not add up to 1.")
elif columns_to_validate != {REGRESSION_PRED_COLUMN}:
raise ValueError(
"Expected predictions to have a single {} column, but encountered {}".format(
REGRESSION_PRED_COLUMN, columns_to_validate
)
)
def predict(self, data, model=None, **kwargs):
"""
Makes predictions against the model using the custom predict
method and returns a pandas DataFrame
If the model is a regression model, the DataFrame will have a single column "Predictions"
If the model is a classification model, the DataFrame will have a column for each class label
with their respective probabilities. Positive/negative class labels will be passed in kwargs under
positive_class_label/negative_class_label keywords.
Parameters
----------
data: pd.DataFrame
Data to make predictions against
model: Any
The model
kwargs
Returns
-------
pd.DataFrame
"""
if self._custom_hooks.get(CustomHooks.TRANSFORM):
try:
# noinspection PyCallingNonCallable
data = self._custom_hooks[CustomHooks.TRANSFORM](data, model)
except Exception as exc:
raise type(exc)(
"Model transform hook failed to transform dataset: {}".format(exc)
).with_traceback(sys.exc_info()[2]) from None
self._validate_data(data, CustomHooks.TRANSFORM)
positive_class_label = kwargs.get(POSITIVE_CLASS_LABEL_ARG_KEYWORD, None)
negative_class_label = kwargs.get(NEGATIVE_CLASS_LABEL_ARG_KEYWORD, None)
if self._custom_hooks.get(CustomHooks.SCORE):
try:
# noinspection PyCallingNonCallable
predictions = self._custom_hooks[CustomHooks.SCORE](data, model, **kwargs)
except Exception as exc:
raise type(exc)(
"Model score hook failed to make predictions: {}".format(exc)
).with_traceback(sys.exc_info()[2]) from None
else:
try:
predictions = self._predictor_to_use.predict(data, model, **kwargs)
except Exception as exc:
raise type(exc)("Failure when making predictions: {}".format(exc)).with_traceback(
sys.exc_info()[2]
) from None
if self._custom_hooks.get(CustomHooks.POST_PROCESS):
try:
# noinspection PyCallingNonCallable
predictions = self._custom_hooks[CustomHooks.POST_PROCESS](predictions, model)
except Exception as exc:
raise type(exc)(
"Model post-process hook failed to post-process predictions: {}".format(exc)
).with_traceback(sys.exc_info()[2]) from None
self._validate_predictions(predictions, positive_class_label, negative_class_label)
return predictions
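# A minimal end-to-end usage sketch (paths and labels are hypothetical; this
# also assumes the *_CLASS_LABEL_ARG_KEYWORD constants resolve to the keyword
# names used below):
#
#     adapter = PythonModelAdapter(model_dir="/tmp/model")
#     adapter.load_custom_hooks()
#     model = adapter.load_model_from_artifact()
#     preds = adapter.predict(pd.read_csv("/tmp/data.csv"), model=model,
#                             positive_class_label="yes",
#                             negative_class_label="no")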
def _drum_autofit_internal(self, X, y, output_dir):
"""
A user can surround an sklearn pipeline or estimator with the drum_autofit() function,
importable from drum, which will tag the object that is passed in with a magic variable.
This function searches through all the pipelines and estimators imported from all the
modules in the code directory and looks for this magic variable. If it finds it, it
loads the object here and calls fit on it, then serializes the fitted model out to
the output directory. If it can't find the wrapper, it returns False; if it
successfully runs fit, it returns True; otherwise it raises a DrumCommonException.
Returns
-------
Boolean, whether fit was run
"""
import sklearn
model_dir_limit = 100
marked_object = None
files_in_model_dir = list(Path(self._model_dir).rglob("*.py"))
if len(files_in_model_dir) == 0:
return False
if len(files_in_model_dir) > model_dir_limit:
self._logger.warning(
"There are more than {} files in this directory".format(model_dir_limit)
)
return False
for filepath in files_in_model_dir:
filename = os.path.basename(filepath)
sys.path.insert(0, os.path.dirname(filepath))
try:
module = __import__(filename[:-3])
except ImportError as e:
self._logger.warning(
"File at path {} could not be imported: {}".format(filepath, str(e))
)
continue
for object_name in dir(module):
_object = getattr(module, object_name)
if isinstance(_object, sklearn.base.BaseEstimator):
if hasattr(_object, MAGIC_MARKER):
marked_object = _object
break
if marked_object is not None:
marked_object.fit(X, y)
with open("{}/artifact.pkl".format(output_dir), "wb") as fp:
pickle.dump(marked_object, fp)
return True
return False
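# Sketch of the kind of user module this method detects (a hypothetical file
# in code_dir; assumes drum_autofit is importable from the package and sets
# MAGIC_MARKER on the estimator it wraps):
#
#     # my_pipeline.py
#     from sklearn.linear_model import LogisticRegression
#     from datarobot_drum import drum_autofit  # assumed import path
#
#     model = drum_autofit(LogisticRegression())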
def fit(self, X, y, output_dir, class_order=None, row_weights=None):
with reroute_stdout_to_stderr():
if self._custom_hooks.get(CustomHooks.FIT):
self._custom_hooks[CustomHooks.FIT](
X, y, output_dir, class_order=class_order, row_weights=row_weights
)
elif self._drum_autofit_internal(X, y, output_dir):
return
else:
hooks = [
"{}: {}".format(hook, fn is not None) for hook, fn in self._custom_hooks.items()
]
raise DrumCommonException(
"\n\n{}\n"
"\nfit() method must be implemented in a file named 'custom.py' in the provided code_dir: '{}' \n"
"Here is a list of files in this dir. {}\n"
"Here are the hooks your custom.py file has: {}".format(
RUNNING_LANG_MSG, self._model_dir, os.listdir(self._model_dir)[:100], hooks
)
)
| [
"os.listdir",
"textwrap.dedent",
"pickle.dump",
"datarobot_drum.drum.artifact_predictors.sklearn_predictor.SKLearnPredictor",
"os.path.basename",
"os.path.isdir",
"datarobot_drum.drum.artifact_predictors.torch_predictor.PyTorchPredictor",
"os.path.dirname",
"numpy.testing.assert_almost_equal",
"da... | [((1068, 1137), 'logging.getLogger', 'logging.getLogger', (["(LOGGER_NAME_PREFIX + '.' + self.__class__.__name__)"], {}), "(LOGGER_NAME_PREFIX + '.' + self.__class__.__name__)\n", (1085, 1137), False, 'import logging\n'), ((4940, 4967), 'os.listdir', 'os.listdir', (['self._model_dir'], {}), '(self._model_dir)\n', (4950, 4967), False, 'import os\n'), ((1522, 1538), 'datarobot_drum.drum.artifact_predictors.keras_predictor.KerasPredictor', 'KerasPredictor', ([], {}), '()\n', (1536, 1538), False, 'from datarobot_drum.drum.artifact_predictors.keras_predictor import KerasPredictor\n'), ((1552, 1570), 'datarobot_drum.drum.artifact_predictors.xgboost_predictor.XGBoostPredictor', 'XGBoostPredictor', ([], {}), '()\n', (1568, 1570), False, 'from datarobot_drum.drum.artifact_predictors.xgboost_predictor import XGBoostPredictor\n'), ((1584, 1602), 'datarobot_drum.drum.artifact_predictors.torch_predictor.PyTorchPredictor', 'PyTorchPredictor', ([], {}), '()\n', (1600, 1602), False, 'from datarobot_drum.drum.artifact_predictors.torch_predictor import PyTorchPredictor\n'), ((1616, 1631), 'datarobot_drum.drum.artifact_predictors.pmml_predictor.PMMLPredictor', 'PMMLPredictor', ([], {}), '()\n', (1629, 1631), False, 'from datarobot_drum.drum.artifact_predictors.pmml_predictor import PMMLPredictor\n'), ((1645, 1663), 'datarobot_drum.drum.artifact_predictors.sklearn_predictor.SKLearnPredictor', 'SKLearnPredictor', ([], {}), '()\n', (1661, 1663), False, 'from datarobot_drum.drum.artifact_predictors.sklearn_predictor import SKLearnPredictor\n'), ((2369, 2402), 'os.path.dirname', 'os.path.dirname', (['custom_file_path'], {}), '(custom_file_path)\n', (2384, 2402), False, 'import os\n'), ((4396, 4458), 'datarobot_drum.drum.exceptions.DrumCommonException', 'DrumCommonException', (['"""Model loading hook failed to load model"""'], {}), "('Model loading hook failed to load model')\n", (4415, 4458), False, 'from datarobot_drum.drum.exceptions import DrumCommonException\n'), ((4988, 5027), 'os.path.join', 'os.path.join', (['self._model_dir', 'filename'], {}), '(self._model_dir, filename)\n', (5000, 5027), False, 'import os\n'), ((5043, 5062), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5056, 5062), False, 'import os\n'), ((5604, 5631), 'os.listdir', 'os.listdir', (['self._model_dir'], {}), '(self._model_dir)\n', (5614, 5631), False, 'import os\n'), ((14391, 14417), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (14407, 14417), False, 'import os\n'), ((15378, 15404), 'datarobot_drum.drum.common.reroute_stdout_to_stderr', 'reroute_stdout_to_stderr', ([], {}), '()\n', (15402, 15404), False, 'from datarobot_drum.drum.common import CUSTOM_FILE_NAME, CustomHooks, LOGGER_NAME_PREFIX, NEGATIVE_CLASS_LABEL_ARG_KEYWORD, POSITIVE_CLASS_LABEL_ARG_KEYWORD, REGRESSION_PRED_COLUMN, reroute_stdout_to_stderr\n'), ((9866, 9912), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['added_probs', '(1)'], {}), '(added_probs, 1)\n', (9896, 9912), True, 'import numpy as np\n'), ((14449, 14474), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (14464, 14474), False, 'import os\n'), ((15215, 15245), 'pickle.dump', 'pickle.dump', (['marked_object', 'fp'], {}), '(marked_object, fp)\n', (15226, 15245), False, 'import pickle\n'), ((1957, 1978), 'pathlib.Path', 'Path', (['self._model_dir'], {}), '(self._model_dir)\n', (1961, 1978), False, 'from pathlib import Path\n'), ((7652, 7682), 'textwrap.dedent', 'textwrap.dedent', (['framework_err'], {}), 
'(framework_err)\n', (7667, 7682), False, 'import textwrap\n'), ((14005, 14026), 'pathlib.Path', 'Path', (['self._model_dir'], {}), '(self._model_dir)\n', (14009, 14026), False, 'from pathlib import Path\n'), ((4326, 4340), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4338, 4340), False, 'import sys\n'), ((11497, 11511), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (11509, 11511), False, 'import sys\n'), ((12152, 12166), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12164, 12166), False, 'import sys\n'), ((12452, 12466), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12464, 12466), False, 'import sys\n'), ((12924, 12938), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12936, 12938), False, 'import sys\n'), ((7047, 7061), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7059, 7061), False, 'import sys\n'), ((16263, 16290), 'os.listdir', 'os.listdir', (['self._model_dir'], {}), '(self._model_dir)\n', (16273, 16290), False, 'import os\n')] |
import numpy as np
from numpy import fft
from scipy.optimize import curve_fit
from scipy.interpolate import CubicSpline
from scipy.ndimage.filters import gaussian_filter1d
from scale import np_scale
from plotter_utils_consts import n_pts_smooth, default_fourier_n_harm
def gauss(x, a, x0, sigma):
return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def gaussian_fit(x, y, x_smooth=None, n_pts=n_pts_smooth):
"""
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to. Must have range [0,1].
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
mean, sigma = np.nanmean(y), np.nanstd(y)
popt, pcov = curve_fit(gauss, np_scale(x), y, p0=[1, mean, sigma],
maxfev=np.iinfo(np.int32).max)
y_smooth = gauss(np_scale(x_smooth), *popt)
return x_smooth, y_smooth
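# A minimal usage sketch (synthetic data; note that the x values are rescaled
# to [0, 1] internally, so the fitted x0 lives on that scale):
# >>> x = np.linspace(0, 10, 50)
# >>> y = gauss(np_scale(x), 2.0, 0.5, 0.2) + 0.01 * np.random.randn(50)
# >>> xs, ys = gaussian_fit(x, y, n_pts=200)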
def gaussian_filter_fit(x, y, x_smooth=None, n_pts=n_pts_smooth, sigma=None):
"""
Fits a Gaussian filter to some data - x and y. Returns predicted interpolation values.
Currently, smoothing is achieved by fitting a cubic spline to the gaussian filter fit
of `x` and `y`.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like, optional
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
sigma: numeric, optional
The standard deviation of the Gaussian kernel. A larger value yields a smoother curve,
but also reduces the closeness of the fit. By default, it is `4 * np.std(y)`.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x)-1, n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
sigma = sigma if sigma is not None else 4 * np.std(y)
gauss_filter_y = gaussian_filter1d(y, sigma)
cs = CubicSpline(x, gauss_filter_y)
y_smooth = cs(x_smooth)
return x_smooth, y_smooth
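# Example (synthetic noisy samples; a larger `sigma` smooths more aggressively):
# >>> x = np.linspace(0, 2 * np.pi, 100)
# >>> y = np.sin(x) + 0.1 * np.random.randn(100)
# >>> xs, ys = gaussian_filter_fit(x, y, n_pts=500, sigma=3)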
def poly_fit(x, y, degree, x_smooth=None, n_pts=n_pts_smooth):
"""
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
coefs = np.polyfit(x, y, degree)
y_smooth = np.polyval(coefs, x_smooth)
return x_smooth, y_smooth
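# Example: a cubic fit evaluated on a denser grid (synthetic data):
# >>> x = np.linspace(-2, 2, 30)
# >>> y = x ** 3 - x + 0.1 * np.random.randn(30)
# >>> xs, ys = poly_fit(x, y, degree=3, n_pts=300)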
def fourier_fit(x, y, n_predict=0, x_smooth=None, n_pts=n_pts_smooth,
n_harm=default_fourier_n_harm):
"""
Creates a Fourier fit of a NumPy array. Also supports extrapolation.
Credit goes to https://gist.github.com/tartakynov/83f3cd8f44208a1856ce.
Parameters
----------
x, y: numpy.ndarray
1D NumPy arrays of the x and y values to fit to.
Must not contain NaNs.
n_predict: int
The number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
x_smooth: list-like, optional
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
n_harm: int
The number of harmonics to use. A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
n_predict_smooth = int((len(x_smooth) / len(x)) * n_predict)
# These points are evenly spaced for the fourier fit implementation we use.
# More points are selected than are in `x_smooth` so we can interpolate accurately.
fourier_mult_pts = 2
x_smooth_fourier = np.linspace(x_smooth.min(), x_smooth.max(),
fourier_mult_pts * len(x_smooth))
y_smooth_fourier = np.interp(x_smooth_fourier, x, y)
n_predict_smooth_fourier = int((len(x_smooth_fourier) / len(x)) * n_predict)
# Perform the Fourier fit and extrapolation.
n = y_smooth_fourier.size
t = np.arange(0, n)
p = np.polyfit(t, y_smooth_fourier, 1) # find linear trend in arr
x_notrend = y_smooth_fourier - p[0] * t # detrended arr
x_freqdom = fft.fft(x_notrend) # detrended arr in frequency domain
f = fft.fftfreq(n) # frequencies
# sort indexes by frequency, lower -> higher
indexes = list(range(n))
indexes.sort(key=lambda i: np.absolute(x_freqdom[i]))
indexes.reverse()
t = np.arange(0, n + n_predict_smooth_fourier)
restored_sig = np.zeros(t.size)
for i in indexes[:1 + n_harm * 2]:
ampli = np.absolute(x_freqdom[i]) / n # amplitude
phase = np.angle(x_freqdom[i]) # phase
restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
y_smooth_fourier = restored_sig + p[0] * t
# Find the points in `x_smooth_fourier` that are near to points in `x_smooth`
# and then interpolate the y values to match the new x values.
x_smooth = x_smooth_fourier[np.searchsorted(x_smooth_fourier, x_smooth)]
# Ensure `x_smooth` includes the extrapolations.
mean_x_smooth_space = np.diff(x_smooth).mean()
x_predict_smooth = np.linspace(x_smooth[-1] + mean_x_smooth_space,
x_smooth[-1] + mean_x_smooth_space * n_predict_smooth,
n_predict_smooth)
x_smooth = np.concatenate((x_smooth, x_predict_smooth))
# Ensure `x_smooth_fourier` includes the extrapolations.
    mean_x_smooth_fourier_space = np.diff(x_smooth_fourier).mean()
x_predict_smooth_fourier = \
np.linspace(
x_smooth_fourier[-1] + mean_x_smooth_fourier_space,
x_smooth_fourier[-1] + mean_x_smooth_fourier_space * n_predict_smooth_fourier,
n_predict_smooth_fourier)
x_smooth_fourier = np.concatenate((x_smooth_fourier, x_predict_smooth_fourier))
y_smooth = np.interp(x_smooth, x_smooth_fourier, y_smooth_fourier)
return x_smooth, y_smooth | [
"numpy.absolute",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.polyfit",
"scipy.interpolate.CubicSpline",
"numpy.angle",
"numpy.iinfo",
"numpy.arange",
"numpy.exp",
"scale.np_scale",
"numpy.interp",
"numpy.nanmean",
"numpy.std",
"numpy.fft.fft",
"numpy.fft.fftfreq",
"numpy.linspace"... | [((2622, 2649), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['y', 'sigma'], {}), '(y, sigma)\n', (2639, 2649), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((2659, 2689), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['x', 'gauss_filter_y'], {}), '(x, gauss_filter_y)\n', (2670, 2689), False, 'from scipy.interpolate import CubicSpline\n'), ((5483, 5516), 'numpy.interp', 'np.interp', (['x_smooth_fourier', 'x', 'y'], {}), '(x_smooth_fourier, x, y)\n', (5492, 5516), True, 'import numpy as np\n'), ((5686, 5701), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (5695, 5701), True, 'import numpy as np\n'), ((5710, 5744), 'numpy.polyfit', 'np.polyfit', (['t', 'y_smooth_fourier', '(1)'], {}), '(t, y_smooth_fourier, 1)\n', (5720, 5744), True, 'import numpy as np\n'), ((5850, 5868), 'numpy.fft.fft', 'fft.fft', (['x_notrend'], {}), '(x_notrend)\n', (5857, 5868), False, 'from numpy import fft\n'), ((5914, 5928), 'numpy.fft.fftfreq', 'fft.fftfreq', (['n'], {}), '(n)\n', (5925, 5928), False, 'from numpy import fft\n'), ((6110, 6152), 'numpy.arange', 'np.arange', (['(0)', '(n + n_predict_smooth_fourier)'], {}), '(0, n + n_predict_smooth_fourier)\n', (6119, 6152), True, 'import numpy as np\n'), ((6172, 6188), 'numpy.zeros', 'np.zeros', (['t.size'], {}), '(t.size)\n', (6180, 6188), True, 'import numpy as np\n'), ((6805, 6930), 'numpy.linspace', 'np.linspace', (['(x_smooth[-1] + mean_x_smooth_space)', '(x_smooth[-1] + mean_x_smooth_space * n_predict_smooth)', 'n_predict_smooth'], {}), '(x_smooth[-1] + mean_x_smooth_space, x_smooth[-1] + \n mean_x_smooth_space * n_predict_smooth, n_predict_smooth)\n', (6816, 6930), True, 'import numpy as np\n'), ((7011, 7055), 'numpy.concatenate', 'np.concatenate', (['(x_smooth, x_predict_smooth)'], {}), '((x_smooth, x_predict_smooth))\n', (7025, 7055), True, 'import numpy as np\n'), ((7217, 7394), 'numpy.linspace', 'np.linspace', (['(x_smooth_fourier[-1] + mean_x_smooth_fourier_space)', '(x_smooth_fourier[-1] + mean_x_smooth_fourier_space * n_predict_smooth_fourier)', 'n_predict_smooth_fourier'], {}), '(x_smooth_fourier[-1] + mean_x_smooth_fourier_space, \n x_smooth_fourier[-1] + mean_x_smooth_fourier_space *\n n_predict_smooth_fourier, n_predict_smooth_fourier)\n', (7228, 7394), True, 'import numpy as np\n'), ((7446, 7506), 'numpy.concatenate', 'np.concatenate', (['(x_smooth_fourier, x_predict_smooth_fourier)'], {}), '((x_smooth_fourier, x_predict_smooth_fourier))\n', (7460, 7506), True, 'import numpy as np\n'), ((7522, 7577), 'numpy.interp', 'np.interp', (['x_smooth', 'x_smooth_fourier', 'y_smooth_fourier'], {}), '(x_smooth, x_smooth_fourier, y_smooth_fourier)\n', (7531, 7577), True, 'import numpy as np\n'), ((315, 356), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (321, 356), True, 'import numpy as np\n'), ((1169, 1182), 'numpy.nanmean', 'np.nanmean', (['y'], {}), '(y)\n', (1179, 1182), True, 'import numpy as np\n'), ((1184, 1196), 'numpy.nanstd', 'np.nanstd', (['y'], {}), '(y)\n', (1193, 1196), True, 'import numpy as np\n'), ((1231, 1242), 'scale.np_scale', 'np_scale', (['x'], {}), '(x)\n', (1239, 1242), False, 'from scale import np_scale\n'), ((1347, 1365), 'scale.np_scale', 'np_scale', (['x_smooth'], {}), '(x_smooth)\n', (1355, 1365), False, 'from scale import np_scale\n'), ((6303, 6325), 'numpy.angle', 'np.angle', (['x_freqdom[i]'], {}), '(x_freqdom[i])\n', (6311, 6325), True, 'import numpy as np\n'), ((6633, 6676), 
'numpy.searchsorted', 'np.searchsorted', (['x_smooth_fourier', 'x_smooth'], {}), '(x_smooth_fourier, x_smooth)\n', (6648, 6676), True, 'import numpy as np\n'), ((2591, 2600), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (2597, 2600), True, 'import numpy as np\n'), ((6244, 6269), 'numpy.absolute', 'np.absolute', (['x_freqdom[i]'], {}), '(x_freqdom[i])\n', (6255, 6269), True, 'import numpy as np\n'), ((6367, 6403), 'numpy.cos', 'np.cos', (['(2 * np.pi * f[i] * t + phase)'], {}), '(2 * np.pi * f[i] * t + phase)\n', (6373, 6403), True, 'import numpy as np\n'), ((6757, 6774), 'numpy.diff', 'np.diff', (['x_smooth'], {}), '(x_smooth)\n', (6764, 6774), True, 'import numpy as np\n'), ((7151, 7168), 'numpy.diff', 'np.diff', (['x_smooth'], {}), '(x_smooth)\n', (7158, 7168), True, 'import numpy as np\n'), ((1302, 1320), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1310, 1320), True, 'import numpy as np\n'), ((6053, 6078), 'numpy.absolute', 'np.absolute', (['x_freqdom[i]'], {}), '(x_freqdom[i])\n', (6064, 6078), True, 'import numpy as np\n'), ((3752, 3776), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'degree'], {}), '(x, y, degree)\n', (3762, 3776), True, 'import numpy as np\n')] |
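# fourier_fit's extrapolation, distilled: fit and remove a linear trend, FFT
# the residual, keep the `n_harm` largest-amplitude frequencies, and rebuild
# the signal on an index axis extended by `n_predict` steps. A standalone
# sketch (synthetic signal; mirrors the code above):
# >>> n, n_harm, n_predict = 200, 5, 50
# >>> y = np.sin(np.linspace(0, 8 * np.pi, n))
# >>> t = np.arange(n); p = np.polyfit(t, y, 1)
# >>> freq_dom = fft.fft(y - p[0] * t); f = fft.fftfreq(n)
# >>> top = sorted(range(n), key=lambda i: -np.absolute(freq_dom[i]))[:1 + 2 * n_harm]
# >>> t_ext = np.arange(n + n_predict)
# >>> restored = sum(np.absolute(freq_dom[i]) / n *
# ...                np.cos(2 * np.pi * f[i] * t_ext + np.angle(freq_dom[i]))
# ...                for i in top) + p[0] * t_ext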
import copy
import numpy
from . import splinefitstable
from .glam import glam, bspline
def pad_knots(knots, order=2):
"""
Pad knots out for full support at the boundaries
"""
pre = knots[0] - (knots[1]-knots[0])*numpy.arange(order, 0, -1)
post = knots[-1] + (knots[-1]-knots[-2])*numpy.arange(1, order+1)
return numpy.concatenate((pre, knots, post))
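# Example: a knot vector padded for full support of order-2 splines.
# >>> pad_knots(numpy.array([0., 1., 2., 3.]), order=2)
# array([-2., -1.,  0.,  1.,  2.,  3.,  4.,  5.])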
class TableSlice(object):
"""A slice of a photonics table, with spline CDF and PDF evaluates."""
table_pdf = None
table_cdf = None
spline_pdf = None
spline_cdf = None
edges = None
centers = None
def __init__(self, table, spline, slices, density = 1):
self.table = table
self.spline = spline
self.slices = slices[:table.values.ndim]
self.density = density
self.make_grid()
if spline.level == 2:
if len(spline.knots) == 3:
norm = True
is_log = False
else:
norm = False
is_log = True
else:
if len(spline.knots) == 4:
norm = True
is_log = False
else:
norm = False
is_log = True
self.slice(is_log, norm)
self.eval(is_log)
def collapse(self):
"""Collapse non-extended dimensions, returning 1 or 2-d arrays
for use with matplotlib.
"""
new = copy.copy(self)
ext_dims = [i for i in range(len(self.centers)) if len(self.centers[i]) > 1]
new.edges = [self.edges[i] for i in ext_dims]
new.centers = [self.centers[i] for i in ext_dims]
return new
def flatten(self):
"""Flatten grid to columns for use with Gnuplot"""
coords = numpy.meshgrid_nd(*self.centers,**{'lex_order':True})
coords = numpy.column_stack([arr.reshape(arr.size) for arr in coords])
bigdat = numpy.column_stack((coords,self.table_cdf.flatten()))
if self.table_pdf is not None:
bigdat = numpy.column_stack((bigdat,self.table_pdf.flatten()))
bigdat = numpy.column_stack((bigdat,self.spline_cdf.flatten()))
if self.spline_pdf is not None:
bigdat = numpy.column_stack((bigdat,self.spline_pdf.flatten()))
return bigdat
def make_grid(self, density = 1):
slices = self.slices
centers = [bins[_slice] for bins,_slice in zip(self.table.bin_centers,slices)]
for i,sl in enumerate(slices):
if isinstance(sl,int):
centers[i] = numpy.array([centers[i]])
elif self.density > 1: # add extra grid points in between the bin centers
gaps = numpy.diff(centers[i])
extras = []
for j in range(1,self.density):
scale = j/float(self.density)
extras.append(centers[i][:-1]+scale*gaps)
centers[i] = numpy.concatenate(tuple([centers[i]] + extras))
centers[i].sort()
self.centers = centers
widths = [bins[_slice] for bins,_slice in zip(self.table.bin_widths,slices)]
for i,sl in enumerate(slices):
if isinstance(sl,int):
widths[i] = numpy.array([widths[i]])
elif self.density > 1:
# subdividing the widths is much easier!
rep = self.density*numpy.ones(widths[i].size, dtype=int)
rep[-1] = 1
widths[i] = (widths[i]/self.density).repeat(rep)
edges = [c - w/2.0 for c,w in zip(centers,widths)]
edges = [numpy.append(e, c[-1]+w[-1]/2.0) for e,c,w in zip(edges,centers,widths)]
self.edges = edges
if len(self.spline.knots) == 4:
# XXX: evaluate CDF at right edge of the time bin
self.centers[3] = self.edges[3][1:]
def slice(self, is_log = False, norm = True):
timing = len(self.spline.knots) == 4
table_pdf = None
table_cdf = None
slices = self.slices
table = self.table
if self.table.normed and self.slices[-1] == slice(None): # already normed
table_cdf = self.table.values[slices].copy()
if timing:
print(table_cdf.shape, table.bin_widths[-1].size)
table_pdf = numpy.append([0],
numpy.diff(table_cdf, axis=-1))/table.bin_widths[-1]
elif self.table.normed:
# already normed, but we're slicing perpendicular to time.
# skip the PDF.
table_cdf = self.table.values[slices].copy()
elif len(self.spline.knots) == 3:
# abs spline, just sum amplitudes
tslices = list(self.slices)[:3]
tslices += [slice(None)]
bigslice = self.table.values[tslices]
table_cdf = numpy.sum(bigslice, axis=-1)
elif timing and self.slices[-1] == slice(None): # we're slicing in time, compute CDF for slice
table_slice = self.table.values[slices]
#table_cdf = numpy.cumsum(table_slice*table.bin_widths[3], axis=-1)
table_cdf = numpy.cumsum(table_slice, axis=-1)
#table_pdf = table_slice
table_pdf = table_slice/table.bin_widths[3]
if norm:
normslices = [slice(None)]*len(table_cdf.shape)
normslices[-1] = -1
newshape = list(table_cdf.shape)
newshape[-1] = 1
normval = table_cdf[tuple(normslices)].reshape(tuple(newshape))
table_cdf /= normval
table_pdf /= normval
else:
# we're slicing perpendicular to time, so slice in the dim-t plane
# , compute the CDF, then slice in t
tslices = list(self.slices)
if timing:
tslices[-1] = slice(None)
else:
tslices += [slice(None)]
print(tslices)
bigslice = self.table.values[tslices]
table_cdf = numpy.cumsum(bigslice, axis=-1)
# remove integer indexes, since those dimensions are already gone
tslices = [t for t in tslices if not isinstance(t,int)]
# now slice at the time we want
tslices[-1] = slices[-1]
nslice = [slice(None)]*table_cdf.ndim
nslice[-1] = -1
normval = table_cdf[tuple(nslice)]
table_cdf = table_cdf[tslices]
if timing:
# careful! table_pdf is a view into table.values
table_pdf = table.values[slices].copy()
else:
table_cdf = normval
if norm:
table_cdf /= normval
table_pdf /= normval
if self.density > 1: # insert zeros into table values
expanded_shape = tuple([s + (self.density-1)*(s-1) for s in table_cdf.shape])
insert_slices = tuple([slice(None, None, self.density)] * len(table_cdf.shape))
t_cdf = numpy.zeros(expanded_shape)
t_cdf[insert_slices] = table_cdf
table_cdf = t_cdf
if timing:
t_pdf = numpy.zeros(expanded_shape)
t_pdf[insert_slices] = table_pdf
table_pdf = t_pdf
self.table_cdf = table_cdf
self.table_pdf = table_pdf
def eval_cdf(self, is_log):
vals = glam.grideval(self.spline, self.centers)
shape = vals.shape
vals = vals[numpy.isfinite(vals)] + self.spline.bias
shape = tuple([shape[i] for i in range(len(shape)) if shape[i] > 1])
vals = vals.reshape(shape)
if is_log:
vals = numpy.exp(vals)
self.spline_cdf = vals
def eval_pdf(self, is_log):
splfuncs = [bspline.bspline]*4
splfuncs[-1] = bspline.bspline_deriv
deriv_basis = [bspline.splinebasis(self.spline.knots[i], self.spline.order[i],self.centers[i],
self.spline.periods[i],spline=splfuncs[i]) for i in range(0,len(self.spline.knots))]
pdf_vals = glam.grideval(self.spline, self.centers, bases = deriv_basis)
shape = pdf_vals.shape
pdf_vals = pdf_vals[numpy.isfinite(pdf_vals)] #+ spline.bias
if is_log:
# chain rule!
pdf_vals *= self.spline_cdf
shape = tuple([shape[i] for i in range(len(shape)) if shape[i] > 1])
pdf_vals = pdf_vals.reshape(shape)
self.spline_pdf = pdf_vals
def eval(self, is_log = False):
"""is_log => fit is in log-space"""
self.eval_cdf(is_log)
# only calculate PDFs for timing fits
if (len(self.spline.knots)) == 4:
self.eval_pdf(is_log)
| [
"numpy.sum",
"copy.copy",
"numpy.zeros",
"numpy.isfinite",
"numpy.ones",
"numpy.append",
"numpy.cumsum",
"numpy.diff",
"numpy.arange",
"numpy.exp",
"numpy.array",
"numpy.meshgrid_nd",
"numpy.concatenate"
] | [((319, 356), 'numpy.concatenate', 'numpy.concatenate', (['(pre, knots, post)'], {}), '((pre, knots, post))\n', (336, 356), False, 'import numpy\n'), ((1176, 1191), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1185, 1191), False, 'import copy\n'), ((1471, 1526), 'numpy.meshgrid_nd', 'numpy.meshgrid_nd', (['*self.centers'], {}), "(*self.centers, **{'lex_order': True})\n", (1488, 1526), False, 'import numpy\n'), ((217, 243), 'numpy.arange', 'numpy.arange', (['order', '(0)', '(-1)'], {}), '(order, 0, -1)\n', (229, 243), False, 'import numpy\n'), ((286, 312), 'numpy.arange', 'numpy.arange', (['(1)', '(order + 1)'], {}), '(1, order + 1)\n', (298, 312), False, 'import numpy\n'), ((2989, 3025), 'numpy.append', 'numpy.append', (['e', '(c[-1] + w[-1] / 2.0)'], {}), '(e, c[-1] + w[-1] / 2.0)\n', (3001, 3025), False, 'import numpy\n'), ((5722, 5749), 'numpy.zeros', 'numpy.zeros', (['expanded_shape'], {}), '(expanded_shape)\n', (5733, 5749), False, 'import numpy\n'), ((6259, 6274), 'numpy.exp', 'numpy.exp', (['vals'], {}), '(vals)\n', (6268, 6274), False, 'import numpy\n'), ((6728, 6752), 'numpy.isfinite', 'numpy.isfinite', (['pdf_vals'], {}), '(pdf_vals)\n', (6742, 6752), False, 'import numpy\n'), ((2161, 2186), 'numpy.array', 'numpy.array', (['[centers[i]]'], {}), '([centers[i]])\n', (2172, 2186), False, 'import numpy\n'), ((2699, 2723), 'numpy.array', 'numpy.array', (['[widths[i]]'], {}), '([widths[i]])\n', (2710, 2723), False, 'import numpy\n'), ((5833, 5860), 'numpy.zeros', 'numpy.zeros', (['expanded_shape'], {}), '(expanded_shape)\n', (5844, 5860), False, 'import numpy\n'), ((6095, 6115), 'numpy.isfinite', 'numpy.isfinite', (['vals'], {}), '(vals)\n', (6109, 6115), False, 'import numpy\n'), ((2275, 2297), 'numpy.diff', 'numpy.diff', (['centers[i]'], {}), '(centers[i])\n', (2285, 2297), False, 'import numpy\n'), ((4013, 4041), 'numpy.sum', 'numpy.sum', (['bigslice'], {'axis': '(-1)'}), '(bigslice, axis=-1)\n', (4022, 4041), False, 'import numpy\n'), ((2818, 2855), 'numpy.ones', 'numpy.ones', (['widths[i].size'], {'dtype': 'int'}), '(widths[i].size, dtype=int)\n', (2828, 2855), False, 'import numpy\n'), ((3613, 3643), 'numpy.diff', 'numpy.diff', (['table_cdf'], {'axis': '(-1)'}), '(table_cdf, axis=-1)\n', (3623, 3643), False, 'import numpy\n'), ((4268, 4302), 'numpy.cumsum', 'numpy.cumsum', (['table_slice'], {'axis': '(-1)'}), '(table_slice, axis=-1)\n', (4280, 4302), False, 'import numpy\n'), ((4947, 4978), 'numpy.cumsum', 'numpy.cumsum', (['bigslice'], {'axis': '(-1)'}), '(bigslice, axis=-1)\n', (4959, 4978), False, 'import numpy\n')] |
"""Tests for the `illustris_python.snapshot` submodule.
Running Tests
-------------
To run all tests, this script can be executed as:
`$ python tests/snapshot_test.py [-v] [--nocapture]`
from the root directory.
Alternatively, `nosetests` can be run and it will find the tests:
`$ nosetests [-v] [--nocapture]`
To run particular tests (for example),
`$ nosetests tests/snapshot_test.py:test_snapshot_partTypeNum_1`
To include coverage information,
`$ nosetests --with-coverage --cover-package=.`
"""
import numpy as np
from nose.tools import assert_equal, assert_raises, assert_true
# `illustris_python` is imported as `ill` in local `__init__.py`
from . import ill, BASE_PATH_ILLUSTRIS_1
def test_snapshot_partTypeNum_1():
names = ['gas', 'dm', 'tracers', 'stars', 'blackhole', 'GaS', 'blackholes']
nums = [0, 1, 3, 4, 5, 0, 5]
for name, num in zip(names, nums):
pn = ill.snapshot.partTypeNum(name)
print("\npartTypeNum('{}') = '{}' (should be '{}')".format(name, pn, num))
assert_equal(pn, num)
return
def test_snapshot_partTypeNum_2():
# These should fail and raise an exception
names = ['peanuts', 'monkeys']
nums = [0, 1]
for name, num in zip(names, nums):
print("\npartTypeNum('{}') should raise `Exception`".format(name))
assert_raises(Exception, ill.snapshot.partTypeNum, name)
return
'''
# Too slow
def test_loadSubset():
from datetime import datetime
snap = 135
fields = ['Masses']
beg = datetime.now()
gas_mass = ill.snapshot.loadSubset(BASE_PATH_ILLUSTRIS_1, snap, 'gas', fields=fields)
print("Loaded after '{}'".format(datetime.now() - beg))
print(np.shape(gas_mass))
print(np.log10(np.mean(gas_mass, dtype='double')*1e10/0.704))
return
'''
def test_loadHalo():
snap = 135
halo_num = 100
# Values for Illustris-1, snap=135, halo 100
coords = [[19484.6576131, 20662.6423522],
[54581.7254122, 55598.2078751],
[60272.0348192, 61453.9991835]]
num_star_keys = 15
stars = ill.snapshot.loadHalo(BASE_PATH_ILLUSTRIS_1, snap, halo_num, 'stars')
assert_equal(len(stars.keys()), num_star_keys)
for i in range(3):
_min = np.min(stars['Coordinates'][:, i])
_max = np.max(stars['Coordinates'][:, i])
print("Coords axis '{}' min, max: {}, {} (should be {}, {})".format(
i, _min, _max, coords[i][0], coords[i][1]))
assert_true(np.isclose(_min, coords[i][0]))
assert_true(np.isclose(_max, coords[i][1]))
return
| [
"nose.tools.assert_equal",
"numpy.min",
"numpy.max",
"numpy.isclose",
"nose.tools.assert_raises"
] | [((1036, 1057), 'nose.tools.assert_equal', 'assert_equal', (['pn', 'num'], {}), '(pn, num)\n', (1048, 1057), False, 'from nose.tools import assert_equal, assert_raises, assert_true\n'), ((1330, 1386), 'nose.tools.assert_raises', 'assert_raises', (['Exception', 'ill.snapshot.partTypeNum', 'name'], {}), '(Exception, ill.snapshot.partTypeNum, name)\n', (1343, 1386), False, 'from nose.tools import assert_equal, assert_raises, assert_true\n'), ((2238, 2272), 'numpy.min', 'np.min', (["stars['Coordinates'][:, i]"], {}), "(stars['Coordinates'][:, i])\n", (2244, 2272), True, 'import numpy as np\n'), ((2288, 2322), 'numpy.max', 'np.max', (["stars['Coordinates'][:, i]"], {}), "(stars['Coordinates'][:, i])\n", (2294, 2322), True, 'import numpy as np\n'), ((2476, 2506), 'numpy.isclose', 'np.isclose', (['_min', 'coords[i][0]'], {}), '(_min, coords[i][0])\n', (2486, 2506), True, 'import numpy as np\n'), ((2528, 2558), 'numpy.isclose', 'np.isclose', (['_max', 'coords[i][1]'], {}), '(_max, coords[i][1])\n', (2538, 2558), True, 'import numpy as np\n')] |
import json
import pickle
import event_model
import numpy
import pytest
def test_documents():
dn = event_model.DocumentNames
for k in ('stop', 'start', 'descriptor',
'event', 'bulk_events', 'datum',
'resource', 'bulk_datum', 'event_page', 'datum_page'):
assert dn(k) == getattr(dn, k)
def test_len():
assert 10 == len(event_model.DocumentNames)
def test_schemas():
for k in event_model.DocumentNames:
assert k in event_model.SCHEMA_NAMES
assert event_model.schemas[k]
def test_schema_validators():
for name in event_model.schemas.keys():
assert name in event_model.schema_validators
assert len(event_model.schema_validators) == len(event_model.schemas)
def test_compose_run():
# Compose each kind of document type. These calls will trigger
# jsonschema.validate and ensure that the document-generation code composes
# valid documents.
bundle = event_model.compose_run()
start_doc, compose_descriptor, compose_resource, compose_stop = bundle
assert bundle.start_doc is start_doc
assert bundle.compose_descriptor is compose_descriptor
assert bundle.compose_resource is compose_resource
assert bundle.compose_stop is compose_stop
bundle = compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
descriptor_doc, compose_event, compose_event_page = bundle
assert bundle.descriptor_doc is descriptor_doc
assert bundle.compose_event is compose_event
assert bundle.compose_event_page is compose_event_page
bundle = compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
resource_doc, compose_datum, compose_datum_page = bundle
assert bundle.resource_doc is resource_doc
assert bundle.compose_datum is compose_datum
assert bundle.compose_datum_page is compose_datum_page
datum_doc = compose_datum(datum_kwargs={'slice': 5})
event_doc = compose_event(
data={'motor': 0, 'image': datum_doc['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False})
datum_page = compose_datum_page(datum_kwargs={'slice': [10, 15]})
event_page = compose_event_page(data={'motor': [1, 2], 'image':
datum_page['datum_id']},
timestamps={'motor': [0, 0],
'image': [0, 0]},
filled={'image': [False, False]},
seq_num=[1, 2])
assert 'descriptor' in event_doc
assert 'descriptor' in event_page
assert event_doc['seq_num'] == 1
stop_doc = compose_stop()
assert 'primary' in stop_doc['num_events']
assert stop_doc['num_events']['primary'] == 3
def test_round_trip_pagination():
run_bundle = event_model.compose_run()
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
res_bundle = run_bundle.compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
datum_doc3 = res_bundle.compose_datum(datum_kwargs={'slice': 15})
event_doc1 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc1['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
event_doc2 = desc_bundle.compose_event(
data={'motor': 1, 'image': datum_doc2['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
event_doc3 = desc_bundle.compose_event(
data={'motor': 2, 'image': datum_doc3['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
# Round trip single event -> event_page -> event.
expected = event_doc1
actual, = event_model.unpack_event_page(
event_model.pack_event_page(expected))
assert actual == expected
# Round trip two events -> event_page -> events.
expected = [event_doc1, event_doc2]
actual = list(event_model.unpack_event_page(
event_model.pack_event_page(*expected)))
assert actual == expected
# Round trip three events -> event_page -> events.
expected = [event_doc1, event_doc2, event_doc3]
actual = list(event_model.unpack_event_page(
event_model.pack_event_page(*expected)))
assert actual == expected
# Round trip on docs that don't have a filled key
unfilled_doc1 = event_doc1
unfilled_doc1.pop('filled')
unfilled_doc2 = event_doc2
unfilled_doc2.pop('filled')
unfilled_doc3 = event_doc3
unfilled_doc3.pop('filled')
expected = [unfilled_doc1, unfilled_doc2, unfilled_doc3]
actual = list(event_model.unpack_event_page(
event_model.pack_event_page(*expected)))
for doc in actual:
doc.pop('filled')
assert actual == expected
# Round trip one datum -> datum_page -> datum.
expected = datum_doc1
actual, = event_model.unpack_datum_page(
event_model.pack_datum_page(expected))
assert actual == expected
# Round trip two datum -> datum_page -> datum.
expected = [datum_doc1, datum_doc2]
actual = list(event_model.unpack_datum_page(
event_model.pack_datum_page(*expected)))
assert actual == expected
# Round trip three datum -> datum_page -> datum.
expected = [datum_doc1, datum_doc2, datum_doc3]
actual = list(event_model.unpack_datum_page(
event_model.pack_datum_page(*expected)))
assert actual == expected
# Check edge case where datum_kwargs are empty.
datum_doc1 = res_bundle.compose_datum(datum_kwargs={})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={})
datum_doc3 = res_bundle.compose_datum(datum_kwargs={})
# Round trip one datum -> datum_page -> datum.
expected = datum_doc1
actual, = event_model.unpack_datum_page(
event_model.pack_datum_page(expected))
assert actual == expected
# Round trip two datum -> datum_page -> datum.
expected = [datum_doc1, datum_doc2]
actual = list(event_model.unpack_datum_page(
event_model.pack_datum_page(*expected)))
assert actual == expected
# Round trip three datum -> datum_page -> datum.
expected = [datum_doc1, datum_doc2, datum_doc3]
actual = list(event_model.unpack_datum_page(
event_model.pack_datum_page(*expected)))
assert actual == expected
def test_bulk_events_to_event_page(tmp_path):
run_bundle = event_model.compose_run()
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
desc_bundle_baseline = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='baseline')
path_root = str(tmp_path)
res_bundle = run_bundle.compose_resource(
spec='TIFF', root=path_root, resource_path='stack.tiff',
resource_kwargs={})
datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
event1 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc1['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
event2 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc2['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=2)
event3 = desc_bundle_baseline.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
primary_event_page = event_model.pack_event_page(event1, event2)
baseline_event_page = event_model.pack_event_page(event3)
bulk_events = {'primary': [event1, event2], 'baseline': [event3]}
pages = event_model.bulk_events_to_event_pages(bulk_events)
assert tuple(pages) == (primary_event_page, baseline_event_page)
def test_sanitize_doc():
run_bundle = event_model.compose_run()
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
desc_bundle_baseline = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='baseline')
event1 = desc_bundle.compose_event(
data={'motor': 0, 'image': numpy.ones((512, 512))},
timestamps={'motor': 0, 'image': 0}, filled={'image': True},
seq_num=1)
event2 = desc_bundle.compose_event(
data={'motor': 0, 'image': numpy.ones((512, 512))},
timestamps={'motor': 0, 'image': 0}, filled={'image': True},
seq_num=2)
event3 = desc_bundle_baseline.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
event_page = event_model.pack_event_page(event1, event2)
bulk_events = {'primary': [event1, event2], 'baseline': [event3]}
json.dumps(event_model.sanitize_doc(event_page))
json.dumps(event_model.sanitize_doc(bulk_events))
json.dumps(event_model.sanitize_doc(event1))
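    # The events above embed numpy arrays; sanitize_doc converts numpy types
    # to JSON-serializable builtins, which is why json.dumps succeeds here.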
def test_bulk_datum_to_datum_page():
run_bundle = event_model.compose_run()
res_bundle = run_bundle.compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
datum1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
actual = event_model.pack_datum_page(datum1, datum2)
bulk_datum = {'resource': res_bundle.resource_doc['uid'],
'datum_kwarg_list': [datum1['datum_kwargs'],
datum2['datum_kwargs']],
'datum_ids': [datum1['datum_id'], datum2['datum_id']]}
expected = event_model.bulk_datum_to_datum_page(bulk_datum)
assert actual == expected
def test_document_router_smoke_test():
dr = event_model.DocumentRouter()
run_bundle = event_model.compose_run()
dr('start', run_bundle.start_doc)
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
dr('descriptor', desc_bundle.descriptor_doc)
desc_bundle_baseline = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='baseline')
dr('descriptor', desc_bundle_baseline.descriptor_doc)
res_bundle = run_bundle.compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
dr('resource', res_bundle.resource_doc)
datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
dr('datum', datum_doc1)
dr('datum', datum_doc2)
event1 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc1['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
dr('event', event1)
event2 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc2['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=2)
dr('event', event2)
event3 = desc_bundle_baseline.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
dr('event', event3)
dr('stop', run_bundle.compose_stop())
def test_document_router_with_validation():
dr = event_model.DocumentRouter()
run_bundle = event_model.compose_run()
dr('start', run_bundle.start_doc, validate=True)
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
dr('descriptor', desc_bundle.descriptor_doc, validate=True)
desc_bundle_baseline = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='baseline')
dr('descriptor', desc_bundle_baseline.descriptor_doc, validate=True)
res_bundle = run_bundle.compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
dr('resource', res_bundle.resource_doc, validate=True)
datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
dr('datum', datum_doc1, validate=True)
dr('datum', datum_doc2, validate=True)
event1 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc1['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
dr('event', event1, validate=True)
event2 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc2['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=2)
dr('event', event2, validate=True)
event3 = desc_bundle_baseline.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
dr('event', event3, validate=True)
dr('stop', run_bundle.compose_stop(), validate=True)
def test_document_router_dispatch_event():
event_calls = [] # used for counting calls
event_page_calls = [] # used for counting calls
# example documents
event1 = {'data': {'x': 1},
'timestamps': {'x': 0.},
'uid': 'placeholder X',
'descriptor': 'placeholder Y',
'time': 0.,
'seq_num': 1}
event2 = {'data': {'x': 2},
'timestamps': {'x': 1.},
'uid': 'placeholder X',
'descriptor': 'placeholder Y',
'time': 1.,
'seq_num': 2}
event_page = event_model.pack_event_page(event1, event2)
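    # Observed contract of DocumentRouter, as exercised by the classes below:
    # a handler that returns NotImplemented makes the router convert the
    # document (packing/unpacking between Event and EventPage) and try the
    # sibling handler once; a real document, never NotImplemented, comes back.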
def check(ret, original=None):
name, doc = ret
assert doc is not None
assert doc is not NotImplemented
if original is not None:
# Verify that a copy is returned.
assert doc is not original # ret is such a poser, dude.
doc.pop('filled', None)
original.pop('filled', None)
assert doc == original
class DefinesNeitherEventNorEventPage(event_model.DocumentRouter):
def event(self, doc):
event_calls.append(object())
# This returns NotImplemented.
            return super().event(doc)
def event_page(self, doc):
event_page_calls.append(object())
# This returns NotImplemented.
return super().event_page(doc)
dr = DefinesNeitherEventNorEventPage()
# Test that Event is routed to Event and EventPage.
check(dr('event', event1))
assert len(event_calls) == 1
assert len(event_page_calls) == 1
event_calls.clear()
event_page_calls.clear()
# Test that EventPage is routed to EventPage and Event *once* before
# giving up.
check(dr('event_page', event_page))
assert len(event_page_calls) == 1
assert len(event_calls) == 1
event_calls.clear()
event_page_calls.clear()
class DefinesEventNotEventPage(event_model.DocumentRouter):
def event(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['data']['x'] == doc['seq_num']
event_calls.append(object())
return dict(doc)
def event_page(self, doc):
event_page_calls.append(object())
# This returns NotImplemented.
return super().event_page(doc)
dr = DefinesEventNotEventPage()
# Test that Event is routed to Event.
check(dr('event', event1), event1)
assert len(event_calls) == 1
assert len(event_page_calls) == 0
event_calls.clear()
event_page_calls.clear()
# Test that EventPage is unpacked and routed to Event one at a time.
check(dr('event_page', event_page), event_page)
assert len(event_page_calls) == 1
assert len(event_calls) == 2
event_calls.clear()
event_page_calls.clear()
class DefinesEventPageNotEvent(event_model.DocumentRouter):
def event(self, doc):
event_calls.append(object())
# This returns NotImplemented.
return super().event(doc)
def event_page(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['data']['x'][0] == 1
event_page_calls.append(object())
return dict(doc)
dr = DefinesEventPageNotEvent()
# Test that Event is packed and routed to EventPage.
check(dr('event', event1), event1)
assert len(event_calls) == 1
assert len(event_page_calls) == 1
event_calls.clear()
event_page_calls.clear()
# Test that EventPage is routed to EventPage.
check(dr('event_page', event_page), event_page)
assert len(event_page_calls) == 1
assert len(event_calls) == 0
event_calls.clear()
event_page_calls.clear()
class DefinesEventPageAndEvent(event_model.DocumentRouter):
def event(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['data']['x'] == doc['seq_num']
event_calls.append(object())
return dict(doc)
def event_page(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['data']['x'][0] == 1
event_page_calls.append(object())
return dict(doc)
dr = DefinesEventPageAndEvent()
# Test that Event is routed to Event.
check(dr('event', event1), event1)
assert len(event_calls) == 1
assert len(event_page_calls) == 0
event_calls.clear()
event_page_calls.clear()
# Test that EventPage is routed to EventPage.
check(dr('event_page', event_page), event_page)
assert len(event_page_calls) == 1
assert len(event_calls) == 0
event_calls.clear()
event_page_calls.clear()
def test_document_router_dispatch_datum():
datum_calls = [] # used for counting calls
datum_page_calls = [] # used for counting calls
# example documents
datum1 = {'datum_id': 'placeholder/1',
'resource': 'placeholder',
'datum_kwargs': {'index': 1}}
    datum2 = {'datum_id': 'placeholder/2',
'resource': 'placeholder',
'datum_kwargs': {'index': 2}}
datum_page = event_model.pack_datum_page(datum1, datum2)
def check(ret, original=None):
name, doc = ret
assert doc is not None
assert doc is not NotImplemented
if original is not None:
# Verify that a copy is returned.
assert doc is not original # ret is such a poser, dude.
assert doc == original
class DefinesNeitherDatumNorDatumPage(event_model.DocumentRouter):
def datum(self, doc):
datum_calls.append(object())
# This returns NotImplemented.
return super().datum(doc)
def datum_page(self, doc):
datum_page_calls.append(object())
# This returns NotImplemented.
return super().datum_page(doc)
dr = DefinesNeitherDatumNorDatumPage()
# Test that Datum is routed to Datum and DatumPage.
check(dr('datum', datum1))
assert len(datum_calls) == 1
assert len(datum_page_calls) == 1
datum_calls.clear()
datum_page_calls.clear()
# Test that DatumPage is routed to DatumPage and Datum *once* before giving
# up.
check(dr('datum_page', datum_page))
assert len(datum_page_calls) == 1
assert len(datum_calls) == 1
datum_calls.clear()
datum_page_calls.clear()
class DefinesDatumNotDatumPage(event_model.DocumentRouter):
def datum(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['datum_kwargs']['index'] == int(doc['datum_id'][-1])
datum_calls.append(object())
return dict(doc)
def datum_page(self, doc):
datum_page_calls.append(object())
# This returns NotImplemented.
return super().datum_page(doc)
dr = DefinesDatumNotDatumPage()
# Test that Datum is routed to Datum.
check(dr('datum', datum1), datum1)
assert len(datum_calls) == 1
assert len(datum_page_calls) == 0
datum_calls.clear()
datum_page_calls.clear()
# Test that DatumPage is unpacked and routed to Datum one at a time.
check(dr('datum_page', datum_page), datum_page)
assert len(datum_page_calls) == 1
assert len(datum_calls) == 2
datum_calls.clear()
datum_page_calls.clear()
class DefinesDatumPageNotDatum(event_model.DocumentRouter):
def datum(self, doc):
datum_calls.append(object())
# This returns NotImplemented.
            return super().datum(doc)
def datum_page(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['datum_kwargs']['index'][0] == int(doc['datum_id'][0][-1])
datum_page_calls.append(object())
return dict(doc)
dr = DefinesDatumPageNotDatum()
# Test that Datum is packed and routed to DatumPage.
check(dr('datum', datum1), datum1)
assert len(datum_calls) == 1
assert len(datum_page_calls) == 1
datum_calls.clear()
datum_page_calls.clear()
# Test that DatumPage is routed to DatumPage.
check(dr('datum_page', datum_page), datum_page)
assert len(datum_page_calls) == 1
assert len(datum_calls) == 0
datum_calls.clear()
datum_page_calls.clear()
class DefinesDatumPageAndDatum(event_model.DocumentRouter):
def datum(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['datum_kwargs']['index'] == int(doc['datum_id'][-1])
datum_calls.append(object())
return dict(doc)
def datum_page(self, doc):
            # Just a dumb test that checks something particular to these example
# documents.
assert doc['datum_kwargs']['index'][0] == int(doc['datum_id'][0][-1])
datum_page_calls.append(object())
return dict(doc)
dr = DefinesDatumPageAndDatum()
# Test that Datum is routed to Datum.
check(dr('datum', datum1), datum1)
assert len(datum_calls) == 1
assert len(datum_page_calls) == 0
datum_calls.clear()
datum_page_calls.clear()
# Test that DatumPage is routed to DatumPage.
check(dr('datum_page', datum_page), datum_page)
assert len(datum_page_calls) == 1
assert len(datum_calls) == 0
datum_calls.clear()
datum_page_calls.clear()
def test_single_run_document_router():
sr = event_model.SingleRunDocumentRouter()
with pytest.raises(event_model.EventModelError):
sr.get_start()
run_bundle = event_model.compose_run()
sr('start', run_bundle.start_doc)
assert sr.get_start() == run_bundle.start_doc
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
sr('descriptor', desc_bundle.descriptor_doc)
desc_bundle_baseline = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='baseline')
sr('descriptor', desc_bundle_baseline.descriptor_doc)
res_bundle = run_bundle.compose_resource(
spec='TIFF', root='/tmp', resource_path='stack.tiff',
resource_kwargs={})
sr('resource', res_bundle.resource_doc)
datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
sr('datum', datum_doc1)
sr('datum', datum_doc2)
event1 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc1['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=1)
sr('event', event1)
event2 = desc_bundle.compose_event(
data={'motor': 0, 'image': datum_doc2['datum_id']},
timestamps={'motor': 0, 'image': 0}, filled={'image': False},
seq_num=2)
sr('event', event2)
event3 = desc_bundle_baseline.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
sr('event', event3)
with pytest.raises(event_model.EventModelValueError):
sr.get_descriptor(res_bundle.resource_doc)
with pytest.raises(event_model.EventModelValueError):
sr.get_descriptor(datum_doc1)
assert sr.get_descriptor(event1) == desc_bundle.descriptor_doc
assert sr.get_stream_name(event1) == desc_bundle.descriptor_doc.get('name')
assert sr.get_descriptor(event2) == desc_bundle.descriptor_doc
assert sr.get_stream_name(event2) == desc_bundle.descriptor_doc.get('name')
assert sr.get_descriptor(event3) == desc_bundle_baseline.descriptor_doc
assert sr.get_stream_name(event3) == desc_bundle_baseline.descriptor_doc.get('name')
desc_bundle_unused = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},
name='unused')
event4 = desc_bundle_unused.compose_event(
data={'motor': 0},
timestamps={'motor': 0},
seq_num=1)
with pytest.raises(event_model.EventModelValueError):
sr.get_descriptor(event4)
with pytest.raises(event_model.EventModelValueError):
sr.get_stream_name(event4)
sr('stop', run_bundle.compose_stop())
# tests against a second run
run_bundle = event_model.compose_run()
with pytest.raises(event_model.EventModelValueError):
sr('start', run_bundle.start_doc)
desc_bundle = run_bundle.compose_descriptor(
data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},
'image': {'shape': [512, 512], 'dtype': 'number',
'source': '...', 'external': 'FILESTORE:'}},
name='primary')
with pytest.raises(event_model.EventModelValueError):
sr('descriptor', desc_bundle.descriptor_doc)
def test_rechunk_event_pages():
def event_page_gen(page_size, num_pages):
"""
        Generate event_pages for testing.
"""
data_keys = ['x', 'y', 'z']
array_keys = ['seq_num', 'time', 'uid']
for _ in range(num_pages):
yield {'descriptor': 'DESCRIPTOR',
**{key: list(range(page_size)) for key in array_keys},
'data': {key: list(range(page_size)) for key in data_keys},
'timestamps': {key: list(range(page_size)) for key in data_keys},
'filled': {key: list(range(page_size)) for key in data_keys}}
# Get a list of event pages of size 13.
event_pages = list(event_page_gen(13, 31))
# Change the size of the event_pages to size 7.
event_pages_7 = list(event_model.rechunk_event_pages(event_pages, 7))
assert [7] * 57 + [4] == [len(page['uid']) for page in event_pages_7]
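    # 31 pages x 13 events = 403 events total, and 403 = 57 * 7 + 4, hence the
    # expected page lengths above.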
# Change the size back to 13.
event_pages_13 = event_model.rechunk_event_pages(event_pages_7, 13)
# Check that it is equal to the original list of event_pages.
assert event_pages == list(event_pages_13)
def test_rechunk_datum_pages():
def datum_page_gen(page_size, num_pages):
"""
        Generate datum_pages for testing.
"""
data_keys = ['x', 'y', 'z']
array_keys = ['datum_id']
for _ in range(num_pages):
yield {'resource': 'RESOURCE',
**{key: list(range(page_size)) for key in array_keys},
'datum_kwargs': {key: list(range(page_size))
for key in data_keys}}
# Get a list of datum pages of size 13.
datum_pages = list(datum_page_gen(13, 31))
# Change the size of the datum_pages to size 7.
datum_pages_7 = list(event_model.rechunk_datum_pages(datum_pages, 7))
assert [7] * 57 + [4] == [len(page['datum_id']) for page in datum_pages_7]
# Change the size back to 13.
datum_pages_13 = event_model.rechunk_datum_pages(datum_pages_7, 13)
# Check that it is equal to the original list of datum_pages.
assert datum_pages == list(datum_pages_13)
def test_pack_empty_raises():
with pytest.raises(ValueError):
event_model.pack_event_page()
with pytest.raises(ValueError):
event_model.pack_datum_page()
@pytest.mark.parametrize('retry_intervals', [(1,), [1], (), [], None])
def test_retry_intervals_input_normalization(retry_intervals):
filler = event_model.Filler({}, retry_intervals=retry_intervals,
inplace=False)
assert isinstance(filler.retry_intervals, list)
def test_attempt_with_retries():
mutable = []
expected_args = (1, 2)
expected_kwargs = {'c': 3, 'd': 4}
expected_result = 10
class LocalException1(Exception):
pass
class LocalException2(Exception):
pass
def func(*args, **kwargs):
# Fails when called the first two times;
# on the third time, returns expected_result.
assert args == expected_args
assert kwargs == expected_kwargs
mutable.append(object())
if len(mutable) < 3:
raise LocalException1()
return expected_result
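    # These two cases suggest len(intervals) bounds the number of attempts:
    # three intervals let the third call succeed, two intervals do not.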
# Test with a total of three attempts, just sufficient to succeed.
result = event_model._attempt_with_retries(
func=func,
args=expected_args,
kwargs=expected_kwargs,
error_to_catch=LocalException1,
error_to_raise=LocalException2,
intervals=[0, 0.01, 0.01])
assert result == expected_result
mutable.clear()
# Test one fewer than the needed number of attempts to succeed.
with pytest.raises(LocalException2):
event_model._attempt_with_retries(
func=func,
args=expected_args,
kwargs=expected_kwargs,
error_to_catch=LocalException1,
error_to_raise=LocalException2,
intervals=[0, 0.01])
def test_round_trip_event_page_with_empty_data():
event_page = {
'time': [1, 2, 3],
'seq_num': [1, 2, 3],
'uid': ['a', 'b', 'c'],
'descriptor': 'd',
'data': {},
'timestamps': {},
'filled': {}}
events = list(event_model.unpack_event_page(event_page))
assert len(events) == 3
page_again = event_model.pack_event_page(*events)
assert page_again == event_page
def test_round_trip_datum_page_with_empty_data():
datum_page = {
'datum_id': ['a', 'b', 'c'],
'resource': 'd',
'datum_kwargs': {}}
datums = list(event_model.unpack_datum_page(datum_page))
assert len(datums) == 3
page_again = event_model.pack_datum_page(*datums)
assert page_again == datum_page
def test_register_coercion():
# Re-registration should be fine.
assert 'as_is' in event_model._coercion_registry # implementation detail
event_model.register_coercion('as_is', event_model.as_is)
# but registering something different to the same name should raise.
with pytest.raises(event_model.EventModelValueError):
event_model.register_coercion('as_is', object)
def test_register_coercion_misspelled():
"The function register_coercion was originally released as register_coersion."
# Re-registration should be fine.
assert 'as_is' in event_model._coercion_registry # implementation detail
event_model.register_coersion('as_is', event_model.as_is)
# but registering something different to the same name should raise.
with pytest.raises(event_model.EventModelValueError):
event_model.register_coersion('as_is', object)
def test_pickle_filler():
filler = event_model.Filler({}, inplace=False)
serialized = pickle.dumps(filler)
deserialized = pickle.loads(serialized)
assert filler == deserialized
| [
"numpy.ones",
"event_model.Filler",
"event_model.rechunk_datum_pages",
"pytest.mark.parametrize",
"event_model.DocumentRouter",
"event_model.unpack_event_page",
"event_model.pack_datum_page",
"event_model.schemas.keys",
"event_model.sanitize_doc",
"pytest.raises",
"event_model.SingleRunDocumentR... | [((30102, 30171), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retry_intervals"""', '[(1,), [1], (), [], None]'], {}), "('retry_intervals', [(1,), [1], (), [], None])\n", (30125, 30171), False, 'import pytest\n'), ((591, 617), 'event_model.schemas.keys', 'event_model.schemas.keys', ([], {}), '()\n', (615, 617), False, 'import event_model\n'), ((956, 981), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (979, 981), False, 'import event_model\n'), ((3064, 3089), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (3087, 3089), False, 'import event_model\n'), ((7055, 7080), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (7078, 7080), False, 'import event_model\n'), ((8381, 8424), 'event_model.pack_event_page', 'event_model.pack_event_page', (['event1', 'event2'], {}), '(event1, event2)\n', (8408, 8424), False, 'import event_model\n'), ((8451, 8486), 'event_model.pack_event_page', 'event_model.pack_event_page', (['event3'], {}), '(event3)\n', (8478, 8486), False, 'import event_model\n'), ((8569, 8620), 'event_model.bulk_events_to_event_pages', 'event_model.bulk_events_to_event_pages', (['bulk_events'], {}), '(bulk_events)\n', (8607, 8620), False, 'import event_model\n'), ((8734, 8759), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (8757, 8759), False, 'import event_model\n'), ((9740, 9783), 'event_model.pack_event_page', 'event_model.pack_event_page', (['event1', 'event2'], {}), '(event1, event2)\n', (9767, 9783), False, 'import event_model\n'), ((10066, 10091), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (10089, 10091), False, 'import event_model\n'), ((10373, 10416), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['datum1', 'datum2'], {}), '(datum1, datum2)\n', (10400, 10416), False, 'import event_model\n'), ((10694, 10742), 'event_model.bulk_datum_to_datum_page', 'event_model.bulk_datum_to_datum_page', (['bulk_datum'], {}), '(bulk_datum)\n', (10730, 10742), False, 'import event_model\n'), ((10823, 10851), 'event_model.DocumentRouter', 'event_model.DocumentRouter', ([], {}), '()\n', (10849, 10851), False, 'import event_model\n'), ((10869, 10894), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (10892, 10894), False, 'import event_model\n'), ((12548, 12576), 'event_model.DocumentRouter', 'event_model.DocumentRouter', ([], {}), '()\n', (12574, 12576), False, 'import event_model\n'), ((12594, 12619), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (12617, 12619), False, 'import event_model\n'), ((14973, 15016), 'event_model.pack_event_page', 'event_model.pack_event_page', (['event1', 'event2'], {}), '(event1, event2)\n', (15000, 15016), False, 'import event_model\n'), ((19754, 19797), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['datum1', 'datum2'], {}), '(datum1, datum2)\n', (19781, 19797), False, 'import event_model\n'), ((24234, 24271), 'event_model.SingleRunDocumentRouter', 'event_model.SingleRunDocumentRouter', ([], {}), '()\n', (24269, 24271), False, 'import event_model\n'), ((24366, 24391), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (24389, 24391), False, 'import event_model\n'), ((27233, 27258), 'event_model.compose_run', 'event_model.compose_run', ([], {}), '()\n', (27256, 27258), False, 'import event_model\n'), ((28746, 28796), 'event_model.rechunk_event_pages', 'event_model.rechunk_event_pages', 
(['event_pages_7', '(13)'], {}), '(event_pages_7, 13)\n', (28777, 28796), False, 'import event_model\n'), ((29755, 29805), 'event_model.rechunk_datum_pages', 'event_model.rechunk_datum_pages', (['datum_pages_7', '(13)'], {}), '(datum_pages_7, 13)\n', (29786, 29805), False, 'import event_model\n'), ((30248, 30318), 'event_model.Filler', 'event_model.Filler', (['{}'], {'retry_intervals': 'retry_intervals', 'inplace': '(False)'}), '({}, retry_intervals=retry_intervals, inplace=False)\n', (30266, 30318), False, 'import event_model\n'), ((31077, 31266), 'event_model._attempt_with_retries', 'event_model._attempt_with_retries', ([], {'func': 'func', 'args': 'expected_args', 'kwargs': 'expected_kwargs', 'error_to_catch': 'LocalException1', 'error_to_raise': 'LocalException2', 'intervals': '[0, 0.01, 0.01]'}), '(func=func, args=expected_args, kwargs=\n expected_kwargs, error_to_catch=LocalException1, error_to_raise=\n LocalException2, intervals=[0, 0.01, 0.01])\n', (31110, 31266), False, 'import event_model\n'), ((32091, 32127), 'event_model.pack_event_page', 'event_model.pack_event_page', (['*events'], {}), '(*events)\n', (32118, 32127), False, 'import event_model\n'), ((32432, 32468), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['*datums'], {}), '(*datums)\n', (32459, 32468), False, 'import event_model\n'), ((32657, 32714), 'event_model.register_coercion', 'event_model.register_coercion', (['"""as_is"""', 'event_model.as_is'], {}), "('as_is', event_model.as_is)\n", (32686, 32714), False, 'import event_model\n'), ((33148, 33205), 'event_model.register_coersion', 'event_model.register_coersion', (['"""as_is"""', 'event_model.as_is'], {}), "('as_is', event_model.as_is)\n", (33177, 33205), False, 'import event_model\n'), ((33434, 33471), 'event_model.Filler', 'event_model.Filler', (['{}'], {'inplace': '(False)'}), '({}, inplace=False)\n', (33452, 33471), False, 'import event_model\n'), ((33489, 33509), 'pickle.dumps', 'pickle.dumps', (['filler'], {}), '(filler)\n', (33501, 33509), False, 'import pickle\n'), ((33529, 33553), 'pickle.loads', 'pickle.loads', (['serialized'], {}), '(serialized)\n', (33541, 33553), False, 'import pickle\n'), ((4443, 4480), 'event_model.pack_event_page', 'event_model.pack_event_page', (['expected'], {}), '(expected)\n', (4470, 4480), False, 'import event_model\n'), ((5583, 5620), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['expected'], {}), '(expected)\n', (5610, 5620), False, 'import event_model\n'), ((6467, 6504), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['expected'], {}), '(expected)\n', (6494, 6504), False, 'import event_model\n'), ((9869, 9905), 'event_model.sanitize_doc', 'event_model.sanitize_doc', (['event_page'], {}), '(event_page)\n', (9893, 9905), False, 'import event_model\n'), ((9922, 9959), 'event_model.sanitize_doc', 'event_model.sanitize_doc', (['bulk_events'], {}), '(bulk_events)\n', (9946, 9959), False, 'import event_model\n'), ((9976, 10008), 'event_model.sanitize_doc', 'event_model.sanitize_doc', (['event1'], {}), '(event1)\n', (10000, 10008), False, 'import event_model\n'), ((24281, 24323), 'pytest.raises', 'pytest.raises', (['event_model.EventModelError'], {}), '(event_model.EventModelError)\n', (24294, 24323), False, 'import pytest\n'), ((26009, 26056), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (26022, 26056), False, 'import pytest\n'), ((26119, 26166), 'pytest.raises', 'pytest.raises', 
(['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (26132, 26166), False, 'import pytest\n'), ((26962, 27009), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (26975, 27009), False, 'import pytest\n'), ((27055, 27102), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (27068, 27102), False, 'import pytest\n'), ((27268, 27315), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (27281, 27315), False, 'import pytest\n'), ((27664, 27711), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (27677, 27711), False, 'import pytest\n'), ((28568, 28615), 'event_model.rechunk_event_pages', 'event_model.rechunk_event_pages', (['event_pages', '(7)'], {}), '(event_pages, 7)\n', (28599, 28615), False, 'import event_model\n'), ((29572, 29619), 'event_model.rechunk_datum_pages', 'event_model.rechunk_datum_pages', (['datum_pages', '(7)'], {}), '(datum_pages, 7)\n', (29603, 29619), False, 'import event_model\n'), ((29960, 29985), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (29973, 29985), False, 'import pytest\n'), ((29995, 30024), 'event_model.pack_event_page', 'event_model.pack_event_page', ([], {}), '()\n', (30022, 30024), False, 'import event_model\n'), ((30034, 30059), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (30047, 30059), False, 'import pytest\n'), ((30069, 30098), 'event_model.pack_datum_page', 'event_model.pack_datum_page', ([], {}), '()\n', (30096, 30098), False, 'import event_model\n'), ((31442, 31472), 'pytest.raises', 'pytest.raises', (['LocalException2'], {}), '(LocalException2)\n', (31455, 31472), False, 'import pytest\n'), ((31482, 31665), 'event_model._attempt_with_retries', 'event_model._attempt_with_retries', ([], {'func': 'func', 'args': 'expected_args', 'kwargs': 'expected_kwargs', 'error_to_catch': 'LocalException1', 'error_to_raise': 'LocalException2', 'intervals': '[0, 0.01]'}), '(func=func, args=expected_args, kwargs=\n expected_kwargs, error_to_catch=LocalException1, error_to_raise=\n LocalException2, intervals=[0, 0.01])\n', (31515, 31665), False, 'import event_model\n'), ((32002, 32043), 'event_model.unpack_event_page', 'event_model.unpack_event_page', (['event_page'], {}), '(event_page)\n', (32031, 32043), False, 'import event_model\n'), ((32343, 32384), 'event_model.unpack_datum_page', 'event_model.unpack_datum_page', (['datum_page'], {}), '(datum_page)\n', (32372, 32384), False, 'import event_model\n'), ((32798, 32845), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (32811, 32845), False, 'import pytest\n'), ((32855, 32901), 'event_model.register_coercion', 'event_model.register_coercion', (['"""as_is"""', 'object'], {}), "('as_is', object)\n", (32884, 32901), False, 'import event_model\n'), ((33289, 33336), 'pytest.raises', 'pytest.raises', (['event_model.EventModelValueError'], {}), '(event_model.EventModelValueError)\n', (33302, 33336), False, 'import pytest\n'), ((33346, 33392), 'event_model.register_coersion', 'event_model.register_coersion', (['"""as_is"""', 'object'], {}), "('as_is', object)\n", (33375, 33392), False, 'import event_model\n'), ((4663, 4701), 'event_model.pack_event_page', 'event_model.pack_event_page', (['*expected'], {}), 
'(*expected)\n', (4690, 4701), False, 'import event_model\n'), ((4899, 4937), 'event_model.pack_event_page', 'event_model.pack_event_page', (['*expected'], {}), '(*expected)\n', (4926, 4937), False, 'import event_model\n'), ((5332, 5370), 'event_model.pack_event_page', 'event_model.pack_event_page', (['*expected'], {}), '(*expected)\n', (5359, 5370), False, 'import event_model\n'), ((5801, 5839), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['*expected'], {}), '(*expected)\n', (5828, 5839), False, 'import event_model\n'), ((6035, 6073), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['*expected'], {}), '(*expected)\n', (6062, 6073), False, 'import event_model\n'), ((6685, 6723), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['*expected'], {}), '(*expected)\n', (6712, 6723), False, 'import event_model\n'), ((6919, 6957), 'event_model.pack_datum_page', 'event_model.pack_datum_page', (['*expected'], {}), '(*expected)\n', (6946, 6957), False, 'import event_model\n'), ((9293, 9315), 'numpy.ones', 'numpy.ones', (['(512, 512)'], {}), '((512, 512))\n', (9303, 9315), False, 'import numpy\n'), ((9481, 9503), 'numpy.ones', 'numpy.ones', (['(512, 512)'], {}), '((512, 512))\n', (9491, 9503), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import argparse
import sys
import json
from astropy.time import Time
predefined_bands = ["g", "r", "i", "z", "y", "J", "H", "K"]
def _parse_command_line_args():
'''
Parses and returns the command line arguments.
'''
parser = argparse.ArgumentParser(description='Parse Open Astronomy Catalog (OAC) JSON files')
parser.add_argument('--t0', type=float, default=0, help='Initial time (t=0 for event)')
parser.add_argument('--f', help='Filename for JSON file')
parser.add_argument('--b', action='append', help='Data bands to store')
parser.add_argument('--out', help='Directory to save data to')
parser.add_argument('--maxpts', type=float, default=np.inf, help='Maximum number of points to keep for each band')
parser.add_argument('--tmax', type=float, default=np.inf, help='Upper bound for time points to keep')
parser.add_argument('--time-format', type=str, default='gps', help='Time format (MJD or GPS)')
    parser.add_argument('--telescopes', nargs='+', help='Telescopes to use (defaults to all)')
for b in predefined_bands:
parser.add_argument('--tmax-' + b, type=float, help="Upper bound for time in " + b + " band")
return parser.parse_args()
def _read_data(t0, file, bands, out, maxpts, tmax, telescopes, args):
if telescopes is not None:
telescopes = set(telescopes)
name = file.split('/')[-1] # get rid of path except for filename
name = name.split('.')[0] # get event name from filename
### read in the data
with open(file, "r") as read_file:
        data = json.load(read_file)[name]['photometry']
### create empty data arrays
data_dict = {}
for band in bands:
data_dict[band] = np.empty((4, 0))
for entry in data:
if 'band' in entry:
band = entry['band']
### check that it's a band we want and that it has an error magnitude
if (band in bands and 'e_magnitude' in entry and 'telescope' in entry and 'source' in entry
and (telescopes is None or entry['telescope'] in telescopes)
and 'realization' not in entry):
### [time, time error, magnitude, magnitude error]
                to_append = np.array([[entry['time']], [0], [entry['magnitude']], [entry['e_magnitude']]]).astype(float)
to_append[0] -= t0
tmax_here = tmax
if "tmax_" + band in args.keys() and args["tmax_" + band] is not None:
tmax_here = min(tmax, args["tmax_" + band])
if to_append[0] < tmax_here:
data_dict[band] = np.append(data_dict[band], to_append, axis=1)
for band in data_dict:
data = data_dict[band]
### check if we have too much data
if data.shape[1] > maxpts:
### basically, generate random indices, take the columns (data points)
### specified by those columns, and then sort them based on times
### (sorting is not strictly necessary but it seems like a good idea
### to keep data ordered)
cols = np.random.randint(0, data.shape[1], int(maxpts))
data = data[:,cols]
data = data[:,data[0].argsort()]
data_dict[band] = data
return data_dict
def _save_data(out, data_dict):
for band in data_dict:
filename = out + band + '.txt'
np.savetxt(filename, data_dict[band].T)
def _convert_time(t0):
t = Time(t0, format='gps')
return t.mjd
def parse_json(t0, file, bands, out, maxpts=np.inf, tmax=np.inf, gps_time=False, telescopes=None, args={}):
'''
Parse JSON file.
Parameters
----------
t0 : int
Initial time (t=0) for the event
file : string
Name of JSON file
bands : list
List of names of data bands to keep
out : string
Directory to save data to
maxpts : int
Maximum number of points to keep for each band
tmax : float
Upper bound for time points to keep
    gps_time : bool
        If True, t0 is given in GPS seconds and is converted to MJD
    telescopes : list or None
        Telescope names to keep (defaults to all)
    args : dict
        Extra options, e.g. per-band time cutoffs under keys like 'tmax_<band>'
    '''
if gps_time:
t0 = _convert_time(t0)
data_dict = _read_data(t0, file, bands, out, maxpts, tmax, telescopes, args)
_save_data(out, data_dict)
def main():
args = _parse_command_line_args()
parse_json(args.t0, args.f, args.b, args.out, args.maxpts, args.tmax, (args.time_format == 'gps'), args.telescopes, vars(args))
if __name__ == '__main__':
main()
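# Example invocation (hypothetical paths and event; flags as defined above):
#   python parse_json.py --t0 1187008882 --f data/GW170817.json --b g --b r \
#       --out out/ --time-format gps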
| [
"json.load",
"argparse.ArgumentParser",
"astropy.time.Time",
"numpy.empty",
"numpy.savetxt",
"numpy.append",
"numpy.array"
] | [((325, 414), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse Open Astronomy Catalog (OAC) JSON files"""'}), "(description=\n 'Parse Open Astronomy Catalog (OAC) JSON files')\n", (348, 414), False, 'import argparse\n'), ((3563, 3585), 'astropy.time.Time', 'Time', (['t0'], {'format': '"""gps"""'}), "(t0, format='gps')\n", (3567, 3585), False, 'from astropy.time import Time\n'), ((1815, 1831), 'numpy.empty', 'np.empty', (['(4, 0)'], {}), '((4, 0))\n', (1823, 1831), True, 'import numpy as np\n'), ((3491, 3530), 'numpy.savetxt', 'np.savetxt', (['filename', 'data_dict[band].T'], {}), '(filename, data_dict[band].T)\n', (3501, 3530), True, 'import numpy as np\n'), ((1655, 1693), 'json.load', 'json.load', (['read_file'], {'encoding': '"""UTF-8"""'}), "(read_file, encoding='UTF-8')\n", (1664, 1693), False, 'import json\n'), ((2721, 2766), 'numpy.append', 'np.append', (['data_dict[band]', 'to_append'], {'axis': '(1)'}), '(data_dict[band], to_append, axis=1)\n', (2730, 2766), True, 'import numpy as np\n'), ((2323, 2401), 'numpy.array', 'np.array', (["[[entry['time']], [0], [entry['magnitude']], [entry['e_magnitude']]]"], {}), "([[entry['time']], [0], [entry['magnitude']], [entry['e_magnitude']]])\n", (2331, 2401), True, 'import numpy as np\n')] |
import numpy as np
import random
import uuid
class MazeGen:
def __init__(self, width=50):
        # width is the side length of the square maze.
        # Cells start as 1 (walls); carving sets explorable cells to 0.
self.width = width
self.map = np.ones((width, width))
self.directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
def get_neighbors(self, pos):
# look through the neighbors and return them if any
neighbors = []
r, c = pos
for direction in self.directions:
dr, dc = direction
candidate_row = r + dr
candidate_col = c + dc
candidate_neigh = (candidate_row, candidate_col)
if self.is_neighbor(candidate_neigh):
neighbors.append(candidate_neigh)
return neighbors
def is_neighbor(self, pos):
r, c = pos
return not (r < 0 or r >= self.width or c < 0 or c >= self.width)
def is_valid(self, pos):
# a neighbor is valid if it is not a cell, and it is not surrounded by more than one cell
r, c = pos
if self.map[r, c] == 0:
return False
neighbors = self.get_neighbors(pos)
num_cells = 0
for neighbor in neighbors:
if self.map[neighbor[0], neighbor[1]] == 0:
num_cells += 1
        return num_cells <= 1
def maze_gen(self, start=None):
if start is None:
start = (random.choice(range(self.width)), random.choice(range(self.width)))
self.map[start[0], start[1]] = 0
neighbors = self.get_neighbors(start)
if not neighbors:
return
for neighbor in random.sample(neighbors, len(neighbors)):
if not self.is_valid(neighbor):
continue
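            # A dead-ended recursive call returns None; returning True here
            # then stops this frame and lets the caller move on to its own
            # remaining neighbors.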
if not self.maze_gen(neighbor):
return True
def print_help():
print()
print("\tpython maze_gen.py [--width WIDTH] [--help | -h]")
print()
print("\t\t--width:\twidth of square maze, default is 75")
print()
if __name__ == '__main__':
import cv2
import sys
    WIDTH = 75  # 75 x 75 map
# lazy argument parser
if "-h" in sys.argv or "--help" in sys.argv:
print_help()
sys.exit()
for i in range(1, len(sys.argv), 2):
if sys.argv[i] == "--width":
WIDTH = int(sys.argv[i + 1])
maze = MazeGen(width=WIDTH)
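    # The recursive carver can exceed Python's default recursion limit on
    # larger maps; the except below catches that (e.g. RecursionError) and the
    # finally still writes whatever was carved so far.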
try:
maze.maze_gen()
except Exception as e:
print(e)
finally:
maze_name = f"maps/" + str(uuid.uuid1()) + ".png"
cv2.imwrite(maze_name, maze.map * 255)
| [
"cv2.imwrite",
"uuid.uuid1",
"numpy.ones",
"sys.exit"
] | [((245, 268), 'numpy.ones', 'np.ones', (['(width, width)'], {}), '((width, width))\n', (252, 268), True, 'import numpy as np\n'), ((2236, 2246), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2244, 2246), False, 'import sys\n'), ((2556, 2594), 'cv2.imwrite', 'cv2.imwrite', (['maze_name', '(maze.map * 255)'], {}), '(maze_name, maze.map * 255)\n', (2567, 2594), False, 'import cv2\n'), ((2525, 2537), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (2535, 2537), False, 'import uuid\n')] |
import numpy as np
def sigmoid(x):
"""Computes the element wise logistic sigmoid of x.
Inputs:
x: Either a row vector or a column vector.
"""
return 1.0 / (1.0 + np.exp(-x))
def load_train(filePath):
"""Loads training data."""
with open(filePath, 'rb') as f:
train_set = np.load(f)
train_inputs = train_set['train_inputs']
train_targets = train_set['train_targets']
return train_inputs, train_targets
def load_valid(filePath):
"""Loads validation data."""
with open(filePath, 'rb') as f:
valid_set = np.load(f)
valid_inputs = valid_set['valid_inputs']
valid_targets = valid_set['valid_targets']
return valid_inputs, valid_targets
def load_test(filePath):
"""Loads test data."""
with open(filePath, 'rb') as f:
test_set = np.load(f)
test_inputs = test_set['test_inputs']
test_targets = test_set['test_targets']
return test_inputs, test_targets
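# Usage sketch (assumes .npz archives containing the keys accessed above;
# the path is hypothetical):
#   train_inputs, train_targets = load_train('data/train.npz')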
| [
"numpy.load",
"numpy.exp"
] | [((314, 324), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (321, 324), True, 'import numpy as np\n'), ((581, 591), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (588, 591), True, 'import numpy as np\n'), ((845, 855), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (852, 855), True, 'import numpy as np\n'), ((188, 198), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (194, 198), True, 'import numpy as np\n')] |
import numpy as np
def SMAPE(y_true, y_pred):
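    """Element-wise symmetric MAPE: |y_pred - y_true| / ((|y_true| + |y_pred|) / 2).

    0/0 terms (both values zero) are mapped to 0 below.  Note this returns the
    per-sample scores, not their mean; callers average the result themselves.
    """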
y_true = np.array(y_true)
y_pred = np.array(y_pred)
score = np.abs(y_pred - y_true) / ((np.abs(y_true) + np.abs(y_pred)) / 2)
score = np.where(np.isnan(score), 0, score)
return score
def MAPE(y_true, y_pred):
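    """Mean absolute percentage error, |y_pred - y_true| / |y_true|, averaged
    over samples with y_true > 0 (non-positive targets are dropped to avoid
    division by zero)."""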
y_true = np.array(y_true)
y_pred = np.array(y_pred)
idx = y_true > 0
y_true = y_true[idx]
y_pred = y_pred[idx]
score = np.abs(y_pred - y_true) / np.abs(y_true)
return np.mean(score)
def _get_score_metric(y_true, y_pred, metric='mape'):
'''
:param y_true: array-like of shape (n_samples,) or (n_samples, n_outputs). Ground truth (correct) target values.
:param y_pred: array-like of shape (n_samples,) or (n_samples, n_outputs). Estimated target values.
:param metric: str, one of ['mae', 'mape', 'mse', 'rmse', 'msle', 'rmsle', 'smape'], default = 'mape'.
:return:
'''
y_true = np.array(y_true) if type(y_true) == list else y_true
y_pred = np.array(y_pred) if type(y_true) == list else y_pred
if metric == 'mae':
return np.mean(np.abs(y_true - y_pred))
    elif metric == 'mape':
return MAPE(y_true, y_pred)
elif metric == 'mse':
return np.mean((y_true - y_pred) ** 2)
elif metric == 'rmse':
return np.mean((y_true - y_pred) ** 2) ** 0.5
elif metric == 'msle':
return np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2)
elif metric == 'rmsle':
return np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2) ** 0.5
elif metric == 'smape':
return np.mean(SMAPE(y_true, y_pred))
    # Fallback for unrecognized metric names: count exact matches.
    return (y_true == y_pred).sum()
| [
"numpy.abs",
"numpy.isnan",
"numpy.mean",
"numpy.array",
"numpy.log1p"
] | [((60, 76), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (68, 76), True, 'import numpy as np\n'), ((90, 106), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (98, 106), True, 'import numpy as np\n'), ((290, 306), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (298, 306), True, 'import numpy as np\n'), ((320, 336), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (328, 336), True, 'import numpy as np\n'), ((474, 488), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (481, 488), True, 'import numpy as np\n'), ((119, 142), 'numpy.abs', 'np.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (125, 142), True, 'import numpy as np\n'), ((206, 221), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (214, 221), True, 'import numpy as np\n'), ((422, 445), 'numpy.abs', 'np.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (428, 445), True, 'import numpy as np\n'), ((448, 462), 'numpy.abs', 'np.abs', (['y_true'], {}), '(y_true)\n', (454, 462), True, 'import numpy as np\n'), ((914, 930), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (922, 930), True, 'import numpy as np\n'), ((980, 996), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (988, 996), True, 'import numpy as np\n'), ((1081, 1104), 'numpy.abs', 'np.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1087, 1104), True, 'import numpy as np\n'), ((1208, 1239), 'numpy.mean', 'np.mean', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (1215, 1239), True, 'import numpy as np\n'), ((147, 161), 'numpy.abs', 'np.abs', (['y_true'], {}), '(y_true)\n', (153, 161), True, 'import numpy as np\n'), ((164, 178), 'numpy.abs', 'np.abs', (['y_pred'], {}), '(y_pred)\n', (170, 178), True, 'import numpy as np\n'), ((1282, 1313), 'numpy.mean', 'np.mean', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (1289, 1313), True, 'import numpy as np\n'), ((1372, 1388), 'numpy.log1p', 'np.log1p', (['y_true'], {}), '(y_true)\n', (1380, 1388), True, 'import numpy as np\n'), ((1391, 1407), 'numpy.log1p', 'np.log1p', (['y_pred'], {}), '(y_pred)\n', (1399, 1407), True, 'import numpy as np\n'), ((1467, 1483), 'numpy.log1p', 'np.log1p', (['y_true'], {}), '(y_true)\n', (1475, 1483), True, 'import numpy as np\n'), ((1486, 1502), 'numpy.log1p', 'np.log1p', (['y_pred'], {}), '(y_pred)\n', (1494, 1502), True, 'import numpy as np\n')] |
# pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
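# e.g. _get_window_type('rolling') -> ROLLING and
# _get_window_type('full sample') -> FULL_SAMPLE; anything else raises.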
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
TODO
Returns
-------
**Attributes**:
coefs : WidePanel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as WidePanel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.WidePanel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
return pd.DataFrame(data)
def forecast(self, steps=1):
"""
Produce dynamic forecast
Parameters
----------
steps
Returns
-------
forecasts : pandas.DataFrame
"""
output = np.empty((self.T - steps, self.neqs))
y_values = self.y.values
y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))
result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))
coefs = self._coefs_raw
intercepts = self._intercepts_raw
# can only produce this many forecasts
forc_index = self.result_index[steps:]
for i, date in enumerate(forc_index):
# TODO: check that this does the right thing in weird cases...
idx = y_index_map[date] - steps
result_idx = result_index_map[date] - steps
y_slice = y_values[:idx]
forcs = _model.forecast(y_slice, coefs[result_idx],
intercepts[result_idx], steps)
output[i] = forcs[-1]
return pd.DataFrame(output, index=forc_index, columns=self.names)
def plot_forecast(self, steps=1, figsize=(10, 10)):
"""
Plot h-step ahead forecasts against actual realizations of time
series. Note that forecasts are lined up with their respective
realizations.
Parameters
----------
steps :
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,
sharex=True)
forc = self.forecast(steps=steps)
dates = forc.index
y_overlay = self.y.reindex(dates)
for i, col in enumerate(forc.columns):
ax = axes[i]
y_ts = y_overlay[col]
forc_ts = forc[col]
y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)
forc_handle = ax.plot(dates, forc_ts.values, 'k-')
fig.legend((y_handle, forc_handle), ('Y', 'Forecast'))
fig.autofmt_xdate()
fig.suptitle('Dynamic %d-step forecast' % steps)
# pretty things up a bit
plotting.adjust_subplots(bottom=0.15, left=0.10)
plt.draw_if_interactive()
@property
def _is_rolling(self):
return self._window_type == ROLLING
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
data = dict((eq, r.r2) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
class DynamicPanelVAR(DynamicVAR):
"""
Dynamic (time-varying) panel vector autoregression using panel ordinary
least squares
Parameters
----------
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.neqs = len(data.columns)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _filter_data(lhs, rhs):
"""
    Data filtering routine for dynamic VAR
    Parameters
    ----------
lhs : DataFrame
original data
rhs : DataFrame
lagged variables
Returns
-------
"""
def _has_all_columns(df):
return np.isfinite(df.values).sum(1) == len(df.columns)
rhs_valid = _has_all_columns(rhs)
if not rhs_valid.all():
pre_filtered_rhs = rhs[rhs_valid]
else:
pre_filtered_rhs = rhs
index = lhs.index.union(rhs.index)
if not index.equals(rhs.index) or not index.equals(lhs.index):
rhs = rhs.reindex(index)
lhs = lhs.reindex(index)
rhs_valid = _has_all_columns(rhs)
lhs_valid = _has_all_columns(lhs)
valid = rhs_valid & lhs_valid
if not valid.all():
filt_index = rhs.index[valid]
filtered_rhs = rhs.reindex(filt_index)
filtered_lhs = lhs.reindex(filt_index)
else:
filtered_rhs, filtered_lhs = rhs, lhs
return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
def _make_lag_matrix(x, lags):
data = {}
columns = []
for i in range(1, 1 + lags):
        lagstr = 'L%d.' % i
lag = x.shift(i).rename(columns=lambda c: lagstr + c)
data.update(lag._series)
columns.extend(lag.columns)
return pd.DataFrame(data, columns=columns)
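# A modern-pandas sketch of the helper above (an assumption, not part of the
# original module): `lag._series` is a legacy pandas internal, so on recent
# pandas versions an equivalent construction would be:
def _make_lag_matrix_concat(x, lags):
    # Build each lagged copy with prefixed column names, then align on the index.
    frames = [x.shift(i).rename(columns=lambda c, i=i: 'L%d.%s' % (i, c))
              for i in range(1, 1 + lags)]
    return pd.concat(frames, axis=1)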
class Equation(object):
"""
Stub, estimate one equation
"""
def __init__(self, y, x):
pass
if __name__ == '__main__':
import pandas.util.testing as ptest
ptest.N = 500
data = ptest.makeTimeDataFrame().cumsum(0)
var = DynamicVAR(data, lag_order=2, window_type='expanding')
var2 = DynamicVAR(data, lag_order=2, window=10,
window_type='rolling')
| [
"pandas.DataFrame",
"pandas.ols",
"numpy.empty",
"matplotlib.pyplot.draw_if_interactive",
"statsmodels.compat.python.range",
"matplotlib.pyplot.subplots",
"numpy.isfinite",
"pandas.WidePanel.fromDict",
"pandas.util.testing.makeTimeDataFrame",
"statsmodels.compat.python.iteritems"
] | [((9290, 9308), 'statsmodels.compat.python.range', 'range', (['(1)', '(1 + lags)'], {}), '(1, 1 + lags)\n', (9295, 9308), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((9480, 9515), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (9492, 9515), True, 'import pandas as pd\n'), ((3015, 3033), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3027, 3033), True, 'import pandas as pd\n'), ((3120, 3137), 'statsmodels.compat.python.iteritems', 'iteritems', (['self.y'], {}), '(self.y)\n', (3129, 3137), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((3539, 3564), 'statsmodels.compat.python.iteritems', 'iteritems', (['self.equations'], {}), '(self.equations)\n', (3548, 3564), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((3618, 3645), 'pandas.WidePanel.fromDict', 'pd.WidePanel.fromDict', (['data'], {}), '(data)\n', (3639, 3645), True, 'import pandas as pd\n'), ((4767, 4792), 'statsmodels.compat.python.iteritems', 'iteritems', (['self.equations'], {}), '(self.equations)\n', (4776, 4792), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((4846, 4864), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4858, 4864), True, 'import pandas as pd\n'), ((5096, 5133), 'numpy.empty', 'np.empty', (['(self.T - steps, self.neqs)'], {}), '((self.T - steps, self.neqs))\n', (5104, 5133), True, 'import numpy as np\n'), ((5940, 5998), 'pandas.DataFrame', 'pd.DataFrame', (['output'], {'index': 'forc_index', 'columns': 'self.names'}), '(output, index=forc_index, columns=self.names)\n', (5952, 5998), True, 'import pandas as pd\n'), ((6361, 6420), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'nrows': 'self.neqs', 'sharex': '(True)'}), '(figsize=figsize, nrows=self.neqs, sharex=True)\n', (6373, 6420), True, 'import matplotlib.pyplot as plt\n'), ((7083, 7108), 'matplotlib.pyplot.draw_if_interactive', 'plt.draw_if_interactive', ([], {}), '()\n', (7106, 7108), True, 'import matplotlib.pyplot as plt\n'), ((7364, 7382), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (7376, 7382), True, 'import pandas as pd\n'), ((3159, 3268), 'pandas.ols', 'pd.ols', ([], {'y': 'ts', 'x': 'self.x', 'window': 'self._window', 'window_type': 'self._window_type', 'min_periods': 'self._min_periods'}), '(y=ts, x=self.x, window=self._window, window_type=self._window_type,\n min_periods=self._min_periods)\n', (3165, 3268), True, 'import pandas as pd\n'), ((9731, 9756), 'pandas.util.testing.makeTimeDataFrame', 'ptest.makeTimeDataFrame', ([], {}), '()\n', (9754, 9756), True, 'import pandas.util.testing as ptest\n'), ((2973, 2998), 'statsmodels.compat.python.iteritems', 'iteritems', (['self.equations'], {}), '(self.equations)\n', (2982, 2998), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((7322, 7347), 'statsmodels.compat.python.iteritems', 'iteritems', (['self.equations'], {}), '(self.equations)\n', (7331, 7347), False, 'from statsmodels.compat.python import iteritems, string_types, range\n'), ((8442, 8464), 'numpy.isfinite', 'np.isfinite', (['df.values'], {}), '(df.values)\n', (8453, 8464), True, 'import numpy as np\n')] |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
from collections import OrderedDict
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import torch
from torch.autograd import Variable
from data.base_dataset import get_params, get_transform
from PIL import Image
import os
import numpy as np
import shutil
import video_utils
opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
try:
        start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
    except Exception:
start_epoch, epoch_iter = 1, 0
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
start_epoch, epoch_iter = 1, 0
# additional enforced options for video
opt.video_mode = True
opt.label_nc = 0
opt.no_instance = True
# could be changed, but not tested
opt.resize_or_crop = "none"
opt.batchSize = 1
opt.flat = False
# this debug directory will contain input/output frame pairs
if opt.debug:
debug_dir = os.path.join(opt.checkpoints_dir, opt.name, 'debug')
if os.path.isdir(debug_dir):
shutil.rmtree(debug_dir)
os.mkdir(debug_dir)
if opt.scheduled_sampling:
print('ATTN! scheduled_sampling True by default')
if opt.batchSize > 1:
raise Exception('(for now) in "scheduled sampling" mode, --batchSize has to be 1')
if not opt.serial_batches:
raise Exception('(for now) in "scheduled sampling" mode, the --serial_batches option is necessary')
if not opt.no_flip:
raise Exception('(for now) in "scheduled sampling" mode, the --no_flip option is necessary')
latest_generated_frame = None
recursion = 0
opt.video_mode = True
# load frames dataset at the specified location
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
print(f'Training data loaded from: {opt.dataroot}')
print(f'Saving model to: {opt.name}')
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
total_steps = (start_epoch-1) * dataset_size + epoch_iter
# print(f'Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
model = create_model(opt)
# print(f'After model init. Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
if opt.gpu:
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
# print(f'Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
# move model to gpu
model = model.to('cuda')
# print(f'After model moved. Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
visualizer = Visualizer(opt)
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
left_frame = Image.open(data['left_path'][0])
right_frame = Image.open(data['right_path'][0])
params = get_params(opt, left_frame.size)
transform = get_transform(opt, params)
left_frame = transform(left_frame.convert('RGB'))
right_frame = transform(right_frame.convert('RGB'))
if opt.gpu:
left_frame = left_frame.unsqueeze(0).to('cuda')
# print(f'After left frame moved. Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
right_frame = right_frame.unsqueeze(0).to('cuda')
# print(f'After right frame moved. Cur mem allocated: {torch.cuda.memory_allocated() / 1e6} MB')
if opt.debug:
video_utils.save_tensor(left_frame, debug_dir + "/step-%d-left-r%d.jpg" % (total_steps, recursion))
video_utils.save_tensor(right_frame, debug_dir + "/step-%d-right.jpg" % total_steps)
# print(f"LEFT FRAME: {left_frame.size()}")
# print(f"RIGHT FRAME: {right_frame.size()}")
losses, latest_generated_frame = model(
left_frame, None,
right_frame, None,
infer=opt.scheduled_sampling
)
# sum per device losses
losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
loss_dict = dict(zip(model.module.loss_names, losses))
# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0)
############### Backward Pass - frame1->frame2 ####################
# update generator weights
model.module.optimizer_G.zero_grad()
loss_G.backward()
model.module.optimizer_G.step()
# update discriminator weights
model.module.optimizer_D.zero_grad()
loss_D.backward()
model.module.optimizer_D.step()
#call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.module.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
iter_end_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.module.save('latest')
model.module.save(epoch)
np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.module.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.module.update_learning_rate()
| [
"torch.mean",
"os.mkdir",
"os.path.isdir",
"data.base_dataset.get_params",
"numpy.savetxt",
"data.data_loader.CreateDataLoader",
"time.time",
"PIL.Image.open",
"data.base_dataset.get_transform",
"torch.cuda.is_available",
"numpy.loadtxt",
"video_utils.save_tensor",
"util.visualizer.Visualize... | [((650, 705), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name', '"""iter.txt"""'], {}), "(opt.checkpoints_dir, opt.name, 'iter.txt')\n", (662, 705), False, 'import os\n'), ((2030, 2051), 'data.data_loader.CreateDataLoader', 'CreateDataLoader', (['opt'], {}), '(opt)\n', (2046, 2051), False, 'from data.data_loader import CreateDataLoader\n'), ((2394, 2411), 'models.models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (2406, 2411), False, 'from models.models import create_model\n'), ((3000, 3015), 'util.visualizer.Visualizer', 'Visualizer', (['opt'], {}), '(opt)\n', (3010, 3015), False, 'from util.visualizer import Visualizer\n'), ((1287, 1339), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name', '"""debug"""'], {}), "(opt.checkpoints_dir, opt.name, 'debug')\n", (1299, 1339), False, 'import os\n'), ((1347, 1371), 'os.path.isdir', 'os.path.isdir', (['debug_dir'], {}), '(debug_dir)\n', (1360, 1371), False, 'import os\n'), ((1410, 1429), 'os.mkdir', 'os.mkdir', (['debug_dir'], {}), '(debug_dir)\n', (1418, 1429), False, 'import os\n'), ((2566, 2591), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2589, 2591), False, 'import torch\n'), ((3244, 3255), 'time.time', 'time.time', ([], {}), '()\n', (3253, 3255), False, 'import time\n'), ((6390, 6401), 'time.time', 'time.time', ([], {}), '()\n', (6399, 6401), False, 'import time\n'), ((615, 629), 'options.train_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (627, 629), False, 'from options.train_options import TrainOptions\n'), ((772, 819), 'numpy.loadtxt', 'np.loadtxt', (['iter_path'], {'delimiter': '""","""', 'dtype': 'int'}), "(iter_path, delimiter=',', dtype=int)\n", (782, 819), True, 'import numpy as np\n'), ((1381, 1405), 'shutil.rmtree', 'shutil.rmtree', (['debug_dir'], {}), '(debug_dir)\n', (1394, 1405), False, 'import shutil\n'), ((3416, 3427), 'time.time', 'time.time', ([], {}), '()\n', (3425, 3427), False, 'import time\n'), ((3531, 3563), 'PIL.Image.open', 'Image.open', (["data['left_path'][0]"], {}), "(data['left_path'][0])\n", (3541, 3563), False, 'from PIL import Image\n'), ((3586, 3619), 'PIL.Image.open', 'Image.open', (["data['right_path'][0]"], {}), "(data['right_path'][0])\n", (3596, 3619), False, 'from PIL import Image\n'), ((3638, 3670), 'data.base_dataset.get_params', 'get_params', (['opt', 'left_frame.size'], {}), '(opt, left_frame.size)\n', (3648, 3670), False, 'from data.base_dataset import get_params, get_transform\n'), ((3691, 3717), 'data.base_dataset.get_transform', 'get_transform', (['opt', 'params'], {}), '(opt, params)\n', (3704, 3717), False, 'from data.base_dataset import get_params, get_transform\n'), ((6782, 6844), 'numpy.savetxt', 'np.savetxt', (['iter_path', '(epoch + 1, 0)'], {'delimiter': '""","""', 'fmt': '"""%d"""'}), "(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')\n", (6792, 6844), True, 'import numpy as np\n'), ((4232, 4336), 'video_utils.save_tensor', 'video_utils.save_tensor', (['left_frame', "(debug_dir + '/step-%d-left-r%d.jpg' % (total_steps, recursion))"], {}), "(left_frame, debug_dir + '/step-%d-left-r%d.jpg' % (\n total_steps, recursion))\n", (4255, 4336), False, 'import video_utils\n'), ((4344, 4432), 'video_utils.save_tensor', 'video_utils.save_tensor', (['right_frame', "(debug_dir + '/step-%d-right.jpg' % total_steps)"], {}), "(right_frame, debug_dir + '/step-%d-right.jpg' %\n total_steps)\n", (4367, 4432), False, 'import video_utils\n'), ((6224, 6291), 'numpy.savetxt', 
'np.savetxt', (['iter_path', '(epoch, epoch_iter)'], {'delimiter': '""","""', 'fmt': '"""%d"""'}), "(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')\n", (6234, 6291), True, 'import numpy as np\n'), ((4749, 4762), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (4759, 4762), False, 'import torch\n'), ((5800, 5811), 'time.time', 'time.time', ([], {}), '()\n', (5809, 5811), False, 'import time\n'), ((6506, 6517), 'time.time', 'time.time', ([], {}), '()\n', (6515, 6517), False, 'import time\n')] |
import numpy as np
def gauss(img_size, mx, my, sx, sy, amp=0.01):
    x = np.arange(img_size)[None].astype(float)  # np.float was removed from modern NumPy
y = x.T
g = amp * np.exp(-((y - my) ** 2) / sy).dot(np.exp(-((x - mx) ** 2) / sx))
return g
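# Minimal usage sketch (an illustration, not part of the original function):
if __name__ == '__main__':
    # 64x64 image with a bump centred at (mx, my) = (32, 32); note that sx and
    # sy act as 2*sigma**2 in the exponent above.
    bump = gauss(64, mx=32, my=32, sx=25.0, sy=25.0)
    print(bump.shape, bump.max())  # (64, 64) 0.01 (the default amp at the peak)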
| [
"numpy.arange",
"numpy.exp"
] | [((179, 206), 'numpy.exp', 'np.exp', (['(-(x - mx) ** 2 / sx)'], {}), '(-(x - mx) ** 2 / sx)\n', (185, 206), True, 'import numpy as np\n'), ((76, 95), 'numpy.arange', 'np.arange', (['img_size'], {}), '(img_size)\n', (85, 95), True, 'import numpy as np\n'), ((145, 172), 'numpy.exp', 'np.exp', (['(-(y - my) ** 2 / sy)'], {}), '(-(y - my) ** 2 / sy)\n', (151, 172), True, 'import numpy as np\n')] |
import numpy as np
import argparse
import osgeo.gdal as gdal
from scipy.spatial import voronoi_plot_2d, Voronoi
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from numpy import genfromtxt
import pandas as pd
import gdal
import os
import xarray as xr
import clhs as cl
import csv
from itertools import combinations  # needed by good_points_brute_force below
from sklearn.naive_bayes import GaussianNB  # needed by calc_score below
from scipy.linalg import solve_triangular, get_lapack_funcs, get_blas_funcs
from maxvolpy.maxvol import maxvol
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# NOTE: f_no_cut and f_cut_eps are redefined further down in this module; the
# later (coordinate-based) definitions are the ones that take effect.
def f_no_cut(idx, i, copy=False):
if copy:
idx = np.copy(idx)
idx[i] = 0
return idx
def f_cut_eps(idx, i, X, eps=0.1, copy=False):
if copy:
idx = np.copy(idx)
    # print(np.abs(X - X[i]) < eps)
    # print(idx.shape, X.shape)  # debug output disabled
idx[np.abs(X - X[i]) < eps] = 0
return idx
def rect_maxvol_cut(A, tol = 1., maxK = None, min_add_K = None, minK = None, start_maxvol_iters = 10, identity_submatrix = True, top_k_index = -1, cut_fun=None, penalty=None):
"""Python implementation of rectangular 2-volume maximization. For information see :py:func:`rect_maxvol` function"""
# tol2 - square of parameter tol
tol2 = tol**2
# N - number of rows, r - number of columns of matrix A
N, r = A.shape
if N <= r:
return np.arange(N, dtype = int), np.eye(N, dtype = A.dtype)
if maxK is None or maxK > N:
maxK = N
if maxK < r:
maxK = r
if minK is None or minK < r:
minK = r
if minK > N:
minK = N
if min_add_K is not None:
minK = max(minK, r + min_add_K)
if minK > maxK:
minK = maxK
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < r:
top_k_index = r
if cut_fun is None:
cut_fun = f_no_cut
if penalty is None:
#penalty_fun = np.ones(top_k_index, dtype=int)
chosen = np.ones(top_k_index, dtype=int)
else:
chosen = np.copy(penalty)
index = np.zeros(N, dtype = int)
tmp_index, C = maxvol(A, tol = 1, max_iters = start_maxvol_iters, top_k_index = top_k_index)
# --
index[:r] = tmp_index
#chosen[tmp_index] = 0 -- replaced
for ti in tmp_index:
cut_fun(chosen, ti)
C = np.asfortranarray(C)
# compute square 2-norms of each row in matrix C
row_norm_sqr = np.array([chosen[i]*np.linalg.norm(C[i], 2)**2 for i in range(top_k_index)])
# find maximum value in row_norm_sqr
i = np.argmax(row_norm_sqr)
K = r
# set cgeru or zgeru for complex numbers and dger or sger for float numbers
try:
ger = get_blas_funcs('geru', [C])
    except Exception:
ger = get_blas_funcs('ger', [C])
while (row_norm_sqr[i] > tol2 and K < maxK) or K < minK:
# add i to index and recompute C and square norms of each row by SVM-formula
index[K] = i
#chosen[i] = 0 -- replaced by the next line
#print(chosen)
cut_fun(chosen, i)
if (chosen == 0).all():
print('Failed')
c = C[i].copy()
v = C.dot(c.conj())
l = 1.0/(1+v[i])
ger(-l,v,c,a=C,overwrite_a=1)
C = np.hstack([C, l*v.reshape(-1,1)])
row_norm_sqr -= (l*v[:top_k_index]*v[:top_k_index].conj()).real
row_norm_sqr *= chosen
# find maximum value in row_norm_sqr
i = row_norm_sqr.argmax()
K += 1
if identity_submatrix:
C[index[:K]] = np.eye(K, dtype = C.dtype)
return index[:K].copy(), C
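# Minimal usage sketch (an illustration, not part of the original module):
# pick between 5 and 8 rows of a random tall matrix with the default
# no-op cut function and uniform penalty.
if __name__ == '__main__':
    A_demo = np.random.randn(100, 5)
    rows, C_demo = rect_maxvol_cut(A_demo, maxK=8, minK=5)
    print(rows, C_demo.shape)  # between 5 and 8 row indices; C is (100, K)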
def make_dist(X):
n = len(X)
A = np.empty((n, n), dtype=X.dtype)
for ix, x in enumerate(X):
for iy, y in enumerate(X):
A[ix, iy] = np.abs(x - y)
return A
def f_penal(X, bnd, level=0.0):
Xmin = np.min(X)
Xmax = np.max(X)
bnd_abs = (Xmax - Xmin)*bnd
dist = np.minimum(np.abs(X - Xmin), np.abs(Xmax - X))
def lin_func(x):
if bnd == 0:
            return x*0.0 + 1.0  # constant penalty of 1.0 when no border band is requested (clumsy, but it works)
else:
return (1.0 - level)*np.minimum(x, bnd_abs)/bnd_abs + level
return lin_func(dist)
def f_penal_2D(X, Y, bnd, level=0.0):
return f_penal(X, bnd=bnd, level=level)*f_penal(Y, bnd=bnd, level=level)
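# Illustrative values for the border penalty above (verified by hand, not part
# of the original module): with bnd=0.2 and level=0.3,
#   xx = yy = np.linspace(0.0, 1.0, 11)
#   w = f_penal_2D(xx, yy, bnd=0.2, level=0.3)
# gives w = 0.09 (= 0.3 * 0.3) at the corners and 1.0 once both coordinates are
# at least bnd_abs away from their respective borders.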
def norm_data(X, bounds=(-1.0, 1.0), copy=True):
X = np.array(X, copy=copy).T
for i, x in enumerate(X):
# print(len(x))
min_v, max_v = np.min(x), np.max(x)
b = (bounds[0]*max_v - bounds[1]*min_v)/(max_v-min_v)
k = float(bounds[1] - bounds[0])/(max_v-min_v)
X[i] *= k
X[i] += b
return X.T
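# Minimal usage sketch (an illustration, not part of the original module):
#   norm_data(np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]]))
# maps each feature column onto [-1, 1], giving rows [-1, -1], [0, 0], [1, 1].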
def points_selection(X, max_n_pnts, min_n_pnts, cut_fun=None, penalty = None):
"""Function for selecting optimal parameters for dimentionality reduction method and for clustering.
Parameters
----------------
X: array with shape (number_of_pixels*number_of_features)
Initial data
"""
#MaxVol
res = rect_maxvol_cut(X, maxK=max_n_pnts, minK=min_n_pnts, cut_fun=cut_fun, penalty=penalty)[0]
return res
def add_coords(X=None, size=(285, 217), order='C', idx_good_mask=None):
"""
order can by 'C' or 'F'
"""
w, h = size
x_coord, y_coord = np.meshgrid(np.arange(h), np.arange(w))
if idx_good_mask is None:
idx_good_mask = np.arange(x_coord.size)
if X is None:
return np.hstack((
x_coord.flatten(order=order)[idx_good_mask, None],
y_coord.flatten(order=order)[idx_good_mask, None]))
else:
return np.hstack((np.array(X, copy=False),
x_coord.flatten(order=order)[idx_good_mask, None],
y_coord.flatten(order=order)[idx_good_mask, None]))
def gen_input(mode, data, shapes,mask):
modes = ['usual', 'normed',
'XY', 'XY_normed']
fn_X_embedded = modes[mode]
return [
lambda x: np.array(x),
lambda x: norm_data(x),
lambda x: add_coords(
x, size=shapes[0], idx_good_mask=mask),
lambda x: norm_data(gen_input(2, x, shapes, mask)[0], copy=False),
][mode](data), fn_X_embedded
def my_score(a, b):
a = np.array(a, copy=False)
b = np.array(b, copy=False)
n = len(a)
    assert len(b) == n, 'Arrays of different shapes'
m = len(a[a==b])
return float(m)/float(n)
def f_no_cut(idx, i, copy=False):
if copy:
idx = np.copy(idx)
idx[i] = 0
return idx
def f_cut_eps(idx, i, X, eps=0.1, copy=False):
if copy:
idx = np.copy(idx)
xx = X[:, -2]
yy = X[:, -1]
#idx[i] = 0
idx[(xx - xx[i])**2 + (yy-yy[i])**2 <= eps**2] = 0
return idx
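# Minimal usage sketch (an illustration, not part of the original module): with
# the last two columns of X holding (x, y) pixel coordinates,
#   flags = np.ones(3, dtype=int)
#   X_demo = np.array([[0.0, 0.0, 0.0], [0.0, 0.1, 0.0], [0.0, 3.0, 3.0]])
#   f_cut_eps(flags, 0, X_demo, eps=0.5, copy=True)
# returns [0, 0, 1]: points 0 and 1 fall inside the eps-ball around point 0.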
def calc_score(idx, X, y, to_ret_pred=True):
gnb = GaussianNB()
gnb_model = gnb.fit(X[idx], y[idx])
if to_ret_pred:
        # NOTE: extend_score is not defined in this module; it must be supplied
        # by the surrounding project for this branch to run.
        scores = extend_score(y, gnb_model.predict(X))
else:
scores = gnb_model.score(X, y)
return scores
def good_points_brute_force(idx, num, X, y):
sc = -1
cmb_good = None
for comb in combinations(idx, num):
comb = np.array(comb)
#print(comb)
sc_curr = calc_score(comb, X=X, y=y, to_ret_pred=True)
if sc_curr > sc:
sc = sc_curr
cmb_good = comb
return cmb_good, sc
def idx_to_idx(idx_big, idx):
hass = dict()
for i, elem in enumerate(idx_big):
hass[elem] = i
return np.array([hass[i] for i in idx])
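# Minimal usage sketch (an illustration, not part of the original module):
#   idx_to_idx(np.array([10, 20, 30]), np.array([30, 10]))  # -> array([2, 0])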
class MaxVolSampling():
"""
    Class to process data with MaxVol, cLHS and Random sampling
Input: DEM, terrain features
Return: Sampling points indices
"""
def __init__(self):
self.original_data = None
self.maxvol_dist = None
self.cLHS_dist = None
self.random_dist = None
self.maxvol_indices = None
self.soil_feature = None
self.num_of_points = 15
self.path_to_file_with_indices = None
self.wd = None
self.soil_data = None
self.X = None
self.lons = None
self.lats = None
self.path_to_interpolation_file = None
self.interpolation_array = None
def data_preparation(self, wd, data_m, dem_dir):
"""
        Organize the tif files into flattened vectors, remove NaN/no-data values and stack the vectors into a matrix
"""
fl_names = list(filter(lambda fl: fl.endswith('.tif'), os.listdir(wd+'/ndvi_features/')))
files = list(map(lambda x: gdal.Open(os.path.join(wd+'/ndvi_features/', x)), fl_names))
arrays = list(map(lambda x: x.ReadAsArray().flatten(), files))
shapes = [x.ReadAsArray().shape for x in files]
nodatas = list(map(lambda x: x.GetRasterBand(1).GetNoDataValue(), files))
names = list(map(lambda x: x.replace('.tif','').split('.')[0], fl_names))
if dem_dir is None:
dem_raw = gdal.Open(wd+'/dem.tif')
dem = dem_raw.ReadAsArray()
else:
dem_raw = gdal.Open(dem_dir)
dem = dem_raw.ReadAsArray()
xmin, xres, xskew, ymax, yskew, yres = dem_raw.GetGeoTransform()
xmax = xmin + (dem_raw.RasterXSize * xres)
ymin = ymax + (dem_raw.RasterYSize * yres)
boundary_box = {'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax}
dem_flat = dem.flatten()
dem_nodata = dem_raw.GetRasterBand(1).GetNoDataValue()
init_dem_shape = dem.shape
idx_nodata_0 = np.where(dem_flat == dem_nodata)[0]
arrays_no_nodatas = np.zeros((len(arrays[0])-len(idx_nodata_0), len(arrays)))
idx_dem_nodata = np.where(dem_flat == dem_nodata)[0]
idx_dem = np.where(dem_flat != dem_nodata)[0]
dem_no_nodata = np.delete(dem_flat, idx_dem_nodata)
#process with interp data
if self.path_to_interpolation_file is not None:
interpolation_raw_data = np.load(self.path_to_interpolation_file)[::-1]
flatten_interpolation = interpolation_raw_data.flatten()
interpolation_no_nan = np.delete(flatten_interpolation, np.isnan(flatten_interpolation))
self.interpolation_array = interpolation_no_nan
for i in range(len(arrays)):
idx_nodata = np.where(arrays[i] == nodatas[i])[0]
array = arrays[i].copy()
array[idx_nodata]=0
arrays_no_nodatas[:,i] = np.delete(array, idx_nodata_0)
data_arr = arrays_no_nodatas.copy()
# Prepare data
# U can normilize data, and/or add coords to it
mode = data_m # Change to 0, 1, 2 or 3
X, fn_X_embedded = gen_input(mode, data_arr, shapes, idx_dem)
self.X = X
# X = np.vstack((X, X[:1,:]))
return X, dem_flat, dem_nodata, init_dem_shape, idx_dem, boundary_box
def create_polygon(self, shape, vertices, value=1):
"""
Creates np.array with dimensions defined by shape
Fills polygon defined by vertices with ones, all other values zero"""
base_array = np.zeros(shape, dtype=float) # Initialize your array of zeros
fill = np.ones(base_array.shape) * True # Initialize boolean array defining shape fill
# Create check array for each edge segment, combine into fill array
for k in range(vertices.shape[0]):
fill = np.all([fill, self.check(vertices[k-1], vertices[k], base_array)], axis=0)
# Set all values inside polygon to one
base_array[fill] = value
return base_array,fill
def find_nearest(self, array, value):
array = np.asarray(array)
idx = np.unravel_index(np.argmin((np.abs(array - value)), axis=None), array.shape)
return array[idx], idx
def check(self, p1, p2, base_array):
"""
Uses the line defined by p1 and p2 to check array of
input indices against interpolated value
Returns boolean array, with True inside and False outside of shape
"""
idxs = np.indices(base_array.shape) # Create 3D array of indices
p1 = p1.astype(float)
p2 = p2.astype(float)
# Calculate max column idx for each row idx based on interpolated line between two points
if p1[0] == p2[0]:
max_col_idx = (idxs[0] - p1[0]) * idxs.shape[1]
sign = np.sign(p2[1] - p1[1])
else:
max_col_idx = (idxs[0] - p1[0]) / (p2[0] - p1[0]) * (p2[1] - p1[1]) + p1[1]
sign = np.sign(p2[0] - p1[0])
return idxs[1] * sign <= max_col_idx * sign
def original_soil_data(self, feature):
soil_data = self.soil_data
data = soil_data[feature]
self.original_data = np.array(data)
def dataframe_to_points(self):
dem_raw = gdal.Open('dem.tif') #('/home/apetrovskaya/maxvol_soil_sampling/src/dem.tif')
dem = dem_raw.ReadAsArray()
self.init_dem_shape = dem.shape
FEATURE = self.soil_feature
soil_data = self.soil_data
lons=soil_data['LON']
self.lons = lons
lats=soil_data['LAT']
self.lats = lats
data = soil_data[FEATURE]
self.original_data = np.array(data)
#coordinate mesh
xmin, ymin, xmax, ymax = [416949.0957, 5750852.2926,417891.8549,5751465.6945] #!!!HARDCODE
st = dem
xv = np.linspace(xmin,xmax, num=st.shape[1])
yv = np.linspace(ymax,ymin, num=st.shape[0])
coords = np.meshgrid(xv,yv)
number_of_points=len(lons)
points_idx=np.zeros((number_of_points,2))
for i in range(number_of_points):
a = self.find_nearest(coords[0],lons[i])[1][1]
b = self.find_nearest(coords[1],lats[i])[1][0]
points_idx[i,:]=[a,b]
points_idx = points_idx.astype(int)
return points_idx, data
def distr_from_voronoi(self):
points_idx,data = self.dataframe_to_points()
#add points for right simplex
points_idx_add = points_idx.copy()
for i in range(-50,self.init_dem_shape[0]+50,50):
points_idx_add = np.vstack((points_idx_add,[-50, i]))
points_idx_add = np.vstack((points_idx_add,[self.init_dem_shape[1]+50,i]))
for i in range(-50,self.init_dem_shape[1]+50,50):
points_idx_add = np.vstack((points_idx_add,[i, -50]))
points_idx_add = np.vstack((points_idx_add,[i,self.init_dem_shape[0]+50]))
# generate Voronoi tessellation
vor_add=Voronoi(points_idx_add)
# cycle to fill regions in numpy array
pol=np.zeros((self.init_dem_shape[1],self.init_dem_shape[0]))
for r in range(len(vor_add.point_region)):
region = vor_add.regions[vor_add.point_region[r]]
if not -1 in region:
value = data[r]
polygon = [vor_add.vertices[i] for i in region]
polygon = np.asarray(polygon)
hull = ConvexHull(polygon)
_, fill = self.create_polygon((self.init_dem_shape[1],self.init_dem_shape[0]),polygon[hull.vertices][::-1])
pol[fill] = value
pol[pol<min(data)]=min(data)
polygons_in_array=pol.T
self.voronoi_map = polygons_in_array.flatten()
return self.voronoi_map
def i_am_maxvol_function(self):
        dist_pts = 0.1  # minimum distance between selected points, in the normalized coordinate space
wd = self.wd
data_m=3
dem_dir = None
max_n_pnts = self.num_of_points
min_n_pnts = self.num_of_points
X, dem_flat, dem_nodata, init_dem_shape, idx_dem, boundary_box = self.data_preparation(wd, data_m, dem_dir)
#function for distance between points
f_cut = lambda idx, i : f_cut_eps(idx, i, X=X, eps = dist_pts)
#function for distance from border
# f_penal = f_penal_2D(X = X[:, -2], Y = X[:, -1], bnd = 0.3, level = 0.3)
f_penal = f_penal_2D(X = X[:, -2], Y = X[:, -1], bnd = 0.2, level = 0.3) #Change to 0.2
result = points_selection(X, max_n_pnts = max_n_pnts, min_n_pnts = min_n_pnts, cut_fun = f_cut, penalty = f_penal)
#coordinates
# xmin, ymin, xmax, ymax = [37.7928399,51.90236556, 37.8064010,51.90774268]
xmin = boundary_box['xmin']
xmax = boundary_box['xmax']
ymin = boundary_box['ymin']
ymax = boundary_box['ymax']
dem_flat_img = dem_flat.copy()-np.min(dem_flat)
dem_flat_img[np.where(dem_flat == dem_nodata)] = float('NaN')
st = dem_flat_img.reshape(init_dem_shape)
xv = np.linspace(xmin,xmax, num=st.shape[1])
yv = np.linspace(ymax,ymin, num=st.shape[0])
coords = np.meshgrid(xv,yv)
mask = idx_dem
#select corresponding points by indecies
y_c,x_c = coords[0].flatten()[mask, None],coords[1].flatten()[mask, None]
y_idx, x_idx = y_c[result],x_c[result]
coord_idx = np.hstack((y_idx,x_idx))
self.maxvol_indices = result
return self.maxvol_indices
def i_am_clhs(self, num_iter):
n_pnts = self.num_of_points
#cLHS
sampled=cl.clhs(self.X[:,:-2], n_pnts, max_iterations=num_iter, progress=False)
self.cLHS_indices = sampled['sample_indices']
return self.cLHS_indices
def i_am_random(self):
random_dist = np.random.randint(low=0,high=self.X.shape[0],size=self.num_of_points)
return random_dist
if __name__ == "__main__":
SAR = MaxVolSampling() | [
"numpy.load",
"numpy.abs",
"numpy.argmax",
"numpy.empty",
"numpy.ones",
"scipy.spatial.Voronoi",
"numpy.isnan",
"numpy.random.randint",
"numpy.arange",
"numpy.linalg.norm",
"os.path.join",
"numpy.meshgrid",
"scipy.linalg.get_blas_funcs",
"numpy.copy",
"clhs.clhs",
"numpy.max",
"numpy... | [((2096, 2118), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (2104, 2118), True, 'import numpy as np\n'), ((2140, 2211), 'maxvolpy.maxvol.maxvol', 'maxvol', (['A'], {'tol': '(1)', 'max_iters': 'start_maxvol_iters', 'top_k_index': 'top_k_index'}), '(A, tol=1, max_iters=start_maxvol_iters, top_k_index=top_k_index)\n', (2146, 2211), False, 'from maxvolpy.maxvol import maxvol\n'), ((2354, 2374), 'numpy.asfortranarray', 'np.asfortranarray', (['C'], {}), '(C)\n', (2371, 2374), True, 'import numpy as np\n'), ((2573, 2596), 'numpy.argmax', 'np.argmax', (['row_norm_sqr'], {}), '(row_norm_sqr)\n', (2582, 2596), True, 'import numpy as np\n'), ((3629, 3660), 'numpy.empty', 'np.empty', (['(n, n)'], {'dtype': 'X.dtype'}), '((n, n), dtype=X.dtype)\n', (3637, 3660), True, 'import numpy as np\n'), ((3823, 3832), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (3829, 3832), True, 'import numpy as np\n'), ((3844, 3853), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (3850, 3853), True, 'import numpy as np\n'), ((6242, 6265), 'numpy.array', 'np.array', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (6250, 6265), True, 'import numpy as np\n'), ((6274, 6297), 'numpy.array', 'np.array', (['b'], {'copy': '(False)'}), '(b, copy=False)\n', (6282, 6297), True, 'import numpy as np\n'), ((7477, 7509), 'numpy.array', 'np.array', (['[hass[i] for i in idx]'], {}), '([hass[i] for i in idx])\n', (7485, 7509), True, 'import numpy as np\n'), ((696, 708), 'numpy.copy', 'np.copy', (['idx'], {}), '(idx)\n', (703, 708), True, 'import numpy as np\n'), ((814, 826), 'numpy.copy', 'np.copy', (['idx'], {}), '(idx)\n', (821, 826), True, 'import numpy as np\n'), ((2006, 2037), 'numpy.ones', 'np.ones', (['top_k_index'], {'dtype': 'int'}), '(top_k_index, dtype=int)\n', (2013, 2037), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.copy', 'np.copy', (['penalty'], {}), '(penalty)\n', (2072, 2081), True, 'import numpy as np\n'), ((2710, 2737), 'scipy.linalg.get_blas_funcs', 'get_blas_funcs', (['"""geru"""', '[C]'], {}), "('geru', [C])\n", (2724, 2737), False, 'from scipy.linalg import solve_triangular, get_lapack_funcs, get_blas_funcs\n'), ((3528, 3552), 'numpy.eye', 'np.eye', (['K'], {'dtype': 'C.dtype'}), '(K, dtype=C.dtype)\n', (3534, 3552), True, 'import numpy as np\n'), ((3908, 3924), 'numpy.abs', 'np.abs', (['(X - Xmin)'], {}), '(X - Xmin)\n', (3914, 3924), True, 'import numpy as np\n'), ((3926, 3942), 'numpy.abs', 'np.abs', (['(Xmax - X)'], {}), '(Xmax - X)\n', (3932, 3942), True, 'import numpy as np\n'), ((4359, 4381), 'numpy.array', 'np.array', (['X'], {'copy': 'copy'}), '(X, copy=copy)\n', (4367, 4381), True, 'import numpy as np\n'), ((5301, 5313), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (5310, 5313), True, 'import numpy as np\n'), ((5315, 5327), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (5324, 5327), True, 'import numpy as np\n'), ((5393, 5416), 'numpy.arange', 'np.arange', (['x_coord.size'], {}), '(x_coord.size)\n', (5402, 5416), True, 'import numpy as np\n'), ((6483, 6495), 'numpy.copy', 'np.copy', (['idx'], {}), '(idx)\n', (6490, 6495), True, 'import numpy as np\n'), ((6601, 6613), 'numpy.copy', 'np.copy', (['idx'], {}), '(idx)\n', (6608, 6613), True, 'import numpy as np\n'), ((7132, 7146), 'numpy.array', 'np.array', (['comb'], {}), '(comb)\n', (7140, 7146), True, 'import numpy as np\n'), ((9812, 9847), 'numpy.delete', 'np.delete', (['dem_flat', 'idx_dem_nodata'], {}), '(dem_flat, idx_dem_nodata)\n', (9821, 9847), True, 'import numpy as np\n'), ((11097, 11125), 
'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (11105, 11125), True, 'import numpy as np\n'), ((11643, 11660), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (11653, 11660), True, 'import numpy as np\n'), ((12051, 12079), 'numpy.indices', 'np.indices', (['base_array.shape'], {}), '(base_array.shape)\n', (12061, 12079), True, 'import numpy as np\n'), ((12738, 12752), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (12746, 12752), True, 'import numpy as np\n'), ((12817, 12837), 'gdal.Open', 'gdal.Open', (['"""dem.tif"""'], {}), "('dem.tif')\n", (12826, 12837), False, 'import gdal\n'), ((13235, 13249), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13243, 13249), True, 'import numpy as np\n'), ((13404, 13444), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax'], {'num': 'st.shape[1]'}), '(xmin, xmax, num=st.shape[1])\n', (13415, 13444), True, 'import numpy as np\n'), ((13457, 13497), 'numpy.linspace', 'np.linspace', (['ymax', 'ymin'], {'num': 'st.shape[0]'}), '(ymax, ymin, num=st.shape[0])\n', (13468, 13497), True, 'import numpy as np\n'), ((13514, 13533), 'numpy.meshgrid', 'np.meshgrid', (['xv', 'yv'], {}), '(xv, yv)\n', (13525, 13533), True, 'import numpy as np\n'), ((13588, 13619), 'numpy.zeros', 'np.zeros', (['(number_of_points, 2)'], {}), '((number_of_points, 2))\n', (13596, 13619), True, 'import numpy as np\n'), ((14545, 14568), 'scipy.spatial.Voronoi', 'Voronoi', (['points_idx_add'], {}), '(points_idx_add)\n', (14552, 14568), False, 'from scipy.spatial import voronoi_plot_2d, Voronoi\n'), ((14629, 14687), 'numpy.zeros', 'np.zeros', (['(self.init_dem_shape[1], self.init_dem_shape[0])'], {}), '((self.init_dem_shape[1], self.init_dem_shape[0]))\n', (14637, 14687), True, 'import numpy as np\n'), ((16610, 16650), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax'], {'num': 'st.shape[1]'}), '(xmin, xmax, num=st.shape[1])\n', (16621, 16650), True, 'import numpy as np\n'), ((16663, 16703), 'numpy.linspace', 'np.linspace', (['ymax', 'ymin'], {'num': 'st.shape[0]'}), '(ymax, ymin, num=st.shape[0])\n', (16674, 16703), True, 'import numpy as np\n'), ((16720, 16739), 'numpy.meshgrid', 'np.meshgrid', (['xv', 'yv'], {}), '(xv, yv)\n', (16731, 16739), True, 'import numpy as np\n'), ((16961, 16986), 'numpy.hstack', 'np.hstack', (['(y_idx, x_idx)'], {}), '((y_idx, x_idx))\n', (16970, 16986), True, 'import numpy as np\n'), ((17162, 17234), 'clhs.clhs', 'cl.clhs', (['self.X[:, :-2]', 'n_pnts'], {'max_iterations': 'num_iter', 'progress': '(False)'}), '(self.X[:, :-2], n_pnts, max_iterations=num_iter, progress=False)\n', (17169, 17234), True, 'import clhs as cl\n'), ((17385, 17456), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.X.shape[0]', 'size': 'self.num_of_points'}), '(low=0, high=self.X.shape[0], size=self.num_of_points)\n', (17402, 17456), True, 'import numpy as np\n'), ((901, 917), 'numpy.abs', 'np.abs', (['(X - X[i])'], {}), '(X - X[i])\n', (907, 917), True, 'import numpy as np\n'), ((1407, 1430), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (1416, 1430), True, 'import numpy as np\n'), ((1434, 1458), 'numpy.eye', 'np.eye', (['N'], {'dtype': 'A.dtype'}), '(N, dtype=A.dtype)\n', (1440, 1458), True, 'import numpy as np\n'), ((2764, 2790), 'scipy.linalg.get_blas_funcs', 'get_blas_funcs', (['"""ger"""', '[C]'], {}), "('ger', [C])\n", (2778, 2790), False, 'from scipy.linalg import solve_triangular, get_lapack_funcs, get_blas_funcs\n'), ((3751, 3764), 'numpy.abs', 'np.abs', 
(['(x - y)'], {}), '(x - y)\n', (3757, 3764), True, 'import numpy as np\n'), ((4461, 4470), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4467, 4470), True, 'import numpy as np\n'), ((4472, 4481), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4478, 4481), True, 'import numpy as np\n'), ((8976, 9002), 'gdal.Open', 'gdal.Open', (["(wd + '/dem.tif')"], {}), "(wd + '/dem.tif')\n", (8985, 9002), False, 'import gdal\n'), ((9078, 9096), 'gdal.Open', 'gdal.Open', (['dem_dir'], {}), '(dem_dir)\n', (9087, 9096), False, 'import gdal\n'), ((9548, 9580), 'numpy.where', 'np.where', (['(dem_flat == dem_nodata)'], {}), '(dem_flat == dem_nodata)\n', (9556, 9580), True, 'import numpy as np\n'), ((9697, 9729), 'numpy.where', 'np.where', (['(dem_flat == dem_nodata)'], {}), '(dem_flat == dem_nodata)\n', (9705, 9729), True, 'import numpy as np\n'), ((9751, 9783), 'numpy.where', 'np.where', (['(dem_flat != dem_nodata)'], {}), '(dem_flat != dem_nodata)\n', (9759, 9783), True, 'import numpy as np\n'), ((10463, 10493), 'numpy.delete', 'np.delete', (['array', 'idx_nodata_0'], {}), '(array, idx_nodata_0)\n', (10472, 10493), True, 'import numpy as np\n'), ((11176, 11201), 'numpy.ones', 'np.ones', (['base_array.shape'], {}), '(base_array.shape)\n', (11183, 11201), True, 'import numpy as np\n'), ((12376, 12398), 'numpy.sign', 'np.sign', (['(p2[1] - p1[1])'], {}), '(p2[1] - p1[1])\n', (12383, 12398), True, 'import numpy as np\n'), ((12520, 12542), 'numpy.sign', 'np.sign', (['(p2[0] - p1[0])'], {}), '(p2[0] - p1[0])\n', (12527, 12542), True, 'import numpy as np\n'), ((14152, 14189), 'numpy.vstack', 'np.vstack', (['(points_idx_add, [-50, i])'], {}), '((points_idx_add, [-50, i]))\n', (14161, 14189), True, 'import numpy as np\n'), ((14218, 14279), 'numpy.vstack', 'np.vstack', (['(points_idx_add, [self.init_dem_shape[1] + 50, i])'], {}), '((points_idx_add, [self.init_dem_shape[1] + 50, i]))\n', (14227, 14279), True, 'import numpy as np\n'), ((14364, 14401), 'numpy.vstack', 'np.vstack', (['(points_idx_add, [i, -50])'], {}), '((points_idx_add, [i, -50]))\n', (14373, 14401), True, 'import numpy as np\n'), ((14430, 14491), 'numpy.vstack', 'np.vstack', (['(points_idx_add, [i, self.init_dem_shape[0] + 50])'], {}), '((points_idx_add, [i, self.init_dem_shape[0] + 50]))\n', (14439, 14491), True, 'import numpy as np\n'), ((16459, 16475), 'numpy.min', 'np.min', (['dem_flat'], {}), '(dem_flat)\n', (16465, 16475), True, 'import numpy as np\n'), ((16497, 16529), 'numpy.where', 'np.where', (['(dem_flat == dem_nodata)'], {}), '(dem_flat == dem_nodata)\n', (16505, 16529), True, 'import numpy as np\n'), ((5630, 5653), 'numpy.array', 'np.array', (['X'], {'copy': '(False)'}), '(X, copy=False)\n', (5638, 5653), True, 'import numpy as np\n'), ((8481, 8515), 'os.listdir', 'os.listdir', (["(wd + '/ndvi_features/')"], {}), "(wd + '/ndvi_features/')\n", (8491, 8515), False, 'import os\n'), ((9977, 10017), 'numpy.load', 'np.load', (['self.path_to_interpolation_file'], {}), '(self.path_to_interpolation_file)\n', (9984, 10017), True, 'import numpy as np\n'), ((10162, 10193), 'numpy.isnan', 'np.isnan', (['flatten_interpolation'], {}), '(flatten_interpolation)\n', (10170, 10193), True, 'import numpy as np\n'), ((10318, 10351), 'numpy.where', 'np.where', (['(arrays[i] == nodatas[i])'], {}), '(arrays[i] == nodatas[i])\n', (10326, 10351), True, 'import numpy as np\n'), ((11703, 11724), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (11709, 11724), True, 'import numpy as np\n'), ((14956, 14975), 'numpy.asarray', 'np.asarray', 
(['polygon'], {}), '(polygon)\n', (14966, 14975), True, 'import numpy as np\n'), ((14999, 15018), 'scipy.spatial.ConvexHull', 'ConvexHull', (['polygon'], {}), '(polygon)\n', (15009, 15018), False, 'from scipy.spatial import ConvexHull, convex_hull_plot_2d\n'), ((2467, 2490), 'numpy.linalg.norm', 'np.linalg.norm', (['C[i]', '(2)'], {}), '(C[i], 2)\n', (2481, 2490), True, 'import numpy as np\n'), ((4117, 4139), 'numpy.minimum', 'np.minimum', (['x', 'bnd_abs'], {}), '(x, bnd_abs)\n', (4127, 4139), True, 'import numpy as np\n'), ((5978, 5989), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5986, 5989), True, 'import numpy as np\n'), ((8561, 8600), 'os.path.join', 'os.path.join', (["(wd + '/ndvi_features/')", 'x'], {}), "(wd + '/ndvi_features/', x)\n", (8573, 8600), False, 'import os\n')] |
# coding: utf-8
# Distributed under the terms of the MIT License.
from .model import AppModel
from ababe.stru.element import Specie
from ababe.stru.scaffold import GeneralCell
from ababe.stru.sogen import OccupyGenerator
from ababe.io.io import GeneralIO
from ababe.stru.restriction import MinDistanceRestriction
import numpy as np
import os
import shutil
class App(AppModel):
def __init__(self, infile, comment, element, speckle, nspeckle, trs, refined, outmode, mpr):
# read comment & zoom from setting file first
# if not exist, read from cmd args, then default
self.cell = GeneralIO.from_file(infile)
self.comment = comment or self.cell.comment
self.element = element
# Get number and index of target element
num = self.cell.numbers
if element is None:
tgt_ele = int(num[1])
else:
tgt_ele = Specie(element).Z
tgt_ele_index = np.where(num == tgt_ele)[0]
if speckle is None:
self.speckle = Specie('G')
else:
self.speckle = Specie(speckle)
# self.ele for function all-speckle-gen-of-ele in run
self.ele = Specie.from_num(tgt_ele)
if nspeckle is None:
            # If not given, default to one less than the number of target-element sites
self.nmax = tgt_ele_index.size - 1
else:
self.nmax = nspeckle
        # if no restriction is given, apply none
if trs != ():
self.tr = trs[0]
else:
self.tr = None
self.refined = refined
self.outmode = outmode
self.mpr = mpr
def run(self):
        # Create the directory that will contain the output structure files
import random
import string
rd_suffix = ''.join(random.choices(string.ascii_uppercase
+ string.digits, k=4))
working_path = os.getcwd()
out_dir = os.path.join(working_path,
'STRUCTURES_{0:}_{1:}'.format(self.comment,
rd_suffix))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
else:
shutil.rmtree(out_dir)
os.makedirs(out_dir)
ogg = OccupyGenerator(self.cell)
gg = ogg.all_speckle_gen_of_ele(self.nmax, self.ele, self.speckle)
if self.tr is not None:
tr = (Specie(self.tr[0]), self.tr[1])
applied_restriction = MinDistanceRestriction(tr)
for i, outer_gen in enumerate(gg):
# print("Processing: {0:3}s substitue {1:2d}...".format(speckle, i+1))
for n_count, c in enumerate(outer_gen):
if self.mpr:
if self.tr is not None:
condition = c.is_primitive() and applied_restriction.is_satisfied(c)
else:
condition = c.is_primitive()
else:
if self.tr is not None:
condition = applied_restriction.is_satisfied(c)
else:
condition = True
if condition:
if self.refined:
c = c.get_refined_pcell()
out = GeneralIO(c)
f_suffix = ''.join(random.choices(string.ascii_uppercase
+ string.digits, k=4))
ofname = "STRUCTURE_{:}_{:}.{:}".format(c.comment, f_suffix, self.outmode)
lastpath = os.path.join(out_dir, ofname)
out.write_file(lastpath)
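# Illustrative usage sketch (hypothetical file names and options, not part of
# the original module): substitute up to three 'G' speckles for Ti sites with a
# 2.0 Angstrom minimum-distance restriction, writing refined VASP-format output.
#   app = App('input.vasp', comment='demo', element='Ti', speckle='G',
#             nspeckle=3, trs=(('G', 2.0),), refined=True, outmode='vasp',
#             mpr=True)
#   app.run()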
| [
"os.makedirs",
"os.getcwd",
"ababe.stru.element.Specie.from_num",
"random.choices",
"os.path.exists",
"ababe.io.io.GeneralIO",
"ababe.stru.element.Specie",
"ababe.io.io.GeneralIO.from_file",
"numpy.where",
"ababe.stru.restriction.MinDistanceRestriction",
"os.path.join",
"ababe.stru.sogen.Occup... | [((595, 622), 'ababe.io.io.GeneralIO.from_file', 'GeneralIO.from_file', (['infile'], {}), '(infile)\n', (614, 622), False, 'from ababe.io.io import GeneralIO\n'), ((1163, 1187), 'ababe.stru.element.Specie.from_num', 'Specie.from_num', (['tgt_ele'], {}), '(tgt_ele)\n', (1178, 1187), False, 'from ababe.stru.element import Specie\n'), ((1867, 1878), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1876, 1878), False, 'import os\n'), ((2247, 2273), 'ababe.stru.sogen.OccupyGenerator', 'OccupyGenerator', (['self.cell'], {}), '(self.cell)\n', (2262, 2273), False, 'from ababe.stru.sogen import OccupyGenerator\n'), ((928, 952), 'numpy.where', 'np.where', (['(num == tgt_ele)'], {}), '(num == tgt_ele)\n', (936, 952), True, 'import numpy as np\n'), ((1012, 1023), 'ababe.stru.element.Specie', 'Specie', (['"""G"""'], {}), "('G')\n", (1018, 1023), False, 'from ababe.stru.element import Specie\n'), ((1065, 1080), 'ababe.stru.element.Specie', 'Specie', (['speckle'], {}), '(speckle)\n', (1071, 1080), False, 'from ababe.stru.element import Specie\n'), ((1740, 1799), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(4)'}), '(string.ascii_uppercase + string.digits, k=4)\n', (1754, 1799), False, 'import random\n'), ((2092, 2115), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (2106, 2115), False, 'import os\n'), ((2129, 2149), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2140, 2149), False, 'import os\n'), ((2211, 2231), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2222, 2231), False, 'import os\n'), ((2466, 2492), 'ababe.stru.restriction.MinDistanceRestriction', 'MinDistanceRestriction', (['tr'], {}), '(tr)\n', (2488, 2492), False, 'from ababe.stru.restriction import MinDistanceRestriction\n'), ((886, 901), 'ababe.stru.element.Specie', 'Specie', (['element'], {}), '(element)\n', (892, 901), False, 'from ababe.stru.element import Specie\n'), ((2400, 2418), 'ababe.stru.element.Specie', 'Specie', (['self.tr[0]'], {}), '(self.tr[0])\n', (2406, 2418), False, 'from ababe.stru.element import Specie\n'), ((3266, 3278), 'ababe.io.io.GeneralIO', 'GeneralIO', (['c'], {}), '(c)\n', (3275, 3278), False, 'from ababe.io.io import GeneralIO\n'), ((3560, 3589), 'os.path.join', 'os.path.join', (['out_dir', 'ofname'], {}), '(out_dir, ofname)\n', (3572, 3589), False, 'import os\n'), ((3319, 3378), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(4)'}), '(string.ascii_uppercase + string.digits, k=4)\n', (3333, 3378), False, 'import random\n')] |
import numpy as np
def get_box_weights(centroid, n_pix, shape, cols=None):
""" Return the weights of a box aperture given the centroid and the width of
the box in pixels. All pixels will have the same weights except at the ends
of the box aperture.
    :param centroid: Position of the centroid (in rows). Same shape as `cols`.
    :param n_pix: Width of the extraction box in pixels.
    :param shape: Shape of the output image, (n_row, n_column).
    :param cols: Column indices of good columns. Used if the centroid is defined
        for specific columns or a subrange of columns.
    :type centroid: array[float]
    :type n_pix: float
    :type shape: Tuple(int, int)
    :type cols: array[int]
:returns: weights - An array of pixel weights to use with the box extraction.
:rtype: array[float]
"""
nrows, ncols = shape
# Use all columns if not specified
if cols is None:
cols = np.arange(ncols)
# Row centers of all pixels.
rows = np.indices((nrows, len(cols)))[0]
    # Pixels that are entirely inside the box are set to one.
cond = (rows <= (centroid - 0.5 + n_pix / 2))
cond &= ((centroid + 0.5 - n_pix / 2) <= rows)
weights = cond.astype(float)
# Fractional weights at the upper bound.
cond = (centroid - 0.5 + n_pix / 2) < rows
cond &= (rows < (centroid + 0.5 + n_pix / 2))
weights[cond] = (centroid + n_pix / 2 - (rows - 0.5))[cond]
# Fractional weights at the lower bound.
cond = (rows < (centroid + 0.5 - n_pix / 2))
cond &= ((centroid - 0.5 - n_pix / 2) < rows)
weights[cond] = (rows + 0.5 - (centroid - n_pix / 2))[cond]
# Return with the specified shape with zeros where the box is not defined.
out = np.zeros(shape, dtype=float)
out[:, cols] = weights
return out
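# Illustrative output of the function above (verified by hand, not part of the
# original module): a 3-pixel box on a 7-row detector, centred on row 3 in all
# five columns,
#   w = get_box_weights(np.full(5, 3.0), n_pix=3, shape=(7, 5))
# gives w[:, 0] == [0, 0, 1, 1, 1, 0, 0], i.e. unit weights on rows 2-4 and
# fractional weights only when the box edges fall inside a pixel.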
# TODO what about missing flux due to bad pixels?
def box_extract(scidata, scierr, scimask, box_weights, cols=None):
""" Perform a box extraction.
:param scidata: 2d array of shape (n_row, n_columns)
scidata
:param scierr: 2d array of shape (n_row, n_columns)
uncertainty map
:param scimask: 2d array, boolean, same shape as data
masked pixels
:param box_weights: 2d array, same shape as data
pre-computed weights for box extraction.
:param cols: 1d-array, integer
Which columns to extract
:type scidata: array[float]
:type scierr: array[float]
:type scimask: array[bool]
:type box_weights: array[float]
:type cols: array[int]
:returns: cols, flux, flux_var - The indices of the extracted columns, the
flux in each column, and the variance of each column.
:rtype: Tuple(array[int], array[float], array[float])
"""
nrows, ncols = scidata.shape
# Use all columns if not specified
if cols is None:
cols = np.arange(ncols)
# Keep only required columns and make a copy.
data = scidata[:, cols].copy()
error = scierr[:, cols].copy()
mask = scimask[:, cols].copy()
box_weights = box_weights[:, cols].copy()
# Check that all invalid values are masked.
if not np.isfinite(data[~mask]).all():
message = 'scidata contains un-masked invalid values.'
raise ValueError(message)
if not np.isfinite(error[~mask]).all():
message = 'scierr contains un-masked invalid values.'
raise ValueError(message)
# Set the weights of masked pixels to zero.
box_weights[mask] = 0.
# Extract total flux (sum over columns).
flux = np.nansum(box_weights*data, axis=0)
npix = np.nansum(box_weights, axis=0)
# Extract flux error (sum of variances).
flux_var = np.nansum(box_weights*error**2, axis=0)
flux_err = np.sqrt(flux_var)
# Set empty columns to NaN.
flux = np.where(npix > 0, flux, np.nan)
flux_err = np.where(npix > 0, flux_err, np.nan)
return cols, flux, flux_err, npix
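# Minimal usage sketch (an illustration, not part of the original module):
# extract a flat trace of unit total flux with the box weights defined above.
if __name__ == '__main__':
    data_demo = np.ones((7, 5)) / 3.0
    err_demo = np.full((7, 5), 0.1)
    mask_demo = np.zeros((7, 5), dtype=bool)
    weights_demo = get_box_weights(np.full(5, 3.0), n_pix=3, shape=(7, 5))
    cols_d, flux_d, flux_err_d, npix_d = box_extract(data_demo, err_demo,
                                                      mask_demo, weights_demo)
    print(flux_d)  # ~1.0 in every column (3 pixels x 1/3 flux each)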
def main():
return
if __name__ == '__main__':
main()
| [
"numpy.nansum",
"numpy.zeros",
"numpy.isfinite",
"numpy.where",
"numpy.arange",
"numpy.sqrt"
] | [((1731, 1759), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1739, 1759), True, 'import numpy as np\n'), ((3518, 3555), 'numpy.nansum', 'np.nansum', (['(box_weights * data)'], {'axis': '(0)'}), '(box_weights * data, axis=0)\n', (3527, 3555), True, 'import numpy as np\n'), ((3565, 3595), 'numpy.nansum', 'np.nansum', (['box_weights'], {'axis': '(0)'}), '(box_weights, axis=0)\n', (3574, 3595), True, 'import numpy as np\n'), ((3657, 3700), 'numpy.nansum', 'np.nansum', (['(box_weights * error ** 2)'], {'axis': '(0)'}), '(box_weights * error ** 2, axis=0)\n', (3666, 3700), True, 'import numpy as np\n'), ((3712, 3729), 'numpy.sqrt', 'np.sqrt', (['flux_var'], {}), '(flux_var)\n', (3719, 3729), True, 'import numpy as np\n'), ((3774, 3806), 'numpy.where', 'np.where', (['(npix > 0)', 'flux', 'np.nan'], {}), '(npix > 0, flux, np.nan)\n', (3782, 3806), True, 'import numpy as np\n'), ((3822, 3858), 'numpy.where', 'np.where', (['(npix > 0)', 'flux_err', 'np.nan'], {}), '(npix > 0, flux_err, np.nan)\n', (3830, 3858), True, 'import numpy as np\n'), ((932, 948), 'numpy.arange', 'np.arange', (['ncols'], {}), '(ncols)\n', (941, 948), True, 'import numpy as np\n'), ((2836, 2852), 'numpy.arange', 'np.arange', (['ncols'], {}), '(ncols)\n', (2845, 2852), True, 'import numpy as np\n'), ((3115, 3139), 'numpy.isfinite', 'np.isfinite', (['data[~mask]'], {}), '(data[~mask])\n', (3126, 3139), True, 'import numpy as np\n'), ((3256, 3281), 'numpy.isfinite', 'np.isfinite', (['error[~mask]'], {}), '(error[~mask])\n', (3267, 3281), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Code to generate WFSS dispersed seed images, starting from a set of imaging seed images
(potentially with padding) and a JWST WFSS GRISMCONF configuration file.
"""
import os
from astropy.io import fits
import numpy as np
from .observations.observations import observation as Gsim_observation
from multiprocessing import cpu_count
class Grism_seed():
def __init__(self,image_seeds,cross_filter,mode,config_path=".",extrapolate_SED=False,SED_file=None,instrument="NIRCAM",max_cpu=None, SBE_save=None, renormalize=True, resample=False):
"""A class for a grism simulation
Attributes
----------
        image_seeds: list
            A list of imaging seed images (FITS files)
cross_filter: str
A string containing the name of a direct filter
mode: str
A string containing either R or C
config_path: str
            A string pointing to the directory containing the GRISMCONF configuration file
        extrapolate_SED: bool
If set to True, the objects' SED will be extrapolated if needed
SED_file: str
A string containing the name of an HDF5 file containing the spectra of each source
instrument: str
A string containing the name of the instrument (e.g. NIRCAM)
max_cpu: int
            An integer containing the number of CPUs to use in the multiprocessing pool when dispersing
SBE_save: str
A string containing the name of an output HDF5 file which will contain simulated 2D stamps of each source
        renormalize: bool
            Whether to renormalize the input data to unity over the segmentation map area when using an input spectrum.
        resample: bool
            If True, the disperser will first smooth and resample input spectra appropriately to match the disperser resolution.
Methods
-------
        observation(self, orders=None, max_split=-1000, ID=0)
        disperse(self, orders=None, cache=False, trans=None)
        disperse_background_1D(self, background)
        finalize(self, Back=None, BackLevel=None, tofits=None)
        saveSingleFits(self, name)
"""
config = os.path.join(config_path,"%s_%s_%s.conf" % (instrument,cross_filter,mode))
self.config = config
self.image_seeds = image_seeds
self.cross_filter = cross_filter
self.mode = mode
self.config_path = config_path
if max_cpu is None:
max_cpu = cpu_count() - 1
self.max_cpu = max_cpu
self.SBE_save = SBE_save
        if self.SBE_save is not None:
print("Will output to ", self.SBE_save)
if os.path.isfile(self.SBE_save):
os.unlink(self.SBE_save)
# Get information about input padding. We use the first image seed for this, just like for the segmentation info.
h = fits.open(image_seeds[0])[0].header
self.xstart = int(h["NOMXSTRT"])
self.xend = int(h["NOMXEND"])
self.ystart = int(h["NOMYSTRT"])
self.yend = int(h["NOMYEND"])
# Get segmentation info, from the first image seed.
self.seg_data = fits.open(image_seeds[0])[2].data
self.extrapolate_SED = extrapolate_SED
self.SED_file = SED_file
self.renormalize = renormalize
self.resample = resample
def observation(self,orders=None,max_split=-1000,ID=0):
"""Sets up an observation.
Parameters
----------
orders : list
            A list of strings containing the names of the orders to disperse
max_split : int
Maximum number of pixels to disperse at once (not currently used)
ID : int
            Specific object ID to disperse. Set to 0 (default) to disperse all available objects
"""
self.this_one = {}
# If orders are not passed, we get them from the config file
        if orders is None:
import grismconf
C = grismconf.Config(self.config)
print("orders:",C.orders)
self.orders = C.orders
else:
self.orders = orders
for order in self.orders:
boundaries = [self.xstart,self.xend,self.ystart,self.yend]
self.this_one[order] = Gsim_observation(self.image_seeds,self.seg_data,self.config,order=order,max_split=max_split,extrapolate_SED=self.extrapolate_SED,SED_file=self.SED_file,max_cpu=self.max_cpu,ID=ID, SBE_save=self.SBE_save,boundaries=boundaries,renormalize=self.renormalize,resample=self.resample)
#self.this_one[order].disperse_all()
def disperse(self,orders=None,cache=False,trans=None):
"""Run the disperser.
Parameters
----------
orders: list
Optional list containing the name of the orders to disperse
cache: bool
If set to True, the dispersion tables are cached and will be used on subsequent calls.
"""
        if orders is None:
orders = self.orders
print("Dispersing orders ", orders)
for order in orders:
#print("Dispersing order ",order)
if self.this_one[order].cache:
self.this_one[order].disperse_all_from_cache(trans=trans)
else:
self.this_one[order].disperse_all(cache=cache)
def disperse_background_1D(self,background):
"""Produces a dispersed 2D image of the background spectrum contained in the fits file background, meant to be the
output of thr jwst_background module. This background is dispersed either in the row or column direction, depending on the
dispersion, and the result is tiled to produce a full 2D image. All orders are generated and added up.
Parameters
----------
background: 2D numpy array [lambda values,flux values]
A 2D numpy array containing the spectrum of the background to disperse.
            The wavelengths should be in micron and the fluxes in MJy/sr, as produced by
            the jwst_background package
output: numpy 2D array
A 2D array containing the model background which can be fed back into finalize()
"""
bck = 0.
for order in self.orders:
print("Computing dispersed background for order ",order)
bck += self.this_one[order].disperse_background_1D(background)
# fits.writeto("WFSS_background.fits",bck,overwrite=True)
return bck
def finalize(self,Back=None,BackLevel=None,tofits=None):
""" Produces a 2D dispersed image and add the appropriate background
Parameters
----------
tofits: str
Name of a fits file to write the simulation to. Default is set to None
Back: str
Name of a fits file containing an image of the background in e-/s in extension 0
If None, the file listed in the config file is used.
BackLevel: float
Renormalization factor for the background image. Renormalization is done by multiplying by BackLevel/np.median(Back)
"""
# Initialize final image with the background estimate
final = 0.
if (Back is None) and (BackLevel is not None):
# Use pre-computed background from config file, scaled by BackLevel
import grismconf
bck_file = grismconf.Config(self.config).BCK
print("Adding pre-computed 2D dispersed background ",bck_file,"scaled to",BackLevel)
final = fits.open(bck_file)[1].data * BackLevel
if (Back is None) and (BackLevel is None):
# Use no background
print("No background added")
final = 0.
if (type(Back)==np.ndarray) and (BackLevel is not None):
# Use a passed background and scale its median to BackLevel
print("adding passed background array scaled to",BackLevel)
final = Back/np.median(Back) * BackLevel
if (type(Back)==np.ndarray) and (BackLevel is None):
# Use a passed background as is
print("adding passed background array as is")
final = Back
        if not ((Back is None) and (BackLevel is None)) and (tofits is not None):
# Save the background image to a fits file
hprime = fits.PrimaryHDU()
himg = fits.ImageHDU(final)
himg.header['EXTNAME'] = 'BACKGRND'
himg.header['UNITS'] = 'e/s'
if BackLevel is not None:
himg.header['BackLevel'] = BackLevel
hlist = fits.HDUList([hprime, himg])
hlist.writeto(tofits, overwrite=True)
for order in self.orders:
print("Adding contribution from order ",order)
try:
sim = self.this_one[order].simulated_image[self.ystart:self.yend+1,self.xstart:self.xend+1]
except AttributeError:
print("Contribution from order",order,"is missing. Skipping it.")
continue
final = final + sim
self.final = final
        if (Back is None) and (BackLevel is None) and (tofits is not None):
# Save the background image to a fits file
hprime = fits.PrimaryHDU()
himg = fits.ImageHDU(final)
himg.header['EXTNAME'] = 'BACKGRND'
himg.header['UNITS'] = 'e/s'
if BackLevel is not None:
himg.header['BackLevel'] = BackLevel
hlist = fits.HDUList([hprime, himg])
hlist.writeto(tofits, overwrite=True)
def saveSingleFits(self,name):
"""A helper function to write the 2D simulated imae into a fits file
Parameters
----------
        name: str
The file name to use for the output
"""
#save an array into the first extension of a fits file
h0 = fits.PrimaryHDU()
h1 = fits.ImageHDU(self.final,name='DATA')
hdulist = fits.HDUList([h0,h1])
hdulist.writeto(name,overwrite=True)
if __name__ == '__main__':
import glob
image_seeds = glob.glob("V4*.fits")
seed = Grism_seed(image_seeds,"F444W","modA_R","/Users/npirzkal/Dropbox/GRISMDATA/NIRCAM/")
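    # A sketch of the typical continuation (an assumption based on the class
    # docstring; these calls are not part of the original demo):
    # seed.observation()                       # set up per-order observations
    # seed.disperse()                          # run the disperser for every order
    # seed.finalize(tofits='WFSS_sim.fits')    # co-add orders and save
    # seed.saveSingleFits('WFSS_seed.fits')    # write the final 2D image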
| [
"astropy.io.fits.ImageHDU",
"os.unlink",
"numpy.median",
"astropy.io.fits.PrimaryHDU",
"os.path.isfile",
"astropy.io.fits.open",
"glob.glob",
"grismconf.Config",
"astropy.io.fits.HDUList",
"os.path.join",
"multiprocessing.cpu_count"
] | [((10030, 10051), 'glob.glob', 'glob.glob', (['"""V4*.fits"""'], {}), "('V4*.fits')\n", (10039, 10051), False, 'import glob\n'), ((2063, 2140), 'os.path.join', 'os.path.join', (['config_path', "('%s_%s_%s.conf' % (instrument, cross_filter, mode))"], {}), "(config_path, '%s_%s_%s.conf' % (instrument, cross_filter, mode))\n", (2075, 2140), False, 'import os\n'), ((9795, 9812), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9810, 9812), False, 'from astropy.io import fits\n'), ((9826, 9864), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['self.final'], {'name': '"""DATA"""'}), "(self.final, name='DATA')\n", (9839, 9864), False, 'from astropy.io import fits\n'), ((9900, 9922), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[h0, h1]'], {}), '([h0, h1])\n', (9912, 9922), False, 'from astropy.io import fits\n'), ((2553, 2582), 'os.path.isfile', 'os.path.isfile', (['self.SBE_save'], {}), '(self.SBE_save)\n', (2567, 2582), False, 'import os\n'), ((3868, 3897), 'grismconf.Config', 'grismconf.Config', (['self.config'], {}), '(self.config)\n', (3884, 3897), False, 'import grismconf\n'), ((8236, 8253), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (8251, 8253), False, 'from astropy.io import fits\n'), ((8273, 8293), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['final'], {}), '(final)\n', (8286, 8293), False, 'from astropy.io import fits\n'), ((8494, 8522), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hprime, himg]'], {}), '([hprime, himg])\n', (8506, 8522), False, 'from astropy.io import fits\n'), ((9150, 9167), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9165, 9167), False, 'from astropy.io import fits\n'), ((9187, 9207), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['final'], {}), '(final)\n', (9200, 9207), False, 'from astropy.io import fits\n'), ((9408, 9436), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hprime, himg]'], {}), '([hprime, himg])\n', (9420, 9436), False, 'from astropy.io import fits\n'), ((2370, 2381), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2379, 2381), False, 'from multiprocessing import cpu_count\n'), ((2600, 2624), 'os.unlink', 'os.unlink', (['self.SBE_save'], {}), '(self.SBE_save)\n', (2609, 2624), False, 'import os\n'), ((2761, 2786), 'astropy.io.fits.open', 'fits.open', (['image_seeds[0]'], {}), '(image_seeds[0])\n', (2770, 2786), False, 'from astropy.io import fits\n'), ((3040, 3065), 'astropy.io.fits.open', 'fits.open', (['image_seeds[0]'], {}), '(image_seeds[0])\n', (3049, 3065), False, 'from astropy.io import fits\n'), ((7266, 7295), 'grismconf.Config', 'grismconf.Config', (['self.config'], {}), '(self.config)\n', (7282, 7295), False, 'import grismconf\n'), ((7867, 7882), 'numpy.median', 'np.median', (['Back'], {}), '(Back)\n', (7876, 7882), True, 'import numpy as np\n'), ((7446, 7465), 'astropy.io.fits.open', 'fits.open', (['bck_file'], {}), '(bck_file)\n', (7455, 7465), False, 'from astropy.io import fits\n')] |
import pyspark as ps
import pandas as pd
import numpy as np
from pyspark.sql import SparkSession
from pyspark.ml.evaluation import RegressionEvaluator
from sklearn.metrics import mean_squared_error
spark = SparkSession.builder.getOrCreate()
data = pd.read_csv('../data/filtered_ratings.csv')
data = data.iloc[:1000000,:4]
ratings_df = spark.createDataFrame(data)
train, test = ratings_df.randomSplit([0.8, 0.2], seed=51)
print(train.count())
# density of my training data
# num_ratings/ (num_users * num_movies)
num_ratings = train.count()
num_users = train.select("userId").distinct().count()
num_movies = train.select("movieId").distinct().count()
density = num_ratings/ (num_users * num_movies)
print(f'density: {density}')
# create an untrained ALS factorization model.
from pyspark.ml.recommendation import ALS
als_model = ALS(
itemCol='movieId',
userCol='userId',
ratingCol='rating',
nonnegative=True,
coldStartStrategy="drop",
regParam=0.2)
recommender = als_model.fit(train)
predictions = recommender.transform(test)
predictions.show()
predictions_pd = predictions.toPandas()
y = np.array(predictions_pd.rating.values)
y_hat = np.array(predictions_pd.prediction.values)
MSE = mean_squared_error(y, y_hat)
RMSE = np.sqrt(MSE)
print(MSE, RMSE)
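# The RegressionEvaluator imported above is otherwise unused; as a sketch, the
# Spark-native RMSE below should agree with the sklearn value up to float precision.
evaluator = RegressionEvaluator(metricName='rmse', labelCol='rating', predictionCol='prediction')
print(f'spark RMSE: {evaluator.evaluate(predictions)}')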
| [
"pyspark.sql.SparkSession.builder.getOrCreate",
"pandas.read_csv",
"numpy.array",
"pyspark.ml.recommendation.ALS",
"sklearn.metrics.mean_squared_error",
"numpy.sqrt"
] | [((206, 240), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (238, 240), False, 'from pyspark.sql import SparkSession\n'), ((249, 292), 'pandas.read_csv', 'pd.read_csv', (['"""../data/filtered_ratings.csv"""'], {}), "('../data/filtered_ratings.csv')\n", (260, 292), True, 'import pandas as pd\n'), ((831, 954), 'pyspark.ml.recommendation.ALS', 'ALS', ([], {'itemCol': '"""movieId"""', 'userCol': '"""userId"""', 'ratingCol': '"""rating"""', 'nonnegative': '(True)', 'coldStartStrategy': '"""drop"""', 'regParam': '(0.2)'}), "(itemCol='movieId', userCol='userId', ratingCol='rating', nonnegative=\n True, coldStartStrategy='drop', regParam=0.2)\n", (834, 954), False, 'from pyspark.ml.recommendation import ALS\n'), ((1126, 1164), 'numpy.array', 'np.array', (['predictions_pd.rating.values'], {}), '(predictions_pd.rating.values)\n', (1134, 1164), True, 'import numpy as np\n'), ((1173, 1215), 'numpy.array', 'np.array', (['predictions_pd.prediction.values'], {}), '(predictions_pd.prediction.values)\n', (1181, 1215), True, 'import numpy as np\n'), ((1222, 1250), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1240, 1250), False, 'from sklearn.metrics import mean_squared_error\n'), ((1258, 1270), 'numpy.sqrt', 'np.sqrt', (['MSE'], {}), '(MSE)\n', (1265, 1270), True, 'import numpy as np\n')] |
import ctypes
from multiprocessing import Pool, Array, Value
import numpy as np
def emb2Arr(emb):
"""Converts embedding to a shared array."""
return Array(ctypes.c_double, emb.ravel())
def arr2Arr(arr, is_int=False):
"""Converts np.ndarray to a shared array."""
if is_int:
return Array(ctypes.c_int, arr, lock=False)
return Array(ctypes.c_double, arr, lock=False)
def int2Val(value):
"""Converts an int to a shared int."""
return Value('i', value, lock=False)
def Arr2emb(arr, v=None):
"""Reads a shared array as embeddings, without data copy by leveraging on some numpy magic."""
global VOCABSIZE
if v is None:
v = VOCABSIZE.value
return np.frombuffer(arr.get_obj()).reshape((v, -1))
def Arr2arr(arr, is_int=False):
"""Reads a shared array as a np.array, without data copy by leveraging on some numpy magic."""
if is_int:
return np.frombuffer(arr, dtype="int32")
return np.frombuffer(arr)
def init(vocab_, k_, context_size_, noise_probas_, w_emb_, c_emb_, all_ids_):
"""Little helper to share the data between all workers."""
global VOCABSIZE, k_factor, context_size, noise_probas, word_embeddings, context_embeddings, probas, all_ids
VOCABSIZE = vocab_
k_factor = k_
context_size = context_size_
noise_probas = noise_probas_
word_embeddings = w_emb_
context_embeddings = c_emb_
all_ids = all_ids_
def parallel_iter(fun, args, n_worker, initargs):
p = Pool(n_worker, initializer=init, initargs=initargs)
res = p.imap_unordered(fun, args, chunksize=500) # small chunks to get some speed improvement
for r in res:
yield r
p.close()
p.join()
def unpack_iterator(iterator):
    # Iterate directly rather than calling next() in an infinite loop: under
    # PEP 479 a StopIteration escaping inside a generator raises RuntimeError.
    for listed_res in iterator:
for res in listed_res:
if res:
yield res
def add_to_iterator(iterator, arg):
for d in iterator:
        yield (d, arg)
def build_iterator(fun, args, eta, n_worker, initargs):
base_iterator = parallel_iter(fun, args, n_worker, initargs)
unpacked_iterator = unpack_iterator(base_iterator)
final_iterator = add_to_iterator(unpacked_iterator, eta)
return final_iterator
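if __name__ == '__main__':
    # Minimal round-trip sketch (not part of the original module): push a toy
    # 4x3 embedding into a shared (locked) Array and view it back without a copy.
    toy_emb = np.arange(12, dtype=float).reshape(4, 3)
    shared_emb = emb2Arr(toy_emb)
    assert (Arr2emb(shared_emb, v=4) == toy_emb).all()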
| [
"numpy.frombuffer",
"multiprocessing.Value",
"multiprocessing.Array",
"multiprocessing.Pool"
] | [((356, 395), 'multiprocessing.Array', 'Array', (['ctypes.c_double', 'arr'], {'lock': '(False)'}), '(ctypes.c_double, arr, lock=False)\n', (361, 395), False, 'from multiprocessing import Pool, Array, Value\n'), ((472, 501), 'multiprocessing.Value', 'Value', (['"""i"""', 'value'], {'lock': '(False)'}), "('i', value, lock=False)\n", (477, 501), False, 'from multiprocessing import Pool, Array, Value\n'), ((961, 979), 'numpy.frombuffer', 'np.frombuffer', (['arr'], {}), '(arr)\n', (974, 979), True, 'import numpy as np\n'), ((1487, 1538), 'multiprocessing.Pool', 'Pool', (['n_worker'], {'initializer': 'init', 'initargs': 'initargs'}), '(n_worker, initializer=init, initargs=initargs)\n', (1491, 1538), False, 'from multiprocessing import Pool, Array, Value\n'), ((308, 344), 'multiprocessing.Array', 'Array', (['ctypes.c_int', 'arr'], {'lock': '(False)'}), '(ctypes.c_int, arr, lock=False)\n', (313, 344), False, 'from multiprocessing import Pool, Array, Value\n'), ((916, 949), 'numpy.frombuffer', 'np.frombuffer', (['arr'], {'dtype': '"""int32"""'}), "(arr, dtype='int32')\n", (929, 949), True, 'import numpy as np\n')] |
import argparse
import os
import torch
import numpy as np
import gym
from dreamerv2.utils.wrapper import GymAtar, OneHotAction
# from dreamerv2.training.config import MinAtarConfig
from dreamerv2.training.config import Config
from dreamerv2.training.trainer import Trainer
from dreamerv2.training.evaluator import Evaluator
def main(args):
env_name = args.env
exp_id = args.id
'''make dir for saving results'''
result_dir = os.path.join('results', '{}_{}'.format(env_name, exp_id))
model_dir = os.path.join(result_dir, 'models') # dir to save learnt models
os.makedirs(model_dir, exist_ok=True)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available() and args.device:
device = torch.device('cuda')
torch.cuda.manual_seed(args.seed)
else:
device = torch.device('cpu')
print('using :', device)
env = OneHotAction(GymAtar(env_name))
obs_shape = env.observation_space.shape
action_size = env.action_space.shape[0]
obs_dtype = int
action_dtype = np.float32
batch_size = args.batch_size
seq_len = args.seq_len
config = Config(
env=env_name,
obs_shape=obs_shape,
action_size=action_size,
obs_dtype=obs_dtype,
action_dtype=action_dtype,
seq_len=seq_len,
batch_size=batch_size,
model_dir=model_dir,
)
config_dict = config.__dict__
trainer = Trainer(config, device)
evaluator = Evaluator(config, device)
"""training loop"""
print('...training...')
train_metrics = {}
trainer.collect_seed_episodes(env)
obs, score = env.reset(), 0
done = False
prev_rssmstate = trainer.RSSM._init_rssm_state(1)
prev_action = torch.zeros(1, trainer.action_size).to(trainer.device)
episode_actor_ent = []
scores = []
best_mean_score = 0
best_save_path = os.path.join(model_dir, 'models_best.pth')
for iter in range(1, trainer.config.train_steps):
if iter % trainer.config.train_every == 0:
train_metrics = trainer.train_batch(train_metrics)
print(iter, train_metrics)
if iter % trainer.config.slow_target_update == 0:
trainer.update_target()
if iter % trainer.config.save_every == 0:
trainer.save_model(iter)
with torch.no_grad():
embed = trainer.ObsEncoder(torch.tensor(obs, dtype=torch.float32).unsqueeze(0).to(trainer.device))
_, posterior_rssm_state = trainer.RSSM.rssm_observe(embed, prev_action, not done, prev_rssmstate)
model_state = trainer.RSSM.get_model_state(posterior_rssm_state)
action, action_dist = trainer.ActionModel(model_state)
action = trainer.ActionModel.add_exploration(action, iter).detach()
action_ent = torch.mean(action_dist.entropy()).item()
episode_actor_ent.append(action_ent)
next_obs, rew, done, _ = env.step(action.squeeze(0).cpu().numpy())
score += rew
if done:
trainer.buffer.add(obs, action.squeeze(0).cpu().numpy(), rew, done)
train_metrics['train_rewards'] = score
train_metrics['action_ent'] = np.mean(episode_actor_ent)
scores.append(score)
if len(scores) > 15:
scores.pop(0)
current_average = np.mean(scores)
if current_average > best_mean_score:
best_mean_score = current_average
print('saving best model with mean score : ', best_mean_score)
save_dict = trainer.get_save_dict()
torch.save(save_dict, best_save_path)
obs, score = env.reset(), 0
done = False
prev_rssmstate = trainer.RSSM._init_rssm_state(1)
prev_action = torch.zeros(1, trainer.action_size).to(trainer.device)
episode_actor_ent = []
else:
trainer.buffer.add(obs, action.squeeze(0).detach().cpu().numpy(), rew, done)
obs = next_obs
prev_rssmstate = posterior_rssm_state
prev_action = action
'''evaluating probably best model'''
evaluator.eval_saved_agent(env, best_save_path)
if __name__ == "__main__":
"""there are tonnes of HPs, if you want to do an ablation over any particular one, please add if here"""
parser = argparse.ArgumentParser()
parser.add_argument("--env", default='Pong-v0', type=str, help='atari env name')
parser.add_argument("--id", type=str, default='0', help='Experiment ID')
parser.add_argument('--seed', type=int, default=123, help='Random seed')
parser.add_argument('--device', default='cuda', help='CUDA or CPU')
parser.add_argument('--batch_size', type=int, default=50, help='Batch size')
parser.add_argument('--seq_len', type=int, default=50, help='Sequence Length (chunk length)')
args = parser.parse_args()
main(args)
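# Example invocation (a sketch; the script file name is a placeholder, the flags
# are the ones parsed above):
#   python this_script.py --env Pong-v0 --id 0 --seed 123 --device cuda --batch_size 50 --seq_len 50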
| [
"dreamerv2.training.trainer.Trainer",
"numpy.random.seed",
"os.makedirs",
"argparse.ArgumentParser",
"torch.manual_seed",
"dreamerv2.utils.wrapper.GymAtar",
"torch.cuda.manual_seed",
"torch.save",
"dreamerv2.training.config.Config",
"dreamerv2.training.evaluator.Evaluator",
"numpy.mean",
"torc... | [((517, 551), 'os.path.join', 'os.path.join', (['result_dir', '"""models"""'], {}), "(result_dir, 'models')\n", (529, 551), False, 'import os\n'), ((585, 622), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (596, 622), False, 'import os\n'), ((628, 653), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (642, 653), True, 'import numpy as np\n'), ((658, 686), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (675, 686), False, 'import torch\n'), ((1148, 1331), 'dreamerv2.training.config.Config', 'Config', ([], {'env': 'env_name', 'obs_shape': 'obs_shape', 'action_size': 'action_size', 'obs_dtype': 'obs_dtype', 'action_dtype': 'action_dtype', 'seq_len': 'seq_len', 'batch_size': 'batch_size', 'model_dir': 'model_dir'}), '(env=env_name, obs_shape=obs_shape, action_size=action_size,\n obs_dtype=obs_dtype, action_dtype=action_dtype, seq_len=seq_len,\n batch_size=batch_size, model_dir=model_dir)\n', (1154, 1331), False, 'from dreamerv2.training.config import Config\n'), ((1444, 1467), 'dreamerv2.training.trainer.Trainer', 'Trainer', (['config', 'device'], {}), '(config, device)\n', (1451, 1467), False, 'from dreamerv2.training.trainer import Trainer\n'), ((1484, 1509), 'dreamerv2.training.evaluator.Evaluator', 'Evaluator', (['config', 'device'], {}), '(config, device)\n', (1493, 1509), False, 'from dreamerv2.training.evaluator import Evaluator\n'), ((1889, 1931), 'os.path.join', 'os.path.join', (['model_dir', '"""models_best.pth"""'], {}), "(model_dir, 'models_best.pth')\n", (1901, 1931), False, 'import os\n'), ((4379, 4404), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4402, 4404), False, 'import argparse\n'), ((694, 719), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (717, 719), False, 'import torch\n'), ((754, 774), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (766, 774), False, 'import torch\n'), ((783, 816), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (805, 816), False, 'import torch\n'), ((844, 863), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (856, 863), False, 'import torch\n'), ((917, 934), 'dreamerv2.utils.wrapper.GymAtar', 'GymAtar', (['env_name'], {}), '(env_name)\n', (924, 934), False, 'from dreamerv2.utils.wrapper import GymAtar, OneHotAction\n'), ((1746, 1781), 'torch.zeros', 'torch.zeros', (['(1)', 'trainer.action_size'], {}), '(1, trainer.action_size)\n', (1757, 1781), False, 'import torch\n'), ((2334, 2349), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2347, 2349), False, 'import torch\n'), ((3199, 3225), 'numpy.mean', 'np.mean', (['episode_actor_ent'], {}), '(episode_actor_ent)\n', (3206, 3225), True, 'import numpy as np\n'), ((3356, 3371), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (3363, 3371), True, 'import numpy as np\n'), ((3639, 3676), 'torch.save', 'torch.save', (['save_dict', 'best_save_path'], {}), '(save_dict, best_save_path)\n', (3649, 3676), False, 'import torch\n'), ((3831, 3866), 'torch.zeros', 'torch.zeros', (['(1)', 'trainer.action_size'], {}), '(1, trainer.action_size)\n', (3842, 3866), False, 'import torch\n'), ((2390, 2428), 'torch.tensor', 'torch.tensor', (['obs'], {'dtype': 'torch.float32'}), '(obs, dtype=torch.float32)\n', (2402, 2428), False, 'import torch\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sckit-learn classification utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import pickle
from absl import flags
from absl import logging
import numpy as np
from sklearn import model_selection
from sklearn.compose import make_column_transformer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
# pylint: disable=invalid-name
flags.DEFINE_boolean(
"transform_inputs", True,
"If enabled, will scale the numeric features and convert categorical "
"features to one-hot encoding.")
flags.DEFINE_list(
"classifiers", ["LogisticRegression"],
"Type of the classifier. One of: \"LogisticRegression\", \"SVM\", "
"\"RidgeRegression\", \"RandomForest\", \"AdaBoost\", \"LDA\", \"QDA\", "
"\"GaussianProcess\", \"DecisionTree\", \"DNN\", \"GaussianNaiveBayes\", "
"\"BaggingEnsemble\".")
flags.DEFINE_boolean(
"use_implicationals", True, "If True, use the implicational features.")
flags.DEFINE_string(
"best_configurations_file", "",
"File containing the JSON dictionary from feature names to the "
"respective best model and data configurations. When `--cross_validate` "
"is enabled, this is the output file to be generated. In all other modes "
"this is an input file.")
FLAGS = flags.FLAGS
# List of all supported classifiers.
ALL_MODELS = [
"AdaBoost", "DNN", "DecisionTree", "GaussianProcess", "LDA",
"LogisticRegression", "QDA", "RandomForest", "RidgeRegression", "SVM",
"GaussianNaiveBayes", "BaggingEnsemble"
]
# Model information keys.
MODEL_INFO_NAME_KEY = "name"
MODEL_INFO_SPARSITY_KEY = "no_cv" # Not enough data.
MODEL_INFO_SCORE_KEY = "accuracy"
MODEL_INFO_CANDIDATES_KEY = "candidates"
# Random seed.
_RANDOM_STATE = 4611170
# WALS language code.
_LANGUAGE_CODE = "wals_code"
def _prepare_data(input_df):
"""Splits data into features and labels."""
class_label = "target_value"
y = input_df[class_label].copy()
X_columns_to_drop = [class_label, _LANGUAGE_CODE, "target_feature"]
X = input_df.drop(columns=X_columns_to_drop)
return X, y
def _split_into_features_and_labels(feature_name, feature_maker,
training_df, dev_df,
transform_inputs):
"""Preprocesses the data and returns the features and labels."""
# Get the label class counts for the training data.
train_class_counts = training_df.target_value.value_counts()
train_class_counts = list(zip(train_class_counts.index,
train_class_counts.values))
logging.info("%s: Class counts: %s", feature_name, train_class_counts)
# Perform the split into features and labels of the training set.
X_train, y_train = _prepare_data(training_df)
logging.info("%s: Input feature dimensions: %s", feature_name,
X_train.shape[1])
# Split dev set.
X_dev, y_dev = _prepare_data(dev_df)
# Numeric columns are transformed using standard scaler and categorical
# columns are converted to one-hot.
if transform_inputs:
numeric_cols = ["latitude", "longitude"]
categorical_cols = []
for col_name in X_train.columns:
if (col_name in feature_maker.prob_features or
col_name in feature_maker.count_features):
numeric_cols.append(col_name) # Counts, probabilities.
elif col_name in feature_maker.categorical_features:
categorical_cols.append(col_name) # Categorical feature values.
inputs_transformer = make_column_transformer(
(StandardScaler(), numeric_cols),
(OneHotEncoder(handle_unknown="ignore"), categorical_cols),
remainder="passthrough")
X_train = inputs_transformer.fit_transform(X_train)
if X_dev.shape[0]: # Do we have enough samples?
X_dev = inputs_transformer.transform(X_dev)
else:
logging.warning("Feature %s not found in the dev set. This is likely to "
"crash the evaluation mode!", feature_name)
else:
# Transform data frames to Numpy. The input transformer in the branch above
# returns Numpy arrays.
X_train = X_train.to_numpy()
X_dev = X_dev.to_numpy()
return (
X_train, y_train.to_numpy(), X_dev, y_dev.to_numpy(), train_class_counts)
def prepare_data(feature_maker, feature_name, use_implicationals=True,
prediction_mode=False):
"""Prepares the features and labels for the given WALS feature name."""
# Process training and dev data for the feature. Store the WALS language codes
# for the development set aside.
training_df, dev_df = feature_maker.process_data(
feature_name, prediction_mode=prediction_mode)
assert _LANGUAGE_CODE in dev_df.columns
dev_language_codes = list(dev_df[_LANGUAGE_CODE].values)
if not use_implicationals:
logging.info("Discarding implicational features")
training_df = feature_maker.select_columns(training_df,
discard_implicationals=True)
dev_df = feature_maker.select_columns(dev_df,
discard_implicationals=True)
# Split the data into features and labels.
X_train, y_train, X_dev, y_dev, train_class_counts = (
_split_into_features_and_labels(
feature_name, feature_maker, training_df, dev_df,
FLAGS.transform_inputs))
return X_train, y_train, X_dev, y_dev, dev_language_codes, train_class_counts
def _make_classifier(classifier_name):
"""Classifier factory."""
# Class weights: if you set this to None, you'd get much better accuracies,
# but it's likely that the classifier will be overpredicting the majority
# class.
class_weight_strategy = None # Note: this may set "balanced" as default.
max_iters = 10000
if classifier_name == "AdaBoost":
model = AdaBoostClassifier(n_estimators=100)
elif classifier_name == "LogisticRegression":
model = LogisticRegression(max_iter=max_iters,
class_weight=class_weight_strategy)
elif classifier_name == "LDA":
model = LinearDiscriminantAnalysis(tol=1E-6)
elif classifier_name == "QDA":
model = QuadraticDiscriminantAnalysis()
elif classifier_name == "DNN":
model = MLPClassifier(random_state=_RANDOM_STATE,
hidden_layer_sizes=[200])
elif classifier_name == "DecisionTree":
model = DecisionTreeClassifier(random_state=_RANDOM_STATE,
min_samples_leaf=3,
criterion="entropy",
class_weight="balanced")
elif classifier_name == "GaussianProcess":
model = GaussianProcessClassifier(random_state=_RANDOM_STATE,
max_iter_predict=200)
elif classifier_name == "RandomForest":
model = RandomForestClassifier(n_estimators=200,
random_state=_RANDOM_STATE,
min_samples_leaf=3,
criterion="entropy",
class_weight="balanced_subsample")
elif classifier_name == "RidgeRegression":
model = RidgeClassifier(normalize=True, tol=1E-5,
class_weight=class_weight_strategy)
elif classifier_name == "SVM":
model = LinearSVC(max_iter=max_iters, class_weight=class_weight_strategy)
elif classifier_name == "GaussianNaiveBayes":
model = GaussianNB()
elif classifier_name == "BaggingEnsemble":
model = BaggingClassifier(random_state=_RANDOM_STATE)
else:
raise ValueError("Unsupported classifier: %s" % classifier_name)
return model
def cross_validate(feature_name, classifier_name, X, y,
cv_num_folds, cv_num_repeats):
"""Runs repeated stratified $k$-fold cross-validation.
Returns multiple cross-validation metrics as a dictionary, where for each
metric mean and variance across multiple repeats and folds is summarized.
Args:
feature_name: (string) Name of the WALS feature.
classifier_name: (string) Classifier name.
X: (numpy array) Input features.
y: (numpy array) Labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing cross-validation scores and stats.
"""
model = _make_classifier(classifier_name)
scoring = ["f1_micro", "precision_micro", "recall_micro", "accuracy"]
try:
# Really primitive logic to figure out class distribution.
_, y_counts = np.unique(y, return_counts=True)
y_max_freq = np.max(y_counts)
# Check if the class counts are not reliable to run cross-validation.
if y_max_freq < cv_num_folds:
logging.warning("[%s] %s: Not enough data. Fitting the model instead "
"of running CV", feature_name, classifier_name)
# Simply fit the model.
model.fit(X, y)
cv_scores = {}
cv_scores["accuracy"] = (model.score(X, y), 0.0)
cv_scores[MODEL_INFO_SPARSITY_KEY] = True
return cv_scores
else:
logging.info("[%s] Running cross-validation of %s (k=%d, n=%d) ...",
feature_name, classifier_name, cv_num_folds, cv_num_repeats)
# Run cross-validation.
cv = RepeatedStratifiedKFold(n_splits=cv_num_folds,
n_repeats=cv_num_repeats,
random_state=_RANDOM_STATE)
cv_scores = model_selection.cross_validate(
model, X, y, cv=cv, scoring=scoring, n_jobs=cv_num_folds)
cv_scores[MODEL_INFO_SPARSITY_KEY] = False
except Exception as e: # pylint: disable=broad-except
logging.error("[%s] %s: CV: Exception: %s", feature_name, classifier_name,
e)
return None
del cv_scores["fit_time"]
del cv_scores["score_time"]
for score_name in scoring:
scores_vec_key = "test_" + score_name
cv_scores[score_name] = (np.mean(cv_scores[scores_vec_key]),
np.var(cv_scores[scores_vec_key]))
del cv_scores[scores_vec_key]
# Sanity check.
if math.isnan(cv_scores["accuracy"][0]):
return None
logging.info("[train] %s: CV scores for %s: %s", feature_name,
classifier_name, cv_scores)
return cv_scores
def train_classifier(feature_name, classifier_name, X, y, model_path=None):
"""Trains classifier."""
model = _make_classifier(classifier_name)
logging.info("%s: Fitting %s model ...",
feature_name, classifier_name)
model.fit(X, y)
logging.info("%s: %s: Score: %s", feature_name, classifier_name,
model.score(X, y))
if model_path:
logging.info("Saving model to \"%s\" ...", model_path)
pickle.dump(model, open(model_path, "wb"))
return model
def select_best_model(classifiers, feature_name, X_train, y_train,
cv_num_folds, cv_num_repeats):
"""Performs cross-validation of various classifiers for a given feature.
Returns a dictionary with the best classifier name, its score and the number
of candidates it was selected from.
Args:
classifiers: (list) Names of the classifiers to choose from.
feature_name: (string) WALS feature name.
X_train: (numpy array) Training features.
y_train: (numpy array) Training labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing best configuration.
"""
scores = []
for classifier_name in classifiers:
clf_scores = cross_validate(feature_name, classifier_name, X_train, y_train,
cv_num_folds, cv_num_repeats)
if clf_scores: # Cross-validation may fail for some settings.
scores.append((classifier_name, clf_scores))
  # Sort the scores by the highest accuracy mean. Note that micro-averaged F1,
  # precision and recall all coincide with accuracy for single-label multiclass
  # data, which is why those scores are identical.
scores = sorted(scores, key=lambda score: score[1]["accuracy"][0],
reverse=True)
if len(scores) < 5:
raise ValueError("Expected at least five candidate classifiers!")
best_model = scores[0]
return {
MODEL_INFO_NAME_KEY: best_model[0], # Model name.
# Accuracy mean.
MODEL_INFO_SCORE_KEY: best_model[1]["accuracy"][0],
# Boolean sparsity marker.
MODEL_INFO_SPARSITY_KEY: best_model[1][MODEL_INFO_SPARSITY_KEY],
# Overall number of successful evals.
MODEL_INFO_CANDIDATES_KEY: len(scores)
}
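if __name__ == "__main__":
  # Minimal usage sketch on synthetic data (an illustration, not part of the
  # original library): pick the best of five classifiers via cross-validation.
  rng = np.random.RandomState(_RANDOM_STATE)
  X_toy = rng.rand(80, 4)
  y_toy = (X_toy[:, 0] > 0.5).astype(int)
  best = select_best_model(
      ["LogisticRegression", "SVM", "LDA", "QDA", "DecisionTree"],
      "toy_feature", X_toy, y_toy, cv_num_folds=5, cv_num_repeats=2)
  print(best[MODEL_INFO_NAME_KEY], best[MODEL_INFO_SCORE_KEY])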
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.cross_validate",
"sklearn.tree.DecisionTreeClassifier",
"absl.logging.info",
"numpy.mean",
"absl.flags.DEFINE_boolean",
"sklearn.neural_network.MLPClassifier",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"absl.flags.... | [((1808, 1964), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""transform_inputs"""', '(True)', '"""If enabled, will scale the numeric features and convert categorical features to one-hot encoding."""'], {}), "('transform_inputs', True,\n 'If enabled, will scale the numeric features and convert categorical features to one-hot encoding.'\n )\n", (1828, 1964), False, 'from absl import flags\n'), ((1973, 2246), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""classifiers"""', "['LogisticRegression']", '"""Type of the classifier. One of: "LogisticRegression", "SVM", "RidgeRegression", "RandomForest", "AdaBoost", "LDA", "QDA", "GaussianProcess", "DecisionTree", "DNN", "GaussianNaiveBayes", "BaggingEnsemble"."""'], {}), '(\'classifiers\', [\'LogisticRegression\'],\n \'Type of the classifier. One of: "LogisticRegression", "SVM", "RidgeRegression", "RandomForest", "AdaBoost", "LDA", "QDA", "GaussianProcess", "DecisionTree", "DNN", "GaussianNaiveBayes", "BaggingEnsemble".\'\n )\n', (1990, 2246), False, 'from absl import flags\n'), ((2293, 2389), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_implicationals"""', '(True)', '"""If True, use the implicational features."""'], {}), "('use_implicationals', True,\n 'If True, use the implicational features.')\n", (2313, 2389), False, 'from absl import flags\n'), ((2392, 2683), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""best_configurations_file"""', '""""""', '"""File containing the JSON dictionary from feature names to the respective best model and data configurations. When `--cross_validate` is enabled, this is the output file to be generated. In all other modes this is an input file."""'], {}), "('best_configurations_file', '',\n 'File containing the JSON dictionary from feature names to the respective best model and data configurations. When `--cross_validate` is enabled, this is the output file to be generated. 
In all other modes this is an input file.'\n )\n", (2411, 2683), False, 'from absl import flags\n'), ((3999, 4069), 'absl.logging.info', 'logging.info', (['"""%s: Class counts: %s"""', 'feature_name', 'train_class_counts'], {}), "('%s: Class counts: %s', feature_name, train_class_counts)\n", (4011, 4069), False, 'from absl import logging\n'), ((4189, 4274), 'absl.logging.info', 'logging.info', (['"""%s: Input feature dimensions: %s"""', 'feature_name', 'X_train.shape[1]'], {}), "('%s: Input feature dimensions: %s', feature_name, X_train.shape[1]\n )\n", (4201, 4274), False, 'from absl import logging\n'), ((11469, 11505), 'math.isnan', 'math.isnan', (["cv_scores['accuracy'][0]"], {}), "(cv_scores['accuracy'][0])\n", (11479, 11505), False, 'import math\n'), ((11525, 11619), 'absl.logging.info', 'logging.info', (['"""[train] %s: CV scores for %s: %s"""', 'feature_name', 'classifier_name', 'cv_scores'], {}), "('[train] %s: CV scores for %s: %s', feature_name,\n classifier_name, cv_scores)\n", (11537, 11619), False, 'from absl import logging\n'), ((11801, 11872), 'absl.logging.info', 'logging.info', (['"""%s: Fitting %s model ..."""', 'feature_name', 'classifier_name'], {}), "('%s: Fitting %s model ...', feature_name, classifier_name)\n", (11813, 11872), False, 'from absl import logging\n'), ((6211, 6260), 'absl.logging.info', 'logging.info', (['"""Discarding implicational features"""'], {}), "('Discarding implicational features')\n", (6223, 6260), False, 'from absl import logging\n'), ((7213, 7249), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (7231, 7249), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((9914, 9946), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (9923, 9946), True, 'import numpy as np\n'), ((9964, 9980), 'numpy.max', 'np.max', (['y_counts'], {}), '(y_counts)\n', (9970, 9980), True, 'import numpy as np\n'), ((12028, 12080), 'absl.logging.info', 'logging.info', (['"""Saving model to "%s" ..."""', 'model_path'], {}), '(\'Saving model to "%s" ...\', model_path)\n', (12040, 12080), False, 'from absl import logging\n'), ((5258, 5382), 'absl.logging.warning', 'logging.warning', (['"""Feature %s not found in the dev set. This is likely to crash the evaluation mode!"""', 'feature_name'], {}), "(\n 'Feature %s not found in the dev set. This is likely to crash the evaluation mode!'\n , feature_name)\n", (5273, 5382), False, 'from absl import logging\n'), ((7310, 7384), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': 'max_iters', 'class_weight': 'class_weight_strategy'}), '(max_iter=max_iters, class_weight=class_weight_strategy)\n', (7328, 7384), False, 'from sklearn.linear_model import LogisticRegression\n'), ((10096, 10220), 'absl.logging.warning', 'logging.warning', (['"""[%s] %s: Not enough data. Fitting the model instead of running CV"""', 'feature_name', 'classifier_name'], {}), "(\n '[%s] %s: Not enough data. 
Fitting the model instead of running CV',\n feature_name, classifier_name)\n", (10111, 10220), False, 'from absl import logging\n'), ((10452, 10585), 'absl.logging.info', 'logging.info', (['"""[%s] Running cross-validation of %s (k=%d, n=%d) ..."""', 'feature_name', 'classifier_name', 'cv_num_folds', 'cv_num_repeats'], {}), "('[%s] Running cross-validation of %s (k=%d, n=%d) ...',\n feature_name, classifier_name, cv_num_folds, cv_num_repeats)\n", (10464, 10585), False, 'from absl import logging\n'), ((10642, 10746), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'cv_num_folds', 'n_repeats': 'cv_num_repeats', 'random_state': '_RANDOM_STATE'}), '(n_splits=cv_num_folds, n_repeats=cv_num_repeats,\n random_state=_RANDOM_STATE)\n', (10665, 10746), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((10831, 10924), 'sklearn.model_selection.cross_validate', 'model_selection.cross_validate', (['model', 'X', 'y'], {'cv': 'cv', 'scoring': 'scoring', 'n_jobs': 'cv_num_folds'}), '(model, X, y, cv=cv, scoring=scoring, n_jobs=\n cv_num_folds)\n', (10861, 10924), False, 'from sklearn import model_selection\n'), ((11041, 11118), 'absl.logging.error', 'logging.error', (['"""[%s] %s: CV: Exception: %s"""', 'feature_name', 'classifier_name', 'e'], {}), "('[%s] %s: CV: Exception: %s', feature_name, classifier_name, e)\n", (11054, 11118), False, 'from absl import logging\n'), ((11312, 11346), 'numpy.mean', 'np.mean', (['cv_scores[scores_vec_key]'], {}), '(cv_scores[scores_vec_key])\n', (11319, 11346), True, 'import numpy as np\n'), ((11377, 11410), 'numpy.var', 'np.var', (['cv_scores[scores_vec_key]'], {}), '(cv_scores[scores_vec_key])\n', (11383, 11410), True, 'import numpy as np\n'), ((4949, 4965), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4963, 4965), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4991, 5029), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5004, 5029), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((7461, 7498), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'tol': '(1e-06)'}), '(tol=1e-06)\n', (7487, 7498), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((7543, 7574), 'sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis', ([], {}), '()\n', (7572, 7574), False, 'from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n'), ((7620, 7687), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '_RANDOM_STATE', 'hidden_layer_sizes': '[200]'}), '(random_state=_RANDOM_STATE, hidden_layer_sizes=[200])\n', (7633, 7687), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7768, 7888), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '_RANDOM_STATE', 'min_samples_leaf': '(3)', 'criterion': '"""entropy"""', 'class_weight': '"""balanced"""'}), "(random_state=_RANDOM_STATE, min_samples_leaf=3,\n criterion='entropy', class_weight='balanced')\n", (7790, 7888), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((8047, 8122), 'sklearn.gaussian_process.GaussianProcessClassifier', 'GaussianProcessClassifier', ([], {'random_state': '_RANDOM_STATE', 'max_iter_predict': '(200)'}), '(random_state=_RANDOM_STATE, max_iter_predict=200)\n', (8072, 8122), False, 'from 
sklearn.gaussian_process import GaussianProcessClassifier\n'), ((8215, 8363), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(200)', 'random_state': '_RANDOM_STATE', 'min_samples_leaf': '(3)', 'criterion': '"""entropy"""', 'class_weight': '"""balanced_subsample"""'}), "(n_estimators=200, random_state=_RANDOM_STATE,\n min_samples_leaf=3, criterion='entropy', class_weight='balanced_subsample')\n", (8237, 8363), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8557, 8635), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'normalize': '(True)', 'tol': '(1e-05)', 'class_weight': 'class_weight_strategy'}), '(normalize=True, tol=1e-05, class_weight=class_weight_strategy)\n', (8572, 8635), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((8708, 8773), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'max_iter': 'max_iters', 'class_weight': 'class_weight_strategy'}), '(max_iter=max_iters, class_weight=class_weight_strategy)\n', (8717, 8773), False, 'from sklearn.svm import LinearSVC\n'), ((8834, 8846), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8844, 8846), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8904, 8949), 'sklearn.ensemble.BaggingClassifier', 'BaggingClassifier', ([], {'random_state': '_RANDOM_STATE'}), '(random_state=_RANDOM_STATE)\n', (8921, 8949), False, 'from sklearn.ensemble import BaggingClassifier\n')] |
'''Helmert inversion and transformation functions'''
import numpy as _np
import pandas as _pd
from .gn_const import WGS84, OMEGA_E
def gen_helm_aux(pt1,pt2):
    '''Auxiliary function assembling the design matrix for Helmert parameter inversion.'''
pt1 = pt1.astype(float)
pt2 = pt2.astype(float)
n_points=pt1.shape[0]
unity_blk = _np.tile(_np.eye(3),reps=n_points).T
xyz_blk = _np.zeros((n_points,3,3))
xyz_blk[:,1,2] = pt1[:,0] #x[1,2]
xyz_blk[:,2,1] = -pt1[:,0] #x[2,1]
xyz_blk[:,2,0] = pt1[:,1] #y[2,0]
xyz_blk[:,0,2] = -pt1[:,1] #y[0,2]
xyz_blk[:,0,1] = pt1[:,2] #z[0,1]
xyz_blk[:,1,0] = -pt1[:,2] #z[1,0]
xyz = pt1.reshape((-1,1))
A = _np.column_stack([unity_blk,xyz_blk.reshape((-1,3)),xyz]) #matrix
rhs = pt2.reshape((-1,1)) - xyz #right-hand side
return A, rhs
def get_helmert7(pt1,pt2):
'''inversion of 7 Helmert parameters between 2 sets of points'''
A, rhs = gen_helm_aux(pt1,pt2)
sol = _np.linalg.lstsq(A, rhs,rcond=None) # parameters
res = rhs - A@sol[0]
# sol[0] = [Tx, Ty, Tz, Rx, Ry, Rz, μ]
return sol,res.reshape(-1,3)
def gen_rot_matrix(v):
'''creates rotation matrix for transform7
from a list of [Rx, Ry, Rz] as in Altamimi'''
x, y, z = v
mat = _np.empty((3,3),dtype=float)
mat[0] = [ 0, -z, y]
mat[1] = [ z, 0, -x]
mat[2] = [-y, x, 0]
return mat + _np.eye(3)
def transform7(xyz_in,helmert_list):
'''transformation of xyz vector with 7 helmert parameters'''
translation = helmert_list[0:3]
rotation = gen_rot_matrix(helmert_list[3:6].flatten())
scale = helmert_list[6]
xyz_out = ((xyz_in @ rotation)*(1+scale) + translation.T)
return xyz_out
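# Round-trip sketch (kept as a comment since the module uses relative imports; an
# assumption about intended usage): invert the 7 parameters between two point sets
# and re-apply them.
# sol, res = get_helmert7(pt1, pt2)   # sol[0] is the 7x1 vector [Tx,Ty,Tz,Rx,Ry,Rz,mu]
# pt2_est = transform7(pt1, sol[0])   # reproduces pt2 up to the residuals `res`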
def xyz2llh_larson(xyz_array,ellipsoid=WGS84,tolerance = 1e-10,deg=False):
'''vectorized version of xyz2llh function as in Larson's gnssIR'''
x_arr,y_arr,z_arr = xyz_array[:,0],xyz_array[:,1],xyz_array[:,2]
llh_array = _np.empty_like(xyz_array)
_r = (x_arr*x_arr+y_arr*y_arr)**(1/2)
phi0 = _np.arctan((z_arr/_r)/(1-ellipsoid.ecc1sq))
phi = _np.empty_like(phi0,dtype=_np.float_)
error_mask = phi0!=_np.nan # quick init of mask with all True
for __ in range(10): #10 iterations cap as per Larson
# prime vertical radius of curvature
_n = ellipsoid.semimaj/(1-ellipsoid.ecc1sq*_np.sin(phi0[error_mask])**2)**(1/2)
hei = _r[error_mask]/_np.cos(phi0[error_mask])-_n
phi[error_mask] = _np.arctan((z_arr[error_mask]/_r[error_mask])/\
(1-ellipsoid.ecc1sq*_n/(_n+hei)))
error_mask = _np.abs(phi-phi0) > tolerance
        if error_mask.sum() == 0: # stop once every point has converged (mask all False)
break
phi0 = phi.copy()#need to copy here otherwise it's a pointer
    # Recompute the curvature radius and height for every point: inside the loop
    # `hei` only covers the points that were still iterating, so its length (and
    # content) would otherwise be wrong for already-converged points.
    _n = ellipsoid.semimaj/(1-ellipsoid.ecc1sq*_np.sin(phi)**2)**(1/2)
    hei = _r/_np.cos(phi)-_n
    llh_array[:,0] = phi # phi
    llh_array[:,1] = _np.arctan2(y_arr, x_arr) # lam
    llh_array[:,2] = hei # hei
if deg:
llh_array[:,:2] = _np.rad2deg(llh_array[:,:2])
return llh_array
def xyz2llh_heik(xyz_array: _np.ndarray, ellipsoid=WGS84, deg=False):
    '''Heikkinen (1982)
    This is an exact transformation and is pretty fast
Output
phi: latitude rad
lam: longitude rad
hei: height meters
'''
x_arr,y_arr,z_arr = xyz_array[:,0],xyz_array[:,1],xyz_array[:,2]
llh_array = _np.empty_like(xyz_array)
z_sq = z_arr*z_arr
r_sq = x_arr*x_arr + y_arr*y_arr
_r = (r_sq)**(1/2)
_f = 54 * ellipsoid.semiminsq * z_sq
_g = r_sq + (1 - ellipsoid.ecc1sq)*z_sq - \
ellipsoid.ecc1sq*(ellipsoid.semimajsq - ellipsoid.semiminsq)
_c = ellipsoid.ecc1sq*ellipsoid.ecc1sq*_f*r_sq/(_g*_g*_g)
_s = (1 + _c + (_c*_c + _c + _c)**(1/2))**(1/3)
_p = _f/(3*(_s + 1/_s + 1)**2*(_g*_g))
_q = (1 + 2*(ellipsoid.ecc1sq*ellipsoid.ecc1sq*_p))**(1/2)
r_0 = -(_p*ellipsoid.ecc1sq*_r)/(1+_q) + (ellipsoid.semimajsq/2*(1+1/_q)\
- _p*(1 - ellipsoid.ecc1sq)*(z_sq)/(_q*(1+_q)) - _p*r_sq/2)**(1/2)
r_ecc1sq_r0_sq = (_r - ellipsoid.ecc1sq*r_0)**2
_u = (r_ecc1sq_r0_sq + z_sq)**(1/2)
_v = (r_ecc1sq_r0_sq + (1-ellipsoid.ecc1sq)*z_sq)**(1/2)
bsq_av = ellipsoid.semiminsq/(ellipsoid.semimaj*_v)
z_0 = bsq_av*z_arr
llh_array[:,0] = _np.arctan((z_arr+ellipsoid.ecc2sq*z_0)/_r) #phi
llh_array[:,1] = _np.arctan2(y_arr, x_arr) # lam
llh_array[:,2] = _u*(1 - bsq_av) #hei
if deg:
llh_array[:,:2] = _np.rad2deg(llh_array[:,:2])
return llh_array
def xyz2llh_zhu(xyz_array,ellipsoid=WGS84,deg=False):
    '''Zhu (1993)
Output
phi: latitude rad
lam: longitude rad
hei: height meters
'''
x_arr,y_arr,z_arr = xyz_array[:,0],xyz_array[:,1],xyz_array[:,2]
llh_array = _np.empty_like(xyz_array)
_l=ellipsoid.ecc1sq/2
l_sq = _l*_l
r_sq = x_arr*x_arr + y_arr*y_arr
_r = r_sq**(1/2)
_m = r_sq/ellipsoid.semimajsq
ec1sq_z = (1-ellipsoid.ecc1sq)*z_arr
_n = (ec1sq_z/ellipsoid.semimin)**2
_i = -(2*l_sq + _m + _n)/2
_k = l_sq*(l_sq - _m - _n)
_q = (_m + _n - 4*l_sq)**3/216 + _m*_n*l_sq
_d = ((2*_q - _m*_n*l_sq)*_m*_n*l_sq)**(1/2)
beta = _i/3 - (_q+_d)**(1/3) - (_q-_d)**(1/3)
_t = ((beta*beta - _k)**(1/2) - (beta+_i)/2)**(1/2) - _np.sign(_m-_n) * ((beta - _i)/2)**(1/2)
r_0 = _r/(_t+_l)
z_0 = ec1sq_z/(_t-_l)
llh_array[:,0] = _np.arctan(z_0/((1 - ellipsoid.ecc1sq)*r_0)) #phi
llh_array[:,1] = _np.arctan2(y_arr, x_arr) # lam
llh_array[:,2] = _np.sign(_t - 1 + _l) * ((_r - r_0)**2 + (z_arr - z_0)**2)**(1/2) #hei
if deg:
llh_array[:,:2] = _np.rad2deg(llh_array[:,:2])
return llh_array
def llh2xyz(lat,lon,hei,ellipsoid):
'''Converts lat, lon and height to XYZ
phi is geodetic latitude
lam is geodetic longitude
hei is the altitude normal to ellipsoid'''
cos_phi = _np.cos(lat)
sin_phi = _np.sin(lat)
_rp = ellipsoid.semimaj/(1 - ellipsoid.ecc1sq*sin_phi*sin_phi)**(1/2)
rp_h = _rp + hei
x_arr = rp_h * cos_phi * _np.cos(lon)
y_arr = rp_h * cos_phi * _np.sin(lon)
z_arr = (rp_h - ellipsoid.ecc1sq*_rp) * sin_phi
return x_arr,y_arr,z_arr
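# Consistency sketch (comment-only, an assumption about usage): llh2xyz and the
# exact xyz2llh_heik transform should invert each other to numerical precision.
# x, y, z = llh2xyz(lat, lon, hei, WGS84)            # lat/lon in radians
# llh = xyz2llh_heik(_np.column_stack([x, y, z]))    # ~ [lat, lon, hei]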
def llh2rot(phi, lamb):
'''Creates R rotation matrices for n sites stacked
on the 3d dimension from phi (lat) and lamb (lon).'''
sin_lamb = _np.sin(lamb)
cos_lamb = _np.cos(lamb)
sin_phi = _np.sin(phi)
cos_phi = _np.cos(phi)
rot = _np.zeros((phi.shape[0],3,3),dtype=_np.float_)
rot[:,0,0] =-sin_lamb
rot[:,0,1] = cos_lamb
rot[:,1,0] =-sin_phi*cos_lamb
rot[:,1,1] =-sin_phi*sin_lamb
rot[:,1,2] = cos_phi
rot[:,2,0] = cos_phi*cos_lamb
rot[:,2,1] = cos_phi*sin_lamb
rot[:,2,2] = sin_phi
return rot
def norm(a:_np.ndarray,axis:int=1)->_np.ndarray:
'''Computes norm of every vector in the input array'''
return _np.sqrt((a * a).sum(axis=axis))
def ecef2eci(sp3_in):
    '''Simplified conversion of sp3 positions from ECEF to ECI'''
xyz_idx = _np.argwhere(sp3_in.columns.isin([('EST','X'),('EST','Y'),('EST','Z')])).ravel()
theta = OMEGA_E * (sp3_in.index.get_level_values(0).values)
cos_theta = _np.cos(theta)
sin_theta = _np.sin(theta)
sp3_nd = sp3_in.iloc[:,xyz_idx].values
x = sp3_nd[:,0]
y = sp3_nd[:,1]
z = sp3_nd[:,2]
x_eci = x*cos_theta - y*sin_theta
y_eci = x*sin_theta + y*cos_theta
return _pd.DataFrame(_np.concatenate([x_eci,y_eci,z]).reshape(3,-1).T,index = sp3_in.index,columns=[['EST','EST','EST'],['X','Y','Z']])
def eci2rac_rot(a):
'''Computes rotation 3D stack for sp3 vector rotation into RAC/RTN
RAC conventions of POD (to be discussed)
{u} = |{P}|
[T] = {v} = {w}x{u} * -1 # x of two orthogonal unit-vectors gives a unit vector so no need for ||
{w} = |{P}x{V}| * -1'''
# position
pos = a.EST[['X','Y','Z']].values
# velocity
vel = a.VELi[['X','Y','Z']].values # units should be km/s if XYZ are in km
# radial component
u_u = pos / norm(pos)[:,_np.newaxis]
# -------------------------
# General implementation
# # cross-track component
# w = _np.cross(pos,vel)
# w_u = w / norm(w)[:,_np.newaxis]
# # along-track component
# v_u = _np.cross(w_u,u_u)
# -------------------------
# Simplified implementation
# along-track component
v_u = vel / norm(vel)[:,_np.newaxis]
# cross-track component
w_u = _np.cross(u_u,v_u) # || not needed as u_v and v_u are orthogonal
rot = _np.dstack([u_u,-v_u,-w_u]) # negative v_u and w_u are to be consistent with POD
    return rot
| [
"numpy.dstack",
"numpy.arctan2",
"numpy.linalg.lstsq",
"numpy.eye",
"numpy.abs",
"numpy.empty",
"numpy.empty_like",
"numpy.zeros",
"numpy.cross",
"numpy.rad2deg",
"numpy.sin",
"numpy.cos",
"numpy.sign",
"numpy.arctan",
"numpy.concatenate"
] | [((364, 391), 'numpy.zeros', '_np.zeros', (['(n_points, 3, 3)'], {}), '((n_points, 3, 3))\n', (373, 391), True, 'import numpy as _np\n'), ((944, 980), 'numpy.linalg.lstsq', '_np.linalg.lstsq', (['A', 'rhs'], {'rcond': 'None'}), '(A, rhs, rcond=None)\n', (960, 980), True, 'import numpy as _np\n'), ((1240, 1270), 'numpy.empty', '_np.empty', (['(3, 3)'], {'dtype': 'float'}), '((3, 3), dtype=float)\n', (1249, 1270), True, 'import numpy as _np\n'), ((1915, 1940), 'numpy.empty_like', '_np.empty_like', (['xyz_array'], {}), '(xyz_array)\n', (1929, 1940), True, 'import numpy as _np\n'), ((1995, 2042), 'numpy.arctan', '_np.arctan', (['(z_arr / _r / (1 - ellipsoid.ecc1sq))'], {}), '(z_arr / _r / (1 - ellipsoid.ecc1sq))\n', (2005, 2042), True, 'import numpy as _np\n'), ((2049, 2087), 'numpy.empty_like', '_np.empty_like', (['phi0'], {'dtype': '_np.float_'}), '(phi0, dtype=_np.float_)\n', (2063, 2087), True, 'import numpy as _np\n'), ((2800, 2825), 'numpy.arctan2', '_np.arctan2', (['y_arr', 'x_arr'], {}), '(y_arr, x_arr)\n', (2811, 2825), True, 'import numpy as _np\n'), ((3274, 3299), 'numpy.empty_like', '_np.empty_like', (['xyz_array'], {}), '(xyz_array)\n', (3288, 3299), True, 'import numpy as _np\n'), ((4173, 4222), 'numpy.arctan', '_np.arctan', (['((z_arr + ellipsoid.ecc2sq * z_0) / _r)'], {}), '((z_arr + ellipsoid.ecc2sq * z_0) / _r)\n', (4183, 4222), True, 'import numpy as _np\n'), ((4243, 4268), 'numpy.arctan2', '_np.arctan2', (['y_arr', 'x_arr'], {}), '(y_arr, x_arr)\n', (4254, 4268), True, 'import numpy as _np\n'), ((4660, 4685), 'numpy.empty_like', '_np.empty_like', (['xyz_array'], {}), '(xyz_array)\n', (4674, 4685), True, 'import numpy as _np\n'), ((5282, 5330), 'numpy.arctan', '_np.arctan', (['(z_0 / ((1 - ellipsoid.ecc1sq) * r_0))'], {}), '(z_0 / ((1 - ellipsoid.ecc1sq) * r_0))\n', (5292, 5330), True, 'import numpy as _np\n'), ((5353, 5378), 'numpy.arctan2', '_np.arctan2', (['y_arr', 'x_arr'], {}), '(y_arr, x_arr)\n', (5364, 5378), True, 'import numpy as _np\n'), ((5765, 5777), 'numpy.cos', '_np.cos', (['lat'], {}), '(lat)\n', (5772, 5777), True, 'import numpy as _np\n'), ((5792, 5804), 'numpy.sin', '_np.sin', (['lat'], {}), '(lat)\n', (5799, 5804), True, 'import numpy as _np\n'), ((6220, 6233), 'numpy.sin', '_np.sin', (['lamb'], {}), '(lamb)\n', (6227, 6233), True, 'import numpy as _np\n'), ((6249, 6262), 'numpy.cos', '_np.cos', (['lamb'], {}), '(lamb)\n', (6256, 6262), True, 'import numpy as _np\n'), ((6278, 6290), 'numpy.sin', '_np.sin', (['phi'], {}), '(phi)\n', (6285, 6290), True, 'import numpy as _np\n'), ((6306, 6318), 'numpy.cos', '_np.cos', (['phi'], {}), '(phi)\n', (6313, 6318), True, 'import numpy as _np\n'), ((6330, 6379), 'numpy.zeros', '_np.zeros', (['(phi.shape[0], 3, 3)'], {'dtype': '_np.float_'}), '((phi.shape[0], 3, 3), dtype=_np.float_)\n', (6339, 6379), True, 'import numpy as _np\n'), ((7052, 7066), 'numpy.cos', '_np.cos', (['theta'], {}), '(theta)\n', (7059, 7066), True, 'import numpy as _np\n'), ((7083, 7097), 'numpy.sin', '_np.sin', (['theta'], {}), '(theta)\n', (7090, 7097), True, 'import numpy as _np\n'), ((8335, 8354), 'numpy.cross', '_np.cross', (['u_u', 'v_u'], {}), '(u_u, v_u)\n', (8344, 8354), True, 'import numpy as _np\n'), ((8411, 8440), 'numpy.dstack', '_np.dstack', (['[u_u, -v_u, -w_u]'], {}), '([u_u, -v_u, -w_u])\n', (8421, 8440), True, 'import numpy as _np\n'), ((1364, 1374), 'numpy.eye', '_np.eye', (['(3)'], {}), '(3)\n', (1371, 1374), True, 'import numpy as _np\n'), ((2429, 2522), 'numpy.arctan', '_np.arctan', (['(z_arr[error_mask] / _r[error_mask] / (1 - ellipsoid.ecc1sq * _n / (_n + hei)))'], {}), '(z_arr[error_mask] / _r[error_mask] / (1 - ellipsoid.ecc1sq * _n /\n (_n + hei)))\n', (2439, 2522), True, 'import numpy as _np\n'), ((2900, 2929), 'numpy.rad2deg', '_np.rad2deg', (['llh_array[:, :2]'], {}), '(llh_array[:, :2])\n', (2911, 2929), True, 'import numpy as _np\n'), ((4355, 4384), 'numpy.rad2deg', '_np.rad2deg', (['llh_array[:, :2]'], {}), '(llh_array[:, :2])\n', (4366, 4384), True, 'import numpy as _np\n'), ((5406, 5427), 'numpy.sign', '_np.sign', (['(_t - 1 + _l)'], {}), '(_t - 1 + _l)\n', (5414, 5427), True, 'import numpy as _np\n'), ((5515, 5544), 'numpy.rad2deg', '_np.rad2deg', (['llh_array[:, :2]'], {}), '(llh_array[:, :2])\n', (5526, 5544), True, 'import numpy as _np\n'), ((5930, 5942), 'numpy.cos', '_np.cos', (['lon'], {}), '(lon)\n', (5937, 5942), True, 'import numpy as _np\n'), ((5972, 5984), 'numpy.sin', '_np.sin', (['lon'], {}), '(lon)\n', (5979, 5984), True, 'import numpy as _np\n'), ((321, 331), 'numpy.eye', '_np.eye', (['(3)'], {}), '(3)\n', (328, 331), True, 'import numpy as _np\n'), ((2544, 2563), 'numpy.abs', '_np.abs', (['(phi - phi0)'], {}), '(phi - phi0)\n', (2551, 2563), True, 'import numpy as _np\n'), ((5171, 5188), 'numpy.sign', '_np.sign', (['(_m - _n)'], {}), '(_m - _n)\n', (5179, 5188), True, 'import numpy as _np\n'), ((2374, 2399), 'numpy.cos', '_np.cos', (['phi0[error_mask]'], {}), '(phi0[error_mask])\n', (2381, 2399), True, 'import numpy as _np\n'), ((7304, 7338), 'numpy.concatenate', '_np.concatenate', (['[x_eci, y_eci, z]'], {}), '([x_eci, y_eci, z])\n', (7319, 7338), True, 'import numpy as _np\n'), ((2308, 2333), 'numpy.sin', '_np.sin', (['phi0[error_mask]'], {}), '(phi0[error_mask])\n', (2315, 2333), True, 'import numpy as _np\n')]
from unittest import TestCase
import numpy as np
from muDIC import Fields
class TestDIC_Post(TestCase):
def test__true_strain_(self):
# Tolerance
toll = 1e-7
# Generate random numbers in [-0.99,4.]
rand_nrs = 5. * (np.random.random_sample(1000)) - 0.99
# Format as [nEl,i,j,...]
eng_strain = np.reshape(rand_nrs, (5, 2, 2, -1))
# Calculate true strain
true_strain = Fields._true_strain_(eng_strain)
# Determine absolute error
deviation = np.abs(np.log(eng_strain + 1.) - true_strain)
# Pass if all elements are within tolerance
self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
def test_green_strain_(self):
# Tolerance
toll = 1e-7
# Generate random numbers in [0.5,1.5]
rand_nrs = (np.random.random_sample(1000)) + 0.5
# Format as [nEl,i,j,...]
F = np.reshape(rand_nrs, (5, 2, 2, 5, 5, 2))
# Calculate green deformation as F^T*F
Green_deformation = np.einsum('nij...,noj...->nio...', F, F)
I = np.eye(2, dtype=float)
# Calculate green strain tensor as 0.5(F^T * F - I)
Green_strain = 0.5 * (Green_deformation - I[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis])
# Green strain to be tested
G = Fields._green_strain_(F)
# Determine absolute error
deviation = np.abs(Green_strain - G)
self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
def test_engineering_strain_(self):
# Tolerance
toll = 1e-7
# Generate random numbers in [0.5,1.5]
rand_nrs = (np.random.random_sample(1000)) + 0.5
# Format as [nEl,i,j,...]
F = np.reshape(rand_nrs, (5, 2, 2, 5, 5, 2))
green_strain = Fields._green_strain_(F)
eng_strain = Fields._engineering_strain_(green_strain)
# Calculate green strain from engineering strain
E11 = 0.5 * ((eng_strain[:, 0, 0, :, :, :] + 1.) ** 2. - 1.)
E22 = 0.5 * ((eng_strain[:, 1, 1, :, :, :] + 1.) ** 2. - 1.)
E12 = 0.5 * np.sin(2. * eng_strain[:, 0, 1, :, :, :]) * (1. + eng_strain[:, 0, 0, :, :, :]) * (
1. + eng_strain[:, 1, 1, :, :, :])
        # Determine absolute error
deviation11 = np.abs(E11 - green_strain[:, 0, 0, :, :, :])
deviation22 = np.abs(E22 - green_strain[:, 1, 1, :, :, :])
deviation12 = np.abs(E12 - green_strain[:, 0, 1, :, :, :])
self.assertEqual(True, all([dev < toll for dev in deviation11.flatten()]))
self.assertEqual(True, all([dev < toll for dev in deviation12.flatten()]))
self.assertEqual(True, all([dev < toll for dev in deviation22.flatten()]))
def test_uniaxial_tension_(self):
# Tolerance
toll = 1e-7
# Deformation gradient
F = np.array([[1.1, 0.], [0., 1.]])
F_stack = np.ones((5, 2, 2, 5, 5, 2)) * F[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis]
E = Fields._green_strain_(F_stack)
eng_strain = Fields._engineering_strain_(E)
# Determine absolute error
deviation = np.abs(eng_strain - (F - np.eye(2))[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis])
self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
def test_biaxial_tension_(self):
# Tolerance
toll = 1e-7
# Deformation gradient
F = np.array([[1.1, 0.], [0., 1.1]])
F_stack = np.ones((5, 2, 2, 5, 5, 2)) * F[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis]
E = Fields._green_strain_(F_stack)
eng_strain = Fields._engineering_strain_(E)
# Determine absolute error
deviation = np.abs(eng_strain - (F - np.eye(2))[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis])
self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
def test_shear_(self):
# Small strain pure shear compared to rotated tension-compression
# Tolerance
toll = 1e-7
# Deformation gradient
F_shear = np.array([[1., 0.00001], [0.00001, 1.]])
F_shear_stack = np.ones((5, 2, 2, 5, 5, 2)) * F_shear[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis]
E_shear = Fields._green_strain_(F_shear_stack)
eng_strain_shear = Fields._engineering_strain_(E_shear)
# Rotate to tension-compression orientation
alpha = np.pi / 4.
R = np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
F_tc = np.dot(R.transpose(), np.dot(F_shear, R))
F_tc_stack = np.ones((5, 2, 2, 5, 5, 2)) * F_tc[np.newaxis, :, :, np.newaxis, np.newaxis, np.newaxis]
E_tc = Fields._green_strain_(F_tc_stack)
eng_strain_tc = Fields._engineering_strain_(E_tc)
# Rotate back to pure shear
alpha = -np.pi / 4.
R = np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
eng_strain_shear_rot = np.einsum('ij,njk...,kl...->nil...', R.transpose(), eng_strain_tc, R)
deviation = np.abs(eng_strain_shear - eng_strain_shear_rot)
# deviation = np.abs(eng_strain - (F-np.eye(2))[np.newaxis,:,:,np.newaxis,np.newaxis,np.newaxis])
self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
# TODO: Write this test!
# def rotated_tension_(self):
# # Tolerance
# toll = 1e-7
#
# # Deformation gradient
# F_tension = np.array([[1.1,0.],[0.,1.]])
#
# # Rotate back to pure shear
# alpha = -np.pi/4.
# R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])
#
# F_tension_rotation = np.dot(R,F_tension)
#
# F_stack = np.ones((5,2,2,5,5,2)) * F_tension_rotation[np.newaxis,:,:,np.newaxis,np.newaxis,np.newaxis]
#
# E = DIC_Post._green_strain_(F_stack)
#
# eng_strain = DIC_Post._engineering_strain_(E)
#
# alpha = np.pi/4.
# R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])
# eng_strain_shear_rot = np.einsum('ij,njk...,kl...->nil...', R.transpose(), eng_strain_tc, R)
#
# # Determine absolute error
# deviation = np.abs(eng_strain - (F-np.eye(2))[np.newaxis,:,:,np.newaxis,np.newaxis,np.newaxis])
#
# self.assertEqual(True, all([dev < toll for dev in deviation.flatten()]))
| [
"numpy.abs",
"muDIC.Fields._green_strain_",
"muDIC.Fields._engineering_strain_",
"numpy.random.random_sample",
"numpy.log",
"numpy.einsum",
"numpy.ones",
"muDIC.Fields._true_strain_",
"numpy.sin",
"numpy.array",
"numpy.reshape",
"numpy.cos",
"numpy.dot",
"numpy.eye"
] | [((348, 383), 'numpy.reshape', 'np.reshape', (['rand_nrs', '(5, 2, 2, -1)'], {}), '(rand_nrs, (5, 2, 2, -1))\n', (358, 383), True, 'import numpy as np\n'), ((438, 470), 'muDIC.Fields._true_strain_', 'Fields._true_strain_', (['eng_strain'], {}), '(eng_strain)\n', (458, 470), False, 'from muDIC import Fields\n'), ((931, 971), 'numpy.reshape', 'np.reshape', (['rand_nrs', '(5, 2, 2, 5, 5, 2)'], {}), '(rand_nrs, (5, 2, 2, 5, 5, 2))\n', (941, 971), True, 'import numpy as np\n'), ((1047, 1087), 'numpy.einsum', 'np.einsum', (['"""nij...,noj...->nio..."""', 'F', 'F'], {}), "('nij...,noj...->nio...', F, F)\n", (1056, 1087), True, 'import numpy as np\n'), ((1101, 1123), 'numpy.eye', 'np.eye', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (1107, 1123), True, 'import numpy as np\n'), ((1341, 1365), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F'], {}), '(F)\n', (1362, 1365), False, 'from muDIC import Fields\n'), ((1422, 1446), 'numpy.abs', 'np.abs', (['(Green_strain - G)'], {}), '(Green_strain - G)\n', (1428, 1446), True, 'import numpy as np\n'), ((1760, 1800), 'numpy.reshape', 'np.reshape', (['rand_nrs', '(5, 2, 2, 5, 5, 2)'], {}), '(rand_nrs, (5, 2, 2, 5, 5, 2))\n', (1770, 1800), True, 'import numpy as np\n'), ((1825, 1849), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F'], {}), '(F)\n', (1846, 1849), False, 'from muDIC import Fields\n'), ((1872, 1913), 'muDIC.Fields._engineering_strain_', 'Fields._engineering_strain_', (['green_strain'], {}), '(green_strain)\n', (1899, 1913), False, 'from muDIC import Fields\n'), ((2322, 2366), 'numpy.abs', 'np.abs', (['(E11 - green_strain[:, 0, 0, :, :, :])'], {}), '(E11 - green_strain[:, 0, 0, :, :, :])\n', (2328, 2366), True, 'import numpy as np\n'), ((2389, 2433), 'numpy.abs', 'np.abs', (['(E22 - green_strain[:, 1, 1, :, :, :])'], {}), '(E22 - green_strain[:, 1, 1, :, :, :])\n', (2395, 2433), True, 'import numpy as np\n'), ((2456, 2500), 'numpy.abs', 'np.abs', (['(E12 - green_strain[:, 0, 1, :, :, :])'], {}), '(E12 - green_strain[:, 0, 1, :, :, :])\n', (2462, 2500), True, 'import numpy as np\n'), ((2874, 2908), 'numpy.array', 'np.array', (['[[1.1, 0.0], [0.0, 1.0]]'], {}), '([[1.1, 0.0], [0.0, 1.0]])\n', (2882, 2908), True, 'import numpy as np\n'), ((3024, 3054), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F_stack'], {}), '(F_stack)\n', (3045, 3054), False, 'from muDIC import Fields\n'), ((3077, 3107), 'muDIC.Fields._engineering_strain_', 'Fields._engineering_strain_', (['E'], {}), '(E)\n', (3104, 3107), False, 'from muDIC import Fields\n'), ((3551, 3585), 'numpy.array', 'np.array', (['[[1.1, 0.0], [0.0, 1.1]]'], {}), '([[1.1, 0.0], [0.0, 1.1]])\n', (3559, 3585), True, 'import numpy as np\n'), ((3702, 3732), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F_stack'], {}), '(F_stack)\n', (3723, 3732), False, 'from muDIC import Fields\n'), ((3755, 3785), 'muDIC.Fields._engineering_strain_', 'Fields._engineering_strain_', (['E'], {}), '(E)\n', (3782, 3785), False, 'from muDIC import Fields\n'), ((4300, 4338), 'numpy.array', 'np.array', (['[[1.0, 1e-05], [1e-05, 1.0]]'], {}), '([[1.0, 1e-05], [1e-05, 1.0]])\n', (4308, 4338), True, 'import numpy as np\n'), ((4477, 4513), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F_shear_stack'], {}), '(F_shear_stack)\n', (4498, 4513), False, 'from muDIC import Fields\n'), ((4542, 4578), 'muDIC.Fields._engineering_strain_', 'Fields._engineering_strain_', (['E_shear'], {}), '(E_shear)\n', (4569, 4578), False, 'from muDIC import Fields\n'), ((4932, 4965), 'muDIC.Fields._green_strain_', 'Fields._green_strain_', (['F_tc_stack'], {}), '(F_tc_stack)\n', (4953, 4965), False, 'from muDIC import Fields\n'), ((4991, 5024), 'muDIC.Fields._engineering_strain_', 'Fields._engineering_strain_', (['E_tc'], {}), '(E_tc)\n', (5018, 5024), False, 'from muDIC import Fields\n'), ((5301, 5348), 'numpy.abs', 'np.abs', (['(eng_strain_shear - eng_strain_shear_rot)'], {}), '(eng_strain_shear - eng_strain_shear_rot)\n', (5307, 5348), True, 'import numpy as np\n'), ((847, 876), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000)'], {}), '(1000)\n', (870, 876), True, 'import numpy as np\n'), ((1677, 1706), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000)'], {}), '(1000)\n', (1700, 1706), True, 'import numpy as np\n'), ((2925, 2952), 'numpy.ones', 'np.ones', (['(5, 2, 2, 5, 5, 2)'], {}), '((5, 2, 2, 5, 5, 2))\n', (2932, 2952), True, 'import numpy as np\n'), ((3603, 3630), 'numpy.ones', 'np.ones', (['(5, 2, 2, 5, 5, 2)'], {}), '((5, 2, 2, 5, 5, 2))\n', (3610, 3630), True, 'import numpy as np\n'), ((4366, 4393), 'numpy.ones', 'np.ones', (['(5, 2, 2, 5, 5, 2)'], {}), '((5, 2, 2, 5, 5, 2))\n', (4373, 4393), True, 'import numpy as np\n'), ((4785, 4803), 'numpy.dot', 'np.dot', (['F_shear', 'R'], {}), '(F_shear, R)\n', (4791, 4803), True, 'import numpy as np\n'), ((4827, 4854), 'numpy.ones', 'np.ones', (['(5, 2, 2, 5, 5, 2)'], {}), '((5, 2, 2, 5, 5, 2))\n', (4834, 4854), True, 'import numpy as np\n'), ((255, 284), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000)'], {}), '(1000)\n', (278, 284), True, 'import numpy as np\n'), ((533, 557), 'numpy.log', 'np.log', (['(eng_strain + 1.0)'], {}), '(eng_strain + 1.0)\n', (539, 557), True, 'import numpy as np\n'), ((2130, 2172), 'numpy.sin', 'np.sin', (['(2.0 * eng_strain[:, 0, 1, :, :, :])'], {}), '(2.0 * eng_strain[:, 0, 1, :, :, :])\n', (2136, 2172), True, 'import numpy as np\n'), ((3135, 3144), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3141, 3144), True, 'import numpy as np\n'), ((3813, 3822), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3819, 3822), True, 'import numpy as np\n'), ((4682, 4695), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (4688, 4695), True, 'import numpy as np\n'), ((4715, 4728), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (4721, 4728), True, 'import numpy as np\n'), ((4730, 4743), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (4736, 4743), True, 'import numpy as np\n'), ((5113, 5126), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (5119, 5126), True, 'import numpy as np\n'), ((5146, 5159), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (5152, 5159), True, 'import numpy as np\n'), ((5161, 5174), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (5167, 5174), True, 'import numpy as np\n'), ((3281, 3290), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3287, 3290), True, 'import numpy as np\n'), ((3959, 3968), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3965, 3968), True, 'import numpy as np\n'), ((4698, 4711), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (4704, 4711), True, 'import numpy as np\n'), ((5129, 5142), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (5135, 5142), True, 'import numpy as np\n')]
"""
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
"""
NOTE on unit cell constraints determination:
XDS doesn't handle "real" rhombohedral space group (right?).
So, No need to support R3 or R32. They are handled as H3 or H32, maybe.
"""
import numpy
class CellConstraints:
def __init__(self, space_group):
self.cs = space_group.crystal_system()
# __init__()
def is_b_equal_a(self): return self.cs in ("Tetragonal", "Hexagonal", "Trigonal", "Cubic")
def is_c_equal_a_b(self): return self.cs == "Cubic"
def is_angle_constrained(self, angle):
assert angle in ("alpha", "beta", "gamma")
if self.cs == "Triclinic": return False
if self.cs == "Monoclinic": return angle != "beta"
return True
# is_angle_constrained()
# class CellConstraints
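# Hedged usage sketch (added; not in the original file). Any object exposing a
# crystal_system() method works here -- cctbx space-group objects do -- but a
# minimal stand-in stub is enough to illustrate the constraint logic.
class _StubSpaceGroup:
    def crystal_system(self): return "Tetragonal"

def _demo_cell_constraints():
    cc = CellConstraints(_StubSpaceGroup())
    assert cc.is_b_equal_a()                # a == b in tetragonal cells
    assert not cc.is_c_equal_a_b()          # c remains free
    assert cc.is_angle_constrained("beta")  # all angles fixed at 90 degrees
# _demo_cell_constraints()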
def is_same_laue_symmetry(sg1, sg2):
laue = lambda x: x.build_derived_reflection_intensity_group(anomalous_flag=False)
return laue(sg1) == laue(sg2) # == comparison of space_group object is possible.
# is_same_laue_symmetry()
def abc_convert_real_reciprocal(a, b, c):
V = numpy.dot(a, numpy.cross(b, c))
a_ = numpy.cross(b, c) / V
b_ = numpy.cross(c, a) / V
c_ = numpy.cross(a, b) / V
return a_, b_, c_
# abc_convert_real_reciprocal()
def format_unit_cell(uc, lfmt="%6.2f", afmt="%5.1f", sep=" "):
if hasattr(uc, "parameters"):
uc = uc.parameters()
lstr = sep.join(map(lambda x: lfmt%x, uc[:3]))
astr = sep.join(map(lambda x: afmt%x, uc[3:6]))
return lstr + sep + astr
# format_unit_cell()
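# Example (added): with the default formats, the 6-tuple
# (78.8, 78.8, 37.1, 90.0, 90.0, 90.0) is rendered as
# " 78.80  78.80  37.10  90.0  90.0  90.0"; objects exposing parameters(),
# such as a cctbx unit_cell, are unpacked first.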
| [
"numpy.cross"
] | [((1184, 1201), 'numpy.cross', 'numpy.cross', (['b', 'c'], {}), '(b, c)\n', (1195, 1201), False, 'import numpy\n'), ((1212, 1229), 'numpy.cross', 'numpy.cross', (['b', 'c'], {}), '(b, c)\n', (1223, 1229), False, 'import numpy\n'), ((1243, 1260), 'numpy.cross', 'numpy.cross', (['c', 'a'], {}), '(c, a)\n', (1254, 1260), False, 'import numpy\n'), ((1274, 1291), 'numpy.cross', 'numpy.cross', (['a', 'b'], {}), '(a, b)\n', (1285, 1291), False, 'import numpy\n')] |
import numpy as np
import tensorflow as tf
from numpy.linalg import norm
from tqdm.auto import tqdm
import math
def mse(v, v_pred):
return tf.reduce_mean(tf.square(v - v_pred))
def error_pod(U, V):
n_s = U.shape[1]
err_pod = 0.0
print("Computing POD error")
VV = V.dot(V.T)
for j in tqdm(range(n_s)):
err_pod += tf.norm(U[:, j] - VV.dot(U[:, j])) / tf.norm(U[:, j])
return err_pod.numpy() / n_s
def error_podnn(U, U_pred):
return norm(U - U_pred) / norm(U)
def error_podnn_rel(U, U_pred):
"""Define the relative error metric."""
U_pred_mean, U_mean = np.mean(U_pred, axis=-1), np.mean(U, axis=-1)
U_pred_std, U_std = np.std(U_pred, axis=-1), np.std(U, axis=-1)
err_mean = error_podnn(U_mean, U_pred_mean)
err_std = error_podnn(U_std, U_pred_std)
return err_mean, err_std | [
"numpy.std",
"numpy.mean",
"numpy.linalg.norm",
"tensorflow.square",
"tensorflow.norm"
] | [((160, 181), 'tensorflow.square', 'tf.square', (['(v - v_pred)'], {}), '(v - v_pred)\n', (169, 181), True, 'import tensorflow as tf\n'), ((476, 492), 'numpy.linalg.norm', 'norm', (['(U - U_pred)'], {}), '(U - U_pred)\n', (480, 492), False, 'from numpy.linalg import norm\n'), ((495, 502), 'numpy.linalg.norm', 'norm', (['U'], {}), '(U)\n', (499, 502), False, 'from numpy.linalg import norm\n'), ((607, 631), 'numpy.mean', 'np.mean', (['U_pred'], {'axis': '(-1)'}), '(U_pred, axis=-1)\n', (614, 631), True, 'import numpy as np\n'), ((633, 652), 'numpy.mean', 'np.mean', (['U'], {'axis': '(-1)'}), '(U, axis=-1)\n', (640, 652), True, 'import numpy as np\n'), ((677, 700), 'numpy.std', 'np.std', (['U_pred'], {'axis': '(-1)'}), '(U_pred, axis=-1)\n', (683, 700), True, 'import numpy as np\n'), ((702, 720), 'numpy.std', 'np.std', (['U'], {'axis': '(-1)'}), '(U, axis=-1)\n', (708, 720), True, 'import numpy as np\n'), ((385, 401), 'tensorflow.norm', 'tf.norm', (['U[:, j]'], {}), '(U[:, j])\n', (392, 401), True, 'import tensorflow as tf\n')] |
import numpy as np
def entropy_maximum(signal):
"""**Maximum Entropy (MaxEn)**
Provides an upper bound for the entropy of a random variable, so that the empirical entropy
(obtained for instance with :func:`entropy_shannon`) will lie in between 0 and max. entropy.
It can be useful to normalize the empirical entropy by the maximum entropy (which is made by
default in some algorithms).
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
Returns
--------
maxen : float
The maximum entropy of the signal.
info : dict
An empty dictionary returned for consistency with the other complexity functions.
See Also
--------
entropy_shannon
Examples
----------
.. ipython:: python
import neurokit2 as nk
signal = [1, 1, 5, 5, 2, 8, 1]
maxen, _ = nk.entropy_maximum(signal)
maxen
"""
return np.log2(len(np.unique(signal))), {}
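def _demo_normalized_entropy():
    # Illustrative sketch (added; not part of the original module): normalize an
    # empirical Shannon entropy by the maximum entropy so that the result lies
    # in [0, 1], as suggested in the docstring above.
    signal = np.asarray([1, 1, 5, 5, 2, 8, 1])
    _, counts = np.unique(signal, return_counts=True)
    p = counts / counts.sum()
    shannon = -np.sum(p * np.log2(p))  # empirical entropy in bits
    maxen, _ = entropy_maximum(signal)  # log2 of the alphabet size
    return shannon / maxen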
| [
"numpy.unique"
] | [((1013, 1030), 'numpy.unique', 'np.unique', (['signal'], {}), '(signal)\n', (1022, 1030), True, 'import numpy as np\n')] |
import os
import time
import json
import argparse
import torch
import torchvision
import random
import numpy as np
from data import FaceDataset
from tqdm import tqdm
from torch import nn
from torch import optim
from collections import OrderedDict
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from torchvision.models.resnet import resnet18
from mean_variance_loss import MeanVarianceLoss
import cv2
LAMBDA_1 = 0.2
LAMBDA_2 = 0.05
START_AGE = 0
END_AGE = 69
VALIDATION_RATE= 0.1
random.seed(2019)
np.random.seed(2019)
torch.manual_seed(2019)
def ResNet18(num_classes):
model = resnet18(pretrained=True)
model.fc = nn.Sequential(
nn.BatchNorm1d(512),
nn.Dropout(0.5),
nn.Linear(512, num_classes),
)
return model
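# Note (added): the pretrained backbone's 512-d feature is mapped to one logit
# per integer age in [START_AGE, END_AGE] (70 classes here), with BatchNorm and
# Dropout regularizing the new head.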
def train(train_loader, model, criterion1, criterion2, optimizer, epoch, result_directory):
model.train()
running_loss = 0.
running_mean_loss = 0.
running_variance_loss = 0.
running_softmax_loss = 0.
interval = 1
for i, sample in enumerate(train_loader):
images = sample['image'].cuda()
labels = sample['label'].cuda()
output = model(images)
mean_loss, variance_loss = criterion1(output, labels)
softmax_loss = criterion2(output, labels)
loss = mean_loss + variance_loss + softmax_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.data
running_softmax_loss += softmax_loss.data
running_mean_loss += mean_loss.data
running_variance_loss += variance_loss.data
if (i + 1) % interval == 0:
print('[%d, %5d] mean_loss: %.3f, variance_loss: %.3f, softmax_loss: %.3f, loss: %.3f'
% (epoch, i, running_mean_loss / interval,
running_variance_loss / interval,
running_softmax_loss / interval,
running_loss / interval))
with open(os.path.join(result_directory, 'log'), 'a') as f:
f.write('[%d, %5d] mean_loss: %.3f, variance_loss: %.3f, softmax_loss: %.3f, loss: %.3f\n'
% (epoch, i, running_mean_loss / interval,
running_variance_loss / interval,
running_softmax_loss / interval,
running_loss / interval))
running_loss = 0.
running_mean_loss = 0.
running_variance_loss = 0.
running_softmax_loss = 0.
def train_softmax(train_loader, model, criterion2, optimizer, epoch, result_directory):
model.train()
running_loss = 0.
running_softmax_loss = 0.
interval = 1
for i, sample in enumerate(train_loader):
images = sample['image'].cuda()
labels = sample['label'].cuda()
output = model(images)
loss = criterion2(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.data
if (i + 1) % interval == 0:
print('[%d, %5d] loss: %.3f'
% (epoch, i, running_loss / interval))
with open(os.path.join(result_directory, 'log'), 'a') as f:
f.write('[%d, %5d] loss: %.3f\n'
% (epoch, i, running_loss / interval))
running_loss = 0.
def evaluate(val_loader, model, criterion1, criterion2):
model.cuda()
model.eval()
loss_val = 0.
mean_loss_val = 0.
variance_loss_val = 0.
softmax_loss_val = 0.
mae = 0.
with torch.no_grad():
for i, sample in enumerate(val_loader):
image = sample['image'].cuda()
label = sample['label'].cuda()
output = model(image)
mean_loss, variance_loss = criterion1(output, label)
softmax_loss = criterion2(output, label)
loss = mean_loss + variance_loss + softmax_loss
loss_val += loss.data
mean_loss_val += mean_loss.data
variance_loss_val += variance_loss.data
softmax_loss_val += softmax_loss.data
m = nn.Softmax(dim=1)
output_softmax = m(output)
a = torch.arange(START_AGE, END_AGE + 1, dtype=torch.float32).cuda()
mean = (output_softmax * a).sum(1, keepdim=True).cpu().data.numpy()
pred = np.around(mean)
mae += np.absolute(pred - sample['label'].cpu().data.numpy())
return mean_loss_val / len(val_loader),\
variance_loss_val / len(val_loader),\
softmax_loss_val / len(val_loader),\
loss_val / len(val_loader),\
mae / len(val_loader)
def evaluate_softmax(val_loader, model, criterion2):
model.cuda()
model.eval()
loss_val = 0.
softmax_loss_val = 0.
mae = 0.
with torch.no_grad():
for i, sample in enumerate(val_loader):
image = sample['image'].cuda()
label = sample['label'].cuda()
output = model(image)
loss = criterion2(output, label)
loss_val += loss.data
m = nn.Softmax(dim=1)
output_softmax = m(output)
a = torch.arange(START_AGE, END_AGE + 1, dtype=torch.float32).cuda()
mean = (output_softmax * a).sum(1, keepdim=True).cpu().data.numpy()
pred = np.around(mean)
mae += np.absolute(pred - sample['label'].cpu().data.numpy())
return loss_val / len(val_loader), mae / len(val_loader)
def test(test_loader, model):
model.cuda()
model.eval()
mae = 0.
with torch.no_grad():
for i, sample in enumerate(test_loader):
image = sample['image'].cuda()
label = sample['label'].cuda()
output = model(image)
m = nn.Softmax(dim=1)
output = m(output)
a = torch.arange(START_AGE, END_AGE + 1, dtype=torch.float32).cuda()
mean = (output * a).sum(1, keepdim=True).cpu().data.numpy()
pred = np.around(mean)
mae += np.absolute(pred - sample['label'].cpu().data.numpy())
return mae / len(test_loader)
def predict(model, image):
model.eval()
with torch.no_grad():
image = image.astype(np.float32) / 255.
image = np.transpose(image, (2,0,1))
img = torch.from_numpy(image).cuda()
output = model(img[None])
m = nn.Softmax(dim=1)
output = m(output)
a = torch.arange(START_AGE, END_AGE + 1, dtype=torch.float32).cuda()
mean = (output * a).sum(1, keepdim=True).cpu().data.numpy()
pred = np.around(mean)[0][0]
return pred
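def _demo_expected_age_decoding():
    # Small illustration (added; not from the original script) of the
    # soft-argmax decoding used above: the prediction is the
    # probability-weighted mean of the age bins START_AGE..END_AGE.
    probs = torch.zeros(1, END_AGE - START_AGE + 1)
    probs[0, 24], probs[0, 25], probs[0, 26] = 0.25, 0.50, 0.25
    ages = torch.arange(START_AGE, END_AGE + 1, dtype=torch.float32)
    return (probs * ages).sum(1).item()  # -> 25.0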
def get_image_list(image_directory, leave_sub, validation_rate):
train_val_list = []
test_list = []
for fn in os.listdir(image_directory):
filepath = os.path.join(image_directory, fn)
subject = int(fn[:3])
if subject == leave_sub:
test_list.append(filepath)
else:
train_val_list.append(filepath)
num = len(train_val_list)
index_val = np.random.choice(num, int(num * validation_rate), replace=False)
train_list = []
val_list = []
for i, fp in enumerate(train_val_list):
if i in index_val:
val_list.append(fp)
else:
train_list.append(fp)
return train_list, val_list, test_list
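# Note (added): filenames are assumed to encode the subject id in their first
# three characters, so the split above is leave-one-subject-out for testing,
# with VALIDATION_RATE of the remaining files sampled for validation.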
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--batch_size', type=int, default=16)
parser.add_argument('-i', '--image_directory', type=str)
parser.add_argument('-ls', '--leave_subject', type=int)
parser.add_argument('-lr', '--learning_rate', type=float)
parser.add_argument('-e', '--epoch', type=int, default=0)
parser.add_argument('-r', '--resume', type=str, default=None)
parser.add_argument('-rd', '--result_directory', type=str, default=None)
parser.add_argument('-pi', '--pred_image', type=str, default=None)
parser.add_argument('-pm', '--pred_model', type=str, default=None)
parser.add_argument('-loss', '--is_mean_variance', action='store_true')
return parser.parse_args()
def main():
args = get_args()
if args.epoch > 0:
batch_size = args.batch_size
if args.result_directory is not None:
if not os.path.exists(args.result_directory):
os.mkdir(args.result_directory)
train_filepath_list, val_filepath_list, test_filepath_list\
= get_image_list(args.image_directory, args.leave_subject, VALIDATION_RATE)
transforms_train = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.RandomApply(
[torchvision.transforms.RandomAffine(degrees=10, shear=16),
torchvision.transforms.RandomHorizontalFlip(p=1.0),
], p=0.5),
torchvision.transforms.Resize((256, 256)),
torchvision.transforms.RandomCrop((224, 224)),
torchvision.transforms.ToTensor()
])
train_gen = FaceDataset(train_filepath_list, transforms_train)
train_loader = DataLoader(train_gen, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=8)
transforms = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize((224, 224)),
torchvision.transforms.ToTensor()
])
val_gen = FaceDataset(val_filepath_list, transforms)
val_loader = DataLoader(val_gen, batch_size=1, shuffle=False, pin_memory=True, num_workers=8)
test_gen = FaceDataset(test_filepath_list, transforms)
test_loader = DataLoader(test_gen, batch_size=1, shuffle=False, pin_memory=True, num_workers=8)
model = ResNet18(END_AGE - START_AGE + 1)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr = args.learning_rate)
criterion1 = MeanVarianceLoss(LAMBDA_1, LAMBDA_2, START_AGE, END_AGE).cuda()
criterion2 = torch.nn.CrossEntropyLoss().cuda()
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[80], gamma=0.1)
best_val_mae = np.inf
best_val_loss = np.inf
best_mae_epoch = -1
best_loss_epoch = -1
for epoch in range(args.epoch):
scheduler.step(epoch)
if args.is_mean_variance:
train(train_loader, model, criterion1, criterion2, optimizer, epoch, args.result_directory)
mean_loss, variance_loss, softmax_loss, loss_val, mae = evaluate(val_loader, model, criterion1, criterion2)
print('epoch: %d, mean_loss: %.3f, variance_loss: %.3f, softmax_loss: %.3f, loss: %.3f, mae: %3f' %
(epoch, mean_loss, variance_loss, softmax_loss, loss_val, mae))
with open(os.path.join(args.result_directory, 'log'), 'a') as f:
f.write('epoch: %d, mean_loss: %.3f, variance_loss: %.3f, softmax_loss: %.3f, loss: %.3f, mae: %3f\n' %
(epoch, mean_loss, variance_loss, softmax_loss, loss_val, mae))
else:
train_softmax(train_loader, model, criterion2, optimizer, epoch, args.result_directory)
loss_val, mae = evaluate_softmax(val_loader, model, criterion2)
print('epoch: %d, loss: %.3f, mae: %3f' % (epoch, loss_val, mae))
with open(os.path.join(args.result_directory, 'log'), 'a') as f:
f.write('epoch: %d, loss: %.3f, mae: %3f\n' % (epoch, loss_val, mae))
mae_test = test(test_loader, model)
print('epoch: %d, test_mae: %3f' % (epoch, mae_test))
with open(os.path.join(args.result_directory, 'log'), 'a') as f:
f.write('epoch: %d, mae_test: %3f\n' % (epoch, mae_test))
if best_val_mae > mae:
best_val_mae = mae
best_mae_epoch = epoch
torch.save(model.state_dict(), os.path.join(args.result_directory, "model_best_mae"))
if best_val_loss > loss_val:
best_val_loss = loss_val
best_loss_epoch = epoch
torch.save(model.state_dict(), os.path.join(args.result_directory, "model_best_loss"))
with open(os.path.join(args.result_directory, 'log'), 'a') as f:
f.write('best_loss_epoch: %d, best_val_loss: %f, best_mae_epoch: %d, best_val_mae: %f\n'
% (best_loss_epoch, best_val_loss, best_mae_epoch, best_val_mae))
print('best_loss_epoch: %d, best_val_loss: %f, best_mae_epoch: %d, best_val_mae: %f'
% (best_loss_epoch, best_val_loss, best_mae_epoch, best_val_mae))
if args.pred_image and args.pred_model:
        model = ResNet18(END_AGE - START_AGE + 1)
model.cuda()
img = cv2.imread(args.pred_image)
resized_img = cv2.resize(img, (224, 224))
model.load_state_dict(torch.load(args.pred_model))
pred = predict(model, resized_img)
print('Age: ' + str(int(pred)))
cv2.putText(img, 'Age: ' + str(int(pred)), (int(img.shape[1]*0.1), int(img.shape[0]*0.9)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
name, ext = os.path.splitext(args.pred_image)
cv2.imwrite(name + '_result.jpg', img)
if __name__ == "__main__":
main()
| [
"torch.nn.Dropout",
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"torchvision.models.resnet.resnet18",
"data.FaceDataset",
"numpy.around",
"torch.nn.Softmax",
"torch.arange",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"cv2.imwrite",
"torch.load",
"nump... | [((553, 570), 'random.seed', 'random.seed', (['(2019)'], {}), '(2019)\n', (564, 570), False, 'import random\n'), ((571, 591), 'numpy.random.seed', 'np.random.seed', (['(2019)'], {}), '(2019)\n', (585, 591), True, 'import numpy as np\n'), ((592, 615), 'torch.manual_seed', 'torch.manual_seed', (['(2019)'], {}), '(2019)\n', (609, 615), False, 'import torch\n'), ((658, 683), 'torchvision.models.resnet.resnet18', 'resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (666, 683), False, 'from torchvision.models.resnet import resnet18\n'), ((6789, 6816), 'os.listdir', 'os.listdir', (['image_directory'], {}), '(image_directory)\n', (6799, 6816), False, 'import os\n'), ((7407, 7432), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7430, 7432), False, 'import argparse\n'), ((722, 741), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (736, 741), False, 'from torch import nn\n'), ((751, 766), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (761, 766), False, 'from torch import nn\n'), ((776, 803), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_classes'], {}), '(512, num_classes)\n', (785, 803), False, 'from torch import nn\n'), ((3613, 3628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3626, 3628), False, 'import torch\n'), ((4857, 4872), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4870, 4872), False, 'import torch\n'), ((5613, 5628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5626, 5628), False, 'import torch\n'), ((6216, 6231), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6229, 6231), False, 'import torch\n'), ((6297, 6327), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (6309, 6327), True, 'import numpy as np\n'), ((6417, 6434), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6427, 6434), False, 'from torch import nn\n'), ((6837, 6870), 'os.path.join', 'os.path.join', (['image_directory', 'fn'], {}), '(image_directory, fn)\n', (6849, 6870), False, 'import os\n'), ((9068, 9118), 'data.FaceDataset', 'FaceDataset', (['train_filepath_list', 'transforms_train'], {}), '(train_filepath_list, transforms_train)\n', (9079, 9118), False, 'from data import FaceDataset\n'), ((9142, 9236), 'torch.utils.data.DataLoader', 'DataLoader', (['train_gen'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(8)'}), '(train_gen, batch_size=batch_size, shuffle=True, pin_memory=True,\n num_workers=8)\n', (9152, 9236), False, 'from torch.utils.data import DataLoader\n'), ((9467, 9509), 'data.FaceDataset', 'FaceDataset', (['val_filepath_list', 'transforms'], {}), '(val_filepath_list, transforms)\n', (9478, 9509), False, 'from data import FaceDataset\n'), ((9531, 9616), 'torch.utils.data.DataLoader', 'DataLoader', (['val_gen'], {'batch_size': '(1)', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(8)'}), '(val_gen, batch_size=1, shuffle=False, pin_memory=True, num_workers=8\n )\n', (9541, 9616), False, 'from torch.utils.data import DataLoader\n'), ((9632, 9675), 'data.FaceDataset', 'FaceDataset', (['test_filepath_list', 'transforms'], {}), '(test_filepath_list, transforms)\n', (9643, 9675), False, 'from data import FaceDataset\n'), ((9698, 9783), 'torch.utils.data.DataLoader', 'DataLoader', (['test_gen'], {'batch_size': '(1)', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(8)'}), '(test_gen, batch_size=1, shuffle=False, pin_memory=True,\n num_workers=8)\n', (9708, 9783), 
False, 'from torch.utils.data import DataLoader\n'), ((10090, 10153), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[80]', 'gamma': '(0.1)'}), '(optimizer, milestones=[80], gamma=0.1)\n', (10114, 10153), False, 'from torch.optim import lr_scheduler\n'), ((12845, 12872), 'cv2.imread', 'cv2.imread', (['args.pred_image'], {}), '(args.pred_image)\n', (12855, 12872), False, 'import cv2\n'), ((12895, 12922), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (12905, 12922), False, 'import cv2\n'), ((13231, 13264), 'os.path.splitext', 'os.path.splitext', (['args.pred_image'], {}), '(args.pred_image)\n', (13247, 13264), False, 'import os\n'), ((13273, 13311), 'cv2.imwrite', 'cv2.imwrite', (["(name + '_result.jpg')", 'img'], {}), "(name + '_result.jpg', img)\n", (13284, 13311), False, 'import cv2\n'), ((4172, 4189), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4182, 4189), False, 'from torch import nn\n'), ((4409, 4424), 'numpy.around', 'np.around', (['mean'], {}), '(mean)\n', (4418, 4424), True, 'import numpy as np\n'), ((5137, 5154), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5147, 5154), False, 'from torch import nn\n'), ((5374, 5389), 'numpy.around', 'np.around', (['mean'], {}), '(mean)\n', (5383, 5389), True, 'import numpy as np\n'), ((5815, 5832), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5825, 5832), False, 'from torch import nn\n'), ((6036, 6051), 'numpy.around', 'np.around', (['mean'], {}), '(mean)\n', (6045, 6051), True, 'import numpy as np\n'), ((12953, 12980), 'torch.load', 'torch.load', (['args.pred_model'], {}), '(args.pred_model)\n', (12963, 12980), False, 'import torch\n'), ((6340, 6363), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (6356, 6363), False, 'import torch\n'), ((6474, 6531), 'torch.arange', 'torch.arange', (['START_AGE', '(END_AGE + 1)'], {'dtype': 'torch.float32'}), '(START_AGE, END_AGE + 1, dtype=torch.float32)\n', (6486, 6531), False, 'import torch\n'), ((6622, 6637), 'numpy.around', 'np.around', (['mean'], {}), '(mean)\n', (6631, 6637), True, 'import numpy as np\n'), ((8304, 8341), 'os.path.exists', 'os.path.exists', (['args.result_directory'], {}), '(args.result_directory)\n', (8318, 8341), False, 'import os\n'), ((8359, 8390), 'os.mkdir', 'os.mkdir', (['args.result_directory'], {}), '(args.result_directory)\n', (8367, 8390), False, 'import os\n'), ((8620, 8655), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (8653, 8655), False, 'import torchvision\n'), ((8889, 8930), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (8918, 8930), False, 'import torchvision\n'), ((8944, 8989), 'torchvision.transforms.RandomCrop', 'torchvision.transforms.RandomCrop', (['(224, 224)'], {}), '((224, 224))\n', (8977, 8989), False, 'import torchvision\n'), ((9003, 9036), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (9034, 9036), False, 'import torchvision\n'), ((9300, 9335), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (9333, 9335), False, 'import torchvision\n'), ((9349, 9390), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (9378, 9390), False, 'import torchvision\n'), ((9404, 9437), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], 
{}), '()\n', (9435, 9437), False, 'import torchvision\n'), ((9950, 10006), 'mean_variance_loss.MeanVarianceLoss', 'MeanVarianceLoss', (['LAMBDA_1', 'LAMBDA_2', 'START_AGE', 'END_AGE'], {}), '(LAMBDA_1, LAMBDA_2, START_AGE, END_AGE)\n', (9966, 10006), False, 'from mean_variance_loss import MeanVarianceLoss\n'), ((10035, 10062), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (10060, 10062), False, 'import torch\n'), ((12300, 12342), 'os.path.join', 'os.path.join', (['args.result_directory', '"""log"""'], {}), "(args.result_directory, 'log')\n", (12312, 12342), False, 'import os\n'), ((2026, 2063), 'os.path.join', 'os.path.join', (['result_directory', '"""log"""'], {}), "(result_directory, 'log')\n", (2038, 2063), False, 'import os\n'), ((3212, 3249), 'os.path.join', 'os.path.join', (['result_directory', '"""log"""'], {}), "(result_directory, 'log')\n", (3224, 3249), False, 'import os\n'), ((4245, 4302), 'torch.arange', 'torch.arange', (['START_AGE', '(END_AGE + 1)'], {'dtype': 'torch.float32'}), '(START_AGE, END_AGE + 1, dtype=torch.float32)\n', (4257, 4302), False, 'import torch\n'), ((5210, 5267), 'torch.arange', 'torch.arange', (['START_AGE', '(END_AGE + 1)'], {'dtype': 'torch.float32'}), '(START_AGE, END_AGE + 1, dtype=torch.float32)\n', (5222, 5267), False, 'import torch\n'), ((5880, 5937), 'torch.arange', 'torch.arange', (['START_AGE', '(END_AGE + 1)'], {'dtype': 'torch.float32'}), '(START_AGE, END_AGE + 1, dtype=torch.float32)\n', (5892, 5937), False, 'import torch\n'), ((11705, 11747), 'os.path.join', 'os.path.join', (['args.result_directory', '"""log"""'], {}), "(args.result_directory, 'log')\n", (11717, 11747), False, 'import os\n'), ((11990, 12043), 'os.path.join', 'os.path.join', (['args.result_directory', '"""model_best_mae"""'], {}), "(args.result_directory, 'model_best_mae')\n", (12002, 12043), False, 'import os\n'), ((12214, 12268), 'os.path.join', 'os.path.join', (['args.result_directory', '"""model_best_loss"""'], {}), "(args.result_directory, 'model_best_loss')\n", (12226, 12268), False, 'import os\n'), ((8722, 8779), 'torchvision.transforms.RandomAffine', 'torchvision.transforms.RandomAffine', ([], {'degrees': '(10)', 'shear': '(16)'}), '(degrees=10, shear=16)\n', (8757, 8779), False, 'import torchvision\n'), ((8798, 8848), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ([], {'p': '(1.0)'}), '(p=1.0)\n', (8841, 8848), False, 'import torchvision\n'), ((10846, 10888), 'os.path.join', 'os.path.join', (['args.result_directory', '"""log"""'], {}), "(args.result_directory, 'log')\n", (10858, 10888), False, 'import os\n'), ((11423, 11465), 'os.path.join', 'os.path.join', (['args.result_directory', '"""log"""'], {}), "(args.result_directory, 'log')\n", (11435, 11465), False, 'import os\n')] |
import numba
import numpy as np
import random
import concurrent.futures
from numba import njit, jit, prange
from numba.typed import List
try:
from numba.experimental import jitclass
except ModuleNotFoundError:
from numba import jitclass
from collections import OrderedDict
from itertools import repeat
from . import BurrowsWheelerLibrary
from . import PhasingObjects
from ..tinyhouse.Utils import time_func
from ..tinyhouse import InputOutput
try:
profile
except:
def profile(x):
return x
# @time_func("Creating BW library")
@profile
def get_reference_library(individuals, individual_exclusion = False, reverse = False):
# Construct a library, and add individuals to it.
    # individual_exclusion adds a flag to record whose haplotype is in the library, so that the haplotype can be removed when phasing that individual.
    # setup: Determines whether the BW library is set up, or if just a base library is created. This can be useful if the library needs to be subsetted before being used.
# Reverse: Determines if the library should be made up of the reverse haplotypes -- this is used for the backward passes.
haplotype_library = BurrowsWheelerLibrary.BurrowsWheelerLibrary()
for ind in individuals:
for hap in ind.current_haplotypes:
# Unless set to something else, ind.current_haplotypes tracks ind.haplotypes.
if reverse:
new_hap = np.ascontiguousarray(np.flip(hap))
else:
new_hap = np.ascontiguousarray(hap.copy())
if individual_exclusion:
haplotype_library.append(new_hap, ind)
else:
haplotype_library.append(new_hap)
# Fills in missing data, runs the BW algorithm on the haplotypes, and sets exclusions.
return haplotype_library
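# Hedged usage sketch (added; mirrors phase_round below):
#
#   bw_library = get_reference_library(individuals, individual_exclusion = True)
#   bw_library.setup_library(create_reverse_library = True, create_a = False)
#
# With individual_exclusion set, each individual's own haplotypes can be
# excluded from the library when that individual is phased.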
def run_phasing(individuals, cycles, args):
# Runs phasing cycles. Note: All backward cycles are run before running the forward cycles.
    # I tried alternating forward/backward/forward/backward cycles,
# and the contamination of the forward cycles in the backward pass substantially decreased accuracy.
print("")
print("Backwards phasing cycles.")
rev_individuals = setup_reverse_individuals(individuals)
create_library_and_phase(rev_individuals, cycles, args)
integrate_reverse_individuals(individuals)
rev_individuals = None
print("")
print("Forwards phasing cycles")
create_library_and_phase(individuals, cycles, args)
def setup_reverse_individuals(individuals):
rev_individuals = [ind.reverse_individual() for ind in individuals]
# Run reverse pass
for rev_ind in rev_individuals:
rev_ind.setPhasingView()
return rev_individuals
def integrate_reverse_individuals(individuals):
for ind in individuals:
ind.add_backward_info()
ind.clear_reverse_view()
ind.setPhasingView()
@time_func("Total phasing")
@profile
def create_library_and_phase(individuals, cycles, args) :
# This function creates a haplotype library and phases individuals using the haplotype library.
# Set haplotypes on the last sample.
for i in range(len(cycles) - 1):
phase_round(individuals, set_haplotypes = False, n_samples = cycles[i], args = args)
# Run last round of phasing.
phase_round(individuals, set_haplotypes = True, n_samples = cycles[-1], args = args)
@time_func("Phasing round")
@profile
def phase_round(individuals, set_haplotypes = False, n_samples = 40, args = None):
bw_library = get_reference_library(individuals, individual_exclusion = True)
bw_library.setup_library(create_reverse_library = True, create_a = False)
if InputOutput.args.maxthreads <= 1:
for individual in individuals:
phase_individual(individual, bw_library, set_haplotypes = set_haplotypes, n_samples = n_samples, map_length = args.length*args.phasing_length_modifier)
else:
with concurrent.futures.ThreadPoolExecutor(max_workers=InputOutput.args.maxthreads) as executor:
executor.map(phase_individual, individuals, repeat(bw_library), repeat(set_haplotypes), repeat(n_samples), repeat(args.length*args.phasing_length_modifier))
def phase_individual(individual, haplotype_library, set_haplotypes, n_samples, map_length):
phase(individual, haplotype_library, set_haplotypes = set_haplotypes, imputation = False, n_samples = n_samples, map_length = map_length)
def get_random_values(individual, library, n_samples):
nHaps, n_loci = library.zeroOccNext.shape
random_values = individual.random_generator.random(size = (n_samples, n_loci))
return random_values
def phase(individual, haplotype_library, set_haplotypes, imputation, n_samples, map_length):
    # Random values ensure that phasing is consistent per individual
random_values = get_random_values(individual, haplotype_library.library, n_samples)
individual.phasing_view.setup_penetrance()
phase_jit(individual.phasing_view, haplotype_library.library, set_haplotypes, imputation, n_samples, random_values, map_length, InputOutput.args.phasing_consensus_window_size)
individual.phasing_view.clear_penetrance()
@jit(nopython=True, nogil=True)
def phase_jit(ind, haplotype_library, set_haplotypes, imputation, n_samples, random_values, map_length, phasing_consensus_window_size) :
# Phases a specific individual.
# Set_haplotypes determines whether or not to actually set the haplotypes of an individual based on the underlying samples.
    # Set_haplotypes also determines whether forward_geno_probs gets calculated.
# FLAG: error_rate is hard coded.
nLoci = haplotype_library.nLoci
rate = map_length/nLoci
if imputation:
calculate_forward_estimates = True
track_hap_info = True
else:
calculate_forward_estimates = set_haplotypes
track_hap_info = False
error_rate = 0.01
sample_container = PhasingObjects.PhasingSampleContainer(haplotype_library, ind)
for i in range(n_samples):
# This is the main imputation step.
sample_container.add_sample(rate, error_rate, calculate_forward_estimates, track_hap_info, random_values[i,:])
if imputation:
        # For imputation, phasing is only run on a subset of loci.
# This step extends the phase information to all of the loci.
converted_samples = [expand_sample(ind, sample, haplotype_library) for sample in sample_container.samples]
extended_sample_container = PhasingObjects.PhasingSampleContainer(haplotype_library, ind)
extended_sample_container.samples = converted_samples
pat_hap, mat_hap = extended_sample_container.get_consensus(phasing_consensus_window_size)
else:
pat_hap, mat_hap = sample_container.get_consensus(phasing_consensus_window_size)
if not imputation and set_haplotypes:
if ind.population_imputation_target:
# If phasing, and individual is a target for imputation, set their haplotypes.
add_haplotypes_to_ind(ind, pat_hap, mat_hap)
ind.backward[:,:] = 0
for sample in sample_container.samples:
ind.backward += sample.forward.forward_geno_probs # We're really just averaging over particles.
if imputation:
add_haplotypes_to_ind(ind, pat_hap, mat_hap)
backward = ind.backward # Not sure why we need to set a secondary variable here, but it turns out we do, otherwise ind.backward doesn't update correctly.
# Only update loci that were considered as part of the haplotype library.
for index in haplotype_library.loci:
for j in range(4):
backward[j, index] = 0.0
for sample in sample_container.samples:
for j in range(4):
backward[j, index] += sample.forward.forward_geno_probs[j, index] # We're really just averaging over particles.
# Always set current_haplotype after the last round of phasing.
ind.current_haplotypes[0][:] = pat_hap
ind.current_haplotypes[1][:] = mat_hap
@jit(nopython=True, nogil=True)
def add_haplotypes_to_ind(ind, pat_hap, mat_hap):
    # Sets all loci to the new values (this will call missing loci as well).
ind.haplotypes[0][:] = pat_hap[:]
ind.haplotypes[1][:] = mat_hap[:]
ind.genotypes[:] = pat_hap[:] + mat_hap[:]
@jit(nopython=True, nogil=True)
def expand_sample(ind, sample, bw_library):
nLoci = len(ind.genotypes)
pat_hap = np.full(nLoci, 9, dtype = np.int8)
mat_hap = np.full(nLoci, 9, dtype = np.int8)
hap_info = sample.hap_info
# Fill in the missing loci for phasing.
for i in range(len(hap_info.pat_ranges)):
range_object = hap_info.pat_ranges[i]
# Get the start/stop index for the haplotype in terms of the full (global) set of markers
global_start, global_stop = hap_info.get_global_bounds(i, 0)
# Expand out the haplotype to the full range
set_hap_from_range(range_object, pat_hap, global_start, global_stop, bw_library)
for i in range(len(hap_info.mat_ranges)):
range_object = hap_info.mat_ranges[i]
global_start, global_stop = hap_info.get_global_bounds(i, 1)
set_hap_from_range(range_object, mat_hap, global_start, global_stop, bw_library)
new_sample = PhasingObjects.PhasingSample(sample.rec_rate, sample.error_rate)
new_sample.haplotypes = (pat_hap, mat_hap)
new_sample.genotypes = pat_hap + mat_hap
# Sets the recombination score for the expanded sample.
# Recombination scores are just applied to the corresponding loci in the global set of markers.
# Missing markers on the chip, have a recombination score of 0.
new_sample.rec = expand_rec_tracking(sample.rec, bw_library)
return new_sample
@jit(nopython=True, nogil=True)
def expand_rec_tracking(partial_rec, bw_library):
new_rec = np.full(bw_library.full_nLoci, 0, dtype = np.float32)
for i in range(len(partial_rec)):
true_index = bw_library.get_true_index(i)
new_rec[true_index] = partial_rec[i]
return new_rec
@jit(nopython=True, nogil=True)
def set_hap_from_range(range_object, hap, global_start, global_stop, bw_library):
encoding_index = range_object.encoding_index
# Select the middle haplotype.
bw_index = int((range_object.hap_range[0] + range_object.hap_range[1]-1)/2)
haplotype_index = bw_library.a[bw_index, encoding_index]
hap[global_start:global_stop] = bw_library.full_haps[haplotype_index, global_start:global_stop]
| [
"numpy.full",
"numpy.flip",
"numba.jit",
"itertools.repeat"
] | [((5221, 5251), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (5224, 5251), False, 'from numba import njit, jit, prange\n'), ((8119, 8149), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (8122, 8149), False, 'from numba import njit, jit, prange\n'), ((8405, 8435), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (8408, 8435), False, 'from numba import njit, jit, prange\n'), ((9838, 9868), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (9841, 9868), False, 'from numba import njit, jit, prange\n'), ((10142, 10172), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (10145, 10172), False, 'from numba import njit, jit, prange\n'), ((8527, 8559), 'numpy.full', 'np.full', (['nLoci', '(9)'], {'dtype': 'np.int8'}), '(nLoci, 9, dtype=np.int8)\n', (8534, 8559), True, 'import numpy as np\n'), ((8576, 8608), 'numpy.full', 'np.full', (['nLoci', '(9)'], {'dtype': 'np.int8'}), '(nLoci, 9, dtype=np.int8)\n', (8583, 8608), True, 'import numpy as np\n'), ((9933, 9984), 'numpy.full', 'np.full', (['bw_library.full_nLoci', '(0)'], {'dtype': 'np.float32'}), '(bw_library.full_nLoci, 0, dtype=np.float32)\n', (9940, 9984), True, 'import numpy as np\n'), ((4134, 4152), 'itertools.repeat', 'repeat', (['bw_library'], {}), '(bw_library)\n', (4140, 4152), False, 'from itertools import repeat\n'), ((4154, 4176), 'itertools.repeat', 'repeat', (['set_haplotypes'], {}), '(set_haplotypes)\n', (4160, 4176), False, 'from itertools import repeat\n'), ((4178, 4195), 'itertools.repeat', 'repeat', (['n_samples'], {}), '(n_samples)\n', (4184, 4195), False, 'from itertools import repeat\n'), ((4197, 4247), 'itertools.repeat', 'repeat', (['(args.length * args.phasing_length_modifier)'], {}), '(args.length * args.phasing_length_modifier)\n', (4203, 4247), False, 'from itertools import repeat\n'), ((1467, 1479), 'numpy.flip', 'np.flip', (['hap'], {}), '(hap)\n', (1474, 1479), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import torch
from pytorch_adapt.datasets import CombinedSourceAndTargetDataset
from pytorch_adapt.utils.common_functions import join_lists
class TestCombinedSourceAndTarget(unittest.TestCase):
def test_combined(self):
np.random.seed(3429)
for target_dataset_size in [99, 199]:
src_dataset_size = 117
src = torch.arange(src_dataset_size)
src = [{"src_imgs": i, "src_labels": i} for i in src]
tgt = torch.arange(target_dataset_size)
tgt = [{"target_imgs": i} for i in tgt]
d = CombinedSourceAndTargetDataset(src, tgt)
collected = []
num_loops = 10000
batch_size = 64
total_len = num_loops * batch_size
for x in range(num_loops):
collected.append([])
for i in range(batch_size):
batch = d[i]
collected[x].append(
(batch["src_imgs"].item(), batch["target_imgs"].item())
)
all_src = []
for c in collected:
self.assertTrue([x[1] for x in c] == list(range(batch_size)))
curr_src = [x[0] for x in c]
# check for randomness
self.assertTrue(curr_src not in all_src)
all_src.append(curr_src)
all_src = join_lists(all_src)
self.assertTrue(len(all_src) == total_len)
bincount = np.bincount(all_src)
self.assertTrue(len(bincount) == src_dataset_size)
ideal_bincount = total_len // src_dataset_size
self.assertTrue(
all(np.isclose(x, ideal_bincount, rtol=0.1) for x in bincount)
)
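# Note (added): with 10000 batches of 64 samples, each of the 117 source items
# is expected about 640000 / 117 ~ 5470 times, so the 10% relative tolerance on
# the bincount is a loose but fast uniformity check.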
| [
"pytorch_adapt.datasets.CombinedSourceAndTargetDataset",
"numpy.random.seed",
"pytorch_adapt.utils.common_functions.join_lists",
"numpy.isclose",
"torch.arange",
"numpy.bincount"
] | [((269, 289), 'numpy.random.seed', 'np.random.seed', (['(3429)'], {}), '(3429)\n', (283, 289), True, 'import numpy as np\n'), ((390, 420), 'torch.arange', 'torch.arange', (['src_dataset_size'], {}), '(src_dataset_size)\n', (402, 420), False, 'import torch\n'), ((505, 538), 'torch.arange', 'torch.arange', (['target_dataset_size'], {}), '(target_dataset_size)\n', (517, 538), False, 'import torch\n'), ((607, 647), 'pytorch_adapt.datasets.CombinedSourceAndTargetDataset', 'CombinedSourceAndTargetDataset', (['src', 'tgt'], {}), '(src, tgt)\n', (637, 647), False, 'from pytorch_adapt.datasets import CombinedSourceAndTargetDataset\n'), ((1418, 1437), 'pytorch_adapt.utils.common_functions.join_lists', 'join_lists', (['all_src'], {}), '(all_src)\n', (1428, 1437), False, 'from pytorch_adapt.utils.common_functions import join_lists\n'), ((1516, 1536), 'numpy.bincount', 'np.bincount', (['all_src'], {}), '(all_src)\n', (1527, 1536), True, 'import numpy as np\n'), ((1708, 1747), 'numpy.isclose', 'np.isclose', (['x', 'ideal_bincount'], {'rtol': '(0.1)'}), '(x, ideal_bincount, rtol=0.1)\n', (1718, 1747), True, 'import numpy as np\n')] |
# Python modules
# 3rd party modules
import wx
import numpy as np
# Our modules
import vespa.analysis.tab_base as tab_base
import vespa.analysis.prefs as prefs_module
import vespa.analysis.constants as constants
import vespa.analysis.util_menu as util_menu
import vespa.analysis.auto_gui.fidsum_wbnaa as fidsum_wbnaa
import vespa.common.wx_gravy.util as wx_util
import vespa.common.wx_gravy.common_dialogs as common_dialogs
from vespa.analysis.plot_panel_prep_wbnaa import PlotPanelPrepWbnaa, PlotPanelPrepWbnaaSeries
EXCLUDE_DISPLAY_CHOICES = ["FID Abs First Point",
"Peak Shift [Hz]",
"Peak Phase [deg]"
]
#------------------------------------------------------------------------------
def _configure_combo(control, choices, selection=''):
lines = list(choices.values())
control.SetItems(lines)
if selection in lines:
control.SetStringSelection(selection)
else:
control.SetStringSelection(lines[0])
def _paired_event(obj_min, obj_max):
val_min = obj_min.GetValue()
val_max = obj_max.GetValue()
pmin = min(val_min, val_max)
pmax = max(val_min, val_max)
obj_min.SetValue(pmin)
obj_max.SetValue(pmax)
return pmin, pmax
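# Note (added): _paired_event keeps a (min, max) pair of wx controls ordered by
# swapping the displayed values when the user crosses them, and returns the
# ordered pair for downstream use.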
#------------------------------------------------------------------------------
#
# Tab PREP WBNAA (from FIDSUM)
#
#------------------------------------------------------------------------------
class TabPrepWbnaa(tab_base.Tab, fidsum_wbnaa.PanelPrepWbnaaUI):
# self-identify tab to notebook, value does not matter, its presence is sufficient.
IS_PREP_FIDSUM = True
def __init__(self, tab_dataset, top, block):
fidsum_wbnaa.PanelPrepWbnaaUI.__init__(self, tab_dataset.NotebookDataset)
tab_base.Tab.__init__(self, tab_dataset, top, prefs_module.PrefsPrepFidsum)
self.top = top # application frame
self.block = block # processing object
# Plotting is disabled during some of init. That's because the plot
# isn't ready to plot, but the population of some controls
# (e.g. spin controls on the water filter panel) fires their
# respective change event which triggers a call to plot(). This
# appears to happen only under Windows; it might be a Windows-specific
# bug.
# In any case, skipping some calls to plot() will speed things up. =)
self._plotting_enabled = False
self.plot_results = None
self.fid_index = 0
self.initialize_controls()
self.populate_controls()
self._plotting_enabled = True
#------------------------------------------------------------
# Setup the canvas
self.process_and_plot()
# If the sash position isn't recorded in the INI file, we use the
# arbitrary-ish value of 400.
if not self._prefs.sash_position:
self._prefs.sash_position = 400
# Under OS X, wx sets the sash position to 10 (why 10?) *after*
# this method is done. So setting the sash position here does no
# good. We use wx.CallAfter() to (a) set the sash position and
# (b) fake an EVT_SPLITTER_SASH_POS_CHANGED.
wx.CallAfter(self.SplitterWindow.SetSashPosition,
self._prefs.sash_position, True)
wx.CallAfter(self.on_splitter)
#=======================================================
#
# GUI Setup Handlers
#
#=======================================================
def initialize_controls(self):
"""
Initializes the controls to be the right size or have the right
range or number of decimal places. It typically does not set the
default value (that's for populate_controls method to do). This
method does the one-time setup bits.
"""
dataset = self.dataset
dim0, dim1, dim2, dim3 = dataset.spectral_dims
sw = dataset.sw
maxppm = dataset.pts2ppm(0)
minppm = dataset.pts2ppm(dim0-1)
ppmlim = (minppm, maxppm)
wx_util.configure_spin(self.SpinFidIndex, 60)
wx_util.configure_spin(self.FloatGaussianApodization, 70, 2, constants.PrepWbnaa.STEP_APODIZE,
(constants.PrepWbnaa.MIN_APODIZE, constants.PrepWbnaa.MAX_APODIZE))
wx_util.configure_spin(self.SpinFidLeftShift, 70, None, None,
(constants.PrepWbnaa.MIN_LEFT, constants.PrepWbnaa.MAX_LEFT))
wx_util.configure_spin(self.SpinFidLeftShiftB0, 70, None, None,
(constants.PrepWbnaa.MIN_LEFT_B0, constants.PrepWbnaa.MAX_LEFT_B0))
wx_util.configure_spin(self.SpinFidLeftShiftPhase0, 70, None, None,
(constants.PrepWbnaa.MIN_LEFT_PHASE0, constants.PrepWbnaa.MAX_LEFT_PHASE0))
wx_util.configure_spin(self.FloatPeakShiftValue, 70, 3, constants.PrepWbnaa.STEP_PEAK,
(constants.PrepWbnaa.MIN_PEAK,constants.PrepWbnaa.MAX_PEAK))
wx_util.configure_spin(self.FloatPhase0, 70, 3, constants.PrepWbnaa.STEP_PHASE,
(constants.PrepWbnaa.MIN_PHASE,constants.PrepWbnaa.MAX_PHASE))
wx_util.configure_spin(self.FloatPhase1, 70, 3, constants.PrepWbnaa.STEP_PHASE1,
(constants.PrepWbnaa.MIN_PHASE1,constants.PrepWbnaa.MAX_PHASE1))
wx_util.configure_spin(self.FloatReferencePeakCenter, 70, 3, constants.PrepWbnaa.STEP_CENTER,
(constants.PrepWbnaa.MIN_CENTER,constants.PrepWbnaa.MAX_CENTER))
wx_util.configure_spin(self.FloatPeakSearchWidth, 70, 3, constants.PrepWbnaa.STEP_WIDTH,
(constants.PrepWbnaa.MIN_WIDTH,constants.PrepWbnaa.MAX_WIDTH))
wx_util.configure_spin(self.SpinRefPeakLineWidth, 70, None, None,
(constants.PrepWbnaa.MIN_REF_LINE_WIDTH, constants.PrepWbnaa.MAX_REF_LINE_WIDTH))
wx_util.configure_spin(self.SpinConstantPhase0Offset, 70, None, None,
(constants.PrepWbnaa.MIN_CONSTANT_PH0, constants.PrepWbnaa.MAX_CONSTANT_PH0))
wx_util.configure_spin(self.FloatPhase0RangeStart, 70, 2, 0.25, ppmlim)
wx_util.configure_spin(self.FloatPhase0RangeEnd, 70, 2, 0.25, ppmlim)
# set up combo selections
_configure_combo(self.ComboRefSpectrumSource, constants.WbnaaRefSpectrumSource.choices)
# set here because part of the Tab display options, not the block
# Note. not using _configure_combo() because choices list is not in proper format (yet)
self.ComboDataExclusionDisplayMethod.Clear()
self.ComboDataExclusionDisplayMethod.SetItems(EXCLUDE_DISPLAY_CHOICES)
self.ComboDataExclusionDisplayMethod.SetStringSelection(EXCLUDE_DISPLAY_CHOICES[0])
#-------------------------------------------------------------
# Raw Fidsum View setup
self.view = PlotPanelPrepWbnaa(self.PanelViewPrepFidsum,
self,
self._tab_dataset,
naxes=3,
reversex=True,
zoom='span',
reference=True,
middle=True,
do_zoom_select_event=True,
do_zoom_motion_event=True,
do_refs_select_event=True,
do_refs_motion_event=True,
do_middle_select_event=True,
do_middle_motion_event=True,
do_scroll_event=True,
props_zoom=dict(alpha=0.2, facecolor='yellow'),
props_cursor=dict(alpha=0.2, facecolor='gray'),
xscale_bump=0.0,
yscale_bump=0.05,
data = [],
prefs=self._prefs,
dataset=self.dataset,
)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.view, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.PanelViewPrepFidsum.SetSizer(sizer)
self.view.Fit()
self.view_series = PlotPanelPrepWbnaaSeries(self.PanelViewPrepFidsumSeries,
self,
self._tab_dataset,
naxes=1,
reversex=False,
zoom='span',
reference=True,
middle=True,
do_zoom_select_event=True,
do_zoom_motion_event=True,
do_refs_select_event=True,
do_refs_motion_event=True,
do_middle_select_event=False,
do_middle_motion_event=False,
do_middle_press_event=True,
do_scroll_event=True,
props_zoom=dict(alpha=0.2, facecolor='yellow'),
props_cursor=dict(alpha=0.2, facecolor='gray'),
xscale_bump=0.0,
yscale_bump=0.05,
data = [],
prefs=self._prefs,
)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.view_series, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.PanelViewPrepFidsumSeries.SetSizer(sizer)
self.view_series.Fit()
width, height = self.SpinFidLeftShift.GetSize()
if "__WXMAC__" in wx.PlatformInfo:
# Under OS X this spin control needs a poke to paint itself
# properly. Without this code, the control doesn't appear on screen
# even though the wx Inspector reports that it has an appropriate
# size & position. Go figger.
wx.CallAfter(self.SpinFidLeftShift.SetSize, (width + 1, height))
def populate_controls(self, preset=False):
"""
Populates the raw data tab with values from the dataset.raw
object. It's meant to be called when a new data object is loaded.
"""
block = self.block
raw = self._tab_dataset.dataset.get_source_data('prep')
self.SpinFidIndex.SetValue(self.fid_index)
self.SpinFidIndex.SetRange(0, raw.shape[-2]-1)
self.FloatGaussianApodization.SetValue(self.block.set.gaussian_apodization)
self.SpinFidLeftShift.SetValue(self.block.set.fid_left_shift)
self.SpinFidLeftShiftB0.SetValue(self.block.set.fid_left_shift_b0)
self.SpinFidLeftShiftPhase0.SetValue(self.block.set.fid_left_shift_phase0)
self.FloatPeakShiftValue.SetValue(self.block.frequency_shift[self.fid_index])
self.FloatPhase0.SetValue(self.block.phase_0[self.fid_index])
self.CheckApplyPeakShift.SetValue(self.block.set.apply_peak_shift)
self.FloatReferencePeakCenter.SetValue(self.block.set.reference_peak_center)
self.FloatPeakSearchWidth.SetValue(self.block.set.peak_search_width)
self.CheckApplyPhase0.SetValue(self.block.set.apply_phase0)
self.FloatPhase0RangeStart.SetValue(self.block.set.phase0_range_start)
self.FloatPhase0RangeEnd.SetValue(self.block.set.phase0_range_end)
self.SpinRefPeakLineWidth.SetValue(self.block.set.ref_peak_line_width)
self.SpinConstantPhase0Offset.SetValue(self.block.set.constant_phase0_offset)
refspec = constants.WbnaaRefSpectrumSource.choices[self.block.set.ref_spectrum_source]
self.ComboRefSpectrumSource.SetStringSelection(refspec)
if self.block.set.ref_spectrum_source == 'average_all_fids':
self.SpinRefPeakLineWidth.Enable(False)
else:
self.SpinRefPeakLineWidth.Enable(True)
#=======================================================
#
# Global and Menu Event Handlers
#
#=======================================================
def on_menu_view_option(self, event):
event_id = event.GetId()
if self._prefs.handle_event(event_id):
if event_id in (util_menu.ViewIdsPrepFidsum.ZERO_LINE_SHOW,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_TOP,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_MIDDLE,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_BOTTOM,
util_menu.ViewIdsPrepFidsum.XAXIS_SHOW,
):
self.view.update_axes()
self.view.canvas.draw()
if event_id in (util_menu.ViewIdsPrepFidsum.ZERO_LINE_PLOT_SHOW,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_PLOT_TOP,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_PLOT_MIDDLE,
util_menu.ViewIdsPrepFidsum.ZERO_LINE_PLOT_BOTTOM,
util_menu.ViewIdsPrepFidsum.XAXIS_SHOW,
):
self.view_series.update_axes()
self.view_series.canvas.draw()
# note. these need to come before next
if event_id == util_menu.ViewIdsPrepFidsum.DATA_TYPE_REAL:
self.view.set_data_type_real()
if event_id == util_menu.ViewIdsPrepFidsum.DATA_TYPE_IMAGINARY:
self.view.set_data_type_imaginary()
if event_id == util_menu.ViewIdsPrepFidsum.DATA_TYPE_MAGNITUDE:
self.view.set_data_type_magnitude()
if event_id in (util_menu.ViewIdsPrepFidsum.DATA_TYPE_REAL,
util_menu.ViewIdsPrepFidsum.DATA_TYPE_IMAGINARY,
util_menu.ViewIdsPrepFidsum.DATA_TYPE_MAGNITUDE,
util_menu.ViewIdsPrepFidsum.XAXIS_PPM,
util_menu.ViewIdsPrepFidsum.XAXIS_HERTZ,
):
self.view.update()
self.view.canvas.draw()
if event_id in (util_menu.ViewIdsPrepFidsum.AREA_CALC_PLOT_A,
util_menu.ViewIdsPrepFidsum.AREA_CALC_PLOT_B,
):
area, rms = self.view.calculate_area()
if self._prefs.area_calc_plot_a:
index = 0
else:
index = 1
self.top.statusbar.SetStatusText(self.build_area_text(area[index], rms[index]), 3)
def on_menu_view_output(self, event):
event_id = event.GetId()
formats = { util_menu.ViewIdsPrepFidsum.VIEW_TO_PNG : "PNG",
util_menu.ViewIdsPrepFidsum.VIEW_TO_SVG : "SVG",
util_menu.ViewIdsPrepFidsum.VIEW_TO_EPS : "EPS",
util_menu.ViewIdsPrepFidsum.VIEW_TO_PDF : "PDF",
}
if event_id in formats:
format = formats[event_id]
lformat = format.lower()
filter_ = "%s files (*.%s)|*.%s" % (format, lformat, lformat)
figure = self.view.figure
filename = common_dialogs.save_as("", filter_)
if filename:
msg = ""
try:
figure.savefig( filename,
dpi=300,
facecolor='w',
edgecolor='w',
orientation='portrait',
papertype='letter',
format=None,
transparent=False)
except IOError:
msg = """I can't write the file "%s".""" % filename
if msg:
common_dialogs.message(msg, style=common_dialogs.E_OK)
#=======================================================
#
# Widget Event Handlers
#
#=======================================================
def on_apply_data_exclusion(self, event):
value = event.GetEventObject().GetValue()
self.block.set.apply_data_exclusion = value
self.process_and_plot()
def on_data_exclusion_display_method(self, event):
self.plot()
def on_toggle_current_index(self, event):
self.block.toggle_exclude_index(self.dataset, self.fid_index)
val = ','.join(str(x) for x in self.block.exclude_indices)
self.TextDataExclusion.SetValue(val)
self.process_and_plot()
def on_clear_indices(self, event):
self.block.toggle_exclude_index(self.dataset, None)
self.TextDataExclusion.SetValue('')
self.process_and_plot()
def on_splitter(self, event=None):
# This is sometimes called programmatically, in which case event is None
self._prefs.sash_position = self.SplitterWindow.GetSashPosition()
def on_fid_index(self, event):
value = event.GetEventObject().GetValue()
self.fid_index = value
shft = self.block.frequency_shift[self.fid_index]
phas = self.block.phase_0[self.fid_index]
self.FloatPeakShiftValue.SetValue(shft)
self.FloatPhase0.SetValue(phas)
self.process_and_plot()
def on_gaussian_apodization(self, event):
value = event.GetEventObject().GetValue()
self.block.set.gaussian_apodization = value
self.process_and_plot()
def on_fid_left_shift(self, event):
value = event.GetEventObject().GetValue()
self.block.set.fid_left_shift = value
self.process_and_plot()
def on_fid_left_shift_b0(self, event):
value = event.GetEventObject().GetValue()
self.block.set.fid_left_shift_b0 = value
self.process_and_plot()
def on_fid_left_shift_phase0(self, event):
value = event.GetEventObject().GetValue()
self.block.set.fid_left_shift_phase0 = value
self.process_and_plot()
def on_peak_shift_value(self, event):
value = event.GetEventObject().GetValue()
self.block.frequency_shift[self.fid_index] = value
self.process_and_plot()
def on_phase0(self, event):
value = event.GetEventObject().GetValue()
self.block.phase_0[self.fid_index] = value
self.process_and_plot()
def on_phase1(self, event):
value = event.GetEventObject().GetValue()
self.block.global_phase1 = value
self.process_and_plot()
def on_apply_peak_shift(self, event):
value = event.GetEventObject().GetValue()
self.block.set.apply_peak_shift = value
self.process_and_plot()
self.update_shift_phase()
def on_reset_peak_shift(self, event):
self.block.frequency_shift *= 0
self.FloatPeakShiftValue.SetValue(self.block.frequency_shift[self.fid_index])
self.process_and_plot()
self.update_shift_phase()
def on_reference_peak_center(self, event):
value = event.GetEventObject().GetValue()
self.block.set.reference_peak_center = value
self.process_and_plot()
self.update_shift_phase()
def on_peak_search_width(self, event):
value = event.GetEventObject().GetValue()
self.block.set.peak_search_width = value
self.process_and_plot()
self.update_shift_phase()
def on_apply_phase0(self, event):
value = event.GetEventObject().GetValue()
self.block.set.apply_phase0 = value
self.process_and_plot()
self.update_shift_phase()
def on_reset_phase0(self, event):
self.block.phase_0 *= 0
self.FloatPhase0.SetValue(self.block.phase_0[self.fid_index])
self.process_and_plot()
self.update_shift_phase()
def on_phase0_range_start(self, event):
# Note. min=End and max=Start because dealing with PPM range
min, max = _paired_event(self.FloatPhase0RangeEnd,
self.FloatPhase0RangeStart)
self.block.set.phase0_range_start = max
self.block.set.phase0_range_end = min
self.process_and_plot()
self.update_shift_phase()
def on_phase0_range_end(self, event):
# Note. min=End and max=Start because dealing with PPM range
min, max = _paired_event(self.FloatPhase0RangeEnd,
self.FloatPhase0RangeStart)
self.block.set.phase0_range_start = max
self.block.set.phase0_range_end = min
self.process_and_plot()
self.update_shift_phase()
def on_ref_spectrum_source(self, event):
index = event.GetEventObject().GetSelection()
val = list(constants.WbnaaRefSpectrumSource.choices.keys())[index]
self.block.set.ref_spectrum_source = val
self.process_and_plot()
self.update_shift_phase()
if val == 'average_all_fids':
self.SpinRefPeakLineWidth.Enable(False)
else:
self.SpinRefPeakLineWidth.Enable(True)
def on_ref_peak_line_width(self, event):
value = event.GetEventObject().GetValue()
self.block.set.ref_peak_line_width = value
self.process_and_plot()
self.update_shift_phase()
def on_constant_phase0_offset(self, event):
value = event.GetEventObject().GetValue()
self.block.set.constant_phase0_offset = value
self.process_and_plot()
self.update_shift_phase()
def update_shift_phase(self):
shft = self.block.frequency_shift[self.fid_index]
phas = self.block.phase_0[self.fid_index]
self.FloatPeakShiftValue.SetValue(shft)
self.FloatPhase0.SetValue(phas)
#=======================================================
#
# Public Methods
#
#=======================================================
def process_and_plot(self, entry='all', do_calculate=True):
"""
The process(), plot() and process_and_plot() methods are standard in
all processing tabs. They are called to update the data in the plot
results dictionary, the plot_panel in the View side of the tab or both.
"""
tab_base.Tab.process_and_plot(self, entry)
self.process(entry=entry, do_calculate=do_calculate)
self.plot()
def process(self, entry='all', do_calculate=True):
"""
Data processing results are stored into the Block inside the Chain,
but the View results are returned as a dictionary from the Chain.run()
method. The plot routine takes its inputs from this dictionary.
"""
tab_base.Tab.process(self, entry)
voxel = self.fid_index
self.plot_results = self.block.chain.run([voxel], entry=entry)
nfids = self.block.chain.raw.shape[2]
if self.block.set.apply_data_exclusion:
nfids = nfids - len(self.block.exclude_indices)
self.LabelRemainingFidCount.SetLabel("Number of FIDs Remaining : %s" % (nfids, ))
def plot(self):
"""
The set_data() method sets data into the plot_panel_spectrum object
in the plot in the right panel.
"""
if self._plotting_enabled:
tab_base.Tab.plot(self)
results = self.plot_results
data1 = results['freq_current'] # no phase offset
data2 = results['freq_summed'] # no phase offset
data3 = results['freq_summed_offset'] # with constant phase offset added in
data = [[data1], [data2], [data3]]
self.view.set_data(data)
self.view.update(set_scale=not self._scale_intialized)
select = self.ComboDataExclusionDisplayMethod.GetStringSelection()
if select == EXCLUDE_DISPLAY_CHOICES[0]:
data3 = np.abs(self.block.chain.raw[0,0,:,0])
do_scale = (self.view_series.xlabel != 'fid[0] Magn')
self.view_series.xlabel = 'fid[0] Magn'
elif select == EXCLUDE_DISPLAY_CHOICES[1]:
data3 = self.block.frequency_shift.copy()
do_scale = (self.view_series.xlabel != 'B0 Shift [Hz]')
self.view_series.xlabel = 'B0 Shift [Hz]'
elif select == EXCLUDE_DISPLAY_CHOICES[2]:
data3 = self.block.phase_0.copy()
do_scale = (self.view_series.xlabel != 'Phase 0 [deg]')
self.view_series.xlabel = 'Phase 0 [deg]'
exclude = self.block.exclude_indices
data3 = {'data':data3, 'markevery':exclude, 'markevery_color':'red'}
data_series = [[data3],]
self.view_series.set_data(data_series)
self.view_series.update(set_scale=do_scale)
if not self._scale_intialized:
self._scale_intialized = True
# Calculate the new area after phasing
area, rms = self.view.calculate_area()
index = (0 if self._prefs.area_calc_plot_a else 1)
area = area[index]
rms = rms[index]
self.top.statusbar.SetStatusText(self.build_area_text(area, rms), 3)
#=======================================================
#
# Internal Helper Functions
#
#=======================================================
def _display_header_text(self):
index = self.fid_index
header = self.dataset.blocks["raw"].headers[index]
lines = "\nCurrent Header, FID index = "+str(index)+"\n" + "-" * 75 + "\n\n"
lines += str(header)
wx_util.display_text_as_file(lines)
| [
"wx.BoxSizer",
"vespa.analysis.tab_base.Tab.process",
"vespa.analysis.tab_base.Tab.plot",
"vespa.analysis.auto_gui.fidsum_wbnaa.PanelPrepWbnaaUI.__init__",
"numpy.abs",
"vespa.common.wx_gravy.common_dialogs.save_as",
"vespa.analysis.tab_base.Tab.process_and_plot",
"vespa.common.wx_gravy.common_dialogs... | [((1783, 1856), 'vespa.analysis.auto_gui.fidsum_wbnaa.PanelPrepWbnaaUI.__init__', 'fidsum_wbnaa.PanelPrepWbnaaUI.__init__', (['self', 'tab_dataset.NotebookDataset'], {}), '(self, tab_dataset.NotebookDataset)\n', (1821, 1856), True, 'import vespa.analysis.auto_gui.fidsum_wbnaa as fidsum_wbnaa\n'), ((1874, 1949), 'vespa.analysis.tab_base.Tab.__init__', 'tab_base.Tab.__init__', (['self', 'tab_dataset', 'top', 'prefs_module.PrefsPrepFidsum'], {}), '(self, tab_dataset, top, prefs_module.PrefsPrepFidsum)\n', (1895, 1949), True, 'import vespa.analysis.tab_base as tab_base\n'), ((3367, 3453), 'wx.CallAfter', 'wx.CallAfter', (['self.SplitterWindow.SetSashPosition', 'self._prefs.sash_position', '(True)'], {}), '(self.SplitterWindow.SetSashPosition, self._prefs.sash_position,\n True)\n', (3379, 3453), False, 'import wx\n'), ((3480, 3510), 'wx.CallAfter', 'wx.CallAfter', (['self.on_splitter'], {}), '(self.on_splitter)\n', (3492, 3510), False, 'import wx\n'), ((4270, 4315), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinFidIndex', '(60)'], {}), '(self.SpinFidIndex, 60)\n', (4292, 4315), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((4325, 4497), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatGaussianApodization', '(70)', '(2)', 'constants.PrepWbnaa.STEP_APODIZE', '(constants.PrepWbnaa.MIN_APODIZE, constants.PrepWbnaa.MAX_APODIZE)'], {}), '(self.FloatGaussianApodization, 70, 2, constants.\n PrepWbnaa.STEP_APODIZE, (constants.PrepWbnaa.MIN_APODIZE, constants.\n PrepWbnaa.MAX_APODIZE))\n', (4347, 4497), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((4520, 4648), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinFidLeftShift', '(70)', 'None', 'None', '(constants.PrepWbnaa.MIN_LEFT, constants.PrepWbnaa.MAX_LEFT)'], {}), '(self.SpinFidLeftShift, 70, None, None, (constants.\n PrepWbnaa.MIN_LEFT, constants.PrepWbnaa.MAX_LEFT))\n', (4542, 4648), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((4676, 4812), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinFidLeftShiftB0', '(70)', 'None', 'None', '(constants.PrepWbnaa.MIN_LEFT_B0, constants.PrepWbnaa.MAX_LEFT_B0)'], {}), '(self.SpinFidLeftShiftB0, 70, None, None, (constants.\n PrepWbnaa.MIN_LEFT_B0, constants.PrepWbnaa.MAX_LEFT_B0))\n', (4698, 4812), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((4840, 4988), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinFidLeftShiftPhase0', '(70)', 'None', 'None', '(constants.PrepWbnaa.MIN_LEFT_PHASE0, constants.PrepWbnaa.MAX_LEFT_PHASE0)'], {}), '(self.SpinFidLeftShiftPhase0, 70, None, None, (\n constants.PrepWbnaa.MIN_LEFT_PHASE0, constants.PrepWbnaa.MAX_LEFT_PHASE0))\n', (4862, 4988), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5016, 5169), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPeakShiftValue', '(70)', '(3)', 'constants.PrepWbnaa.STEP_PEAK', '(constants.PrepWbnaa.MIN_PEAK, constants.PrepWbnaa.MAX_PEAK)'], {}), '(self.FloatPeakShiftValue, 70, 3, constants.PrepWbnaa\n .STEP_PEAK, (constants.PrepWbnaa.MIN_PEAK, constants.PrepWbnaa.MAX_PEAK))\n', (5038, 5169), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5196, 5344), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPhase0', '(70)', '(3)', 'constants.PrepWbnaa.STEP_PHASE', '(constants.PrepWbnaa.MIN_PHASE, 
constants.PrepWbnaa.MAX_PHASE)'], {}), '(self.FloatPhase0, 70, 3, constants.PrepWbnaa.\n STEP_PHASE, (constants.PrepWbnaa.MIN_PHASE, constants.PrepWbnaa.MAX_PHASE))\n', (5218, 5344), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5371, 5527), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPhase1', '(70)', '(3)', 'constants.PrepWbnaa.STEP_PHASE1', '(constants.PrepWbnaa.MIN_PHASE1, constants.PrepWbnaa.MAX_PHASE1)'], {}), '(self.FloatPhase1, 70, 3, constants.PrepWbnaa.\n STEP_PHASE1, (constants.PrepWbnaa.MIN_PHASE1, constants.PrepWbnaa.\n MAX_PHASE1))\n', (5393, 5527), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5549, 5718), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatReferencePeakCenter', '(70)', '(3)', 'constants.PrepWbnaa.STEP_CENTER', '(constants.PrepWbnaa.MIN_CENTER, constants.PrepWbnaa.MAX_CENTER)'], {}), '(self.FloatReferencePeakCenter, 70, 3, constants.\n PrepWbnaa.STEP_CENTER, (constants.PrepWbnaa.MIN_CENTER, constants.\n PrepWbnaa.MAX_CENTER))\n', (5571, 5718), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5740, 5902), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPeakSearchWidth', '(70)', '(3)', 'constants.PrepWbnaa.STEP_WIDTH', '(constants.PrepWbnaa.MIN_WIDTH, constants.PrepWbnaa.MAX_WIDTH)'], {}), '(self.FloatPeakSearchWidth, 70, 3, constants.\n PrepWbnaa.STEP_WIDTH, (constants.PrepWbnaa.MIN_WIDTH, constants.\n PrepWbnaa.MAX_WIDTH))\n', (5762, 5902), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((5924, 6081), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinRefPeakLineWidth', '(70)', 'None', 'None', '(constants.PrepWbnaa.MIN_REF_LINE_WIDTH, constants.PrepWbnaa.MAX_REF_LINE_WIDTH\n )'], {}), '(self.SpinRefPeakLineWidth, 70, None, None, (\n constants.PrepWbnaa.MIN_REF_LINE_WIDTH, constants.PrepWbnaa.\n MAX_REF_LINE_WIDTH))\n', (5946, 6081), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((6112, 6269), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.SpinConstantPhase0Offset', '(70)', 'None', 'None', '(constants.PrepWbnaa.MIN_CONSTANT_PH0, constants.PrepWbnaa.MAX_CONSTANT_PH0)'], {}), '(self.SpinConstantPhase0Offset, 70, None, None, (\n constants.PrepWbnaa.MIN_CONSTANT_PH0, constants.PrepWbnaa.MAX_CONSTANT_PH0)\n )\n', (6134, 6269), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((6292, 6363), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPhase0RangeStart', '(70)', '(2)', '(0.25)', 'ppmlim'], {}), '(self.FloatPhase0RangeStart, 70, 2, 0.25, ppmlim)\n', (6314, 6363), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((6372, 6441), 'vespa.common.wx_gravy.util.configure_spin', 'wx_util.configure_spin', (['self.FloatPhase0RangeEnd', '(70)', '(2)', '(0.25)', 'ppmlim'], {}), '(self.FloatPhase0RangeEnd, 70, 2, 0.25, ppmlim)\n', (6394, 6441), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((8524, 8548), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (8535, 8548), False, 'import wx\n'), ((10418, 10442), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (10429, 10442), False, 'import wx\n'), ((23332, 23374), 'vespa.analysis.tab_base.Tab.process_and_plot', 'tab_base.Tab.process_and_plot', (['self', 'entry'], {}), '(self, entry)\n', (23361, 23374), True, 'import vespa.analysis.tab_base as tab_base\n'), ((23782, 23815), 
'vespa.analysis.tab_base.Tab.process', 'tab_base.Tab.process', (['self', 'entry'], {}), '(self, entry)\n', (23802, 23815), True, 'import vespa.analysis.tab_base as tab_base\n'), ((26771, 26806), 'vespa.common.wx_gravy.util.display_text_as_file', 'wx_util.display_text_as_file', (['lines'], {}), '(lines)\n', (26799, 26806), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((10989, 11053), 'wx.CallAfter', 'wx.CallAfter', (['self.SpinFidLeftShift.SetSize', '(width + 1, height)'], {}), '(self.SpinFidLeftShift.SetSize, (width + 1, height))\n', (11001, 11053), False, 'import wx\n'), ((16225, 16260), 'vespa.common.wx_gravy.common_dialogs.save_as', 'common_dialogs.save_as', (['""""""', 'filter_'], {}), "('', filter_)\n", (16247, 16260), True, 'import vespa.common.wx_gravy.common_dialogs as common_dialogs\n'), ((24391, 24414), 'vespa.analysis.tab_base.Tab.plot', 'tab_base.Tab.plot', (['self'], {}), '(self)\n', (24408, 24414), True, 'import vespa.analysis.tab_base as tab_base\n'), ((21835, 21882), 'vespa.analysis.constants.WbnaaRefSpectrumSource.choices.keys', 'constants.WbnaaRefSpectrumSource.choices.keys', ([], {}), '()\n', (21880, 21882), True, 'import vespa.analysis.constants as constants\n'), ((24997, 25037), 'numpy.abs', 'np.abs', (['self.block.chain.raw[0, 0, :, 0]'], {}), '(self.block.chain.raw[0, 0, :, 0])\n', (25003, 25037), True, 'import numpy as np\n'), ((16915, 16969), 'vespa.common.wx_gravy.common_dialogs.message', 'common_dialogs.message', (['msg'], {'style': 'common_dialogs.E_OK'}), '(msg, style=common_dialogs.E_OK)\n', (16937, 16969), True, 'import vespa.common.wx_gravy.common_dialogs as common_dialogs\n')] |
import pandas as pd
import numpy as np
np.random.seed(42)
import random
random.seed(42)
import pickle
import gzip
import glob
import os
import json
from tqdm import tqdm
from pathlib import Path
import shutil
from urllib.parse import urlparse
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from pdb import set_trace
## Parses over domain-wise nquad files and extracts all objects of class X by URL
# Set the amount of workers to pass to ProcessPoolExecutor or ThreadPoolExecutor
MAX_WORKERS = None
def get_objects_from_domain(arg_tuple):
domain_path, domain_name, name, main_class, output_path = arg_tuple
    domain_dict = {}
with gzip.open(domain_path, 'rt', encoding='utf-8') as f:
for i, line in enumerate(f):
line = line.rstrip()
split = line.split()
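            # N-Quad lines look like: <subject> <predicate> <object> <graph> .
            # so fields 2..-3 joined form the object (the class IRI) and split[-2] is the graph label (the page URL)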
so_class = ' '.join(split[2:-2])
so_class = so_class[1:-1]
so_class = so_class.replace('https', 'http')
so_class = so_class.lower()
url = split[-2]
url = url[1:-1]
if so_class == main_class:
node_id = split[0]
if url in domain_dict.keys():
domain_dict[url].add(node_id)
else:
domain_dict[url] = set([node_id])
with gzip.open(f"{output_path}{domain_name}.pkl.gz","wb") as f:
pickle.dump(domain_dict, f)
with open('../../schemaorg_classes.json', 'r') as f:
class_dict = json.load(f)
classes = [k for k in class_dict.keys()]
for name in tqdm(classes):
    main_class = class_dict[name].lower()
domains = glob.glob(f'../../data/raw/tablecorpus/by-domain/{name}/*.gz')
print(f'Class: {name} contains {len(domains)} domains.\n')
output_path = f'../../data/interim/tablecorpus/domain_objects/{name}/'
shutil.rmtree(output_path, ignore_errors=True)
Path(output_path).mkdir(parents=True, exist_ok=True)
arguments = ((v, os.path.basename(v)[:-3], name, main_class, output_path) for v in list(domains))
with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
executor.map(get_objects_from_domain, arguments) | [
"tqdm.tqdm",
"json.load",
"numpy.random.seed",
"gzip.open",
"pickle.dump",
"os.path.basename",
"concurrent.futures.ProcessPoolExecutor",
"pathlib.Path",
"random.seed",
"glob.glob",
"shutil.rmtree"
] | [((39, 57), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (53, 57), True, 'import numpy as np\n'), ((72, 87), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (83, 87), False, 'import random\n'), ((1601, 1614), 'tqdm.tqdm', 'tqdm', (['classes'], {}), '(classes)\n', (1605, 1614), False, 'from tqdm import tqdm\n'), ((1533, 1545), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1542, 1545), False, 'import json\n'), ((1733, 1795), 'glob.glob', 'glob.glob', (['f"""../../data/raw/tablecorpus/by-domain/{name}/*.gz"""'], {}), "(f'../../data/raw/tablecorpus/by-domain/{name}/*.gz')\n", (1742, 1795), False, 'import glob\n'), ((1943, 1989), 'shutil.rmtree', 'shutil.rmtree', (['output_path'], {'ignore_errors': '(True)'}), '(output_path, ignore_errors=True)\n', (1956, 1989), False, 'import shutil\n'), ((680, 726), 'gzip.open', 'gzip.open', (['domain_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(domain_path, 'rt', encoding='utf-8')\n", (689, 726), False, 'import gzip\n'), ((1366, 1419), 'gzip.open', 'gzip.open', (['f"""{output_path}{domain_name}.pkl.gz"""', '"""wb"""'], {}), "(f'{output_path}{domain_name}.pkl.gz', 'wb')\n", (1375, 1419), False, 'import gzip\n'), ((1433, 1460), 'pickle.dump', 'pickle.dump', (['domain_dict', 'f'], {}), '(domain_dict, f)\n', (1444, 1460), False, 'import pickle\n'), ((2168, 2212), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'MAX_WORKERS'}), '(max_workers=MAX_WORKERS)\n', (2187, 2212), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((1994, 2011), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (1998, 2011), False, 'from pathlib import Path\n'), ((2073, 2092), 'os.path.basename', 'os.path.basename', (['v'], {}), '(v)\n', (2089, 2092), False, 'import os\n')] |
import numpy as np
import numpy.ma as ma
from math import inf
def get_indexes(v, val):
"""
Returns the indexes of the v array which have the value 'val':
Cases:
if v = column of a matrix:
returns the rows which have the value 'val'
if v = row of a matrix:
returns the columns which have the value 'val'
"""
max_mask = ma.getmask(ma.masked_not_equal(v, val))
return list(ma.array(np.arange(len(v)), mask=max_mask).compressed())
class ZSGame(object):
"""
Represents a simple zero-sum game.
    The normal_form is a matrix with the payments of player 1 only; since the
    game is zero-sum, the payment of the other player at [i][j] is
    -normal_form[i][j] (the two payments sum to zero).
"""
def __init__(self, normal_form):
self.n_form = np.array(normal_form)
def get_state(self):
return self.n_form
def print_state(self):
print("")
for i in range(len(self.n_form)):
print(f"{[str(x).zfill(2) for x in self.n_form[i]]}")
def get_eq(self):
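        """
        Return the pure-strategy equilibria (saddle points): the cells (row, col)
        whose entry is both the minimum of its row and the maximum of its column.
        """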
equilibrium = []
max_c = {}
min_r = {}
for cnum, col in enumerate(self.n_form.T):
max_val = np.max(col)
max_c[cnum] = [max_val, get_indexes(col, max_val)]
for rnum, row in enumerate(self.n_form):
min_val = np.min(row)
if any(min_val == j for j in map(lambda v: v[0], max_c.values())):
min_r[rnum] = [min_val, get_indexes(row, min_val)]
for row, val in min_r.items():
for col in val[1]:
if row in max_c[col][1]:
equilibrium.append((row, col))
return equilibrium
class ProbabilisticGame(ZSGame):
def __init__(self, normal_form, player1, player2):
super().__init__(normal_form)
self.player1 = player1
self.player2 = player2
def expectation(self, player=1):
"Return the expected payment (for 'player') given a fixed player choice"
expect = self.player1.strategy@self.n_form@self.player2.strategy
expect = expect if player == 1 else -expect
return expect[0][0]
def get_best_strategy(self, player=1):
"""Return the best strategy and the payment for 'player', for a fixed
strategy for the opponent, and varying all the strategies for
the 'player'.
"""
best_strat = []
best_payment = -inf if player == 1 else +inf
pl = self.player1 if player == 1 else self.player2
optimize = max if player == 1 else min
for strat in pl.get_all_strategy():
pl.strategy = strat
new_expectation = self.expectation(player)
if optimize(new_expectation, best_payment) != best_payment:
best_strat = strat
best_payment = optimize(new_expectation, best_payment)
return best_payment, best_strat
class Player(object):
def __init__(self, p_set, pos):
self.pos = pos
self.n_moves = len(p_set[0])
for p_dist in p_set:
assert sum(p_dist) == 1
assert all(p <= 1 and p >= 0 for p in p_dist)
# Column or row vector, depending on what player
if pos == 'r':
self.p_set = [np.array(p_dist).reshape(1, len(p_dist)) for p_dist in p_set]
elif pos == 'c':
self.p_set = [np.array(p_dist).reshape(len(p_dist), 1) for p_dist in p_set]
self.strategy = self.p_set[0]
print("New player sucessfully created!")
def get_all_strategy(self):
"Returns all strategies of the player"
return self.p_set
def main():
# The test_board matrix represents the payments from the player A (row)
# test_board = [[ 0, 0, -71],
# [-1, -1, -1],
# [ 1, 0, 0]]
# my_game = ZSGame(test_board)
test_board = [[ 0, 0, -71],
[-1, -1, -1],
[ 1, 0, 0]]
luiza_strategies = [[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.1, 0.8, 0.1]]
carlos_strategies = [[0.7, 0.15, 0.15]]
luiza = Player(luiza_strategies, 'r')
carlos = Player(carlos_strategies, 'c')
prob_game = ProbabilisticGame(test_board, luiza, carlos)
print("----------------PAGAMENTO ESPERADO : 'luiza' ------------------")
print(prob_game.expectation())
print("----------------MELHOR ESTRATÉGIA (fixo primeira estratégia de carlos) : 'luiza' ------------------")
print(prob_game.get_best_strategy())
if __name__ == '__main__':
main()
| [
"numpy.max",
"numpy.ma.masked_not_equal",
"numpy.array",
"numpy.min"
] | [((390, 417), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['v', 'val'], {}), '(v, val)\n', (409, 417), True, 'import numpy.ma as ma\n'), ((833, 854), 'numpy.array', 'np.array', (['normal_form'], {}), '(normal_form)\n', (841, 854), True, 'import numpy as np\n'), ((1221, 1232), 'numpy.max', 'np.max', (['col'], {}), '(col)\n', (1227, 1232), True, 'import numpy as np\n'), ((1367, 1378), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (1373, 1378), True, 'import numpy as np\n'), ((3290, 3306), 'numpy.array', 'np.array', (['p_dist'], {}), '(p_dist)\n', (3298, 3306), True, 'import numpy as np\n'), ((3403, 3419), 'numpy.array', 'np.array', (['p_dist'], {}), '(p_dist)\n', (3411, 3419), True, 'import numpy as np\n')] |
import logging
import numpy as np
from tqdm import tqdm
SIZE = 4096
VECTOR_COUNT = 10000
REPETITIONS = 10
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
A = np.random.random((SIZE, SIZE))
vectors = [np.random.random(SIZE) for i in range(VECTOR_COUNT)]
def vector_multiplication(v: np.ndarray) -> np.ndarray:
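        # multiply the shared matrix A (closed over from the enclosing scope) by one vector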
return np.dot(A, v)
results = list(tqdm(map(vector_multiplication, vectors), total=len(vectors)))
| [
"numpy.dot",
"numpy.random.random",
"logging.basicConfig"
] | [((140, 180), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (159, 180), False, 'import logging\n'), ((189, 219), 'numpy.random.random', 'np.random.random', (['(SIZE, SIZE)'], {}), '((SIZE, SIZE))\n', (205, 219), True, 'import numpy as np\n'), ((235, 257), 'numpy.random.random', 'np.random.random', (['SIZE'], {}), '(SIZE)\n', (251, 257), True, 'import numpy as np\n'), ((364, 376), 'numpy.dot', 'np.dot', (['A', 'v'], {}), '(A, v)\n', (370, 376), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
import csv
from utils import total_gini
import tensorflow.compat.v1 as tf
import json
from pgd_attack import LinfPGDAttack
import utils_init
with open('config.json') as config_file:
config = json.load(config_file)
def print_metrics(sess, model, nat_dict, val_dict, test_dict, ii, args, summary_writer, dict_exp, experiment, global_step):
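    # Evaluate and print train/validation accuracies and the cross-entropy
    # variants for the current step, then push them to TensorBoard via summary_writer.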
network_size = list(utils_init.NN[args.network_type])
w_vars, b_vars, stable_var, sparse_vars = utils_init.init_vars(len(network_size)+1)
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
test_acc = sess.run(model.accuracy, feed_dict=test_dict)
val_acc = sess.run(model.accuracy, feed_dict=val_dict)
nat_xent = sess.run(model.xent, feed_dict=nat_dict)
stable_xent = sess.run(model.stable_xent, feed_dict=nat_dict)
robust_xent = sess.run(model.robust_xent, feed_dict=nat_dict)
robust_stable_xent = sess.run(model.robust_stable_xent, feed_dict=nat_dict)
stable_var = sess.run(getattr(model, stable_var), feed_dict=nat_dict)
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}'.format(nat_acc * 100))
print(' validation nat accuracy {:.4}'.format(val_acc * 100))
print(' Nat Xent {:.4}'.format(nat_xent))
if args.is_stable:
print(' Stability Variable {:.4}'.format(stable_var ))
print(' Stable Xent {:.4}'.format(stable_xent))
if args.rho > 0 :
print(' Robust Xent {:.4}'.format(robust_xent))
if args.is_stable:
print(' Robust Stable Xent {:.4}'.format(robust_stable_xent))
for i in range(len(w_vars)):
if args.l0 > 0:
print(' Killed neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_neurons'][experiment])
print(' Killed input neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_input_features'][experiment])
print(' Non zero features percentage - ' + w_vars[i] , dict_exp[w_vars[i] + '_nonzero'][experiment])
regularizer = sess.run(model.regularizer, feed_dict=nat_dict)
print(' Regularizer', regularizer)
summary = tf.Summary(value=[
tf.Summary.Value(tag='Train Xent', simple_value= nat_xent),
tf.Summary.Value(tag='Val Acc', simple_value= val_acc),
tf.Summary.Value(tag='Train Acc', simple_value= nat_acc),
tf.Summary.Value(tag='Train Stable Xent', simple_value= stable_xent),
tf.Summary.Value(tag='Train Robust Stable Xent', simple_value= robust_stable_xent),
tf.Summary.Value(tag='Test Acc', simple_value= test_acc)])
summary_writer.add_summary(summary, global_step.eval(sess))
for i in range(len(w_vars)):
if args.l0 > 0:
summary_sparse = tf.Summary(value=[
tf.Summary.Value(tag=w_vars[i] + '_killed_neurons', simple_value=dict_exp[w_vars[i] + '_killed_neurons'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_killed_inputs', simple_value=dict_exp[w_vars[i] + '_killed_input_features'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_nonzero', simple_value=dict_exp[w_vars[i] + '_nonzero'][experiment])])
summary_writer.add_summary(summary_sparse, global_step.eval(sess))
def update_dict_output(dict_exp, experiment, sess, test_acc, model, test_dict, num_iters):
dict_exp['test_accs'][experiment] = test_acc*100
dict_exp['iterations'][experiment] = num_iters
return dict_exp
def update_adv_acc(args, best_model, x_test, y_test, experiment, dict_exp):
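    # Evaluate the best model under an L-inf PGD attack for each test radius
    # rho and store the resulting adversarial test accuracies in dict_exp.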
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
clip = True
if "uci" in args.data_set:
clip = False
for rho_test in args.robust_test:
attack = LinfPGDAttack(best_model, rho_test, config['k'], config['a'],
config['random_start'], config['loss_func'], clip)
x_test_adv = attack.perturb(x_test, y_test, sess)
adv_dict = {best_model.x_input: x_test_adv, best_model.y_input: y_test}
dict_exp['adv_test_accs'][rho_test][experiment] = sess.run(best_model.accuracy, feed_dict=adv_dict)
def print_stability_measures(dict_exp, args, num_experiments, batch_size, subset_ratio, tot_test_acc, max_train_steps, network_path):
network_size = list(utils_init.NN[args.network_type])
w_vars, b_vars, stable_var, sparse_vars = utils_init.init_vars(len(network_size)+1)
avg_test_acc = tot_test_acc / num_experiments
std = np.array([float(k) for k in dict_exp['test_accs']]).std()
logit_stability = np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)
gini_stability = total_gini(dict_exp['preds'].transpose())
print(' Average testing accuracy {:.4}'.format(avg_test_acc * 100))
print(' Individual accuracies: \n', dict_exp['test_accs'])
print(' Adv testing accuracies', dict_exp['adv_test_accs'])
print(' Stability values', dict_exp[stable_var])
print(' Test Accuracy std {:.2}'.format(np.array([float(k) for k in dict_exp['test_accs']]).std()))
print(" Logits std", np.mean(np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)))
print(" Gini stability", gini_stability)
weights_stability = print_layer_stability(dict_exp, num_experiments, args)
weights_nonzero = [np.mean(dict_exp[w_vars[i]]) for i in range(len(w_vars))]
for i in range(len(w_vars)):
print(w_vars[i] + ' non zero percentage', weights_nonzero[i])
file = open(str('results_' + network_path + args.data_set + '.csv'), 'a+', newline='')
file_read = open(str('results_' + network_path + args.data_set + '.csv'), "r")
one_char = file_read.read(1)
writer = csv.writer(file)
if not len(one_char):
headers = []
headers += ['num_experiments', 'batch_size', 'subset_ratio', 'max_train_steps']
headers += ['test accuracy '+ str(i) for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
headers += ['Avg '+str(key)]
headers += ['Avg test adversarial acc for rho = '+ str(rho) for rho in args.robust_test]
headers += ['is_stable', 'rho', 'train_size', 'l2', 'l0', 'network_size', 'learning rate']
headers += [w_vars[i] + ' Nonzero weights' for i in range(len(w_vars))]
headers += [w_vars[i] + ' Stability' for i in range(len(w_vars))]
headers += ['std', 'logit_stability', 'gini_stability' ]
writer.writerow(headers)
with file:
cols = []
cols += [num_experiments, batch_size, subset_ratio, max_train_steps]
cols += [dict_exp['test_accs'][i] for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
cols += [np.mean(dict_exp[key])]
cols += [np.mean(dict_exp['adv_test_accs'][rho]) for rho in args.robust_test]
cols += [args.is_stable, args.rho, args.train_size, args.l2, args.l0, network_size, args.lr]
cols += weights_nonzero
cols += weights_stability
cols += [std, logit_stability, gini_stability ]
print(cols)
writer.writerow(cols)
def print_layer_stability(dict_exp, num_experiments, args):
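    # Per layer: flatten each experiment's weight matrix, take the per-weight
    # std across experiments, and report the mean of those stds as the layer's stability.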
network_size = list(utils_init.NN[args.network_type])
w_vars, b_vars, stable_var, sparse_vars = utils_init.init_vars(len(network_size)+1)
stabilities = []
for i in range(len(w_vars)):
w_i = [dict_exp[w_vars[i]][experiment].reshape(-1) for experiment in range(num_experiments)]
w_stability = np.mean(np.std(w_i, axis=0), axis=0)
print(w_vars[i] + " std", w_stability)
stabilities = stabilities + [w_stability]
return stabilities
| [
"json.load",
"csv.writer",
"pgd_attack.LinfPGDAttack",
"numpy.std",
"tensorflow.compat.v1.Summary.Value",
"tensorflow.compat.v1.Session",
"numpy.mean",
"datetime.datetime.now",
"tensorflow.compat.v1.global_variables_initializer"
] | [((246, 268), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (255, 268), False, 'import json\n'), ((5740, 5756), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (5750, 5756), False, 'import csv\n'), ((3597, 3609), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (3607, 3609), True, 'import tensorflow.compat.v1 as tf\n'), ((4634, 4672), 'numpy.std', 'np.std', (["dict_exp['logits_acc']"], {'axis': '(0)'}), "(dict_exp['logits_acc'], axis=0)\n", (4640, 4672), True, 'import numpy as np\n'), ((5350, 5378), 'numpy.mean', 'np.mean', (['dict_exp[w_vars[i]]'], {}), '(dict_exp[w_vars[i]])\n', (5357, 5378), True, 'import numpy as np\n'), ((1106, 1120), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1118, 1120), False, 'from datetime import datetime\n'), ((3636, 3669), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3667, 3669), True, 'import tensorflow.compat.v1 as tf\n'), ((3815, 3932), 'pgd_attack.LinfPGDAttack', 'LinfPGDAttack', (['best_model', 'rho_test', "config['k']", "config['a']", "config['random_start']", "config['loss_func']", 'clip'], {}), "(best_model, rho_test, config['k'], config['a'], config[\n 'random_start'], config['loss_func'], clip)\n", (3828, 3932), False, 'from pgd_attack import LinfPGDAttack\n'), ((6987, 7026), 'numpy.mean', 'np.mean', (["dict_exp['adv_test_accs'][rho]"], {}), "(dict_exp['adv_test_accs'][rho])\n", (6994, 7026), True, 'import numpy as np\n'), ((7731, 7750), 'numpy.std', 'np.std', (['w_i'], {'axis': '(0)'}), '(w_i, axis=0)\n', (7737, 7750), True, 'import numpy as np\n'), ((2198, 2255), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Train Xent"""', 'simple_value': 'nat_xent'}), "(tag='Train Xent', simple_value=nat_xent)\n", (2214, 2255), True, 'import tensorflow.compat.v1 as tf\n'), ((2268, 2321), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Val Acc"""', 'simple_value': 'val_acc'}), "(tag='Val Acc', simple_value=val_acc)\n", (2284, 2321), True, 'import tensorflow.compat.v1 as tf\n'), ((2334, 2389), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Train Acc"""', 'simple_value': 'nat_acc'}), "(tag='Train Acc', simple_value=nat_acc)\n", (2350, 2389), True, 'import tensorflow.compat.v1 as tf\n'), ((2402, 2469), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Train Stable Xent"""', 'simple_value': 'stable_xent'}), "(tag='Train Stable Xent', simple_value=stable_xent)\n", (2418, 2469), True, 'import tensorflow.compat.v1 as tf\n'), ((2482, 2568), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Train Robust Stable Xent"""', 'simple_value': 'robust_stable_xent'}), "(tag='Train Robust Stable Xent', simple_value=\n robust_stable_xent)\n", (2498, 2568), True, 'import tensorflow.compat.v1 as tf\n'), ((2576, 2631), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Test Acc"""', 'simple_value': 'test_acc'}), "(tag='Test Acc', simple_value=test_acc)\n", (2592, 2631), True, 'import tensorflow.compat.v1 as tf\n'), ((5150, 5188), 'numpy.std', 'np.std', (["dict_exp['logits_acc']"], {'axis': '(0)'}), "(dict_exp['logits_acc'], axis=0)\n", (5156, 5188), True, 'import numpy as np\n'), ((6945, 6967), 'numpy.mean', 'np.mean', (['dict_exp[key]'], {}), '(dict_exp[key])\n', (6952, 6967), True, 'import numpy as np\n'), ((2826, 2948), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': "(w_vars[i] + 
'_killed_neurons')", 'simple_value': "dict_exp[w_vars[i] + '_killed_neurons'][experiment]"}), "(tag=w_vars[i] + '_killed_neurons', simple_value=dict_exp[\n w_vars[i] + '_killed_neurons'][experiment])\n", (2842, 2948), True, 'import tensorflow.compat.v1 as tf\n'), ((2961, 3089), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': "(w_vars[i] + '_killed_inputs')", 'simple_value': "dict_exp[w_vars[i] + '_killed_input_features'][experiment]"}), "(tag=w_vars[i] + '_killed_inputs', simple_value=dict_exp[\n w_vars[i] + '_killed_input_features'][experiment])\n", (2977, 3089), True, 'import tensorflow.compat.v1 as tf\n'), ((3102, 3210), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': "(w_vars[i] + '_nonzero')", 'simple_value': "dict_exp[w_vars[i] + '_nonzero'][experiment]"}), "(tag=w_vars[i] + '_nonzero', simple_value=dict_exp[w_vars[i\n ] + '_nonzero'][experiment])\n", (3118, 3210), True, 'import tensorflow.compat.v1 as tf\n')] |
import re
import string
import numpy as np
import pandas as pd
import seaborn as sns
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from gensim.corpora.dictionary import Dictionary
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report
nltk.download('stopwords')
nltk.download('punkt')
def load_data(filename):
df = pd.read_csv(filename)
df['verdict'] = df['label'].apply(lambda verdict: 0 if verdict == 'fake' else 1)
print("Shape of the dataset: {}".format(df.shape))
print("Number of the 'REAL' label in the dataset: {}".format(len(df[df['verdict'] == 1])))
print("Number of the 'FAKE' label in the dataset: {}".format(len(df[df['verdict'] == 0])))
return df
def get_vocab_size(X_train_preproc, X_test_preproc, X_val_preproc):
text_data = []
vocab = []
for i in X_train_preproc:
text_data.append(i)
for i in X_test_preproc:
text_data.append(i)
for i in X_val_preproc:
text_data.append(i)
for text in text_data:
text_list = text.split(' ')
for word in text_list:
vocab.append(word)
vocab_size = len(set(vocab))
num_token = [len(tokens.split(' ')) for tokens in X_train_preproc + X_test_preproc + X_val_preproc]
num_token = np.array(num_token)
max_token = np.mean(num_token) + 2 * np.std(num_token)
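    # cap sequence length at mean + 2*std of the per-document token counts,
    # which covers the large majority of documents if lengths are roughly normal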
max_token = int(max_token)
return vocab_size, max_token
def create_token2id(X_train_preproc, X_test_preproc, X_val_preproc):
tokenized_docs = [word_tokenize(text) for text in X_train_preproc + X_test_preproc + X_val_preproc]
dictionary = Dictionary(tokenized_docs)
return dictionary.token2id
def one_hot_text(token2id, input_text):
return [token2id[word] for word in word_tokenize(input_text)]
def preprocessing(text):
negate_dict = {
"couldn't": "could not",
"can't": "can not",
"didn't": "did not",
"won't": "will not",
"don't": "do not",
"aren't": "are not",
"doesn't": "does not",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"isn't": "is not",
"mightn't": "might not",
"mustn't": "must not",
"needn't": "need not",
"shan't": "shall not",
"shouldn't": "should not",
"wasn't": "was not",
"weren't": "were not",
"wouldn't": "would not"
}
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
stopwords_english.remove('not')
stopwords_english.remove('no')
    # r'$' alone matches only the end of the string (a no-op); strip
    # stock-ticker-style '$...' tokens instead
    text = re.sub(r'\$\w*', '', str(text))
text = re.sub(r'https?:\/\/.*[\r\n]*', '', str(text))
text = re.sub(r'^RT[\s]+', '', str(text))
text = re.sub(r'#', '', str(text))
text = re.sub(r'\@\w*', '', str(text))
text = re.sub(r'WHO', 'world health organization', str(text))
for negate_word in negate_dict.keys():
text = re.sub(negate_word, negate_dict[negate_word], str(text))
text = re.sub(r"&", ' and ', str(text))
text = text.replace('&', ' ')
text = re.sub(r"[^0-9a-zA-Z]+", ' ', str(text))
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
text_tokens = tokenizer.tokenize(text)
clean_text = []
for word in text_tokens:
if (word not in stopwords_english and word not in string.punctuation):
stem_word = stemmer.stem(word)
clean_text.append(stem_word)
return ' '.join(clean_text)
def confusion_matrix_plot(matrix):
group_counts = ['{0:0.0f}'.format(value) for value in matrix.flatten()]
group_percentages = ['{0:.2%}'.format(value) for value in matrix.flatten() / np.sum(matrix)]
labels = [f'{a}\n{b}' for a, b in zip(group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(matrix, annot=labels, fmt='', cmap=sns.light_palette("seagreen", as_cmap=True))
def evaluate(y_test, prediction):
print(classification_report(y_test, prediction))
accuracy = accuracy_score(y_test, prediction)
precision = precision_score(y_test, prediction)
recall = recall_score(y_test, prediction)
print('Accuracy score: {}'.format(accuracy))
print('Precision score: {}'.format(precision))
print('Recall score: {}'.format(recall))
return accuracy
def get_verdict(model, input_text, vocab_size, maxlen):
input_text_preproc = preprocessing(input_text)
input_text_onehot = one_hot(input_text_preproc, vocab_size)
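    # note: keras' one_hot is hash-based, so two different words can collide
    # onto the same integer id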
input_text_embedded_docs = pad_sequences([input_text_onehot], padding='pre', maxlen=maxlen)
output_verdict = model.predict(input_text_embedded_docs)
return output_verdict[0][0]
def get_verdict_with_token2id(model, token2id, input_text, maxlen):
input_text_preproc = preprocessing(input_text)
input_text_onehot = one_hot_text(token2id, input_text_preproc)
input_text_embedded_docs = pad_sequences([input_text_onehot], padding='pre', maxlen=maxlen)
output_verdict = model.predict(input_text_embedded_docs)
return output_verdict[0][0]
| [
"numpy.sum",
"nltk.stem.PorterStemmer",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"seaborn.light_palette",
"sklearn.metrics.classification_report",
"numpy.mean",
"gensim.corpora.dictionary.Dictionary",
"nltk.download",
"numpy.std",
"tensorflow.keras.preprocessing.sequence.pad_sequence... | [((520, 546), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (533, 546), False, 'import nltk\n'), ((547, 569), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (560, 569), False, 'import nltk\n'), ((606, 627), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (617, 627), True, 'import pandas as pd\n'), ((1531, 1550), 'numpy.array', 'np.array', (['num_token'], {}), '(num_token)\n', (1539, 1550), True, 'import numpy as np\n'), ((1867, 1893), 'gensim.corpora.dictionary.Dictionary', 'Dictionary', (['tokenized_docs'], {}), '(tokenized_docs)\n', (1877, 1893), False, 'from gensim.corpora.dictionary import Dictionary\n'), ((2591, 2606), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2604, 2606), False, 'from nltk.stem import PorterStemmer\n'), ((2628, 2654), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2643, 2654), False, 'from nltk.corpus import stopwords\n'), ((3236, 3308), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': '(False)', 'strip_handles': '(True)', 'reduce_len': '(True)'}), '(preserve_case=False, strip_handles=True, reduce_len=True)\n', (3250, 3308), False, 'from nltk.tokenize import TweetTokenizer\n'), ((4093, 4127), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (4107, 4127), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report\n'), ((4144, 4179), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (4159, 4179), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report\n'), ((4193, 4225), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (4205, 4225), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report\n'), ((4526, 4565), 'tensorflow.keras.preprocessing.text.one_hot', 'one_hot', (['input_text_preproc', 'vocab_size'], {}), '(input_text_preproc, vocab_size)\n', (4533, 4565), False, 'from tensorflow.keras.preprocessing.text import one_hot\n'), ((4597, 4661), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[input_text_onehot]'], {'padding': '"""pre"""', 'maxlen': 'maxlen'}), "([input_text_onehot], padding='pre', maxlen=maxlen)\n", (4610, 4661), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((4974, 5038), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[input_text_onehot]'], {'padding': '"""pre"""', 'maxlen': 'maxlen'}), "([input_text_onehot], padding='pre', maxlen=maxlen)\n", (4987, 5038), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1567, 1585), 'numpy.mean', 'np.mean', (['num_token'], {}), '(num_token)\n', (1574, 1585), True, 'import numpy as np\n'), ((1768, 1787), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (1781, 1787), False, 'from nltk.tokenize import word_tokenize\n'), ((4034, 4075), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (4055, 4075), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report\n'), ((1592, 1609), 'numpy.std', 'np.std', (['num_token'], 
{}), '(num_token)\n', (1598, 1609), True, 'import numpy as np\n'), ((2006, 2031), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['input_text'], {}), '(input_text)\n', (2019, 2031), False, 'from nltk.tokenize import word_tokenize\n'), ((3859, 3877), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (3869, 3877), True, 'import numpy as np\n'), ((3943, 3986), 'seaborn.light_palette', 'sns.light_palette', (['"""seagreen"""'], {'as_cmap': '(True)'}), "('seagreen', as_cmap=True)\n", (3960, 3986), True, 'import seaborn as sns\n'), ((3754, 3768), 'numpy.sum', 'np.sum', (['matrix'], {}), '(matrix)\n', (3760, 3768), True, 'import numpy as np\n')] |
import cv2
import math
import numpy as np
"""
A particle-based object tracker originally implemented by <NAME> of
Allstate and edited by djp42 and aroyc of Stanford.
The main idea is to use particle filtering with importance sampling to predict
where tracked boxes will be, allowing association between frames and the potential
to recover from object occlusion through "holding".
"Holding" means the tracker was initialized but could not be matched to a
detected object in the previous step. There is a variable "max_holding" that
defines the maximum number of frames we "hold" a tracker for without matching
it to an object before re-initializing it.
General particle tracking can be thought of as having 3 main steps:
I. Predict
II. Update
III. Resample
Follow the step-by-step execution after the constructor by looking at update_all()
"""
class ParticleTracker:
def __init__(self,
num_particles,
num_trackers,
max_holding=2,
max_tracker_jump=0.05,
cov=0.00001,
min_allowable_likelihood=-0.5):
"""
The constructor creates many of the arrays necessary for the trackers,
initializing most to 0s. There are many variables here that can probably
be reduced in future iterations.
"""
# num_particles is number of particles.
# num_trackers is the max number of trackers
self.num_particles = num_particles
self.num_trackers = num_trackers
self.max_holding = max_holding
self.max_tracker_jump = max_tracker_jump
self.cov = cov
self.cov_x_arr = np.diag([self.cov]*self.num_particles)
self.cov_y_arr = np.diag([self.cov/2]*self.num_particles)
# TODO have higher variance for bigger objects?
# TODO have some way to learn the covariance over time.
self.min_allowable_likelihood = min_allowable_likelihood
# especially necessary for alternatives
self.particles = np.zeros((num_trackers, num_particles, 2)) # 2 because x, y
self.tracked_boxes = np.zeros((num_trackers, 4)) # box coordinates. index is id
self.tracked_labels = ["" for i in range(num_trackers)]
        self.box_indices = set()  # indices of detections already matched to a tracker
self.distance_to_particle_identified = np.zeros((num_trackers, num_particles))
self.previous_distance_to_particle_identified = np.zeros((num_trackers, num_particles))
# need to track previous in case we revert (grep for merge_conflict)
self.initialized_trackers=np.zeros(num_trackers)
self.delta_x_trackers=np.zeros(num_trackers)
self.delta_y_trackers=np.zeros(num_trackers)
self.count_holding_vehicles=np.zeros(num_trackers)
self.centroid_x_previous=np.zeros(num_trackers)
self.centroid_y_previous=np.zeros(num_trackers)
self.img = None
self.detections = None
self.box_indices = set()
self.display = False # TODO args
self.verbose = False
self.i = 1
def _reset_tracker(self, trackerID):
"""
For a given trackerID, reset the associated variables.
"""
self.count_holding_vehicles[trackerID] = 0
self.initialized_trackers[trackerID] = 0
self.centroid_x_previous[trackerID] = 0
self.centroid_y_previous[trackerID] = 0
self.delta_x_trackers[trackerID] = 0
self.delta_y_trackers[trackerID] = 0
def _resample_particles(self, trackerID):
"""
For this tracker, shuffle the particles based on the cumulative density
function (CDF) and the distance to the identified object.
"""
#reindexing
indexes_sorted=[b[0] for b in sorted(
enumerate(self.distance_to_particle_identified[trackerID]),
key=lambda i:i[1])]
#weights
w = [math.exp(-0.5*(
self.distance_to_particle_identified[trackerID, indexes_sorted[i]]**0.5
)) for i in range(self.num_particles)]
accum = np.sum(w)
#cumulative distribution
cdf = np.zeros(self.num_particles)
sum_cdf = 0
for i in range(self.num_particles):
sum_cdf += w[i] / accum
cdf[i]=sum_cdf
        # Invert the CDF with uniform draws, so that particles are resampled
        # from the distribution induced by the weights.
        temp_particles = self.particles[trackerID, :, :].copy()  # copy: plain slicing returns a view
for i in range(self.num_particles):
draw_uniform = np.random.uniform(0,1)
for j in range(self.num_particles):
if draw_uniform <= cdf[j]:
self.particles[trackerID, i, 0] = temp_particles[indexes_sorted[j], 0]
self.particles[trackerID, i, 1] = temp_particles[indexes_sorted[j], 1]
break
if self.verbose:
print("Tracker {}\nOld Particles{}\nNew Particles{}".format(
trackerID,
temp_particles,
self.particles[trackerID, :, :]
))
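        # Note: the nested loop above is an O(P^2) inverse-CDF (multinomial)
        # resample. A vectorized equivalent (a sketch) could be:
        #   draws = np.random.uniform(0, 1, self.num_particles)
        #   picks = np.minimum(np.searchsorted(cdf, draws), self.num_particles - 1)
        #   self.particles[trackerID] = temp_particles[np.asarray(indexes_sorted)[picks]]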
def gen_rand_particles(self, trackerID):
"""
Create num_particles random particles for this tracker using the current particles
plus delta as the mean and a multivariate normal distr.
"""
mean_x = self.particles[trackerID,:,0] + self.delta_x_trackers[trackerID]
mean_y = self.particles[trackerID,:,1] + self.delta_y_trackers[trackerID]
self.particles[trackerID,:,0] = np.clip(
np.random.multivariate_normal(
mean_x,
self.cov_x_arr * (self.tracked_boxes[trackerID][3] - self.tracked_boxes[trackerID][1])
),
0.0,
1.0)
# TODO scale cov_x_arr by the box width?
self.particles[trackerID,:,1] = np.clip(
np.random.multivariate_normal(mean_y, self.cov_y_arr),
0.0,
1.0)
def identify_particles_bboxes(self, trackerID):
"""
For a given tracker, figure out which detection is most likely being
tracked by us. There must be self.particles for this trackerID
"""
self.gen_rand_particles(trackerID)
cx_identified, cy_identified, identified = 0, 0, 0
distance_to_particle_identified = np.zeros(self.num_particles)
max_likelihood = -10000
for box_index in range(len(self.detections)):
likelihood, cx, cy, d_particles = self.likelihood_of_detection(
trackerID, box_index)
if likelihood > max_likelihood:
cx_identified = cx
cy_identified = cy
identified = box_index
max_likelihood = likelihood
for particle_index in range(self.num_particles): # To store, if found.
self.distance_to_particle_identified[trackerID, particle_index] = \
d_particles[particle_index]
return cx_identified, cy_identified, identified
def likelihood_of_detection(self, trackerID, box_index):
"""
Likelihood here is not in the mathematical sense, but could be something
as simple as the inverse distance (lower distance -> higher likelihood).
This can be extended later to more mathematical representations.
"""
x1 = self.detections[box_index][1]
y1 = self.detections[box_index][0]
x2 = self.detections[box_index][3]
y2 = self.detections[box_index][2]
#centroid of the bounding box
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
#determine the closest bounding box to the cloud of particles
if self.verbose:
print("Tracker ID {} and Box {}".format(trackerID, box_index))
print(self.particles[trackerID, :, :])
print(cx, cy)
distances_to_particles = [np.linalg.norm(
[cx, cy] - self.particles[trackerID, p, :]
) for p in range(self.num_particles)]
likelihood = -np.mean(distances_to_particles)
# TODO likelihood /= (x2 - x1) # divide by box width. more movement ok if big box.
# TODO incorporate change in box size?
# TODO have the likelihood based on a motion model for vehicles, using
# distance measurements, etc.
return likelihood, cx, cy, distances_to_particles
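    # A more probabilistic score (a sketch) would treat each particle as the
    # mean of an isotropic Gaussian and use the average log-density, e.g.
    #   log_lik = -np.mean(np.square(distances_to_particles)) / (2 * sigma ** 2)
    # where sigma is a hypothetical, tunable spread parameter.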
def is_merge_conflict(self, trackerID, cx_identified, cy_identified, init=False):
"""
This function determines if the identified tracker conflicts with other
trackers.
If another initialized tracker is close to the identified
object, we check a second condition, depending on the context for the
function call (whether or not we want to initialize this tracker or are
simply updating it, identified by "init" argument)
- If we are updating this trackerID, then we say there is a conflict if
the close tracker is not "holding".
        - If we want to initialize the tracker, we say there is a conflict even if
the close tracker is "holding".
-(the close tracker will probably be updated and associated with
this object later in the execution)
Arguments:
trackerID: Integer
- the identification of the tracker we are trying to initialize or update.
c[x|y]_identified: Integer
- the x or y position of the centroid of an identified object we want
to consider tracking with the given tracker.
init: Boolean, default is False
- Flag indicating whether we want to initialize (True) or update (False)
the given tracker, impacting the conditions of a conflict.
Returns:
boolean:
True if updating/initializing this tracker/object pair would
conflict with an existing tracker.
False if we can go ahead and associate this tracker with the
identified object.
"""
for other_tracker_id in range(self.num_trackers):
if other_tracker_id == trackerID or \
                    self.initialized_trackers[other_tracker_id] == 0:
# only check other trackers that are initialized
continue
distance_to_box = np.linalg.norm([
cx_identified - self.centroid_x_previous[other_tracker_id],
cy_identified - self.centroid_y_previous[other_tracker_id]
])
if distance_to_box < self.max_tracker_jump / 2:
if not init:
# if we want to update our tracker, if we see another tracker
# that is close to this box and not holding, we say it is a conflict
if self.count_holding_vehicles[other_tracker_id] == 0:
return True
else:
# If we are thinking about initializing a tracker, we say there is
# a conflict even if the other tracker is holding.
if self.verbose:
print("merge in init")
print("d:", distance_to_box, "d:", self.max_tracker_jump)
return True
return False
def increment_holding(self, trackerID):
"""
keep data from previous successful bounding box
        assignment while we hold waiting for the bounding box to reappear
"""
if self.count_holding_vehicles[trackerID] >= self.max_holding:
self._reset_tracker(trackerID)
return
if self.verbose:
print("Holding # {} for tracker: {}".format(
self.count_holding_vehicles[trackerID],
trackerID)
)
self.count_holding_vehicles[trackerID] += 1
self.distance_to_particle_identified[trackerID] = \
self.previous_distance_to_particle_identified[trackerID]
'''
self.tracked_boxes[trackerID] += [
self.delta_x_trackers[trackerID],
self.delta_y_trackers[trackerID],
self.delta_x_trackers[trackerID],
self.delta_y_trackers[trackerID]]
'''
# TODO move the box by our expected movement, for visualization purposes.
# TODO this would leverage any new motion models.
def update_tracker(self, trackerID, cx_identified, cy_identified, box_index):
"""
We update the important variables for the tracker in order to associate
it with the given identified object.
Then, we resample the particles for this tracker based on weights
proportional to accuracy of the predicted particles.
Arguments:
trackerID: Integer
- the identification of the tracker we are trying to initialize or update.
c[x|y]_identified: Integer
- the x or y position of the centroid of an identified object we want
to consider tracking with the given tracker.
box_index: Integer
- the index in the list of the detected objects for this bounding box,
so that we can track which boxes have been associated already.
- While not strictly part of particle filtering, it can be helpful
to know this for debugging purposes.
"""
if self.initialized_trackers[trackerID] == 1:
self.delta_x_trackers[trackerID] = (
cx_identified - self.centroid_x_previous[trackerID]
) / (self.count_holding_vehicles[trackerID] + 1)
self.delta_y_trackers[trackerID] = (
cy_identified - self.centroid_y_previous[trackerID]
) / (self.count_holding_vehicles[trackerID] + 1) # The average movement
if self.verbose:
print("Delta for tracker ", trackerID)
print(self.delta_x_trackers[trackerID])
print(self.delta_y_trackers[trackerID])
self.count_holding_vehicles[trackerID] = 0 #the bounding box reappeared
self.centroid_x_previous[trackerID] = cx_identified
self.centroid_y_previous[trackerID] = cy_identified
self.initialized_trackers[trackerID] = 1
self.update_tracked_boxes(trackerID, box_index)
self._resample_particles(trackerID)
def update_tracked_boxes(self, trackerID, box_index):
"""
Finalize the association of tracker and bounding box by updating our
high level variables.
"""
self.tracked_boxes[trackerID] = self.detections[box_index]
self.tracked_labels[trackerID] = self.labels[box_index]
self.box_indices.add(box_index)
# TODO move to new centroid and average dimensions?
def update_initialized_tracker(self, trackerID):
"""
For a tracker that has been previously initialized, we want to identify
        the most probable of the detected objects and determine whether we should
associate the tracker with that object.
"""
#Identifying the bounding box through the particles: which bounding box corresponds to these particles
cx_identified, cy_identified, identified = self.identify_particles_bboxes(trackerID)
x_translation = abs(cx_identified - self.centroid_x_previous[trackerID])
if x_translation > self.max_tracker_jump or \
self.is_merge_conflict(trackerID, cx_identified, cy_identified):
if self.verbose:
print("Incrementing holding {}: merge_conflict: {}".format(
trackerID,
self.is_merge_conflict(trackerID, cx_identified, cy_identified))
)
            # the bounding box possibly disappeared
self.increment_holding(trackerID)
else:
self.update_tracker(trackerID, cx_identified, cy_identified, identified)
def try_to_start_tracking(self, trackerID):
"""
Must be run after updating all current trackers.
The idea is for uninitialized trackers, if there are remaining un-associated
objects, we try to start tracking them.
We still check if there is a merge conflict as it empirically helps
prevent duplicate detections/trackers.
"""
for box_index in range(len(self.detections)):
if box_index in self.box_indices: continue
x1 = self.detections[box_index][1]
y1 = self.detections[box_index][0]
x2 = self.detections[box_index][3]
y2 = self.detections[box_index][2]
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
if self.verbose:
print("Box {} with centroid: {}, {}".format(
box_index, cx, cy
))
if not self.valid_box(x1,y1,x2,y2):
# our filtering says we ignore this box.
continue
if not self.is_merge_conflict(trackerID, cx, cy, init=True):
"""
            if the vehicle being probed is holding, we need to be more
            restrictive, because holding tracks cx_previous and cy_previous,
            which may lie farther from the current bounding box that in
            principle corresponds to the holding vehicle
"""
#The bounding box being picked does not belong/correspond to another tracker
mean = [cx, cy]
cov = [[self.cov, 0], [0, self.cov]]
                # Draw one (x, y) sample per particle so the initial cloud
                # has spread; a single draw would collapse all particles.
                (self.particles[trackerID, :, 0],
                    self.particles[trackerID, :, 1]) = \
                    np.random.multivariate_normal(mean, cov, self.num_particles).T
self.update_tracker(trackerID, cx, cy, box_index)
if self.verbose:
print("tracker {} created for box {}".format(
trackerID, box_index))
return
def valid_box(self, x1, y1, x2, y2):
"""
        currently just filters out boxes that lie entirely above the vertical
        midpoint of the image (e.g. sky). In the future we can have more robust
        filtering, and also pass in the correct horizon point (not assuming
        it's the midpoint).
"""
if y2 < 0.5:
return False
return True
def reset_all_trackers(self):
"""
A public wrapper to reset all trackers.
"""
for trackerID in range(self.num_trackers):
self._reset_tracker(trackerID)
def get_boxes(self, with_holding=True):
"""
This returns a dictionary of trackerID as the key, with a value that is
a tuple of (box, label).
box itself is a tuple of the coordinates of the bounding box.
"""
boxes_with_labels = dict()
for trackerID in range(self.num_trackers):
if self.initialized_trackers[trackerID] == 1 and \
(with_holding == True or self.count_holding_vehicles[trackerID] == 0):
boxes_with_labels[trackerID] = (
self.tracked_boxes[trackerID],
self.tracked_labels[trackerID])
if self.verbose:
print("Returning {} boxes".format(len(boxes_with_labels.keys())))
return boxes_with_labels
def update_all(self, image, boxes, labels=None, verbose=False):
"""
This is the main entrance function, which is called externally to perform
an update given a single image and set of detected objects in the boxes.
"""
self.i += 1
        if boxes is None:
return self.get_boxes()
self.img = image
self.detections = boxes
self.verbose = verbose
self.box_indices = set()
self.labels = labels
if self.verbose:
self._print_all_tracker_centroids()
for trackerID in range(self.num_trackers):
if self.initialized_trackers[trackerID] == 1:
self.update_initialized_tracker(trackerID)
for trackerID in range(self.num_trackers):
if self.initialized_trackers[trackerID] == 0:
self.try_to_start_tracking(trackerID)
return self.get_boxes()
def _display_trackers(self, trackerID, identified):
"""
This function displays the trackers for objects, and is used mainly
just for testing and debugging purposes.
"""
        if self.initialized_trackers[trackerID] == 1 and \
                self.count_holding_vehicles[trackerID] == 0:
            colors = [(0,0,255), (0,255,0), (255,0,0), (150,100,0)]
            x1 = self.detections[identified][1]
            y1 = self.detections[identified][0]
            x2 = self.detections[identified][3]
            y2 = self.detections[identified][2]
            # NOTE: coordinates are normalized to [0, 1] elsewhere in this
            # class, so scaling by the image size may be needed before drawing.
            cv2.rectangle(
                self.img,
                (int(x1), int(y1)),
                (int(x2), int(y2)),
                colors[trackerID % len(colors)],
                2)
            cv2.putText(
                self.img,
                str(trackerID),
                (int((x1 + x2) / 2), int((y1 + y2) / 2)),
                cv2.FONT_HERSHEY_SIMPLEX,
                2,
                (0,255,0),
                2)
def _print_all_tracker_centroids(self):
"""
        Another debug function, this prints information about the trackers.
"""
print("\nStartingToPrintAllCentroids:", self.i)
for trackerID in range(self.num_trackers):
print("TrackerID:", trackerID)
print("Centroid: {}, {}".format(
self.centroid_x_previous[trackerID],
self.centroid_y_previous[trackerID]
))
print("Holding:", self.count_holding_vehicles[trackerID])
if self.detections is None: return
for box_index in range(len(self.detections)):
x1 = self.detections[box_index][1]
y1 = self.detections[box_index][0]
x2 = self.detections[box_index][3]
y2 = self.detections[box_index][2]
#centroid of the bounding box
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
print("Bounding box centroid: {}, {}".format(cx, cy))
| [
"numpy.random.uniform",
"math.exp",
"numpy.sum",
"math.ceil",
"numpy.zeros",
"numpy.mean",
"numpy.random.multivariate_normal",
"numpy.linalg.norm",
"numpy.diag"
] | [((1632, 1672), 'numpy.diag', 'np.diag', (['([self.cov] * self.num_particles)'], {}), '([self.cov] * self.num_particles)\n', (1639, 1672), True, 'import numpy as np\n'), ((1696, 1740), 'numpy.diag', 'np.diag', (['([self.cov / 2] * self.num_particles)'], {}), '([self.cov / 2] * self.num_particles)\n', (1703, 1740), True, 'import numpy as np\n'), ((1997, 2039), 'numpy.zeros', 'np.zeros', (['(num_trackers, num_particles, 2)'], {}), '((num_trackers, num_particles, 2))\n', (2005, 2039), True, 'import numpy as np\n'), ((2086, 2113), 'numpy.zeros', 'np.zeros', (['(num_trackers, 4)'], {}), '((num_trackers, 4))\n', (2094, 2113), True, 'import numpy as np\n'), ((2306, 2345), 'numpy.zeros', 'np.zeros', (['(num_trackers, num_particles)'], {}), '((num_trackers, num_particles))\n', (2314, 2345), True, 'import numpy as np\n'), ((2402, 2441), 'numpy.zeros', 'np.zeros', (['(num_trackers, num_particles)'], {}), '((num_trackers, num_particles))\n', (2410, 2441), True, 'import numpy as np\n'), ((2554, 2576), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2562, 2576), True, 'import numpy as np\n'), ((2607, 2629), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2615, 2629), True, 'import numpy as np\n'), ((2660, 2682), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2668, 2682), True, 'import numpy as np\n'), ((2720, 2742), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2728, 2742), True, 'import numpy as np\n'), ((2776, 2798), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2784, 2798), True, 'import numpy as np\n'), ((2832, 2854), 'numpy.zeros', 'np.zeros', (['num_trackers'], {}), '(num_trackers)\n', (2840, 2854), True, 'import numpy as np\n'), ((4046, 4055), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (4052, 4055), True, 'import numpy as np\n'), ((4103, 4131), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (4111, 4131), True, 'import numpy as np\n'), ((6352, 6380), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (6360, 6380), True, 'import numpy as np\n'), ((3875, 3969), 'math.exp', 'math.exp', (['(-0.5 * self.distance_to_particle_identified[trackerID, indexes_sorted[i]] **\n 0.5)'], {}), '(-0.5 * self.distance_to_particle_identified[trackerID,\n indexes_sorted[i]] ** 0.5)\n', (3883, 3969), False, 'import math\n'), ((4584, 4607), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4601, 4607), True, 'import numpy as np\n'), ((5576, 5706), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_x', '(self.cov_x_arr * (self.tracked_boxes[trackerID][3] - self.tracked_boxes[\n trackerID][1]))'], {}), '(mean_x, self.cov_x_arr * (self.tracked_boxes[\n trackerID][3] - self.tracked_boxes[trackerID][1]))\n', (5605, 5706), True, 'import numpy as np\n'), ((5893, 5946), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_y', 'self.cov_y_arr'], {}), '(mean_y, self.cov_y_arr)\n', (5922, 5946), True, 'import numpy as np\n'), ((7928, 7986), 'numpy.linalg.norm', 'np.linalg.norm', (['([cx, cy] - self.particles[trackerID, p, :])'], {}), '([cx, cy] - self.particles[trackerID, p, :])\n', (7942, 7986), True, 'import numpy as np\n'), ((8067, 8098), 'numpy.mean', 'np.mean', (['distances_to_particles'], {}), '(distances_to_particles)\n', (8074, 8098), True, 'import numpy as np\n'), ((10360, 10500), 'numpy.linalg.norm', 'np.linalg.norm', (['[cx_identified - 
self.centroid_x_previous[other_tracker_id], cy_identified -\n self.centroid_y_previous[other_tracker_id]]'], {}), '([cx_identified - self.centroid_x_previous[other_tracker_id],\n cy_identified - self.centroid_y_previous[other_tracker_id]])\n', (10374, 10500), True, 'import numpy as np\n'), ((17630, 17670), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (17659, 17670), True, 'import numpy as np\n'), ((20798, 20830), 'math.ceil', 'math.ceil', (['colors[trackerID / 2]'], {}), '(colors[trackerID / 2])\n', (20807, 20830), False, 'import math\n')] |
""" Main excited-state executable script
Note:
The simulations are intended to be used by calling the package
directly via :code:`python -m adpeps ...`, as described in
:ref:`notes/start`
"""
from jax import grad, jit, vmap, value_and_grad
from jax import random
from jax.scipy.optimize import minimize
from jax.test_util import check_grads
from scipy import optimize
from scipy.linalg import eigh, eig
from yaml import safe_load, dump
import jax
import jax.numpy as np
import numpy as onp
from adpeps.ipeps.ipeps import iPEPS, iPEPS_exci
from adpeps.ipeps.make_momentum_path import make_momentum_path
from adpeps.utils import io
from adpeps.utils.printing import print
from adpeps.ipeps.evaluation import filter_null_modes
import adpeps.ipeps.config as sim_config
def run(config_file: str, momentum_ix: int):
""" Start the simulation
Args:
config_file: filename of the configuration file
momentum_ix: index of the point in momentum space
"""
print(config_file)
with open(config_file) as f:
cfg = safe_load(f)
# Show options
print(dump(cfg))
sim_config.from_dict(cfg)
base_file = io.get_exci_base_file()
if not base_file.exists():
print(f"Base file {base_file} not found. Prepare the simulation first by \
running with option '-i'")
return
sim = iPEPSExciSimulation(config_file, momentum_ix)
output_folder = io.get_exci_folder()
output_folder.mkdir(parents=True, exist_ok=True)
kxs, kys = make_momentum_path(sim_config.momentum_path)
sim_config.px = kxs[momentum_ix]
sim_config.py = kys[momentum_ix]
output_file = io.get_exci_file(momentum_ix)
print(f"Output: {output_file}", level=2)
basis_size = sim.basis_size
res_dtype = np.complex128
H = onp.zeros((basis_size,basis_size), dtype=res_dtype)
N = onp.zeros((basis_size,basis_size), dtype=res_dtype)
for m in range(basis_size):
grad_H, grad_N = sim(m)
H[:,m] = grad_H
N[:,m] = grad_N
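        # Checkpoint after every basis vector so partial results survive an
        # interrupted run; the final save below then just rewrites the file.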
onp.savez(output_file, H=H, N=N)
print(H)
print(N)
onp.savez(output_file, H=H, N=N)
print('Done')
print(f"Saved to {output_file}")
def prepare(config_file):
with open(config_file) as f:
cfg = safe_load(f)
sim_config.from_dict(cfg)
base_file = io.get_exci_base_file()
print(base_file)
peps = iPEPS()
gs_file = io.get_gs_file()
loaded_sim = np.load(gs_file, allow_pickle=True)
peps = loaded_sim['peps'].item()
sim_config.ctm_max_iter = 30
sim_config.ctm_conv_tol = 1e-12
# Converge GS boundary tensors
peps.converge_boundaries()
# Convert to excitations iPEPS
peps.__class__ = iPEPS_exci
# Normalize the ground-state tensors such that the state has norm 1
peps.normalize_gs()
# Shift the Hamiltonian by the ground-state energy
# The excited state energy is then relative to the ground state
peps.substract_gs_energy()
# Prepare an orthonormal basis with respect to the ground state
print('Preparing orthonormal basis')
basis = peps.compute_orth_basis()
print(f"Saving base to {base_file}")
np.savez(base_file, peps=peps, basis=basis)
def evaluate_single(config_file, momentum_ix):
def _compute_ev_red_basis(H, N, P, n):
P = P[:,:n]
N2 = P.T.conjugate() @ N @ P
H2 = P.T.conjugate() @ H @ P
N2 = 0.5 * (N2 + N2.T.conjugate())
H2 = 0.5 * (H2 + H2.T.conjugate())
ev, _ = eig(H2, N2)
return sorted(ev.real)
with open(config_file) as f:
cfg = safe_load(f)
sim_config.from_dict(cfg)
kxs, kys = make_momentum_path(sim_config.momentum_path)
sim_config.px = kxs[momentum_ix]
sim_config.py = kys[momentum_ix]
base_file = io.get_exci_base_file()
base_sim = np.load(base_file, allow_pickle=True)
output_file = io.get_exci_file(momentum_ix)
print(output_file)
dat = np.load(output_file)
H, N = dat['H'], dat['N']
basis = base_sim['basis']
peps = base_sim['peps'].item()
# basis = basis.T @ filter_null_modes(peps.tensors, basis)
# print(basis.shape)
# print(N.shape)
# N = basis.T @ N @ basis
# H = basis.T @ H @ basis
# H = H.conjugate()
H = 0.5 * (H + H.T.conjugate())
N = 0.5 * (N + N.T.conjugate())
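    # Project onto the subspace where the norm matrix N is well conditioned:
    # keep only eigenvectors whose eigenvalues exceed 1e-3 of the largest, then
    # solve the generalized eigenvalue problem H2 v = w N2 v in that basis.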
ev_N, P = np.linalg.eig(N)
idx = ev_N.real.argsort()[::-1]
ev_N = ev_N[idx]
selected = (ev_N/ev_N.max()) > 1e-3
P = P[:,idx]
P = P[:,selected]
N2 = P.T.conjugate() @ N @ P
H2 = P.T.conjugate() @ H @ P
N2 = 0.5 * (N2 + N2.T.conjugate())
H2 = 0.5 * (H2 + H2.T.conjugate())
ev, vectors = eig(H2, N2)
ixs = np.argsort(ev)
ev = ev[ixs]
vectors = vectors[:,ixs]
return sorted(ev.real)
def evaluate(config_file, momentum_ix):
# Default option (-1): evaluate all momenta
if momentum_ix != -1:
return evaluate_single(config_file, momentum_ix)
with open(config_file) as f:
cfg = safe_load(f)
# Show options
print(dump(cfg))
sim_config.from_dict(cfg)
kxs, kys, plot_info = make_momentum_path(sim_config.momentum_path, with_plot_info=True)
import matplotlib.pyplot as plt
evs = []
for ix in range(len(kxs)):
try:
ev = evaluate_single(config_file, ix)
        except Exception:
            ev = [np.nan]
evs.append(ev[0])
plt.plot(evs, '--+')
plt.xticks(**plot_info['xticks'])
plt.title(f"Dispersion {sim_config.model} D={sim_config.D}")
plt.xlabel('k')
    plt.ylabel(r'$\omega$')
plt.show()
class iPEPSExciSimulation:
""" Simulation class for the excited-state simulation
Call an instance of this class directly to start the simulation
"""
def __init__(self, config_file, momentum_ix):
self.config_file = config_file
self.momentum_ix = momentum_ix
@property
def basis_size(self):
with open(self.config_file) as f:
cfg = safe_load(f)
sim_config.from_dict(cfg)
base_file = io.get_exci_base_file()
base_sim = np.load(base_file, allow_pickle=True)
basis = base_sim['basis']
return basis.shape[1]
def __call__(self, ix, v=None):
print(f"Starting simulation of basis vector {ix+1}/{self.basis_size}")
with open(self.config_file) as f:
cfg = safe_load(f)
sim_config.from_dict(cfg)
base_file = io.get_exci_base_file()
base_sim = np.load(base_file, allow_pickle=True)
basis = np.complex_(base_sim['basis'])
peps = base_sim['peps'].item()
if v is None:
v = basis[:,ix]
res, grad_H = value_and_grad(peps.run, has_aux=True)(v)
grad_H = grad_H.conj()
print('Res', res, level=2)
grad_N = res[1].pack_data()
print('Grad H', grad_H, level=2)
print('Grad N', grad_N, level=2)
print(f"========== \nFinished basis vector {ix+1}/{self.basis_size} \n")
return basis.T @ jax.lax.stop_gradient(grad_H), basis.T @ jax.lax.stop_gradient(grad_N)
def check_grads(self, A=None):
with open(self.config_file) as f:
cfg = safe_load(f)
sim_config.from_dict(cfg)
base_file = io.get_exci_base_file()
base_sim = np.load(base_file, allow_pickle=True)
basis = np.complex_(base_sim['basis'])
peps = base_sim['peps'].item()
print('Checking gradient')
# peps.fill(A)
check_grads(peps.run_gc, (A,), order=1, modes='rev')
print('Done check')
| [
"matplotlib.pyplot.title",
"yaml.dump",
"jax.numpy.savez",
"jax.numpy.linalg.eig",
"yaml.safe_load",
"adpeps.ipeps.ipeps.iPEPS",
"adpeps.ipeps.make_momentum_path.make_momentum_path",
"matplotlib.pyplot.xticks",
"jax.numpy.load",
"adpeps.utils.io.get_exci_file",
"matplotlib.pyplot.show",
"jax.n... | [((1023, 1041), 'adpeps.utils.printing.print', 'print', (['config_file'], {}), '(config_file)\n', (1028, 1041), False, 'from adpeps.utils.printing import print\n'), ((1148, 1173), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (1168, 1173), True, 'import adpeps.ipeps.config as sim_config\n'), ((1190, 1213), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (1211, 1213), False, 'from adpeps.utils import io\n'), ((1463, 1483), 'adpeps.utils.io.get_exci_folder', 'io.get_exci_folder', ([], {}), '()\n', (1481, 1483), False, 'from adpeps.utils import io\n'), ((1552, 1596), 'adpeps.ipeps.make_momentum_path.make_momentum_path', 'make_momentum_path', (['sim_config.momentum_path'], {}), '(sim_config.momentum_path)\n', (1570, 1596), False, 'from adpeps.ipeps.make_momentum_path import make_momentum_path\n'), ((1689, 1718), 'adpeps.utils.io.get_exci_file', 'io.get_exci_file', (['momentum_ix'], {}), '(momentum_ix)\n', (1705, 1718), False, 'from adpeps.utils import io\n'), ((1723, 1763), 'adpeps.utils.printing.print', 'print', (['f"""Output: {output_file}"""'], {'level': '(2)'}), "(f'Output: {output_file}', level=2)\n", (1728, 1763), False, 'from adpeps.utils.printing import print\n'), ((1834, 1886), 'numpy.zeros', 'onp.zeros', (['(basis_size, basis_size)'], {'dtype': 'res_dtype'}), '((basis_size, basis_size), dtype=res_dtype)\n', (1843, 1886), True, 'import numpy as onp\n'), ((1894, 1946), 'numpy.zeros', 'onp.zeros', (['(basis_size, basis_size)'], {'dtype': 'res_dtype'}), '((basis_size, basis_size), dtype=res_dtype)\n', (1903, 1946), True, 'import numpy as onp\n'), ((2105, 2113), 'adpeps.utils.printing.print', 'print', (['H'], {}), '(H)\n', (2110, 2113), False, 'from adpeps.utils.printing import print\n'), ((2118, 2126), 'adpeps.utils.printing.print', 'print', (['N'], {}), '(N)\n', (2123, 2126), False, 'from adpeps.utils.printing import print\n'), ((2131, 2163), 'numpy.savez', 'onp.savez', (['output_file'], {'H': 'H', 'N': 'N'}), '(output_file, H=H, N=N)\n', (2140, 2163), True, 'import numpy as onp\n'), ((2168, 2181), 'adpeps.utils.printing.print', 'print', (['"""Done"""'], {}), "('Done')\n", (2173, 2181), False, 'from adpeps.utils.printing import print\n'), ((2186, 2218), 'adpeps.utils.printing.print', 'print', (['f"""Saved to {output_file}"""'], {}), "(f'Saved to {output_file}')\n", (2191, 2218), False, 'from adpeps.utils.printing import print\n'), ((2310, 2335), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (2330, 2335), True, 'import adpeps.ipeps.config as sim_config\n'), ((2352, 2375), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (2373, 2375), False, 'from adpeps.utils import io\n'), ((2380, 2396), 'adpeps.utils.printing.print', 'print', (['base_file'], {}), '(base_file)\n', (2385, 2396), False, 'from adpeps.utils.printing import print\n'), ((2408, 2415), 'adpeps.ipeps.ipeps.iPEPS', 'iPEPS', ([], {}), '()\n', (2413, 2415), False, 'from adpeps.ipeps.ipeps import iPEPS, iPEPS_exci\n'), ((2431, 2447), 'adpeps.utils.io.get_gs_file', 'io.get_gs_file', ([], {}), '()\n', (2445, 2447), False, 'from adpeps.utils import io\n'), ((2465, 2500), 'jax.numpy.load', 'np.load', (['gs_file'], {'allow_pickle': '(True)'}), '(gs_file, allow_pickle=True)\n', (2472, 2500), True, 'import jax.numpy as np\n'), ((3075, 3111), 'adpeps.utils.printing.print', 'print', (['"""Preparing orthonormal basis"""'], {}), "('Preparing orthonormal basis')\n", (3080, 3111), False, 'from 
adpeps.utils.printing import print\n'), ((3155, 3191), 'adpeps.utils.printing.print', 'print', (['f"""Saving base to {base_file}"""'], {}), "(f'Saving base to {base_file}')\n", (3160, 3191), False, 'from adpeps.utils.printing import print\n'), ((3196, 3239), 'jax.numpy.savez', 'np.savez', (['base_file'], {'peps': 'peps', 'basis': 'basis'}), '(base_file, peps=peps, basis=basis)\n', (3204, 3239), True, 'import jax.numpy as np\n'), ((3637, 3662), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (3657, 3662), True, 'import adpeps.ipeps.config as sim_config\n'), ((3678, 3722), 'adpeps.ipeps.make_momentum_path.make_momentum_path', 'make_momentum_path', (['sim_config.momentum_path'], {}), '(sim_config.momentum_path)\n', (3696, 3722), False, 'from adpeps.ipeps.make_momentum_path import make_momentum_path\n'), ((3813, 3836), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (3834, 3836), False, 'from adpeps.utils import io\n'), ((3852, 3889), 'jax.numpy.load', 'np.load', (['base_file'], {'allow_pickle': '(True)'}), '(base_file, allow_pickle=True)\n', (3859, 3889), True, 'import jax.numpy as np\n'), ((3908, 3937), 'adpeps.utils.io.get_exci_file', 'io.get_exci_file', (['momentum_ix'], {}), '(momentum_ix)\n', (3924, 3937), False, 'from adpeps.utils import io\n'), ((3942, 3960), 'adpeps.utils.printing.print', 'print', (['output_file'], {}), '(output_file)\n', (3947, 3960), False, 'from adpeps.utils.printing import print\n'), ((3971, 3991), 'jax.numpy.load', 'np.load', (['output_file'], {}), '(output_file)\n', (3978, 3991), True, 'import jax.numpy as np\n'), ((4369, 4385), 'jax.numpy.linalg.eig', 'np.linalg.eig', (['N'], {}), '(N)\n', (4382, 4385), True, 'import jax.numpy as np\n'), ((4684, 4695), 'scipy.linalg.eig', 'eig', (['H2', 'N2'], {}), '(H2, N2)\n', (4687, 4695), False, 'from scipy.linalg import eigh, eig\n'), ((4706, 4720), 'jax.numpy.argsort', 'np.argsort', (['ev'], {}), '(ev)\n', (4716, 4720), True, 'import jax.numpy as np\n'), ((5086, 5111), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (5106, 5111), True, 'import adpeps.ipeps.config as sim_config\n'), ((5138, 5203), 'adpeps.ipeps.make_momentum_path.make_momentum_path', 'make_momentum_path', (['sim_config.momentum_path'], {'with_plot_info': '(True)'}), '(sim_config.momentum_path, with_plot_info=True)\n', (5156, 5203), False, 'from adpeps.ipeps.make_momentum_path import make_momentum_path\n'), ((5421, 5441), 'matplotlib.pyplot.plot', 'plt.plot', (['evs', '"""--+"""'], {}), "(evs, '--+')\n", (5429, 5441), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5479), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), "(**plot_info['xticks'])\n", (5456, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5484, 5544), 'matplotlib.pyplot.title', 'plt.title', (['f"""Dispersion {sim_config.model} D={sim_config.D}"""'], {}), "(f'Dispersion {sim_config.model} D={sim_config.D}')\n", (5493, 5544), True, 'import matplotlib.pyplot as plt\n'), ((5549, 5564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (5559, 5564), True, 'import matplotlib.pyplot as plt\n'), ((5569, 5592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\omega$"""'], {}), "('$\\\\omega$')\n", (5579, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5596, 5606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5604, 5606), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1101), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (1098, 1101), 
False, 'from yaml import safe_load, dump\n'), ((1132, 1141), 'yaml.dump', 'dump', (['cfg'], {}), '(cfg)\n', (1136, 1141), False, 'from yaml import safe_load, dump\n'), ((1253, 1378), 'adpeps.utils.printing.print', 'print', (['f"""Base file {base_file} not found. Prepare the simulation first by running with option \'-i\'"""'], {}), '(\n f"Base file {base_file} not found. Prepare the simulation first by running with option \'-i\'"\n )\n', (1258, 1378), False, 'from adpeps.utils.printing import print\n'), ((2067, 2099), 'numpy.savez', 'onp.savez', (['output_file'], {'H': 'H', 'N': 'N'}), '(output_file, H=H, N=N)\n', (2076, 2099), True, 'import numpy as onp\n'), ((2293, 2305), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (2302, 2305), False, 'from yaml import safe_load, dump\n'), ((3527, 3538), 'scipy.linalg.eig', 'eig', (['H2', 'N2'], {}), '(H2, N2)\n', (3530, 3538), False, 'from scipy.linalg import eigh, eig\n'), ((3619, 3631), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (3628, 3631), False, 'from yaml import safe_load, dump\n'), ((5027, 5039), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (5036, 5039), False, 'from yaml import safe_load, dump\n'), ((5070, 5079), 'yaml.dump', 'dump', (['cfg'], {}), '(cfg)\n', (5074, 5079), False, 'from yaml import safe_load, dump\n'), ((6027, 6052), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (6047, 6052), True, 'import adpeps.ipeps.config as sim_config\n'), ((6073, 6096), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (6094, 6096), False, 'from adpeps.utils import io\n'), ((6116, 6153), 'jax.numpy.load', 'np.load', (['base_file'], {'allow_pickle': '(True)'}), '(base_file, allow_pickle=True)\n', (6123, 6153), True, 'import jax.numpy as np\n'), ((6263, 6335), 'adpeps.utils.printing.print', 'print', (['f"""Starting simulation of basis vector {ix + 1}/{self.basis_size}"""'], {}), "(f'Starting simulation of basis vector {ix + 1}/{self.basis_size}')\n", (6268, 6335), False, 'from adpeps.utils.printing import print\n'), ((6415, 6440), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (6435, 6440), True, 'import adpeps.ipeps.config as sim_config\n'), ((6462, 6485), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (6483, 6485), False, 'from adpeps.utils import io\n'), ((6505, 6542), 'jax.numpy.load', 'np.load', (['base_file'], {'allow_pickle': '(True)'}), '(base_file, allow_pickle=True)\n', (6512, 6542), True, 'import jax.numpy as np\n'), ((6559, 6589), 'jax.numpy.complex_', 'np.complex_', (["base_sim['basis']"], {}), "(base_sim['basis'])\n", (6570, 6589), True, 'import jax.numpy as np\n'), ((6783, 6809), 'adpeps.utils.printing.print', 'print', (['"""Res"""', 'res'], {'level': '(2)'}), "('Res', res, level=2)\n", (6788, 6809), False, 'from adpeps.utils.printing import print\n'), ((6854, 6886), 'adpeps.utils.printing.print', 'print', (['"""Grad H"""', 'grad_H'], {'level': '(2)'}), "('Grad H', grad_H, level=2)\n", (6859, 6886), False, 'from adpeps.utils.printing import print\n'), ((6895, 6927), 'adpeps.utils.printing.print', 'print', (['"""Grad N"""', 'grad_N'], {'level': '(2)'}), "('Grad N', grad_N, level=2)\n", (6900, 6927), False, 'from adpeps.utils.printing import print\n'), ((6936, 7012), 'adpeps.utils.printing.print', 'print', (['f"""========== \nFinished basis vector {ix + 1}/{self.basis_size} \n"""'], {}), '(f"""========== \nFinished basis vector {ix + 1}/{self.basis_size} \n""")\n', (6941, 7012), 
False, 'from adpeps.utils.printing import print\n'), ((7222, 7247), 'adpeps.ipeps.config.from_dict', 'sim_config.from_dict', (['cfg'], {}), '(cfg)\n', (7242, 7247), True, 'import adpeps.ipeps.config as sim_config\n'), ((7269, 7292), 'adpeps.utils.io.get_exci_base_file', 'io.get_exci_base_file', ([], {}), '()\n', (7290, 7292), False, 'from adpeps.utils import io\n'), ((7312, 7349), 'jax.numpy.load', 'np.load', (['base_file'], {'allow_pickle': '(True)'}), '(base_file, allow_pickle=True)\n', (7319, 7349), True, 'import jax.numpy as np\n'), ((7366, 7396), 'jax.numpy.complex_', 'np.complex_', (["base_sim['basis']"], {}), "(base_sim['basis'])\n", (7377, 7396), True, 'import jax.numpy as np\n'), ((7445, 7471), 'adpeps.utils.printing.print', 'print', (['"""Checking gradient"""'], {}), "('Checking gradient')\n", (7450, 7471), False, 'from adpeps.utils.printing import print\n'), ((7503, 7555), 'jax.test_util.check_grads', 'check_grads', (['peps.run_gc', '(A,)'], {'order': '(1)', 'modes': '"""rev"""'}), "(peps.run_gc, (A,), order=1, modes='rev')\n", (7514, 7555), False, 'from jax.test_util import check_grads\n'), ((7564, 7583), 'adpeps.utils.printing.print', 'print', (['"""Done check"""'], {}), "('Done check')\n", (7569, 7583), False, 'from adpeps.utils.printing import print\n'), ((6006, 6018), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (6015, 6018), False, 'from yaml import safe_load, dump\n'), ((6394, 6406), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (6403, 6406), False, 'from yaml import safe_load, dump\n'), ((6702, 6740), 'jax.value_and_grad', 'value_and_grad', (['peps.run'], {'has_aux': '(True)'}), '(peps.run, has_aux=True)\n', (6716, 6740), False, 'from jax import grad, jit, vmap, value_and_grad\n'), ((7201, 7213), 'yaml.safe_load', 'safe_load', (['f'], {}), '(f)\n', (7210, 7213), False, 'from yaml import safe_load, dump\n'), ((7034, 7063), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['grad_H'], {}), '(grad_H)\n', (7055, 7063), False, 'import jax\n'), ((7075, 7104), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['grad_N'], {}), '(grad_N)\n', (7096, 7104), False, 'import jax\n')] |
"""Line plot script
"""
# author: <NAME>
# github: themlphdstudent
# Licence: BSD 3-Clause
#import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
X = np.linspace(0, 20, 1000)
y = np.cos(X)
plt.plot(X,y, color='b', linestyle='--')
plt.xlabel('X')
plt.ylabel('cos(X)')
plt.savefig('../figures/Line_Plot.png', dpi=300)
plt.show() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.cos",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((187, 211), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (198, 211), True, 'import numpy as np\n'), ((216, 225), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (222, 225), True, 'import numpy as np\n'), ((227, 268), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {'color': '"""b"""', 'linestyle': '"""--"""'}), "(X, y, color='b', linestyle='--')\n", (235, 268), True, 'import matplotlib.pyplot as plt\n'), ((268, 283), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (278, 283), True, 'import matplotlib.pyplot as plt\n'), ((284, 304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cos(X)"""'], {}), "('cos(X)')\n", (294, 304), True, 'import matplotlib.pyplot as plt\n'), ((305, 353), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../figures/Line_Plot.png"""'], {'dpi': '(300)'}), "('../figures/Line_Plot.png', dpi=300)\n", (316, 353), True, 'import matplotlib.pyplot as plt\n'), ((354, 364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (362, 364), True, 'import matplotlib.pyplot as plt\n')] |
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import sys
import argparse
import numpy as np
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
import mprl.utilities as utilities
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="Plot agents TB files")
parser.add_argument(
"-f",
"--fdir",
help="Folders containing parsed TB event file",
type=str,
required=True,
nargs="+",
)
parser.add_argument(
"-l", "--labels", help="Labels for plot", type=str, nargs="+", default=None
)
parser.add_argument(
"-n",
"--nlims",
help="Limits on episodes to plot",
type=int,
nargs="+",
default=None,
)
parser.add_argument(
"--legends",
help="Figure titles where legend should appear",
type=str,
nargs="+",
default=["loss"],
)
parser.add_argument(
"--lines", help="Vertical lines to add", type=int, nargs="+", default=[]
)
args = parser.parse_args()
# Loop over the folders
for k, fdir in enumerate(args.fdir):
fname = os.path.join(fdir, "agent")
name = None if args.labels is None else args.labels[k]
limit = np.finfo(float).max if args.nlims is None else args.nlims[k]
utilities.plot_tb(
os.path.join(fdir, "data.csv"),
idx=k,
name=name,
limit=limit,
lines=args.lines,
)
utilities.save_tb_plots("compare_training.pdf", legends=args.legends)
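# Example invocation (a sketch; the script name and folder paths are
# hypothetical, the flags are the ones defined above):
#   python plot_tb.py -f runs/agent_a runs/agent_b -l A B --lines 500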
| [
"argparse.ArgumentParser",
"os.path.dirname",
"numpy.finfo",
"os.path.join",
"mprl.utilities.save_tb_plots"
] | [((566, 625), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot agents TB files"""'}), "(description='Plot agents TB files')\n", (589, 625), False, 'import argparse\n'), ((1838, 1907), 'mprl.utilities.save_tb_plots', 'utilities.save_tb_plots', (['"""compare_training.pdf"""'], {'legends': 'args.legends'}), "('compare_training.pdf', legends=args.legends)\n", (1861, 1907), True, 'import mprl.utilities as utilities\n'), ((1487, 1514), 'os.path.join', 'os.path.join', (['fdir', '"""agent"""'], {}), "(fdir, 'agent')\n", (1499, 1514), False, 'import os\n'), ((269, 294), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (284, 294), False, 'import os\n'), ((1694, 1724), 'os.path.join', 'os.path.join', (['fdir', '"""data.csv"""'], {}), "(fdir, 'data.csv')\n", (1706, 1724), False, 'import os\n'), ((1594, 1609), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1602, 1609), True, 'import numpy as np\n')] |
'''
Dataset and DataLoader adapted from
https://www.kaggle.com/pinocookie/pytorch-dataset-and-dataloader
'''
import pickle
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
def rotate_img(img, rot):
if rot == 0: # 0 degrees rotation
return img
elif rot == 90: # 90 degrees rotation
return np.flipud(np.transpose(img, (1, 0, 2)))
elif rot == 180: # 90 degrees rotation
return np.fliplr(np.flipud(img))
elif rot == 270: # 270 degrees rotation / or -90
return np.transpose(np.flipud(img), (1, 0, 2))
else:
raise ValueError('rotation should be 0, 90, 180, or 270 degrees')
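# Sanity check (a sketch): four 90-degree rotations compose to the identity,
# so for any HxWxC array `img`:
#   np.array_equal(rotate_img(rotate_img(img, 90), 270), img)  # -> True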
class RotateDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.transform = transforms.Compose([
transforms.ToTensor()
])
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
img = self.dataset[idx]
rotated_imgs = [
self.transform(img),
self.transform(rotate_img(img, 90).copy()),
self.transform(rotate_img(img, 180).copy()),
self.transform(rotate_img(img, 270).copy())
]
rotation_labels = torch.LongTensor([0, 1, 2, 3])
return torch.stack(rotated_imgs, dim=0), rotation_labels
def load_mnist(batch_size,
data_dir='./data',
val_size=0.1,
shuffle=True,
seed=1):
"""Load MNIST data into train/val/test data loader"""
num_workers = 4
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_mnist_all(
data_dir=data_dir, val_size=val_size, shuffle=shuffle, seed=seed)
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
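# Example (a sketch): grab one batch of MNIST.
#   trainloader, validloader, testloader = load_mnist(batch_size=128)
#   images, labels = next(iter(trainloader))  # images: [128, 1, 28, 28]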
def load_mnist_all(data_dir='./data', val_size=0.1, shuffle=True, seed=1):
"""Load entire MNIST dataset into tensor"""
transform = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.MNIST(
root=data_dir, train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST(
root=data_dir, train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=len(trainset), shuffle=False)
testloader = torch.utils.data.DataLoader(
testset, batch_size=len(testset), shuffle=False)
x, y = next(iter(trainloader))
x_test, y_test = next(iter(testloader))
x_train, x_valid, y_train, y_valid = train_test_split(
x.numpy(), y.numpy(), test_size=val_size, shuffle=shuffle,
random_state=seed, stratify=y)
# scale up
# scale = 2
# x_train = x_train.repeat(scale, axis=2).repeat(scale, axis=3)
# x_valid = x_valid.repeat(scale, axis=2).repeat(scale, axis=3)
# x_test = x_test.numpy().repeat(scale, axis=2).repeat(scale, axis=3)
# x_test = torch.tensor(x_test)
return ((torch.tensor(x_train), torch.tensor(y_train)),
(torch.tensor(x_valid), torch.tensor(y_valid)), (x_test, y_test))
def load_mnist_rot(batch_size, data_dir='./data', val_size=0.1, shuffle=True,
seed=1):
(x_train, _), (x_valid, _), (x_test, _) = load_mnist_all(
data_dir, val_size=val_size, seed=seed)
traindataset = RotateDataset(x_train.numpy().transpose(0, 2, 3, 1))
trainloader = torch.utils.data.DataLoader(
traindataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validdataset = RotateDataset(x_valid.numpy().transpose(0, 2, 3, 1))
validloader = torch.utils.data.DataLoader(
validdataset, batch_size=batch_size, shuffle=False, num_workers=4)
testdataset = RotateDataset(x_test.numpy().transpose(0, 2, 3, 1))
testloader = torch.utils.data.DataLoader(
testdataset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_cifar10(batch_size,
data_dir='./data',
val_size=0.1,
normalize=True,
augment=True,
shuffle=True,
seed=1):
"""Load CIFAR-10 data into train/val/test data loader"""
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
num_workers = 4
transform = transforms.Compose([
transforms.ToTensor()
])
if augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomAffine(
5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5),
transforms.ColorJitter(brightness=0.1),
transforms.ToTensor()
])
else:
transform_train = transform
if normalize:
transform = transforms.Compose([
transform,
transforms.Normalize(mean, std)
])
transform_train = transforms.Compose([
transform_train,
transforms.Normalize(mean, std)
])
trainset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform_train)
validset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
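    # trainset and validset wrap the same underlying data; only the transform
    # differs (augmented vs. plain), and the samplers below keep them disjoint.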
testset = torchvision.datasets.CIFAR10(
root=data_dir, train=False, download=True, transform=transform)
# Random split train and validation sets
num_train = len(trainset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
def load_cifar10_all(data_dir='./data', val_size=0.1, shuffle=True, seed=1):
"""Load entire CIFAR-10 dataset into tensor"""
transform = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
validset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(
root=data_dir, train=False, download=True, transform=transform)
# Random split train and validation sets
num_train = len(trainset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=(num_train - split), sampler=train_sampler)
validloader = torch.utils.data.DataLoader(
validset, batch_size=split, sampler=valid_sampler)
testloader = torch.utils.data.DataLoader(
testset, batch_size=len(testset), shuffle=False)
x_train = next(iter(trainloader))
x_valid = next(iter(validloader))
x_test = next(iter(testloader))
return x_train, x_valid, x_test
def load_cifar10_noise(batch_size, data_dir='./data', val_size=0.1, sd=0,
shuffle=True, seed=1):
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_cifar10_all(
data_dir, val_size=val_size, seed=seed)
x_train += torch.randn_like(x_train) * sd
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=4)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_cifar10_rot(batch_size, data_dir='./data', val_size=0.1, shuffle=True,
seed=1):
(x_train, _), (x_valid, _), (x_test, _) = load_cifar10_all(
data_dir, val_size=val_size, seed=seed)
traindataset = RotateDataset(x_train.numpy().transpose(0, 2, 3, 1))
trainloader = torch.utils.data.DataLoader(
traindataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validdataset = RotateDataset(x_valid.numpy().transpose(0, 2, 3, 1))
validloader = torch.utils.data.DataLoader(
validdataset, batch_size=batch_size, shuffle=False, num_workers=4)
testdataset = RotateDataset(x_test.numpy().transpose(0, 2, 3, 1))
testloader = torch.utils.data.DataLoader(
testdataset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_gtsrb(data_dir='./data', gray=False, train_file_name=None):
"""
    Load GTSRB data as a (datasize) x (height) x (width) x (channels) numpy
    matrix. Each pixel is rescaled to lie in [0,1].
"""
def load_pickled_data(file, columns):
"""
Loads pickled training and test data.
Parameters
----------
file : string
Name of the pickle file.
columns : list of strings
List of columns in pickled data we're interested in.
Returns
-------
A tuple of datasets for given columns.
"""
with open(file, mode='rb') as f:
dataset = pickle.load(f)
return tuple(map(lambda c: dataset[c], columns))
def preprocess(x, gray):
"""
        Preprocess dataset: convert images to grayscale if specified, scale
        the input space to [0,1], and reshape the array to the shape the
        NN model expects
"""
if not gray:
# Scale features to be in [0, 1]
x = (x / 255.).astype(np.float32)
else:
# Convert to grayscale, e.g. single Y channel
x = 0.299 * x[:, :, :, 0] + 0.587 * x[:, :, :, 1] + \
0.114 * x[:, :, :, 2]
# Scale features to be in [0, 1]
x = (x / 255.).astype(np.float32)
x = x[:, :, :, np.newaxis]
return x
# Load pickle dataset
if train_file_name is None:
x_train, y_train = load_pickled_data(
data_dir + 'train.p', ['features', 'labels'])
else:
x_train, y_train = load_pickled_data(
data_dir + train_file_name, ['features', 'labels'])
x_val, y_val = load_pickled_data(
data_dir + 'valid.p', ['features', 'labels'])
x_test, y_test = load_pickled_data(
data_dir + 'test.p', ['features', 'labels'])
# Preprocess loaded data
x_train = preprocess(x_train, gray)
x_val = preprocess(x_val, gray)
x_test = preprocess(x_test, gray)
return x_train, y_train, x_val, y_val, x_test, y_test
class GtsrbDataset(torch.utils.data.Dataset):
def __init__(self, x_np, y_np, mean=None, std=None, augment=False):
self.x_pil = [Image.fromarray(
(x * 255).astype(np.uint8)) for x in x_np]
self.y_np = y_np.astype(np.int64)
if mean is None:
mean = (0, 0, 0)
std = (1, 1, 1)
if augment:
self.transform = transforms.Compose([
transforms.RandomCrop(32, padding=4, padding_mode='edge'),
transforms.RandomAffine(
5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5),
transforms.ColorJitter(brightness=0.1),
transforms.ToTensor(),
# transforms.Normalize(mean, std),
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize(mean, std),
])
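        # NOTE: `mean`/`std` are accepted but currently unused while the
        # Normalize transforms above remain commented out.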
def __getitem__(self, index):
# apply the transformations and return tensors
return self.transform(self.x_pil[index]), self.y_np[index]
def __len__(self):
return len(self.x_pil)
def load_gtsrb_dataloader(data_dir, batch_size, num_workers=4):
x_train, y_train, x_val, y_val, x_test, y_test = load_gtsrb(
data_dir=data_dir)
# Standardization
mean = np.mean(x_train, (0, 1, 2))
std = np.std(x_train, (0, 1, 2))
trainset = GtsrbDataset(x_train, y_train, mean, std, augment=True)
validset = GtsrbDataset(x_val, y_val, mean, std, augment=False)
testset = GtsrbDataset(x_test, y_test, mean, std, augment=False)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
def create_planes(d=1000, k=10, num_total=10000, bound=(0, 1), test_size=0.2,
val_size=0.1, seed=1):
"""
Create plane dataset: two planes with dimension k in space of dimension d.
The first k dimensions are random numbers within the bound, dimensions
k + 1 to d - 1 are 0, and d-th dimension is bound[0] or bound[1] which
determines the class.
"""
assert bound[0] < bound[1]
np.random.seed(seed)
planes = torch.zeros((num_total, d))
planes[:, :k] = torch.rand(num_total, k) * (bound[1] - bound[0]) + bound[0]
# planes[:num_total // 2, -1] = bound[0]
# planes[num_total // 2:, -1] = bound[1]
planes[:num_total // 2, -1] = 0.3
planes[num_total // 2:, -1] = 0.7
indices = np.arange(num_total)
np.random.shuffle(indices)
train_idx = int(num_total * (1 - test_size - val_size))
test_idx = int(num_total * (1 - test_size))
x_train = planes[indices[:train_idx]]
x_valid = planes[indices[train_idx:test_idx]]
x_test = planes[indices[test_idx:]]
y_train = torch.tensor(
(indices[:train_idx] >= num_total // 2).astype(np.int64))
y_valid = torch.tensor(
(indices[train_idx:test_idx] >= num_total // 2).astype(np.int64))
y_test = torch.tensor(
(indices[test_idx:] >= num_total // 2).astype(np.int64))
return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)
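def _check_create_planes():
    # Added sanity check (not in the original file): split sizes follow
    # test_size/val_size, and class membership is encoded entirely in the
    # last coordinate, which takes only two constant values.
    (x_tr, y_tr), (x_va, _), (x_te, _) = create_planes(
        d=16, k=4, num_total=100, test_size=0.2, val_size=0.1, seed=0)
    assert x_tr.shape == (70, 16) and x_va.shape == (10, 16) \
        and x_te.shape == (20, 16)
    assert torch.unique(x_tr[:, -1]).numel() <= 2
    assert set(y_tr.tolist()) <= {0, 1}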
def load_planes(batch_size, d=1000, k=10, num_total=10000, bound=(0, 1),
test_size=0.2, val_size=0.1, shuffle=True, seed=1):
num_workers = 4
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = create_planes(
d=d, k=k, num_total=num_total, bound=bound, test_size=test_size,
val_size=val_size, seed=seed)
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
| [
"numpy.random.seed",
"numpy.floor",
"torchvision.datasets.CIFAR10",
"numpy.mean",
"numpy.arange",
"torch.utils.data.TensorDataset",
"pickle.load",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"numpy.std",
"numpy.transpose",
"torch.zeros",
"numpy.random.shuffle",
"torc... | [((1915, 1963), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (1945, 1963), False, 'import torch\n'), ((1979, 2027), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_valid', 'y_valid'], {}), '(x_valid, y_valid)\n', (2009, 2027), False, 'import torch\n'), ((2042, 2088), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (2072, 2088), False, 'import torch\n'), ((2107, 2214), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(trainset, batch_size=batch_size, shuffle=\n shuffle, num_workers=num_workers)\n', (2134, 2214), False, 'import torch\n'), ((2237, 2341), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(validset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (2264, 2341), False, 'import torch\n'), ((2364, 2467), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(testset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (2391, 2467), False, 'import torch\n'), ((2739, 2832), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=True, download=True,\n transform=transform)\n', (2765, 2832), False, 'import torchvision\n'), ((2852, 2946), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'data_dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=False, download=True,\n transform=transform)\n', (2878, 2946), False, 'import torchvision\n'), ((4135, 4236), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['traindataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(4)'}), '(traindataset, batch_size=batch_size, shuffle=\n shuffle, num_workers=4)\n', (4162, 4236), False, 'import torch\n'), ((4332, 4431), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validdataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(validdataset, batch_size=batch_size, shuffle=\n False, num_workers=4)\n', (4359, 4431), False, 'import torch\n'), ((4524, 4622), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testdataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(testdataset, batch_size=batch_size, shuffle=\n False, num_workers=4)\n', (4551, 4622), False, 'import torch\n'), ((5818, 5919), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), '(root=data_dir, train=True, download=True,\n transform=transform_train)\n', (5846, 5919), False, 'import torchvision\n'), ((5940, 6035), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=True, download=True,\n transform=transform)\n', (5968, 6035), False, 'import torchvision\n'), ((6055, 6151), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 
'data_dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=False, download=True,\n transform=transform)\n', (6083, 6151), False, 'import torchvision\n'), ((6480, 6510), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (6499, 6510), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6531, 6561), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (6550, 6561), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6581, 6694), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler', 'num_workers': 'num_workers'}), '(trainset, batch_size=batch_size, sampler=\n train_sampler, num_workers=num_workers)\n', (6608, 6694), False, 'import torch\n'), ((6725, 6838), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'batch_size', 'sampler': 'valid_sampler', 'num_workers': 'num_workers'}), '(validset, batch_size=batch_size, sampler=\n valid_sampler, num_workers=num_workers)\n', (6752, 6838), False, 'import torch\n'), ((6868, 6971), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(testset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (6895, 6971), False, 'import torch\n'), ((7248, 7343), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=True, download=True,\n transform=transform)\n', (7276, 7343), False, 'import torchvision\n'), ((7364, 7459), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=True, download=True,\n transform=transform)\n', (7392, 7459), False, 'import torchvision\n'), ((7479, 7575), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=data_dir, train=False, download=True,\n transform=transform)\n', (7507, 7575), False, 'import torchvision\n'), ((7904, 7934), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (7923, 7934), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((7955, 7985), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (7974, 7985), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((8005, 8100), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(num_train - split)', 'sampler': 'train_sampler'}), '(trainset, batch_size=num_train - split, sampler\n =train_sampler)\n', (8032, 8100), False, 'import torch\n'), ((8125, 8203), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'split', 'sampler': 'valid_sampler'}), '(validset, batch_size=split, sampler=valid_sampler)\n', (8152, 8203), False, 'import torch\n'), ((8781, 8829), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (8811, 8829), False, 'import torch\n'), ((8845, 8893), 
'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_valid', 'y_valid'], {}), '(x_valid, y_valid)\n', (8875, 8893), False, 'import torch\n'), ((8908, 8954), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (8938, 8954), False, 'import torch\n'), ((8974, 9071), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(4)'}), '(trainset, batch_size=batch_size, shuffle=\n shuffle, num_workers=4)\n', (9001, 9071), False, 'import torch\n'), ((9094, 9188), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(validset, batch_size=batch_size, shuffle=False,\n num_workers=4)\n', (9121, 9188), False, 'import torch\n'), ((9211, 9304), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(testset, batch_size=batch_size, shuffle=False,\n num_workers=4)\n', (9238, 9304), False, 'import torch\n'), ((9675, 9776), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['traindataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(4)'}), '(traindataset, batch_size=batch_size, shuffle=\n shuffle, num_workers=4)\n', (9702, 9776), False, 'import torch\n'), ((9872, 9971), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validdataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(validdataset, batch_size=batch_size, shuffle=\n False, num_workers=4)\n', (9899, 9971), False, 'import torch\n'), ((10064, 10162), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testdataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(testdataset, batch_size=batch_size, shuffle=\n False, num_workers=4)\n', (10091, 10162), False, 'import torch\n'), ((13641, 13668), 'numpy.mean', 'np.mean', (['x_train', '(0, 1, 2)'], {}), '(x_train, (0, 1, 2))\n', (13648, 13668), True, 'import numpy as np\n'), ((13679, 13705), 'numpy.std', 'np.std', (['x_train', '(0, 1, 2)'], {}), '(x_train, (0, 1, 2))\n', (13685, 13705), True, 'import numpy as np\n'), ((13934, 14037), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers'}), '(trainset, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n', (13961, 14037), False, 'import torch\n'), ((14061, 14165), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(validset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (14088, 14165), False, 'import torch\n'), ((14188, 14291), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(testset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (14215, 14291), False, 'import torch\n'), ((14775, 14795), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14789, 14795), True, 'import numpy as np\n'), ((14810, 14837), 'torch.zeros', 'torch.zeros', (['(num_total, d)'], {}), '((num_total, d))\n', (14821, 14837), False, 'import torch\n'), ((15099, 15119), 'numpy.arange', 'np.arange', (['num_total'], 
{}), '(num_total)\n', (15108, 15119), True, 'import numpy as np\n'), ((15124, 15150), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (15141, 15150), True, 'import numpy as np\n'), ((16120, 16168), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (16150, 16168), False, 'import torch\n'), ((16184, 16232), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_valid', 'y_valid'], {}), '(x_valid, y_valid)\n', (16214, 16232), False, 'import torch\n'), ((16247, 16293), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (16277, 16293), False, 'import torch\n'), ((16312, 16419), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(trainset, batch_size=batch_size, shuffle=\n shuffle, num_workers=num_workers)\n', (16339, 16419), False, 'import torch\n'), ((16442, 16546), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(validset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (16469, 16546), False, 'import torch\n'), ((16569, 16672), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(testset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (16596, 16672), False, 'import torch\n'), ((1425, 1455), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (1441, 1455), False, 'import torch\n'), ((6286, 6316), 'numpy.floor', 'np.floor', (['(val_size * num_train)'], {}), '(val_size * num_train)\n', (6294, 6316), True, 'import numpy as np\n'), ((6343, 6363), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6357, 6363), True, 'import numpy as np\n'), ((6372, 6398), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (6389, 6398), True, 'import numpy as np\n'), ((7710, 7740), 'numpy.floor', 'np.floor', (['(val_size * num_train)'], {}), '(val_size * num_train)\n', (7718, 7740), True, 'import numpy as np\n'), ((7767, 7787), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7781, 7787), True, 'import numpy as np\n'), ((7796, 7822), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (7813, 7822), True, 'import numpy as np\n'), ((8734, 8759), 'torch.randn_like', 'torch.randn_like', (['x_train'], {}), '(x_train)\n', (8750, 8759), False, 'import torch\n'), ((1471, 1503), 'torch.stack', 'torch.stack', (['rotated_imgs'], {'dim': '(0)'}), '(rotated_imgs, dim=0)\n', (1482, 1503), False, 'import torch\n'), ((2693, 2714), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2712, 2714), True, 'import torchvision.transforms as transforms\n'), ((3700, 3721), 'torch.tensor', 'torch.tensor', (['x_train'], {}), '(x_train)\n', (3712, 3721), False, 'import torch\n'), ((3723, 3744), 'torch.tensor', 'torch.tensor', (['y_train'], {}), '(y_train)\n', (3735, 3744), False, 'import torch\n'), ((3760, 3781), 'torch.tensor', 'torch.tensor', (['x_valid'], {}), '(x_valid)\n', (3772, 3781), False, 'import torch\n'), ((3783, 3804), 'torch.tensor', 'torch.tensor', (['y_valid'], {}), '(y_valid)\n', (3795, 3804), False, 'import torch\n'), ((5094, 
5115), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5113, 5115), True, 'import torchvision.transforms as transforms\n'), ((7202, 7223), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7221, 7223), True, 'import torchvision.transforms as transforms\n'), ((10916, 10930), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10927, 10930), False, 'import pickle\n'), ((14858, 14882), 'torch.rand', 'torch.rand', (['num_total', 'k'], {}), '(num_total, k)\n', (14868, 14882), False, 'import torch\n'), ((544, 572), 'numpy.transpose', 'np.transpose', (['img', '(1, 0, 2)'], {}), '(img, (1, 0, 2))\n', (556, 572), True, 'import numpy as np\n'), ((1007, 1028), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1026, 1028), True, 'import torchvision.transforms as transforms\n'), ((5199, 5235), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (5220, 5235), True, 'import torchvision.transforms as transforms\n'), ((5249, 5282), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (5280, 5282), True, 'import torchvision.transforms as transforms\n'), ((5296, 5371), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(5)'], {'translate': '(0.1, 0.1)', 'scale': '(0.9, 1.1)', 'shear': '(5)'}), '(5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5)\n', (5319, 5371), True, 'import torchvision.transforms as transforms\n'), ((5402, 5440), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.1)'}), '(brightness=0.1)\n', (5424, 5440), True, 'import torchvision.transforms as transforms\n'), ((5454, 5475), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5473, 5475), True, 'import torchvision.transforms as transforms\n'), ((5628, 5659), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (5648, 5659), True, 'import torchvision.transforms as transforms\n'), ((5759, 5790), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (5779, 5790), True, 'import torchvision.transforms as transforms\n'), ((643, 657), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (652, 657), True, 'import numpy as np\n'), ((12733, 12790), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)', 'padding_mode': '"""edge"""'}), "(32, padding=4, padding_mode='edge')\n", (12754, 12790), True, 'import torchvision.transforms as transforms\n'), ((12808, 12883), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(5)'], {'translate': '(0.1, 0.1)', 'scale': '(0.9, 1.1)', 'shear': '(5)'}), '(5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5)\n', (12831, 12883), True, 'import torchvision.transforms as transforms\n'), ((12922, 12960), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.1)'}), '(brightness=0.1)\n', (12944, 12960), True, 'import torchvision.transforms as transforms\n'), ((12978, 12999), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12997, 12999), True, 'import torchvision.transforms as transforms\n'), ((13147, 13168), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13166, 13168), True, 'import torchvision.transforms as transforms\n'), ((741, 755), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (750, 755), True, 'import 
numpy as np\n')] |
"""Relative Concentration Index."""
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>> and <NAME> <<EMAIL>>"
import numpy as np
from .._base import SingleGroupIndex, SpatialExplicitIndex
def _relative_concentration(data, group_pop_var, total_pop_var):
"""Calculate Relative Concentration index.
Parameters
----------
data : a geopandas DataFrame with a geometry column.
group_pop_var : string
The name of variable in data that contains the population size of the group of interest
total_pop_var : string
The name of variable in data that contains the total population of the unit
Returns
----------
statistic : float
Relative Concentration Index
core_data : a geopandas DataFrame
A geopandas DataFrame that contains the columns used to perform the estimate.
Notes
-----
Based on Massey, <NAME>., and <NAME>. "The dimensions of residential segregation." Social forces 67.2 (1988): 281-315.
Reference: :cite:`massey1988dimensions`.
"""
x = np.array(data[group_pop_var])
t = np.array(data[total_pop_var])
if any(t < x):
        raise ValueError(
            "Group of interest population must be less than or equal to the total population of the units."
        )
area = np.array(data.area)
y = t - x
X = x.sum()
Y = y.sum()
T = t.sum()
# Create the indexes according to the area ordering
des_ind = (-area).argsort()
asc_ind = area.argsort()
# A discussion about the extraction of n1 and n2 can be found in https://github.com/pysal/segregation/issues/43
n1 = np.where(((np.cumsum(t[asc_ind]) / T) < X / T) == False)[0][0] + 1
n2_aux = np.where(((np.cumsum(t[des_ind]) / T) < X / T) == False)[0][0] + 1
n2 = len(data) - n2_aux
n = data.shape[0]
T1 = t[asc_ind][0:n1].sum()
T2 = t[asc_ind][n2:n].sum()
RCO = (
(
((x[asc_ind] * area[asc_ind] / X).sum())
/ ((y[asc_ind] * area[asc_ind] / Y).sum())
)
- 1
) / (
(
((t[asc_ind] * area[asc_ind])[0:n1].sum() / T1)
/ ((t[asc_ind] * area[asc_ind])[n2:n].sum() / T2)
)
- 1
)
core_data = data[[group_pop_var, total_pop_var, data.geometry.name]]
return RCO, core_data
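def _example_relative_concentration():
    # Added toy example (not part of the original module); it assumes
    # geopandas and shapely are available, which the inputs above already
    # require. Two units: a small one holding most of the minority group
    # and a much larger one holding most of the remainder.
    import geopandas as gpd
    from shapely.geometry import box
    gdf = gpd.GeoDataFrame(
        {"minority": [40, 10], "total": [50, 50]},
        geometry=[box(0, 0, 1, 1), box(2, 0, 12, 10)],
    )
    rco, _ = _relative_concentration(gdf, "minority", "total")
    return rco  # positive: the minority is concentrated in the smaller unit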
class RelativeConcentration(SingleGroupIndex, SpatialExplicitIndex):
"""Relative Concentration Index.
Parameters
----------
data : pandas.DataFrame or geopandas.GeoDataFrame, required
        dataframe (or geodataframe, when using a spatial index) holding data for the locations of interest
group_pop_var : str, required
name of column on dataframe holding population totals for focal group
total_pop_var : str, required
name of column on dataframe holding total overall population
Attributes
----------
statistic : float
        Relative Concentration Index
core_data : a pandas DataFrame
A pandas DataFrame that contains the columns used to perform the estimate.
Notes
-----
Based on Massey, <NAME>., and <NAME>. "The dimensions of residential segregation." Social forces 67.2 (1988): 281-315.
Reference: :cite:`massey1988dimensions`.
"""
def __init__(
self, data, group_pop_var, total_pop_var, **kwargs,
):
"""Init."""
SingleGroupIndex.__init__(self, data, group_pop_var, total_pop_var)
SpatialExplicitIndex.__init__(self,)
aux = _relative_concentration(
self.data, self.group_pop_var, self.total_pop_var,
)
self.statistic = aux[0]
self.core_data = aux[1]
self._function = _relative_concentration
| [
"numpy.cumsum",
"numpy.array"
] | [((1092, 1121), 'numpy.array', 'np.array', (['data[group_pop_var]'], {}), '(data[group_pop_var])\n', (1100, 1121), True, 'import numpy as np\n'), ((1130, 1159), 'numpy.array', 'np.array', (['data[total_pop_var]'], {}), '(data[total_pop_var])\n', (1138, 1159), True, 'import numpy as np\n'), ((1331, 1350), 'numpy.array', 'np.array', (['data.area'], {}), '(data.area)\n', (1339, 1350), True, 'import numpy as np\n'), ((1670, 1691), 'numpy.cumsum', 'np.cumsum', (['t[asc_ind]'], {}), '(t[asc_ind])\n', (1679, 1691), True, 'import numpy as np\n'), ((1750, 1771), 'numpy.cumsum', 'np.cumsum', (['t[des_ind]'], {}), '(t[des_ind])\n', (1759, 1771), True, 'import numpy as np\n')] |
"""
Contingency table functions (:mod:`scipy.stats.contingency`)
============================================================
Functions for creating and analyzing contingency tables.
.. currentmodule:: scipy.stats.contingency
.. autosummary::
:toctree: generated/
chi2_contingency
crosstab
expected_freq
margins
association
"""
from functools import reduce
import numpy as np
from .stats import power_divergence
import math
from ._crosstab import crosstab
__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
'association']
def margins(a):
"""Return a list of the marginal sums of the array `a`.
Parameters
----------
a : ndarray
The array for which to compute the marginal sums.
Returns
-------
margsums : list of ndarrays
A list of length `a.ndim`. `margsums[k]` is the result
of summing `a` over all axes except `k`; it has the same
number of dimensions as `a`, but the length of each axis
except axis `k` will be 1.
Examples
--------
>>> a = np.arange(12).reshape(2, 6)
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> from scipy.stats.contingency import margins
>>> m0, m1 = margins(a)
>>> m0
array([[15],
[51]])
>>> m1
array([[ 6, 8, 10, 12, 14, 16]])
>>> b = np.arange(24).reshape(2,3,4)
>>> m0, m1, m2 = margins(b)
>>> m0
array([[[ 66]],
[[210]]])
>>> m1
array([[[ 60],
[ 92],
[124]]])
>>> m2
array([[[60, 66, 72, 78]]])
"""
margsums = []
ranged = list(range(a.ndim))
for k in ranged:
marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
margsums.append(marg)
return margsums
def expected_freq(observed):
"""
Compute the expected frequencies from a contingency table.
Given an n-dimensional contingency table of observed frequencies,
compute the expected frequencies for the table based on the marginal
sums under the assumption that the groups associated with each
dimension are independent.
Parameters
----------
observed : array_like
The table of observed frequencies. (While this function can handle
a 1-D array, that case is trivial. Generally `observed` is at
least 2-D.)
Returns
-------
expected : ndarray of float64
The expected frequencies, based on the marginal sums of the table.
Same shape as `observed`.
Examples
--------
>>> from scipy.stats.contingency import expected_freq
>>> observed = np.array([[10, 10, 20],[20, 20, 20]])
>>> expected_freq(observed)
array([[ 12., 12., 16.],
[ 18., 18., 24.]])
"""
# Typically `observed` is an integer array. If `observed` has a large
# number of dimensions or holds large values, some of the following
# computations may overflow, so we first switch to floating point.
observed = np.asarray(observed, dtype=np.float64)
# Create a list of the marginal sums.
margsums = margins(observed)
# Create the array of expected frequencies. The shapes of the
# marginal sums returned by apply_over_axes() are just what we
# need for broadcasting in the following product.
d = observed.ndim
expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
return expected
def chi2_contingency(observed, correction=True, lambda_=None):
"""Chi-square test of independence of variables in a contingency table.
This function computes the chi-square statistic and p-value for the
hypothesis test of independence of the observed frequencies in the
contingency table [1]_ `observed`. The expected frequencies are computed
based on the marginal sums under the assumption of independence; see
`scipy.stats.contingency.expected_freq`. The number of degrees of
freedom is (expressed using numpy functions and attributes)::
dof = observed.size - sum(observed.shape) + observed.ndim - 1
Parameters
----------
observed : array_like
The contingency table. The table contains the observed frequencies
(i.e. number of occurrences) in each category. In the two-dimensional
case, the table is often described as an "R x C table".
correction : bool, optional
If True, *and* the degrees of freedom is 1, apply Yates' correction
for continuity. The effect of the correction is to adjust each
observed value by 0.5 towards the corresponding expected value.
lambda_ : float or str, optional
By default, the statistic computed in this test is Pearson's
chi-squared statistic [2]_. `lambda_` allows a statistic from the
Cressie-Read power divergence family [3]_ to be used instead. See
`power_divergence` for details.
Returns
-------
chi2 : float
The test statistic.
p : float
The p-value of the test
dof : int
Degrees of freedom
expected : ndarray, same shape as `observed`
The expected frequencies, based on the marginal sums of the table.
See Also
--------
contingency.expected_freq
fisher_exact
chisquare
power_divergence
Notes
-----
An often quoted guideline for the validity of this calculation is that
the test should be used only if the observed and expected frequencies
in each cell are at least 5.
This is a test for the independence of different categories of a
population. The test is only meaningful when the dimension of
`observed` is two or more. Applying the test to a one-dimensional
table will always result in `expected` equal to `observed` and a
chi-square statistic equal to 0.
This function does not handle masked arrays, because the calculation
does not make sense with missing values.
Like stats.chisquare, this function computes a chi-square statistic;
the convenience this function provides is to figure out the expected
frequencies and degrees of freedom from the given contingency table.
If these were already known, and if the Yates' correction was not
required, one could use stats.chisquare. That is, if one calls::
chi2, p, dof, ex = chi2_contingency(obs, correction=False)
then the following is true::
(chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(),
ddof=obs.size - 1 - dof)
The `lambda_` argument was added in version 0.13.0 of scipy.
References
----------
.. [1] "Contingency table",
https://en.wikipedia.org/wiki/Contingency_table
.. [2] "Pearson's chi-squared test",
https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
.. [3] <NAME>. and Read, <NAME>., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
A two-way example (2 x 3):
>>> from scipy.stats import chi2_contingency
>>> obs = np.array([[10, 10, 20], [20, 20, 20]])
>>> chi2_contingency(obs)
(2.7777777777777777,
0.24935220877729619,
2,
array([[ 12., 12., 16.],
[ 18., 18., 24.]]))
Perform the test using the log-likelihood ratio (i.e. the "G-test")
instead of Pearson's chi-squared statistic.
>>> g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood")
>>> g, p
(2.7688587616781319, 0.25046668010954165)
A four-way example (2 x 2 x 2 x 2):
>>> obs = np.array(
... [[[[12, 17],
... [11, 16]],
... [[11, 12],
... [15, 16]]],
... [[[23, 15],
... [30, 22]],
... [[14, 17],
... [15, 16]]]])
>>> chi2_contingency(obs)
(8.7584514426741897,
0.64417725029295503,
11,
array([[[[ 14.15462386, 14.15462386],
[ 16.49423111, 16.49423111]],
[[ 11.2461395 , 11.2461395 ],
[ 13.10500554, 13.10500554]]],
[[[ 19.5591166 , 19.5591166 ],
[ 22.79202844, 22.79202844]],
[[ 15.54012004, 15.54012004],
[ 18.10873492, 18.10873492]]]]))
"""
observed = np.asarray(observed)
if np.any(observed < 0):
raise ValueError("All values in `observed` must be nonnegative.")
if observed.size == 0:
raise ValueError("No data; `observed` has size 0.")
expected = expected_freq(observed)
if np.any(expected == 0):
# Include one of the positions where expected is zero in
# the exception message.
zeropos = list(zip(*np.nonzero(expected == 0)))[0]
raise ValueError("The internally computed table of expected "
"frequencies has a zero element at %s." % (zeropos,))
# The degrees of freedom
dof = expected.size - sum(expected.shape) + expected.ndim - 1
if dof == 0:
# Degenerate case; this occurs when `observed` is 1D (or, more
# generally, when it has only one nontrivial dimension). In this
# case, we also have observed == expected, so chi2 is 0.
chi2 = 0.0
p = 1.0
else:
if dof == 1 and correction:
# Adjust `observed` according to Yates' correction for continuity.
observed = observed + 0.5 * np.sign(expected - observed)
chi2, p = power_divergence(observed, expected,
ddof=observed.size - 1 - dof, axis=None,
lambda_=lambda_)
return chi2, p, dof, expected
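def _check_chisquare_equivalence():
    # Added self-check (illustrative, assuming scipy is importable) of the
    # stats.chisquare equivalence stated in the docstring above.
    from scipy import stats
    obs = np.array([[10, 10, 20], [20, 20, 20]])
    chi2, p, dof, ex = chi2_contingency(obs, correction=False)
    chi2_ref, p_ref = stats.chisquare(obs.ravel(), f_exp=ex.ravel(),
                                      ddof=obs.size - 1 - dof)
    assert np.allclose([chi2, p], [chi2_ref, p_ref])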
def association(observed, method="cramer", correction=False, lambda_=None):
"""Calculates degree of association between two nominal variables.
The function provides the option for computing one of three measures of
association between two nominal variables from the data given in a 2d
contingency table: Tschuprow's T, Pearson's Contingency Coefficient
and Cramer's V.
Parameters
----------
observed : array-like
The array of observed values
method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
The association test statistic.
correction : bool, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
lambda_ : float or str, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
Returns
-------
statistic : float
Value of the test statistic
Notes
-----
Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all
measure the degree to which two nominal or ordinal variables are related,
or the level of their association. This differs from correlation, although
many often mistakenly consider them equivalent. Correlation measures in
    what way two variables are related, whereas association measures how
related the variables are. As such, association does not subsume
independent variables, and is rather a test of independence. A value of
1.0 indicates perfect association, and 0.0 means the variables have no
association.
Both the Cramer's V and Tschuprow's T are extensions of the phi
coefficient. Moreover, due to the close relationship between the
Cramer's V and Tschuprow's T the returned values can often be similar
or even equivalent. They are likely to diverge more as the array shape
diverges from a 2x2.
References
----------
.. [1] "Tschuprow's T",
https://en.wikipedia.org/wiki/Tschuprow's_T
.. [2] <NAME>. (1939)
Principles of the Mathematical Theory of Correlation;
translated by <NAME>. <NAME> & Co.
.. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
.. [4] "Nominal Association: Phi and Cramer's V",
http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
.. [5] <NAME>, "Association Between Variables",
http://uregina.ca/~gingrich/ch11a.pdf
Examples
--------
An example with a 4x2 contingency table:
>>> from scipy.stats.contingency import association
>>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
Pearson's contingency coefficient
>>> association(obs4x2, method="pearson")
0.18303298140595667
Cramer's V
>>> association(obs4x2, method="cramer")
0.18617813077483678
Tschuprow's T
>>> association(obs4x2, method="tschuprow")
0.14146478765062995
"""
arr = np.asarray(observed)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError("`observed` must be an integer array.")
if len(arr.shape) != 2:
raise ValueError("method only accepts 2d arrays")
chi2_stat = chi2_contingency(arr, correction=correction,
lambda_=lambda_)
phi2 = chi2_stat[0] / arr.sum()
n_rows, n_cols = arr.shape
if method == "cramer":
value = phi2 / min(n_cols - 1, n_rows - 1)
elif method == "tschuprow":
value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
elif method == 'pearson':
value = phi2 / (1 + phi2)
else:
raise ValueError("Invalid argument value: 'method' argument must "
"be 'cramer', 'tschuprow', or 'pearson'")
return math.sqrt(value)
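def _check_square_table_association():
    # Added illustration: for square tables Cramer's V equals Tschuprow's T,
    # because min(r - 1, c - 1) == sqrt((r - 1) * (c - 1)) when r == c.
    obs = np.array([[10, 20], [30, 40]])
    assert math.isclose(association(obs, method="cramer"),
                        association(obs, method="tschuprow"))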
| [
"numpy.apply_over_axes",
"math.sqrt",
"numpy.asarray",
"numpy.any",
"numpy.nonzero",
"numpy.sign",
"functools.reduce",
"numpy.issubdtype"
] | [((3034, 3072), 'numpy.asarray', 'np.asarray', (['observed'], {'dtype': 'np.float64'}), '(observed, dtype=np.float64)\n', (3044, 3072), True, 'import numpy as np\n'), ((8320, 8340), 'numpy.asarray', 'np.asarray', (['observed'], {}), '(observed)\n', (8330, 8340), True, 'import numpy as np\n'), ((8348, 8368), 'numpy.any', 'np.any', (['(observed < 0)'], {}), '(observed < 0)\n', (8354, 8368), True, 'import numpy as np\n'), ((8578, 8599), 'numpy.any', 'np.any', (['(expected == 0)'], {}), '(expected == 0)\n', (8584, 8599), True, 'import numpy as np\n'), ((12583, 12603), 'numpy.asarray', 'np.asarray', (['observed'], {}), '(observed)\n', (12593, 12603), True, 'import numpy as np\n'), ((13385, 13401), 'math.sqrt', 'math.sqrt', (['value'], {}), '(value)\n', (13394, 13401), False, 'import math\n'), ((1709, 1769), 'numpy.apply_over_axes', 'np.apply_over_axes', (['np.sum', 'a', '[j for j in ranged if j != k]'], {}), '(np.sum, a, [j for j in ranged if j != k])\n', (1727, 1769), True, 'import numpy as np\n'), ((3375, 3404), 'functools.reduce', 'reduce', (['np.multiply', 'margsums'], {}), '(np.multiply, margsums)\n', (3381, 3404), False, 'from functools import reduce\n'), ((12615, 12651), 'numpy.issubdtype', 'np.issubdtype', (['arr.dtype', 'np.integer'], {}), '(arr.dtype, np.integer)\n', (12628, 12651), True, 'import numpy as np\n'), ((13118, 13156), 'math.sqrt', 'math.sqrt', (['((n_rows - 1) * (n_cols - 1))'], {}), '((n_rows - 1) * (n_cols - 1))\n', (13127, 13156), False, 'import math\n'), ((9431, 9459), 'numpy.sign', 'np.sign', (['(expected - observed)'], {}), '(expected - observed)\n', (9438, 9459), True, 'import numpy as np\n'), ((8727, 8752), 'numpy.nonzero', 'np.nonzero', (['(expected == 0)'], {}), '(expected == 0)\n', (8737, 8752), True, 'import numpy as np\n')] |
import importlib
import logging
import os
def set_log_level(debug):
os.environ['HPOLIB_DEBUG'] = 'true' if debug else 'false'
import hpolib.container.client_abstract_benchmark as client
importlib.reload(client)
def test_debug_env_variable_1():
set_log_level(False)
from hpolib.container.client_abstract_benchmark import log_level
assert log_level == logging.INFO
set_log_level(True)
from hpolib.container.client_abstract_benchmark import log_level
assert log_level == logging.DEBUG
def test_debug_container():
# Test if the debug option works. Check if some debug output from the server is visible.
set_log_level(True)
from hpolib.container.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark as Benchmark
from hpolib.util.openml_data_manager import get_openmlcc18_taskids
task_id = get_openmlcc18_taskids()[0]
b = Benchmark(task_id=task_id,
container_name='xgboost_benchmark',
container_source='library://phmueller/automl')
cs = b.get_configuration_space()
assert cs is not None
set_log_level(False)
def test_benchmark_encoder():
from enum import Enum
class test_enum(Enum):
obj = 'name'
def __str__(self):
return str(self.value)
from hpolib.container.server_abstract_benchmark import BenchmarkEncoder
import json
import numpy as np
enum_obj = test_enum.obj
enum_obj_str = json.dumps(enum_obj, cls=BenchmarkEncoder)
assert enum_obj_str == '"name"'
array = np.array([1, 2, 3, 4])
array_str = json.dumps(array, cls=BenchmarkEncoder)
assert array_str == '[1, 2, 3, 4]'
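def _sketch_benchmark_encoder():
    # Added sketch (not the real BenchmarkEncoder): one way such an encoder
    # could stringify Enums and convert numpy arrays to lists, matching the
    # assertions above. The actual hpolib implementation may differ.
    import json
    from enum import Enum
    import numpy as np

    class SketchEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, Enum):
                return str(obj)
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            return super().default(obj)

    return SketchEncoder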
if __name__ == '__main__':
test_debug_env_variable_1()
test_debug_container()
| [
"hpolib.container.benchmarks.ml.xgboost_benchmark.XGBoostBenchmark",
"json.dumps",
"hpolib.util.openml_data_manager.get_openmlcc18_taskids",
"importlib.reload",
"numpy.array"
] | [((200, 224), 'importlib.reload', 'importlib.reload', (['client'], {}), '(client)\n', (216, 224), False, 'import importlib\n'), ((890, 1003), 'hpolib.container.benchmarks.ml.xgboost_benchmark.XGBoostBenchmark', 'Benchmark', ([], {'task_id': 'task_id', 'container_name': '"""xgboost_benchmark"""', 'container_source': '"""library://phmueller/automl"""'}), "(task_id=task_id, container_name='xgboost_benchmark',\n container_source='library://phmueller/automl')\n", (899, 1003), True, 'from hpolib.container.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark as Benchmark\n'), ((1459, 1501), 'json.dumps', 'json.dumps', (['enum_obj'], {'cls': 'BenchmarkEncoder'}), '(enum_obj, cls=BenchmarkEncoder)\n', (1469, 1501), False, 'import json\n'), ((1551, 1573), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1559, 1573), True, 'import numpy as np\n'), ((1590, 1629), 'json.dumps', 'json.dumps', (['array'], {'cls': 'BenchmarkEncoder'}), '(array, cls=BenchmarkEncoder)\n', (1600, 1629), False, 'import json\n'), ((853, 877), 'hpolib.util.openml_data_manager.get_openmlcc18_taskids', 'get_openmlcc18_taskids', ([], {}), '()\n', (875, 877), False, 'from hpolib.util.openml_data_manager import get_openmlcc18_taskids\n')] |
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import Normalize, Colormap
from matplotlib.colorbar import ColorbarBase
import matplotlib
# Rebuild the font cache so 'Times New Roman' is picked up. Note that these
# are private matplotlib.font_manager APIs, removed in newer matplotlib.
del matplotlib.font_manager.weight_dict['roman']
matplotlib.font_manager._rebuild()
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 10
# plt.rcParams['xtick.direction'] = 'in'
# plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.linewidth'] = 1.0
plt.rcParams['xtick.major.width'] = 1.0
plt.rcParams['ytick.major.width'] = 1.0
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.linestyle'] = '-'
plt.rcParams["legend.markerscale"] = 2
plt.rcParams["legend.fancybox"] = False
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.edgecolor"] = 'black'
fig = plt.figure(figsize=(4, 2), dpi=200)
# fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fig.subplots_adjust(left=0., right=1., bottom=0., top=1.)
# fig, (ax1, ax2, ax3) = plt.subplot((), projection='3d')
def draw(texture, c, lab, erase_number=True):
if erase_number:
plt.tick_params(labelbottom=False,
labelleft=False,
labelright=False,
labeltop=False)
# ax.set_xlabel('$\\varphi_1 [{\\rm deg}]$')
# ax.set_ylabel('$\\phi [\\rm deg]$')
# ax.set_zlabel('$\\varphi_2 [\\rm deg]$')
ax.xaxis._axinfo['juggled'] = (2, 0, 1)
ax.yaxis._axinfo['juggled'] = (2, 1, 0)
ax.zaxis._axinfo['juggled'] = (2, 2, 2)
ax.set_xlim(0, 90)
ax.set_ylim(0, 90)
ax.invert_yaxis()
ax.set_zlim(0, 90)
ax.invert_zaxis()
aff = np.diag([1, 1, 1, 1]) # Positioning
aff[0][3] = -20
aff[1][3] = 0
ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), aff)
lim_tex = texture[np.where((texture[:, 0] < 90.) & (
texture[:, 1] < 90.) & (texture[:, 2] < 90.))]
ax.scatter(lim_tex[:, 0], lim_tex[:, 1], lim_tex[:, 2],
color=c, marker='o', label=lab)
plt.yticks(range(0, 90 + 1, 30))
plt.xticks(range(0, 90 + 1, 30))
ax.set_zticks(range(0, 90 + 1, 30))
ax.legend()
def draw_all(texture, erase_number=True):
if erase_number:
plt.tick_params(labelbottom=False,
labelleft=False,
labelright=False,
labeltop=False)
# ax.set_xlabel('$\\varphi_1 [{\\rm deg}]$')
# ax.set_ylabel('$\\phi [\\rm deg]$')
# ax.set_zlabel('$\\varphi_2 [\\rm deg]$')
ax.xaxis._axinfo['juggled'] = (2, 0, 1)
ax.yaxis._axinfo['juggled'] = (2, 1, 0)
ax.zaxis._axinfo['juggled'] = (2, 2, 2)
ax.set_xlim(0, 360)
ax.set_ylim(0, 180)
ax.invert_yaxis()
ax.set_zlim(0, 360)
ax.invert_zaxis()
aff = np.diag([1, 0.5, 1, 1]) # Positioning
aff[0][3] = 100 # x
aff[1][3] = 100 # y
ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), aff)
ax.plot(texture[:, 0], texture[:, 1], texture[:, 2],
"o", color="blue", ms=2, mew=0.5)
plt.yticks(range(0, 180 + 1, 90))
plt.xticks(range(0, 360 + 1, 90))
ax.set_zticks(range(0, 360 + 1, 90))
def draw_voxels(voxel_data, use_alpha=False, erase_number=True):
if erase_number:
# plt.axis('off')
ax.grid(False)
plt.tick_params(labelbottom=False,
labelleft=False,
labelright=False,
labeltop=False)
ax.set_xlim(0, 32)
ax.set_ylim(0, 16)
ax.set_zlim(0, 32)
ax.xaxis._axinfo['juggled'] = (2, 0, 1)
ax.yaxis._axinfo['juggled'] = (2, 1, 0)
ax.zaxis._axinfo['juggled'] = (2, 2, 2)
# ax.invert_yaxis()
# ax.invert_zaxis()
aff = np.diag([0.5, 0.25, 1, 1]) # Positioning
aff[0][3] = 0 # x
aff[1][3] = 10 # y
ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), aff)
    # np.bool was removed in NumPy 1.24; the builtin bool serves as the dtype.
    voxels = np.ceil(voxel_data[:, :, :, 0]).astype(bool)
colors = np.empty((32, 16, 32, 4), dtype=np.float32)
vmax = np.max(voxel_data[:, :, :, 0])
print(vmax)
# vmax = 0.225
if use_alpha:
colors[:, :, :, 0] = 0.
colors[:, :, :, 1] = 0.
colors[:, :, :, 2] = 0.
colors[:, :, :, 3] = voxel_data[:, :, :, 0]
else:
colors[:, :, :, :] = cm.jet(voxel_data[:, :, :, 0] / vmax)
ax_cb = fig.add_axes([0.9, 0.2, 0.025, 0.5])
norm = Normalize(vmin=0., vmax=vmax)
cmap = cm.get_cmap('binary' if use_alpha else 'jet')
cbar = ColorbarBase(ax_cb, cmap=cmap, norm=norm, orientation='vertical')
# cbar.set_ticks(np.arange(0, vmax + 0.075, 0.075))
cbar.set_clim(vmin=0., vmax=1.)
cbar.solids.set(alpha=1)
ax.voxels(voxels, facecolors=colors)
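# Hypothetical call sequence (added): draw a sparse random occupancy grid of
# the (32, 16, 32, 1) shape this function expects.
# vox = np.random.rand(32, 16, 32, 1) * (np.random.rand(32, 16, 32, 1) > 0.95)
# draw_voxels(vox, use_alpha=False)
# Show()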
def Show():
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.ceil",
"matplotlib.colors.Normalize",
"matplotlib.cm.get_cmap",
"matplotlib.font_manager._rebuild",
"numpy.empty",
"matplotlib.pyplot.tick_params",
"mpl_toolkits.mplot3d.Axes3D.get_proj",
"matplotlib.cm.jet",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.wher... | [((286, 320), 'matplotlib.font_manager._rebuild', 'matplotlib.font_manager._rebuild', ([], {}), '()\n', (318, 320), False, 'import matplotlib\n'), ((878, 913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 2)', 'dpi': '(200)'}), '(figsize=(4, 2), dpi=200)\n', (888, 913), True, 'from matplotlib import pyplot as plt\n'), ((1723, 1744), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1730, 1744), True, 'import numpy as np\n'), ((2834, 2857), 'numpy.diag', 'np.diag', (['[1, 0.5, 1, 1]'], {}), '([1, 0.5, 1, 1])\n', (2841, 2857), True, 'import numpy as np\n'), ((3764, 3790), 'numpy.diag', 'np.diag', (['[0.5, 0.25, 1, 1]'], {}), '([0.5, 0.25, 1, 1])\n', (3771, 3790), True, 'import numpy as np\n'), ((3986, 4029), 'numpy.empty', 'np.empty', (['(32, 16, 32, 4)'], {'dtype': 'np.float32'}), '((32, 16, 32, 4), dtype=np.float32)\n', (3994, 4029), True, 'import numpy as np\n'), ((4041, 4071), 'numpy.max', 'np.max', (['voxel_data[:, :, :, 0]'], {}), '(voxel_data[:, :, :, 0])\n', (4047, 4071), True, 'import numpy as np\n'), ((4411, 4441), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0.0)', 'vmax': 'vmax'}), '(vmin=0.0, vmax=vmax)\n', (4420, 4441), False, 'from matplotlib.colors import Normalize, Colormap\n'), ((4452, 4497), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (["('binary' if use_alpha else 'jet')"], {}), "('binary' if use_alpha else 'jet')\n", (4463, 4497), True, 'import matplotlib.cm as cm\n'), ((4509, 4574), 'matplotlib.colorbar.ColorbarBase', 'ColorbarBase', (['ax_cb'], {'cmap': 'cmap', 'norm': 'norm', 'orientation': '"""vertical"""'}), "(ax_cb, cmap=cmap, norm=norm, orientation='vertical')\n", (4521, 4574), False, 'from matplotlib.colorbar import ColorbarBase\n'), ((4755, 4765), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4763, 4765), True, 'from matplotlib import pyplot as plt\n'), ((1171, 1260), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)', 'labelleft': '(False)', 'labelright': '(False)', 'labeltop': '(False)'}), '(labelbottom=False, labelleft=False, labelright=False,\n labeltop=False)\n', (1186, 1260), True, 'from matplotlib import pyplot as plt\n'), ((1879, 1965), 'numpy.where', 'np.where', (['((texture[:, 0] < 90.0) & (texture[:, 1] < 90.0) & (texture[:, 2] < 90.0))'], {}), '((texture[:, 0] < 90.0) & (texture[:, 1] < 90.0) & (texture[:, 2] <\n 90.0))\n', (1887, 1965), True, 'import numpy as np\n'), ((2279, 2368), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)', 'labelleft': '(False)', 'labelright': '(False)', 'labeltop': '(False)'}), '(labelbottom=False, labelleft=False, labelright=False,\n labeltop=False)\n', (2294, 2368), True, 'from matplotlib import pyplot as plt\n'), ((3347, 3436), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)', 'labelleft': '(False)', 'labelright': '(False)', 'labeltop': '(False)'}), '(labelbottom=False, labelleft=False, labelright=False,\n labeltop=False)\n', (3362, 3436), True, 'from matplotlib import pyplot as plt\n'), ((4312, 4349), 'matplotlib.cm.jet', 'cm.jet', (['(voxel_data[:, :, :, 0] / vmax)'], {}), '(voxel_data[:, :, :, 0] / vmax)\n', (4318, 4349), True, 'import matplotlib.cm as cm\n'), ((1831, 1850), 'mpl_toolkits.mplot3d.Axes3D.get_proj', 'Axes3D.get_proj', (['ax'], {}), '(ax)\n', (1846, 1850), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2956, 2975), 'mpl_toolkits.mplot3d.Axes3D.get_proj', 'Axes3D.get_proj', (['ax'], {}), '(ax)\n', (2971, 2975), False, 
'from mpl_toolkits.mplot3d import Axes3D\n'), ((3886, 3905), 'mpl_toolkits.mplot3d.Axes3D.get_proj', 'Axes3D.get_proj', (['ax'], {}), '(ax)\n', (3901, 3905), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3925, 3956), 'numpy.ceil', 'np.ceil', (['voxel_data[:, :, :, 0]'], {}), '(voxel_data[:, :, :, 0])\n', (3932, 3956), True, 'import numpy as np\n')] |
import numpy as np
from BC_patchbvae import BCPatchBVAE
from goalrepresent.datasets.image.imagedataset import LENIADataset
from goalrepresent.helper.randomhelper import set_seed
if __name__ == '__main__':
set_seed(0)
# load reference dataset
dataset_config = LENIADataset.default_config()
dataset_config.data_root = '/gpfswork/rech/zaj/ucf28eq/data/lenia_datasets/data_005/'
dataset_config.split = 'train'
dataset = LENIADataset(config=dataset_config)
# load model
bc_patchbvae = BCPatchBVAE(set_BC_range=False)
z_values = np.zeros((dataset.n_images, bc_patchbvae.n_latents))
for idx in range(dataset.n_images):
im = dataset.get_image(idx).squeeze().numpy()
cur_z = bc_patchbvae.calc_embedding(im)
z_values[idx] = cur_z
np.savez('reference_dataset_patchbvae_descriptors_values.npz',
z=z_values)
np.savez('reference_dataset_patchbvae_descriptors_range.npz',
low=np.percentile(z_values, 0.01), high=np.percentile(z_values, 99.9))
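    # Hypothetical follow-up (added): the saved range file can be used to
    # rescale new descriptor values into [0, 1]; keys match the savez above.
    # rng = np.load('reference_dataset_patchbvae_descriptors_range.npz')
    # z_norm = (z_values - rng['low']) / (rng['high'] - rng['low'])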
| [
"BC_patchbvae.BCPatchBVAE",
"numpy.zeros",
"numpy.percentile",
"goalrepresent.helper.randomhelper.set_seed",
"goalrepresent.datasets.image.imagedataset.LENIADataset",
"numpy.savez",
"goalrepresent.datasets.image.imagedataset.LENIADataset.default_config"
] | [((212, 223), 'goalrepresent.helper.randomhelper.set_seed', 'set_seed', (['(0)'], {}), '(0)\n', (220, 223), False, 'from goalrepresent.helper.randomhelper import set_seed\n'), ((275, 304), 'goalrepresent.datasets.image.imagedataset.LENIADataset.default_config', 'LENIADataset.default_config', ([], {}), '()\n', (302, 304), False, 'from goalrepresent.datasets.image.imagedataset import LENIADataset\n'), ((444, 479), 'goalrepresent.datasets.image.imagedataset.LENIADataset', 'LENIADataset', ([], {'config': 'dataset_config'}), '(config=dataset_config)\n', (456, 479), False, 'from goalrepresent.datasets.image.imagedataset import LENIADataset\n'), ((517, 548), 'BC_patchbvae.BCPatchBVAE', 'BCPatchBVAE', ([], {'set_BC_range': '(False)'}), '(set_BC_range=False)\n', (528, 548), False, 'from BC_patchbvae import BCPatchBVAE\n'), ((565, 617), 'numpy.zeros', 'np.zeros', (['(dataset.n_images, bc_patchbvae.n_latents)'], {}), '((dataset.n_images, bc_patchbvae.n_latents))\n', (573, 617), True, 'import numpy as np\n'), ((795, 869), 'numpy.savez', 'np.savez', (['"""reference_dataset_patchbvae_descriptors_values.npz"""'], {'z': 'z_values'}), "('reference_dataset_patchbvae_descriptors_values.npz', z=z_values)\n", (803, 869), True, 'import numpy as np\n'), ((967, 996), 'numpy.percentile', 'np.percentile', (['z_values', '(0.01)'], {}), '(z_values, 0.01)\n', (980, 996), True, 'import numpy as np\n'), ((1003, 1032), 'numpy.percentile', 'np.percentile', (['z_values', '(99.9)'], {}), '(z_values, 99.9)\n', (1016, 1032), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import os
import time
import numpy as np
import pandas as pd
from collections import defaultdict
from utils import read_vocab,write_vocab,build_vocab,Tokenizer,padding_idx,timeSince
from env import R2RBatch
from model import EncoderLSTM, AttnDecoderLSTM
from agent import Seq2SeqAgent
from eval import Evaluation
TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
RESULT_DIR = 'tasks/R2R/results/'
SNAPSHOT_DIR = 'tasks/R2R/snapshots/'
PLOT_DIR = 'tasks/R2R/plots/'
IMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'
MAX_INPUT_LENGTH = 80
features = IMAGENET_FEATURES
# batch_size = 100
batch_size = 1
max_episode_len = 20
word_embedding_size = 256
action_embedding_size = 32
hidden_size = 512
bidirectional = False
dropout_ratio = 0.5
feedback_method = 'sample' # teacher or sample
learning_rate = 0.0001
weight_decay = 0.0005
n_iters = 5000 if feedback_method == 'teacher' else 20000
model_prefix = 'seq2seq_%s_imagenet' % (feedback_method)
def train(train_env, encoder, decoder, n_iters, log_every=100, val_envs={}):
''' Train on training set, validating on both seen and unseen. '''
agent = Seq2SeqAgent(train_env, "", encoder, decoder, max_episode_len)
print('Training with %s feedback' % feedback_method)
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
data_log = defaultdict(list)
start = time.time()
for idx in range(0, n_iters, log_every):
interval = min(log_every,n_iters-idx)
iter = idx + interval
data_log['iteration'].append(iter)
# Train for log_every interval
agent.train(encoder_optimizer, decoder_optimizer, interval, feedback=feedback_method)
train_losses = np.array(agent.losses)
assert len(train_losses) == interval
train_loss_avg = np.average(train_losses)
data_log['train loss'].append(train_loss_avg)
loss_str = 'train loss: %.4f' % train_loss_avg
# Run validation
for env_name, (env, evaluator) in val_envs.items():
agent.env = env
agent.results_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, iter)
# Get validation loss under the same conditions as training
agent.test(use_dropout=True, feedback=feedback_method, allow_cheat=True)
val_losses = np.array(agent.losses)
val_loss_avg = np.average(val_losses)
data_log['%s loss' % env_name].append(val_loss_avg)
# Get validation distance from goal under test evaluation conditions
agent.test(use_dropout=False, feedback='argmax')
agent.write_results()
score_summary, _ = evaluator.score(agent.results_path)
loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
for metric,val in score_summary.items():
data_log['%s %s' % (env_name,metric)].append(val)
if metric in ['success_rate']:
loss_str += ', %s: %.3f' % (metric, val)
agent.env = train_env
print('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str))
df = pd.DataFrame(data_log)
df.set_index('iteration')
df_path = '%s%s_log.csv' % (PLOT_DIR, model_prefix)
df.to_csv(df_path)
split_string = "-".join(train_env.splits)
enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
agent.save(enc_path, dec_path)
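        # Hypothetical reuse of the snapshots saved above (added note; a
        # matching agent.load step is assumed and not shown in this file):
        # agent.load(enc_path, dec_path)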
def setup():
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Check for vocabs
if not os.path.exists(TRAIN_VOCAB):
write_vocab(build_vocab(splits=['train']), TRAIN_VOCAB)
if not os.path.exists(TRAINVAL_VOCAB):
write_vocab(build_vocab(splits=['train','val_seen','val_unseen']), TRAINVAL_VOCAB)
def test_submission():
''' Train on combined training and validation sets, and generate test submission. '''
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAINVAL_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)
train_env = R2RBatch(features, batch_size=batch_size, splits=['train', 'val_seen', 'val_unseen'], tokenizer=tok)
# Build models and train
enc_hidden_size = hidden_size//2 if bidirectional else hidden_size
encoder = EncoderLSTM(len(vocab), word_embedding_size, enc_hidden_size, padding_idx,
dropout_ratio, bidirectional=bidirectional).cuda()
decoder = AttnDecoderLSTM(Seq2SeqAgent.n_inputs(), Seq2SeqAgent.n_outputs(),
action_embedding_size, hidden_size, dropout_ratio).cuda()
train(train_env, encoder, decoder, n_iters)
# Generate test submission
test_env = R2RBatch(features, batch_size=batch_size, splits=['test'], tokenizer=tok)
agent = Seq2SeqAgent(test_env, "", encoder, decoder, max_episode_len)
agent.results_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, 'test', 20000)
agent.test(use_dropout=False, feedback='argmax')
agent.write_results()
def train_val():
''' Train on the training set, and validate on seen and unseen splits. '''
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)
train_env = R2RBatch(features, batch_size=batch_size, splits=['train'], tokenizer=tok)
    # Create validation environments
val_envs = {split: (R2RBatch(features, batch_size=batch_size, splits=[split],
tokenizer=tok), Evaluation([split])) for split in ['val_seen', 'val_unseen']}
# Build models and train
enc_hidden_size = hidden_size//2 if bidirectional else hidden_size
encoder = EncoderLSTM(len(vocab), word_embedding_size, enc_hidden_size, padding_idx,
dropout_ratio, bidirectional=bidirectional).cuda()
decoder = AttnDecoderLSTM(Seq2SeqAgent.n_inputs(), Seq2SeqAgent.n_outputs(),
action_embedding_size, hidden_size, dropout_ratio).cuda()
train(train_env, encoder, decoder, n_iters, val_envs=val_envs)
if __name__ == "__main__":
train_val()
#test_submission()
| [
"pandas.DataFrame",
"agent.Seq2SeqAgent.n_inputs",
"numpy.average",
"agent.Seq2SeqAgent",
"torch.manual_seed",
"torch.cuda.manual_seed",
"os.path.exists",
"time.time",
"collections.defaultdict",
"utils.Tokenizer",
"utils.build_vocab",
"agent.Seq2SeqAgent.n_outputs",
"numpy.array",
"utils.r... | [((1297, 1359), 'agent.Seq2SeqAgent', 'Seq2SeqAgent', (['train_env', '""""""', 'encoder', 'decoder', 'max_episode_len'], {}), "(train_env, '', encoder, decoder, max_episode_len)\n", (1309, 1359), False, 'from agent import Seq2SeqAgent\n'), ((1637, 1654), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1648, 1654), False, 'from collections import defaultdict\n'), ((1667, 1678), 'time.time', 'time.time', ([], {}), '()\n', (1676, 1678), False, 'import time\n'), ((3946, 3966), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (3963, 3966), False, 'import torch\n'), ((3971, 3996), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (3993, 3996), False, 'import torch\n'), ((4471, 4497), 'utils.read_vocab', 'read_vocab', (['TRAINVAL_VOCAB'], {}), '(TRAINVAL_VOCAB)\n', (4481, 4497), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((4508, 4564), 'utils.Tokenizer', 'Tokenizer', ([], {'vocab': 'vocab', 'encoding_length': 'MAX_INPUT_LENGTH'}), '(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)\n', (4517, 4564), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((4581, 4685), 'env.R2RBatch', 'R2RBatch', (['features'], {'batch_size': 'batch_size', 'splits': "['train', 'val_seen', 'val_unseen']", 'tokenizer': 'tok'}), "(features, batch_size=batch_size, splits=['train', 'val_seen',\n 'val_unseen'], tokenizer=tok)\n", (4589, 4685), False, 'from env import R2RBatch\n'), ((5193, 5266), 'env.R2RBatch', 'R2RBatch', (['features'], {'batch_size': 'batch_size', 'splits': "['test']", 'tokenizer': 'tok'}), "(features, batch_size=batch_size, splits=['test'], tokenizer=tok)\n", (5201, 5266), False, 'from env import R2RBatch\n'), ((5279, 5340), 'agent.Seq2SeqAgent', 'Seq2SeqAgent', (['test_env', '""""""', 'encoder', 'decoder', 'max_episode_len'], {}), "(test_env, '', encoder, decoder, max_episode_len)\n", (5291, 5340), False, 'from agent import Seq2SeqAgent\n'), ((5708, 5731), 'utils.read_vocab', 'read_vocab', (['TRAIN_VOCAB'], {}), '(TRAIN_VOCAB)\n', (5718, 5731), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((5742, 5798), 'utils.Tokenizer', 'Tokenizer', ([], {'vocab': 'vocab', 'encoding_length': 'MAX_INPUT_LENGTH'}), '(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)\n', (5751, 5798), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((5815, 5889), 'env.R2RBatch', 'R2RBatch', (['features'], {'batch_size': 'batch_size', 'splits': "['train']", 'tokenizer': 'tok'}), "(features, batch_size=batch_size, splits=['train'], tokenizer=tok)\n", (5823, 5889), False, 'from env import R2RBatch\n'), ((2002, 2024), 'numpy.array', 'np.array', (['agent.losses'], {}), '(agent.losses)\n', (2010, 2024), True, 'import numpy as np\n'), ((2095, 2119), 'numpy.average', 'np.average', (['train_losses'], {}), '(train_losses)\n', (2105, 2119), True, 'import numpy as np\n'), ((3509, 3531), 'pandas.DataFrame', 'pd.DataFrame', (['data_log'], {}), '(data_log)\n', (3521, 3531), True, 'import pandas as pd\n'), ((4031, 4058), 'os.path.exists', 'os.path.exists', (['TRAIN_VOCAB'], {}), '(TRAIN_VOCAB)\n', (4045, 4058), False, 'import os\n'), ((4135, 4165), 'os.path.exists', 'os.path.exists', (['TRAINVAL_VOCAB'], {}), '(TRAINVAL_VOCAB)\n', (4149, 4165), False, 'import os\n'), ((2626, 2648), 'numpy.array', 'np.array', (['agent.losses'], {}), '(agent.losses)\n', (2634, 
2648), True, 'import numpy as np\n'), ((2676, 2698), 'numpy.average', 'np.average', (['val_losses'], {}), '(val_losses)\n', (2686, 2698), True, 'import numpy as np\n'), ((4080, 4109), 'utils.build_vocab', 'build_vocab', ([], {'splits': "['train']"}), "(splits=['train'])\n", (4091, 4109), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((4187, 4242), 'utils.build_vocab', 'build_vocab', ([], {'splits': "['train', 'val_seen', 'val_unseen']"}), "(splits=['train', 'val_seen', 'val_unseen'])\n", (4198, 4242), False, 'from utils import read_vocab, write_vocab, build_vocab, Tokenizer, padding_idx, timeSince\n'), ((5951, 6023), 'env.R2RBatch', 'R2RBatch', (['features'], {'batch_size': 'batch_size', 'splits': '[split]', 'tokenizer': 'tok'}), '(features, batch_size=batch_size, splits=[split], tokenizer=tok)\n', (5959, 6023), False, 'from env import R2RBatch\n'), ((6041, 6060), 'eval.Evaluation', 'Evaluation', (['[split]'], {}), '([split])\n', (6051, 6060), False, 'from eval import Evaluation\n'), ((4971, 4994), 'agent.Seq2SeqAgent.n_inputs', 'Seq2SeqAgent.n_inputs', ([], {}), '()\n', (4992, 4994), False, 'from agent import Seq2SeqAgent\n'), ((4996, 5020), 'agent.Seq2SeqAgent.n_outputs', 'Seq2SeqAgent.n_outputs', ([], {}), '()\n', (5018, 5020), False, 'from agent import Seq2SeqAgent\n'), ((6392, 6415), 'agent.Seq2SeqAgent.n_inputs', 'Seq2SeqAgent.n_inputs', ([], {}), '()\n', (6413, 6415), False, 'from agent import Seq2SeqAgent\n'), ((6417, 6441), 'agent.Seq2SeqAgent.n_outputs', 'Seq2SeqAgent.n_outputs', ([], {}), '()\n', (6439, 6441), False, 'from agent import Seq2SeqAgent\n')] |
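The training script above accumulates metrics in a `defaultdict(list)` and periodically flushes them to CSV through pandas. A minimal standalone sketch of that logging pattern (metric values and file name here are placeholders, not from the repo):

from collections import defaultdict
import pandas as pd

data_log = defaultdict(list)                    # one list per metric column
for step in range(100, 400, 100):               # pretend log_every = 100
    data_log['iteration'].append(step)
    data_log['train loss'].append(1.0 / step)   # placeholder metric
df = pd.DataFrame(data_log).set_index('iteration')
df.to_csv('train_log.csv')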
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optax._src.numerics."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import numerics
int32_array = lambda i: jnp.array(i, dtype=jnp.int32)
float32_array = lambda i: jnp.array(i, dtype=jnp.float32)
class NumericsTest(chex.TestCase):
@chex.all_variants()
def test_safe_int32_increments(self):
inc_fn = self.variant(numerics.safe_int32_increment)
# increment small numbers correctly.
base = int32_array(3)
incremented = inc_fn(base)
np.testing.assert_array_equal(incremented, int32_array(4))
# avoid overflow when incrementing maxint.
base = int32_array(np.iinfo(np.int32).max)
incremented = inc_fn(base)
np.testing.assert_array_equal(incremented, base)
@chex.all_variants()
def test_safe_norm(self):
dnorm_dx = self.variant(jax.grad(numerics.safe_norm))
# Test gradient is 0. in 0. when zero min norm is used.
g = dnorm_dx(float32_array(0.), float32_array(0.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
# Test gradient is 0. in 0. when non zero min norm is used.
g = dnorm_dx(float32_array(0.), float32_array(3.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
if __name__ == '__main__':
absltest.main()
| [
"jax.numpy.array",
"absl.testing.absltest.main",
"numpy.testing.assert_array_equal",
"jax.numpy.zeros_like",
"numpy.iinfo",
"jax.grad",
"chex.all_variants"
] | [((894, 923), 'jax.numpy.array', 'jnp.array', (['i'], {'dtype': 'jnp.int32'}), '(i, dtype=jnp.int32)\n', (903, 923), True, 'import jax.numpy as jnp\n'), ((950, 981), 'jax.numpy.array', 'jnp.array', (['i'], {'dtype': 'jnp.float32'}), '(i, dtype=jnp.float32)\n', (959, 981), True, 'import jax.numpy as jnp\n'), ((1023, 1042), 'chex.all_variants', 'chex.all_variants', ([], {}), '()\n', (1040, 1042), False, 'import chex\n'), ((1483, 1502), 'chex.all_variants', 'chex.all_variants', ([], {}), '()\n', (1500, 1502), False, 'import chex\n'), ((1966, 1981), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1979, 1981), False, 'from absl.testing import absltest\n'), ((1430, 1478), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['incremented', 'base'], {}), '(incremented, base)\n', (1459, 1478), True, 'import numpy as np\n'), ((1559, 1587), 'jax.grad', 'jax.grad', (['numerics.safe_norm'], {}), '(numerics.safe_norm)\n', (1567, 1587), False, 'import jax\n'), ((1741, 1758), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['g'], {}), '(g)\n', (1755, 1758), True, 'import jax.numpy as jnp\n'), ((1916, 1933), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['g'], {}), '(g)\n', (1930, 1933), True, 'import jax.numpy as jnp\n'), ((1371, 1389), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1379, 1389), True, 'import numpy as np\n')] |
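The first test above pins down the contract of `safe_int32_increment`: increment normally, but saturate at the int32 maximum instead of wrapping around. A rough NumPy illustration of that contract (my own sketch, not the optax implementation):

import numpy as np

def saturating_int32_increment(count):
    # Add one in int64 so the intermediate cannot overflow, then clamp to int32 max.
    max_int32 = np.iinfo(np.int32).max
    return np.int32(np.minimum(np.int64(count) + 1, max_int32))

assert saturating_int32_increment(np.int32(3)) == 4
assert saturating_int32_increment(np.iinfo(np.int32).max) == np.iinfo(np.int32).max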
import os
import h5py
import tensorflow as tf
import numpy as np
from math import exp
from tqdm import tqdm
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
from summary_handler import SummaryHandler
from read_data import *
from data import GenModelVocab, translate, save_vocab, restore_vocab,\
translate_spans, GloVEVocab
import time
from tensorflow.python.client import timeline
from utils import *
import random
from model import BiRNNClf as UsedModel
import sys
reload(sys)  # Python 2 idiom: reload() is a builtin there,
sys.setdefaultencoding('utf-8')  # and setdefaultencoding() only exists after reload(sys)
def main(config):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # disable cpp error msgs
if config.mode == 'train':
_train(config)
elif config.mode == 'test':
_test(config)
elif config.mode == 'predict':
_predict(config)
else:
raise ValueError("Invalid Mode!")
def _train(config):
if config.dataset == "imdb":
train_data, valid_data, test_data = create_imdb_data(config)
else:
train_data, valid_data, test_data = create_twenty_newsgroup_data(config)
vocab_freq = train_data.get_word_lists()
print("Data loaded!")
vocab = GloVEVocab(vocab_freq, config.embedding_file,
threshold=config.min_occurence)
print("Vocab built! Size (%d)" % vocab.size())
model = UsedModel(config, vocab, 2 if config.dataset == 'imdb' else 20)
#create session
gpu_configuration = gpu_config()
sess = tf.Session(config=gpu_configuration)
with sess.as_default():
model.build_graph()
print("Graph built!")
model.add_train_op()
print("Train op added!")
sess.run(tf.global_variables_initializer())
print("Variables initialized")
if config.continue_training:
start_e, steps, out_dir, ckpt_dir = restore_from_last_ckpt(
config, model, sess)
# backup new argv
with open(os.path.join(out_dir, 'argv.txt'), 'a') as f:
f.write("\n")
f.write(" ".join(sys.argv))
print("Continue training after epoch %d, step %d" % (start_e, steps))
else:
if config.model_name == 'default':
c_time = time.strftime("%m_%d_%H_%M_%S", time.localtime())
config.model_name = UsedModel.__name__ + "_%s" % c_time
if config.debug:
config.checkpoint_size = 10
if not config.debug:
out_dir = os.path.join(config.out_root, config.model_name)
if os.path.exists(out_dir):
raise ValueError("Output directory already exists!")
else:
os.makedirs(out_dir)
# back up src file
os.system("cp -r src %s" % os.path.join(out_dir, 'src'))
# back up argv
with open(os.path.join(out_dir, "argv.txt"), 'w') as f:
f.write(" ".join(sys.argv))
# back up environ
with open(os.path.join(out_dir, 'recreate_environ.sh'), 'w') as f:
for var, val in os.environ.items():
f.write("export %s=\"%s\"\n" % (var, val))
os.system("chmod +x %s" % os.path.join(out_dir, 'recreate_environ.sh'))
ckpt_dir = os.path.join(out_dir, "ckpts")
vocab_loc = os.path.join(out_dir, "vocab.pkl")
save_vocab(vocab, vocab_loc)
print("Initialized output at %s" % out_dir)
steps = 0
start_e = -1
print("Started training!")
#construct graph handler
summary_handler = SummaryHandler(
os.path.join(config.summary_save_path, config.model_name),
['LOSS', 'ACCURACY'])
for e in range(config.num_epochs):
total_loss = []
grad_norms = []
for batches in tqdm(train_data.get_batches(config.batch_size)):
if steps != 0 or not config.start_eval:
steps += 1
if steps > 10 and config.debug:
exit(0)
is_training = True
fd = model.encode(batches, is_training)
loss, grad_norm = model.train_step(sess, fd)
total_loss.append(loss)
grad_norms.append(grad_norm)
if steps % config.checkpoint_size == 0:
accuracy = eval_model(
config, valid_data, vocab, model, sess)
print("Result at step %d: %f" % (steps, accuracy))
print("avg lost: %f" % (sum(total_loss) / len(total_loss)))
print("avg grad norm: %f"%(sum(grad_norms) / len(grad_norms)))
if not config.debug:
summary_handler.write_summaries(sess,
{
'ITERATION': steps,
'LOSS': avg(total_loss),
'ACCURACY': accuracy
})
if start_e > 0:
epoch = e + start_e
else:
epoch = e
model.save_to(sess, os.path.join(ckpt_dir,
'epoch_%04d_step%08d_acc(%f)' % (epoch, steps,
accuracy)))
summary_handler.close_writer()
def _test(config):
print("Evaluating!")
out_dir = os.path.join(config.out_root, config.model_name)
vocab_loc = os.path.join(out_dir, "vocab.pkl")
if os.path.exists(vocab_loc): # vocab exists!
vocab = restore_vocab(vocab_loc)
else:
raise Exception("Not valid output directory! No vocab found!")
print("Vocab built! Size (%d)" % vocab.size())
if config.dataset == "imdb":
train_data, valid_data, test_data = create_imdb_data(config)
else:
train_data, valid_data, test_data = create_twenty_newsgroup_data(config)
if not config.use_dev:
valid_data = test_data
print(len(valid_data.data))
print("Data loaded!")
#construct model
model = UsedModel(config, vocab, 2 if config.dataset == 'imdb' else 20)
gpu_configuration = gpu_config()
sess = tf.Session(config=gpu_configuration)
with sess.as_default():
model.build_graph()
print("Graph built!")
model.add_train_op()
print("Train op added!")
sess.run(tf.global_variables_initializer())
if config.use_ckpt is not None:
model.restore_from(sess, os.path.join(out_dir, 'ckpts', config.use_ckpt))
elif config.at_step is not None:
restore_from_step(config, model, sess, config.at_step)
else:
raise ValueError("Must specify a ckpt to restore from!")
accuracy = eval_model(
config, valid_data, vocab, model, sess)
print("Results: %f" % (accuracy))
print("Done!")
def _predict(config):
print("Predicting!")
out_dir = os.path.join(config.out_root, config.model_name)
vocab_loc = os.path.join(out_dir, "vocab.pkl")
if os.path.exists(vocab_loc): # vocab exists!
vocab = restore_vocab(vocab_loc)
else:
raise Exception("Not valid output directory! No vocab found!")
print("Vocab built! Size (%d)" % vocab.size())
if config.dataset == "imdb":
train_data, valid_data, test_data = create_imdb_data(config)
else:
train_data, valid_data, test_data = create_twenty_newsgroup_data(config)
if not config.use_dev:
valid_data = test_data
print(len(valid_data.data))
print("Data loaded!")
#construct model
model = UsedModel(config, vocab, 2 if config.dataset == 'imdb' else 20)
gpu_configuration = gpu_config()
sess = tf.Session(config=gpu_configuration)
with sess.as_default():
model.build_graph()
print("Graph built!")
model.add_train_op()
print("Train op added!")
sess.run(tf.global_variables_initializer())
if config.use_ckpt is not None:
model.restore_from(sess, os.path.join(out_dir, 'ckpts', config.use_ckpt))
elif config.at_step is not None:
restore_from_step(config, model, sess, config.at_step)
else:
raise ValueError("Must specify a ckpt to restore from!")
predictions, _ = generate_predictions(config, valid_data, vocab, model,
sess)
np_preds = np.asarray(predictions)
print(np_preds.shape)
out_file = h5py.File(config.prediction_file, 'w')
out_file.create_dataset("predictions", data=np_preds)
out_file.close()
| [
"h5py.File",
"model.BiRNNClf",
"os.makedirs",
"tensorflow.global_variables_initializer",
"data.save_vocab",
"numpy.asarray",
"os.environ.items",
"tensorflow.Session",
"os.path.exists",
"data.GloVEVocab",
"sys.setdefaultencoding",
"data.restore_vocab",
"os.path.join",
"time.localtime"
] | [((507, 538), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (529, 538), False, 'import sys\n'), ((1147, 1224), 'data.GloVEVocab', 'GloVEVocab', (['vocab_freq', 'config.embedding_file'], {'threshold': 'config.min_occurence'}), '(vocab_freq, config.embedding_file, threshold=config.min_occurence)\n', (1157, 1224), False, 'from data import GenModelVocab, translate, save_vocab, restore_vocab, translate_spans, GloVEVocab\n'), ((1302, 1365), 'model.BiRNNClf', 'UsedModel', (['config', 'vocab', "(2 if config.dataset == 'imdb' else 20)"], {}), "(config, vocab, 2 if config.dataset == 'imdb' else 20)\n", (1311, 1365), True, 'from model import BiRNNClf as UsedModel\n'), ((1436, 1472), 'tensorflow.Session', 'tf.Session', ([], {'config': 'gpu_configuration'}), '(config=gpu_configuration)\n', (1446, 1472), True, 'import tensorflow as tf\n'), ((5247, 5295), 'os.path.join', 'os.path.join', (['config.out_root', 'config.model_name'], {}), '(config.out_root, config.model_name)\n', (5259, 5295), False, 'import os\n'), ((5313, 5347), 'os.path.join', 'os.path.join', (['out_dir', '"""vocab.pkl"""'], {}), "(out_dir, 'vocab.pkl')\n", (5325, 5347), False, 'import os\n'), ((5355, 5380), 'os.path.exists', 'os.path.exists', (['vocab_loc'], {}), '(vocab_loc)\n', (5369, 5380), False, 'import os\n'), ((5919, 5982), 'model.BiRNNClf', 'UsedModel', (['config', 'vocab', "(2 if config.dataset == 'imdb' else 20)"], {}), "(config, vocab, 2 if config.dataset == 'imdb' else 20)\n", (5928, 5982), True, 'from model import BiRNNClf as UsedModel\n'), ((6032, 6068), 'tensorflow.Session', 'tf.Session', ([], {'config': 'gpu_configuration'}), '(config=gpu_configuration)\n', (6042, 6068), True, 'import tensorflow as tf\n'), ((6761, 6809), 'os.path.join', 'os.path.join', (['config.out_root', 'config.model_name'], {}), '(config.out_root, config.model_name)\n', (6773, 6809), False, 'import os\n'), ((6827, 6861), 'os.path.join', 'os.path.join', (['out_dir', '"""vocab.pkl"""'], {}), "(out_dir, 'vocab.pkl')\n", (6839, 6861), False, 'import os\n'), ((6869, 6894), 'os.path.exists', 'os.path.exists', (['vocab_loc'], {}), '(vocab_loc)\n', (6883, 6894), False, 'import os\n'), ((7432, 7495), 'model.BiRNNClf', 'UsedModel', (['config', 'vocab', "(2 if config.dataset == 'imdb' else 20)"], {}), "(config, vocab, 2 if config.dataset == 'imdb' else 20)\n", (7441, 7495), True, 'from model import BiRNNClf as UsedModel\n'), ((7545, 7581), 'tensorflow.Session', 'tf.Session', ([], {'config': 'gpu_configuration'}), '(config=gpu_configuration)\n', (7555, 7581), True, 'import tensorflow as tf\n'), ((8184, 8207), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (8194, 8207), True, 'import numpy as np\n'), ((8250, 8288), 'h5py.File', 'h5py.File', (['config.prediction_file', '"""w"""'], {}), "(config.prediction_file, 'w')\n", (8259, 8288), False, 'import h5py\n'), ((1634, 1667), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1665, 1667), True, 'import tensorflow as tf\n'), ((3518, 3575), 'os.path.join', 'os.path.join', (['config.summary_save_path', 'config.model_name'], {}), '(config.summary_save_path, config.model_name)\n', (3530, 3575), False, 'import os\n'), ((5414, 5438), 'data.restore_vocab', 'restore_vocab', (['vocab_loc'], {}), '(vocab_loc)\n', (5427, 5438), False, 'from data import GenModelVocab, translate, save_vocab, restore_vocab, translate_spans, GloVEVocab\n'), ((6231, 6264), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6262, 6264), True, 'import tensorflow as tf\n'), ((6928, 6952), 'data.restore_vocab', 'restore_vocab', (['vocab_loc'], {}), '(vocab_loc)\n', (6941, 6952), False, 'from data import GenModelVocab, translate, save_vocab, restore_vocab, translate_spans, GloVEVocab\n'), ((7744, 7777), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7775, 7777), True, 'import tensorflow as tf\n'), ((2390, 2438), 'os.path.join', 'os.path.join', (['config.out_root', 'config.model_name'], {}), '(config.out_root, config.model_name)\n', (2402, 2438), False, 'import os\n'), ((2454, 2477), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (2468, 2477), False, 'import os\n'), ((3177, 3207), 'os.path.join', 'os.path.join', (['out_dir', '"""ckpts"""'], {}), "(out_dir, 'ckpts')\n", (3189, 3207), False, 'import os\n'), ((3233, 3267), 'os.path.join', 'os.path.join', (['out_dir', '"""vocab.pkl"""'], {}), "(out_dir, 'vocab.pkl')\n", (3245, 3267), False, 'import os\n'), ((3280, 3308), 'data.save_vocab', 'save_vocab', (['vocab', 'vocab_loc'], {}), '(vocab, vocab_loc)\n', (3290, 3308), False, 'from data import GenModelVocab, translate, save_vocab, restore_vocab, translate_spans, GloVEVocab\n'), ((6336, 6383), 'os.path.join', 'os.path.join', (['out_dir', '"""ckpts"""', 'config.use_ckpt'], {}), "(out_dir, 'ckpts', config.use_ckpt)\n", (6348, 6383), False, 'import os\n'), ((7849, 7896), 'os.path.join', 'os.path.join', (['out_dir', '"""ckpts"""', 'config.use_ckpt'], {}), "(out_dir, 'ckpts', config.use_ckpt)\n", (7861, 7896), False, 'import os\n'), ((1888, 1921), 'os.path.join', 'os.path.join', (['out_dir', '"""argv.txt"""'], {}), "(out_dir, 'argv.txt')\n", (1900, 1921), False, 'import os\n'), ((2186, 2202), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((2582, 2602), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2593, 2602), False, 'import os\n'), ((2985, 3003), 'os.environ.items', 'os.environ.items', ([], {}), '()\n', (3001, 3003), False, 'import os\n'), ((2674, 2702), 'os.path.join', 'os.path.join', (['out_dir', '"""src"""'], {}), "(out_dir, 'src')\n", (2686, 2702), False, 'import os\n'), ((2753, 2786), 'os.path.join', 'os.path.join', (['out_dir', '"""argv.txt"""'], {}), "(out_dir, 'argv.txt')\n", (2765, 2786), False, 'import os\n'), ((2896, 2940), 'os.path.join', 'os.path.join', (['out_dir', '"""recreate_environ.sh"""'], {}), "(out_dir, 'recreate_environ.sh')\n", (2908, 2940), False, 'import os\n'), ((3107, 3151), 'os.path.join', 'os.path.join', (['out_dir', '"""recreate_environ.sh"""'], {}), "(out_dir, 'recreate_environ.sh')\n", (3119, 3151), False, 'import os\n'), ((5014, 5099), 'os.path.join', 'os.path.join', (['ckpt_dir', "('epoch_%04d_step%08d_acc(%f)' % (epoch, steps, accuracy))"], {}), "(ckpt_dir, 'epoch_%04d_step%08d_acc(%f)' % (epoch, steps, accuracy)\n )\n", (5026, 5099), False, 'import os\n')] |
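A recurring pattern in the training script above is making each run reproducible by snapshotting `sys.argv` and the environment into the output directory. A minimal, generic sketch of that bookkeeping (paths and the function name are illustrative, not from the repo):

import os
import sys

def snapshot_run_metadata(out_dir):
    os.makedirs(out_dir, exist_ok=True)
    # Record the exact command line that launched this run.
    with open(os.path.join(out_dir, 'argv.txt'), 'w') as f:
        f.write(' '.join(sys.argv))
    # Record the environment so the run can be recreated later.
    with open(os.path.join(out_dir, 'recreate_environ.sh'), 'w') as f:
        for var, val in os.environ.items():
            f.write('export %s="%s"\n' % (var, val))

snapshot_run_metadata('output/example_run')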
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import skimage.io
import argparse
import numpy as np
import time
import os
import cv2
import math
# from memory_profiler import profile
import nets
import dataloader
from dataloader import transforms
from utils import utils
from utils.file_io import write_pfm
import webcamgrabber
import visualise
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='test', type=str,
help='Validation mode on small subset or test mode on full test data')
parser.add_argument('--num_workers', default=0, type=int, help='Number of workers for data loading')
parser.add_argument('--img_height', default=576, type=int, help='Image height for inference')
parser.add_argument('--img_width', default=960, type=int, help='Image width for inference')
# Model
parser.add_argument('--seed', default=326, type=int, help='Random seed for reproducibility')
parser.add_argument('--output_dir', default='output', type=str,
help='Directory to save inference results')
parser.add_argument('--max_disp', default=192, type=int, help='Max disparity')
# AANet
parser.add_argument('--feature_type', default='aanet', type=str, help='Type of feature extractor')
parser.add_argument('--no_feature_mdconv', action='store_true', help='Whether to use mdconv for feature extraction')
parser.add_argument('--feature_pyramid', action='store_true', help='Use pyramid feature')
parser.add_argument('--feature_pyramid_network', action='store_true', help='Use FPN')
parser.add_argument('--feature_similarity', default='correlation', type=str,
help='Similarity measure for matching cost')
parser.add_argument('--num_downsample', default=2, type=int, help='Number of downsample layer for feature extraction')
parser.add_argument('--aggregation_type', default='adaptive', type=str, help='Type of cost aggregation')
parser.add_argument('--num_scales', default=3, type=int, help='Number of stages when using parallel aggregation')
parser.add_argument('--num_fusions', default=6, type=int, help='Number of multi-scale fusions when using parallel '
                                                                    'aggregation')
parser.add_argument('--num_stage_blocks', default=1, type=int, help='Number of deform blocks for ISA')
parser.add_argument('--num_deform_blocks', default=3, type=int, help='Number of DeformBlocks for aggregation')
parser.add_argument('--no_intermediate_supervision', action='store_true',
help='Whether to add intermediate supervision')
parser.add_argument('--deformable_groups', default=2, type=int, help='Number of deformable groups')
parser.add_argument('--mdconv_dilation', default=2, type=int, help='Dilation rate for deformable conv')
parser.add_argument('--refinement_type', default='stereodrnet', help='Type of refinement module')
parser.add_argument('--pretrained_aanet', default=None, type=str, help='Pretrained network')
parser.add_argument('--save_type', default='png', choices=['pfm', 'png', 'npy'], help='Save file type')
parser.add_argument('--visualize', action='store_true', help='Visualize disparity map')
# Log
args = parser.parse_args()
model_name = os.path.basename(args.pretrained_aanet)[:-4]
model_dir = os.path.basename(os.path.dirname(args.pretrained_aanet))
args.output_dir = os.path.join(args.output_dir, model_dir + '-' + model_name)
utils.check_path(args.output_dir)
utils.save_command(args.output_dir)
# @profile
def main():
# cam = webcamgrabber.Arducam("rtsp://192.168.1.70:8554/test")
# cam = webcamgrabber.Arducam("parallel_dining.mp4")
cam = webcamgrabber.Arducam("udpsrc port=5000 ! application/x-rtp, media=video, encoding-name=JPEG, payload=96 ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink")
left, right = cam.read()
img_height, img_width= left.shape[:2]
vis = visualise.Visualiser(cam.Q_)
# For reproducibility
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Test loader
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)])
print(f"Creating AANet...")
aanet = nets.AANet(args.max_disp,
num_downsample=args.num_downsample,
feature_type=args.feature_type,
no_feature_mdconv=args.no_feature_mdconv,
feature_pyramid=args.feature_pyramid,
feature_pyramid_network=args.feature_pyramid_network,
feature_similarity=args.feature_similarity,
aggregation_type=args.aggregation_type,
num_scales=args.num_scales,
num_fusions=args.num_fusions,
num_stage_blocks=args.num_stage_blocks,
num_deform_blocks=args.num_deform_blocks,
no_intermediate_supervision=args.no_intermediate_supervision,
refinement_type=args.refinement_type,
mdconv_dilation=args.mdconv_dilation,
deformable_groups=args.deformable_groups).to(device)
# print(aanet)
if os.path.exists(args.pretrained_aanet):
print('=> Loading pretrained AANet:', args.pretrained_aanet)
utils.load_pretrained_net(aanet, args.pretrained_aanet, no_strict=True)
else:
raise Exception(f'Model not found! {args.pretrained_aanet}')
if torch.cuda.device_count() > 1:
print('=> Use %d GPUs' % torch.cuda.device_count())
aanet = torch.nn.DataParallel(aanet)
# Inference
aanet.eval()
inference_time = 0
framecount = 0
print(f"Finished warmup, starting inference...")
while True:
print(f"Frame {framecount}")
left_img, right_img = cam.read()
cv2.imshow("left", left_img)
cv2.imshow("right", right_img)
img = {'left': left_img, 'right': right_img}
img = test_transform(img)
left = img['left'].unsqueeze(0).to(device) # [B, 3, H, W]
right = img['right'].unsqueeze(0).to(device)
# Pad
ori_height, ori_width = left.size()[2:]
factor = 48 if args.refinement_type != 'hourglass' else 96
img_height = math.ceil(ori_height / factor) * factor
img_width = math.ceil(ori_width / factor) * factor
if ori_height < img_height or ori_width < img_width:
top_pad = img_height - ori_height
right_pad = img_width - ori_width
# Pad size: (left_pad, right_pad, top_pad, bottom_pad)
left = F.pad(left, (0, right_pad, top_pad, 0))
right = F.pad(right, (0, right_pad, top_pad, 0))
framecount += left.size(0)
# print("Performing inference...")
with torch.no_grad():
time_start = time.perf_counter()
pred_disp = aanet(left, right) # [B, C, H, W]
inference_time += time.perf_counter() - time_start
if pred_disp.size(-1) < left.size(-1):
print("Interpolating disparity...")
pred_disp = pred_disp.unsqueeze(1) # [B, 1, H, W]
pred_disp = F.interpolate(pred_disp, (left.size(-2), left.size(-1)),
mode='bilinear', align_corners=True, recompute_scale_factor=True) * (left.size(-1) / pred_disp.size(-1))
pred_disp = pred_disp.cpu().squeeze(1) # [B, H, W]
# Crop
if ori_height < img_height or ori_width < img_width:
if right_pad != 0:
pred_disp = pred_disp[:, top_pad:, :-right_pad]
else:
pred_disp = pred_disp[:, top_pad:]
disp = pred_disp[0].detach().cpu().numpy()
vis.update(disp, left_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cam.release()
vis.release()
break
# save image
        disp = np.clip(255 * disp, 0, 255)  # clip before the uint8 cast so large disparities do not wrap around
        img = disp.astype(np.uint8)
cv2.imwrite("disparity.png", img)
cv2.imwrite("left.png", left_img)
cv2.imwrite("right.png", right_img)
print('=> Mean inference time for %d images: %.3fs' % (framecount, inference_time / framecount))
if __name__ == '__main__':
main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"dataloader.transforms.Normalize",
"torch.cuda.device_count",
"utils.utils.load_pretrained_net",
"nets.AANet",
"cv2.imshow",
"os.path.join",
"torch.no_grad",
"torch.nn.functional.pad",
"cv2.imwrite",
"os.path.dirname",
"os.path.exists",
"util... | [((497, 522), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (520, 522), False, 'import argparse\n'), ((3514, 3573), 'os.path.join', 'os.path.join', (['args.output_dir', "(model_dir + '-' + model_name)"], {}), "(args.output_dir, model_dir + '-' + model_name)\n", (3526, 3573), False, 'import os\n'), ((3577, 3610), 'utils.utils.check_path', 'utils.check_path', (['args.output_dir'], {}), '(args.output_dir)\n', (3593, 3610), False, 'from utils import utils\n'), ((3612, 3647), 'utils.utils.save_command', 'utils.save_command', (['args.output_dir'], {}), '(args.output_dir)\n', (3630, 3647), False, 'from utils import utils\n'), ((3380, 3419), 'os.path.basename', 'os.path.basename', (['args.pretrained_aanet'], {}), '(args.pretrained_aanet)\n', (3396, 3419), False, 'import os\n'), ((3455, 3493), 'os.path.dirname', 'os.path.dirname', (['args.pretrained_aanet'], {}), '(args.pretrained_aanet)\n', (3470, 3493), False, 'import os\n'), ((3812, 3978), 'webcamgrabber.Arducam', 'webcamgrabber.Arducam', (['"""udpsrc port=5000 ! application/x-rtp, media=video, encoding-name=JPEG, payload=96 ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink"""'], {}), "(\n 'udpsrc port=5000 ! application/x-rtp, media=video, encoding-name=JPEG, payload=96 ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink'\n )\n", (3833, 3978), False, 'import webcamgrabber\n'), ((4055, 4083), 'visualise.Visualiser', 'visualise.Visualiser', (['cam.Q_'], {}), '(cam.Q_)\n', (4075, 4083), False, 'import visualise\n'), ((4116, 4144), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4133, 4144), False, 'import torch\n'), ((4150, 4183), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4172, 4183), False, 'import torch\n'), ((4189, 4214), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4203, 4214), True, 'import numpy as np\n'), ((5590, 5627), 'os.path.exists', 'os.path.exists', (['args.pretrained_aanet'], {}), '(args.pretrained_aanet)\n', (5604, 5627), False, 'import os\n'), ((8428, 8461), 'cv2.imwrite', 'cv2.imwrite', (['"""disparity.png"""', 'img'], {}), "('disparity.png', img)\n", (8439, 8461), False, 'import cv2\n'), ((8467, 8500), 'cv2.imwrite', 'cv2.imwrite', (['"""left.png"""', 'left_img'], {}), "('left.png', left_img)\n", (8478, 8500), False, 'import cv2\n'), ((8506, 8541), 'cv2.imwrite', 'cv2.imwrite', (['"""right.png"""', 'right_img'], {}), "('right.png', right_img)\n", (8517, 8541), False, 'import cv2\n'), ((5708, 5779), 'utils.utils.load_pretrained_net', 'utils.load_pretrained_net', (['aanet', 'args.pretrained_aanet'], {'no_strict': '(True)'}), '(aanet, args.pretrained_aanet, no_strict=True)\n', (5733, 5779), False, 'from utils import utils\n'), ((5871, 5896), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5894, 5896), False, 'import torch\n'), ((5980, 6008), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['aanet'], {}), '(aanet)\n', (6001, 6008), False, 'import torch\n'), ((6250, 6278), 'cv2.imshow', 'cv2.imshow', (['"""left"""', 'left_img'], {}), "('left', left_img)\n", (6260, 6278), False, 'import cv2\n'), ((6288, 6318), 'cv2.imshow', 'cv2.imshow', (['"""right"""', 'right_img'], {}), "('right', right_img)\n", (6298, 6318), False, 'import cv2\n'), ((4299, 4324), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4322, 4324), False, 'import torch\n'), ((4410, 4431), 'dataloader.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4429, 
4431), False, 'from dataloader import transforms\n'), ((4442, 4500), 'dataloader.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'IMAGENET_MEAN', 'std': 'IMAGENET_STD'}), '(mean=IMAGENET_MEAN, std=IMAGENET_STD)\n', (4462, 4500), False, 'from dataloader import transforms\n'), ((4555, 5226), 'nets.AANet', 'nets.AANet', (['args.max_disp'], {'num_downsample': 'args.num_downsample', 'feature_type': 'args.feature_type', 'no_feature_mdconv': 'args.no_feature_mdconv', 'feature_pyramid': 'args.feature_pyramid', 'feature_pyramid_network': 'args.feature_pyramid_network', 'feature_similarity': 'args.feature_similarity', 'aggregation_type': 'args.aggregation_type', 'num_scales': 'args.num_scales', 'num_fusions': 'args.num_fusions', 'num_stage_blocks': 'args.num_stage_blocks', 'num_deform_blocks': 'args.num_deform_blocks', 'no_intermediate_supervision': 'args.no_intermediate_supervision', 'refinement_type': 'args.refinement_type', 'mdconv_dilation': 'args.mdconv_dilation', 'deformable_groups': 'args.deformable_groups'}), '(args.max_disp, num_downsample=args.num_downsample, feature_type=\n args.feature_type, no_feature_mdconv=args.no_feature_mdconv,\n feature_pyramid=args.feature_pyramid, feature_pyramid_network=args.\n feature_pyramid_network, feature_similarity=args.feature_similarity,\n aggregation_type=args.aggregation_type, num_scales=args.num_scales,\n num_fusions=args.num_fusions, num_stage_blocks=args.num_stage_blocks,\n num_deform_blocks=args.num_deform_blocks, no_intermediate_supervision=\n args.no_intermediate_supervision, refinement_type=args.refinement_type,\n mdconv_dilation=args.mdconv_dilation, deformable_groups=args.\n deformable_groups)\n', (4565, 5226), False, 'import nets\n'), ((6698, 6728), 'math.ceil', 'math.ceil', (['(ori_height / factor)'], {}), '(ori_height / factor)\n', (6707, 6728), False, 'import math\n'), ((6759, 6788), 'math.ceil', 'math.ceil', (['(ori_width / factor)'], {}), '(ori_width / factor)\n', (6768, 6788), False, 'import math\n'), ((7046, 7085), 'torch.nn.functional.pad', 'F.pad', (['left', '(0, right_pad, top_pad, 0)'], {}), '(left, (0, right_pad, top_pad, 0))\n', (7051, 7085), True, 'import torch.nn.functional as F\n'), ((7107, 7147), 'torch.nn.functional.pad', 'F.pad', (['right', '(0, right_pad, top_pad, 0)'], {}), '(right, (0, right_pad, top_pad, 0))\n', (7112, 7147), True, 'import torch.nn.functional as F\n'), ((7246, 7261), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7259, 7261), False, 'import torch\n'), ((7289, 7308), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7306, 7308), False, 'import time\n'), ((5936, 5961), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5959, 5961), False, 'import torch\n'), ((7400, 7419), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7417, 7419), False, 'import time\n'), ((8239, 8253), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8250, 8253), False, 'import cv2\n')] |
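The inference loop above pads both images so height and width become multiples of the network's downsampling factor, then crops the prediction back to the original size. A standalone sketch of that pad/crop round trip (the factor value and function names are illustrative):

import math
import torch
import torch.nn.functional as F

def pad_to_multiple(img, factor=48):
    # img: [B, C, H, W]; pad top and right so H and W are multiples of factor.
    h, w = img.size(-2), img.size(-1)
    top_pad = math.ceil(h / factor) * factor - h
    right_pad = math.ceil(w / factor) * factor - w
    return F.pad(img, (0, right_pad, top_pad, 0)), top_pad, right_pad

def crop_back(pred, top_pad, right_pad):
    # pred: [B, H, W]; undo the padding applied above.
    if right_pad > 0:
        return pred[:, top_pad:, :-right_pad]
    return pred[:, top_pad:]

x = torch.zeros(1, 3, 100, 130)
padded, top_pad, right_pad = pad_to_multiple(x)
assert padded.size(-2) % 48 == 0 and padded.size(-1) % 48 == 0
pred = padded.mean(1)                      # stand-in for a [B, H, W] disparity map
assert crop_back(pred, top_pad, right_pad).shape[-2:] == (100, 130)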
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, wasserstein_distance, ks_2samp, distributions
import matplotlib.pyplot as plt
def compute_distribution_cat(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None,
max_n_cat: int = None):
    if sample_weights1 is None:
        sample_weights1 = np.ones(len(a1))  # np.ones rather than ones_like: a1 may hold string categories
    if sample_weights2 is None:
        sample_weights2 = np.ones(len(a2))
# compute cat_map is needed
def _compute_cat_map(a: np.array, sample_weights: np.array, max_n_cat: float):
# sort categories by size
unique_cat = np.unique(a).tolist()
total_weight = np.sum(sample_weights)
cat_weights = [np.sum(sample_weights[a == cat]) / total_weight for cat in unique_cat]
sorted_cat = [cat for _, cat in sorted(zip(cat_weights, unique_cat), reverse=True)]
        # create category mapping to reduce the number of categories
cat_map = {}
for i, cat in enumerate(sorted_cat):
if i < (max_n_cat-1):
cat_map[cat] = cat
else:
cat_map[cat] = 'other_cat_agg'
return cat_map
if max_n_cat is not None:
cat_map = _compute_cat_map(np.concatenate((a1, a2)), np.concatenate((sample_weights1, sample_weights2)),
max_n_cat)
else:
cat_map = None
# compute the distribution
unique_cat1 = np.unique(a1).tolist()
unique_cat2 = np.unique(a2).tolist()
unique_cat = unique_cat1 + [cat for cat in unique_cat2 if cat not in unique_cat1]
total_weight1 = np.sum(sample_weights1)
total_weight2 = np.sum(sample_weights2)
if cat_map is not None:
distrib = {cat: [0, 0] for cat in cat_map.values()}
for cat in unique_cat:
distrib[cat_map[cat]] = [distrib[cat_map[cat]][0] + np.sum(sample_weights1[a1 == cat]) / total_weight1,
distrib[cat_map[cat]][1] + np.sum(sample_weights2[a2 == cat]) / total_weight2]
else:
distrib = {}
for cat in unique_cat:
distrib[cat] = [np.sum(sample_weights1[a1 == cat]) / total_weight1,
np.sum(sample_weights2[a2 == cat]) / total_weight2]
return distrib
def compute_mean_diff(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
if sample_weights1 is None:
sample_weights1 = np.ones_like(a1)
if sample_weights2 is None:
sample_weights2 = np.ones_like(a2)
mean1 = np.sum(a1 * sample_weights1) / np.sum(sample_weights1)
mean2 = np.sum(a2 * sample_weights2) / np.sum(sample_weights2)
return mean2 - mean1
def wasserstein_distance_for_cat(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
    # this corresponds to the Wasserstein distance, assuming a distance of 1 between any two categories of the feature
distrib = compute_distribution_cat(a1, a2, sample_weights1, sample_weights2)
drift = 0
for cat in distrib.keys():
drift += abs(distrib[cat][0] - distrib[cat][1]) / 2
return drift
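# Worked example (illustrative, not part of the original module): with category
# shares {'a': [0.8, 0.5], 'b': [0.2, 0.5]} the drift is
# (|0.8 - 0.5| + |0.2 - 0.5|) / 2 = 0.3, i.e. the total variation distance
# between the two categorical distributions.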
def chi2_test(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
    # TODO: generalizing the chi2 test to weights != np.ones is non-trivial (needs verification)
    # chi2 does not take max_n_cat into account. If the number of categories becomes a problem,
    # it should be handled internally by chi2_test with a proper solution
distrib = compute_distribution_cat(a1, a2, sample_weights1, sample_weights2)
contingency_table = pd.DataFrame({cat: pd.Series({'X1': distrib[cat][0] * len(a1), 'X2': distrib[cat][1] * len(a2)})
for cat in distrib.keys()})
chi2_stat, p_value, dof, expected = chi2_contingency(contingency_table)
return {'chi2_stat': chi2_stat,
'p_value': p_value,
'dof': dof,
'contingency_table': contingency_table}
def compute_drift_num(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
if (sample_weights1 is None and sample_weights2 is None or
np.all(sample_weights1 == sample_weights1[0]) and np.all(sample_weights2 == sample_weights2[0])):
kolmogorov_smirnov = ks_2samp(a1, a2)
else:
kolmogorov_smirnov = ks_weighted(a1, a2, sample_weights1, sample_weights2)
return {'mean_difference': compute_mean_diff(a1, a2, sample_weights1, sample_weights2),
'wasserstein': wasserstein_distance(a1, a2, sample_weights1, sample_weights2),
'kolmogorov_smirnov': kolmogorov_smirnov}
def ks_weighted(data1, data2, wei1, wei2, alternative='two-sided'):
# kolmogorov smirnov test for weighted samples
# taken from https://stackoverflow.com/questions/40044375/how-to-calculate-the-kolmogorov-smirnov-statistic-between-two-weighted-samples
# see also: https://github.com/scipy/scipy/issues/12315
# TODO: verify p-value computation is good
ix1 = np.argsort(data1)
ix2 = np.argsort(data2)
data1 = data1[ix1]
data2 = data2[ix2]
wei1 = wei1[ix1]
wei2 = wei2[ix2]
data = np.concatenate([data1, data2])
cwei1 = np.hstack([0, np.cumsum(wei1)/sum(wei1)])
cwei2 = np.hstack([0, np.cumsum(wei2)/sum(wei2)])
cdf1we = cwei1[np.searchsorted(data1, data, side='right')]
cdf2we = cwei2[np.searchsorted(data2, data, side='right')]
d = np.max(np.abs(cdf1we - cdf2we))
# calculate p-value
n1 = data1.shape[0]
n2 = data2.shape[0]
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
return {'statistic': d, 'pvalue': prob}
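# Sanity check (illustrative, not part of the original module): with equal weights,
# ks_weighted should closely match scipy's unweighted ks_2samp, e.g.
#   x, y = np.random.normal(size=500), np.random.normal(0.5, 1.0, size=500)
#   ks_weighted(x, y, np.ones(500), np.ones(500))   # ~ ks_2samp(x, y)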
def compute_drift_cat(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
return {'wasserstein': wasserstein_distance_for_cat(a1, a2, sample_weights1, sample_weights2),
'chi2_test': chi2_test(a1, a2, sample_weights1, sample_weights2)}
def plot_drift_cat(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None, title=None,
max_n_cat: float = None, figsize=(10, 6)):
# compute both distributions
distrib = compute_distribution_cat(a1, a2, sample_weights1, sample_weights2, max_n_cat)
bar_height = np.array([v for v in distrib.values()]) # len(distrib) rows and 2 columns
#plot
index = np.arange(len(distrib))
bar_width = 0.35
fig, ax = plt.subplots(figsize=figsize)
ax.bar(index, bar_height[:, 0], bar_width, label="Dataset 1")
ax.bar(index+bar_width, bar_height[:, 1], bar_width, label="Dataset 2")
ax.set_xlabel('Category')
ax.set_ylabel('Percentage')
ax.set_title(title)
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(list(distrib.keys()), rotation=30)
ax.legend()
plt.show()
def plot_drift_num(a1: np.array, a2: np.array, sample_weights1: np.array=None, sample_weights2: np.array=None,
title=None, figsize=(7,5), bins=10):
#distrib = compute_distribution_num(a1, a2, sample_weights1, sample_weights2)
fig, ax = plt.subplots(figsize=figsize)
ax.hist(a1, bins=bins, density=True, weights=sample_weights1, alpha=0.3)
ax.hist(a2, bins=bins, density=True, weights=sample_weights2, alpha=0.3)
ax.legend(['Dataset 1', 'Dataset 2'])
plt.title(title)
plt.show()
def compute_distribution_num(a1: np.array, a2: np.array, sample_weights1=None, sample_weights2=None):
pass
| [
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.abs",
"numpy.unique",
"numpy.searchsorted",
"numpy.all",
"numpy.argsort",
"numpy.cumsum",
"scipy.stats.wasserstein_distance",
"scipy.stats.chi2_contingency",
"numpy.exp",
"numpy.round",
"scipy.sta... | [((1623, 1646), 'numpy.sum', 'np.sum', (['sample_weights1'], {}), '(sample_weights1)\n', (1629, 1646), True, 'import numpy as np\n'), ((1667, 1690), 'numpy.sum', 'np.sum', (['sample_weights2'], {}), '(sample_weights2)\n', (1673, 1690), True, 'import numpy as np\n'), ((3742, 3777), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['contingency_table'], {}), '(contingency_table)\n', (3758, 3777), False, 'from scipy.stats import chi2_contingency, wasserstein_distance, ks_2samp, distributions\n'), ((4947, 4964), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (4957, 4964), True, 'import numpy as np\n'), ((4975, 4992), 'numpy.argsort', 'np.argsort', (['data2'], {}), '(data2)\n', (4985, 4992), True, 'import numpy as np\n'), ((5092, 5122), 'numpy.concatenate', 'np.concatenate', (['[data1, data2]'], {}), '([data1, data2])\n', (5106, 5122), True, 'import numpy as np\n'), ((6662, 6691), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6674, 6691), True, 'import matplotlib.pyplot as plt\n'), ((7040, 7050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7048, 7050), True, 'import matplotlib.pyplot as plt\n'), ((7316, 7345), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (7328, 7345), True, 'import matplotlib.pyplot as plt\n'), ((7546, 7562), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7555, 7562), True, 'import matplotlib.pyplot as plt\n'), ((7567, 7577), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7575, 7577), True, 'import matplotlib.pyplot as plt\n'), ((373, 389), 'numpy.ones_like', 'np.ones_like', (['a1'], {}), '(a1)\n', (385, 389), True, 'import numpy as np\n'), ((448, 464), 'numpy.ones_like', 'np.ones_like', (['a2'], {}), '(a2)\n', (460, 464), True, 'import numpy as np\n'), ((681, 703), 'numpy.sum', 'np.sum', (['sample_weights'], {}), '(sample_weights)\n', (687, 703), True, 'import numpy as np\n'), ((2439, 2455), 'numpy.ones_like', 'np.ones_like', (['a1'], {}), '(a1)\n', (2451, 2455), True, 'import numpy as np\n'), ((2514, 2530), 'numpy.ones_like', 'np.ones_like', (['a2'], {}), '(a2)\n', (2526, 2530), True, 'import numpy as np\n'), ((2543, 2571), 'numpy.sum', 'np.sum', (['(a1 * sample_weights1)'], {}), '(a1 * sample_weights1)\n', (2549, 2571), True, 'import numpy as np\n'), ((2574, 2597), 'numpy.sum', 'np.sum', (['sample_weights1'], {}), '(sample_weights1)\n', (2580, 2597), True, 'import numpy as np\n'), ((2610, 2638), 'numpy.sum', 'np.sum', (['(a2 * sample_weights2)'], {}), '(a2 * sample_weights2)\n', (2616, 2638), True, 'import numpy as np\n'), ((2641, 2664), 'numpy.sum', 'np.sum', (['sample_weights2'], {}), '(sample_weights2)\n', (2647, 2664), True, 'import numpy as np\n'), ((4221, 4237), 'scipy.stats.ks_2samp', 'ks_2samp', (['a1', 'a2'], {}), '(a1, a2)\n', (4229, 4237), False, 'from scipy.stats import chi2_contingency, wasserstein_distance, ks_2samp, distributions\n'), ((4450, 4512), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['a1', 'a2', 'sample_weights1', 'sample_weights2'], {}), '(a1, a2, sample_weights1, sample_weights2)\n', (4470, 4512), False, 'from scipy.stats import chi2_contingency, wasserstein_distance, ks_2samp, distributions\n'), ((5250, 5292), 'numpy.searchsorted', 'np.searchsorted', (['data1', 'data'], {'side': '"""right"""'}), "(data1, data, side='right')\n", (5265, 5292), True, 'import numpy as np\n'), ((5313, 5355), 'numpy.searchsorted', 'np.searchsorted', (['data2', 
'data'], {'side': '"""right"""'}), "(data2, data, side='right')\n", (5328, 5355), True, 'import numpy as np\n'), ((5372, 5395), 'numpy.abs', 'np.abs', (['(cdf1we - cdf2we)'], {}), '(cdf1we - cdf2we)\n', (5378, 5395), True, 'import numpy as np\n'), ((5865, 5877), 'numpy.exp', 'np.exp', (['expt'], {}), '(expt)\n', (5871, 5877), True, 'import numpy as np\n'), ((1246, 1270), 'numpy.concatenate', 'np.concatenate', (['(a1, a2)'], {}), '((a1, a2))\n', (1260, 1270), True, 'import numpy as np\n'), ((1272, 1322), 'numpy.concatenate', 'np.concatenate', (['(sample_weights1, sample_weights2)'], {}), '((sample_weights1, sample_weights2))\n', (1286, 1322), True, 'import numpy as np\n'), ((1453, 1466), 'numpy.unique', 'np.unique', (['a1'], {}), '(a1)\n', (1462, 1466), True, 'import numpy as np\n'), ((1494, 1507), 'numpy.unique', 'np.unique', (['a2'], {}), '(a2)\n', (1503, 1507), True, 'import numpy as np\n'), ((4094, 4139), 'numpy.all', 'np.all', (['(sample_weights1 == sample_weights1[0])'], {}), '(sample_weights1 == sample_weights1[0])\n', (4100, 4139), True, 'import numpy as np\n'), ((4144, 4189), 'numpy.all', 'np.all', (['(sample_weights2 == sample_weights2[0])'], {}), '(sample_weights2 == sample_weights2[0])\n', (4150, 4189), True, 'import numpy as np\n'), ((5626, 5638), 'numpy.round', 'np.round', (['en'], {}), '(en)\n', (5634, 5638), True, 'import numpy as np\n'), ((5662, 5673), 'numpy.sqrt', 'np.sqrt', (['en'], {}), '(en)\n', (5669, 5673), True, 'import numpy as np\n'), ((636, 648), 'numpy.unique', 'np.unique', (['a'], {}), '(a)\n', (645, 648), True, 'import numpy as np\n'), ((727, 759), 'numpy.sum', 'np.sum', (['sample_weights[a == cat]'], {}), '(sample_weights[a == cat])\n', (733, 759), True, 'import numpy as np\n'), ((5149, 5164), 'numpy.cumsum', 'np.cumsum', (['wei1'], {}), '(wei1)\n', (5158, 5164), True, 'import numpy as np\n'), ((5203, 5218), 'numpy.cumsum', 'np.cumsum', (['wei2'], {}), '(wei2)\n', (5212, 5218), True, 'import numpy as np\n'), ((2132, 2166), 'numpy.sum', 'np.sum', (['sample_weights1[a1 == cat]'], {}), '(sample_weights1[a1 == cat])\n', (2138, 2166), True, 'import numpy as np\n'), ((2212, 2246), 'numpy.sum', 'np.sum', (['sample_weights2[a2 == cat]'], {}), '(sample_weights2[a2 == cat])\n', (2218, 2246), True, 'import numpy as np\n'), ((5827, 5851), 'numpy.sqrt', 'np.sqrt', (['(m * n * (m + n))'], {}), '(m * n * (m + n))\n', (5834, 5851), True, 'import numpy as np\n'), ((1874, 1908), 'numpy.sum', 'np.sum', (['sample_weights1[a1 == cat]'], {}), '(sample_weights1[a1 == cat])\n', (1880, 1908), True, 'import numpy as np\n'), ((1990, 2024), 'numpy.sum', 'np.sum', (['sample_weights2[a2 == cat]'], {}), '(sample_weights2[a2 == cat])\n', (1996, 2024), True, 'import numpy as np\n')] |
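A quick end-to-end usage example for the drift helpers above, on synthetic data (assumes the functions are importable from wherever this module lives; outputs noted in the comments are approximate):

import numpy as np

rng = np.random.default_rng(0)
a1 = rng.normal(0.0, 1.0, size=1000)
a2 = rng.normal(0.3, 1.0, size=1000)                # shifted sample -> drift
print(compute_drift_num(a1, a2))                    # mean_difference near 0.3, small KS p-value

c1 = rng.choice(['a', 'b'], size=1000, p=[0.8, 0.2])
c2 = rng.choice(['a', 'b'], size=1000, p=[0.5, 0.5])
print(compute_drift_cat(c1, c2))                    # categorical wasserstein near 0.3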
import numpy as np
import healpy as hp
import fitsio
import matplotlib.pyplot as plt
from sortcl import enumerate_cls
import camb
nbins = 10
lmax = 1024
l = np.arange(lmax+1)
################################################################################
fits = fitsio.FITS('map.fits')
zbins = []
for i in range(nbins):
h = fits[f'MAP{i+1}'].read_header()
zbins.append((h['ZMIN'], h['ZMAX']))
delta = [fits[f'MAP{i+1}'].read(columns=['delta'])['delta'] for i in range(nbins)]
kappa = [fits[f'MAP{i+1}'].read(columns=['kappa'])['kappa'] for i in range(nbins)]
delta_cls = hp.anafast(delta, lmax=lmax, pol=False, use_pixel_weights=True)
kappa_cls = hp.anafast(kappa, lmax=lmax, pol=False, use_pixel_weights=True)
################################################################################
pars = camb.set_params(H0=70, omch2=0.3*(0.7)**2)
pars.Want_CMB = False
pars.Want_Cls = True
pars.SourceTerms.counts_density = True
pars.SourceTerms.counts_evolve = True
pars.SourceTerms.counts_redshift = False
pars.SourceTerms.counts_lensing = False
pars.SourceTerms.counts_velocity = False
pars.SourceTerms.counts_radial = False
pars.SourceTerms.counts_timedelay = False
pars.SourceTerms.counts_ISW = False
pars.SourceTerms.counts_potential = False
results = camb.get_background(pars, no_thermo=True)
windows = []
for i, (zmin, zmax) in enumerate(zbins):
z = np.linspace(zmin, zmax, 100)
w = ((1 + z)*results.angular_diameter_distance(z))**2/results.h_of_z(z)
w /= np.trapz(w, z)
windows.append(camb.sources.SplinedSourceWindow(source_type='counts', z=z, W=w))
windows.append(camb.sources.GaussianSourceWindow(source_type='lensing', redshift=zmax, sigma=0.01))
pars.SourceWindows = windows
results = camb.get_results(pars)
camb_cls = results.get_source_cls_dict(lmax=lmax, raw_cl=True)
################################################################################
fig, ax = plt.subplots(nbins+1, nbins+1)
for i in range(nbins+1):
ax[i, i].axis('off')
ax[i, i].set_facecolor('grey')
ax[i, i].add_artist(ax[i, i].patch)
ax[i, i].patch.set_zorder(-1)
for i, j, cl in enumerate_cls(delta_cls):
ax[i, j+1].plot(l, (2*l+1)*cl)
ax[i, j+1].plot(l, (2*l+1)*camb_cls[f'W{2*i+1}xW{2*j+1}'])
ax[i, j+1].set_xscale('symlog', linthresh=10, linscale=0.5)
ax[i, j+1].set_yscale('symlog', linthresh=1e-4, linscale=0.5)
ax[i, j+1].set_xlim(0, lmax)
ax[i, j+1].tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)
for i, j, cl in enumerate_cls(kappa_cls):
ax[j+1, i].plot(l, (2*l+1)*cl)
ax[j+1, i].plot(l, (2*l+1)*camb_cls[f'W{2*i+2}xW{2*j+2}'])
ax[j+1, i].set_xscale('symlog', linthresh=10, linscale=0.5)
ax[j+1, i].set_yscale('symlog', linthresh=1e-7, linscale=0.5)
ax[j+1, i].set_xlim(0, lmax)
ax[j+1, i].tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)
fig.tight_layout(pad=0)
plt.show()
| [
"numpy.trapz",
"camb.sources.GaussianSourceWindow",
"matplotlib.pyplot.show",
"fitsio.FITS",
"camb.sources.SplinedSourceWindow",
"camb.get_background",
"sortcl.enumerate_cls",
"camb.set_params",
"camb.get_results",
"numpy.arange",
"numpy.linspace",
"healpy.anafast",
"matplotlib.pyplot.subplo... | [((163, 182), 'numpy.arange', 'np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (172, 182), True, 'import numpy as np\n'), ((271, 294), 'fitsio.FITS', 'fitsio.FITS', (['"""map.fits"""'], {}), "('map.fits')\n", (282, 294), False, 'import fitsio\n'), ((591, 654), 'healpy.anafast', 'hp.anafast', (['delta'], {'lmax': 'lmax', 'pol': '(False)', 'use_pixel_weights': '(True)'}), '(delta, lmax=lmax, pol=False, use_pixel_weights=True)\n', (601, 654), True, 'import healpy as hp\n'), ((667, 730), 'healpy.anafast', 'hp.anafast', (['kappa'], {'lmax': 'lmax', 'pol': '(False)', 'use_pixel_weights': '(True)'}), '(kappa, lmax=lmax, pol=False, use_pixel_weights=True)\n', (677, 730), True, 'import healpy as hp\n'), ((821, 865), 'camb.set_params', 'camb.set_params', ([], {'H0': '(70)', 'omch2': '(0.3 * 0.7 ** 2)'}), '(H0=70, omch2=0.3 * 0.7 ** 2)\n', (836, 865), False, 'import camb\n'), ((1277, 1318), 'camb.get_background', 'camb.get_background', (['pars'], {'no_thermo': '(True)'}), '(pars, no_thermo=True)\n', (1296, 1318), False, 'import camb\n'), ((1741, 1763), 'camb.get_results', 'camb.get_results', (['pars'], {}), '(pars)\n', (1757, 1763), False, 'import camb\n'), ((1920, 1954), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(nbins + 1)', '(nbins + 1)'], {}), '(nbins + 1, nbins + 1)\n', (1932, 1954), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2152), 'sortcl.enumerate_cls', 'enumerate_cls', (['delta_cls'], {}), '(delta_cls)\n', (2141, 2152), False, 'from sortcl import enumerate_cls\n'), ((2548, 2572), 'sortcl.enumerate_cls', 'enumerate_cls', (['kappa_cls'], {}), '(kappa_cls)\n', (2561, 2572), False, 'from sortcl import enumerate_cls\n'), ((2977, 2987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2985, 2987), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1410), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', '(100)'], {}), '(zmin, zmax, 100)\n', (1393, 1410), True, 'import numpy as np\n'), ((1496, 1510), 'numpy.trapz', 'np.trapz', (['w', 'z'], {}), '(w, z)\n', (1504, 1510), True, 'import numpy as np\n'), ((1530, 1594), 'camb.sources.SplinedSourceWindow', 'camb.sources.SplinedSourceWindow', ([], {'source_type': '"""counts"""', 'z': 'z', 'W': 'w'}), "(source_type='counts', z=z, W=w)\n", (1562, 1594), False, 'import camb\n'), ((1615, 1702), 'camb.sources.GaussianSourceWindow', 'camb.sources.GaussianSourceWindow', ([], {'source_type': '"""lensing"""', 'redshift': 'zmax', 'sigma': '(0.01)'}), "(source_type='lensing', redshift=zmax,\n sigma=0.01)\n", (1648, 1702), False, 'import camb\n')] |
import numpy as np
import requests
from .utils import id_to_url, get_path_indexes, get_ideal_coords
def test_id_to_url_does_transform():
assert id_to_url(
'A0000001') == 'https://iiif.wellcomecollection.org/image/A0000001.jpg/full/960,/0/default.jpg'
def test_id_to_url_returns_valid_url():
url = id_to_url('A0000001')
response = requests.get(url)
assert response.status_code == 200
def test_get_path_indexes():
closest_indexes = np.array([
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9]
])
start_index = 0
end_index = 10
pathway = get_path_indexes(closest_indexes, start_index, end_index)
assert pathway[0] == start_index
assert pathway[-1] == end_index
assert pathway == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def test_get_ideal_coords():
n = 10
start_coord = np.random.random(size=1)
end_coord = np.random.random(size=1)
ideal_coords = np.vstack([
[start_coord],
get_ideal_coords(start_coord, end_coord, n),
[end_coord]
])
index = np.random.randint(1, n - 1)
expected_value = ((index) * (end_coord - start_coord) / n) + start_coord
assert np.isclose(ideal_coords[index], expected_value, atol=0.05).all()
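# A minimal sketch (not the library's actual implementation) of the behaviour
# the assertions above imply for get_ideal_coords: it returns the interior
# points spaced linearly between start_coord and end_coord, so that stacking
# [start_coord], the result, and [end_coord] places
# start_coord + i * (end_coord - start_coord) / n at index i.
def _ideal_coords_sketch(start_coord, end_coord, n):
    return np.array([start_coord + i * (end_coord - start_coord) / n
                     for i in range(1, n)])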
| [
"numpy.isclose",
"numpy.random.randint",
"numpy.random.random",
"numpy.array",
"requests.get"
] | [((354, 371), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (366, 371), False, 'import requests\n'), ((464, 748), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, \n 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, \n 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, \n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7,\n 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]])\n', (472, 748), True, 'import numpy as np\n'), ((1117, 1141), 'numpy.random.random', 'np.random.random', ([], {'size': '(1)'}), '(size=1)\n', (1133, 1141), True, 'import numpy as np\n'), ((1158, 1182), 'numpy.random.random', 'np.random.random', ([], {'size': '(1)'}), '(size=1)\n', (1174, 1182), True, 'import numpy as np\n'), ((1329, 1356), 'numpy.random.randint', 'np.random.randint', (['(1)', '(n - 1)'], {}), '(1, n - 1)\n', (1346, 1356), True, 'import numpy as np\n'), ((1445, 1503), 'numpy.isclose', 'np.isclose', (['ideal_coords[index]', 'expected_value'], {'atol': '(0.05)'}), '(ideal_coords[index], expected_value, atol=0.05)\n', (1455, 1503), True, 'import numpy as np\n')] |
#!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/libtests/materials/data/IsotropicLinearMaxwellPlaneStrain_VarStrain.py
# @brief Python application for generating spatial database files for
# testing IsotropicLinearMaxwellPlaneStrain via Method of
# Manufactured Solutions for linearly varying total strain.
# ----------------------------------------------------------------------
# Domain
from spatialdata.geocoords.CSCart import CSCart
from spatialdata.spatialdb.SimpleGridAscii import createWriter
import math
import numpy
XLIM = (-4.0e+3, +4.0e+3)
YLIM = XLIM
DX = 100.0
# Material properties
DENSITY = 4000.0
VS = 5600.0
VP = 10000.0
VISCOSITY = 7.91700159488e+19
# Variable definitions for solution.
A = 1.0e-6
B = 2.5e-6
C = 3.0e-6
TIME = 1.0e7
# ----------------------------------------------------------------------
x = numpy.arange(XLIM[0], XLIM[1] + 0.1 * DX, DX, dtype=numpy.float64)
y = numpy.arange(YLIM[0], YLIM[1] + 0.1 * DX, DX, dtype=numpy.float64)
xgrid, ygrid = numpy.meshgrid(x, y)
points = numpy.vstack((xgrid.ravel(), ygrid.ravel())).transpose()
npts = points.shape[0]
PX = points[:, 0]
PY = points[:, 1]
density = DENSITY * numpy.ones((npts,))
vs = VS * numpy.ones((npts,))
vp = VP * numpy.ones((npts,))
viscosity = VISCOSITY * numpy.ones((npts,))
# Create material properties for solution.
shearModulus = DENSITY * VS * VS
lameConstant = DENSITY * VP * VP - 2.0 * shearModulus
bulkModulus = lameConstant + 2.0 * shearModulus / 3.0
maxwellTime = VISCOSITY / shearModulus
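# Quick arithmetic check of the relaxation scale implied by the constants above:
# shearModulus = 4000 * 5600**2 ~ 1.254e11 Pa, so maxwellTime ~ 7.917e19 /
# 1.254e11 ~ 6.31e8 s (about 20 years); with TIME = 1e7 s, the factor
# exp(-TIME / maxwellTime) used below is ~ 0.984, i.e. strains are barely relaxed.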
# Create coordinate system for spatial database
cs = CSCart()
cs._configure()
cs.setSpaceDim(2)
# ----------------------------------------------------------------------
def generateAuxSubfields():
totalStrain_11 = (2.0 * A * PX + B * PY) * math.exp(-TIME / maxwellTime)
totalStrain_12 = (B * PX / 2.0 + B * PY / 2.0 + C * PX +
C * PY) * math.exp(-TIME / maxwellTime)
totalStrain_22 = (2.0 * A * PY + B * PX) * math.exp(-TIME / maxwellTime)
totalStrain_33 = numpy.zeros_like(totalStrain_22)
visStrain_11 = (math.exp(TIME / maxwellTime) - 1.0) * (A * PX - A * PY - B * PX + B * PY) * \
math.exp(-2.0 * TIME / maxwellTime)
visStrain_12 = (math.exp(TIME / maxwellTime) - 1.0) * (B * PX + B * PY + C * PX + C * PY) * \
math.exp(-2.0 * TIME / maxwellTime)
visStrain_22 = -(math.exp(TIME / maxwellTime) - 1.0) * (A * PX - A * PY - B * PX + B * PY) * \
math.exp(-2.0 * TIME / maxwellTime)
visStrain_33 = -(math.exp(TIME / maxwellTime) - 1.0) * (A * PX + A * PY + B * PX + B * PY) * \
math.exp(-2.0 * TIME / maxwellTime)
equil_1 = 4.0 * bulkModulus * \
(A + B) * math.exp(-TIME / maxwellTime) * \
numpy.ones(npts, dtype=numpy.float64)
equil_2 = 4.0 * bulkModulus * \
(A + B) * math.exp(-TIME / maxwellTime) * \
numpy.ones(npts, dtype=numpy.float64)
writer = createWriter(
"IsotropicLinearMaxwellPlaneStrain_VarStrain_aux.spatialdb")
writer.write({'points': points,
'x': x,
'y': y,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "vs", 'units': "m/s", 'data': vs},
{'name': "vp", 'units': "m/s", 'data': vp},
{'name': "density", 'units': "kg/m**3", 'data': density},
{'name': "viscosity", 'units': "Pa*s",
'data': viscosity},
{'name': "total_strain_xx", 'units': "None",
'data': totalStrain_11},
{'name': "total_strain_yy", 'units': "None",
'data': totalStrain_22},
{'name': "total_strain_zz", 'units': "None",
'data': totalStrain_33},
{'name': "total_strain_xy", 'units': "None",
'data': totalStrain_12},
{'name': "vis_strain_xx", 'units': "None",
'data': visStrain_11},
{'name': "vis_strain_yy", 'units': "None",
'data': visStrain_22},
{'name': "vis_strain_zz", 'units': "None",
'data': visStrain_33},
{'name': "vis_strain_xy", 'units': "None",
'data': visStrain_12},
{'name': "body_force_x", 'units': "N", 'data': equil_1},
{'name': "body_force_y", 'units': "N", 'data': equil_2},
]})
return
# ----------------------------------------------------------------------
def generateSolution():
disp = numpy.zeros((npts, 2))
disp[:, 0] = (A * PX**2 + 2.0 * B * PX * PY + C * PY**2) * \
math.exp(-TIME / maxwellTime)
disp[:, 1] = (A * PY**2 + 2.0 * B * PX * PY + C * PX**2) * \
math.exp(-TIME / maxwellTime)
disp_dot = numpy.zeros((npts, 2))
disp_dot[:, 0] = -(A * PX**2 + 2.0 * B * PX * PY + C *
PY**2) * math.exp(-TIME / maxwellTime) / maxwellTime
disp_dot[:, 1] = -(A * PY**2 + 2.0 * B * PX * PY + C *
PX**2) * math.exp(-TIME / maxwellTime) / maxwellTime
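    # Since disp is proportional to exp(-TIME / maxwellTime), its time
    # derivative is simply -disp / maxwellTime, which is what the two
    # disp_dot expressions above compute.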
# Create writer for spatial database file
writer = createWriter(
"IsotropicLinearMaxwellPlaneStrain_VarStrain_soln.spatialdb")
writer.write({'points': points,
'x': x,
'y': y,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "displacement_x", 'units': "m", 'data': disp[:, 0]},
{'name': "displacement_y",
'units': "m", 'data': disp[:, 1]},
{'name': "displacement_dot_x",
'units': "m/s", 'data': disp_dot[:, 0]},
{'name': "displacement_dot_y",
'units': "m/s", 'data': disp_dot[:, 1]},
]})
return
# ----------------------------------------------------------------------
def generatePerturbation():
PERT_DX = 500.0
PERT_AMPLITUDE = 1.0e-2
x = numpy.arange(XLIM[0], XLIM[1] + 0.1 * PERT_DX,
PERT_DX, dtype=numpy.float64)
y = numpy.arange(YLIM[0], YLIM[1] + 0.1 * PERT_DX,
PERT_DX, dtype=numpy.float64)
xgrid, ygrid = numpy.meshgrid(x, y)
points = numpy.vstack((xgrid.ravel(), ygrid.ravel())).transpose()
npts = points.shape[0]
disp = PERT_AMPLITUDE * (numpy.random.rand(npts, 2) - 0.5)
disp_dot = 0 * disp
# Create writer for spatial database file
writer = createWriter(
"IsotropicLinearMaxwellPlaneStrain_VarStrain_pert.spatialdb")
writer.write({'points': points,
'x': x,
'y': y,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "displacement_x", 'units': "m", 'data': disp[:, 0]},
{'name': "displacement_y",
'units': "m", 'data': disp[:, 1]},
{'name': "displacement_dot_x",
'units': "m/s", 'data': disp_dot[:, 0]},
{'name': "displacement_dot_y",
'units': "m/s", 'data': disp_dot[:, 1]},
]})
return
# ======================================================================
def generate():
generateAuxSubfields()
generateSolution()
generatePerturbation()
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
generate()
# End of file
| [
"math.exp",
"numpy.zeros_like",
"numpy.meshgrid",
"numpy.zeros",
"numpy.ones",
"spatialdata.geocoords.CSCart.CSCart",
"numpy.arange",
"numpy.random.rand",
"spatialdata.spatialdb.SimpleGridAscii.createWriter"
] | [((1287, 1353), 'numpy.arange', 'numpy.arange', (['XLIM[0]', '(XLIM[1] + 0.1 * DX)', 'DX'], {'dtype': 'numpy.float64'}), '(XLIM[0], XLIM[1] + 0.1 * DX, DX, dtype=numpy.float64)\n', (1299, 1353), False, 'import numpy\n'), ((1358, 1424), 'numpy.arange', 'numpy.arange', (['YLIM[0]', '(YLIM[1] + 0.1 * DX)', 'DX'], {'dtype': 'numpy.float64'}), '(YLIM[0], YLIM[1] + 0.1 * DX, DX, dtype=numpy.float64)\n', (1370, 1424), False, 'import numpy\n'), ((1440, 1460), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1454, 1460), False, 'import numpy\n'), ((2009, 2017), 'spatialdata.geocoords.CSCart.CSCart', 'CSCart', ([], {}), '()\n', (2015, 2017), False, 'from spatialdata.geocoords.CSCart import CSCart\n'), ((1607, 1626), 'numpy.ones', 'numpy.ones', (['(npts,)'], {}), '((npts,))\n', (1617, 1626), False, 'import numpy\n'), ((1637, 1656), 'numpy.ones', 'numpy.ones', (['(npts,)'], {}), '((npts,))\n', (1647, 1656), False, 'import numpy\n'), ((1667, 1686), 'numpy.ones', 'numpy.ones', (['(npts,)'], {}), '((npts,))\n', (1677, 1686), False, 'import numpy\n'), ((1711, 1730), 'numpy.ones', 'numpy.ones', (['(npts,)'], {}), '((npts,))\n', (1721, 1730), False, 'import numpy\n'), ((2454, 2486), 'numpy.zeros_like', 'numpy.zeros_like', (['totalStrain_22'], {}), '(totalStrain_22)\n', (2470, 2486), False, 'import numpy\n'), ((3341, 3414), 'spatialdata.spatialdb.SimpleGridAscii.createWriter', 'createWriter', (['"""IsotropicLinearMaxwellPlaneStrain_VarStrain_aux.spatialdb"""'], {}), "('IsotropicLinearMaxwellPlaneStrain_VarStrain_aux.spatialdb')\n", (3353, 3414), False, 'from spatialdata.spatialdb.SimpleGridAscii import createWriter\n'), ((5300, 5322), 'numpy.zeros', 'numpy.zeros', (['(npts, 2)'], {}), '((npts, 2))\n', (5311, 5322), False, 'import numpy\n'), ((5545, 5567), 'numpy.zeros', 'numpy.zeros', (['(npts, 2)'], {}), '((npts, 2))\n', (5556, 5567), False, 'import numpy\n'), ((5898, 5972), 'spatialdata.spatialdb.SimpleGridAscii.createWriter', 'createWriter', (['"""IsotropicLinearMaxwellPlaneStrain_VarStrain_soln.spatialdb"""'], {}), "('IsotropicLinearMaxwellPlaneStrain_VarStrain_soln.spatialdb')\n", (5910, 5972), False, 'from spatialdata.spatialdb.SimpleGridAscii import createWriter\n'), ((6825, 6901), 'numpy.arange', 'numpy.arange', (['XLIM[0]', '(XLIM[1] + 0.1 * PERT_DX)', 'PERT_DX'], {'dtype': 'numpy.float64'}), '(XLIM[0], XLIM[1] + 0.1 * PERT_DX, PERT_DX, dtype=numpy.float64)\n', (6837, 6901), False, 'import numpy\n'), ((6931, 7007), 'numpy.arange', 'numpy.arange', (['YLIM[0]', '(YLIM[1] + 0.1 * PERT_DX)', 'PERT_DX'], {'dtype': 'numpy.float64'}), '(YLIM[0], YLIM[1] + 0.1 * PERT_DX, PERT_DX, dtype=numpy.float64)\n', (6943, 7007), False, 'import numpy\n'), ((7048, 7068), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7062, 7068), False, 'import numpy\n'), ((7314, 7388), 'spatialdata.spatialdb.SimpleGridAscii.createWriter', 'createWriter', (['"""IsotropicLinearMaxwellPlaneStrain_VarStrain_pert.spatialdb"""'], {}), "('IsotropicLinearMaxwellPlaneStrain_VarStrain_pert.spatialdb')\n", (7326, 7388), False, 'from spatialdata.spatialdb.SimpleGridAscii import createWriter\n'), ((2203, 2232), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (2211, 2232), False, 'import math\n'), ((2326, 2355), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (2334, 2355), False, 'import math\n'), ((2403, 2432), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (2411, 2432), False, 'import math\n'), 
((2594, 2629), 'math.exp', 'math.exp', (['(-2.0 * TIME / maxwellTime)'], {}), '(-2.0 * TIME / maxwellTime)\n', (2602, 2629), False, 'import math\n'), ((2736, 2771), 'math.exp', 'math.exp', (['(-2.0 * TIME / maxwellTime)'], {}), '(-2.0 * TIME / maxwellTime)\n', (2744, 2771), False, 'import math\n'), ((2879, 2914), 'math.exp', 'math.exp', (['(-2.0 * TIME / maxwellTime)'], {}), '(-2.0 * TIME / maxwellTime)\n', (2887, 2914), False, 'import math\n'), ((3022, 3057), 'math.exp', 'math.exp', (['(-2.0 * TIME / maxwellTime)'], {}), '(-2.0 * TIME / maxwellTime)\n', (3030, 3057), False, 'import math\n'), ((3155, 3192), 'numpy.ones', 'numpy.ones', (['npts'], {'dtype': 'numpy.float64'}), '(npts, dtype=numpy.float64)\n', (3165, 3192), False, 'import numpy\n'), ((3289, 3326), 'numpy.ones', 'numpy.ones', (['npts'], {'dtype': 'numpy.float64'}), '(npts, dtype=numpy.float64)\n', (3299, 3326), False, 'import numpy\n'), ((5396, 5425), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (5404, 5425), False, 'import math\n'), ((5499, 5528), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (5507, 5528), False, 'import math\n'), ((3113, 3142), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (3121, 3142), False, 'import math\n'), ((3247, 3276), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (3255, 3276), False, 'import math\n'), ((5659, 5688), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (5667, 5688), False, 'import math\n'), ((5794, 5823), 'math.exp', 'math.exp', (['(-TIME / maxwellTime)'], {}), '(-TIME / maxwellTime)\n', (5802, 5823), False, 'import math\n'), ((7196, 7222), 'numpy.random.rand', 'numpy.random.rand', (['npts', '(2)'], {}), '(npts, 2)\n', (7213, 7222), False, 'import numpy\n'), ((2508, 2536), 'math.exp', 'math.exp', (['(TIME / maxwellTime)'], {}), '(TIME / maxwellTime)\n', (2516, 2536), False, 'import math\n'), ((2650, 2678), 'math.exp', 'math.exp', (['(TIME / maxwellTime)'], {}), '(TIME / maxwellTime)\n', (2658, 2678), False, 'import math\n'), ((2793, 2821), 'math.exp', 'math.exp', (['(TIME / maxwellTime)'], {}), '(TIME / maxwellTime)\n', (2801, 2821), False, 'import math\n'), ((2936, 2964), 'math.exp', 'math.exp', (['(TIME / maxwellTime)'], {}), '(TIME / maxwellTime)\n', (2944, 2964), False, 'import math\n')] |
# coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for making plots of robustness metric results and statistics."""
import datetime
import math
import os
from absl import logging
from matplotlib import pyplot as plt
import numpy as np
from rl_reliability_metrics.analysis import io_utils_oss as io_utils
from rl_reliability_metrics.analysis import plot_utils
from rl_reliability_metrics.analysis import stats
from rl_reliability_metrics.analysis import stats_utils
# Internal gfile dependencies
HATCH_PATTERNS = ('-', '/', '.', 'O', '+', 'o', 'x', '*', '\\')
ALGO_COLORS = ('r', 'y', 'g', 'b', 'm')
MARKERS = ('o', 's', 'v', '^', '<', '>')
TIMEFRAME_NAMES = ['Beginning', 'Middle', 'End']
UP_ARROW = r' $\uparrow$'
DOWN_ARROW = r' $\downarrow$'
class Plotter(object):
"""Class for making plots of metric results and statistics."""
def __init__(self,
data,
pvals_dir,
confidence_intervals_dir,
n_timeframes,
algorithms=None,
out_dir=None,
pthresh=0.01,
multiple_comparisons_method='benjamini-yekutieli',
subplot_axis_labels=True,
make_legend=False):
"""Initialize Plotter object.
Args:
data: DataDef object containing all the metric results.
pvals_dir: Path to directory containing p-values for comparisons between
pairs of algorithms.
confidence_intervals_dir: Path to directory containing bootstrap
confidence intervals.
n_timeframes: Total number of timeframes we are dividing each run into.
algorithms: If specified, these algorithms will be plotted, in this order.
If None, we plot all algorithms available in the data (order not
guaranteed).
out_dir: Path to directory where we save the plot images. If None, we
simply display the images without saving.
pthresh: p-value threshold for significance.
multiple_comparisons_method: String indicating method to use for multiple
        comparisons correction. See stats_utils.multiple_comparisons_correction
        for options.
subplot_axis_labels: Whether to add x- and y-axis labels for each subplot.
make_legend: Whether to make a legend.
"""
self.data_def = data
self.pvals_dir = pvals_dir
self.confidence_intervals_dir = confidence_intervals_dir
self.n_timeframes = n_timeframes
self.out_dir = out_dir
self.pthresh = pthresh
self.multiple_comparisons_method = multiple_comparisons_method
self.subplot_axis_labels = subplot_axis_labels
self.make_legend = make_legend
# Parse information from data_def
self.dataset = self.data_def.dataset
self.algorithms = algorithms if algorithms else self.data_def.algorithms
self.n_algo = len(self.algorithms)
self.n_task = len(self.data_def.tasks)
# Bonferroni-corrected p-value threshold
self.pthresh_corrected = stats_utils.multiple_comparisons_correction(
self.n_algo, self.pthresh, self.multiple_comparisons_method)
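    # Note on the corrected threshold (inferred from how this attribute is used
    # later in this class): for 'bonferroni' the helper returns a single scalar
    # threshold, while for rank-based corrections such as 'benjamini-yekutieli'
    # it returns one threshold per p-value rank (see how pthresh_corrected is
    # indexed by pval_rank in _plot_bars_and_significant_differences below).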
def make_plots(self, metric):
"""Make all plots for a given metric.
Args:
metric: String name of the metric.
"""
plot_utils.paper_figure_configs()
# Create a metric-specific StatsRunner object
stats_runner = stats.StatsRunner(self.data_def, metric, self.n_timeframes)
result_dims = stats_runner.result_dims
if result_dims == 'ATRP':
# Within-runs metric with eval points.
self._make_plots_with_eval_points(metric, stats_runner)
elif result_dims == 'ATR':
# Within-runs metrics without eval points (one value per run).
self._make_plots_no_eval_points(metric, stats_runner)
elif result_dims == 'ATP':
# Across-runs metric with eval points
self._make_plots_with_eval_points(metric, stats_runner)
else:
raise ValueError('plotting not implemented for result_dims: %s' %
result_dims)
def _save_fig(self, metric, plot_name):
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
filepath = os.path.join(self.out_dir,
'%s__%s__%s.png' % (metric, plot_name, timestamp))
io_utils.makedirs(os.path.dirname(filepath))
with open(filepath, 'wb') as f:
plt.savefig(f)
logging.info('Plot output to: %s', filepath)
def _make_plots_with_eval_points(self, metric, stats_runner):
"""Make plots for a metric evaluated at multiple evaluation points per run.
e.g. 'ATP' or 'ATRP' metrics.
Plot 1: raw metric values per task.
* One subplot per task.
* Each subplot contains a plot showing the metric values across evaluation
    points. For ATRP metrics, we show the median metric values with shaded
    regions indicating the IQR at each evaluation point.
Plot 2: Mean rankings across tasks.
* One subplot per timeframe.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""
# Set up figure for per-task raw values.
subplot_ncol_1 = 4
n_subplots_1 = self.n_task + 1 if self.make_legend else self.n_task
subplot_nrow_1 = math.ceil(n_subplots_1 / subplot_ncol_1)
fig1 = plt.figure(figsize=(4 * subplot_ncol_1, 4 * subplot_nrow_1))
# Set up figure for mean rankings.
subplot_ncol_2 = self.n_timeframes
if self.make_legend:
subplot_ncol_2 += 1
subplot_nrow_2 = 1
fig2 = plt.figure(figsize=(4 * subplot_ncol_2, 4 * subplot_nrow_2))
##=== Plot 1: Raw metric values per task ===##
plt.figure(fig1.number)
eval_point_idxs = stats_runner.get_timeframe_points(None)
eval_point_values = self.data_def.metric_params[metric]['eval_points']
metric_results = stats_runner.load_metric_results(
self.algorithms, eval_point_idxs, collapse_on_timepoints=False)
result_dims = stats_runner.result_dims
for i_task in range(self.n_task):
plt.subplot(subplot_nrow_1, subplot_ncol_1, i_task + 1)
task_results = np.squeeze(metric_results[:, i_task])
if len(eval_point_idxs) == 1:
task_results = np.expand_dims(task_results, -1)
if result_dims == 'ATP':
# For across-run metrics, we plot a single curve.
for i_algo in range(self.n_algo):
plt.plot(eval_point_values, task_results[i_algo, :],
marker=MARKERS[i_algo])
if self.subplot_axis_labels:
plt.xlabel('evaluation points', fontsize=16)
plt.ylabel('metric values', fontsize=16)
elif result_dims == 'ATRP':
# For per-run metrics, we plot the median and IQR across curves.
for i_algo in range(self.n_algo):
algo_color = ALGO_COLORS[i_algo]
task_algo_results = task_results[i_algo] # n_runs x n_eval_points
result_medians = np.median(task_algo_results, axis=0)
result_quartile1 = np.percentile(task_algo_results, q=25, axis=0)
result_quartile3 = np.percentile(task_algo_results, q=75, axis=0)
plt.plot(eval_point_values, result_medians, algo_color,
marker=MARKERS[i_algo])
plt.fill_between(
eval_point_values,
result_quartile1,
result_quartile3,
alpha=0.3,
color=algo_color)
if self.subplot_axis_labels:
plt.xlabel('evaluation points', fontsize=16)
plt.ylabel('metric values', fontsize=16)
else:
raise ValueError('result_dims must be ATP or ATRP, not %s' %
result_dims)
plot_utils.simple_axis(plt.gca())
plt.title(self.data_def.tasks[i_task])
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow_1, subplot_ncol_1, n_subplots_1)
self._lineplot_legend()
##=== Plot 2: Mean rankings (mean across tasks) ===##
for timeframe in range(self.n_timeframes):
# Load data for plotting.
timeframe_points = stats_runner.get_timeframe_points(timeframe)
pvals = self._load_pvals(metric, timeframe)
confidence_intervals = self._load_confidence_intervals(
metric, stats_runner, timeframe)
plt.figure(fig2.number)
metric_results = stats_runner.load_metric_results(
self.algorithms, timeframe_points, collapse_on_timepoints=True)
plt.subplot(subplot_nrow_2, subplot_ncol_2, timeframe + 1)
self._plot_bars_and_significant_differences(metric_results, pvals,
confidence_intervals,
stats_runner)
plt.title(TIMEFRAME_NAMES[timeframe], fontsize=14)
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow_2, subplot_ncol_2, subplot_ncol_2)
self._barplot_legend()
##=== Wrap up the figures ===##
for fig, plot_name in [(fig1, 'per-task_raw'), (fig2, 'mean_rankings')]:
if plot_name == 'per-task_raw':
suptitle_suffix = (
UP_ARROW if stats_runner.bigger_is_better else DOWN_ARROW)
else:
suptitle_suffix = ''
      plt.figure(fig.number)
self._wrap_up_figure(metric, plot_name, suptitle_suffix)
def _make_plots_no_eval_points(self, metric, stats_runner):
"""Make plots for a metric without evaluation points (one value per run).
e.g. 'ATR' metrics.
Plot 1: Raw metric values per task.
* One subplot per task.
* Each subplot contains a box-and-whisker plot showing the median metric
values for each algorithm, a box indicating 1st and 3rd quartiles, and
whiskers indicating the minimum and maximum values (excluding outliers,
defined as being outside 1.5x the inter-quartile range from the 1st and 3rd
quartiles).
Plot 2: Mean rankings across tasks.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""
# Load data for plotting.
metric_results = stats_runner.load_metric_results(
self.algorithms, timeframe_points=None)
pvals = self._load_pvals(metric)
confidence_intervals = self._load_confidence_intervals(metric, stats_runner)
##=== Plot 1: Raw metric values per task ===##
# Set up figure.
subplot_ncol = 4
n_subplot = self.n_task
if self.make_legend:
n_subplot += 1
subplot_nrow = math.ceil(n_subplot / subplot_ncol)
plt.figure(figsize=(4 * subplot_ncol, 4 * subplot_nrow))
# Plot the raw metric values as box-and-whisker plots.
for i_task in range(self.n_task):
plt.subplot(subplot_nrow, subplot_ncol, i_task + 1)
task_results = np.squeeze(metric_results[:, i_task, :])
boxplot = plt.boxplot(task_results.T, patch_artist=True)
for part in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(boxplot[part], color='k')
for i_patch, patch in enumerate(boxplot['boxes']):
patch.set(facecolor=ALGO_COLORS[i_patch])
plt.title(self.data_def.tasks[i_task], fontsize=16)
self._configure_axes('Raw metric values')
self._extend_ylims_past_zero(task_results)
plot_utils.simple_axis(plt.gca())
if self.make_legend:
plt.subplot(subplot_nrow, subplot_ncol, n_subplot)
self._barplot_legend()
# Wrap up the figure.
suptitle_suffix = (
UP_ARROW if stats_runner.bigger_is_better else DOWN_ARROW)
self._wrap_up_figure(
metric, plot_name='per-task_raw', suptitle_suffix=suptitle_suffix)
##=== Plot 2: Mean rankings (mean across tasks) ===##
# Set up figure.
subplot_ncol = 2 if self.make_legend else 1
subplot_nrow = 1
plt.figure(figsize=(4 * subplot_ncol, 4 * subplot_nrow))
# Plot mean rankings and show statistical differences
plt.subplot(subplot_nrow, subplot_ncol, 1)
self._plot_bars_and_significant_differences(metric_results, pvals,
confidence_intervals,
stats_runner)
plot_utils.simple_axis(plt.gca())
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow, subplot_ncol, subplot_ncol)
self._barplot_legend()
# Wrap up the figure.
self._wrap_up_figure(metric, plot_name='mean_rankings')
def _wrap_up_figure(self, metric, plot_name, suptitle_suffix=''):
"""Add suptitle, set tight layout, and save the figure."""
plt.suptitle(
plot_utils.METRICS_DISPLAY_NAMES[metric] + suptitle_suffix, fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
if self.out_dir:
self._save_fig(metric, plot_name)
def _load_pvals(self, metric, timeframe=None):
"""Load previously computed p-values.
Args:
metric: Which metric we are plotting.
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of p-values, with entries {'algo1.algo2': pval}
"""
pvals = {}
for algo1 in self.algorithms:
for algo2 in self.algorithms:
# Get path to p-value
pvals_filepath = ('%s/%s_%s_%s' %
(self.pvals_dir, metric, algo1, algo2))
if timeframe is not None:
pvals_filepath += '_%d' % timeframe
# Load the p-value
with open(pvals_filepath, 'r') as f:
pval = float(f.readline())
pvals['%s.%s' % (algo1, algo2)] = pval
logging.info('P-values loaded:')
logging.info(pvals)
return pvals
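  # On-disk layout implied by the path construction above: one plain-text file
  # per ordered algorithm pair, named
  #   <pvals_dir>/<metric>_<algo1>_<algo2>[_<timeframe>]
  # whose first line holds a single float p-value.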
def _load_confidence_intervals(self, metric, stats_runner, timeframe=None):
"""Load previously computed confidence intervals.
Args:
metric: Which metric we are plotting.
stats_runner: StatsRunner object
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of confidence intervals, with entries
{'algo': [ci_lower, ci_upper]}
"""
cis = {}
for algo in self.algorithms:
# Get path to confidence intervals
ci_filepath = '%s/%s_%s' % (self.confidence_intervals_dir, metric, algo)
if timeframe is not None:
ci_filepath += '_%d' % timeframe
# Load the p-value
with open(ci_filepath, 'r') as f:
line = f.readline()
ci = list(map(float, line.split(',')))
# Normalize to range (1, n_metrics)
if 'R' in stats_runner.result_dims:
ci[0] /= self.data_def.n_runs_per_experiment
ci[1] /= self.data_def.n_runs_per_experiment
cis[algo] = ci
logging.info('Confidence intervals loaded:')
logging.info(cis)
return cis
def _plot_bars_and_significant_differences(self, metric_results, pvals,
confidence_intervals,
stats_runner):
"""For a single timeframe, plot mean rank and show significant differences.
Args:
metric_results: Numpy array with metric values. First two dimensions
should be (n_algorithm, n_task)
pvals: p-values on comparison between each pair of algorithms. A dict with
entries {'algo1.algo2': pvalue}.
confidence_intervals: Confidence intervals on mean rank for each
algorithm. A dict with entries {'algo': [ci_lower, ci_upper]}.
stats_runner: StatsRunner object
"""
ymax = 1.32 * (len(self.algorithms))
y_pval_lines = 0.83
# First get the rankings across all algos
metric_ranks = stats_runner.rank_per_task(metric_results)
# Get mean ranks over tasks, for each algo
# (collapse across all other dimensions)
extra_dims = range(1, len(metric_ranks.shape))
mean_ranks = np.mean(metric_ranks, tuple(extra_dims))
# Normalize the ranks to range (1, n_algorithms)
if 'R' in stats_runner.result_dims:
mean_ranks /= self.data_def.n_runs_per_experiment
# Plot the mean rankings and error bars for each algo
for i_algo, algo in enumerate(self.algorithms):
plot_utils.flipped_errorbar(
x=i_algo,
y=mean_ranks[i_algo],
yerr=confidence_intervals[algo],
ymax=self.n_algo,
bar_color=ALGO_COLORS[i_algo],
hatch_pattern=HATCH_PATTERNS[i_algo],
x_offset=0.6,
)
# Rank order the p-values.
if self.multiple_comparisons_method != 'bonferroni':
# Get subset of the p-values: we don't need the reverse comparisons, and
# we don't need the self comparisons.
pvals_subset = {}
for i_algo, algo1 in enumerate(self.algorithms):
for j_algo in range(i_algo + 1, self.n_algo):
algo2 = self.algorithms[j_algo]
algo_str = '%s.%s' % (algo1, algo2)
pvals_subset[algo_str] = pvals[algo_str]
sorted_keys = sorted(pvals_subset, key=pvals_subset.get)
pval_ranks = {key: rank for rank, key in enumerate(sorted_keys)}
# Plot black bars indicating significant differences.
n_lines_plotted = 0
for i_algo, algo1 in enumerate(self.algorithms):
for j_algo in range(i_algo + 1, self.n_algo):
algo2 = self.algorithms[j_algo]
algo_pair_str = '%s.%s' % (algo1, algo2)
if self.multiple_comparisons_method != 'bonferroni':
pval_rank = pval_ranks[algo_pair_str]
pthresh_corrected = self.pthresh_corrected[pval_rank]
else:
pthresh_corrected = self.pthresh_corrected
if pvals[algo_pair_str] < pthresh_corrected:
x = [i_algo + 1, j_algo + 1]
y = [(y_pval_lines + n_lines_plotted * 0.03) * ymax] * 2
plt.plot(x, y, color='k')
n_lines_plotted += 1
self._configure_axes('normalized mean rank', range(1, self.n_algo + 1),
range(self.n_algo, 0, -1))
def _configure_axes(self, y_label, y_ticks=None, y_tick_labels=None):
"""Configure axis limits and labels."""
algo_abbreviations = [
plot_utils.ALGO_ABBREVIATIONS[algo] for algo in self.algorithms
]
plt.xticks(range(1, self.n_algo + 1), algo_abbreviations)
plt.xlim(0, len(self.algorithms) + 1)
if y_ticks:
plt.yticks(y_ticks)
if y_tick_labels:
plt.gca().set_yticklabels(y_tick_labels)
if self.subplot_axis_labels:
plt.xlabel('algorithm', fontsize=16)
plt.ylabel(y_label, fontsize=16)
plt.tick_params(top='off')
@staticmethod
def _extend_ylims_past_zero(data, tolerance=0.01, extension=0.1):
"""Extend y-axis to ensure that zero-values in the data are visible.
Args:
data: Data being plotted.
tolerance: Determines what values are considered too close to zero.
extension: Determines how far to extend the y-axis.
"""
ylims_orig = plt.gca().get_ylim()
abs_min = np.abs(np.min(data))
abs_max = np.abs(np.max(data))
# Extend below zero.
if abs_min < tolerance * abs_max:
ylim_lower = -ylims_orig[1] * extension
plt.ylim([ylim_lower, ylims_orig[1]])
# Extend above zero.
elif abs_max < tolerance * abs_min:
ylim_upper = -ylims_orig[0] * extension
plt.ylim([ylims_orig[0], ylim_upper])
def _barplot_legend(self):
"""Plot a legend showing the color/texture for each algorithm."""
for ibox in range(self.n_algo):
box_y = self.n_algo - ibox
plt.scatter(
0,
box_y,
s=300,
marker='s',
facecolor=ALGO_COLORS[ibox],
edgecolor='k',
hatch=HATCH_PATTERNS[ibox],
label=HATCH_PATTERNS[ibox])
plt.text(0.008, box_y - 0.15, self.algorithms[ibox], fontsize=14)
plt.xlim(-0.01, 0.05)
plot_utils.no_axis(plt.gca())
def _lineplot_legend(self):
"""Plot a legend showing the color/marker for each algorithm."""
for i_algo in range(self.n_algo):
y = self.n_algo - i_algo
color = ALGO_COLORS[i_algo]
plt.plot([0, 2], [y, y], color=color)
plt.plot(1, y, marker=MARKERS[i_algo], color=color)
plt.text(2.5, y - 0.002, self.algorithms[i_algo], fontsize=14)
ax = plt.gca()
plot_utils.no_axis(ax)
    ax.set_facecolor('white')
plt.xlim([0, 10])
plt.ylim([0, self.n_algo + 1])
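# Typical usage, sketched; the DataDef object, directory paths, and metric name
# below are illustrative placeholders rather than values defined in this module:
#   plotter = Plotter(data=data_def, pvals_dir='pvals_dir',
#                     confidence_intervals_dir='ci_dir', n_timeframes=3,
#                     out_dir='plot_dir')
#   plotter.make_plots('<metric_name>')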
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.boxplot",
"rl_reliability_metrics.analysis.plot_utils.flipped_errorbar",
"absl.logging.info",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.fill_between",
"matplot... | [((3519, 3628), 'rl_reliability_metrics.analysis.stats_utils.multiple_comparisons_correction', 'stats_utils.multiple_comparisons_correction', (['self.n_algo', 'self.pthresh', 'self.multiple_comparisons_method'], {}), '(self.n_algo, self.pthresh, self\n .multiple_comparisons_method)\n', (3562, 3628), False, 'from rl_reliability_metrics.analysis import stats_utils\n'), ((3772, 3805), 'rl_reliability_metrics.analysis.plot_utils.paper_figure_configs', 'plot_utils.paper_figure_configs', ([], {}), '()\n', (3803, 3805), False, 'from rl_reliability_metrics.analysis import plot_utils\n'), ((3876, 3935), 'rl_reliability_metrics.analysis.stats.StatsRunner', 'stats.StatsRunner', (['self.data_def', 'metric', 'self.n_timeframes'], {}), '(self.data_def, metric, self.n_timeframes)\n', (3893, 3935), False, 'from rl_reliability_metrics.analysis import stats\n'), ((4659, 4736), 'os.path.join', 'os.path.join', (['self.out_dir', "('%s__%s__%s.png' % (metric, plot_name, timestamp))"], {}), "(self.out_dir, '%s__%s__%s.png' % (metric, plot_name, timestamp))\n", (4671, 4736), False, 'import os\n'), ((4875, 4919), 'absl.logging.info', 'logging.info', (['"""Plot output to: %s"""', 'filepath'], {}), "('Plot output to: %s', filepath)\n", (4887, 4919), False, 'from absl import logging\n'), ((5887, 5927), 'math.ceil', 'math.ceil', (['(n_subplots_1 / subplot_ncol_1)'], {}), '(n_subplots_1 / subplot_ncol_1)\n', (5896, 5927), False, 'import math\n'), ((5939, 5999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4 * subplot_ncol_1, 4 * subplot_nrow_1)'}), '(figsize=(4 * subplot_ncol_1, 4 * subplot_nrow_1))\n', (5949, 5999), True, 'from matplotlib import pyplot as plt\n'), ((6164, 6224), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4 * subplot_ncol_2, 4 * subplot_nrow_2)'}), '(figsize=(4 * subplot_ncol_2, 4 * subplot_nrow_2))\n', (6174, 6224), True, 'from matplotlib import pyplot as plt\n'), ((6282, 6305), 'matplotlib.pyplot.figure', 'plt.figure', (['fig1.number'], {}), '(fig1.number)\n', (6292, 6305), True, 'from matplotlib import pyplot as plt\n'), ((11227, 11262), 'math.ceil', 'math.ceil', (['(n_subplot / subplot_ncol)'], {}), '(n_subplot / subplot_ncol)\n', (11236, 11262), False, 'import math\n'), ((11267, 11323), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4 * subplot_ncol, 4 * subplot_nrow)'}), '(figsize=(4 * subplot_ncol, 4 * subplot_nrow))\n', (11277, 11323), True, 'from matplotlib import pyplot as plt\n'), ((12514, 12570), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4 * subplot_ncol, 4 * subplot_nrow)'}), '(figsize=(4 * subplot_ncol, 4 * subplot_nrow))\n', (12524, 12570), True, 'from matplotlib import pyplot as plt\n'), ((12634, 12676), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow', 'subplot_ncol', '(1)'], {}), '(subplot_nrow, subplot_ncol, 1)\n', (12645, 12676), True, 'from matplotlib import pyplot as plt\n'), ((13278, 13367), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['(plot_utils.METRICS_DISPLAY_NAMES[metric] + suptitle_suffix)'], {'fontsize': '(14)'}), '(plot_utils.METRICS_DISPLAY_NAMES[metric] + suptitle_suffix,\n fontsize=14)\n', (13290, 13367), True, 'from matplotlib import pyplot as plt\n'), ((13377, 13418), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.03, 1, 0.95]'}), '(rect=[0, 0.03, 1, 0.95])\n', (13393, 13418), True, 'from matplotlib import pyplot as plt\n'), ((14322, 14354), 'absl.logging.info', 'logging.info', (['"""P-values loaded:"""'], {}), "('P-values loaded:')\n", (14334, 
14354), False, 'from absl import logging\n'), ((14359, 14378), 'absl.logging.info', 'logging.info', (['pvals'], {}), '(pvals)\n', (14371, 14378), False, 'from absl import logging\n'), ((15476, 15520), 'absl.logging.info', 'logging.info', (['"""Confidence intervals loaded:"""'], {}), "('Confidence intervals loaded:')\n", (15488, 15520), False, 'from absl import logging\n'), ((15525, 15542), 'absl.logging.info', 'logging.info', (['cis'], {}), '(cis)\n', (15537, 15542), False, 'from absl import logging\n'), ((19243, 19269), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'top': '"""off"""'}), "(top='off')\n", (19258, 19269), True, 'from matplotlib import pyplot as plt\n'), ((20944, 20953), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20951, 20953), True, 'from matplotlib import pyplot as plt\n'), ((20958, 20980), 'rl_reliability_metrics.analysis.plot_utils.no_axis', 'plot_utils.no_axis', (['ax'], {}), '(ax)\n', (20976, 20980), False, 'from rl_reliability_metrics.analysis import plot_utils\n'), ((21018, 21035), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 10]'], {}), '([0, 10])\n', (21026, 21035), True, 'from matplotlib import pyplot as plt\n'), ((21040, 21070), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, self.n_algo + 1]'], {}), '([0, self.n_algo + 1])\n', (21048, 21070), True, 'from matplotlib import pyplot as plt\n'), ((4787, 4812), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (4802, 4812), False, 'import os\n'), ((4856, 4870), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {}), '(f)\n', (4867, 4870), True, 'from matplotlib import pyplot as plt\n'), ((6658, 6713), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow_1', 'subplot_ncol_1', '(i_task + 1)'], {}), '(subplot_nrow_1, subplot_ncol_1, i_task + 1)\n', (6669, 6713), True, 'from matplotlib import pyplot as plt\n'), ((6735, 6772), 'numpy.squeeze', 'np.squeeze', (['metric_results[:, i_task]'], {}), '(metric_results[:, i_task])\n', (6745, 6772), True, 'import numpy as np\n'), ((8333, 8371), 'matplotlib.pyplot.title', 'plt.title', (['self.data_def.tasks[i_task]'], {}), '(self.data_def.tasks[i_task])\n', (8342, 8371), True, 'from matplotlib import pyplot as plt\n'), ((8426, 8483), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow_1', 'subplot_ncol_1', 'n_subplots_1'], {}), '(subplot_nrow_1, subplot_ncol_1, n_subplots_1)\n', (8437, 8483), True, 'from matplotlib import pyplot as plt\n'), ((8885, 8908), 'matplotlib.pyplot.figure', 'plt.figure', (['fig2.number'], {}), '(fig2.number)\n', (8895, 8908), True, 'from matplotlib import pyplot as plt\n'), ((9046, 9104), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow_2', 'subplot_ncol_2', '(timeframe + 1)'], {}), '(subplot_nrow_2, subplot_ncol_2, timeframe + 1)\n', (9057, 9104), True, 'from matplotlib import pyplot as plt\n'), ((9320, 9370), 'matplotlib.pyplot.title', 'plt.title', (['TIMEFRAME_NAMES[timeframe]'], {'fontsize': '(14)'}), '(TIMEFRAME_NAMES[timeframe], fontsize=14)\n', (9329, 9370), True, 'from matplotlib import pyplot as plt\n'), ((9425, 9484), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow_2', 'subplot_ncol_2', 'subplot_ncol_2'], {}), '(subplot_nrow_2, subplot_ncol_2, subplot_ncol_2)\n', (9436, 9484), True, 'from matplotlib import pyplot as plt\n'), ((9813, 9846), 'matplotlib.pyplot.figure', 'plt.figure', (['fig.number', 'plot_name'], {}), '(fig.number, plot_name)\n', (9823, 9846), True, 'from matplotlib import pyplot as plt\n'), ((11428, 11479), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['subplot_nrow', 'subplot_ncol', '(i_task + 1)'], {}), '(subplot_nrow, subplot_ncol, i_task + 1)\n', (11439, 11479), True, 'from matplotlib import pyplot as plt\n'), ((11501, 11541), 'numpy.squeeze', 'np.squeeze', (['metric_results[:, i_task, :]'], {}), '(metric_results[:, i_task, :])\n', (11511, 11541), True, 'import numpy as np\n'), ((11558, 11604), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['task_results.T'], {'patch_artist': '(True)'}), '(task_results.T, patch_artist=True)\n', (11569, 11604), True, 'from matplotlib import pyplot as plt\n'), ((11840, 11891), 'matplotlib.pyplot.title', 'plt.title', (['self.data_def.tasks[i_task]'], {'fontsize': '(16)'}), '(self.data_def.tasks[i_task], fontsize=16)\n', (11849, 11891), True, 'from matplotlib import pyplot as plt\n'), ((12061, 12111), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow', 'subplot_ncol', 'n_subplot'], {}), '(subplot_nrow, subplot_ncol, n_subplot)\n', (12072, 12111), True, 'from matplotlib import pyplot as plt\n'), ((12907, 12916), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12914, 12916), True, 'from matplotlib import pyplot as plt\n'), ((12972, 13025), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot_nrow', 'subplot_ncol', 'subplot_ncol'], {}), '(subplot_nrow, subplot_ncol, subplot_ncol)\n', (12983, 13025), True, 'from matplotlib import pyplot as plt\n'), ((16921, 17124), 'rl_reliability_metrics.analysis.plot_utils.flipped_errorbar', 'plot_utils.flipped_errorbar', ([], {'x': 'i_algo', 'y': 'mean_ranks[i_algo]', 'yerr': 'confidence_intervals[algo]', 'ymax': 'self.n_algo', 'bar_color': 'ALGO_COLORS[i_algo]', 'hatch_pattern': 'HATCH_PATTERNS[i_algo]', 'x_offset': '(0.6)'}), '(x=i_algo, y=mean_ranks[i_algo], yerr=\n confidence_intervals[algo], ymax=self.n_algo, bar_color=ALGO_COLORS[\n i_algo], hatch_pattern=HATCH_PATTERNS[i_algo], x_offset=0.6)\n', (16948, 17124), False, 'from rl_reliability_metrics.analysis import plot_utils\n'), ((19035, 19054), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_ticks'], {}), '(y_ticks)\n', (19045, 19054), True, 'from matplotlib import pyplot as plt\n'), ((19163, 19199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""algorithm"""'], {'fontsize': '(16)'}), "('algorithm', fontsize=16)\n", (19173, 19199), True, 'from matplotlib import pyplot as plt\n'), ((19206, 19238), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {'fontsize': '(16)'}), '(y_label, fontsize=16)\n', (19216, 19238), True, 'from matplotlib import pyplot as plt\n'), ((19670, 19682), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (19676, 19682), True, 'import numpy as np\n'), ((19705, 19717), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (19711, 19717), True, 'import numpy as np\n'), ((19835, 19872), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[ylim_lower, ylims_orig[1]]'], {}), '([ylim_lower, ylims_orig[1]])\n', (19843, 19872), True, 'from matplotlib import pyplot as plt\n'), ((20204, 20348), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', 'box_y'], {'s': '(300)', 'marker': '"""s"""', 'facecolor': 'ALGO_COLORS[ibox]', 'edgecolor': '"""k"""', 'hatch': 'HATCH_PATTERNS[ibox]', 'label': 'HATCH_PATTERNS[ibox]'}), "(0, box_y, s=300, marker='s', facecolor=ALGO_COLORS[ibox],\n edgecolor='k', hatch=HATCH_PATTERNS[ibox], label=HATCH_PATTERNS[ibox])\n", (20215, 20348), True, 'from matplotlib import pyplot as plt\n'), ((20432, 20497), 'matplotlib.pyplot.text', 'plt.text', (['(0.008)', '(box_y - 0.15)', 'self.algorithms[ibox]'], {'fontsize': '(14)'}), '(0.008, box_y - 0.15, self.algorithms[ibox], 
fontsize=14)\n', (20440, 20497), True, 'from matplotlib import pyplot as plt\n'), ((20504, 20525), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01)', '(0.05)'], {}), '(-0.01, 0.05)\n', (20512, 20525), True, 'from matplotlib import pyplot as plt\n'), ((20550, 20559), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20557, 20559), True, 'from matplotlib import pyplot as plt\n'), ((20770, 20807), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 2]', '[y, y]'], {'color': 'color'}), '([0, 2], [y, y], color=color)\n', (20778, 20807), True, 'from matplotlib import pyplot as plt\n'), ((20814, 20865), 'matplotlib.pyplot.plot', 'plt.plot', (['(1)', 'y'], {'marker': 'MARKERS[i_algo]', 'color': 'color'}), '(1, y, marker=MARKERS[i_algo], color=color)\n', (20822, 20865), True, 'from matplotlib import pyplot as plt\n'), ((20872, 20934), 'matplotlib.pyplot.text', 'plt.text', (['(2.5)', '(y - 0.002)', 'self.algorithms[i_algo]'], {'fontsize': '(14)'}), '(2.5, y - 0.002, self.algorithms[i_algo], fontsize=14)\n', (20880, 20934), True, 'from matplotlib import pyplot as plt\n'), ((4591, 4614), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4612, 4614), False, 'import datetime\n'), ((6832, 6864), 'numpy.expand_dims', 'np.expand_dims', (['task_results', '(-1)'], {}), '(task_results, -1)\n', (6846, 6864), True, 'import numpy as np\n'), ((8316, 8325), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8323, 8325), True, 'from matplotlib import pyplot as plt\n'), ((11692, 11726), 'matplotlib.pyplot.setp', 'plt.setp', (['boxplot[part]'], {'color': '"""k"""'}), "(boxplot[part], color='k')\n", (11700, 11726), True, 'from matplotlib import pyplot as plt\n'), ((12018, 12027), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12025, 12027), True, 'from matplotlib import pyplot as plt\n'), ((19628, 19637), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19635, 19637), True, 'from matplotlib import pyplot as plt\n'), ((19991, 20028), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[ylims_orig[0], ylim_upper]'], {}), '([ylims_orig[0], ylim_upper])\n', (19999, 20028), True, 'from matplotlib import pyplot as plt\n'), ((7007, 7083), 'matplotlib.pyplot.plot', 'plt.plot', (['eval_point_values', 'task_results[i_algo, :]'], {'marker': 'MARKERS[i_algo]'}), '(eval_point_values, task_results[i_algo, :], marker=MARKERS[i_algo])\n', (7015, 7083), True, 'from matplotlib import pyplot as plt\n'), ((7150, 7194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""evaluation points"""'], {'fontsize': '(16)'}), "('evaluation points', fontsize=16)\n", (7160, 7194), True, 'from matplotlib import pyplot as plt\n'), ((7205, 7245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""metric values"""'], {'fontsize': '(16)'}), "('metric values', fontsize=16)\n", (7215, 7245), True, 'from matplotlib import pyplot as plt\n'), ((18501, 18526), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""k"""'}), "(x, y, color='k')\n", (18509, 18526), True, 'from matplotlib import pyplot as plt\n'), ((19083, 19092), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19090, 19092), True, 'from matplotlib import pyplot as plt\n'), ((7543, 7579), 'numpy.median', 'np.median', (['task_algo_results'], {'axis': '(0)'}), '(task_algo_results, axis=0)\n', (7552, 7579), True, 'import numpy as np\n'), ((7609, 7655), 'numpy.percentile', 'np.percentile', (['task_algo_results'], {'q': '(25)', 'axis': '(0)'}), '(task_algo_results, q=25, axis=0)\n', (7622, 7655), True, 'import numpy as np\n'), ((7685, 7731), 'numpy.percentile', 
'np.percentile', (['task_algo_results'], {'q': '(75)', 'axis': '(0)'}), '(task_algo_results, q=75, axis=0)\n', (7698, 7731), True, 'import numpy as np\n'), ((7742, 7821), 'matplotlib.pyplot.plot', 'plt.plot', (['eval_point_values', 'result_medians', 'algo_color'], {'marker': 'MARKERS[i_algo]'}), '(eval_point_values, result_medians, algo_color, marker=MARKERS[i_algo])\n', (7750, 7821), True, 'from matplotlib import pyplot as plt\n'), ((7851, 7955), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['eval_point_values', 'result_quartile1', 'result_quartile3'], {'alpha': '(0.3)', 'color': 'algo_color'}), '(eval_point_values, result_quartile1, result_quartile3,\n alpha=0.3, color=algo_color)\n', (7867, 7955), True, 'from matplotlib import pyplot as plt\n'), ((8070, 8114), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""evaluation points"""'], {'fontsize': '(16)'}), "('evaluation points', fontsize=16)\n", (8080, 8114), True, 'from matplotlib import pyplot as plt\n'), ((8125, 8165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""metric values"""'], {'fontsize': '(16)'}), "('metric values', fontsize=16)\n", (8135, 8165), True, 'from matplotlib import pyplot as plt\n')] |
import os
import warnings
import astropy.units as u
import numpy as np
from astropy.time import Time
from sora.config import input_tests
from sora.config.decorators import deprecated_alias
from .utils import calc_fresnel
warnings.simplefilter('always', UserWarning)
class LightCurve:
"""Defines a Light Curve.
Parameters
----------
name : `str`
        The name of the LightCurve. Each time a LightCurve object is defined,
        the name must be different.
tref : `astropy.time.Time`, `str`, `float`
Instant of reference.
Format: `Julian Date`, string in ISO format or Time object.
        Required only if the LightCurve has input fluxes and the given time is
        not in Julian Date.
    central_bandpass : `int`, `float`, optional, default=0.7
The center band pass of the detector used in observation. Value in microns.
delta_bandpass : `int`, `float`, optional, default=0.3
The band pass width of the detector used in observation. Value in microns.
exptime : `int`, `float`
The exposure time of the observation, in seconds.
*NOT* required in cases *2*, *3* and *4* below.
*Required* in case *1* below.
**kwargs: `int`, `float`
Object velocity, distance, and star diameter.
Note
----
vel : `int`, `float`
Velocity in km/s.
dist : `int`, `float`
Object distance in AU.
d_star : `float`
Star diameter, in km.
Warning
-------
Input data must be one of the 4 options below:
1) Input data from file with time and flux
`file (str)`: a file with the time and flux. A third column with the error in
flux can also be given.
`usecols (int, tuple, array)`: Which columns to read, with the first being the
            time, the second the flux, and the third the flux error (optional).
**Example:**
>>> LightCurve(name, file, exptime) # dflux can also be given
2) Input data when file is not given:
`time`: time must be a list of times, in seconds from tref, or Julian Date, or
a Time object.
        `flux`: flux must be a list of fluxes. It must have the same length as time.
        `dflux`: if file not given, dflux must be a list of flux errors. It must
            have the same length as time. (not required)
**Example:**
>>> LightCurve(name, flux, time, exptime) # dflux can also be given
Cases for when `time` and `flux` are not given.
3) Input for a positive occultation:
`immersion`: The instant of immersion.
`emersion`: The instant of emersion.
`immersion_err`: Immersion time uncertainty, in seconds.
`emersion_err`: Emersion time uncertainty, in seconds.
**Example:**
>>> LightCurve(name, immersion, immersion_err, emersion, emersion_err)
4) Input for a negative occultation:
`initial_time`: The initial time of observation.
`end_time`: The end time of observation.
**Example:**
>>> LightCurve(name, initial_time, end_time)
"""
@deprecated_alias(lambda_0='central_bandpass', delta_lambda='delta_bandpass') # remove this line for v1.0
def __init__(self, name='', **kwargs):
allowed_kwargs = ['emersion', 'emersion_err', 'immersion', 'immersion_err', 'initial_time', 'end_time',
'file', 'time', 'flux', 'exptime', 'central_bandpass', 'delta_bandpass', 'tref', 'dflux',
'skiprows', 'usecols', 'dist', 'vel', 'd_star']
input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
input_done = False
self.dflux = None
self._name = name
self.flux = None
self.time_model = None
if 'tref' in kwargs:
self.tref = kwargs['tref']
if 'immersion' in kwargs:
self.immersion = kwargs['immersion']
self.immersion_err = kwargs.get('immersion_err', 0.0)
if self.immersion_err < 0:
warnings.warn("Immersion Error must be positive. Using absolute value.")
self.immersion_err = np.absolute(self.immersion_err)
input_done = True
if 'emersion' in kwargs:
self.emersion = kwargs['emersion']
self.emersion_err = kwargs.get('emersion_err', 0.0)
if self.emersion_err < 0:
warnings.warn("Emersion Error must be positive. Using absolute value.")
self.emersion_err = np.absolute(self.emersion_err)
try:
if self.emersion <= self.immersion:
raise ValueError("emersion time must be greater than immersion time")
except AttributeError:
pass
input_done = True
if 'initial_time' in kwargs and 'end_time' in kwargs:
self.initial_time = kwargs['initial_time']
self.end_time = kwargs['end_time']
if self.end_time <= self.initial_time:
raise ValueError('end_time must be greater than initial_time')
input_done = True
if not input_done:
try:
self.set_flux(**kwargs)
except:
raise ValueError('No allowed input conditions satisfied. Please refer to the tutorial.')
self.set_filter(central_bandpass=kwargs.get('central_bandpass', 0.70),
delta_bandpass=kwargs.get('delta_bandpass', 0.30))
self.dt = 0.0
@property
def fresnel_scale(self):
lamb = self.lambda_0*u.micrometer.to('km')
dlamb = self.delta_lambda*u.micrometer.to('km')
dist = self.dist*u.au.to('km')
fresnel_scale_1 = calc_fresnel(dist, lamb-dlamb/2.0)
fresnel_scale_2 = calc_fresnel(dist, lamb+dlamb/2.0)
fresnel_scale = (fresnel_scale_1 + fresnel_scale_2)/2.0
return fresnel_scale
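    # calc_fresnel (imported from .utils) is expected to implement the standard
    # occultation Fresnel scale, sqrt(lambda * dist / 2) with both quantities in
    # km; the property above simply averages the scale evaluated at the two
    # edges of the bandpass.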
@property
def central_bandpass(self):
return self.lambda_0
@property
def delta_bandpass(self):
return self.delta_lambda
@property
def name(self):
return self._name
@property
def tref(self):
if hasattr(self, '_tref'):
return self._tref
else:
raise AttributeError("'LightCurve' object has no attribute 'tref'")
@tref.setter
def tref(self, value):
if type(value) in [int, float]:
self.tref = Time(value, format='jd')
else:
try:
self._tref = Time(value)
except ValueError:
raise ValueError('{} is not a valid time format accepted by tref'.format(value))
@property
def immersion(self):
if hasattr(self, '_immersion'):
return self._immersion + self.dt*u.s
else:
raise AttributeError('The immersion time was not fitted or instantiated.')
@immersion.setter
def immersion(self, value):
if type(value) in [int, float]:
if value > 2400000:
self.immersion = Time(value, format='jd')
elif hasattr(self, 'tref'):
self.immersion = self.tref + value*u.s
else:
raise ValueError('{} can not be set without a reference time'.format(value))
else:
try:
self._immersion = Time(value)
except ValueError:
raise ValueError('{} is not a valid time format accepted by immersion'.format(value))
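    # Convention shared by the time setters in this class: a bare number larger
    # than 2400000 is interpreted as a Julian Date, a smaller number as seconds
    # relative to tref, and anything else must be parseable by astropy.time.Time.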
@property
def emersion(self):
if hasattr(self, '_emersion'):
return self._emersion + self.dt*u.s
else:
            raise AttributeError('The emersion time was not fitted or instantiated.')
@emersion.setter
def emersion(self, value):
if type(value) in [int, float]:
if value > 2400000:
self.emersion = Time(value, format='jd')
elif hasattr(self, 'tref'):
self.emersion = self.tref + value*u.s
else:
raise ValueError('{} can not be set without a reference time'.format(value))
else:
try:
self._emersion = Time(value)
except ValueError:
raise ValueError('{} is not a valid time format accepted by emersion'.format(value))
@property
def initial_time(self):
if hasattr(self, '_initial_time'):
return self._initial_time
else:
raise AttributeError("'LightCurve' object has no attribute 'initial_time'")
@initial_time.setter
def initial_time(self, value):
if type(value) in [int, float]:
if value > 2400000:
self.initial_time = Time(value, format='jd')
elif hasattr(self, 'tref'):
self.initial_time = self.tref + value*u.s
else:
raise ValueError('{} can not be set without a reference time'.format(value))
else:
try:
self._initial_time = Time(value)
except ValueError:
raise ValueError('{} is not a valid time format accepted by initial_time'.format(value))
@property
def end_time(self):
if hasattr(self, '_end_time'):
return self._end_time
else:
raise AttributeError("'LightCurve' object has no attribute 'end_time'")
@end_time.setter
def end_time(self, value):
if type(value) in [int, float]:
if value > 2400000:
self.end_time = Time(value, format='jd')
elif hasattr(self, 'tref'):
self.end_time = self.tref + value*u.s
else:
raise ValueError('{} can not be set without a reference time'.format(value))
else:
try:
self._end_time = Time(value)
except ValueError:
raise ValueError('{} is not a valid time format accepted by end_time'.format(value))
@property
def time_mean(self):
if hasattr(self, '_immersion') and hasattr(self, '_emersion'):
return Time((self.immersion.jd + self.emersion.jd)/2, format='jd')
else:
return Time((self.initial_time.jd + self.end_time.jd)/2, format='jd')
@property
def time(self):
try:
return (self._time - self.tref).sec
except:
raise AttributeError("'LightCurve' object has no attribute 'time'")
def set_flux(self, **kwargs):
"""Sets the flux for the LightCurve.
Parameters
----------
exptime : `int`, `float`, required
The exposure time of the observation, in seconds.
file : `str`
A file with the time and flux in the first and second columns,
respectively. A third column with error in flux can also be given.
        time
            If file is not given, time must be a list of times, in seconds from
            `tref`, or `Julian Date`, or a `Time object`.
        flux
            If file is not given, flux must be a list of fluxes. It must have
            the same length as time.
        dflux
            If file is not given, dflux must be a list of flux errors. It must
            have the same length as time.
tref : `astropy.time.Time`, `str`, `float`
Instant of reference. It can be in `Julian Date`, string in ISO
format or `Time object`.
usecols : `int`, `tuple`, array, optional
            Which columns to read, with the first being the time, the second
            the flux, and the third the flux error.
**kwargs : `int`, `float`
Object velocity, object distance, star diameter.
Note
----
vel : `int`, `float`
Velocity in km/s.
dist : `int`, `float`:
Object distance in AU.
d_star : `float`
Star diameter, in km.
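        Examples
        --------
        A minimal sketch; the file name below is hypothetical:
        >>> lc.set_flux(file='lightcurve.dat', exptime=0.1,
        ...             tref='2017-06-22 00:00:00')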
"""
from .utils import read_lc_file
input_done = False
usecols = None
if 'usecols' in kwargs:
usecols = kwargs['usecols']
skiprows = 0
if 'skiprows' in kwargs:
skiprows = int(kwargs['skiprows'])
if 'file' in kwargs:
try:
lc_data = read_lc_file(kwargs['file'], usecols=usecols, skiprows=skiprows)
if len(lc_data) == 2:
time, self.flux = lc_data
elif len(lc_data) == 3:
time, self.flux, self.dflux = lc_data
except:
pass
if hasattr(self, 'flux'):
self.flux_obs = self.flux
if not hasattr(self, 'flux_obs'):
raise ValueError('Input file must have 2 or 3 columns')
input_done = True
if 'time' in kwargs and 'flux' in kwargs:
if input_done:
raise ValueError('Only one type of input can be given. Please refer to the tutorial.')
self.flux = kwargs['flux']
time = kwargs['time']
if len(self.flux) != len(time):
raise ValueError('time and flux must have the same length')
if 'dflux' in kwargs:
self.dflux = kwargs['dflux']
if len(self.flux) != len(self.dflux):
raise ValueError('dflux must have the same length as flux and time')
input_done = True
if 'exptime' not in kwargs:
raise ValueError('exptime not defined')
if kwargs['exptime'] <= 0:
raise ValueError('Exposure time can not be zero or negative')
else:
self.exptime = kwargs['exptime']
if 'vel' in kwargs:
self.set_vel(vel=kwargs['vel'])
if 'dist' in kwargs:
self.set_dist(dist=kwargs['dist'])
if 'd_star' in kwargs:
self.set_star_diam(d_star=kwargs['d_star'])
if 'tref' in kwargs:
self.tref = kwargs['tref']
if 'time' in locals():
if type(time) == Time:
if not hasattr(self, 'tref'):
self.tref = Time(time[0].iso.split(' ')[0] + ' 00:00:00.000')
elif all(time > 2400000):
time = Time(time, format='jd')
if not hasattr(self, 'tref'):
self.tref = Time(time[0].iso.split(' ')[0] + ' 00:00:00.000')
elif not hasattr(self, 'tref'):
raise ValueError('tref must be given')
else:
time = self.tref + time*u.s
order = np.argsort(time)
self._time = time[order]
self.model = np.ones(len(time))
self.flux = self.flux[order]
self.flux_obs = self.flux
if self.dflux is not None:
self.dflux = self.dflux[order]
self.initial_time = np.min(time)
self.end_time = np.max(time)
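            # Estimate the sampling cycle as the most frequent interval between
            # consecutive exposures, which is robust to occasional gaps.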
time_diffs = np.diff(self._time[0:]).tolist()
self.cycle = max(set(time_diffs), key=time_diffs.count).sec
if self.cycle < self.exptime:
warnings.warn('Exposure time ({:0.4f} seconds) higher than Cycle time ({:0.4f} seconds)'.
format(self.exptime, self.cycle))
def set_exptime(self, exptime):
"""Sets the light curve exposure time.
Parameters
----------
exptime : `int`, `float`
Exposure time, in seconds.
"""
exptime = u.Quantity(exptime, unit=u.s)
if not np.isscalar(exptime):
raise TypeError('Exposure time must be an integer, a float or an Astropy Unit object')
if exptime.value <= 0:
raise ValueError('Exposure time can not be zero or negative')
self.exptime = exptime.value
try:
if self.cycle < self.exptime:
warnings.warn('Exposure time ({:0.4f} seconds) higher than Cycle time ({:0.4f} seconds)'.
format(self.exptime, self.cycle))
except:
pass
def set_vel(self, vel):
"""Sets the occultation velocity.
Parameters
----------
vel : `int`, `float`
Velocity in km/s.
"""
vel = u.Quantity(vel, unit=u.km/u.s)
self.vel = np.absolute(vel.value)
def set_dist(self, dist):
"""Sets the object distance.
Parameters
----------
dist : `int`, `float`
Object distance in AU.
"""
dist = u.Quantity(dist, unit=u.AU)
if dist.value < 0:
warnings.warn("distance cannot be negative. Using absolute value.")
self.dist = np.absolute(dist.value)
def set_star_diam(self, d_star):
"""Sets the star diameter.
Parameters
----------
d_star : `float`
Star diameter, in km.
"""
d_star = u.Quantity(d_star, unit=u.km)
if d_star.value < 0:
warnings.warn("star diameter cannot be negative. Using absolute value.")
self.d_star = np.absolute(d_star.value)
@deprecated_alias(lambda_0='central_bandpass', delta_lambda='delta_bandpass') # remove this line for v1.0
def set_filter(self, central_bandpass, delta_bandpass):
"""Sets the filter bandwidth in microns.
Parameters
----------
central_bandpass : `float`
Center band in microns.
delta_bandpass : `float`
Bandwidth in microns.
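        Examples
        --------
        The values below mirror the defaults used at instantiation:
        >>> lc.set_filter(central_bandpass=0.70, delta_bandpass=0.30)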
"""
central_bandpass = u.Quantity(central_bandpass, unit=u.micrometer)
if central_bandpass.value <= 0:
raise ValueError("central bandpass cannot be negative.")
self.lambda_0 = central_bandpass.value
delta_bandpass = u.Quantity(delta_bandpass, unit=u.micrometer)
        if delta_bandpass.value <= 0:
            raise ValueError("delta bandpass cannot be zero or negative")
self.delta_lambda = delta_bandpass.value
if (central_bandpass - delta_bandpass).value <= 0:
raise ValueError("The given central and delta bandpass give a range ({}, {}) microns. Bandpass cannot be negative. "
"Please give appropriate values".format(*(central_bandpass +
np.array([-1, 1])*delta_bandpass).value))
def calc_magnitude_drop(self, mag_star, mag_obj):
"""Determines the magnitude drop of the occultation.
Parameters
----------
mag_star : `int`, `float`
Star magnitude.
        mag_obj : `int`, `float`
Object apparent magnitude to the date.
        Note
        ----
        The results are stored in the attributes `mag_drop` (magnitude drop
        for the given magnitudes) and `bottom_flux` (normalized bottom flux
        for the given magnitudes); the method itself returns nothing.
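        Examples
        --------
        Illustrative magnitudes:
        >>> lc.calc_magnitude_drop(mag_star=9.0, mag_obj=14.0)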
"""
from .utils import calc_magnitude_drop
mag_drop, bottom_flux = calc_magnitude_drop(mag_star, mag_obj)
self.mag_drop = mag_drop
self.bottom_flux = bottom_flux
def normalize(self, poly_deg=None, mask=None, flux_min=0.0, flux_max=1.0, plot=False):
"""Returns the fresnel scale.
Parameters
----------
poly_deg : `int`
Degree of the polynomial to be fitted.
mask : `bool` array
Which values to be fitted.
flux_min : `int`, `float`
Event flux to be set as 0.
flux_max : `int`, `float`
Baseline flux to be set as 1.
plot : `bool`
If True plot the steps for visual aid.
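        Examples
        --------
        A minimal sketch using a first-degree baseline polynomial:
        >>> lc.normalize(poly_deg=1, plot=True)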
"""
from .utils import fit_pol
import matplotlib.pyplot as plt
# Create a mask where the polynomial fit will be done
if not all(self.flux):
raise ValueError('Normalization is only possible when a LightCurve is instantiated with time and flux.')
self.reset_flux()
lc_flux = (self.flux - flux_min)/(flux_max-flux_min)
if mask is None:
preliminar_occ = self.occ_detect(maximum_duration=((self.end_time - self.initial_time).value*u.d.to('s'))/3)
tmax = preliminar_occ['emersion_time']+1.00*preliminar_occ['occultation_duration']
tmin = preliminar_occ['immersion_time']-1.00*preliminar_occ['occultation_duration']
chord = preliminar_occ['occultation_duration']
mask = np.invert((self.time > tmin-(chord/2)) & (self.time < tmax+(chord/2)))
norm_time = (self.time - self.time.min())/(self.time.max()-self.time.min())
if poly_deg is not None:
n = poly_deg
p, err = fit_pol(norm_time[mask], lc_flux[mask], n)
flux_poly_model = np.zeros(len(norm_time))
for ii in np.arange(n+1):
flux_poly_model = flux_poly_model + p[ii]*(norm_time**(n-ii))
if plot:
plt.plot(norm_time[mask], lc_flux[mask], 'k.-')
plt.plot(norm_time[mask], flux_poly_model[mask], 'r-')
plt.title('Polynomial degree = {}'.format(n), fontsize=15)
plt.show()
else:
n = 0
p, err = fit_pol(norm_time[mask], lc_flux[mask], n)
flux_poly_model = np.zeros(len(norm_time))
for ii in np.arange(n+1):
flux_poly_model += p[ii]*(norm_time**(n-ii))
if plot:
plt.plot(norm_time[mask], lc_flux[mask], 'k.-')
plt.plot(norm_time[mask], flux_poly_model[mask], 'r-')
plt.title('Polynomial degree = {}'.format(n), fontsize=15)
plt.show()
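            # Increase the polynomial degree while the residual-variance ratio
            # (F) improves by more than 5% over the previous fit.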
for nn in np.arange(1, 10):
p, err = fit_pol(norm_time[mask], lc_flux[mask], nn)
flux_poly_model_new = np.zeros(len(norm_time))
for ii in np.arange(nn+1):
flux_poly_model_new += p[ii]*(norm_time**(nn-ii))
F = np.var(flux_poly_model[mask]-lc_flux[mask])/np.var(flux_poly_model_new[mask]-lc_flux[mask])
if F > 1.05:
flux_poly_model = flux_poly_model_new.copy()
n = nn
if plot:
plt.plot(norm_time[mask], lc_flux[mask], 'k.-')
plt.plot(norm_time[mask], flux_poly_model[mask], 'r-')
plt.title('Polynomial degree = {}'.format(nn), fontsize=15)
plt.show()
else:
print('Normalization using a {} degree polynomial'.format(n))
print('There is no improvement with a {} degree polynomial'.format(n+1))
break
self.flux = lc_flux/flux_poly_model
self.normalizer_flux = flux_poly_model
self.normalizer_mask = mask
def reset_flux(self):
""" Resets flux for original values
"""
try:
self.flux = self.flux_obs
except:
raise ValueError('Reset is only possible when a LightCurve is instantiated with time and flux.')
return
def occ_model(self, immersion_time, emersion_time, opacity, mask, npt_star=12,
time_resolution_factor=10, flux_min=0, flux_max=1):
"""Returns the modelled light curve.
The modelled light curve takes into account the fresnel diffraction, the
star diameter and the instrumental response.
Parameters
----------
immersion_time : `int`, `float`
Immersion time, in seconds.
emersion_time : `int`, `float`
Emersion time, in seconds.
opacity : `int`, `float`
            Opacity. Opaque = 1.0, transparent = 0.0.
mask : `bool` array
Mask with True values to be computed.
npt_star : `int`, default=12
Number of subdivisions for computing the star size effects.
        time_resolution_factor : `int`, `float`, default=10
            Number of steps per Fresnel scale used for modelling the light curve.
flux_min : `int`, `float`, default=0
Bottom flux (only object).
flux_max : `int`, `float`, default=1
Base flux (object plus star).
"""
from .utils import bar_fresnel
# Computing the fresnel scale
lamb = self.lambda_0*u.micrometer.to('km')
dlamb = self.delta_lambda*u.micrometer.to('km')
dist = self.dist*u.au.to('km')
vel = np.absolute(self.vel)
time_obs = self.time[mask]
fresnel_scale_1 = calc_fresnel(dist, lamb-dlamb/2.0)
fresnel_scale_2 = calc_fresnel(dist, lamb+dlamb/2.0)
fresnel_scale = (fresnel_scale_1 + fresnel_scale_2)/2.0
time_resolution = (np.min([fresnel_scale/vel, self.exptime]))/time_resolution_factor
# Creating a high resolution curve to compute fresnel diffraction, stellar diameter and instrumental integration
time_model = np.arange(time_obs.min()-5*self.exptime, time_obs.max()+5*self.exptime, time_resolution)
# Changing X: time (s) to distances in the sky plane (km), considering the tangential velocity (vel in km/s)
x = time_model*vel
x01 = immersion_time*vel
x02 = emersion_time*vel
        # Computing fresnel diffraction for the case where the star size is negligible
flux_fresnel_1 = bar_fresnel(x, x01, x02, fresnel_scale_1, opacity)
flux_fresnel_2 = bar_fresnel(x, x01, x02, fresnel_scale_2, opacity)
flux_fresnel = (flux_fresnel_1 + flux_fresnel_2)/2.
flux_star = flux_fresnel.copy()
if self.d_star > 0:
            # Computing fresnel diffraction for the case where the star size is not negligible
resolucao = (self.d_star/2)/npt_star
flux_star_1 = np.zeros(len(time_model))
flux_star_2 = np.zeros(len(time_model))
# Computing stellar diameter only near the immersion or emersion times
star_diam = (np.absolute(x - x01) < 3*self.d_star) + (np.absolute(x - x02) < 3*self.d_star)
p = np.arange(-npt_star, npt_star)*resolucao
coeff = np.sqrt(np.absolute((self.d_star/2)**2 - p**2))
for ii in np.where(star_diam == True)[0]:
xx = x[ii] + p
flux1 = bar_fresnel(xx, x01, x02, fresnel_scale_1, opacity)
flux2 = bar_fresnel(xx, x01, x02, fresnel_scale_2, opacity)
flux_star_1[ii] = np.sum(coeff*flux1)/coeff.sum()
flux_star_2[ii] = np.sum(coeff*flux2)/coeff.sum()
flux_star[ii] = (flux_star_1[ii] + flux_star_2[ii])/2.
flux_inst = np.zeros(len(time_obs))
for i in range(len(time_obs)):
event_model = (time_model > time_obs[i]-self.exptime/2.) & (time_model < time_obs[i]+self.exptime/2.)
flux_inst[i] = (flux_star[event_model]).mean()
self.model[mask] = flux_inst*(flux_max - flux_min) + flux_min
self.time_model = time_model
self.model_star = flux_star*(flux_max - flux_min) + flux_min
self.model_fresnel = flux_fresnel*(flux_max - flux_min) + flux_min
ev_model = (time_model > immersion_time) & (time_model < emersion_time)
flux_box = np.ones(len(time_model))
flux_box[ev_model] = (1-opacity)**2
flux_box = flux_box*(flux_max - flux_min) + flux_min
self.model_geometric = flux_box
self.baseflux = flux_max
self.bottomflux = flux_min
def occ_lcfit(self, **kwargs):
"""Monte Carlo chi square fit for occultations lightcurve.
Parameters
----------
tmin : `int`, `float`
Minimum time to consider in the fit procedure, in seconds.
tmax : `int`, `float`
Maximum time to consider in the fit procedure, in seconds.
flux_min : `int`, `float`, default=0
Bottom flux (only object).
flux_max :`int`, `float`, default=1
Base flux (object plus star).
immersion_time : `int`, `float`
Initial guess for immersion time, in seconds.
emersion_time : `int`, `float`
Initial guess for emersion time, in seconds.
opacity : `int`, `float`, default=1
Initial guess for opacity. Opaque = 1, Transparent = 0.
delta_t : `int`, `float`
Interval to fit immersion or emersion time.
dopacity : `int`, `float`, default=0
Interval to fit opacity.
sigma : `int`, `float`, `array`, 'auto'
            Flux errors. If None, it will use `self.dflux`. If 'auto', it
            will be calculated using the region outside the event.
loop : `int`, default=10000
Number of tests to be done.
sigma_result : `int`, `float`
Sigma value to be considered as result.
Returns
-------
chi2 : `sora.extra.ChiSquare`
ChiSquare object.
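        Examples
        --------
        A minimal sketch with illustrative initial guesses, given in seconds
        from `tref`:
        >>> chi2 = lc.occ_lcfit(immersion_time=7896.7, emersion_time=7936.8)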
"""
from sora.config.visuals import progressbar
from sora.extra import ChiSquare
allowed_kwargs = ['tmin', 'tmax', 'flux_min', 'flux_max', 'immersion_time', 'emersion_time', 'opacity',
'delta_t', 'dopacity', 'sigma', 'loop', 'sigma_result']
input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
if not hasattr(self, 'flux'):
raise ValueError('Fit curve is only possible when a LightCurve is instantiated with time and flux.')
preliminar_occ = self.occ_detect()
delta_t = 2*self.cycle
loop = kwargs.get('loop', 10000)
tmax = self.time.max()
tmin = self.time.min()
immersion_time = tmin - self.exptime
do_immersion = False
emersion_time = tmax + self.exptime
do_emersion = False
opacity = kwargs.get('opacity', 1.0)
delta_opacity = kwargs.get('dopacity', 0.0)
do_opacity = 'dopacity' in kwargs
if ('immersion_time' not in kwargs) and ('emersion_time' not in kwargs):
immersion_time = preliminar_occ['immersion_time']
do_immersion = True
emersion_time = preliminar_occ['emersion_time']
do_emersion = True
delta_t = 5*preliminar_occ['time_err']
tmax = emersion_time+2*preliminar_occ['occultation_duration']
tmin = immersion_time-2*preliminar_occ['occultation_duration']
if 2*preliminar_occ['occultation_duration'] < 10*self.cycle:
tmax = emersion_time + 10*self.cycle
tmin = immersion_time - 10*self.cycle
tmax = kwargs.get('tmax', tmax)
tmin = kwargs.get('tmin', tmin)
delta_t = kwargs.get('delta_t', delta_t)
        if 'immersion_time' in kwargs:
            immersion_time = kwargs['immersion_time']
            do_immersion = True
        # t_i and t_e must exist for every path (including the auto-detected
        # one), so the randomized arrays are built outside the kwargs checks.
        t_i = immersion_time + delta_t*(2*np.random.random(loop) - 1)
        if 'emersion_time' in kwargs:
            emersion_time = kwargs['emersion_time']
            do_emersion = True
        t_e = emersion_time + delta_t*(2*np.random.random(loop) - 1)
mask = (self.time >= tmin) & (self.time <= tmax)
if 'sigma' not in kwargs:
if self.dflux is not None:
sigma = self.dflux
else:
sigma = 'auto'
else:
if type(kwargs['sigma']) in [float, int]:
sigma = np.repeat(kwargs['sigma'], len(self.flux))
elif kwargs['sigma'] is None:
sigma = self.dflux
else:
sigma = kwargs['sigma']
if type(sigma) is str and sigma == 'auto':
mask_sigma = (((self.time >= tmin) & (self.time < immersion_time - self.exptime)) +
((self.time > emersion_time + self.exptime) & (self.time <= tmax)))
sigma = np.repeat(self.flux[mask_sigma].std(ddof=1), len(self.flux))
opas = opacity + delta_opacity*(2*np.random.random(loop) - 1)
opas[opas > 1.], opas[opas < 0.] = 1.0, 0.0
flux_min = kwargs.get('flux_min', 1 - preliminar_occ['depth'])
flux_max = kwargs.get('flux_max', preliminar_occ['baseline'])
sigma_result = kwargs.get('sigma_result', 1)
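        # Keep each randomized (immersion, emersion) pair ordered in time,
        # swapping pairs where the immersion guess fell after the emersion one.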
tflag = np.zeros(loop)
tflag[t_i > t_e] = t_i[t_i > t_e]
t_i[t_i > t_e] = t_e[t_i > t_e]
t_e[t_i > t_e] = tflag[t_i > t_e]
chi2 = 999999*np.ones(loop)
for i in progressbar(range(loop), 'LightCurve fit:'):
model_test = self.__occ_model(t_i[i], t_e[i], opas[i], mask, flux_min=flux_min, flux_max=flux_max)
chi2[i] = np.sum(((self.flux[mask] - model_test)**2)/(sigma[mask]**2))
kkwargs = {}
if do_immersion:
kkwargs['immersion'] = t_i
if do_emersion:
kkwargs['emersion'] = t_e
if do_opacity:
kkwargs['opacity'] = opas
chisquare = ChiSquare(chi2, len(self.flux[mask]), **kkwargs)
result_sigma = chisquare.get_nsigma(sigma=sigma_result)
if 'immersion' in result_sigma:
self._immersion = self.tref + result_sigma['immersion'][0]*u.s
self.immersion_err = result_sigma['immersion'][1]
immersion_time = result_sigma['immersion'][0]
else:
try:
immersion_time = (self._immersion.jd - self.tref.jd)*u.d.to('s')
except:
pass
if 'emersion' in result_sigma:
self._emersion = self.tref + result_sigma['emersion'][0]*u.s
self.emersion_err = result_sigma['emersion'][1]
emersion_time = result_sigma['emersion'][0]
else:
try:
emersion_time = (self._emersion.jd - self.tref.jd)*u.d.to('s')
except:
pass
if 'opacity' in result_sigma:
opacity = result_sigma['opacity'][0]
# Run occ_model() to save best parameters in the Object.
self.occ_model(immersion_time, emersion_time, opacity, np.repeat(True, len(self.flux)),
flux_min=flux_min, flux_max=flux_max)
self.lc_sigma = sigma
self.chisquare = chisquare
self.opacity = opacity
return chisquare
def plot_lc(self, ax=None):
""" Plots the light curve
"""
import matplotlib.pyplot as plt
if not any(self.flux):
raise ValueError('Plotting the light curve is only possible when the '
'Object LightCurve is instantiated with time and flux')
ax = ax or plt.gca()
ax.plot(self.time, self.flux, 'k.-', label='Obs.', zorder=0)
if any(self.model):
ax.plot(self.time, self.model, 'r-', label='Model', zorder=2)
ax.scatter(self.time, self.model, s=50, facecolors='none', edgecolors='r', zorder=3)
ax.set_xlabel('Time [seconds]', fontsize=20)
ax.set_ylabel('Relative Flux', fontsize=20)
ax.legend()
def plot_model(self, ax=None):
""" Plots the modelled light curve
"""
import matplotlib.pyplot as plt
if not all(self.time_model):
raise ValueError('Plotting the model light curve is only possible after the model '
'[LightCurve.occ_model()] or the fit [LightCurve.occ_lcfit()]')
ax = ax or plt.gca()
ax.plot(self.time_model, self.model_geometric, 'c-', label='Geometric', zorder=1)
ax.plot(self.time_model, self.model_fresnel, 'b-', label='Fresnel', zorder=1)
ax.plot(self.time_model, self.model_star, 'g-', label='Star diam.', zorder=1)
ax.set_xlabel('Time [seconds]', fontsize=20)
ax.set_ylabel('Relative Flux', fontsize=20)
ax.legend()
def to_log(self, namefile=None):
"""Saves the light curve log to a file.
Parameters
----------
namefile : `str`
Filename to save the log.
"""
if namefile is None:
namefile = self.name.replace(' ', '_')+'.log'
f = open(namefile, 'w')
f.write(self.__str__())
f.close()
def to_file(self, namefile=None):
"""Saves the light curve to a file.
Parameters
----------
namefile : `str`
Filename to save the data.
"""
# Observational data
if namefile is None:
folder = ''
file = self.name.replace(' ', '_')+'.dat'
else:
folder = os.path.dirname(namefile)
file = os.path.basename(namefile)
data = np.array([(self.time*u.s + self.tref).jd, self.time, self.flux, self.model, self.flux-self.model])
        column_names = ['Time JD', 'Time relative to {} UTC in seconds'.format(self.tref.iso),
                        'Observational Flux', 'Modelled Flux', 'Residual O-C']
        np.savetxt(os.path.join(folder, file), data.T, fmt='%11.8f')
        f = open(os.path.join(folder, file) + '.label', 'w')
        for i, name in enumerate(column_names):
f.write('Column {}: {}\n'.format(i+1, name))
f.close()
# Complete Model
if all(self.time_model):
data_model = np.array([(self.time_model*u.s + self.tref).jd, self.time_model, self.model_geometric,
self.model_fresnel, self.model_star])
            column_names_model = ['Model time JD', 'Model time relative to {} UTC in seconds'.format(self.tref.iso),
                                  'Geometric Model', 'Model with Fresnel diffraction', 'Model with star diameter']
            np.savetxt(os.path.join(folder, 'model_'+file), data_model.T, fmt='%11.8f')
            f = open(os.path.join(folder, 'model_'+file)+'.label', 'w')
            for i, name in enumerate(column_names_model):
f.write('Column {}: {}\n'.format(i+1, name))
f.close()
def occ_detect(self, maximum_duration=None, dur_step=None, snr_limit=None,
n_detections=None, plot=False):
"""Detects automatically the occultation event in the light curve.
Detects a 'square well' shaped transit. All parameters are optional.
Parameters
----------
maximum_duration : `float`, default: light curve time span
Maximum duration of the occultation event.
dur_step : `float`, default: 1/2 of sampling rate
Step size to sweep occultation duration event.
snr_limit : `float`, default=None
Minimum occultation SNR.
n_detections : `int`, default=1
            Number of detections regardless of the SNR. `n_detections` is
superseded by `snr_limit`.
plot : `bool`
True if output plots are desired.
Returns
-------
OrderedDict : `dict`
An ordered dictionary of :attr:`name`::attr:`value` pairs for each
parameter.
Examples
--------
>>> lc = LightCurve(time=time, flux=flux, exptime=0.0, name='lc_example')
>>> params = lc.occ_detect()
>>> params
{'rank': 1,
'occultation_duration': 40.1384063065052,
'central_time': 7916.773870512843,
'immersion_time': 7896.7046673595905,
'emersion_time': 7936.843073666096,
'time_err': 0.05011036992073059,
'depth': 0.8663887801707082,
'depth_err': 0.10986223384336465,
'baseline': 0.9110181732552853,
'baseline_err': 0.19045768512595365,
'snr': 7.886138392251848,
'occ_mask': array([False, False, False, ..., False, False, False])}
"""
from .occdetect import occ_detect
occ = occ_detect(self.flux, self.dflux, self.time, self.cycle, maximum_duration=maximum_duration,
dur_step=dur_step, snr_limit=snr_limit, n_detections=n_detections, plot=plot)
return occ
def __occ_model(self, immersion_time, emersion_time, opacity, mask, npt_star=12,
time_resolution_factor=10, flux_min=0.0, flux_max=1.0):
"""Private function. Returns the modelled light curve.
Returns the modelled light curve considering fresnel diffraction, star
diameter and instrumental response, intended for fitting inside the
`self.occ_lcfit()`.
Parameters
----------
immersion_time : `int`, `float`
Immersion time, in seconds.
emersion_time : `int`, `float`
Emersion time, in seconds.
        opacity : `int`, `float`
Opacity. Opaque = 1, Transparent = 0.
mask : `bool` array
Mask with True values to be computed.
npt_star : `int`, default=12
Number of subdivisions for computing the star size effects.
        time_resolution_factor : `int`, `float`, default=10
            Number of steps per Fresnel scale used for modelling the light curve.
flux_min : `int`, `float`, default=0
Bottom flux (only object).
flux_max : `int`, `float`, default=1
Base flux (object plus star).
Returns
-------
flux_inst : array
Modelled Instrumental light flux.
"""
from .utils import bar_fresnel
# Computing the fresnel scale
lamb = self.lambda_0*u.micrometer.to('km')
dlamb = self.delta_lambda*u.micrometer.to('km')
dist = self.dist*u.au.to('km')
vel = np.absolute(self.vel)
time_obs = self.time[mask]
fresnel_scale_1 = calc_fresnel(dist, lamb-dlamb/2.0)
fresnel_scale_2 = calc_fresnel(dist, lamb+dlamb/2.0)
fresnel_scale = (fresnel_scale_1 + fresnel_scale_2)/2.0
time_resolution = (np.min([fresnel_scale/vel, self.exptime]))/time_resolution_factor
self.model_resolution = time_resolution
# Creating a high resolution curve to compute fresnel diffraction, stellar diameter and instrumental integration
time_model = np.arange(time_obs.min()-5*self.exptime, time_obs.max()+5*self.exptime, time_resolution)
# Changing X: time (s) to distances in the sky plane (km), considering the tangential velocity (vel in km/s)
x = time_model*vel
x01 = immersion_time*vel
x02 = emersion_time*vel
        # Computing fresnel diffraction for the case where the star size is negligible
flux_fresnel_1 = bar_fresnel(x, x01, x02, fresnel_scale_1, opacity)
flux_fresnel_2 = bar_fresnel(x, x01, x02, fresnel_scale_2, opacity)
flux_fresnel = (flux_fresnel_1 + flux_fresnel_2)/2.
flux_star = flux_fresnel.copy()
if self.d_star > 0:
            # Computing fresnel diffraction for the case where the star size is not negligible
resolucao = (self.d_star/2)/npt_star
flux_star_1 = np.zeros(len(time_model))
flux_star_2 = np.zeros(len(time_model))
# Computing stellar diameter only near the immersion or emersion times
star_diam = (np.absolute(x - x01) < 3*self.d_star) + (np.absolute(x - x02) < 3*self.d_star)
p = np.arange(-npt_star, npt_star)*resolucao
coeff = np.sqrt(np.absolute((self.d_star/2)**2 - p**2))
for ii in np.where(star_diam == True)[0]:
xx = x[ii] + p
flux1 = bar_fresnel(xx, x01, x02, fresnel_scale_1, opacity)
flux2 = bar_fresnel(xx, x01, x02, fresnel_scale_2, opacity)
flux_star_1[ii] = np.sum(coeff*flux1)/coeff.sum()
flux_star_2[ii] = np.sum(coeff*flux2)/coeff.sum()
flux_star[ii] = (flux_star_1[ii] + flux_star_2[ii])/2.
flux_inst = np.zeros(len(time_obs))
for i in range(len(time_obs)):
event_model = (time_model > time_obs[i]-self.exptime/2.) & (time_model < time_obs[i]+self.exptime/2.)
flux_inst[i] = (flux_star[event_model]).mean()
return flux_inst*(flux_max - flux_min) + flux_min
def __str__(self):
""" String representation of the LightCurve Object
"""
output = 'Light curve name: {}\n'.format(self.name)
try:
output += ('Initial time: {} UTC\n'
'End time: {} UTC\n'
'Duration: {:.3f} minutes\n'.format(
self.initial_time.iso, self.end_time.iso,
(self.end_time - self.initial_time).value*u.d.to('min'))
)
except:
pass
output += 'Time offset: {:.3f} seconds\n\n'.format(self.dt)
try:
output += 'Exposure time: {:.4f} seconds\n'.format(self.exptime)
output += 'Cycle time: {:.4f} seconds\n'.format(self.cycle)
output += 'Num. data points: {}\n\n'.format(len(self.time))
except:
output += 'Object LightCurve was not instantiated with time and flux.\n\n'
try:
output += ('Bandpass: {:.3f} +/- {:.3f} microns\n'
'Object Distance: {:.2f} AU\n'
'Used shadow velocity: {:.3f} km/s\n'
'Fresnel scale: {:.3f} seconds or {:.2f} km\n'
'Stellar size effect: {:.3f} seconds or {:.2f} km\n'.format(
self.lambda_0, self.delta_lambda, self.dist, self.vel,
self.fresnel_scale/self.vel, self.fresnel_scale,
self.d_star/self.vel, self.d_star)
)
except:
output += '\nThere is no occultation associated with this light curve.\n'
try:
output += ('Inst. response: {:.3f} seconds or {:.2f} km\n'
'Dead time effect: {:.3f} seconds or {:.2f} km\n'
'Model resolution: {:.3f} seconds or {:.2f} km\n'
'Modelled baseflux: {:.3f}\n'
'Modelled bottomflux: {:.3f}\n'
'Light curve sigma: {:.3f}\n\n'.format(
self.exptime, self.exptime*self.vel, self.cycle-self.exptime,
(self.cycle-self.exptime)*self.vel, self.model_resolution,
self.model_resolution*self.vel, self.baseflux, self.bottomflux,
self.lc_sigma.mean())
)
except:
output += '\nObject LightCurve model was not fitted.\n\n'
try:
output += ('Immersion time: {} UTC +/- {:.3f} seconds\n'
'Emersion time: {} UTC +/- {:.3f} seconds\n\n'.format(
self.immersion.iso, self.immersion_err,
self.emersion.iso, self.emersion_err)
)
except:
output += 'Immersion and emersion times were not fitted or instantiated.\n\n'
try:
output += 'Monte Carlo chi square fit.\n\n' + self.chisquare.__str__() + '\n'
except:
pass
return output
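# Usage sketch (illustrative; assumes `time` and `flux` arrays are available):
#   lc = LightCurve(time=time, flux=flux, exptime=0.1, name='lc_example')
#   lc.normalize(poly_deg=1)
#   chi2 = lc.occ_lcfit()
#   print(lc)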
| [
"numpy.absolute",
"sora.config.decorators.deprecated_alias",
"numpy.sum",
"numpy.invert",
"numpy.ones",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.gca",
"os.path.join",
"warnings.simplefilter",
"sora.config.input_tests.check_kwargs",
"os.path.dirname",
"astropy.units.au.to",
"astr... | [((224, 268), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'UserWarning'], {}), "('always', UserWarning)\n", (245, 268), False, 'import warnings\n'), ((3130, 3206), 'sora.config.decorators.deprecated_alias', 'deprecated_alias', ([], {'lambda_0': '"""central_bandpass"""', 'delta_lambda': '"""delta_bandpass"""'}), "(lambda_0='central_bandpass', delta_lambda='delta_bandpass')\n", (3146, 3206), False, 'from sora.config.decorators import deprecated_alias\n'), ((17106, 17182), 'sora.config.decorators.deprecated_alias', 'deprecated_alias', ([], {'lambda_0': '"""central_bandpass"""', 'delta_lambda': '"""delta_bandpass"""'}), "(lambda_0='central_bandpass', delta_lambda='delta_bandpass')\n", (17122, 17182), False, 'from sora.config.decorators import deprecated_alias\n'), ((3590, 3653), 'sora.config.input_tests.check_kwargs', 'input_tests.check_kwargs', (['kwargs'], {'allowed_kwargs': 'allowed_kwargs'}), '(kwargs, allowed_kwargs=allowed_kwargs)\n', (3614, 3653), False, 'from sora.config import input_tests\n'), ((15496, 15525), 'astropy.units.Quantity', 'u.Quantity', (['exptime'], {'unit': 'u.s'}), '(exptime, unit=u.s)\n', (15506, 15525), True, 'import astropy.units as u\n'), ((16257, 16289), 'astropy.units.Quantity', 'u.Quantity', (['vel'], {'unit': '(u.km / u.s)'}), '(vel, unit=u.km / u.s)\n', (16267, 16289), True, 'import astropy.units as u\n'), ((16307, 16329), 'numpy.absolute', 'np.absolute', (['vel.value'], {}), '(vel.value)\n', (16318, 16329), True, 'import numpy as np\n'), ((16529, 16556), 'astropy.units.Quantity', 'u.Quantity', (['dist'], {'unit': 'u.AU'}), '(dist, unit=u.AU)\n', (16539, 16556), True, 'import astropy.units as u\n'), ((16684, 16707), 'numpy.absolute', 'np.absolute', (['dist.value'], {}), '(dist.value)\n', (16695, 16707), True, 'import numpy as np\n'), ((16908, 16937), 'astropy.units.Quantity', 'u.Quantity', (['d_star'], {'unit': 'u.km'}), '(d_star, unit=u.km)\n', (16918, 16937), True, 'import astropy.units as u\n'), ((17074, 17099), 'numpy.absolute', 'np.absolute', (['d_star.value'], {}), '(d_star.value)\n', (17085, 17099), True, 'import numpy as np\n'), ((17538, 17585), 'astropy.units.Quantity', 'u.Quantity', (['central_bandpass'], {'unit': 'u.micrometer'}), '(central_bandpass, unit=u.micrometer)\n', (17548, 17585), True, 'import astropy.units as u\n'), ((17767, 17812), 'astropy.units.Quantity', 'u.Quantity', (['delta_bandpass'], {'unit': 'u.micrometer'}), '(delta_bandpass, unit=u.micrometer)\n', (17777, 17812), True, 'import astropy.units as u\n'), ((24431, 24452), 'numpy.absolute', 'np.absolute', (['self.vel'], {}), '(self.vel)\n', (24442, 24452), True, 'import numpy as np\n'), ((29195, 29258), 'sora.config.input_tests.check_kwargs', 'input_tests.check_kwargs', (['kwargs'], {'allowed_kwargs': 'allowed_kwargs'}), '(kwargs, allowed_kwargs=allowed_kwargs)\n', (29219, 29258), False, 'from sora.config import input_tests\n'), ((32174, 32188), 'numpy.zeros', 'np.zeros', (['loop'], {}), '(loop)\n', (32182, 32188), True, 'import numpy as np\n'), ((36483, 36590), 'numpy.array', 'np.array', (['[(self.time * u.s + self.tref).jd, self.time, self.flux, self.model, self.\n flux - self.model]'], {}), '([(self.time * u.s + self.tref).jd, self.time, self.flux, self.\n model, self.flux - self.model])\n', (36491, 36590), True, 'import numpy as np\n'), ((41339, 41360), 'numpy.absolute', 'np.absolute', (['self.vel'], {}), '(self.vel)\n', (41350, 41360), True, 'import numpy as np\n'), ((5597, 5618), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], 
{}), "('km')\n", (5612, 5618), True, 'import astropy.units as u\n'), ((5653, 5674), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], {}), "('km')\n", (5668, 5674), True, 'import astropy.units as u\n'), ((5700, 5713), 'astropy.units.au.to', 'u.au.to', (['"""km"""'], {}), "('km')\n", (5707, 5713), True, 'import astropy.units as u\n'), ((6447, 6471), 'astropy.time.Time', 'Time', (['value'], {'format': '"""jd"""'}), "(value, format='jd')\n", (6451, 6471), False, 'from astropy.time import Time\n'), ((10104, 10165), 'astropy.time.Time', 'Time', (['((self.immersion.jd + self.emersion.jd) / 2)'], {'format': '"""jd"""'}), "((self.immersion.jd + self.emersion.jd) / 2, format='jd')\n", (10108, 10165), False, 'from astropy.time import Time\n'), ((10197, 10261), 'astropy.time.Time', 'Time', (['((self.initial_time.jd + self.end_time.jd) / 2)'], {'format': '"""jd"""'}), "((self.initial_time.jd + self.end_time.jd) / 2, format='jd')\n", (10201, 10261), False, 'from astropy.time import Time\n'), ((14580, 14596), 'numpy.argsort', 'np.argsort', (['time'], {}), '(time)\n', (14590, 14596), True, 'import numpy as np\n'), ((14875, 14887), 'numpy.min', 'np.min', (['time'], {}), '(time)\n', (14881, 14887), True, 'import numpy as np\n'), ((14916, 14928), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (14922, 14928), True, 'import numpy as np\n'), ((15541, 15561), 'numpy.isscalar', 'np.isscalar', (['exptime'], {}), '(exptime)\n', (15552, 15561), True, 'import numpy as np\n'), ((16596, 16663), 'warnings.warn', 'warnings.warn', (['"""distance cannot be negative. Using absolute value."""'], {}), "('distance cannot be negative. Using absolute value.')\n", (16609, 16663), False, 'import warnings\n'), ((16979, 17051), 'warnings.warn', 'warnings.warn', (['"""star diameter cannot be negative. Using absolute value."""'], {}), "('star diameter cannot be negative. 
Using absolute value.')\n", (16992, 17051), False, 'import warnings\n'), ((20396, 20470), 'numpy.invert', 'np.invert', (['((self.time > tmin - chord / 2) & (self.time < tmax + chord / 2))'], {}), '((self.time > tmin - chord / 2) & (self.time < tmax + chord / 2))\n', (20405, 20470), True, 'import numpy as np\n'), ((20750, 20766), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (20759, 20766), True, 'import numpy as np\n'), ((21275, 21291), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (21284, 21291), True, 'import numpy as np\n'), ((21632, 21648), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (21641, 21648), True, 'import numpy as np\n'), ((24300, 24321), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], {}), "('km')\n", (24315, 24321), True, 'import astropy.units as u\n'), ((24356, 24377), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], {}), "('km')\n", (24371, 24377), True, 'import astropy.units as u\n'), ((24403, 24416), 'astropy.units.au.to', 'u.au.to', (['"""km"""'], {}), "('km')\n", (24410, 24416), True, 'import astropy.units as u\n'), ((24701, 24744), 'numpy.min', 'np.min', (['[fresnel_scale / vel, self.exptime]'], {}), '([fresnel_scale / vel, self.exptime])\n', (24707, 24744), True, 'import numpy as np\n'), ((32335, 32348), 'numpy.ones', 'np.ones', (['loop'], {}), '(loop)\n', (32342, 32348), True, 'import numpy as np\n'), ((32544, 32606), 'numpy.sum', 'np.sum', (['((self.flux[mask] - model_test) ** 2 / sigma[mask] ** 2)'], {}), '((self.flux[mask] - model_test) ** 2 / sigma[mask] ** 2)\n', (32550, 32606), True, 'import numpy as np\n'), ((34481, 34490), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34488, 34490), True, 'import matplotlib.pyplot as plt\n'), ((35261, 35270), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35268, 35270), True, 'import matplotlib.pyplot as plt\n'), ((36396, 36421), 'os.path.dirname', 'os.path.dirname', (['namefile'], {}), '(namefile)\n', (36411, 36421), False, 'import os\n'), ((36441, 36467), 'os.path.basename', 'os.path.basename', (['namefile'], {}), '(namefile)\n', (36457, 36467), False, 'import os\n'), ((36775, 36801), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (36787, 36801), False, 'import os\n'), ((37092, 37223), 'numpy.array', 'np.array', (['[(self.time_model * u.s + self.tref).jd, self.time_model, self.\n model_geometric, self.model_fresnel, self.model_star]'], {}), '([(self.time_model * u.s + self.tref).jd, self.time_model, self.\n model_geometric, self.model_fresnel, self.model_star])\n', (37100, 37223), True, 'import numpy as np\n'), ((41208, 41229), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], {}), "('km')\n", (41223, 41229), True, 'import astropy.units as u\n'), ((41264, 41285), 'astropy.units.micrometer.to', 'u.micrometer.to', (['"""km"""'], {}), "('km')\n", (41279, 41285), True, 'import astropy.units as u\n'), ((41311, 41324), 'astropy.units.au.to', 'u.au.to', (['"""km"""'], {}), "('km')\n", (41318, 41324), True, 'import astropy.units as u\n'), ((41609, 41652), 'numpy.min', 'np.min', (['[fresnel_scale / vel, self.exptime]'], {}), '([fresnel_scale / vel, self.exptime])\n', (41615, 41652), True, 'import numpy as np\n'), ((4061, 4133), 'warnings.warn', 'warnings.warn', (['"""Immersion Error must be positive. Using absolute value."""'], {}), "('Immersion Error must be positive. 
Using absolute value.')\n", (4074, 4133), False, 'import warnings\n'), ((4171, 4202), 'numpy.absolute', 'np.absolute', (['self.immersion_err'], {}), '(self.immersion_err)\n', (4182, 4202), True, 'import numpy as np\n'), ((4431, 4502), 'warnings.warn', 'warnings.warn', (['"""Emersion Error must be positive. Using absolute value."""'], {}), "('Emersion Error must be positive. Using absolute value.')\n", (4444, 4502), False, 'import warnings\n'), ((4539, 4569), 'numpy.absolute', 'np.absolute', (['self.emersion_err'], {}), '(self.emersion_err)\n', (4550, 4569), True, 'import numpy as np\n'), ((6532, 6543), 'astropy.time.Time', 'Time', (['value'], {}), '(value)\n', (6536, 6543), False, 'from astropy.time import Time\n'), ((7062, 7086), 'astropy.time.Time', 'Time', (['value'], {'format': '"""jd"""'}), "(value, format='jd')\n", (7066, 7086), False, 'from astropy.time import Time\n'), ((7358, 7369), 'astropy.time.Time', 'Time', (['value'], {}), '(value)\n', (7362, 7369), False, 'from astropy.time import Time\n'), ((7886, 7910), 'astropy.time.Time', 'Time', (['value'], {'format': '"""jd"""'}), "(value, format='jd')\n", (7890, 7910), False, 'from astropy.time import Time\n'), ((8180, 8191), 'astropy.time.Time', 'Time', (['value'], {}), '(value)\n', (8184, 8191), False, 'from astropy.time import Time\n'), ((8719, 8743), 'astropy.time.Time', 'Time', (['value'], {'format': '"""jd"""'}), "(value, format='jd')\n", (8723, 8743), False, 'from astropy.time import Time\n'), ((9021, 9032), 'astropy.time.Time', 'Time', (['value'], {}), '(value)\n', (9025, 9032), False, 'from astropy.time import Time\n'), ((9536, 9560), 'astropy.time.Time', 'Time', (['value'], {'format': '"""jd"""'}), "(value, format='jd')\n", (9540, 9560), False, 'from astropy.time import Time\n'), ((9830, 9841), 'astropy.time.Time', 'Time', (['value'], {}), '(value)\n', (9834, 9841), False, 'from astropy.time import Time\n'), ((20881, 20928), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'lc_flux[mask]', '"""k.-"""'], {}), "(norm_time[mask], lc_flux[mask], 'k.-')\n", (20889, 20928), True, 'import matplotlib.pyplot as plt\n'), ((20945, 20999), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'flux_poly_model[mask]', '"""r-"""'], {}), "(norm_time[mask], flux_poly_model[mask], 'r-')\n", (20953, 20999), True, 'import matplotlib.pyplot as plt\n'), ((21091, 21101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21099, 21101), True, 'import matplotlib.pyplot as plt\n'), ((21389, 21436), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'lc_flux[mask]', '"""k.-"""'], {}), "(norm_time[mask], lc_flux[mask], 'k.-')\n", (21397, 21436), True, 'import matplotlib.pyplot as plt\n'), ((21453, 21507), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'flux_poly_model[mask]', '"""r-"""'], {}), "(norm_time[mask], flux_poly_model[mask], 'r-')\n", (21461, 21507), True, 'import matplotlib.pyplot as plt\n'), ((21599, 21609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21607, 21609), True, 'import matplotlib.pyplot as plt\n'), ((21808, 21825), 'numpy.arange', 'np.arange', (['(nn + 1)'], {}), '(nn + 1)\n', (21817, 21825), True, 'import numpy as np\n'), ((26036, 26066), 'numpy.arange', 'np.arange', (['(-npt_star)', 'npt_star'], {}), '(-npt_star, npt_star)\n', (26045, 26066), True, 'import numpy as np\n'), ((26105, 26149), 'numpy.absolute', 'np.absolute', (['((self.d_star / 2) ** 2 - p ** 2)'], {}), '((self.d_star / 2) ** 2 - p ** 2)\n', (26116, 26149), True, 'import numpy as np\n'), ((26167, 26194), 
'numpy.where', 'np.where', (['(star_diam == True)'], {}), '(star_diam == True)\n', (26175, 26194), True, 'import numpy as np\n'), ((36842, 36868), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (36854, 36868), False, 'import os\n'), ((37507, 37544), 'os.path.join', 'os.path.join', (['folder', "('model_' + file)"], {}), "(folder, 'model_' + file)\n", (37519, 37544), False, 'import os\n'), ((42992, 43022), 'numpy.arange', 'np.arange', (['(-npt_star)', 'npt_star'], {}), '(-npt_star, npt_star)\n', (43001, 43022), True, 'import numpy as np\n'), ((43061, 43105), 'numpy.absolute', 'np.absolute', (['((self.d_star / 2) ** 2 - p ** 2)'], {}), '((self.d_star / 2) ** 2 - p ** 2)\n', (43072, 43105), True, 'import numpy as np\n'), ((43123, 43150), 'numpy.where', 'np.where', (['(star_diam == True)'], {}), '(star_diam == True)\n', (43131, 43150), True, 'import numpy as np\n'), ((14247, 14270), 'astropy.time.Time', 'Time', (['time'], {'format': '"""jd"""'}), "(time, format='jd')\n", (14251, 14270), False, 'from astropy.time import Time\n'), ((14954, 14977), 'numpy.diff', 'np.diff', (['self._time[0:]'], {}), '(self._time[0:])\n', (14961, 14977), True, 'import numpy as np\n'), ((21915, 21960), 'numpy.var', 'np.var', (['(flux_poly_model[mask] - lc_flux[mask])'], {}), '(flux_poly_model[mask] - lc_flux[mask])\n', (21921, 21960), True, 'import numpy as np\n'), ((21959, 22008), 'numpy.var', 'np.var', (['(flux_poly_model_new[mask] - lc_flux[mask])'], {}), '(flux_poly_model_new[mask] - lc_flux[mask])\n', (21965, 22008), True, 'import numpy as np\n'), ((25941, 25961), 'numpy.absolute', 'np.absolute', (['(x - x01)'], {}), '(x - x01)\n', (25952, 25961), True, 'import numpy as np\n'), ((25982, 26002), 'numpy.absolute', 'np.absolute', (['(x - x02)'], {}), '(x - x02)\n', (25993, 26002), True, 'import numpy as np\n'), ((26416, 26437), 'numpy.sum', 'np.sum', (['(coeff * flux1)'], {}), '(coeff * flux1)\n', (26422, 26437), True, 'import numpy as np\n'), ((26482, 26503), 'numpy.sum', 'np.sum', (['(coeff * flux2)'], {}), '(coeff * flux2)\n', (26488, 26503), True, 'import numpy as np\n'), ((33281, 33292), 'astropy.units.d.to', 'u.d.to', (['"""s"""'], {}), "('s')\n", (33287, 33292), True, 'import astropy.units as u\n'), ((33660, 33671), 'astropy.units.d.to', 'u.d.to', (['"""s"""'], {}), "('s')\n", (33666, 33671), True, 'import astropy.units as u\n'), ((37593, 37630), 'os.path.join', 'os.path.join', (['folder', "('model_' + file)"], {}), "(folder, 'model_' + file)\n", (37605, 37630), False, 'import os\n'), ((42897, 42917), 'numpy.absolute', 'np.absolute', (['(x - x01)'], {}), '(x - x01)\n', (42908, 42917), True, 'import numpy as np\n'), ((42938, 42958), 'numpy.absolute', 'np.absolute', (['(x - x02)'], {}), '(x - x02)\n', (42949, 42958), True, 'import numpy as np\n'), ((43372, 43393), 'numpy.sum', 'np.sum', (['(coeff * flux1)'], {}), '(coeff * flux1)\n', (43378, 43393), True, 'import numpy as np\n'), ((43438, 43459), 'numpy.sum', 'np.sum', (['(coeff * flux2)'], {}), '(coeff * flux2)\n', (43444, 43459), True, 'import numpy as np\n'), ((44321, 44334), 'astropy.units.d.to', 'u.d.to', (['"""min"""'], {}), "('min')\n", (44327, 44334), True, 'import astropy.units as u\n'), ((22181, 22228), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'lc_flux[mask]', '"""k.-"""'], {}), "(norm_time[mask], lc_flux[mask], 'k.-')\n", (22189, 22228), True, 'import matplotlib.pyplot as plt\n'), ((22253, 22307), 'matplotlib.pyplot.plot', 'plt.plot', (['norm_time[mask]', 'flux_poly_model[mask]', '"""r-"""'], {}), 
"(norm_time[mask], flux_poly_model[mask], 'r-')\n", (22261, 22307), True, 'import matplotlib.pyplot as plt\n'), ((22416, 22426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22424, 22426), True, 'import matplotlib.pyplot as plt\n'), ((30817, 30839), 'numpy.random.random', 'np.random.random', (['loop'], {}), '(loop)\n', (30833, 30839), True, 'import numpy as np\n'), ((31007, 31029), 'numpy.random.random', 'np.random.random', (['loop'], {}), '(loop)\n', (31023, 31029), True, 'import numpy as np\n'), ((31883, 31905), 'numpy.random.random', 'np.random.random', (['loop'], {}), '(loop)\n', (31899, 31905), True, 'import numpy as np\n'), ((20111, 20122), 'astropy.units.d.to', 'u.d.to', (['"""s"""'], {}), "('s')\n", (20117, 20122), True, 'import astropy.units as u\n'), ((18309, 18326), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (18317, 18326), True, 'import numpy as np\n')] |
"""Tests the evaluation of the potential for single and array-valued
arguments for SHO, bump and KP potentials.
"""
import pytest
from basis.potential import Potential
import numpy as np
def test_getattr():
"""Tests the attribute re-routing to Potential.params.
"""
pot = Potential("potentials/kp.cfg")
assert pot.w == pot.params["w"]
pot = Potential("potentials/bump.cfg")
assert pot.a == pot.params["a"]
pot = Potential("potentials/sho.cfg")
assert pot.shift == pot.params["shift"]
with pytest.raises(AttributeError):
pot.dummy
def test_kp():
"""Tests the Kronig-Penney potential.
"""
pot = Potential("potentials/kp.cfg")
# Params for kp are w, s, n and v0. We use R as the new resolution
# for the numpy array check. My kp file only does even numbers of
# wells.
params = [(2, 0.5, 16, -15., 100),
(1e5, 1e3, 100, -1234., 1e5),
(1./3, 1./6, 10, 5, 20),
(np.pi, np.pi/4., 6, np.sqrt(2), 23)]
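    # Each tuple holds (w, s, n, v0) as described above plus the resolution R.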
for w, s, n, v0, R in params:
pot.adjust(w=w, s=s, n=n, v0=v0)
xa = np.linspace(0, w*n, R)
assert pot(0) == v0
assert pot(w*n) == 0.
assert pot((w-s)/2.) == v0
assert len(pot(xa)) == R
assert pot(w-s/2.) == 0.
assert pot(-5.*w*n) == 0.
def test_sho():
"""Tests the SHO potential.
"""
pot = Potential("potentials/sho.cfg")
params = [(2, 0., 15., 100),
(1e5, 1e3, 1234., 1e5),
(1./3, 1./6, 10, 5),
(np.pi, np.pi/2., np.sqrt(2), 23)]
for a, shift, v0, N in params:
pot.adjust(a=a, shift=shift, v0=v0)
xa = np.linspace(-a, a, N)
assert pot(-a) == v0*(-a-shift)**2
assert pot(a) == 0.
assert pot(3./4*a) == v0*(3./4*a-shift)**2
assert len(pot(xa)) == N
assert pot(-5.*a) == 0. #Outside of region
with pytest.raises(ValueError):
pot("some sho")
def test_bump():
"""Tests the bump in the square well potential.
"""
pot = Potential("potentials/bump.cfg")
params = [(2, 1, -15., 100),
(1e5, 1e3, -1234., 1e5),
(1./3, 1./6, -10, 5),
(np.pi, np.pi/2., -np.sqrt(2), 23)]
for a, w, V0, N in params:
pot.adjust(a=a, w=w, v0=V0)
x = w+(a-w)/2.
xa = np.linspace(-a, a, N)
assert pot(x) == 0.
assert pot(3./4*w) == V0
assert len(pot(xa)) == N
assert pot(-5.*a) == 0.
assert pot(-w) == V0
assert pot(a) == 0.
with pytest.raises(ValueError):
pot("a")
def test_adjust():
"""Tests adjusting a potential using an expression instead of a
constant. Also tests the warning when attempting to adjust a
non-existent parameter.
"""
pot = Potential("potentials/bump.cfg")
pot.adjust(a="w*numpy.sqrt(v0)")
pot.adjust(dummy=0.1)
def test_incorrect():
"""Tests execution of warning messages for incorrectly configured
potential files.
"""
with pytest.raises(ValueError):
V = Potential("potentials/wrong.cfg")
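# Note: run these tests with pytest from the directory containing "potentials/"
# so the relative .cfg paths above resolve (invocation illustrative): pytest -v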
| [
"pytest.raises",
"basis.potential.Potential",
"numpy.linspace",
"numpy.sqrt"
] | [((285, 315), 'basis.potential.Potential', 'Potential', (['"""potentials/kp.cfg"""'], {}), "('potentials/kp.cfg')\n", (294, 315), False, 'from basis.potential import Potential\n'), ((363, 395), 'basis.potential.Potential', 'Potential', (['"""potentials/bump.cfg"""'], {}), "('potentials/bump.cfg')\n", (372, 395), False, 'from basis.potential import Potential\n'), ((443, 474), 'basis.potential.Potential', 'Potential', (['"""potentials/sho.cfg"""'], {}), "('potentials/sho.cfg')\n", (452, 474), False, 'from basis.potential import Potential\n'), ((654, 684), 'basis.potential.Potential', 'Potential', (['"""potentials/kp.cfg"""'], {}), "('potentials/kp.cfg')\n", (663, 684), False, 'from basis.potential import Potential\n'), ((1385, 1416), 'basis.potential.Potential', 'Potential', (['"""potentials/sho.cfg"""'], {}), "('potentials/sho.cfg')\n", (1394, 1416), False, 'from basis.potential import Potential\n'), ((2053, 2085), 'basis.potential.Potential', 'Potential', (['"""potentials/bump.cfg"""'], {}), "('potentials/bump.cfg')\n", (2062, 2085), False, 'from basis.potential import Potential\n'), ((2813, 2845), 'basis.potential.Potential', 'Potential', (['"""potentials/bump.cfg"""'], {}), "('potentials/bump.cfg')\n", (2822, 2845), False, 'from basis.potential import Potential\n'), ((529, 558), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (542, 558), False, 'import pytest\n'), ((1102, 1126), 'numpy.linspace', 'np.linspace', (['(0)', '(w * n)', 'R'], {}), '(0, w * n, R)\n', (1113, 1126), True, 'import numpy as np\n'), ((1665, 1686), 'numpy.linspace', 'np.linspace', (['(-a)', 'a', 'N'], {}), '(-a, a, N)\n', (1676, 1686), True, 'import numpy as np\n'), ((2348, 2369), 'numpy.linspace', 'np.linspace', (['(-a)', 'a', 'N'], {}), '(-a, a, N)\n', (2359, 2369), True, 'import numpy as np\n'), ((3052, 3077), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3065, 3077), False, 'import pytest\n'), ((3091, 3124), 'basis.potential.Potential', 'Potential', (['"""potentials/wrong.cfg"""'], {}), "('potentials/wrong.cfg')\n", (3100, 3124), False, 'from basis.potential import Potential\n'), ((996, 1006), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1003, 1006), True, 'import numpy as np\n'), ((1555, 1565), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1562, 1565), True, 'import numpy as np\n'), ((1906, 1931), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1919, 1931), False, 'import pytest\n'), ((2566, 2591), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2579, 2591), False, 'import pytest\n'), ((2227, 2237), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2234, 2237), True, 'import numpy as np\n')] |
import os
import sys
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from SocialMediaIE.active_learning.helpers import (get_joined_metrics,
plot_metrics)
from SocialMediaIE.active_learning.query_strategies import (
entropy_scoring, min_margin_scoring, select_proportional, select_random,
select_top)
from SocialMediaIE.data.load_lexicon import load_sentiment_lexicon
from SocialMediaIE.data.text_preprocess import (create_lexicon_feature_fn,
preprocess_text)
from SocialMediaIE.models.sklearn_bag_of_words import get_model
from SocialMediaIE.training.active_learning_trainer import \
ActiveLearningTrainer
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore" # Also affect subprocesses
sns.set_context("talk")
sns.set_style("ticks")
np.random.seed(1337)
def create_trainer(args):
SENTIMENT_LEXICON = load_sentiment_lexicon(args.lexicon_path)
model_fn = lambda: get_model(SENTIMENT_LEXICON)
if args.scoring == "min_margin":
scoring_fn = min_margin_scoring
elif args.scoring == "entropy":
scoring_fn = entropy_scoring
else:
raise RuntimeError(f"args.scoring={args.scoring} is invalid")
if args.selection == "top":
selection_fn = select_top
elif args.selection == "random":
selection_fn = select_random
elif args.selection == "proportional":
selection_fn = select_proportional
else:
raise RuntimeError(f"args.selection={args.selection} is invalid")
trainer = ActiveLearningTrainer(
model_fn, scoring_fn=scoring_fn, selection_fn=selection_fn
)
return trainer
def main(args):
DATA_KEY = args.data_key
TASK_KEY = args.task_key
df_train = pd.read_json(
f"./data/processed/{TASK_KEY}/{DATA_KEY}/train.json",
orient="records",
lines=True,
)
eval_dfs = {
k: pd.read_json(
f"./data/processed/{TASK_KEY}/{DATA_KEY}/{k}.json",
orient="records",
lines=True,
)
for k in ["dev", "test"]
}
trainer = create_trainer(args)
all_metrics, base_metrics, training_indexes = trainer.train_multiple_rounds(
df_train,
eval_dfs=eval_dfs,
annotations_per_step=args.annotations_per_step,
max_iterations=args.max_iters,
)
output_dir = os.path.join(
args.output_dir, TASK_KEY, DATA_KEY, f"{args.scoring}_{args.selection}"
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
training_indexes.to_csv(os.path.join(output_dir, "training_indexes.csv"))
for k, metrics in all_metrics.items():
df_cm, df_reports = get_joined_metrics(metrics, base_metrics=base_metrics.get(k))
df_cm.to_csv(os.path.join(output_dir, f"{k}_cm.csv"))
df_reports.to_csv(os.path.join(output_dir, f"{k}_reports.csv"))
plot_metrics(metrics, base_metric=base_metrics.get(k))
plt.savefig(os.path.join(output_dir, f"{k}_metrics.pdf"), bbox_inches="tight")
def create_parser():
from argparse import ArgumentParser
parser = ArgumentParser(description="Active learning experiment")
parser.add_argument("--data-key", default="SemEval")
parser.add_argument("--task-key", default="SENTIMENT")
parser.add_argument(
"--max-iters", default=100, type=int, help="number of active learning rounds"
)
parser.add_argument(
"--lexicon-path", default="./data/sentiments.csv", help="sentiment lexicon"
)
parser.add_argument(
"--annotations-per-step",
default=100, type=int,
help="Number of annotations per step before restarting training.",
)
parser.add_argument(
"--output-dir",
default="./data/active_learning_models/",
help="output directory to store model metrics.",
)
parser.add_argument(
"--scoring",
choices=["entropy", "min_margin"],
default="entropy",
help="scoring function",
)
parser.add_argument(
"--selection",
choices=["random", "top", "proportional"],
default="top",
help="scoring function",
)
return parser
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
print(args)
main(args)
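# Example invocation (script name and values are illustrative):
#   python run_active_learning.py --data-key SemEval --task-key SENTIMENT \
#       --scoring entropy --selection top --max-iters 100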
| [
"seaborn.set_style",
"numpy.random.seed",
"warnings.simplefilter",
"argparse.ArgumentParser",
"os.makedirs",
"SocialMediaIE.training.active_learning_trainer.ActiveLearningTrainer",
"os.path.exists",
"pandas.read_json",
"SocialMediaIE.data.load_lexicon.load_sentiment_lexicon",
"os.path.join",
"se... | [((914, 937), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (929, 937), True, 'import seaborn as sns\n'), ((938, 960), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (951, 960), True, 'import seaborn as sns\n'), ((961, 981), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (975, 981), True, 'import numpy as np\n'), ((809, 840), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (830, 840), False, 'import warnings\n'), ((1034, 1075), 'SocialMediaIE.data.load_lexicon.load_sentiment_lexicon', 'load_sentiment_lexicon', (['args.lexicon_path'], {}), '(args.lexicon_path)\n', (1056, 1075), False, 'from SocialMediaIE.data.load_lexicon import load_sentiment_lexicon\n'), ((1682, 1768), 'SocialMediaIE.training.active_learning_trainer.ActiveLearningTrainer', 'ActiveLearningTrainer', (['model_fn'], {'scoring_fn': 'scoring_fn', 'selection_fn': 'selection_fn'}), '(model_fn, scoring_fn=scoring_fn, selection_fn=\n selection_fn)\n', (1703, 1768), False, 'from SocialMediaIE.training.active_learning_trainer import ActiveLearningTrainer\n'), ((1888, 1989), 'pandas.read_json', 'pd.read_json', (['f"""./data/processed/{TASK_KEY}/{DATA_KEY}/train.json"""'], {'orient': '"""records"""', 'lines': '(True)'}), "(f'./data/processed/{TASK_KEY}/{DATA_KEY}/train.json', orient=\n 'records', lines=True)\n", (1900, 1989), True, 'import pandas as pd\n'), ((2505, 2594), 'os.path.join', 'os.path.join', (['args.output_dir', 'TASK_KEY', 'DATA_KEY', 'f"""{args.scoring}_{args.selection}"""'], {}), "(args.output_dir, TASK_KEY, DATA_KEY,\n f'{args.scoring}_{args.selection}')\n", (2517, 2594), False, 'import os\n'), ((3248, 3304), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Active learning experiment"""'}), "(description='Active learning experiment')\n", (3262, 3304), False, 'from argparse import ArgumentParser\n'), ((1099, 1127), 'SocialMediaIE.models.sklearn_bag_of_words.get_model', 'get_model', (['SENTIMENT_LEXICON'], {}), '(SENTIMENT_LEXICON)\n', (1108, 1127), False, 'from SocialMediaIE.models.sklearn_bag_of_words import get_model\n'), ((2044, 2143), 'pandas.read_json', 'pd.read_json', (['f"""./data/processed/{TASK_KEY}/{DATA_KEY}/{k}.json"""'], {'orient': '"""records"""', 'lines': '(True)'}), "(f'./data/processed/{TASK_KEY}/{DATA_KEY}/{k}.json', orient=\n 'records', lines=True)\n", (2056, 2143), True, 'import pandas as pd\n'), ((2616, 2642), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (2630, 2642), False, 'import os\n'), ((2652, 2675), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2663, 2675), False, 'import os\n'), ((2704, 2752), 'os.path.join', 'os.path.join', (['output_dir', '"""training_indexes.csv"""'], {}), "(output_dir, 'training_indexes.csv')\n", (2716, 2752), False, 'import os\n'), ((2908, 2947), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{k}_cm.csv"""'], {}), "(output_dir, f'{k}_cm.csv')\n", (2920, 2947), False, 'import os\n'), ((2975, 3019), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{k}_reports.csv"""'], {}), "(output_dir, f'{k}_reports.csv')\n", (2987, 3019), False, 'import os\n'), ((3104, 3148), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{k}_metrics.pdf"""'], {}), "(output_dir, f'{k}_metrics.pdf')\n", (3116, 3148), False, 'import os\n')] |
'''
Random Forest Analysis
This is a random forest regressor that aims to predict the insurance charges based on the variables in the dataset
'''
from exploratory_analysis import data
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import mean
from numpy import std
from numpy import arange
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.ensemble import RandomForestRegressor
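# Split the dataset into features (every column except the last) and the target (column 6, the insurance charges)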
dataX = data.iloc[:,:-1]
dataY = data.iloc[:,6]
'''
This part of the program was adapted from a regressor model on "Machine Learning Mastery"
url: https://machinelearningmastery.com/random-forest-ensemble-in-python/
*** code adapted from the above url ***
'''
def get_models():
models = dict()
    # exploring max_samples ratios from 10% to 100%
for i in arange(0.1, 1.1, 0.1):
key = "%.1f" % i
        # a ratio of 1.0 means using the whole dataset, which the regressor expects as max_samples=None
        if i == 1.0:
            i = None
        models[key] = RandomForestRegressor(max_samples=i)
return models
def evaluate_model(model, x, y):
#defining the evaluation procedure
    cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, x, y, scoring="neg_mean_absolute_error", cv=cv, n_jobs=1, error_score="raise")
    #scores = cross_val_score(model, x, y, scoring="neg_mean_squared_error", cv=cv, n_jobs=1, error_score="raise")
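    # cross_val_score negates the "neg_mean_absolute_error" metric, so take the absolute value to report positive MAE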
return np.absolute(scores)
models = get_models()
results, names = list(), list()
for name, model in models.items():
#evaluate the model
scores = evaluate_model(model, dataX, dataY)
#storing the results
results.append(scores)
names.append(name)
#summarizing the performance
print("Mean MAE scores and STD", name, mean(scores), std(scores))
#print("RMSE scores and STD", name, mean(np.sqrt(scores)))
#ans = np.sqrt(results)
#converting the ans variable to a list in order to plot it with the names list - otherwise it won't run
#ans = list(ans)
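# Box plot of the MAE distribution obtained for each max_samples ratio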
plt.boxplot(results, labels = names, showmeans = True)
plt.show()
| [
"numpy.absolute",
"matplotlib.pyplot.show",
"matplotlib.pyplot.boxplot",
"sklearn.model_selection.cross_val_score",
"numpy.std",
"sklearn.ensemble.RandomForestRegressor",
"numpy.mean",
"numpy.arange",
"sklearn.model_selection.RepeatedKFold"
] | [((2128, 2178), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['results'], {'labels': 'names', 'showmeans': '(True)'}), '(results, labels=names, showmeans=True)\n', (2139, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2193, 2195), True, 'import matplotlib.pyplot as plt\n'), ((878, 899), 'numpy.arange', 'arange', (['(0.1)', '(1.1)', '(0.1)'], {}), '(0.1, 1.1, 0.1)\n', (884, 899), False, 'from numpy import arange\n'), ((1188, 1243), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(3)', 'random_state': '(1)'}), '(n_splits=10, n_repeats=3, random_state=1)\n', (1201, 1243), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((1265, 1379), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'dataX', 'dataY'], {'scoring': '"""neg_mean_absolute_error"""', 'cv': 'cv', 'n_jobs': '(1)', 'error_score': '"""raise"""'}), "(model, dataX, dataY, scoring='neg_mean_absolute_error', cv=\n cv, n_jobs=1, error_score='raise')\n", (1280, 1379), False, 'from sklearn.model_selection import cross_val_score\n'), ((1529, 1548), 'numpy.absolute', 'np.absolute', (['scores'], {}), '(scores)\n', (1540, 1548), True, 'import numpy as np\n'), ((1041, 1077), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'max_samples': 'i'}), '(max_samples=i)\n', (1062, 1077), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1885, 1897), 'numpy.mean', 'mean', (['scores'], {}), '(scores)\n', (1889, 1897), False, 'from numpy import mean\n'), ((1899, 1910), 'numpy.std', 'std', (['scores'], {}), '(scores)\n', (1902, 1910), False, 'from numpy import std\n')] |
"""
This is an FSLeyes plugin script that integrates AxonDeepSeg tools into FSLeyes.
Author : <NAME>
"""
import wx
import wx.lib.agw.hyperlink as hl
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.actions.loadoverlay as ovLoad
import numpy as np
import nibabel as nib
from PIL import Image, ImageDraw, ImageOps
import scipy.misc
import json
from pathlib import Path
import AxonDeepSeg
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.segment import segment_image
import AxonDeepSeg.morphometrics.compute_morphometrics as compute_morphs
from AxonDeepSeg import postprocessing, params, ads_utils
from config import axonmyelin_suffix, axon_suffix, myelin_suffix, index_suffix, axonmyelin_index_suffix
import math
from scipy import ndimage as ndi
from skimage import measure, morphology, feature
import tempfile
import openpyxl
import pandas as pd
import imageio
VERSION = "0.2.19"
class ADSsettings:
"""
This class handles everything related to the parameters used in the ADS plugin, including the frame for the settings
menu.
"""
def __init__(self, ads_control):
"""
Constructor for the ADSsettings class. Initializes the default settings.
:param ads_control: An instance of ADScontrol
:type ads_control: ADScontrol
"""
self.ads_control = ads_control
# Declare the settings used
self.overlap_value = 25
self.model_resolution = 0.01 # Unused
self.use_custom_resolution = False # Unused
self.custom_resolution = 0.07 # Unused
self.zoom_factor = 1.0
self.axon_shape = "circle"
def on_settings_button(self, event):
"""
This function creates the settings_frame (the settings menu). It is called when the 'settings' button has been
pressed.
"""
self.settings_frame = wx.Frame(self.ads_control, title="Settings", size=(600, 300))
frame_sizer_h = wx.BoxSizer(wx.VERTICAL)
# Add the overlap value to the settings menu
sizer_overlap_value = wx.BoxSizer(wx.HORIZONTAL)
overlap_value_tooltip = wx.ToolTip("Represents the number of pixels that overlap two patches of the image when "
"applying the prediction model")
sizer_overlap_value.Add(wx.StaticText(self.settings_frame, label="Overlap value (pixels): "))
self.overlap_value_spinCtrl = wx.SpinCtrl(self.settings_frame, min=0, max=100, initial=self.overlap_value)
self.overlap_value_spinCtrl.Bind(wx.EVT_SPINCTRL, self.on_overlap_value_changed)
self.overlap_value_spinCtrl.SetToolTip(overlap_value_tooltip)
sizer_overlap_value.Add(self.overlap_value_spinCtrl, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_overlap_value)
# Add the zoom factor to the settings menu
sizer_zoom_factor = wx.BoxSizer(wx.HORIZONTAL)
zoom_factor_tooltip = wx.ToolTip("When applying the model, the pixel size of the image will be "
"multiplied by this number. The zoom factor does not affect the computation of morphometrics.")
sizer_zoom_factor.Add(wx.StaticText(self.settings_frame, label="Zoom factor: "))
self.zoom_factor_spinCtrlDouble = wx.SpinCtrlDouble(self.settings_frame, initial=self.zoom_factor, inc=0.0001)
self.zoom_factor_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE, self.on_zoom_factor_changed)
self.zoom_factor_spinCtrlDouble.SetToolTip(zoom_factor_tooltip)
sizer_zoom_factor.Add(self.zoom_factor_spinCtrlDouble, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_zoom_factor)
# Add the axon shape selection
axon_shape_choices = ["circle", "ellipse"]
sizer_axon_shape = wx.BoxSizer(wx.HORIZONTAL)
axon_shape_tooltip = wx.ToolTip('Select what is the shape of the axons that will be considered when computing '
'the morphometrics. "circle" will use the equivalent diameter (diameter of a circle with the same area as the axon). '
'"ellipse" will use minor axis of a fitted ellipse as diameter.')
sizer_axon_shape.Add(wx.StaticText(self.settings_frame, label="Axon shape: "))
self.axon_shape_combobox = wx.ComboBox(
self.settings_frame,
choices=axon_shape_choices,
size=(100, 20),
value=self.axon_shape
)
self.axon_shape_combobox.Bind(wx.EVT_COMBOBOX, self.on_axon_shape_combobox_item_selected)
self.axon_shape_combobox.SetToolTip(axon_shape_tooltip)
sizer_axon_shape.Add(self.axon_shape_combobox, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_axon_shape)
# Add the done button
sizer_done_button = wx.BoxSizer(wx.HORIZONTAL)
done_button = wx.Button(self.settings_frame, label="Done")
done_button.Bind(wx.EVT_BUTTON, self.on_done_button)
sizer_done_button.Add(done_button, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_done_button)
self.settings_frame.SetSizer(frame_sizer_h)
self.settings_frame.Show()
def on_overlap_value_changed(self, event):
self.overlap_value = self.overlap_value_spinCtrl.GetValue()
def on_zoom_factor_changed(self, event):
self.zoom_factor = self.zoom_factor_spinCtrlDouble.GetValue()
def on_axon_shape_combobox_item_selected(self, event):
self.axon_shape = self.axon_shape_combobox.GetStringSelection()
def on_done_button(self, event):
# TODO: make sure every setting is saved
self.settings_frame.Close()
class ADScontrol(ctrlpanel.ControlPanel):
"""
This class is the object corresponding to the AxonDeepSeg control panel.
"""
def __init__(self, ortho, *args, **kwargs):
"""
This function initializes the control panel. It generates the widgets and adds them to the panel. It also sets
the initial position of the panel to the left.
:param ortho: This is used to access the ortho ops in order to turn off the X and Y canvas as well as the cursor
"""
ctrlpanel.ControlPanel.__init__(self, ortho, *args, **kwargs)
# Create the settings object
self.settings = ADSsettings(self)
# Add a sizer to the control panel
# This sizer will contain the buttons
sizer_h = wx.BoxSizer(wx.VERTICAL)
# Add the logo to the control panel
ADS_logo = self.get_logo()
sizer_h.Add(ADS_logo, flag=wx.SHAPED, proportion=1)
# Add the citation to the control panel
citation_box = wx.TextCtrl(
self, value=self.get_citation(), size=(100, 50), style=wx.TE_MULTILINE
)
sizer_h.Add(citation_box, flag=wx.SHAPED, proportion=1)
# Add a hyperlink to the documentation
hyper = hl.HyperLinkCtrl(
self, -1, label="Need help? Read the documentation", URL="https://axondeepseg.readthedocs.io/en/latest/"
)
sizer_h.Add(hyper, flag=wx.SHAPED, proportion=1)
# Define the color of button labels
button_label_color = (0, 0, 0)
# Add the image loading button
load_png_button = wx.Button(self, label="Load PNG or TIF file")
load_png_button.SetForegroundColour(button_label_color)
load_png_button.Bind(wx.EVT_BUTTON, self.on_load_png_button)
load_png_button.SetToolTip(wx.ToolTip("Loads a .png or .tif file into FSLeyes"))
sizer_h.Add(load_png_button, flag=wx.SHAPED, proportion=1)
# Add the mask loading button
load_mask_button = wx.Button(self, label="Load existing mask")
load_mask_button.SetForegroundColour(button_label_color)
load_mask_button.Bind(wx.EVT_BUTTON, self.on_load_mask_button)
load_mask_button.SetToolTip(
wx.ToolTip(
"Loads an existing axonmyelin mask into FSLeyes. "
"The selected image should contain both the axon and myelin masks. "
"The regions on the image should have an intensity of 0 for the background, "
"127 for the myelin and 255 for the axons. "
)
)
sizer_h.Add(load_mask_button, flag=wx.SHAPED, proportion=1)
# Add the model choice combobox
self.model_combobox = wx.ComboBox(
self,
choices=ads_utils.get_existing_models_list(),
size=(100, 20),
value="Select the modality",
)
self.model_combobox.SetForegroundColour(button_label_color)
self.model_combobox.SetToolTip(
wx.ToolTip("Select the modality used to acquire the image")
)
sizer_h.Add(self.model_combobox, flag=wx.SHAPED, proportion=1)
# Add the button that applies the prediction model
apply_model_button = wx.Button(self, label="Apply ADS prediction model")
apply_model_button.SetForegroundColour(button_label_color)
apply_model_button.Bind(wx.EVT_BUTTON, self.on_apply_model_button)
apply_model_button.SetToolTip(
wx.ToolTip("Applies the prediction model and displays the masks")
)
sizer_h.Add(apply_model_button, flag=wx.SHAPED, proportion=1)
# The Watershed button's purpose isn't clear. It is unavailable for now.
# # Add the button that runs the watershed algorithm
# run_watershed_button = wx.Button(self, label="Run Watershed")
# run_watershed_button.Bind(wx.EVT_BUTTON, self.on_run_watershed_button)
# run_watershed_button.SetToolTip(
# wx.ToolTip(
# "Uses a watershed algorithm to find the different axon+myelin"
# "objects. This is used to see if where are connections"
# " between two axon+myelin objects."
# )
# )
# sizer_h.Add(run_watershed_button, flag=wx.SHAPED, proportion=1)
# Add the fill axon tool
fill_axons_button = wx.Button(self, label="Fill axons")
fill_axons_button.SetForegroundColour(button_label_color)
fill_axons_button.Bind(wx.EVT_BUTTON, self.on_fill_axons_button)
fill_axons_button.SetToolTip(
wx.ToolTip(
"Automatically fills the axons inside myelin objects."
" THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH "
"OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY."
)
)
sizer_h.Add(fill_axons_button, flag=wx.SHAPED, proportion=1)
# Add the save Segmentation button
save_segmentation_button = wx.Button(self, label="Save segmentation")
save_segmentation_button.SetForegroundColour(button_label_color)
save_segmentation_button.Bind(wx.EVT_BUTTON, self.on_save_segmentation_button)
save_segmentation_button.SetToolTip(
wx.ToolTip("Saves the axon and myelin masks in the selected folder")
)
sizer_h.Add(save_segmentation_button, flag=wx.SHAPED, proportion=1)
# Add compute morphometrics button
compute_morphometrics_button = wx.Button(self, label="Compute morphometrics")
compute_morphometrics_button.SetForegroundColour(button_label_color)
compute_morphometrics_button.Bind(wx.EVT_BUTTON, self.on_compute_morphometrics_button)
compute_morphometrics_button.SetToolTip(
wx.ToolTip(
"Calculates and saves the morphometrics to an excel and csv file. "
"Shows the indexes of the axons at the coordinates specified in the morphometrics file."
)
)
sizer_h.Add(compute_morphometrics_button, flag=wx.SHAPED, proportion=1)
# Add the settings button
settings_button = wx.Button(self, label="Settings")
settings_button.SetForegroundColour(button_label_color)
settings_button.Bind(wx.EVT_BUTTON, self.settings.on_settings_button)
sizer_h.Add(settings_button, flag=wx.SHAPED, proportion=1)
# Set the sizer of the control panel
self.SetSizer(sizer_h)
# Initialize the variables that are used to track the active image
self.png_image_name = []
self.image_dir_path = []
self.most_recent_watershed_mask_name = None
# Toggle off the X and Y canvas
oopts = ortho.sceneOpts
oopts.showXCanvas = False
oopts.showYCanvas = False
# Toggle off the cursor
oopts.showCursor = False
# Toggle off the radiological orientation
self.displayCtx.radioOrientation = False
# Invert the Y display
self.frame.viewPanels[0].frame.viewPanels[0].getZCanvas().opts.invertY = True
# Create a temporary directory that will hold the NIfTI files
self.ads_temp_dir_var = tempfile.TemporaryDirectory() #This variable needs to stay loaded to keep the temporary
# directory from being destroyed
self.ads_temp_dir = Path(self.ads_temp_dir_var.name)
# Check the version
        self.verify_version()
def on_load_png_button(self, event):
"""
This function is called when the user presses on the Load Png button. It allows the user to select a PNG or TIF
image, convert it into a NIfTI and load it into FSLeyes.
"""
        # Ask the user which file they want to convert
with wx.FileDialog(
self, "select Image file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Store the directory path and image name for later use in the application of the prediction model
self.image_dir_path.append(in_file.parents[0])
self.png_image_name.append(in_file.name)
# Call the function that convert and loads the png or tif image
self.load_png_image_from_path(in_file)
def on_load_mask_button(self, event):
"""
This function is called when the user presses on the loadMask button. It allows the user to select an existing
PNG mask, convert it into a NIfTI and load it into FSLeyes.
The mask needs to contain an axon + myelin mask. The Axons should have an intensity > 200. The myelin should
have an intensity between 100 and 200. The data should be in uint8.
"""
# Ask the user to select the mask image
with wx.FileDialog(
self, "select mask .png file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Get the image data
img_png2D = ads_utils.imread(in_file)
image_name = in_file.stem
# Extract the Axon mask
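        # As stated in the docstring, axons have an intensity above 200 and myelin between 100 and 200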
axon_mask = img_png2D > 200
axon_mask = params.intensity['binary'] * np.array(axon_mask, dtype=np.uint8)
# Extract the Myelin mask
myelin_mask = (img_png2D > 100) & (img_png2D < 200)
myelin_mask = params.intensity['binary'] * np.array(myelin_mask, dtype=np.uint8)
# Load the masks into FSLeyes
axon_outfile = self.ads_temp_dir / (image_name + "-axon.png")
ads_utils.imwrite(axon_outfile, axon_mask)
self.load_png_image_from_path(axon_outfile, is_mask=True, colormap="blue")
myelin_outfile = self.ads_temp_dir / (image_name + "-myelin.png")
ads_utils.imwrite(myelin_outfile, myelin_mask)
self.load_png_image_from_path(myelin_outfile, is_mask=True, colormap="red")
def on_apply_model_button(self, event):
"""
This function is called when the user presses on the ApplyModel button. It is used to apply the prediction model
selected in the combobox. The segmentation masks are then loaded into FSLeyes
"""
# Declare the default resolution of the model
resolution = 0.1
# Get the image name and directory
image_overlay = self.get_visible_image_overlay()
if self.get_visible_image_overlay() is None:
return
        n_loaded_images = len(self.png_image_name)
image_name = None
image_directory = None
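        # Match the visible overlay against the images previously loaded through the plugin to recover its file path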
for i in range(n_loaded_images):
if image_overlay.name == (Path(self.png_image_name[i])).stem:
image_name = self.png_image_name[i]
image_directory = self.image_dir_path[i]
if (image_name is None) or (image_directory is None):
self.show_message(
"Couldn't find the path to the loaded image. "
"Please use the plugin's image loader to import the image you wish to segment. "
)
return
image_path = image_directory / image_name
image_name_no_extension = Path(image_name).stem
# Get the selected model
selected_model = self.model_combobox.GetStringSelection()
if selected_model == "":
self.show_message("Please select a model")
return
# Get the path of the selected model
if any(selected_model in models for models in ads_utils.get_existing_models_list()):
dir_path = Path(AxonDeepSeg.__file__).parents[0]
model_path = dir_path / "models" / selected_model
else:
self.show_message("Please select a model")
return
# If the TEM model is selected, modify the resolution
if "TEM" in selected_model.upper():
resolution = 0.01
# Check if the pixel size txt file exist in the imageDirPath
pixel_size_exists = (image_directory / "pixel_size_in_micrometer.txt").exists()
# if it doesn't exist, ask the user to input the pixel size
if pixel_size_exists is False:
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size_float = float(pixel_size_str)
        else:  # read the pixel size
            with open(str(image_directory / "pixel_size_in_micrometer.txt"), "r") as resolution_file:
                pixel_size_float = float(resolution_file.read())
# Load model configs and apply prediction
model_configfile = model_path / "config_network.json"
with open(model_configfile.__str__(), "r") as fd:
config_network = json.loads(fd.read())
segment_image(
image_path,
model_path,
self.settings.overlap_value,
config_network,
resolution,
acquired_resolution=pixel_size_float * self.settings.zoom_factor,
verbosity_level=3
)
# The axon_segmentation function creates the segmentation masks and stores them as PNG files in the same folder
# as the original image file.
# Load the axon and myelin masks into FSLeyes
axon_mask_path = image_directory / (image_name_no_extension + str(axon_suffix))
myelin_mask_path = image_directory / (image_name_no_extension + str(myelin_suffix))
self.load_png_image_from_path(axon_mask_path, is_mask=True, colormap="blue")
self.load_png_image_from_path(myelin_mask_path, is_mask=True, colormap="red")
self.pixel_size_float = pixel_size_float
return self
def on_save_segmentation_button(self, event):
"""
This function saves the active myelin and axon masks as PNG images. Three (3) images are generated in a folder
selected by the user : one with the axon mask, one with the myelin mask and one with both.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Ask the user where to save the segmentation
with wx.DirDialog(
self,
"select the directory in which the segmentation will be save",
defaultPath="",
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
) as file_dialog:
if file_dialog.ShowModal() == wx.ID_CANCEL:
return
save_dir = Path(file_dialog.GetPath())
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# Remove the intersection
myelin_array, axon_array, intersection = postprocessing.remove_intersection(
myelin_array, axon_array, priority=1, return_overlap=True)
if intersection.sum() > 0:
self.show_message(
"There is an overlap between the axon mask and the myelin mask. The myelin will have priority.")
# Scale the pixel values of the masks to 255 for image saving
myelin_array = myelin_array * params.intensity['binary']
axon_array = axon_array * params.intensity['binary']
image_name = myelin_mask_overlay.name[:-len("_seg-myelin")]
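        # Integer-dividing the 255-valued myelin mask by 2 yields ~127, giving the 0/127/255 axonmyelin encoding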
myelin_and_axon_array = (myelin_array // 2 + axon_array).astype(np.uint8)
ads_utils.imwrite(filename=save_dir / (image_name + str(axonmyelin_suffix)), img=myelin_and_axon_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(myelin_suffix)), img=myelin_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(axon_suffix)), img=axon_array)
def on_run_watershed_button(self, event):
"""
        This function is called when the user presses the runWatershed button. It creates a watershed mask that is
        used to locate the connections between the axon-myelin objects.
The runWatershed button is currently commented, so this function is unused at the moment.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Extract the data from the overlays
axon_array = axon_mask_overlay[:, :, 0]
myelin_array = myelin_mask_overlay[:, :, 0]
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# If a watershed mask already exists, remove it.
for an_overlay in self.overlayList:
if (self.most_recent_watershed_mask_name is not None) and (
an_overlay.name == self.most_recent_watershed_mask_name
):
self.overlayList.remove(an_overlay)
# Compute the watershed mask
watershed_data = self.get_watershed_segmentation(axon_array, myelin_array)
# Save the watershed mask as a png then load it as an overlay
watershed_image_array = np.rot90(watershed_data, k=3, axes=(1, 0))
watershed_image = Image.fromarray(watershed_image_array)
        file_name = self.ads_temp_dir / "watershed_mask.png"
        watershed_image.save(file_name)
        watershed_mask_overlay = self.load_png_image_from_path(
            file_name, add_to_overlayList=False
        )
        watershed_mask_overlay[:, :, 0] = watershed_data
        self.overlayList.append(watershed_mask_overlay)
        # Apply a "random" colour mapping to the watershed mask
        opts = self.displayCtx.getOpts(watershed_mask_overlay)
opts.cmap = "random"
self.most_recent_watershed_mask_name = "watershed_mask"
def on_fill_axons_button(self, event):
"""
This function is called when the fillAxon button is pressed by the user. It uses a flood fill algorithm to fill
the inside of the myelin objects with the axon mask
"""
# Find the visible myelin and axon mask
myelin_mask_overlay = self.get_visible_myelin_overlay()
axon_mask_overlay = self.get_visible_axon_overlay()
if myelin_mask_overlay is None:
return
if axon_mask_overlay is None:
return
# Extract the data from the overlays
myelin_array = myelin_mask_overlay[:, :, 0]
axon_array = axon_mask_overlay[:, :, 0]
# Perform the floodfill operation
axon_extracted_array = postprocessing.floodfill_axons(axon_array, myelin_array)
axon_corr_array = np.flipud(axon_extracted_array)
axon_corr_array = params.intensity['binary'] * np.rot90(axon_corr_array, k=1, axes=(1, 0))
file_name = self.ads_temp_dir / (myelin_mask_overlay.name[:-len("-myelin")] + "-axon-corr.png")
ads_utils.imwrite(filename=file_name, img=axon_corr_array)
self.load_png_image_from_path(file_name, is_mask=True, colormap="blue")
def on_compute_morphometrics_button(self, event):
"""
Compute morphometrics and save them to an Excel file.
"""
# Get pixel size
try:
pixel_size = self.pixel_size_float
        except AttributeError:
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size = float(pixel_size_str)
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
        # Combine the masks into a single axonmyelin image
pred = (myelin_array // 2 + axon_array).astype(np.uint8)
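        # Axons sit near 255 and myelin near 127 in the combined image, so threshold to recover each binary mask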
pred_axon = pred > 200
pred_myelin = np.logical_and(pred >= 50, pred <= 200)
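        # Structured array that accumulates one row of morphometrics per axon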
x = np.array([], dtype=[
('x0', 'f4'),
('y0', 'f4'),
('gratio','f4'),
('axon_area','f4'),
('axon_perimeter','f4'),
('myelin_area','f4'),
('axon_diam','f4'),
('myelin_thickness','f4'),
('axonmyelin_area','f4'),
('axonmyelin_perimeter','f4'),
('solidity','f4'),
('eccentricity','f4'),
('orientation','f4')
]
)
# Compute statistics
stats_array, index_image_array = compute_morphs.get_axon_morphometrics(im_axon=pred_axon, im_myelin=pred_myelin,
pixel_size=pixel_size,
axon_shape=self.settings.axon_shape,
return_index_image=True)
for stats in stats_array:
x = np.append(x,
np.array(
[(
stats['x0'],
stats['y0'],
stats['gratio'],
stats['axon_area'],
stats['axon_perimeter'],
stats['myelin_area'],
stats['axon_diam'],
stats['myelin_thickness'],
stats['axonmyelin_area'],
stats['axonmyelin_perimeter'],
stats['solidity'],
stats['eccentricity'],
stats['orientation']
)],
dtype=x.dtype)
)
with wx.FileDialog(self, "Save morphometrics file", wildcard="Excel files (*.xlsx)|*.xlsx",
defaultFile="axon_morphometrics.xlsx", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# save the current contents in the file
pathname = fileDialog.GetPath()
if not (pathname.lower().endswith((".xlsx", ".csv"))): # If the user didn't add the extension, add it here
pathname = pathname + ".xlsx"
try:
# Export to excel
pd.DataFrame(x).to_excel(pathname)
except IOError:
wx.LogError("Cannot save current data in file '%s'." % pathname)
# Generate and load the index image
original_image_name = (axon_mask_overlay.name).split("-axon")[0]
original_image_name = original_image_name.split("_seg")[0]
index_outfile = Path(pathname).parents[0] / (original_image_name + str(index_suffix))
ads_utils.imwrite(index_outfile, index_image_array)
self.load_png_image_from_path(index_outfile, is_mask=False, colormap="yellow")
# Generate the colored image with indexes
axon_array, myelin_array = postprocessing.remove_intersection(axon_array//255, myelin_array//255)
axonmyelin_image = axon_array * params.intensity["axon"] + myelin_array * params.intensity["myelin"]
axonmyelin_outfile = self.ads_temp_dir / axonmyelin_suffix
ads_utils.imwrite(axonmyelin_outfile, axonmyelin_image)
postprocessing.generate_and_save_colored_image_with_index_numbers(
filename= Path(pathname).parents[0] / (original_image_name + str(axonmyelin_index_suffix)),
axonmyelin_image_path= axonmyelin_outfile,
index_image_array=index_image_array
)
return
def get_watershed_segmentation(self, im_axon, im_myelin, return_centroids=False):
"""
Parts of this function were copied from the code found in this document :
https://github.com/neuropoly/axondeepseg/blob/master/AxonDeepSeg/morphometrics/compute_morphometrics.py
In the future, the referenced script should be modified in order to avoid repetition.
:param im_axon: the binary mask corresponding to axons
:type im_axon: ndarray
:param im_myelin: the binary mask corresponding to the myelin
:type im_myelin: ndarray
:param return_centroids: (optional) if this is set to true, the function will also return the centroids of the
axon objects as a list of tuples
:type return_centroids: bool
:return: the label corresponding to the axon+myelin objects
:rtype: ndarray
"""
# Label each axon object
im_axon_label = measure.label(im_axon)
# Measure properties for each axon object
axon_objects = measure.regionprops(im_axon_label)
# Deal with myelin mask
if im_myelin is not None:
# sum axon and myelin masks
im_axonmyelin = im_axon + im_myelin
# Compute distance between each pixel and the background. Note: this distance is calculated from the im_axon,
# note from the im_axonmyelin image, because we know that each axon object is already isolated, therefore the
# distance metric will be more useful for the watershed algorithm below.
distance = ndi.distance_transform_edt(im_axon)
# local_maxi = feature.peak_local_max(distance, indices=False, footprint=np.ones((31, 31)), labels=axonmyelin)
# Get axon centroid as int (not float) to be used as index
ind_centroid = (
[int(props.centroid[0]) for props in axon_objects],
[int(props.centroid[1]) for props in axon_objects],
)
# Create an image with axon centroids, which value corresponds to the value of the axon object
im_centroid = np.zeros_like(im_axon, dtype="uint16")
for i in range(len(ind_centroid[0])):
# Note: The value "i" corresponds to the label number of im_axon_label
im_centroid[ind_centroid[0][i], ind_centroid[1][i]] = i + 1
# Watershed segmentation of axonmyelin using distance map
im_axonmyelin_label = morphology.watershed(
-distance, im_centroid, mask=im_axonmyelin
)
if return_centroids is True:
return im_axonmyelin_label, ind_centroid
else:
return im_axonmyelin_label
def load_png_image_from_path(
self, image_path, is_mask=False, add_to_overlayList=True, colormap="greyscale"
):
"""
This function converts a 2D image into a NIfTI image and loads it as an overlay.
The parameter add_to_overlayList allows to display the overlay into FSLeyes.
:param image_path: The location of the image, including the name and the .extension
:type image_path: Path
:param is_mask: (optional) Whether or not this is a segmentation mask. It will be treated as a normal
image by default.
:type is_mask: bool
:param add_to_overlayList: (optional) Whether or not to add the image to the overlay list. If so, the image will
be displayed in the application. This parameter is True by default.
:type add_to_overlayList: bool
:param colormap: (optional) the colormap of image that will be displayed. This parameter is set to greyscale by
default.
:type colormap: string
:return: the FSLeyes overlay corresponding to the loaded image.
:rtype: overlay
"""
# Open the 2D image
img_png2D = ads_utils.imread(image_path)
if is_mask is True:
img_png2D = img_png2D // params.intensity['binary'] # Segmentation masks should be binary
# Flip the image on the Y axis so that the morphometrics file shows the right coordinates
img_png2D = np.flipud(img_png2D)
# Convert image data into a NIfTI image
# Note: PIL and NiBabel use different axis conventions, so some array manipulation has to be done.
img_NIfTI = nib.Nifti1Image(
np.rot90(img_png2D, k=1, axes=(1, 0)), np.eye(4)
)
# Save the NIfTI image in a temporary directory
img_name = image_path.stem
        out_file = str(self.ads_temp_dir / (img_name + ".nii.gz"))
nib.save(img_NIfTI, out_file)
# Load the NIfTI image as an overlay
img_overlay = ovLoad.loadOverlays(paths=[out_file], inmem=True, blocking=True)[
0
]
# Display the overlay
if add_to_overlayList is True:
self.overlayList.append(img_overlay)
opts = self.displayCtx.getOpts(img_overlay)
opts.cmap = colormap
return img_overlay
def get_visible_overlays(self):
"""
        This function returns a list containing every overlay that is visible on FSLeyes.
:return: The list of the visible overlays
:rtype: list
"""
visible_overlay_list = []
for an_overlay in self.overlayList:
an_overlay_display = self.displayCtx.getDisplay(an_overlay)
if an_overlay_display.enabled is True:
visible_overlay_list.append(an_overlay)
return visible_overlay_list
def get_visible_image_overlay(self):
"""
This function is used to find the active microscopy image. This image should be visible and should NOT have the
        following keywords in its name: axon, myelin, Myelin, watershed, Watershed.
:return: The visible microscopy image
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
image_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
            self.show_message("No overlays are displayed")
            return None
        if len(visible_overlay_list) == 1:
            return visible_overlay_list[0]
for an_overlay in visible_overlay_list:
if (
("watershed" not in an_overlay.name)
and ("Watershed" not in an_overlay.name)
and (not an_overlay.name.endswith("-myelin"))
and (not an_overlay.name.endswith("-Myelin"))
and (not an_overlay.name.endswith("-Axon"))
and (not an_overlay.name.endswith("-axon"))
):
n_found_overlays = n_found_overlays + 1
image_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one microscopy image has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible microscopy image has been found")
return None
return image_overlay
def get_visible_axon_overlay(self):
"""
This method finds the currently visible axon overlay
:return: The visible overlay that corresponds to the axon mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon")) or (an_overlay.name.endswith("-Axon")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one axon mask has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible axon mask has been found")
return None
return axon_overlay
def get_corrected_axon_overlay(self):
"""
        This method finds the visible corrected axon overlay if it exists
        :return: The visible corrected axon overlay
        :rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon-corr")) or (an_overlay.name.endswith("-Axon-corr")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one corrected axon mask has been found")
return None
        if n_found_overlays == 0:
return None
return axon_overlay
def get_visible_myelin_overlay(self):
"""
This method finds the currently visible myelin overlay
:return: The visible overlay that corresponds to the myelin mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
myelin_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-myelin")) or (an_overlay.name.endswith("-Myelin")):
n_found_overlays = n_found_overlays + 1
myelin_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one myelin mask has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible myelin mask has been found")
return None
return myelin_overlay
def show_message(self, message, caption="Error"):
"""
This function is used to show a popup message on the FSLeyes interface.
:param message: The message to be displayed.
:type message: String
:param caption: (Optional) The caption of the message box.
:type caption: String
"""
with wx.MessageDialog(
self,
message,
caption=caption,
style=wx.OK | wx.CENTRE,
pos=wx.DefaultPosition,
) as msg:
msg.ShowModal()
    def verify_version(self):
"""
This function checks if the plugin version is the same as the one in the AxonDeepSeg directory
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
plugin_path_parts = ads_path.parts[:-1]
plugin_path = Path(*plugin_path_parts)
plugin_file = plugin_path / "ads_plugin.py"
# Check if the plugin file exists
plugin_file_exists = plugin_file.exists()
if plugin_file_exists is False:
return
# Check the version of the plugin
with open(plugin_file.__str__()) as plugin_file_reader:
plugin_file_lines = plugin_file_reader.readlines()
plugin_file_lines = [x.strip() for x in plugin_file_lines]
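        # Look for a line of the form VERSION = "x.y.z" and compare it with this file's VERSION constant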
version_line = 'VERSION = "' + VERSION + '"'
plugin_is_up_to_date = True
version_found = False
        for line in plugin_file_lines:
            if line.startswith("VERSION = "):
                version_found = True
                if line != version_line:
                    plugin_is_up_to_date = False
if (version_found is False) or (plugin_is_up_to_date is False):
message = (
"A more recent version of the AxonDeepSeg plugin was found in your AxonDeepSeg installation folder. "
"You will need to replace the current FSLeyes plugin which the new one. "
"To proceed, go to: file -> load plugin -> ads_plugin.py. Then, restart FSLeyes."
)
self.show_message(message, "Warning")
return
def get_citation(self):
"""
This function returns the AxonDeepSeg paper citation.
:return: The AxonDeepSeg citation
:rtype: string
"""
return (
"If you use this work in your research, please cite it as follows: \n"
"<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018). "
"AxonDeepSeg: automatic axon and myelin segmentation from microscopy data using convolutional "
"neural networks. Scientific Reports, 8(1), 3816. "
"Link to paper: https://doi.org/10.1038/s41598-018-22181-4. \n"
"Copyright (c) 2018 NeuroPoly (Polytechnique Montreal)"
)
def get_logo(self):
"""
This function finds the AxonDeepSeg logo saved as a png image and returns it as a wx bitmap image.
:return: The AxonDeepSeg logo
:rtype: wx.StaticBitmap
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
logo_file = ads_path / "logo_ads-alpha_small.png"
png = wx.Image(str(logo_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
png.SetSize((png.GetWidth(), png.GetHeight()))
logo_image = wx.StaticBitmap(
self, -1, png, wx.DefaultPosition, (png.GetWidth(), png.GetHeight())
)
return logo_image
@staticmethod
def supportedViews():
"""
        This method tells FSLeyes which view types this control panel supports (here, the ortho view).
"""
from fsleyes.views.orthopanel import OrthoPanel
return [OrthoPanel]
@staticmethod
def defaultLayout():
"""
This method makes the control panel appear on the left of the FSLeyes window.
"""
return {"location": wx.LEFT}
| [
"AxonDeepSeg.ads_utils.imread",
"AxonDeepSeg.morphometrics.compute_morphometrics.get_axon_morphometrics",
"pathlib.Path",
"numpy.rot90",
"skimage.measure.label",
"skimage.measure.regionprops",
"pandas.DataFrame",
"fsleyes.controls.controlpanel.ControlPanel.__init__",
"scipy.ndimage.distance_transfor... | [((1880, 1941), 'wx.Frame', 'wx.Frame', (['self.ads_control'], {'title': '"""Settings"""', 'size': '(600, 300)'}), "(self.ads_control, title='Settings', size=(600, 300))\n", (1888, 1941), False, 'import wx\n'), ((1966, 1990), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1977, 1990), False, 'import wx\n'), ((2075, 2101), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (2086, 2101), False, 'import wx\n'), ((2134, 2262), 'wx.ToolTip', 'wx.ToolTip', (['"""Represents the number of pixels that overlap two patches of the image when applying the prediction model"""'], {}), "(\n 'Represents the number of pixels that overlap two patches of the image when applying the prediction model'\n )\n", (2144, 2262), False, 'import wx\n'), ((2439, 2515), 'wx.SpinCtrl', 'wx.SpinCtrl', (['self.settings_frame'], {'min': '(0)', 'max': '(100)', 'initial': 'self.overlap_value'}), '(self.settings_frame, min=0, max=100, initial=self.overlap_value)\n', (2450, 2515), False, 'import wx\n'), ((2893, 2919), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (2904, 2919), False, 'import wx\n'), ((2950, 3127), 'wx.ToolTip', 'wx.ToolTip', (['"""When applying the model, the pixel size of the image will be multiplied by this number. The zoom factor does not affect the computation of morphometrics."""'], {}), "(\n 'When applying the model, the pixel size of the image will be multiplied by this number. The zoom factor does not affect the computation of morphometrics.'\n )\n", (2960, 3127), False, 'import wx\n'), ((3293, 3369), 'wx.SpinCtrlDouble', 'wx.SpinCtrlDouble', (['self.settings_frame'], {'initial': 'self.zoom_factor', 'inc': '(0.0001)'}), '(self.settings_frame, initial=self.zoom_factor, inc=0.0001)\n', (3310, 3369), False, 'import wx\n'), ((3795, 3821), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (3806, 3821), False, 'import wx\n'), ((3851, 4130), 'wx.ToolTip', 'wx.ToolTip', (['"""Select what is the shape of the axons that will be considered when computing the morphometrics. "circle" will use the equivalent diameter (diameter of a circle with the same area as the axon). "ellipse" will use minor axis of a fitted ellipse as diameter."""'], {}), '(\n \'Select what is the shape of the axons that will be considered when computing the morphometrics. "circle" will use the equivalent diameter (diameter of a circle with the same area as the axon). 
"ellipse" will use minor axis of a fitted ellipse as diameter.\'\n )\n', (3861, 4130), False, 'import wx\n'), ((4329, 4432), 'wx.ComboBox', 'wx.ComboBox', (['self.settings_frame'], {'choices': 'axon_shape_choices', 'size': '(100, 20)', 'value': 'self.axon_shape'}), '(self.settings_frame, choices=axon_shape_choices, size=(100, 20),\n value=self.axon_shape)\n', (4340, 4432), False, 'import wx\n'), ((4837, 4863), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (4848, 4863), False, 'import wx\n'), ((4886, 4930), 'wx.Button', 'wx.Button', (['self.settings_frame'], {'label': '"""Done"""'}), "(self.settings_frame, label='Done')\n", (4895, 4930), False, 'import wx\n'), ((6198, 6259), 'fsleyes.controls.controlpanel.ControlPanel.__init__', 'ctrlpanel.ControlPanel.__init__', (['self', 'ortho', '*args'], {}), '(self, ortho, *args, **kwargs)\n', (6229, 6259), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((6448, 6472), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (6459, 6472), False, 'import wx\n'), ((6919, 7046), 'wx.lib.agw.hyperlink.HyperLinkCtrl', 'hl.HyperLinkCtrl', (['self', '(-1)'], {'label': '"""Need help? Read the documentation"""', 'URL': '"""https://axondeepseg.readthedocs.io/en/latest/"""'}), "(self, -1, label='Need help? Read the documentation', URL=\n 'https://axondeepseg.readthedocs.io/en/latest/')\n", (6935, 7046), True, 'import wx.lib.agw.hyperlink as hl\n'), ((7271, 7316), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Load PNG or TIF file"""'}), "(self, label='Load PNG or TIF file')\n", (7280, 7316), False, 'import wx\n'), ((7672, 7715), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Load existing mask"""'}), "(self, label='Load existing mask')\n", (7681, 7715), False, 'import wx\n'), ((8901, 8952), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Apply ADS prediction model"""'}), "(self, label='Apply ADS prediction model')\n", (8910, 8952), False, 'import wx\n'), ((10031, 10066), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Fill axons"""'}), "(self, label='Fill axons')\n", (10040, 10066), False, 'import wx\n'), ((10671, 10713), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Save segmentation"""'}), "(self, label='Save segmentation')\n", (10680, 10713), False, 'import wx\n'), ((11169, 11215), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Compute morphometrics"""'}), "(self, label='Compute morphometrics')\n", (11178, 11215), False, 'import wx\n'), ((11815, 11848), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Settings"""'}), "(self, label='Settings')\n", (11824, 11848), False, 'import wx\n'), ((12857, 12886), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (12884, 12886), False, 'import tempfile\n'), ((13070, 13102), 'pathlib.Path', 'Path', (['self.ads_temp_dir_var.name'], {}), '(self.ads_temp_dir_var.name)\n', (13074, 13102), False, 'from pathlib import Path\n'), ((15565, 15590), 'AxonDeepSeg.ads_utils.imread', 'ads_utils.imread', (['in_file'], {}), '(in_file)\n', (15581, 15590), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((16080, 16122), 'AxonDeepSeg.ads_utils.imwrite', 'ads_utils.imwrite', (['axon_outfile', 'axon_mask'], {}), '(axon_outfile, axon_mask)\n', (16097, 16122), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((16289, 16335), 'AxonDeepSeg.ads_utils.imwrite', 'ads_utils.imwrite', (['myelin_outfile', 'myelin_mask'], {}), '(myelin_outfile, myelin_mask)\n', (16306, 16335), False, 'from 
AxonDeepSeg import postprocessing, params, ads_utils\n'), ((19411, 19599), 'AxonDeepSeg.segment.segment_image', 'segment_image', (['image_path', 'model_path', 'self.settings.overlap_value', 'config_network', 'resolution'], {'acquired_resolution': '(pixel_size_float * self.settings.zoom_factor)', 'verbosity_level': '(3)'}), '(image_path, model_path, self.settings.overlap_value,\n config_network, resolution, acquired_resolution=pixel_size_float * self\n .settings.zoom_factor, verbosity_level=3)\n', (19424, 19599), False, 'from AxonDeepSeg.segment import segment_image\n'), ((21802, 21867), 'numpy.array', 'np.array', (['myelin_mask_overlay[:, :, 0]'], {'copy': '(True)', 'dtype': 'np.uint8'}), '(myelin_mask_overlay[:, :, 0], copy=True, dtype=np.uint8)\n', (21810, 21867), True, 'import numpy as np\n'), ((21913, 21936), 'numpy.flipud', 'np.flipud', (['myelin_array'], {}), '(myelin_array)\n', (21922, 21936), True, 'import numpy as np\n'), ((21960, 22000), 'numpy.rot90', 'np.rot90', (['myelin_array'], {'k': '(1)', 'axes': '(1, 0)'}), '(myelin_array, k=1, axes=(1, 0))\n', (21968, 22000), True, 'import numpy as np\n'), ((22022, 22085), 'numpy.array', 'np.array', (['axon_mask_overlay[:, :, 0]'], {'copy': '(True)', 'dtype': 'np.uint8'}), '(axon_mask_overlay[:, :, 0], copy=True, dtype=np.uint8)\n', (22030, 22085), True, 'import numpy as np\n'), ((22129, 22150), 'numpy.flipud', 'np.flipud', (['axon_array'], {}), '(axon_array)\n', (22138, 22150), True, 'import numpy as np\n'), ((22172, 22210), 'numpy.rot90', 'np.rot90', (['axon_array'], {'k': '(1)', 'axes': '(1, 0)'}), '(axon_array, k=1, axes=(1, 0))\n', (22180, 22210), True, 'import numpy as np\n'), ((22481, 22578), 'AxonDeepSeg.postprocessing.remove_intersection', 'postprocessing.remove_intersection', (['myelin_array', 'axon_array'], {'priority': '(1)', 'return_overlap': '(True)'}), '(myelin_array, axon_array, priority=1,\n return_overlap=True)\n', (22515, 22578), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((24932, 24974), 'numpy.rot90', 'np.rot90', (['watershed_data'], {'k': '(3)', 'axes': '(1, 0)'}), '(watershed_data, k=3, axes=(1, 0))\n', (24940, 24974), True, 'import numpy as np\n'), ((25001, 25039), 'PIL.Image.fromarray', 'Image.fromarray', (['watershed_image_array'], {}), '(watershed_image_array)\n', (25016, 25039), False, 'from PIL import Image, ImageDraw, ImageOps\n'), ((26365, 26421), 'AxonDeepSeg.postprocessing.floodfill_axons', 'postprocessing.floodfill_axons', (['axon_array', 'myelin_array'], {}), '(axon_array, myelin_array)\n', (26395, 26421), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((26449, 26480), 'numpy.flipud', 'np.flipud', (['axon_extracted_array'], {}), '(axon_extracted_array)\n', (26458, 26480), True, 'import numpy as np\n'), ((26692, 26750), 'AxonDeepSeg.ads_utils.imwrite', 'ads_utils.imwrite', ([], {'filename': 'file_name', 'img': 'axon_corr_array'}), '(filename=file_name, img=axon_corr_array)\n', (26709, 26750), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((28089, 28188), 'numpy.array', 'np.array', (["(myelin_mask_overlay[:, :, 0] * params.intensity['binary'])"], {'copy': '(True)', 'dtype': 'np.uint8'}), "(myelin_mask_overlay[:, :, 0] * params.intensity['binary'], copy=\n True, dtype=np.uint8)\n", (28097, 28188), True, 'import numpy as np\n'), ((28229, 28252), 'numpy.flipud', 'np.flipud', (['myelin_array'], {}), '(myelin_array)\n', (28238, 28252), True, 'import numpy as np\n'), ((28276, 28316), 'numpy.rot90', 'np.rot90', (['myelin_array'], {'k': 
'(1)', 'axes': '(1, 0)'}), '(myelin_array, k=1, axes=(1, 0))\n', (28284, 28316), True, 'import numpy as np\n'), ((28338, 28434), 'numpy.array', 'np.array', (["(axon_mask_overlay[:, :, 0] * params.intensity['binary'])"], {'copy': '(True)', 'dtype': 'np.uint8'}), "(axon_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True,\n dtype=np.uint8)\n", (28346, 28434), True, 'import numpy as np\n'), ((28474, 28495), 'numpy.flipud', 'np.flipud', (['axon_array'], {}), '(axon_array)\n', (28483, 28495), True, 'import numpy as np\n'), ((28517, 28555), 'numpy.rot90', 'np.rot90', (['axon_array'], {'k': '(1)', 'axes': '(1, 0)'}), '(axon_array, k=1, axes=(1, 0))\n', (28525, 28555), True, 'import numpy as np\n'), ((28901, 28940), 'numpy.logical_and', 'np.logical_and', (['(pred >= 50)', '(pred <= 200)'], {}), '(pred >= 50, pred <= 200)\n', (28915, 28940), True, 'import numpy as np\n'), ((28954, 29284), 'numpy.array', 'np.array', (['[]'], {'dtype': "[('x0', 'f4'), ('y0', 'f4'), ('gratio', 'f4'), ('axon_area', 'f4'), (\n 'axon_perimeter', 'f4'), ('myelin_area', 'f4'), ('axon_diam', 'f4'), (\n 'myelin_thickness', 'f4'), ('axonmyelin_area', 'f4'), (\n 'axonmyelin_perimeter', 'f4'), ('solidity', 'f4'), ('eccentricity',\n 'f4'), ('orientation', 'f4')]"}), "([], dtype=[('x0', 'f4'), ('y0', 'f4'), ('gratio', 'f4'), (\n 'axon_area', 'f4'), ('axon_perimeter', 'f4'), ('myelin_area', 'f4'), (\n 'axon_diam', 'f4'), ('myelin_thickness', 'f4'), ('axonmyelin_area',\n 'f4'), ('axonmyelin_perimeter', 'f4'), ('solidity', 'f4'), (\n 'eccentricity', 'f4'), ('orientation', 'f4')])\n", (28962, 29284), True, 'import numpy as np\n'), ((29793, 29966), 'AxonDeepSeg.morphometrics.compute_morphometrics.get_axon_morphometrics', 'compute_morphs.get_axon_morphometrics', ([], {'im_axon': 'pred_axon', 'im_myelin': 'pred_myelin', 'pixel_size': 'pixel_size', 'axon_shape': 'self.settings.axon_shape', 'return_index_image': '(True)'}), '(im_axon=pred_axon, im_myelin=\n pred_myelin, pixel_size=pixel_size, axon_shape=self.settings.axon_shape,\n return_index_image=True)\n', (29830, 29966), True, 'import AxonDeepSeg.morphometrics.compute_morphometrics as compute_morphs\n'), ((32020, 32071), 'AxonDeepSeg.ads_utils.imwrite', 'ads_utils.imwrite', (['index_outfile', 'index_image_array'], {}), '(index_outfile, index_image_array)\n', (32037, 32071), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((32245, 32319), 'AxonDeepSeg.postprocessing.remove_intersection', 'postprocessing.remove_intersection', (['(axon_array // 255)', '(myelin_array // 255)'], {}), '(axon_array // 255, myelin_array // 255)\n', (32279, 32319), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((32500, 32555), 'AxonDeepSeg.ads_utils.imwrite', 'ads_utils.imwrite', (['axonmyelin_outfile', 'axonmyelin_image'], {}), '(axonmyelin_outfile, axonmyelin_image)\n', (32517, 32555), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((33807, 33829), 'skimage.measure.label', 'measure.label', (['im_axon'], {}), '(im_axon)\n', (33820, 33829), False, 'from skimage import measure, morphology, feature\n'), ((33903, 33937), 'skimage.measure.regionprops', 'measure.regionprops', (['im_axon_label'], {}), '(im_axon_label)\n', (33922, 33937), False, 'from skimage import measure, morphology, feature\n'), ((36762, 36790), 'AxonDeepSeg.ads_utils.imread', 'ads_utils.imread', (['image_path'], {}), '(image_path)\n', (36778, 36790), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((37042, 37062), 'numpy.flipud', 'np.flipud', 
(['img_png2D'], {}), '(img_png2D)\n', (37051, 37062), True, 'import numpy as np\n'), ((37503, 37532), 'nibabel.save', 'nib.save', (['img_NIfTI', 'out_file'], {}), '(img_NIfTI, out_file)\n', (37511, 37532), True, 'import nibabel as nib\n'), ((43752, 43776), 'pathlib.Path', 'Path', (['*plugin_path_parts'], {}), '(*plugin_path_parts)\n', (43756, 43776), False, 'from pathlib import Path\n'), ((2331, 2399), 'wx.StaticText', 'wx.StaticText', (['self.settings_frame'], {'label': '"""Overlap value (pixels): """'}), "(self.settings_frame, label='Overlap value (pixels): ')\n", (2344, 2399), False, 'import wx\n'), ((3192, 3249), 'wx.StaticText', 'wx.StaticText', (['self.settings_frame'], {'label': '"""Zoom factor: """'}), "(self.settings_frame, label='Zoom factor: ')\n", (3205, 3249), False, 'import wx\n'), ((4236, 4292), 'wx.StaticText', 'wx.StaticText', (['self.settings_frame'], {'label': '"""Axon shape: """'}), "(self.settings_frame, label='Axon shape: ')\n", (4249, 4292), False, 'import wx\n'), ((7485, 7537), 'wx.ToolTip', 'wx.ToolTip', (['"""Loads a .png or .tif file into FSLeyes"""'], {}), "('Loads a .png or .tif file into FSLeyes')\n", (7495, 7537), False, 'import wx\n'), ((7901, 8156), 'wx.ToolTip', 'wx.ToolTip', (['"""Loads an existing axonmyelin mask into FSLeyes. The selected image should contain both the axon and myelin masks. The regions on the image should have an intensity of 0 for the background, 127 for the myelin and 255 for the axons. """'], {}), "(\n 'Loads an existing axonmyelin mask into FSLeyes. The selected image should contain both the axon and myelin masks. The regions on the image should have an intensity of 0 for the background, 127 for the myelin and 255 for the axons. '\n )\n", (7911, 8156), False, 'import wx\n'), ((8671, 8730), 'wx.ToolTip', 'wx.ToolTip', (['"""Select the modality used to acquire the image"""'], {}), "('Select the modality used to acquire the image')\n", (8681, 8730), False, 'import wx\n'), ((9146, 9211), 'wx.ToolTip', 'wx.ToolTip', (['"""Applies the prediction model and displays the masks"""'], {}), "('Applies the prediction model and displays the masks')\n", (9156, 9211), False, 'import wx\n'), ((10256, 10454), 'wx.ToolTip', 'wx.ToolTip', (['"""Automatically fills the axons inside myelin objects. THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY."""'], {}), "(\n 'Automatically fills the axons inside myelin objects. THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY.'\n )\n", (10266, 10454), False, 'import wx\n'), ((10931, 10999), 'wx.ToolTip', 'wx.ToolTip', (['"""Saves the axon and myelin masks in the selected folder"""'], {}), "('Saves the axon and myelin masks in the selected folder')\n", (10941, 10999), False, 'import wx\n'), ((11449, 11624), 'wx.ToolTip', 'wx.ToolTip', (['"""Calculates and saves the morphometrics to an excel and csv file. Shows the indexes of the axons at the coordinates specified in the morphometrics file."""'], {}), "(\n 'Calculates and saves the morphometrics to an excel and csv file. 
Shows the indexes of the axons at the coordinates specified in the morphometrics file.'\n )\n", (11459, 11624), False, 'import wx\n'), ((13481, 13568), 'wx.FileDialog', 'wx.FileDialog', (['self', '"""select Image file"""'], {'style': '(wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)'}), "(self, 'select Image file', style=wx.FD_OPEN | wx.\n FD_FILE_MUST_EXIST)\n", (13494, 13568), False, 'import wx\n'), ((14916, 15007), 'wx.FileDialog', 'wx.FileDialog', (['self', '"""select mask .png file"""'], {'style': '(wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)'}), "(self, 'select mask .png file', style=wx.FD_OPEN | wx.\n FD_FILE_MUST_EXIST)\n", (14929, 15007), False, 'import wx\n'), ((15743, 15778), 'numpy.array', 'np.array', (['axon_mask'], {'dtype': 'np.uint8'}), '(axon_mask, dtype=np.uint8)\n', (15751, 15778), True, 'import numpy as np\n'), ((15925, 15962), 'numpy.array', 'np.array', (['myelin_mask'], {'dtype': 'np.uint8'}), '(myelin_mask, dtype=np.uint8)\n', (15933, 15962), True, 'import numpy as np\n'), ((17659, 17675), 'pathlib.Path', 'Path', (['image_name'], {}), '(image_name)\n', (17663, 17675), False, 'from pathlib import Path\n'), ((21136, 21291), 'wx.DirDialog', 'wx.DirDialog', (['self', '"""select the directory in which the segmentation will be save"""'], {'defaultPath': '""""""', 'style': '(wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)'}), "(self,\n 'select the directory in which the segmentation will be save',\n defaultPath='', style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)\n", (21148, 21291), False, 'import wx\n'), ((26536, 26579), 'numpy.rot90', 'np.rot90', (['axon_corr_array'], {'k': '(1)', 'axes': '(1, 0)'}), '(axon_corr_array, k=1, axes=(1, 0))\n', (26544, 26579), True, 'import numpy as np\n'), ((30936, 31113), 'wx.FileDialog', 'wx.FileDialog', (['self', '"""Save morphometrics file"""'], {'wildcard': '"""Excel files (*.xlsx)|*.xlsx"""', 'defaultFile': '"""axon_morphometrics.xlsx"""', 'style': '(wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)'}), "(self, 'Save morphometrics file', wildcard=\n 'Excel files (*.xlsx)|*.xlsx', defaultFile='axon_morphometrics.xlsx',\n style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n", (30949, 31113), False, 'import wx\n'), ((34444, 34479), 'scipy.ndimage.distance_transform_edt', 'ndi.distance_transform_edt', (['im_axon'], {}), '(im_axon)\n', (34470, 34479), True, 'from scipy import ndimage as ndi\n'), ((34988, 35026), 'numpy.zeros_like', 'np.zeros_like', (['im_axon'], {'dtype': '"""uint16"""'}), "(im_axon, dtype='uint16')\n", (35001, 35026), True, 'import numpy as np\n'), ((35345, 35409), 'skimage.morphology.watershed', 'morphology.watershed', (['(-distance)', 'im_centroid'], {'mask': 'im_axonmyelin'}), '(-distance, im_centroid, mask=im_axonmyelin)\n', (35365, 35409), False, 'from skimage import measure, morphology, feature\n'), ((37268, 37305), 'numpy.rot90', 'np.rot90', (['img_png2D'], {'k': '(1)', 'axes': '(1, 0)'}), '(img_png2D, k=1, axes=(1, 0))\n', (37276, 37305), True, 'import numpy as np\n'), ((37307, 37316), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (37313, 37316), True, 'import numpy as np\n'), ((37601, 37665), 'fsleyes.actions.loadoverlay.loadOverlays', 'ovLoad.loadOverlays', ([], {'paths': '[out_file]', 'inmem': '(True)', 'blocking': '(True)'}), '(paths=[out_file], inmem=True, blocking=True)\n', (37620, 37665), True, 'import fsleyes.actions.loadoverlay as ovLoad\n'), ((43261, 43362), 'wx.MessageDialog', 'wx.MessageDialog', (['self', 'message'], {'caption': 'caption', 'style': '(wx.OK | wx.CENTRE)', 'pos': 'wx.DefaultPosition'}), '(self, message, caption=caption, 
style=wx.OK | wx.CENTRE,\n pos=wx.DefaultPosition)\n', (43277, 43362), False, 'import wx\n'), ((8434, 8470), 'AxonDeepSeg.ads_utils.get_existing_models_list', 'ads_utils.get_existing_models_list', ([], {}), '()\n', (8468, 8470), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((18658, 18734), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['self', '"""Enter the pixel size in micrometer"""'], {'value': '"""0.07"""'}), "(self, 'Enter the pixel size in micrometer', value='0.07')\n", (18676, 18734), False, 'import wx\n'), ((30275, 30597), 'numpy.array', 'np.array', (["[(stats['x0'], stats['y0'], stats['gratio'], stats['axon_area'], stats[\n 'axon_perimeter'], stats['myelin_area'], stats['axon_diam'], stats[\n 'myelin_thickness'], stats['axonmyelin_area'], stats[\n 'axonmyelin_perimeter'], stats['solidity'], stats['eccentricity'],\n stats['orientation'])]"], {'dtype': 'x.dtype'}), "([(stats['x0'], stats['y0'], stats['gratio'], stats['axon_area'],\n stats['axon_perimeter'], stats['myelin_area'], stats['axon_diam'],\n stats['myelin_thickness'], stats['axonmyelin_area'], stats[\n 'axonmyelin_perimeter'], stats['solidity'], stats['eccentricity'],\n stats['orientation'])], dtype=x.dtype)\n", (30283, 30597), True, 'import numpy as np\n'), ((43644, 43670), 'pathlib.Path', 'Path', (['AxonDeepSeg.__file__'], {}), '(AxonDeepSeg.__file__)\n', (43648, 43670), False, 'from pathlib import Path\n'), ((45976, 46002), 'pathlib.Path', 'Path', (['AxonDeepSeg.__file__'], {}), '(AxonDeepSeg.__file__)\n', (45980, 46002), False, 'from pathlib import Path\n'), ((17142, 17170), 'pathlib.Path', 'Path', (['self.png_image_name[i]'], {}), '(self.png_image_name[i])\n', (17146, 17170), False, 'from pathlib import Path\n'), ((17988, 18024), 'AxonDeepSeg.ads_utils.get_existing_models_list', 'ads_utils.get_existing_models_list', ([], {}), '()\n', (18022, 18024), False, 'from AxonDeepSeg import postprocessing, params, ads_utils\n'), ((18050, 18076), 'pathlib.Path', 'Path', (['AxonDeepSeg.__file__'], {}), '(AxonDeepSeg.__file__)\n', (18054, 18076), False, 'from pathlib import Path\n'), ((27092, 27168), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['self', '"""Enter the pixel size in micrometer"""'], {'value': '"""0.07"""'}), "(self, 'Enter the pixel size in micrometer', value='0.07')\n", (27110, 27168), False, 'import wx\n'), ((31667, 31731), 'wx.LogError', 'wx.LogError', (['("Cannot save current data in file \'%s\'." % pathname)'], {}), '("Cannot save current data in file \'%s\'." % pathname)\n', (31678, 31731), False, 'import wx\n'), ((31942, 31956), 'pathlib.Path', 'Path', (['pathname'], {}), '(pathname)\n', (31946, 31956), False, 'from pathlib import Path\n'), ((31587, 31602), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (31599, 31602), True, 'import pandas as pd\n'), ((32653, 32667), 'pathlib.Path', 'Path', (['pathname'], {}), '(pathname)\n', (32657, 32667), False, 'from pathlib import Path\n')] |
"""Running basic code:
Importing packages, setting working directory,
printing out date"""
import os as os
os.chdir('C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/')
import datetime as dt
print(dt.datetime.now())
from sklearn.metrics import confusion_matrix
import seaborn as sns
#from pandas_ml import ConfusionMatrix
data_path = 'C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/data'
output_path = 'C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs'
from HPnex import functions as f
from HPnex import classification as classify
from HPnex import fitting_functions as fitt
import numpy as np
import networkx as nx
#np.random.seed(42)
from sklearn.ensemble import RandomForestClassifier
#from pandas_ml import ConfusionMatrix
from matplotlib import pyplot as plt
import seaborn as sns
import scipy.stats as stats
from sklearn import model_selection
import math
height = 6
font = 12
import sklearn
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
#from sklearn.cross_validation import
from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import learning_curve
#from pandas_ml import ConfusionMatrix
from textblob import TextBlob
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
#from pandas_ml import ConfusionMatrix
from HPnex import functions as f
### Running cross validation scores and predictions
from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, precision_recall_fscore_support
import matplotlib.style as style
style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'Times New Roman'
sns.set_context("notebook", font_scale=1.30, rc={"lines.linewidth": 0.8})
import itertools as itertools
import pandas as pd
import joblib
###############################################################################################################################
###############################################################################################################################
def generete_temp_network(virus, hosts, ViralFamily, PubMed, BPnx_group, Gc,IUCN, virus_df):
#print('this function is in multiclass validation file 1st function')
import math
temp_BPnx = BPnx_group.copy()
#print (temp_BPnx.number_of_nodes()) ## checking number of nodes
virus_nodes = [x for x,y in temp_BPnx.nodes(data=True) if y['type']=='virus'] #creating list of virus nodes from bipartite network
df = pd.DataFrame({'Virus2':virus_nodes}) # converting them to a dataframe
df['Virus1'] = virus # dataframe with all possible combinations of new virus and viruses from BPnx
temp_BPnx.add_node(virus, virusname=virus, type='virus', bipartite = 1) ## adding new node to the Bpnxtemp
#print (temp_BPnx.number_of_nodes()) ## rechecking number of nodes
for h in hosts:
temp_BPnx.add_edge(virus, h) ## adding new edge to the Bpnxtemp
def get_n_shared_hosts(c): ## calculating number of neighbours for our new virus
return len(list(nx.common_neighbors(temp_BPnx, c['Virus1'],c['Virus2'])))
df['n_shared_hosts'] = df.apply(get_n_shared_hosts, axis=1)
    def addsharedhosts(c):  ## listing the hosts shared between the two viruses
return sorted(nx.common_neighbors(temp_BPnx, c['Virus1'],c['Virus2']))
df["shared_hosts"] = df.apply(addsharedhosts, axis=1)
def add_hosts_orders (c):
order_list = IUCN[IUCN.ScientificName.isin(c['shared_hosts'])]['Order'].unique().tolist()
return order_list
df["shared_orders"] = df.apply(add_hosts_orders, axis=1)
new_edges = df[df['n_shared_hosts']>0] ### list of new edges for new viruses
#print(new_edges.shape)
Gc_temp = Gc.copy() ## creating a temporary copy of GC complete
Gc_temp.add_node(virus, ViralFamily=ViralFamily, type='virus', bipartite = 1) ## adding new node to the Bpnxtemp
for index, row in new_edges.iterrows():
if row['n_shared_hosts'] > 0:
Gc_temp.add_edge(row['Virus1'], row['Virus2'], weight = row['n_shared_hosts'], hosts = ','.join(row['shared_hosts']),
orders = ','.join(row['shared_orders']))
#edges_to_predict = df[df['n_shared_hosts']==0]
edges_to_predict = df
edges_to_predict = edges_to_predict[edges_to_predict.Virus2 != 'nan']
virus_df_temp = virus_df.copy()
    virus_df_temp.loc[len(virus_df_temp)] = [virus, ViralFamily, math.log(PubMed), 1, 1]
return Gc_temp, edges_to_predict, virus_df_temp
###############################################################################################################################
###############################################################################################################################
def prediction(temp_x, clf_multi, inv_dictionary):
#print('this function is in multiclass validation file 1st function')
    inv_dictionary = dict((k, v.title()) for k, v in inv_dictionary.items())
    temp_x.columns = ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9']  # rename before predicting, as in the later definition
    Order_prediction = pd.DataFrame(clf_multi.predict(temp_x)).replace(inv_dictionary)
#temp_x.columns = ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11','f12', 'f13']
probs = clf_multi.predict_proba(temp_x)
prob_max =[]
    for i in range(len(probs)):
        prob_max.append(np.amax(probs[i], axis=1))
max_prob = pd.DataFrame(prob_max).T
prediction = Order_prediction.join(max_prob, lsuffix='_pr', rsuffix='_shakyata')
return prediction
###############################################################################################################################
###############################################################################################################################
def cross_validation_predict(virus, hosts, ViralFamily, PubMed, BPnx_group, Gc, virus_df, clf_multi, inv_dictionary):
#print('this function is in multiclass validation file')
from HPnex import predict_multi as pred_m
Gc_temp_group, edges_to_predict, virus_df_temp = generete_temp_network(virus = virus,
hosts = hosts,
ViralFamily = ViralFamily,
PubMed = PubMed,
BPnx_group = BPnx_group,
Gc = Gc,
virus_df = virus_df)
temp_x = pred_m.preprocessing_x(data_frame = edges_to_predict,
network = Gc_temp_group,
virus_df_temp = virus_df_temp,
virus_df = virus_df)
pred_group = prediction(temp_x =temp_x, clf_multi =clf_multi, inv_dictionary = inv_dictionary)
result_group = pred_group.join(edges_to_predict)
return result_group, edges_to_predict
###############################################################################################################################
###############################################################################################################################
def generete_temp_network(virus, hosts, ViralFamily, PubMed, BPnx_group, Gc,IUCN, virus_df):
#print('this function is in multiclass validation file')
import math
temp_BPnx = BPnx_group.copy()
#print (temp_BPnx.number_of_nodes()) ## checking number of nodes
#virus_nodes = [x for x,y in temp_BPnx.nodes(data=True) if y['type']=='virus']
q_df = pd.DataFrame.from_dict(dict(BPnx_group.nodes(data=True)), orient='index')
q_df = q_df.loc[q_df.index.dropna()]
virus_nodes = q_df[q_df['type'] == 'virus'].index.tolist()#creating list of virus nodes from bipartite network
df = pd.DataFrame({'Virus2':virus_nodes}) # converting them to a dataframe
df['Virus1'] = virus # dataframe with all possible combinations of new virus and viruses from BPnx
temp_BPnx.add_node(virus, virusname=virus, type='virus', bipartite = 1) ## adding new node to the Bpnxtemp
#print (temp_BPnx.number_of_nodes()) ## rechecking number of nodes
for h in hosts:
temp_BPnx.add_edge(virus, h) ## adding new edge to the Bpnxtemp
def get_n_shared_hosts(c): ## calculating number of neighbours for our new virus
return len(list(nx.common_neighbors(temp_BPnx, c['Virus1'],c['Virus2'])))
df['n_shared_hosts'] = df.apply(get_n_shared_hosts, axis=1)
    def addsharedhosts(c):  ## listing the hosts shared between the two viruses
return sorted(nx.common_neighbors(temp_BPnx, c['Virus1'],c['Virus2']))
df["shared_hosts"] = df.apply(addsharedhosts, axis=1)
def add_hosts_orders (c):
order_list = IUCN[IUCN.ScientificName.isin(c['shared_hosts'])]['Order'].unique().tolist()
return order_list
df["shared_orders"] = df.apply(add_hosts_orders, axis=1)
new_edges = df[df['n_shared_hosts']>0] ### list of new edges for new viruses
#print(new_edges.shape)
Gc_temp = Gc.copy() ## creating a temporary copy of GC complete
Gc_temp.add_node(virus, ViralFamily=ViralFamily, type='virus', bipartite = 1) ## adding new node to the Bpnxtemp
for index, row in new_edges.iterrows():
if row['n_shared_hosts'] > 0:
Gc_temp.add_edge(row['Virus1'], row['Virus2'], weight = row['n_shared_hosts'], hosts = ','.join(row['shared_hosts']),
orders = ','.join(row['shared_orders']))
#edges_to_predict = df[df['n_shared_hosts']==0]
edges_to_predict = df
edges_to_predict = edges_to_predict[edges_to_predict.Virus2 != 'nan']
virus_df_temp = virus_df.copy()
    virus_df_temp.loc[len(virus_df_temp)] = [virus, ViralFamily, math.log(PubMed), 1, 1]
return Gc_temp, edges_to_predict, virus_df_temp
def prediction(temp_x, clf_multi, inv_dictionary):
#print('prediction function is in multiclass validation file 2nd function')
#print(temp_x.shape)
    inv_dictionary = dict((k, v.title()) for k, v in inv_dictionary.items())
temp_x.columns = ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9']
#temp_x.columns = ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13']
Order_prediction = pd.DataFrame(clf_multi.predict(temp_x)).replace(inv_dictionary)
#print (Order_prediction.shape)
probs = clf_multi.predict_proba(temp_x)
prob_max =[]
    for i in range(len(probs)):
        prob_max.append(np.amax(probs[i], axis=1))
max_prob = pd.DataFrame(prob_max).T
prediction = Order_prediction.join(max_prob, lsuffix='_pr', rsuffix='_shakyata')
return prediction
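# A minimal, self-contained sketch (not part of the original pipeline) of why the loop
# above takes np.amax over probs[i]: MultiOutputClassifier.predict_proba returns a *list*
# with one (n_samples, n_classes) array per output column, so the per-sample maximum
# class probability has to be pulled out one output at a time.
def _demo_multioutput_proba():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.multioutput import MultiOutputClassifier
    X_demo, y_demo = make_classification(n_samples=60, random_state=0)
    Y_demo = np.column_stack([y_demo, 1 - y_demo])  # two toy output columns
    clf_demo = MultiOutputClassifier(RandomForestClassifier(n_estimators=10, random_state=0))
    clf_demo.fit(X_demo, Y_demo)
    probs_demo = clf_demo.predict_proba(X_demo)          # list of 2 arrays, each (60, 2)
    max_per_output = [np.amax(p, axis=1) for p in probs_demo]
    return pd.DataFrame(max_per_output).T                # (60, 2), same trick as above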
def cross_validation_predict(virus, hosts, ViralFamily, PubMed, BPnx_group, Gc, virus_df, clf_multi, inv_dictionary, IUCN):
#print('cross_validation_predict function is in multiclass validation file')
from HPnex import predict_multi as pred_m
Gc_temp_group, edges_to_predict, virus_df_temp = generete_temp_network(virus = virus,
hosts = hosts,
ViralFamily = ViralFamily,
PubMed = PubMed,
BPnx_group = BPnx_group,
IUCN = IUCN,
Gc = Gc,
virus_df = virus_df)
temp_x = pred_m.preprocessing_x(data_frame = edges_to_predict,
network = Gc_temp_group,
virus_df_temp = virus_df_temp,
virus_df = virus_df)
pred_group = prediction(temp_x =temp_x, clf_multi =clf_multi, inv_dictionary = inv_dictionary)
result_group = pred_group.join(edges_to_predict)
return result_group, edges_to_predict
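# Hedged usage sketch for cross_validation_predict above. The virus name, host list,
# family and PubMed count are hypothetical; the remaining arguments are assumed to be
# built exactly as in run_cross_validation below.
def _example_single_virus_prediction(BPnx_group, Gc, virus_df, clf_multi, inv_dictionary, IUCN):
    result, edges = cross_validation_predict(
        virus='Example virus 1',                       # hypothetical virus
        hosts=['Homo sapiens', 'Pteropus vampyrus'],   # hypothetical host species
        ViralFamily='Paramyxoviridae',
        PubMed=120,
        BPnx_group=BPnx_group, Gc=Gc, virus_df=virus_df,
        clf_multi=clf_multi, inv_dictionary=inv_dictionary, IUCN=IUCN)
    return result, edges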
###############################################################################################################################
###############################################################################################################################
def run_cross_validation(i, df, XGB, data_path, virus_df, IUCN):
print('run_cross_validation function is in multiclass validation file')
from HPnex import functions as f
from HPnex import classification as classify
from HPnex import fitting_functions as fitt
print('running model for group '+ str(i) )
df_temp = df[df.group != i]
import pickle
dictionary = pickle.load(open("C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/dictionary_order_humans.pkl", "rb"))
    inv_dictionary = {v: k for k, v in dictionary.items()}
print ("first construct bipartite network to reterive original data information about shared hosts")
BPnx_group = f.construct_bipartite_taxa_virus_network(
dataframe=df_temp,
taxa_level = 'Order',
network_name='Go',
plot=False,
filter_file=False,
taxonomic_filter=None)
print('generation of observed network after removing group '+ str(i))
Gc_df, Gc = f.construct_unipartite_taxa_level_virus_virus_network(
dataframe=df_temp,
taxa_level = 'Order',
network_name='Gc Order level',
layout_func='fruchterman_reingold',
plot=False,
filter_file=False,
taxonomic_filter=None,
return_df=True)
print ('getting network data for Observed network using Gc and BPnx_group')
Multiclass_data = fitt.get_complete_network_data_for_fitting_multiclass(Gc = Gc, BPnx = BPnx_group, data_path= data_path,
                                                                      virus_df = virus_df, Species_file_name='\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv')  # '\\' avoids the invalid '\I' escape
print('preprocessing data for fitting model')
from xgboost import XGBClassifier
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
    #from pandas_ml import ConfusionMatrix  # unused here; left commented like the module-level imports above
from HPnex import functions as f
### Running cross validation scores and predictions
from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, precision_recall_fscore_support
model_data = Multiclass_data
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
#def run_mulitlabel_model(model_data, cv, rf, virus_df, Gc_data):
#predictors = [
# 'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
# 'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2', 'neighbors_n',
# 'adamic_adar', 'resource', 'preferential_attach'
#]
predictors = [
'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2',
]
import sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC, LinearSVC
from sklearn import preprocessing
from sklearn_pandas import DataFrameMapper
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
model_data['shared_hosts_label'] = model_data['orders_label'].apply(lambda y: ['No_Sharing'] if len(y)==0 else y)
Y_ml_df = model_data['shared_hosts_label'].apply(str).str.strip("['']").str.replace("'", "").str.strip().str.split(', ', expand = True)
Y_ml_df = Y_ml_df.replace(dictionary)
X = model_data[list(predictors)].values
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
data_processed = pd.DataFrame(X_std, columns=predictors)
#data_processed.head()
### Encoding categorical variables
le = preprocessing.LabelEncoder()
le.fit(virus_df.viral_family.unique())
    # 'Not_Assinged' (sic) must be among the viral families fitted above, or transform() fails on unseen labels
    model_data['F1'] = le.transform(model_data.ViralFamily1.fillna('Not_Assinged'))
    model_data['F2'] = le.transform(model_data.ViralFamily2.fillna('Not_Assinged'))
data_processed['F1'] = model_data.F1
data_processed['F2'] = model_data.F2
data_processed.fillna(0, inplace=True)
print('fitting the model for group '+ str(i))
from HPnex import functions as f
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.multioutput import MultiOutputClassifier
XGB = XGB
multi_target_classifier = MultiOutputClassifier(XGB, n_jobs=1)
multi_target_classifier.fit(data_processed, Y_ml_df.fillna(19).values)
print(multi_target_classifier)
print ('predicting using fitted model for group '+ str(i))
predict_df = df[df.group == i]
predict_df = predict_df.groupby('Virus').agg({'Order':'unique',
'viral_family':'unique',
'PubMed_Search':'unique'}) #,ScientificName , 'PubMed_Search']
predict_df['viral_family'] = predict_df['viral_family'].str.get(0)
predict_df['PubMed_Search'] = predict_df['PubMed_Search'].str.get(0).astype(int)
predict_df.reset_index(inplace = True)
print ('running predictions')
RESULT = []
e_predict = []
for index, row in predict_df.dropna().iterrows():
result, edges_to_predict = cross_validation_predict(virus =row['Virus'],
hosts = row['Order'],
PubMed = row['PubMed_Search'],
ViralFamily = row['viral_family'],
BPnx_group = BPnx_group,
Gc = Gc,
virus_df = virus_df,
clf_multi = multi_target_classifier,
inv_dictionary = inv_dictionary,
IUCN = IUCN)
RESULT.append(result)
e_predict.append(edges_to_predict)
result_group = pd.concat(RESULT, axis=0)
edges_group = pd.concat(e_predict, axis=0)
return result_group, edges_group
#######################################################################################################################################
#######################################################################################################################################
def run_cross_validation(i, df, XGB, data_path, virus_df, IUCN):
print('run_cross_validation function is in multiclass validation file')
from HPnex import functions as f
from HPnex import classification as classify
from HPnex import fitting_functions as fitt
print('running model for group '+ str(i) )
df_temp = df[df.group != i]
import pickle
dictionary = pickle.load(open("C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/dictionary_order_humans.pkl", "rb"))
    inv_dictionary = {v: k for k, v in dictionary.items()}
print ("first construct bipartite network to reterive original data information about shared hosts")
BPnx_group = f.construct_bipartite_host_virus_network(
dataframe=df_temp,
network_name='Go',
plot=False,
filter_file=False,
taxonomic_filter=None)
print('generation of observed network after removing group '+ str(i))
Gc_df, Gc = f.construct_unipartite_virus_virus_network_order(
dataframe=df_temp,
network_name='all_network',
IUCN = IUCN,
layout_func='fruchterman_reingold',
plot=False,
filter_file=False,
taxonomic_filter=None,
return_df=True)
print ('getting network data for Observed network using Gc and BPnx_group')
Multiclass_data = fitt.get_complete_network_data_for_fitting_multiclass(Gc = Gc, BPnx = BPnx_group, data_path= data_path,
                                                                      virus_df = virus_df, Species_file_name='\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv')  # '\\' avoids the invalid '\I' escape
print('preprocessing data for fitting model')
from xgboost import XGBClassifier
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
    #from pandas_ml import ConfusionMatrix  # unused here; left commented like the module-level imports above
from HPnex import functions as f
### Running cross validation scores and predictions
from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, precision_recall_fscore_support
model_data = Multiclass_data
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
#def run_mulitlabel_model(model_data, cv, rf, virus_df, Gc_data):
#predictors = [
# 'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
# 'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2', 'neighbors_n',
# 'adamic_adar', 'resource', 'preferential_attach'
#]
predictors = [
'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2'
]
#predictors = [
# 'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
# 'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2',
# 'VirusCluster1', 'VirusCluster2', 'resource', 'preferential_attach'
#]
import sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC, LinearSVC
from sklearn import preprocessing
from sklearn_pandas import DataFrameMapper
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
model_data['shared_hosts_label'] = model_data['orders_label'].apply(lambda y: ['No_Sharing'] if len(y)==0 else y)
Y_ml_df = model_data['shared_hosts_label'].apply(str).str.strip("['']").str.replace("'", "").str.strip().str.split(', ', expand = True)
Y_ml_df = Y_ml_df.replace(dictionary)
X = model_data[list(predictors)].values
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
data_processed = pd.DataFrame(X_std, columns=predictors)
#data_processed.head()
### Encoding categorical variables
le = preprocessing.LabelEncoder()
le.fit(virus_df.viral_family.unique())
model_data['F1'] = le.transform(model_data.ViralFamily1.fillna('Not_Assinged'))
model_data['F2'] = le.transform(model_data.ViralFamily2.fillna('Not_Assinged'))
data_processed['F1'] = model_data.F1
data_processed['F2'] = model_data.F2
data_processed.fillna(0, inplace=True)
print('fitting the model for group '+ str(i))
from HPnex import functions as f
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.multioutput import MultiOutputClassifier
XGB = XGB
multi_target_classifier = MultiOutputClassifier(XGB, n_jobs=1)
multi_target_classifier.fit(data_processed, Y_ml_df.fillna(19).values)
print(multi_target_classifier)
print ('predicting using fitted model for group '+ str(i))
predict_df = df[df.group == i]
predict_df = predict_df.groupby('Virus').agg({
'ScientificName': 'unique',
'Order':'unique',
'viral_family':'unique',
'PubMed_Search':'unique'}) #,ScientificName , 'PubMed_Search']
print('scientific names')
predict_df['viral_family'] = predict_df['viral_family'].str.get(0)
predict_df['PubMed_Search'] = predict_df['PubMed_Search'].str.get(0).astype(int)
predict_df.reset_index(inplace = True)
print ('running predictions')
RESULT = []
e_predict = []
for index, row in predict_df.dropna().iterrows():
result, edges_to_predict = cross_validation_predict(virus =row['Virus'],
hosts = row['ScientificName'],
PubMed = row['PubMed_Search'],
ViralFamily = row['viral_family'],
BPnx_group = BPnx_group,
Gc = Gc,
virus_df = virus_df,
clf_multi = multi_target_classifier,
inv_dictionary = inv_dictionary,
IUCN = IUCN)
RESULT.append(result)
e_predict.append(edges_to_predict)
result_group = pd.concat(RESULT, axis=0)
edges_group = pd.concat(e_predict, axis=0)
return result_group, edges_group
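# Hedged sketch (not original code) of the leave-one-group-out driver implied by
# run_cross_validation: drop each group in turn, refit, predict, and pool the results.
# df, XGB, data_path, virus_df and IUCN are assumed to be prepared upstream of this file.
def _run_all_groups(df, XGB, data_path, virus_df, IUCN):
    all_results, all_edges = [], []
    for g in sorted(df.group.unique()):
        result_g, edges_g = run_cross_validation(g, df, XGB, data_path, virus_df, IUCN)
        all_results.append(result_g)
        all_edges.append(edges_g)
    return pd.concat(all_results, axis=0), pd.concat(all_edges, axis=0)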
#######################################################################################################################################
#######################################################################################################################################
def generate_score(cv_preds, cv_epreds, virus_df, i, plot = False):
print('generate_score function is in multiclass validation file')
r_group = pd.concat([cv_preds, cv_epreds], axis=1)
cols = r_group.filter(regex='_pr').columns.tolist()
#r_group['combined_orders']=r_group[['0_pr', '10_pr',u'11_pr', '12_pr', '13_pr', '14_pr', '15_pr', '16_pr', '17_pr', '1_pr',
#'2_pr', '3_pr', '4_pr', '5_pr', '6_pr', '7_pr', '8_pr', '9_pr']].values.tolist()
r_group['combined_orders']=r_group[cols].values.tolist()
r_group = r_group.loc[:,~r_group.columns.duplicated()]
r_group['shared_hosts'] = r_group['shared_orders'].apply(lambda y: ['No_Sharing'] if len(y)==0 else y)
r_group['combined_orders'] = r_group['combined_orders'].apply(lambda x: set(x))
r_group['shared_hosts'] = r_group['shared_hosts'].apply(lambda x: set(x))
    r_group['shared_hosts'] = r_group['shared_hosts'].apply(lambda x: list(map(str.title, x)))  # materialise the map for Python 3
    print('accuracy matrix based on first prediction')
    a = r_group[['0_pr', 'shared_hosts']]
a = pd.concat([a, a.shared_hosts.apply(pd.Series)], axis=1)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
cm = confusion_matrix(a['0_pr'], a[0])
    print('Accuracy Score:', accuracy_score(a['0_pr'], a[0]))
    print('Classification Report:')
    print(classification_report(a['0_pr'], a[0]))
r_group['TP'] = [list(set(a).intersection(set(b))) for a, b in zip(r_group.shared_hosts, r_group.combined_orders)]
r_group['FP'] = [list(set(b).difference(set(a))) for a, b in zip(r_group.shared_hosts, r_group.combined_orders)]
r_group['FN'] = [list(set(a).difference(set(b))) for a, b in zip(r_group.shared_hosts, r_group.combined_orders)]
#r_group = pd.merge(r_group, virus_df, left_on='Virus1', right_on='virus_name', how='left')
r_group['group'] = i
#r_group.group.fillna(0, inplace= True)
m = []
for g in r_group.group.unique():
temp_r = r_group[r_group.group == g]
TP = temp_r['TP'].apply(pd.Series).stack().reset_index(drop=True).value_counts()
FP = temp_r['FP'].apply(pd.Series).stack().reset_index(drop=True).value_counts()
FN = temp_r['FN'].apply(pd.Series).stack().reset_index(drop=True).value_counts()
matrix_group = pd.concat([TP, FP, FN], axis = 1)
matrix_group.columns = ['TP', 'FP', 'FN']
matrix_group['Group'] = g
matrix_group['PPV'] = matrix_group.TP.fillna(0)/(matrix_group.TP.fillna(0)+ matrix_group.FP.fillna(0))
matrix_group['Sensitivity'] = matrix_group.TP.fillna(0)/(matrix_group.TP.fillna(0)+ matrix_group.FN.fillna(0))
m.append(matrix_group)
matrix = pd.concat(m, axis=0).reset_index()
    matrix['support'] = matrix.TP + matrix.FP + matrix.FN
matrix['f1-score'] = 2*((matrix['PPV']*matrix['Sensitivity'])/(matrix['PPV']+matrix['Sensitivity']))
matrix.columns = ['Order', 'TP', 'FP', 'FN', 'Group', 'PPV', 'Sensitivity', 'support', 'f1-score']
if plot:
import matplotlib.style as style
style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'Times New Roman'
sns.set_context("notebook", font_scale=1.0, rc={"lines.linewidth": 0.8})
validation_matrix = matrix
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize = [12,8], sharey= False)
sns.boxplot(x="support", y="Order", data=validation_matrix.dropna(), ax = ax1)
sns.stripplot(x="support", y="Order", data=validation_matrix.dropna(), jitter= True, color='#252525', ax= ax1)
ax1.set_xlabel('support')
ax1.set_title('Predicting Shared Host Order\n\n\n', horizontalalignment = 'center', loc = 'left', fontsize=16)
text1 = 'Sample size for validation of XGBoost model performance in correctly predicting the type of links (host order) between two viruses\nthat did not share hosts in the observed network '+ r'$G_o$'+ ' and shared hosts in '+ r'$G_c$'+'.\n'
ax1.text(-0.3, 0.99, text1, verticalalignment='bottom',
horizontalalignment='left',
transform=ax1.transAxes,
color='gray', fontsize=14)
ax1.set_xscale('log')
ax1.set_ylabel('')
sns.boxplot(x="f1-score", y="Order", data=validation_matrix.dropna(), ax = ax2)
sns.stripplot(x="f1-score", y="Order", data=validation_matrix.dropna(), jitter= True, color='#252525', ax= ax2)
ax2.set_xlabel('f1-score')
ax2.set_xlim(0,1.02)
ax2.set_ylabel('')
sns.boxplot(x="Sensitivity", y="Order", data=validation_matrix.dropna(), ax = ax3)
sns.stripplot(x="Sensitivity", y="Order", data=validation_matrix.dropna(), jitter= True, color='#252525', ax= ax3)
ax3.set_xlim(0,1.02)
ax3.set_xlabel('Sensitivity')
ax3.set_ylabel('')
sns.boxplot(x="PPV", y="Order", data=validation_matrix.dropna(), ax = ax4)
sns.stripplot(x="PPV", y="Order", data=validation_matrix.dropna(), jitter= True, color='#252525', ax= ax4)
ax4.set_xlim(0,1.02)
ax4.set_xlabel('Positive Predictive Value')
ax4.set_ylabel('')
plt.tight_layout()
#plt.savefig('outputs/XGBoost_order_prediction_performance.png', dpi = 600)
plt.show()
return matrix
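# Self-contained toy check (not original code) of the per-order metrics computed above:
# PPV = TP/(TP+FP), Sensitivity = TP/(TP+FN), f1 = 2*PPV*Sens/(PPV+Sens).
def _demo_order_metrics():
    toy = pd.DataFrame({'TP': [8, 3], 'FP': [2, 1], 'FN': [0, 6]},
                       index=['Primates', 'Rodentia'])   # made-up counts
    toy['PPV'] = toy.TP / (toy.TP + toy.FP)               # 0.80 and 0.75
    toy['Sensitivity'] = toy.TP / (toy.TP + toy.FN)       # 1.00 and 0.33
    toy['f1-score'] = 2 * (toy.PPV * toy.Sensitivity) / (toy.PPV + toy.Sensitivity)
    return toy                                            # f1 ~ 0.889 and ~ 0.462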
| [
"sklearn.preprocessing.StandardScaler",
"matplotlib.style.use",
"HPnex.functions.construct_bipartite_host_virus_network",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"HPnex.functions.construct_unipartite_taxa_level_virus_virus_network",
"matplotlib.pyplot.tight_layout",
... | [((109, 204), 'os.chdir', 'os.chdir', (['"""C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/"""'], {}), "(\n 'C:/Users/falco/Desktop/directory/Missing_links_in_viral_host_communities/'\n )\n", (117, 204), True, 'import os as os\n'), ((2546, 2574), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (2555, 2574), True, 'import matplotlib.style as style\n'), ((2623, 2695), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.3)', 'rc': "{'lines.linewidth': 0.8}"}), "('notebook', font_scale=1.3, rc={'lines.linewidth': 0.8})\n", (2638, 2695), True, 'import seaborn as sns\n'), ((221, 238), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (236, 238), True, 'import datetime as dt\n'), ((3453, 3490), 'pandas.DataFrame', 'pd.DataFrame', (["{'Virus2': virus_nodes}"], {}), "({'Virus2': virus_nodes})\n", (3465, 3490), True, 'import pandas as pd\n'), ((7563, 7689), 'HPnex.predict_multi.preprocessing_x', 'pred_m.preprocessing_x', ([], {'data_frame': 'edges_to_predict', 'network': 'Gc_temp_group', 'virus_df_temp': 'virus_df_temp', 'virus_df': 'virus_df'}), '(data_frame=edges_to_predict, network=Gc_temp_group,\n virus_df_temp=virus_df_temp, virus_df=virus_df)\n', (7585, 7689), True, 'from HPnex import predict_multi as pred_m\n'), ((8832, 8869), 'pandas.DataFrame', 'pd.DataFrame', (["{'Virus2': virus_nodes}"], {}), "({'Virus2': virus_nodes})\n", (8844, 8869), True, 'import pandas as pd\n'), ((12610, 12736), 'HPnex.predict_multi.preprocessing_x', 'pred_m.preprocessing_x', ([], {'data_frame': 'edges_to_predict', 'network': 'Gc_temp_group', 'virus_df_temp': 'virus_df_temp', 'virus_df': 'virus_df'}), '(data_frame=edges_to_predict, network=Gc_temp_group,\n virus_df_temp=virus_df_temp, virus_df=virus_df)\n', (12632, 12736), True, 'from HPnex import predict_multi as pred_m\n'), ((13993, 14154), 'HPnex.functions.construct_bipartite_taxa_virus_network', 'f.construct_bipartite_taxa_virus_network', ([], {'dataframe': 'df_temp', 'taxa_level': '"""Order"""', 'network_name': '"""Go"""', 'plot': '(False)', 'filter_file': '(False)', 'taxonomic_filter': 'None'}), "(dataframe=df_temp, taxa_level=\n 'Order', network_name='Go', plot=False, filter_file=False,\n taxonomic_filter=None)\n", (14033, 14154), True, 'from HPnex import functions as f\n'), ((14288, 14531), 'HPnex.functions.construct_unipartite_taxa_level_virus_virus_network', 'f.construct_unipartite_taxa_level_virus_virus_network', ([], {'dataframe': 'df_temp', 'taxa_level': '"""Order"""', 'network_name': '"""Gc Order level"""', 'layout_func': '"""fruchterman_reingold"""', 'plot': '(False)', 'filter_file': '(False)', 'taxonomic_filter': 'None', 'return_df': '(True)'}), "(dataframe=df_temp,\n taxa_level='Order', network_name='Gc Order level', layout_func=\n 'fruchterman_reingold', plot=False, filter_file=False, taxonomic_filter\n =None, return_df=True)\n", (14341, 14531), True, 'from HPnex import functions as f\n'), ((14688, 14888), 'HPnex.fitting_functions.get_complete_network_data_for_fitting_multiclass', 'fitt.get_complete_network_data_for_fitting_multiclass', ([], {'Gc': 'Gc', 'BPnx': 'BPnx_group', 'data_path': 'data_path', 'virus_df': 'virus_df', 'Species_file_name': '"""\\\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv"""'}), "(Gc=Gc, BPnx=\n BPnx_group, data_path=data_path, virus_df=virus_df, Species_file_name=\n '\\\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv')\n", (14741, 14888), True, 'from HPnex import fitting_functions as fitt\n'), 
((17332, 17348), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (17346, 17348), False, 'from sklearn.preprocessing import StandardScaler\n'), ((17406, 17445), 'pandas.DataFrame', 'pd.DataFrame', (['X_std'], {'columns': 'predictors'}), '(X_std, columns=predictors)\n', (17418, 17445), True, 'import pandas as pd\n'), ((17522, 17550), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (17548, 17550), False, 'from sklearn import preprocessing\n'), ((18160, 18196), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['XGB'], {'n_jobs': '(1)'}), '(XGB, n_jobs=1)\n', (18181, 18196), False, 'from sklearn.multioutput import MultiOutputClassifier\n'), ((19773, 19798), 'pandas.concat', 'pd.concat', (['RESULT'], {'axis': '(0)'}), '(RESULT, axis=0)\n', (19782, 19798), True, 'import pandas as pd\n'), ((19817, 19845), 'pandas.concat', 'pd.concat', (['e_predict'], {'axis': '(0)'}), '(e_predict, axis=0)\n', (19826, 19845), True, 'import pandas as pd\n'), ((20879, 21016), 'HPnex.functions.construct_bipartite_host_virus_network', 'f.construct_bipartite_host_virus_network', ([], {'dataframe': 'df_temp', 'network_name': '"""Go"""', 'plot': '(False)', 'filter_file': '(False)', 'taxonomic_filter': 'None'}), "(dataframe=df_temp, network_name=\n 'Go', plot=False, filter_file=False, taxonomic_filter=None)\n", (20919, 21016), True, 'from HPnex import functions as f\n'), ((21145, 21371), 'HPnex.functions.construct_unipartite_virus_virus_network_order', 'f.construct_unipartite_virus_virus_network_order', ([], {'dataframe': 'df_temp', 'network_name': '"""all_network"""', 'IUCN': 'IUCN', 'layout_func': '"""fruchterman_reingold"""', 'plot': '(False)', 'filter_file': '(False)', 'taxonomic_filter': 'None', 'return_df': '(True)'}), "(dataframe=df_temp,\n network_name='all_network', IUCN=IUCN, layout_func=\n 'fruchterman_reingold', plot=False, filter_file=False, taxonomic_filter\n =None, return_df=True)\n", (21193, 21371), True, 'from HPnex import functions as f\n'), ((21528, 21728), 'HPnex.fitting_functions.get_complete_network_data_for_fitting_multiclass', 'fitt.get_complete_network_data_for_fitting_multiclass', ([], {'Gc': 'Gc', 'BPnx': 'BPnx_group', 'data_path': 'data_path', 'virus_df': 'virus_df', 'Species_file_name': '"""\\\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv"""'}), "(Gc=Gc, BPnx=\n BPnx_group, data_path=data_path, virus_df=virus_df, Species_file_name=\n '\\\\IUCN Mammals, Birds, Reptiles, and Amphibians.csv')\n", (21581, 21728), True, 'from HPnex import fitting_functions as fitt\n'), ((24416, 24432), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (24430, 24432), False, 'from sklearn.preprocessing import StandardScaler\n'), ((24490, 24529), 'pandas.DataFrame', 'pd.DataFrame', (['X_std'], {'columns': 'predictors'}), '(X_std, columns=predictors)\n', (24502, 24529), True, 'import pandas as pd\n'), ((24606, 24634), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (24632, 24634), False, 'from sklearn import preprocessing\n'), ((25244, 25280), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['XGB'], {'n_jobs': '(1)'}), '(XGB, n_jobs=1)\n', (25265, 25280), False, 'from sklearn.multioutput import MultiOutputClassifier\n'), ((26998, 27023), 'pandas.concat', 'pd.concat', (['RESULT'], {'axis': '(0)'}), '(RESULT, axis=0)\n', (27007, 27023), True, 'import pandas as pd\n'), ((27042, 27070), 'pandas.concat', 'pd.concat', (['e_predict'], {'axis': 
'(0)'}), '(e_predict, axis=0)\n', (27051, 27070), True, 'import pandas as pd\n'), ((27540, 27580), 'pandas.concat', 'pd.concat', (['[cv_preds, cv_epreds]'], {'axis': '(1)'}), '([cv_preds, cv_epreds], axis=1)\n', (27549, 27580), True, 'import pandas as pd\n'), ((28665, 28698), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (["a['0_pr']", 'a[0]'], {}), "(a['0_pr'], a[0])\n", (28681, 28698), False, 'from sklearn.metrics import confusion_matrix\n'), ((5425, 5441), 'math.log', 'math.log', (['PubMed'], {}), '(PubMed)\n', (5433, 5441), False, 'import math\n'), ((6406, 6428), 'pandas.DataFrame', 'pd.DataFrame', (['prob_max'], {}), '(prob_max)\n', (6418, 6428), True, 'import pandas as pd\n'), ((10804, 10820), 'math.log', 'math.log', (['PubMed'], {}), '(PubMed)\n', (10812, 10820), False, 'import math\n'), ((11596, 11618), 'pandas.DataFrame', 'pd.DataFrame', (['prob_max'], {}), '(prob_max)\n', (11608, 11618), True, 'import pandas as pd\n'), ((28729, 28760), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["a['0_pr']", 'a[0]'], {}), "(a['0_pr'], a[0])\n", (28743, 28760), False, 'from sklearn.metrics import accuracy_score\n'), ((28812, 28850), 'sklearn.metrics.classification_report', 'classification_report', (["a['0_pr']", 'a[0]'], {}), "(a['0_pr'], a[0])\n", (28833, 28850), False, 'from sklearn.metrics import classification_report\n'), ((29765, 29796), 'pandas.concat', 'pd.concat', (['[TP, FP, FN]'], {'axis': '(1)'}), '([TP, FP, FN], axis=1)\n', (29774, 29796), True, 'import pandas as pd\n'), ((30518, 30546), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (30527, 30546), True, 'import matplotlib.style as style\n'), ((30613, 30685), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.0)', 'rc': "{'lines.linewidth': 0.8}"}), "('notebook', font_scale=1.0, rc={'lines.linewidth': 0.8})\n", (30628, 30685), True, 'import seaborn as sns\n'), ((30760, 30809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '[12, 8]', 'sharey': '(False)'}), '(2, 2, figsize=[12, 8], sharey=False)\n', (30772, 30809), True, 'from matplotlib import pyplot as plt\n'), ((32602, 32620), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32618, 32620), True, 'from matplotlib import pyplot as plt\n'), ((32713, 32723), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32721, 32723), True, 'from matplotlib import pyplot as plt\n'), ((4245, 4301), 'networkx.common_neighbors', 'nx.common_neighbors', (['temp_BPnx', "c['Virus1']", "c['Virus2']"], {}), "(temp_BPnx, c['Virus1'], c['Virus2'])\n", (4264, 4301), True, 'import networkx as nx\n'), ((6362, 6387), 'numpy.amax', 'np.amax', (['probs[i]'], {'axis': '(1)'}), '(probs[i], axis=1)\n', (6369, 6387), True, 'import numpy as np\n'), ((9624, 9680), 'networkx.common_neighbors', 'nx.common_neighbors', (['temp_BPnx', "c['Virus1']", "c['Virus2']"], {}), "(temp_BPnx, c['Virus1'], c['Virus2'])\n", (9643, 9680), True, 'import networkx as nx\n'), ((11552, 11577), 'numpy.amax', 'np.amax', (['probs[i]'], {'axis': '(1)'}), '(probs[i], axis=1)\n', (11559, 11577), True, 'import numpy as np\n'), ((30157, 30177), 'pandas.concat', 'pd.concat', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (30166, 30177), True, 'import pandas as pd\n'), ((4013, 4069), 'networkx.common_neighbors', 'nx.common_neighbors', (['temp_BPnx', "c['Virus1']", "c['Virus2']"], {}), "(temp_BPnx, c['Virus1'], c['Virus2'])\n", (4032, 4069), True, 'import networkx as nx\n'), ((9392, 9448), 
'networkx.common_neighbors', 'nx.common_neighbors', (['temp_BPnx', "c['Virus1']", "c['Virus2']"], {}), "(temp_BPnx, c['Virus1'], c['Virus2'])\n", (9411, 9448), True, 'import networkx as nx\n')] |
import numpy as np
from objects import (StaticObject, Road, PedestrianCross)
def scenario(l_staticObject, l_cross, l_road):
"""
    Coordinates of objects in a scenario. Includes:
l_staticObject: list of static objects
l_cross: list of pedestrian crosses
l_road: list of road layouts
"""
# # static object
# obs1 = StaticObject(
# idx=1,
# poly=np.array([[-10, -20], [-1, -20], [-1, -4],
# [-10, -4]]))
# l_staticObject.append(obs1)
# obs2 = StaticObject(
# idx=2,
# poly=np.array([[10, -20], [60, -20], [60, -7],
# [10, -7]]))
# l_staticObject.append(obs2)
# obs5 = StaticObject(
# idx=5,
# poly=np.array([[-60, 7], [60, 7],
# [60, 20], [-60, 20]]))
# l_staticObject.append(obs5)
# # pedestrian cross
# cross1 = PedestrianCross(
# left=np.array([[0, -7], [0, 7]]),
# right=np.array([[4, -7], [4, 7]]),
# density=0.5
# )
# l_cross.append(cross1)
# # road layout
# road = Road(
# left=np.array([[-100, 4], [100, 4]]),
# right=np.array([[-100, -4], [100, -4]]),
# lane=np.array([[-100, 0], [100, 0]])
# )
# l_road.append(road)
# static object
obs1 = StaticObject(
idx=1,
poly=np.array([[-40, -20], [-2, -20], [-2, -5],
[-40, -5]]))
l_staticObject.append(obs1)
obs2 = StaticObject(
idx=2,
poly=np.array([[5, -20], [60, -20], [60, -5],
[5, -5]]))
l_staticObject.append(obs2)
obs3 = StaticObject(
idx=3,
poly=np.array([[-40, 20], [-2, 20], [-2, 8],
[-40, 8]]))
l_staticObject.append(obs3)
obs4 = StaticObject(
idx=4,
poly=np.array([[5, 20], [60, 20], [60, 5],
[5, 5]]))
l_staticObject.append(obs4)
# pedestrian cross
cross1 = PedestrianCross(
left=np.array([[0, -10], [0, 8]]),
right=np.array([[4, -10], [4, 8]]),
density=0.5
)
l_cross.append(cross1)
# road layout
road = Road(
left=np.array([[-100, 2], [100, 2]]),
right=np.array([[-100, -4], [100, -4]]),
lane=np.array([[-100, -1], [100, -1]])
)
l_road.append(road)
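# Hedged usage sketch: the lists are filled in place, so they can be passed in empty
# and consumed afterwards (e.g. by a plotting or simulation step not shown here).
if __name__ == '__main__':
    l_staticObject, l_cross, l_road = [], [], []
    scenario(l_staticObject, l_cross, l_road)
    print(len(l_staticObject), 'static objects,',
          len(l_cross), 'crossing(s),', len(l_road), 'road(s)')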
| [
"numpy.array"
] | [((1368, 1422), 'numpy.array', 'np.array', (['[[-40, -20], [-2, -20], [-2, -5], [-40, -5]]'], {}), '([[-40, -20], [-2, -20], [-2, -5], [-40, -5]])\n', (1376, 1422), True, 'import numpy as np\n'), ((1533, 1583), 'numpy.array', 'np.array', (['[[5, -20], [60, -20], [60, -5], [5, -5]]'], {}), '([[5, -20], [60, -20], [60, -5], [5, -5]])\n', (1541, 1583), True, 'import numpy as np\n'), ((1694, 1744), 'numpy.array', 'np.array', (['[[-40, 20], [-2, 20], [-2, 8], [-40, 8]]'], {}), '([[-40, 20], [-2, 20], [-2, 8], [-40, 8]])\n', (1702, 1744), True, 'import numpy as np\n'), ((1855, 1901), 'numpy.array', 'np.array', (['[[5, 20], [60, 20], [60, 5], [5, 5]]'], {}), '([[5, 20], [60, 20], [60, 5], [5, 5]])\n', (1863, 1901), True, 'import numpy as np\n'), ((2025, 2053), 'numpy.array', 'np.array', (['[[0, -10], [0, 8]]'], {}), '([[0, -10], [0, 8]])\n', (2033, 2053), True, 'import numpy as np\n'), ((2069, 2097), 'numpy.array', 'np.array', (['[[4, -10], [4, 8]]'], {}), '([[4, -10], [4, 8]])\n', (2077, 2097), True, 'import numpy as np\n'), ((2201, 2232), 'numpy.array', 'np.array', (['[[-100, 2], [100, 2]]'], {}), '([[-100, 2], [100, 2]])\n', (2209, 2232), True, 'import numpy as np\n'), ((2248, 2281), 'numpy.array', 'np.array', (['[[-100, -4], [100, -4]]'], {}), '([[-100, -4], [100, -4]])\n', (2256, 2281), True, 'import numpy as np\n'), ((2296, 2329), 'numpy.array', 'np.array', (['[[-100, -1], [100, -1]]'], {}), '([[-100, -1], [100, -1]])\n', (2304, 2329), True, 'import numpy as np\n')] |
import neural_renderer as nr
import numpy as np
from skimage.io import imread
import torch
from torch.autograd import Variable
# make_square and remove_pads are used below; they are assumed to live alongside
# resize_img in src.util.common (adjust this import if they are defined elsewhere).
from src.util.common import resize_img, make_square, remove_pads
from src.util.torch_utils import orthographic_proj_withz_idrot
from src.util.render_utils import (
draw_skeleton,
draw_text,
)
COLORS = {
# colorblind/print/copy safe:
'blue': [0.65098039, 0.74117647, 0.85882353],
'pink': [.9, .7, .7],
'mint': [ 166/255., 229/255., 204/255.],
'mint2': [ 202/255., 229/255., 223/255.],
'green': [ 153/255., 216/255., 201/255.],
'green2': [ 171/255., 221/255., 164/255.],
'red': [ 251/255., 128/255., 114/255.],
'orange': [ 253/255., 174/255., 97/255.],
'yellow': [ 250/255., 230/255., 154/255.]
}
def get_dims(x):
return x.dim() if isinstance(x, torch.Tensor) else x.ndim
class VisRenderer(object):
"""
Utility to render meshes using pytorch NMR
faces are F x 3 or 1 x F x 3 numpy
this is for visualization only -- does not allow backprop.
This class assumes all inputs are Torch/numpy variables.
    This renderer takes weak-perspective cameras as [s, tx, ty] (see __call__).
"""
def __init__(self,
img_size=256,
face_path='models/smpl_faces.npy',
t_size=1):
self.renderer = nr.Renderer(
img_size, camera_mode='look_at', perspective=False)
self.set_light_dir([1, .5, -1], int_dir=0.3, int_amb=0.7)
self.set_bgcolor([1, 1, 1.])
self.img_size = img_size
        self.faces_np = np.load(face_path).astype(np.int32)  # np.int was removed in NumPy 1.24+; int32 matches torch.IntTensor below
self.faces = to_variable(torch.IntTensor(self.faces_np).cuda())
if self.faces.dim() == 2:
self.faces = torch.unsqueeze(self.faces, 0)
# Default color:
default_tex = np.ones((1, self.faces.shape[1], t_size, t_size, t_size,
3))
self.default_tex = to_variable(torch.FloatTensor(default_tex).cuda())
# Default camera:
cam = np.hstack([0.9, 0, 0])
default_cam = to_variable(torch.FloatTensor(cam).cuda())
self.default_cam = torch.unsqueeze(default_cam, 0)
# Setup proj fn:
self.proj_fn = orthographic_proj_withz_idrot
def __call__(self,
verts,
cam=None,
texture=None,
rend_mask=False,
alpha=False,
img=None,
color_name='blue'):
"""
verts is |V| x 3 numpy/cuda torch Variable or B x V x 3
cams is 3D [s, tx, ty], numpy/cuda torch Variable or B x 3
cams is NOT the same as OpenDR renderer.
Directly use the cams of HMR output
Returns N x N x 3 numpy, where N is the image size.
Or B x N x N x 3 when input was batched
if you're using this as a batch, make sure you send in B x 3 cameras
as well as B x * x * x 3 images if you're using it.
"""
num_batch = 1
if get_dims(verts) == 3 and verts.shape[0] != 1:
print('batch mode')
num_batch = verts.shape[0]
# Make sure everything else is also batch mode.
if cam is not None:
assert get_dims(cam) == 2 and cam.shape[0] == num_batch
if img is not None:
assert img.ndim == 4 and img.shape[0] == num_batch
if texture is None:
# single color.
color = torch.FloatTensor(COLORS[color_name]).cuda()
texture = color * self.default_tex
texture = texture.repeat(num_batch, 1, 1, 1, 1, 1)
else:
texture = to_float_tensor(texture)
if texture.dim() == 5:
# Here input it F x T x T x T x 3 (instead of F x T x T x 3)
# So add batch dim.
texture = torch.unsqueeze(texture, 0)
if cam is None:
cam = self.default_cam
if num_batch > 1:
cam = cam.repeat(num_batch, 1)
else:
cam = to_float_tensor(cam)
if cam.dim() == 1:
cam = torch.unsqueeze(cam, 0)
verts = to_float_tensor(verts)
if verts.dim() == 2:
verts = torch.unsqueeze(verts, 0)
verts = to_variable(verts)
cam = to_variable(cam)
texture = to_variable(texture)
# set offset_z for persp proj
proj_verts = self.proj_fn(verts, cam, offset_z=0)
# Flipping the y-axis here to make it align with
# the image coordinate system!
proj_verts[:, :, 1] *= -1
# Adjust for batch.
faces = self.faces.repeat(num_batch, 1, 1)
if rend_mask:
rend = self.renderer.render_silhouettes(proj_verts, faces)
rend = torch.unsqueeze(rend, 0)
rend = rend.repeat(1, 3, 1, 1)
else:
rend = self.renderer.render(proj_verts, faces, texture)
rend = rend[0].data.cpu().numpy().transpose((0, 2, 3, 1))
rend = np.clip(rend, 0, 1) * 255.0
if num_batch == 1:
rend = rend[0]
if not rend_mask and (alpha or img is not None):
mask = self.renderer.render_silhouettes(proj_verts, faces)
mask = mask.data.cpu().numpy()
if img is not None:
mask = np.repeat(np.expand_dims(mask, 3), 3, axis=3)
if num_batch == 1:
mask = mask[0]
# TODO: Make sure img is [0, 255]!!!
return (img * (1 - mask) + rend * mask).astype(np.uint8)
else:
# TODO: Temporary hack
mask = mask.reshape((rend.shape[:2]) + (1,))
return self.make_alpha(rend, mask)
else:
return rend.astype(np.uint8)
def rotated(self,
verts,
deg,
axis='y',
cam=None,
texture=None,
rend_mask=False,
alpha=False,
color_name='blue'):
"""
vert is N x 3, torch FloatTensor (or Variable)
"""
import cv2
if axis == 'y':
axis = [0, 1., 0]
elif axis == 'x':
axis = [1., 0, 0]
else:
axis = [0, 0, 1.]
new_rot = cv2.Rodrigues(np.deg2rad(deg) * np.array(axis))[0]
new_rot = to_float_tensor(new_rot)
verts = to_float_tensor(verts)
if get_dims(verts) == 2:
# Make it in to 1 x N x 3
verts = verts.unsqueeze(0)
num_batch = verts.shape[0]
new_rot = new_rot.unsqueeze(0)
new_rot = new_rot.repeat(num_batch, 1, 1)
center = verts.mean(1, keepdim=True)
centered_v = (verts - center)
new_verts = torch.matmul(new_rot, centered_v.permute(0, 2, 1))
new_verts = new_verts.permute(0, 2, 1) + center
return self.__call__(
new_verts,
cam=cam,
texture=texture,
rend_mask=rend_mask,
alpha=alpha,
color_name=color_name
)
def make_alpha(self, rend, mask):
rend = rend.astype(np.uint8)
alpha = (mask * 255).astype(np.uint8)
imgA = np.dstack((rend, alpha))
return imgA
def set_light_dir(self, direction, int_dir=0.8, int_amb=0.8):
self.renderer.light_direction = direction
self.renderer.light_intensity_directional = int_dir
self.renderer.light_intensity_ambient = int_amb
def set_bgcolor(self, color):
self.renderer.background_color = color
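# Hedged usage sketch for VisRenderer. Assumes a CUDA device, neural_renderer installed,
# and the SMPL face file at its default path; the vertices are random noise rather than a
# real SMPL mesh, so this is only a smoke test of the rendering path.
def _demo_vis_renderer():
    renderer = VisRenderer(img_size=256)
    verts = np.random.uniform(-0.5, 0.5, size=(6890, 3)).astype(np.float32)  # SMPL vertex count
    frontal = renderer(verts, color_name='blue')       # (256, 256, 3) uint8 image
    side = renderer.rotated(verts, deg=90, axis='y')   # same mesh seen from the side
    return frontal, side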
def to_variable(x):
if type(x) is not torch.autograd.Variable:
x = Variable(x, requires_grad=False)
return x
def to_float_tensor(x):
if isinstance(x, np.ndarray):
x = torch.FloatTensor(x).cuda()
# ow assumed it's already a Tensor..
return x
def convert_as(src, trg):
src = src.type_as(trg)
if src.is_cuda:
src = src.cuda(device=trg.get_device())
if type(trg) is torch.autograd.Variable:
src = Variable(src, requires_grad=False)
return src
def visualize_img(img,
cam,
kp_pred,
vert,
renderer,
kp_gt=None,
text={},
rotated_view=False,
mesh_color='blue',
pad_vals=None,
no_text=False):
"""
Visualizes the image with the ground truth keypoints and
predicted keypoints on left and image with mesh on right.
Keypoints should be in normalized coordinates, not image coordinates.
Args:
img: Image.
cam (3x1): Camera parameters.
kp_gt: Ground truth keypoints.
kp_pred: Predicted keypoints.
vert: Vertices.
renderer: SMPL renderer.
text (dict): Optional information to include in the image.
rotated_view (bool): If True, also visualizes mesh from another angle.
if pad_vals (2,) is not None, removes those values from the image
(undo img pad to make square)
Returns:
Combined image.
"""
img_size = img.shape[0]
text.update({'sc': cam[0], 'tx': cam[1], 'ty': cam[2]})
if kp_gt is not None:
gt_vis = kp_gt[:, 2].astype(bool)
loss = np.sum((kp_gt[gt_vis, :2] - kp_pred[gt_vis])**2)
text['kpl'] = loss
# Undo pre-processing.
# Make sure img is [0-255]
input_img = ((img + 1) * 0.5) * 255.
rend_img = renderer(vert, cam=cam, img=input_img, color_name=mesh_color)
if not no_text:
rend_img = draw_text(rend_img, text)
# Draw skeletons
pred_joint = ((kp_pred + 1) * 0.5) * img_size
skel_img = draw_skeleton(input_img, pred_joint)
if kp_gt is not None:
gt_joint = ((kp_gt[:, :2] + 1) * 0.5) * img_size
skel_img = draw_skeleton(
skel_img, gt_joint, draw_edges=False, vis=gt_vis)
if pad_vals is not None:
skel_img = remove_pads(skel_img, pad_vals)
rend_img = remove_pads(rend_img, pad_vals)
if rotated_view:
rot_img = renderer.rotated(
vert, 90, cam=cam, alpha=False, color_name=mesh_color)
if pad_vals is not None:
rot_img = remove_pads(rot_img, pad_vals)
return skel_img / 255, rend_img / 255, rot_img / 255
else:
return skel_img / 255, rend_img / 255
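# --- Editorial sketch (not part of the original module): visualize_img above
# assumes keypoints in normalized [-1, 1] coordinates and maps them to pixels
# via ((kp + 1) * 0.5) * img_size. A tiny self-contained round-trip check of
# that convention, with a hypothetical img_size:
def _kp_norm_roundtrip_demo():
    import numpy as np
    img_size = 224  # hypothetical crop size
    kp_norm = np.array([[-1.0, -1.0], [0.0, 0.0], [1.0, 1.0]])
    kp_px = (kp_norm + 1) * 0.5 * img_size  # [-1, 1] -> [0, img_size]
    kp_back = 2 * (kp_px / img_size) - 1    # inverse, as used below
    assert np.allclose(kp_back, kp_norm)
    return kp_px  # corners map to 0 and 224, the center to 112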
def visualize_img_orig(cam, kp_pred, vert, renderer, start_pt, scale,
proc_img_shape, im_path=None, img=None,
rotated_view=False, mesh_color='blue', max_img_size=300,
no_text=False, bbox=None, crop_cam=None):
"""
Visualizes the image with the ground truth keypoints and predicted keypoints
in the original image space (squared).
If you get out of memory error, make max_img_size smaller.
Args:
        Must supply either im_path or img.
        start_pt, scale, proc_img_shape: parameters that were used to
            preprocess the image (needed here to undo the preprocessing).
Returns:
Combined image.
"""
if img is None:
img = imread(im_path)
    # Pre-process image to [-1, 1] because the renderer expects this.
img = ((img / 255.) - 0.5) * 2
if np.max(img.shape[:2]) > max_img_size:
        # If the image is too big it won't fit on the GPU and NMR fails.
scale_orig = max_img_size / float(np.max(img.shape[:2]))
img, _ = resize_img(img, scale_orig)
undo_scale = (1. / np.array(scale)) * scale_orig
else:
undo_scale = 1. / np.array(scale)
if bbox is not None:
        assert crop_cam is not None
img = img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
# For these, the cameras are already adjusted.
start_pt = np.array([0, 0])
    # NMR needs images to be square.
img, pad_vals = make_square(img)
img_size = np.max(img.shape[:2])
renderer.renderer.image_size = img_size
# Adjust kp_pred.
# This is in 224x224 cropped space.
pred_joint = ((kp_pred + 1) * 0.5) * proc_img_shape[0]
# This is in the original image.
pred_joint_orig = (pred_joint + start_pt - proc_img_shape[0]) * undo_scale
    # In normalized coordinates of the original image:
kp_orig = 2 * (pred_joint_orig / img_size) - 1
if bbox is not None:
use_cam = crop_cam
else:
# This is camera in crop image coord.
cam_crop = np.hstack([proc_img_shape[0] * cam[0] * 0.5,
cam[1:] + (2./cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] * undo_scale,
cam_crop[1:] + (start_pt - proc_img_shape[0]) / cam_crop[0]
])
# This is the camera in normalized orig_image coord
new_cam = np.hstack([
cam_orig[0] * (2. / img_size),
cam_orig[1:] - (1 / ((2./img_size) * cam_orig[0]))
])
new_cam = new_cam.astype(np.float32)
use_cam = new_cam
# Call visualize_img with this camera:
rendered_orig = visualize_img(
img=img,
cam=use_cam,
kp_pred=kp_orig,
vert=vert,
renderer=renderer,
rotated_view=rotated_view,
mesh_color=mesh_color,
pad_vals=pad_vals,
no_text=no_text,
)
return rendered_orig
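# --- Editorial sketch (assumption-labelled, not from the original code): the
# crop -> orig -> normalized camera chain above, isolated as a pure function
# so the algebra can be checked with hypothetical numbers.
def _cam_crop_to_orig_demo():
    import numpy as np
    cam = np.array([0.9, 0.05, -0.02])      # hypothetical weak-perspective cam
    proc_img_shape0 = 224                  # hypothetical crop size
    start_pt = np.array([30, 40])          # hypothetical crop offset
    undo_scale, img_size = 1.0 / 0.8, 500  # hypothetical preprocessing params
    cam_crop = np.hstack([proc_img_shape0 * cam[0] * 0.5,
                          cam[1:] + (2. / cam[0]) * 0.5])
    cam_orig = np.hstack([cam_crop[0] * undo_scale,
                          cam_crop[1:] + (start_pt - proc_img_shape0) / cam_crop[0]])
    new_cam = np.hstack([cam_orig[0] * (2. / img_size),
                          cam_orig[1:] - (1 / ((2. / img_size) * cam_orig[0]))])
    return new_cam.astype(np.float32)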
def visualize_mesh_og(cam, vert, renderer, start_pt, scale, proc_img_shape,
im_path=None, img=None, deg=0, mesh_color='blue',
max_img_size=300, pad=50, crop_cam=None, bbox=None):
"""
Visualize mesh in original image space.
If you get out of memory error, make max_img_size smaller.
    If crop_cam and bbox are not None,
crops the image and uses the crop_cam to render.
(See compute_video_bbox.py)
"""
if img is None:
img = imread(im_path)
    # Pre-process image to [-1, 1] because the renderer expects this.
img = ((img / 255.) - 0.5) * 2
if bbox is not None:
        assert crop_cam is not None
img = img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
# For these, the cameras are already adjusted.
scale = 1.
start_pt = np.array([0, 0])
if np.max(img.shape[:2]) > max_img_size:
        # If the image is too big it won't fit on the GPU and NMR fails.
scale_orig = max_img_size / float(np.max(img.shape[:2]))
img, _ = resize_img(img, scale_orig)
undo_scale = (1. / np.array(scale)) * scale_orig
else:
undo_scale = 1. / np.array(scale)
    # NMR needs images to be square.
img, pad_vals = make_square(img)
img_size = np.max(img.shape[:2])
renderer.renderer.image_size = img_size
if bbox is not None:
return renderer.rotated(
verts=vert,
deg=deg,
cam=crop_cam,
color_name=mesh_color,
)
else:
# This is camera in crop image coord.
cam_crop = np.hstack([proc_img_shape[0] * cam[0] * 0.5,
cam[1:] + (2./cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] * undo_scale,
cam_crop[1:] + (start_pt - proc_img_shape[0]) / cam_crop[0]
])
# This is the camera in normalized orig_image coord
new_cam = np.hstack([
cam_orig[0] * (2. / img_size),
cam_orig[1:] - (1 / ((2./img_size) * cam_orig[0]))
])
new_cam = new_cam.astype(np.float32)
return renderer.rotated(
verts=vert,
deg=deg,
cam=new_cam,
color_name=mesh_color,
)
def make_square(img):
"""
    Because NMR only deals with square images, pads the shorter side.
"""
img_size = np.max(img.shape[:2])
pad_vals = img_size - img.shape[:2]
img = np.pad(
array=img,
pad_width=((0, pad_vals[0]), (0, pad_vals[1]), (0, 0)),
mode='constant'
)
return img, pad_vals
def remove_pads(img, pad_vals):
"""
    Undoes the padding done by make_square.
"""
if pad_vals[0] != 0:
img = img[:-pad_vals[0], :]
if pad_vals[1] != 0:
img = img[:, :-pad_vals[1]]
return img
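# --- Editorial sketch: make_square / remove_pads round trip on a dummy image
# (assumption: H != W, so exactly one side receives padding).
def _pad_roundtrip_demo():
    import numpy as np
    img = np.zeros((100, 80, 3), dtype=np.uint8)
    squared, pad_vals = make_square(img)   # pads width by 20
    assert squared.shape[:2] == (100, 100)
    restored = remove_pads(squared, pad_vals)
    assert restored.shape == img.shape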
def compute_video_bbox(cams, kps, proc_infos, margin=10):
"""
Given the prediction and original image info,
figures out the min/max extent (bbox)
of the person in the entire video.
    Adjusts the cameras so people now project into this new bbox.
Needed to crop the video around the person and also to
rotate the mesh.
    cams: N x 3, predicted cameras.
    kps: N x K x 3, predicted 2D joints, used to figure out the extent.
    proc_infos: dict holding:
start_pt, scale: N x 2, N x 1
preprocessing done on this image.
im_shape: image shape after preprocessing
im_path: to the first image to figure out size of orig video
"""
im_path = proc_infos[0]['im_path']
img = imread(im_path)
img_h, img_w = img.shape[:2]
img_size = np.max([img_h, img_w])
im_shape = proc_infos[0]['im_shape'][0]
new_cams = []
bboxes = []
# For each image, get the joints in the original coord frame:
for i, (proc_info, kp, cam) in enumerate(zip(proc_infos, kps, cams)):
scale = proc_info['scale']
start_pt = proc_info['start_pt']
undo_scale = 1. / np.array(scale)
# Adjust kp_pred.
# This is in 224x224 cropped space.
pred_joint = ((kp + 1) * 0.5) * im_shape
# This is in the original image.
pred_joint_orig = (pred_joint + start_pt - im_shape) * undo_scale
        # In normalized coordinates of the original image:
# kp_orig = 2 * (pred_joint_orig / img_size) - 1
# This is camera in crop image coord (224x224).
cam_crop = np.hstack([im_shape * cam[0] * 0.5,
cam[1:] + (2./cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] * undo_scale,
cam_crop[1:] + (start_pt - im_shape) / cam_crop[0]
])
# This is the camera in normalized orig_image coord
new_cam = np.hstack([
cam_orig[0] * (2. / img_size),
cam_orig[1:] - (1 / ((2./img_size) * cam_orig[0]))
])
new_cams.append(new_cam.astype(np.float32))
x = pred_joint_orig[:, 0]
y = pred_joint_orig[:, 1]
ymin = max(0, min(y) - margin)
ymax = min(img_h - 1, max(y) + margin)
xmin = max(0, min(x) - margin)
xmax = min(img_w - 1, max(x) + margin)
bbox = np.array([ymin, ymax, xmin, xmax])
bboxes.append(bbox)
# Figure out the video level bbox.
# bbox is in format [ymin, ymax, xmin, xmax]
bboxes = np.stack(bboxes)
bbox = np.array([
np.min(bboxes[:, 0]),
np.max(bboxes[:, 1]),
np.min(bboxes[:, 2]),
np.max(bboxes[:, 3])
])
    bbox = bbox.astype(int)  # plain int: np.int is removed in modern NumPy
# Now adjust the cams by this bbox offset.
ymin, xmin = bbox[0], bbox[2]
new_offset = np.array([xmin, ymin])
new_offset_norm = np.linalg.norm(new_offset)
img_size_crop = np.max([bbox[1] - bbox[0], bbox[3] - bbox[2]])
# Rotated images: save delta translation
new_cams_cropped = []
for i, (proc_info, kp, cam) in enumerate(zip(proc_infos, kps, cams)):
scale = proc_info['scale']
undo_scale = 1. / np.array(scale)
start_pt0 = proc_info['start_pt']
start_pt = start_pt0 - (new_offset * scale)
if np.linalg.norm(proc_info['start_pt']) < new_offset_norm:
print('crop is more than start pt..?')
import ipdb; ipdb.set_trace()
# This is camera in crop image coord (224x224).
cam_crop = np.hstack([im_shape * cam[0] * 0.5,
cam[1:] + (2./cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] * undo_scale,
cam_crop[1:] + (start_pt - im_shape) / cam_crop[0]
])
# This is the camera in normalized orig_image coord
new_cam = np.hstack([
cam_orig[0] * (2. / img_size_crop),
cam_orig[1:] - (1 / ((2./img_size_crop) * cam_orig[0]))
])
new_cams_cropped.append(new_cam.astype(np.float32))
return bbox, new_cams_cropped
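# --- Editorial sketch: the video-level box above is just the union of the
# per-frame boxes in [ymin, ymax, xmin, xmax] order. Minimal check with
# made-up boxes:
def _bbox_union_demo():
    import numpy as np
    bboxes = np.stack([np.array([10, 50, 5, 40]),
                       np.array([8, 60, 7, 35])])
    union = np.array([np.min(bboxes[:, 0]), np.max(bboxes[:, 1]),
                      np.min(bboxes[:, 2]), np.max(bboxes[:, 3])])
    assert (union == np.array([8, 60, 5, 40])).all()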
def get_params_from_omega(smpl_model, regressor, omega, cam=None):
cam = omega[:3] if cam is None else cam
pose = omega[3:3 + 72]
shape = omega[75:]
smpl_model.pose[:] = pose
smpl_model.betas[:] = shape
verts = np.copy(smpl_model.r)
joints = regressor.dot(verts)
kps = cam[0] * (joints[:, :2] + cam[1:])
return {
'cam': cam,
'joints': joints,
'kps': kps,
'pose': pose,
'shape': shape,
'verts': verts,
}
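# --- Editorial sketch: the weak-perspective projection used in
# get_params_from_omega, isolated with hypothetical joints. (The real call
# needs an SMPL model and a joint regressor, which are external assets.)
def _weak_perspective_demo():
    import numpy as np
    cam = np.array([0.9, 0.05, -0.02])            # [scale, tx, ty]
    joints = np.array([[0.1, 0.2, 1.0], [0.0, -0.1, 0.9]])
    kps = cam[0] * (joints[:, :2] + cam[1:])    # drop z, translate, scale
    assert kps.shape == (2, 2)
    return kps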
| [
"numpy.load",
"numpy.sum",
"ipdb.set_trace",
"numpy.ones",
"numpy.clip",
"numpy.linalg.norm",
"numpy.pad",
"numpy.copy",
"torch.FloatTensor",
"numpy.max",
"src.util.common.resize_img",
"skimage.io.imread",
"numpy.stack",
"numpy.dstack",
"torch.autograd.Variable",
"src.util.render_utils... | [((9718, 9754), 'src.util.render_utils.draw_skeleton', 'draw_skeleton', (['input_img', 'pred_joint'], {}), '(input_img, pred_joint)\n', (9731, 9754), False, 'from src.util.render_utils import draw_skeleton, draw_text\n'), ((11901, 11922), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (11907, 11922), True, 'import numpy as np\n'), ((14623, 14644), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (14629, 14644), True, 'import numpy as np\n'), ((15770, 15791), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (15776, 15791), True, 'import numpy as np\n'), ((15843, 15937), 'numpy.pad', 'np.pad', ([], {'array': 'img', 'pad_width': '((0, pad_vals[0]), (0, pad_vals[1]), (0, 0))', 'mode': '"""constant"""'}), "(array=img, pad_width=((0, pad_vals[0]), (0, pad_vals[1]), (0, 0)),\n mode='constant')\n", (15849, 15937), True, 'import numpy as np\n'), ((16981, 16996), 'skimage.io.imread', 'imread', (['im_path'], {}), '(im_path)\n', (16987, 16996), False, 'from skimage.io import imread\n'), ((17045, 17067), 'numpy.max', 'np.max', (['[img_h, img_w]'], {}), '([img_h, img_w])\n', (17051, 17067), True, 'import numpy as np\n'), ((18789, 18805), 'numpy.stack', 'np.stack', (['bboxes'], {}), '(bboxes)\n', (18797, 18805), True, 'import numpy as np\n'), ((19083, 19105), 'numpy.array', 'np.array', (['[xmin, ymin]'], {}), '([xmin, ymin])\n', (19091, 19105), True, 'import numpy as np\n'), ((19128, 19154), 'numpy.linalg.norm', 'np.linalg.norm', (['new_offset'], {}), '(new_offset)\n', (19142, 19154), True, 'import numpy as np\n'), ((19175, 19221), 'numpy.max', 'np.max', (['[bbox[1] - bbox[0], bbox[3] - bbox[2]]'], {}), '([bbox[1] - bbox[0], bbox[3] - bbox[2]])\n', (19181, 19221), True, 'import numpy as np\n'), ((20615, 20636), 'numpy.copy', 'np.copy', (['smpl_model.r'], {}), '(smpl_model.r)\n', (20622, 20636), True, 'import numpy as np\n'), ((1317, 1380), 'neural_renderer.Renderer', 'nr.Renderer', (['img_size'], {'camera_mode': '"""look_at"""', 'perspective': '(False)'}), "(img_size, camera_mode='look_at', perspective=False)\n", (1328, 1380), True, 'import neural_renderer as nr\n'), ((1799, 1859), 'numpy.ones', 'np.ones', (['(1, self.faces.shape[1], t_size, t_size, t_size, 3)'], {}), '((1, self.faces.shape[1], t_size, t_size, t_size, 3))\n', (1806, 1859), True, 'import numpy as np\n'), ((2010, 2032), 'numpy.hstack', 'np.hstack', (['[0.9, 0, 0]'], {}), '([0.9, 0, 0])\n', (2019, 2032), True, 'import numpy as np\n'), ((2125, 2156), 'torch.unsqueeze', 'torch.unsqueeze', (['default_cam', '(0)'], {}), '(default_cam, 0)\n', (2140, 2156), False, 'import torch\n'), ((7229, 7253), 'numpy.dstack', 'np.dstack', (['(rend, alpha)'], {}), '((rend, alpha))\n', (7238, 7253), True, 'import numpy as np\n'), ((7670, 7702), 'torch.autograd.Variable', 'Variable', (['x'], {'requires_grad': '(False)'}), '(x, requires_grad=False)\n', (7678, 7702), False, 'from torch.autograd import Variable\n'), ((8052, 8086), 'torch.autograd.Variable', 'Variable', (['src'], {'requires_grad': '(False)'}), '(src, requires_grad=False)\n', (8060, 8086), False, 'from torch.autograd import Variable\n'), ((9313, 9363), 'numpy.sum', 'np.sum', (['((kp_gt[gt_vis, :2] - kp_pred[gt_vis]) ** 2)'], {}), '((kp_gt[gt_vis, :2] - kp_pred[gt_vis]) ** 2)\n', (9319, 9363), True, 'import numpy as np\n'), ((9605, 9630), 'src.util.render_utils.draw_text', 'draw_text', (['rend_img', 'text'], {}), '(rend_img, text)\n', (9614, 9630), False, 'from src.util.render_utils import draw_skeleton, 
draw_text\n'), ((9857, 9920), 'src.util.render_utils.draw_skeleton', 'draw_skeleton', (['skel_img', 'gt_joint'], {'draw_edges': '(False)', 'vis': 'gt_vis'}), '(skel_img, gt_joint, draw_edges=False, vis=gt_vis)\n', (9870, 9920), False, 'from src.util.render_utils import draw_skeleton, draw_text\n'), ((11154, 11169), 'skimage.io.imread', 'imread', (['im_path'], {}), '(im_path)\n', (11160, 11169), False, 'from skimage.io import imread\n'), ((11275, 11296), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (11281, 11296), True, 'import numpy as np\n'), ((11467, 11494), 'src.util.common.resize_img', 'resize_img', (['img', 'scale_orig'], {}), '(img, scale_orig)\n', (11477, 11494), False, 'from src.util.common import resize_img\n'), ((11793, 11809), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (11801, 11809), True, 'import numpy as np\n'), ((12433, 12508), 'numpy.hstack', 'np.hstack', (['[proc_img_shape[0] * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5]'], {}), '([proc_img_shape[0] * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5])\n', (12442, 12508), True, 'import numpy as np\n'), ((12603, 12705), 'numpy.hstack', 'np.hstack', (['[cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - proc_img_shape[0]) /\n cam_crop[0]]'], {}), '([cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt -\n proc_img_shape[0]) / cam_crop[0]])\n', (12612, 12705), True, 'import numpy as np\n'), ((12815, 12913), 'numpy.hstack', 'np.hstack', (['[cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 / img_size *\n cam_orig[0])]'], {}), '([cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 /\n img_size * cam_orig[0])])\n', (12824, 12913), True, 'import numpy as np\n'), ((13858, 13873), 'skimage.io.imread', 'imread', (['im_path'], {}), '(im_path)\n', (13864, 13873), False, 'from skimage.io import imread\n'), ((14180, 14196), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (14188, 14196), True, 'import numpy as np\n'), ((14204, 14225), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (14210, 14225), True, 'import numpy as np\n'), ((14396, 14423), 'src.util.common.resize_img', 'resize_img', (['img', 'scale_orig'], {}), '(img, scale_orig)\n', (14406, 14423), False, 'from src.util.common import resize_img\n'), ((14939, 15014), 'numpy.hstack', 'np.hstack', (['[proc_img_shape[0] * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5]'], {}), '([proc_img_shape[0] * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5])\n', (14948, 15014), True, 'import numpy as np\n'), ((15109, 15211), 'numpy.hstack', 'np.hstack', (['[cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - proc_img_shape[0]) /\n cam_crop[0]]'], {}), '([cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt -\n proc_img_shape[0]) / cam_crop[0]])\n', (15118, 15211), True, 'import numpy as np\n'), ((15321, 15419), 'numpy.hstack', 'np.hstack', (['[cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 / img_size *\n cam_orig[0])]'], {}), '([cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 /\n img_size * cam_orig[0])])\n', (15330, 15419), True, 'import numpy as np\n'), ((17825, 17891), 'numpy.hstack', 'np.hstack', (['[im_shape * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5]'], {}), '([im_shape * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5])\n', (17834, 17891), True, 'import numpy as np\n'), ((17985, 18078), 'numpy.hstack', 'np.hstack', (['[cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - im_shape) / cam_crop[0]]'], {}), '([cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - im_shape) /\n cam_crop[0]])\n', (17994, 18078), True, 
'import numpy as np\n'), ((18187, 18285), 'numpy.hstack', 'np.hstack', (['[cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 / img_size *\n cam_orig[0])]'], {}), '([cam_orig[0] * (2.0 / img_size), cam_orig[1:] - 1 / (2.0 /\n img_size * cam_orig[0])])\n', (18196, 18285), True, 'import numpy as np\n'), ((18623, 18657), 'numpy.array', 'np.array', (['[ymin, ymax, xmin, xmax]'], {}), '([ymin, ymax, xmin, xmax])\n', (18631, 18657), True, 'import numpy as np\n'), ((19780, 19846), 'numpy.hstack', 'np.hstack', (['[im_shape * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5]'], {}), '([im_shape * cam[0] * 0.5, cam[1:] + 2.0 / cam[0] * 0.5])\n', (19789, 19846), True, 'import numpy as np\n'), ((19941, 20034), 'numpy.hstack', 'np.hstack', (['[cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - im_shape) / cam_crop[0]]'], {}), '([cam_crop[0] * undo_scale, cam_crop[1:] + (start_pt - im_shape) /\n cam_crop[0]])\n', (19950, 20034), True, 'import numpy as np\n'), ((20144, 20252), 'numpy.hstack', 'np.hstack', (['[cam_orig[0] * (2.0 / img_size_crop), cam_orig[1:] - 1 / (2.0 /\n img_size_crop * cam_orig[0])]'], {}), '([cam_orig[0] * (2.0 / img_size_crop), cam_orig[1:] - 1 / (2.0 /\n img_size_crop * cam_orig[0])])\n', (20153, 20252), True, 'import numpy as np\n'), ((1720, 1750), 'torch.unsqueeze', 'torch.unsqueeze', (['self.faces', '(0)'], {}), '(self.faces, 0)\n', (1735, 1750), False, 'import torch\n'), ((4227, 4252), 'torch.unsqueeze', 'torch.unsqueeze', (['verts', '(0)'], {}), '(verts, 0)\n', (4242, 4252), False, 'import torch\n'), ((4778, 4802), 'torch.unsqueeze', 'torch.unsqueeze', (['rend', '(0)'], {}), '(rend, 0)\n', (4793, 4802), False, 'import torch\n'), ((5010, 5029), 'numpy.clip', 'np.clip', (['rend', '(0)', '(1)'], {}), '(rend, 0, 1)\n', (5017, 5029), True, 'import numpy as np\n'), ((11588, 11603), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (11596, 11603), True, 'import numpy as np\n'), ((14517, 14532), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (14525, 14532), True, 'import numpy as np\n'), ((17391, 17406), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (17399, 17406), True, 'import numpy as np\n'), ((18836, 18856), 'numpy.min', 'np.min', (['bboxes[:, 0]'], {}), '(bboxes[:, 0])\n', (18842, 18856), True, 'import numpy as np\n'), ((18866, 18886), 'numpy.max', 'np.max', (['bboxes[:, 1]'], {}), '(bboxes[:, 1])\n', (18872, 18886), True, 'import numpy as np\n'), ((18896, 18916), 'numpy.min', 'np.min', (['bboxes[:, 2]'], {}), '(bboxes[:, 2])\n', (18902, 18916), True, 'import numpy as np\n'), ((18926, 18946), 'numpy.max', 'np.max', (['bboxes[:, 3]'], {}), '(bboxes[:, 3])\n', (18932, 18946), True, 'import numpy as np\n'), ((19431, 19446), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (19439, 19446), True, 'import numpy as np\n'), ((19554, 19591), 'numpy.linalg.norm', 'np.linalg.norm', (["proc_info['start_pt']"], {}), "(proc_info['start_pt'])\n", (19568, 19591), True, 'import numpy as np\n'), ((19687, 19703), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (19701, 19703), False, 'import ipdb\n'), ((1555, 1573), 'numpy.load', 'np.load', (['face_path'], {}), '(face_path)\n', (1562, 1573), True, 'import numpy as np\n'), ((3844, 3871), 'torch.unsqueeze', 'torch.unsqueeze', (['texture', '(0)'], {}), '(texture, 0)\n', (3859, 3871), False, 'import torch\n'), ((4114, 4137), 'torch.unsqueeze', 'torch.unsqueeze', (['cam', '(0)'], {}), '(cam, 0)\n', (4129, 4137), False, 'import torch\n'), ((7788, 7808), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), 
'(x)\n', (7805, 7808), False, 'import torch\n'), ((11427, 11448), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (11433, 11448), True, 'import numpy as np\n'), ((11522, 11537), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (11530, 11537), True, 'import numpy as np\n'), ((14356, 14377), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (14362, 14377), True, 'import numpy as np\n'), ((14451, 14466), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (14459, 14466), True, 'import numpy as np\n'), ((1622, 1652), 'torch.IntTensor', 'torch.IntTensor', (['self.faces_np'], {}), '(self.faces_np)\n', (1637, 1652), False, 'import torch\n'), ((1930, 1960), 'torch.FloatTensor', 'torch.FloatTensor', (['default_tex'], {}), '(default_tex)\n', (1947, 1960), False, 'import torch\n'), ((2067, 2089), 'torch.FloatTensor', 'torch.FloatTensor', (['cam'], {}), '(cam)\n', (2084, 2089), False, 'import torch\n'), ((3454, 3491), 'torch.FloatTensor', 'torch.FloatTensor', (['COLORS[color_name]'], {}), '(COLORS[color_name])\n', (3471, 3491), False, 'import torch\n'), ((5330, 5353), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(3)'], {}), '(mask, 3)\n', (5344, 5353), True, 'import numpy as np\n'), ((6318, 6333), 'numpy.deg2rad', 'np.deg2rad', (['deg'], {}), '(deg)\n', (6328, 6333), True, 'import numpy as np\n'), ((6336, 6350), 'numpy.array', 'np.array', (['axis'], {}), '(axis)\n', (6344, 6350), True, 'import numpy as np\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from paddle import fluid
from paddle.fluid.layers import lstm as LSTM
from paddle.fluid.layers import fill_constant
from paddle.fluid.framework import program_guard, Program
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0
def identity(x):
return x
def sigmoid(x):
y = np.copy(x)
y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
return 1. / (1. + np.exp(-y))
def tanh(x):
y = -2. * x
y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
return (2. / (1. + np.exp(y))) - 1.
def relu(x):
return np.maximum(x, 0)
ACTIVATION = {
'identity': identity,
'sigmoid': sigmoid,
'tanh': tanh,
'relu': relu
}
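# Editorial note (added): sigmoid/tanh above clip their inputs before exp(),
# so this numpy reference never overflows, matching the thresholds defined at
# the top of the file. A quick self-contained sanity check:
def _activation_clip_demo():
    x = np.array([-1e6, 0.0, 1e6])
    y = sigmoid(x)
    assert np.isfinite(y).all() and y[1] == 0.5  # no overflow, correct midpoint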
def lstm(
input, # T x 4D
lod, # 1 x N
h0=None, # N x D
c0=None, # N x D
w_h=None, # D x 4D
w_b=None, # 1 x 4D
w_c=None, # 1 x 3D
is_reverse=False,
act_gate=None,
act_cell=None,
act_cand=None):
def _step(x, w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand):
g = np.dot(h_pre, w_h) # 1 x 4D
g = g + x
g = np.reshape(g, (1, g.size))
c, g_i, g_f, g_o = np.split(g, 4, axis=1)
if w_c is None:
g_i = act_gate(g_i) # 1 x D
g_f = act_gate(g_f) # 1 x D
else:
w_ic, w_fc, w_oc = np.split(w_c, 3, axis=1)
g_i = act_gate(g_i + w_ic * c_pre) # 1 x D
g_f = act_gate(g_f + w_fc * c_pre) # 1 x D
c = g_f * c_pre + g_i * act_cand(c) # 1 x D
if w_c is None:
g_o = act_gate(g_o) # 1 x D
else:
_, _, w_oc = np.split(w_c, 3, axis=1)
g_o = act_gate(g_o + w_oc * c) # 1 x D
h = g_o * act_cell(c)
return h, c
def _reverse(x, offset):
y = np.zeros_like(x)
for i in range(len(offset) - 1):
b, e = offset[i], offset[i + 1]
y[b:e, :] = np.flip(x[b:e, :], 0)
return y
offset = [0]
for l in lod[0]:
offset.append(offset[-1] + l)
batch_size = len(lod[0])
hidden = []
cell = []
input = _reverse(input, offset) if is_reverse else input
if w_b is not None:
input = input + np.tile(w_b, (offset[-1], 1))
for i in range(batch_size):
# compute one sequence
seq_len = lod[0][i]
x = input[offset[i]:offset[i + 1], :]
h_pre = h0[i] # 1 x D
c_pre = c0[i] # 1 x D
for j in range(seq_len):
# compute one step
h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate,
act_cell, act_cand)
hidden.append(h_pre.flatten())
cell.append(c_pre.flatten())
hidden = np.array(hidden).astype('float64')
cell = np.array(cell).astype('float64')
hidden = _reverse(hidden, offset) if is_reverse else hidden
cell = _reverse(cell, offset) if is_reverse else cell
    assert hidden.shape == (input.shape[0], input.shape[1] // 4)
    assert cell.shape == (input.shape[0], input.shape[1] // 4)
return hidden, cell
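# --- Editorial sketch: driving the numpy reference above with a tiny
# hypothetical batch; lod [[2, 1]] encodes two sequences of lengths 2 and 1.
def _lstm_reference_demo():
    D, lod = 4, [[2, 1]]
    T, N = sum(lod[0]), len(lod[0])
    x = np.random.normal(size=(T, 4 * D))
    h0, c0 = np.zeros((N, D)), np.zeros((N, D))
    w = np.random.normal(size=(D, 4 * D))
    b = np.random.normal(size=(1, 7 * D))  # peephole bias layout
    h, c = lstm(x, lod, h0, c0, w, b[:, :4 * D], b[:, 4 * D:], False,
                ACTIVATION['sigmoid'], ACTIVATION['tanh'],
                ACTIVATION['tanh'])
    assert h.shape == (T, D) and c.shape == (T, D)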
class LstmUnitTestError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size = 20
seq_len = 100
dropout_prob = 0.2
hidden_size = 150
num_layers = 1
input = fluid.data(name='input',
shape=[batch_size, seq_len, hidden_size],
dtype='float32')
pre_hidden = fill_constant([num_layers, batch_size, hidden_size],
'float32', 0.0)
pre_cell = fill_constant([num_layers, batch_size, hidden_size],
'float32', 0.0)
np_input = np.random.uniform(
-0.1, 0.1, (batch_size, seq_len, hidden_size)).astype('float64')
np_pre_hidden = np.random.uniform(
-0.1, 0.1,
(num_layers, batch_size, hidden_size)).astype('float64')
np_pre_cell = np.random.uniform(
-0.1, 0.1,
(num_layers, batch_size, hidden_size)).astype('float64')
def test_input_Variable():
LSTM(np_input, pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
LSTM(np_input, np_pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_pre_cell_Variable():
LSTM(np_input, pre_hidden, np_pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_cell_Variable)
def test_input_type():
error_input = fluid.data(name='error_input',
shape=[None, hidden_size * 3],
dtype='int32')
LSTM(error_input, pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
LSTM(input, error_pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_hidden_type)
def test_pre_cell_type():
error_pre_cell = fluid.data(name='error_pre_cell',
shape=[None, hidden_size],
dtype='int32')
LSTM(input, pre_hidden, error_pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_cell_type)
class TestLstmOp(OpTest):
def set_is_test(self):
self.is_test = False
def set_lod(self):
self.lod = [[2, 3, 2]]
def set_argument(self):
self.set_is_test()
self.set_lod()
self.D = 16
self.act_gate = 'sigmoid'
self.act_cell = 'tanh'
self.act_cand = 'tanh'
self.has_initial_state = False
self.is_reverse = False
self.use_peepholes = True
def setUp(self):
self.set_argument()
self.op_type = 'lstm'
T = sum(self.lod[0])
N = len(self.lod[0])
x = np.random.normal(size=(T, 4 * self.D)).astype('float64')
if self.has_initial_state:
h0 = np.random.normal(size=(N, self.D)).astype('float64')
c0 = np.random.normal(size=(N, self.D)).astype('float64')
else:
h0 = np.zeros((N, self.D)).astype('float64')
c0 = np.zeros((N, self.D)).astype('float64')
w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64')
if self.use_peepholes:
b = np.random.normal(size=(1, 7 * self.D)).astype('float64')
else:
b = np.random.normal(size=(1, 4 * self.D)).astype('float64')
w_b = b[:, 0:4 * self.D]
w_c = b[:, 4 * self.D:] if self.use_peepholes else None
h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse,
ACTIVATION[self.act_gate], ACTIVATION[self.act_cell],
ACTIVATION[self.act_cand])
self.inputs = {'Input': (x, self.lod), 'Weight': w}
self.inputs['Bias'] = b
if self.has_initial_state:
self.inputs['H0'] = h0
self.inputs['C0'] = c0
self.outputs = {
'Hidden': (h, self.lod),
'Cell': (c, self.lod),
}
self.attrs = {
'use_peepholes': self.use_peepholes,
'is_reverse': self.is_reverse,
'gate_activation': self.act_gate,
'cell_activation': self.act_cell,
'candidate_activation': self.act_cand,
'is_test': self.is_test
}
def test_check_output(self):
self.check_output(atol=1e-8, check_dygraph=False)
def test_check_grad(self):
        # TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0])
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden'],
max_relative_error=5e-4,
check_dygraph=False)
class TestLstmOpCase1(TestLstmOp):
def set_lod(self):
self.lod = [[0, 3, 2]]
class TestLstmOpCase2(TestLstmOp):
def set_lod(self):
self.lod = [[0, 3, 0]]
class TestLstmOpCase3(TestLstmOp):
def set_lod(self):
self.lod = [[2, 0, 4]]
class TestLstmOpInference(TestLstmOp):
def set_is_test(self):
self.is_test = True
# avoid checking gradient
def test_check_grad(self):
pass
class TestLstmOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_Variable():
input_data = np.random.random((1, 2048)).astype("float32")
fluid.layers.dynamic_lstm(input=input_data,
size=2048,
use_peepholes=False)
self.assertRaises(TypeError, test_Variable)
def test_h_0():
in_data = fluid.data(name="input",
shape=[None, 2048],
dtype="float32")
h = fluid.data(name="h", shape=[None, 512], dtype="int32")
c = fluid.data(name="c", shape=[None, 512], dtype="float32")
fluid.layers.dynamic_lstm(input=in_data,
size=2048,
use_peepholes=False,
h_0=h,
c_0=c)
self.assertRaises(TypeError, test_h_0)
def test_c_0():
in_data_ = fluid.data(name="input_",
shape=[None, 2048],
dtype="float32")
h_ = fluid.data(name="h_", shape=[None, 512], dtype="float32")
c_ = fluid.data(name="c_", shape=[None, 512], dtype="int32")
fluid.layers.dynamic_lstm(input=in_data_,
size=2048,
use_peepholes=False,
h_0=h_,
c_0=c_)
self.assertRaises(TypeError, test_c_0)
# class TestLstmOpHasInitial(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = True
# self.is_reverse = True
# self.use_peepholes = True
# def test_check_grad(self):
#     # TODO(qingqing) remove following lines after the check_grad is refined.
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],
# max_relative_error=5e-4)
# def test_check_grad_ingore_bias(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Bias'))
# def test_check_grad_ingore_weight(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Bias'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Weight'))
# def test_check_grad_ingore_input(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Weight', 'Bias'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Input'))
# def test_check_grad_ingore_h0(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('H0'))
# def test_check_grad_ingore_c0(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('C0'))
# class TestLstmOpRerverse(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = False
# self.is_reverse = True
# self.use_peepholes = True
# class TestLstmOpNotUsePeepholes(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = False
# self.is_reverse = True
# self.use_peepholes = False
if __name__ == '__main__':
unittest.main()
| [
"paddle.fluid.layers.dynamic_lstm",
"paddle.fluid.data",
"numpy.maximum",
"numpy.exp",
"numpy.tile",
"numpy.random.normal",
"unittest.main",
"numpy.zeros_like",
"numpy.copy",
"paddle.fluid.layers.fill_constant",
"numpy.reshape",
"paddle.fluid.layers.lstm",
"paddle.fluid.framework.Program",
... | [((1047, 1057), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (1054, 1057), True, 'import numpy as np\n'), ((1344, 1360), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (1354, 1360), True, 'import numpy as np\n'), ((15700, 15715), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15713, 15715), False, 'import unittest\n'), ((1841, 1859), 'numpy.dot', 'np.dot', (['h_pre', 'w_h'], {}), '(h_pre, w_h)\n', (1847, 1859), True, 'import numpy as np\n'), ((1900, 1926), 'numpy.reshape', 'np.reshape', (['g', '(1, g.size)'], {}), '(g, (1, g.size))\n', (1910, 1926), True, 'import numpy as np\n'), ((1954, 1976), 'numpy.split', 'np.split', (['g', '(4)'], {'axis': '(1)'}), '(g, 4, axis=1)\n', (1962, 1976), True, 'import numpy as np\n'), ((2592, 2608), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2605, 2608), True, 'import numpy as np\n'), ((1194, 1204), 'numpy.exp', 'np.exp', (['(-y)'], {}), '(-y)\n', (1200, 1204), True, 'import numpy as np\n'), ((2128, 2152), 'numpy.split', 'np.split', (['w_c', '(3)'], {'axis': '(1)'}), '(w_c, 3, axis=1)\n', (2136, 2152), True, 'import numpy as np\n'), ((2423, 2447), 'numpy.split', 'np.split', (['w_c', '(3)'], {'axis': '(1)'}), '(w_c, 3, axis=1)\n', (2431, 2447), True, 'import numpy as np\n'), ((2718, 2739), 'numpy.flip', 'np.flip', (['x[b:e, :]', '(0)'], {}), '(x[b:e, :], 0)\n', (2725, 2739), True, 'import numpy as np\n'), ((3002, 3031), 'numpy.tile', 'np.tile', (['w_b', '(offset[-1], 1)'], {}), '(w_b, (offset[-1], 1))\n', (3009, 3031), True, 'import numpy as np\n'), ((3519, 3535), 'numpy.array', 'np.array', (['hidden'], {}), '(hidden)\n', (3527, 3535), True, 'import numpy as np\n'), ((3565, 3579), 'numpy.array', 'np.array', (['cell'], {}), '(cell)\n', (3573, 3579), True, 'import numpy as np\n'), ((4158, 4246), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input"""', 'shape': '[batch_size, seq_len, hidden_size]', 'dtype': '"""float32"""'}), "(name='input', shape=[batch_size, seq_len, hidden_size], dtype=\n 'float32')\n", (4168, 4246), False, 'from paddle import fluid\n'), ((4329, 4397), 'paddle.fluid.layers.fill_constant', 'fill_constant', (['[num_layers, batch_size, hidden_size]', '"""float32"""', '(0.0)'], {}), "([num_layers, batch_size, hidden_size], 'float32', 0.0)\n", (4342, 4397), False, 'from paddle.fluid.layers import fill_constant\n'), ((4460, 4528), 'paddle.fluid.layers.fill_constant', 'fill_constant', (['[num_layers, batch_size, hidden_size]', '"""float32"""', '(0.0)'], {}), "([num_layers, batch_size, hidden_size], 'float32', 0.0)\n", (4473, 4528), False, 'from paddle.fluid.layers import fill_constant\n'), ((1301, 1310), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (1307, 1310), True, 'import numpy as np\n'), ((3973, 3982), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (3980, 3982), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((3984, 3993), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (3991, 3993), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((5038, 5139), 'paddle.fluid.layers.lstm', 'LSTM', (['np_input', 'pre_hidden', 'pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 'dropout_prob'}), '(np_input, pre_hidden, pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (5042, 5139), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((5304, 5408), 'paddle.fluid.layers.lstm', 'LSTM', (['np_input', 'np_pre_hidden', 'pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 
'dropout_prob'}), '(np_input, np_pre_hidden, pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (5308, 5408), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((5576, 5680), 'paddle.fluid.layers.lstm', 'LSTM', (['np_input', 'pre_hidden', 'np_pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 'dropout_prob'}), '(np_input, pre_hidden, np_pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (5580, 5680), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((5853, 5929), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_input"""', 'shape': '[None, hidden_size * 3]', 'dtype': '"""int32"""'}), "(name='error_input', shape=[None, hidden_size * 3], dtype='int32')\n", (5863, 5929), False, 'from paddle import fluid\n'), ((6028, 6132), 'paddle.fluid.layers.lstm', 'LSTM', (['error_input', 'pre_hidden', 'pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 'dropout_prob'}), '(error_input, pre_hidden, pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (6032, 6132), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((6308, 6385), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_hidden', shape=[None, hidden_size], dtype='int32')\n", (6318, 6385), False, 'from paddle import fluid\n'), ((6494, 6598), 'paddle.fluid.layers.lstm', 'LSTM', (['input', 'error_pre_hidden', 'pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 'dropout_prob'}), '(input, error_pre_hidden, pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (6498, 6598), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((6775, 6850), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_cell"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_cell', shape=[None, hidden_size], dtype='int32')\n", (6785, 6850), False, 'from paddle import fluid\n'), ((6955, 7059), 'paddle.fluid.layers.lstm', 'LSTM', (['input', 'pre_hidden', 'error_pre_cell', 'seq_len', 'hidden_size', 'num_layers'], {'dropout_prob': 'dropout_prob'}), '(input, pre_hidden, error_pre_cell, seq_len, hidden_size, num_layers,\n dropout_prob=dropout_prob)\n', (6959, 7059), True, 'from paddle.fluid.layers import lstm as LSTM\n'), ((7755, 7793), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(T, 4 * self.D)'}), '(size=(T, 4 * self.D))\n', (7771, 7793), True, 'import numpy as np\n'), ((8127, 8170), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.D, 4 * self.D)'}), '(size=(self.D, 4 * self.D))\n', (8143, 8170), True, 'import numpy as np\n'), ((9551, 9576), 'numpy.zeros', 'np.zeros', (['(N, 4 * self.D)'], {}), '((N, 4 * self.D))\n', (9559, 9576), True, 'import numpy as np\n'), ((9637, 9658), 'numpy.zeros', 'np.zeros', (['(N, self.D)'], {}), '((N, self.D))\n', (9645, 9658), True, 'import numpy as np\n'), ((10396, 10405), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (10403, 10405), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((10407, 10416), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (10414, 10416), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((10544, 10619), 'paddle.fluid.layers.dynamic_lstm', 'fluid.layers.dynamic_lstm', ([], {'input': 'input_data', 'size': '(2048)', 'use_peepholes': '(False)'}), '(input=input_data, size=2048, use_peepholes=False)\n', (10569, 
10619), False, 'from paddle import fluid\n'), ((10816, 10877), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input"""', 'shape': '[None, 2048]', 'dtype': '"""float32"""'}), "(name='input', shape=[None, 2048], dtype='float32')\n", (10826, 10877), False, 'from paddle import fluid\n'), ((10972, 11026), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""h"""', 'shape': '[None, 512]', 'dtype': '"""int32"""'}), "(name='h', shape=[None, 512], dtype='int32')\n", (10982, 11026), False, 'from paddle import fluid\n'), ((11047, 11103), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""c"""', 'shape': '[None, 512]', 'dtype': '"""float32"""'}), "(name='c', shape=[None, 512], dtype='float32')\n", (11057, 11103), False, 'from paddle import fluid\n'), ((11120, 11210), 'paddle.fluid.layers.dynamic_lstm', 'fluid.layers.dynamic_lstm', ([], {'input': 'in_data', 'size': '(2048)', 'use_peepholes': '(False)', 'h_0': 'h', 'c_0': 'c'}), '(input=in_data, size=2048, use_peepholes=False,\n h_0=h, c_0=c)\n', (11145, 11210), False, 'from paddle import fluid\n'), ((11483, 11545), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input_"""', 'shape': '[None, 2048]', 'dtype': '"""float32"""'}), "(name='input_', shape=[None, 2048], dtype='float32')\n", (11493, 11545), False, 'from paddle import fluid\n'), ((11643, 11700), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""h_"""', 'shape': '[None, 512]', 'dtype': '"""float32"""'}), "(name='h_', shape=[None, 512], dtype='float32')\n", (11653, 11700), False, 'from paddle import fluid\n'), ((11722, 11777), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""c_"""', 'shape': '[None, 512]', 'dtype': '"""int32"""'}), "(name='c_', shape=[None, 512], dtype='int32')\n", (11732, 11777), False, 'from paddle import fluid\n'), ((11794, 11887), 'paddle.fluid.layers.dynamic_lstm', 'fluid.layers.dynamic_lstm', ([], {'input': 'in_data_', 'size': '(2048)', 'use_peepholes': '(False)', 'h_0': 'h_', 'c_0': 'c_'}), '(input=in_data_, size=2048, use_peepholes=False,\n h_0=h_, c_0=c_)\n', (11819, 11887), False, 'from paddle import fluid\n'), ((4590, 4654), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(batch_size, seq_len, hidden_size)'], {}), '(-0.1, 0.1, (batch_size, seq_len, hidden_size))\n', (4607, 4654), True, 'import numpy as np\n'), ((4718, 4785), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(num_layers, batch_size, hidden_size)'], {}), '(-0.1, 0.1, (num_layers, batch_size, hidden_size))\n', (4735, 4785), True, 'import numpy as np\n'), ((4863, 4930), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(num_layers, batch_size, hidden_size)'], {}), '(-0.1, 0.1, (num_layers, batch_size, hidden_size))\n', (4880, 4930), True, 'import numpy as np\n'), ((7864, 7898), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, self.D)'}), '(size=(N, self.D))\n', (7880, 7898), True, 'import numpy as np\n'), ((7934, 7968), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, self.D)'}), '(size=(N, self.D))\n', (7950, 7968), True, 'import numpy as np\n'), ((8018, 8039), 'numpy.zeros', 'np.zeros', (['(N, self.D)'], {}), '((N, self.D))\n', (8026, 8039), True, 'import numpy as np\n'), ((8075, 8096), 'numpy.zeros', 'np.zeros', (['(N, self.D)'], {}), '((N, self.D))\n', (8083, 8096), True, 'import numpy as np\n'), ((8236, 8274), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 7 * self.D)'}), '(size=(1, 7 * self.D))\n', (8252, 8274), True, 'import numpy as np\n'), ((8323, 8361), 'numpy.random.normal', 
'np.random.normal', ([], {'size': '(1, 4 * self.D)'}), '(size=(1, 4 * self.D))\n', (8339, 8361), True, 'import numpy as np\n'), ((10482, 10509), 'numpy.random.random', 'np.random.random', (['(1, 2048)'], {}), '((1, 2048))\n', (10498, 10509), True, 'import numpy as np\n')] |
import collections
import json
import numpy as np
from data_reader import next_batch
from helpers import FileLogger
import tensorflow as tf  # used directly below; don't rely on the star import
from wavenet import *
LEARNING_RATE = 1e-5
WAVENET_PARAMS = 'wavenet_params.json'
MOMENTUM = 0.9
SEQUENCE_LENGTH = 32
def main():
with open(WAVENET_PARAMS, 'r') as f:
wavenet_params = json.load(f)
with tf.name_scope('create_inputs'):
x_placeholder = tf.placeholder('float32', [SEQUENCE_LENGTH, 1])
y_placeholder = tf.placeholder('float32', [1, 1])
net = WaveNet(wavenet_params['dilations'], SEQUENCE_LENGTH, x_placeholder, y_placeholder)
loss = net.loss()
pred = net.pred()
optimizer = create_adam_optimizer(LEARNING_RATE, MOMENTUM)
trainable = tf.trainable_variables()
grad_update = optimizer.minimize(loss, var_list=trainable)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
init = tf.initialize_all_variables()
sess.run(init)
print('Total # of parameters to train: {}'.format(count_trainable_parameters()))
file_logger = FileLogger('log.tsv', ['step', 'training_loss', 'benchmark_loss'])
d = collections.deque(maxlen=10)
benchmark_d = collections.deque(maxlen=10)
for step in range(1, int(1e9)):
x, y = next_batch()
loss_value, _, pred_value = sess.run([loss, grad_update, pred],
feed_dict={x_placeholder: x,
y_placeholder: y})
        # The constant prediction 0.5 is the best constant guess for IID U(0,1) targets; use it as a benchmark.
benchmark_d.append(sum((0.5 - y) ** 2))
d.append(loss_value)
mean_loss = np.mean(d)
benchmark_mean_loss = np.mean(benchmark_d)
file_logger.write([step, mean_loss, benchmark_mean_loss])
print('y = {}, p = {}, mean_loss = {}, bench_loss = {}'.format(y, pred_value, mean_loss, benchmark_mean_loss))
file_logger.close()
if __name__ == '__main__':
main()
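# --- Editorial sketch (not in the original script): the benchmark deque above
# scores the constant prediction 0.5 against targets y. For y ~ U(0, 1) the
# expected per-sample squared error is Var(U) = 1/12 ~= 0.0833, so that is the
# floor 'benchmark_loss' should approach if the targets really are IID uniform.
def _benchmark_floor_demo(n=1000000):
    y = np.random.uniform(size=n)
    return np.mean((0.5 - y) ** 2)  # ~= 1/12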
| [
"json.load",
"data_reader.next_batch",
"numpy.mean",
"helpers.FileLogger",
"collections.deque"
] | [((1046, 1112), 'helpers.FileLogger', 'FileLogger', (['"""log.tsv"""', "['step', 'training_loss', 'benchmark_loss']"], {}), "('log.tsv', ['step', 'training_loss', 'benchmark_loss'])\n", (1056, 1112), False, 'from helpers import FileLogger\n'), ((1121, 1149), 'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1138, 1149), False, 'import collections\n'), ((1168, 1196), 'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1185, 1196), False, 'import collections\n'), ((317, 329), 'json.load', 'json.load', (['f'], {}), '(f)\n', (326, 329), False, 'import json\n'), ((1248, 1260), 'data_reader.next_batch', 'next_batch', ([], {}), '()\n', (1258, 1260), False, 'from data_reader import next_batch\n'), ((1664, 1674), 'numpy.mean', 'np.mean', (['d'], {}), '(d)\n', (1671, 1674), True, 'import numpy as np\n'), ((1705, 1725), 'numpy.mean', 'np.mean', (['benchmark_d'], {}), '(benchmark_d)\n', (1712, 1725), True, 'import numpy as np\n')] |
from __future__ import print_function
import glob
import json
import os
import sys
import uuid
import cv2
import numpy as np
import pytesseract
import tensorflow as tf
from flask import Flask, request, redirect, render_template, Response, jsonify
from tensorflow.python.platform import gfile
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db import Base, File
import datetime
from lib.fast_rcnn.config import cfg, cfg_from_file
from lib.fast_rcnn.test import _get_blobs
from lib.rpn_msr.proposal_layer_tf import proposal_layer
from lib.text_connector.detectors import TextDetector
from lib.text_connector.text_connect_cfg import Config as TextLineCfg
sys.path.append(os.getcwd())
app = Flask(__name__)
engine = create_engine('sqlite:///example.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = 'static\\images'
app.config['UPLOAD_ORIGINAL_FOLDER'] = 'static\\original'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def index():
return render_template('/aicontainer.html')
@app.route('/demo')
def demo():
return render_template('/demo.html')
@app.route('/savebox',methods=['GET', 'POST'])
def savebox():
jsondata = request.get_json()
filename = jsondata['filename']
filepath = jsondata['filepath']
saveboxvalue = str(jsondata['saveboxvalue'])
    new_file = File(id=filename, Value=saveboxvalue, root=filepath, CreateTime=datetime.datetime.now())
session.add(new_file)
session.commit()
return jsonify(True)
@app.route('/encoding', methods=['GET', 'POST'])
def upload_face_image():
# Check if a valid image file was uploaded
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
# container recognition
if file and allowed_file(file.filename):
# delete file
images = glob.glob(app.config['UPLOAD_FOLDER'] + '\\*')
for item in images:
os.remove(item)
            # save file under the original-uploads folder
            path = os.path.join(app.config['UPLOAD_ORIGINAL_FOLDER'], file.filename)
file.save(path)
# rename file
filename, extension = os.path.splitext(path)
filename = str(uuid.uuid1()) + extension
os.rename(path, os.path.join(app.config['UPLOAD_ORIGINAL_FOLDER'] , filename))
return jsonify(os.path.join(app.config['UPLOAD_ORIGINAL_FOLDER'] , filename))
# If no valid image file was uploaded, show the file upload form:
return render_template('index.html')
@app.route('/ocr', methods=['POST'])
def ocr():
# get data
jsonData = request.get_json()
ori_file = jsonData['path']
# init session
cfg_from_file('ctpn/text.yml')
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
with gfile.FastGFile('data/ctpn.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
sess.run(tf.global_variables_initializer())
input_img = sess.graph.get_tensor_by_name('Placeholder:0')
output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')
    im_names = glob.glob(ori_file)
for im_name in im_names:
img = cv2.imread(im_name)
img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
blobs, im_scales = _get_blobs(img, None)
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
cls_prob, box_pred = sess.run([output_cls_prob, output_box_pred], feed_dict={input_img: blobs['data']})
rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES)
scores = rois[:, 0]
boxes = rois[:, 1:5] / im_scales[0]
textdetector = TextDetector()
boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
im_dict = draw_boxes(img, im_name, boxes, scale)
return Response(json.dumps(im_dict), mimetype='application/json')
@app.route('/dailydata', methods=['GET'])
def dailydata():
todaydata = []
for data in session.query(File).filter(File.CreateTime >= datetime.date.today()).all():
result = {
"id": data.id,
"root": data.root,
"Value": data.Value,
"CreateTime": data.CreateTime
}
todaydata.append(result)
session.close()
return jsonify(todaydata)
def resize_im(im, scale, max_scale=None):
f = float(scale) / min(im.shape[0], im.shape[1])
if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale:
f = float(max_scale) / max(im.shape[0], im.shape[1])
return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f
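# --- Editorial sketch: resize_im scales so the short side reaches `scale`
# and caps the long side at `max_scale`. Worked example with hypothetical
# sizes: for a 600x1000 image, f = 900/600 = 1.5 and 1.5 * 1000 = 1500, which
# just fits max_scale=1500, so f stays 1.5.
def _resize_im_demo():
    im = np.zeros((600, 1000, 3), dtype=np.uint8)
    out, f = resize_im(im, scale=900, max_scale=1500)
    assert abs(f - 1.5) < 1e-9 and out.shape[:2] == (900, 1500)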
def draw_boxes(img, image_name, boxes, scale):
im_dict = dict()
base_name = image_name.split('\\')[-1]
with open('static\\images\\' + 'res_{}.txt'.format(base_name.split('.')[0]), 'w') as f:
for index, box in enumerate(boxes):
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
continue
            if box[8] >= 0.9:
                color = (0, 255, 0)
            elif box[8] >= 0.8:
                color = (255, 0, 0)
            else:
                # Default for low-confidence boxes so `color` is always
                # defined before the cv2.line calls below.
                color = (0, 0, 255)
cv2.line(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)
cv2.line(img, (int(box[0]), int(box[1])), (int(box[4]), int(box[5])), color, 2)
cv2.line(img, (int(box[6]), int(box[7])), (int(box[2]), int(box[3])), color, 2)
cv2.line(img, (int(box[4]), int(box[5])), (int(box[6]), int(box[7])), color, 2)
min_x = min(int(box[0] / scale), int(box[2] / scale), int(box[4] / scale), int(box[6] / scale))
min_y = min(int(box[1] / scale), int(box[3] / scale), int(box[5] / scale), int(box[7] / scale))
max_x = max(int(box[0] / scale), int(box[2] / scale), int(box[4] / scale), int(box[6] / scale))
max_y = max(int(box[1] / scale), int(box[3] / scale), int(box[5] / scale), int(box[7] / scale))
line = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\r\n'
f.write(line)
# crop image
file_name = base_name.split('.')
image_crop = cv2.imread(image_name)[min_y:max_y, min_x:max_x]
# resize image
r = 100.0 / image_crop.shape[0]
width, height = (int(image_crop.shape[1] * r), 100)
img_resize = cv2.resize(image_crop, (width, height))
# save image
whitelist = "01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ."
save_name = file_name[0] + '_' + str(index) + '.' + file_name[1]
cv2.imwrite(os.path.join("static\\images", save_name), img_resize)
im_text = pytesseract.image_to_string(image=img_resize,
lang='cntr',
config="-psm 6 -c tessedit_char_whitelist=" + whitelist)
im_dict['static\\images\\' + save_name] = im_text
img = cv2.resize(img, None, None, fx=1.0 / scale, fy=1.0 / scale, interpolation=cv2.INTER_LINEAR)
cv2.imwrite(os.path.join("static\\images", base_name), img)
return im_dict
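# --- Editorial sketch: the per-crop OCR call from draw_boxes, isolated. The
# whitelist restricts output to container-code characters; 'cntr' refers to a
# custom tesseract traineddata file that must be installed separately.
def _ocr_crop_demo(crop):
    # crop: BGR numpy array of a single text line, resized to height 100
    whitelist = "01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ."
    return pytesseract.image_to_string(
        image=crop, lang='cntr',
        config="-psm 6 -c tessedit_char_whitelist=" + whitelist)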
if __name__ == "__main__":
app.run(debug=True)
| [
"tensorflow.python.platform.gfile.FastGFile",
"os.remove",
"json.dumps",
"tensorflow.ConfigProto",
"flask.jsonify",
"numpy.linalg.norm",
"glob.glob",
"os.path.join",
"flask.request.get_json",
"flask.redirect",
"lib.text_connector.detectors.TextDetector",
"flask.render_template",
"tensorflow.... | [((725, 740), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (730, 740), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((750, 787), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///example.db"""'], {}), "('sqlite:///example.db')\n", (763, 787), False, 'from sqlalchemy import create_engine\n'), ((829, 854), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (841, 854), False, 'from sqlalchemy.orm import sessionmaker\n'), ((705, 716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (714, 716), False, 'import os\n'), ((1207, 1243), 'flask.render_template', 'render_template', (['"""/aicontainer.html"""'], {}), "('/aicontainer.html')\n", (1222, 1243), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((1288, 1317), 'flask.render_template', 'render_template', (['"""/demo.html"""'], {}), "('/demo.html')\n", (1303, 1317), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((1396, 1414), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1412, 1414), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((1712, 1725), 'flask.jsonify', 'jsonify', (['(True)'], {}), '(True)\n', (1719, 1725), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((2930, 2959), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2945, 2959), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((3039, 3057), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3055, 3057), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((3114, 3144), 'lib.fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['"""ctpn/text.yml"""'], {}), "('ctpn/text.yml')\n", (3127, 3144), False, 'from lib.fast_rcnn.config import cfg, cfg_from_file\n'), ((3158, 3199), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (3172, 3199), True, 'import tensorflow as tf\n'), ((3211, 3236), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3221, 3236), True, 'import tensorflow as tf\n'), ((5100, 5118), 'flask.jsonify', 'jsonify', (['todaydata'], {}), '(todaydata)\n', (5107, 5118), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((7757, 7853), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': '(1.0 / scale)', 'fy': '(1.0 / scale)', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, None, None, fx=1.0 / scale, fy=1.0 / scale, interpolation=\n cv2.INTER_LINEAR)\n', (7767, 7853), False, 'import cv2\n'), ((3246, 3283), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['"""data/ctpn.pb"""', '"""rb"""'], {}), "('data/ctpn.pb', 'rb')\n", (3261, 3283), False, 'from tensorflow.python.platform import gfile\n'), ((3310, 3323), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (3321, 3323), True, 'import tensorflow as tf\n'), ((3408, 3447), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3427, 3447), True, 'import tensorflow as tf\n'), ((3461, 3494), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3492, 3494), True, 'import tensorflow as tf\n'), ((3734, 
3756), 'os.path.join', 'os.path.join', (['ori_file'], {}), '(ori_file)\n', (3746, 3756), False, 'import os\n'), ((3801, 3820), 'cv2.imread', 'cv2.imread', (['im_name'], {}), '(im_name)\n', (3811, 3820), False, 'import cv2\n'), ((3942, 3963), 'lib.fast_rcnn.test._get_blobs', '_get_blobs', (['img', 'None'], {}), '(img, None)\n', (3952, 3963), False, 'from lib.fast_rcnn.test import _get_blobs\n'), ((4304, 4402), 'lib.rpn_msr.proposal_layer_tf.proposal_layer', 'proposal_layer', (['cls_prob', 'box_pred', "blobs['im_info']", '"""TEST"""'], {'anchor_scales': 'cfg.ANCHOR_SCALES'}), "(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=\n cfg.ANCHOR_SCALES)\n", (4318, 4402), False, 'from lib.rpn_msr.proposal_layer_tf import proposal_layer\n'), ((4494, 4508), 'lib.text_connector.detectors.TextDetector', 'TextDetector', ([], {}), '()\n', (4506, 4508), False, 'from lib.text_connector.detectors import TextDetector\n'), ((4668, 4687), 'json.dumps', 'json.dumps', (['im_dict'], {}), '(im_dict)\n', (4678, 4687), False, 'import json\n'), ((5364, 5434), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'f', 'fy': 'f', 'interpolation': 'cv2.INTER_LINEAR'}), '(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR)\n', (5374, 5434), False, 'import cv2\n'), ((7865, 7906), 'os.path.join', 'os.path.join', (['"""static\\\\images"""', 'base_name'], {}), "('static\\\\images', base_name)\n", (7877, 7906), False, 'import os\n'), ((1625, 1648), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1646, 1648), False, 'import datetime\n'), ((1940, 1961), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1948, 1961), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((2051, 2072), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2059, 2072), False, 'from flask import Flask, request, redirect, render_template, Response, jsonify\n'), ((2202, 2248), 'glob.glob', 'glob.glob', (["(app.config['UPLOAD_FOLDER'] + '\\\\*')"], {}), "(app.config['UPLOAD_FOLDER'] + '\\\\*')\n", (2211, 2248), False, 'import glob\n'), ((2357, 2413), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file.filename'], {}), "(app.config['UPLOAD_FOLDER'], file.filename)\n", (2369, 2413), False, 'import os\n'), ((2434, 2499), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_ORIGINAL_FOLDER']", 'file.filename'], {}), "(app.config['UPLOAD_ORIGINAL_FOLDER'], file.filename)\n", (2446, 2499), False, 'import os\n'), ((2590, 2612), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2606, 2612), False, 'import os\n'), ((2785, 2845), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_ORIGINAL_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_ORIGINAL_FOLDER'], filename)\n", (2797, 2845), False, 'import os\n'), ((4060, 4145), 'numpy.array', 'np.array', (['[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]]'], {'dtype': 'np.float32'}), '([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32\n )\n', (4068, 4145), True, 'import numpy as np\n'), ((7158, 7197), 'cv2.resize', 'cv2.resize', (['image_crop', '(width, height)'], {}), '(image_crop, (width, height))\n', (7168, 7197), False, 'import cv2\n'), ((7467, 7587), 'pytesseract.image_to_string', 'pytesseract.image_to_string', ([], {'image': 'img_resize', 'lang': '"""cntr"""', 'config': "('-psm 6 -c tessedit_char_whitelist=' + whitelist)"}), "(image=img_resize, lang='cntr', config=\n '-psm 6 -c tessedit_char_whitelist=' + whitelist)\n", 
(7494, 7587), False, 'import pytesseract\n'), ((2297, 2312), 'os.remove', 'os.remove', (['item'], {}), '(item)\n', (2306, 2312), False, 'import os\n'), ((2694, 2754), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_ORIGINAL_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_ORIGINAL_FOLDER'], filename)\n", (2706, 2754), False, 'import os\n'), ((6948, 6970), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (6958, 6970), False, 'import cv2\n'), ((7390, 7431), 'os.path.join', 'os.path.join', (['"""static\\\\images"""', 'save_name'], {}), "('static\\\\images', save_name)\n", (7402, 7431), False, 'import os\n'), ((2640, 2652), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (2650, 2652), False, 'import uuid\n'), ((4860, 4881), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4879, 4881), False, 'import datetime\n'), ((5702, 5733), 'numpy.linalg.norm', 'np.linalg.norm', (['(box[0] - box[1])'], {}), '(box[0] - box[1])\n', (5716, 5733), True, 'import numpy as np\n'), ((5741, 5772), 'numpy.linalg.norm', 'np.linalg.norm', (['(box[3] - box[0])'], {}), '(box[3] - box[0])\n', (5755, 5772), True, 'import numpy as np\n')] |
from jbdl.rbdl.utils import ModelWrapper
import os
import numpy as np
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
print(CURRENT_PATH)
mdlw = ModelWrapper()
mdlw.load(os.path.join(CURRENT_PATH, 'whole_max_v0.json'))
mdlw.nf = 3
mdlw.contact_force_lb = np.array([-1000.0, -1000.0, 0.0]).reshape(-1, 1)
mdlw.contact_force_ub = np.array([1000.0, 1000.0, 3000.0]).reshape(-1, 1)
mdlw.contact_force_kp = np.array([10000.0, 10000.0, 10000.0]).reshape(-1, 1)
mdlw.contact_force_kd = np.array([1000.0, 1000.0, 1000.0]).reshape(-1, 1)
mdlw.contact_pos_lb = np.array([0.0001, 0.0001, 0.0001]).reshape(-1, 1)
mdlw.contact_pos_ub = np.array([0.0001, 0.0001, 0.0001]).reshape(-1, 1)
mdlw.contact_vel_lb = np.array([-0.05, -0.05, -0.05]).reshape(-1, 1)
mdlw.contact_vel_ub = np.array([0.01, 0.01, 0.01]).reshape(-1, 1)
mdlw.save(os.path.join(CURRENT_PATH, 'whole_max_v1.json')) | [
"numpy.array",
"os.path.realpath",
"os.path.join",
"jbdl.rbdl.utils.ModelWrapper"
] | [((158, 172), 'jbdl.rbdl.utils.ModelWrapper', 'ModelWrapper', ([], {}), '()\n', (170, 172), False, 'from jbdl.rbdl.utils import ModelWrapper\n'), ((103, 129), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'import os\n'), ((183, 230), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""whole_max_v0.json"""'], {}), "(CURRENT_PATH, 'whole_max_v0.json')\n", (195, 230), False, 'import os\n'), ((835, 882), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""whole_max_v1.json"""'], {}), "(CURRENT_PATH, 'whole_max_v1.json')\n", (847, 882), False, 'import os\n'), ((270, 303), 'numpy.array', 'np.array', (['[-1000.0, -1000.0, 0.0]'], {}), '([-1000.0, -1000.0, 0.0])\n', (278, 303), True, 'import numpy as np\n'), ((343, 377), 'numpy.array', 'np.array', (['[1000.0, 1000.0, 3000.0]'], {}), '([1000.0, 1000.0, 3000.0])\n', (351, 377), True, 'import numpy as np\n'), ((417, 454), 'numpy.array', 'np.array', (['[10000.0, 10000.0, 10000.0]'], {}), '([10000.0, 10000.0, 10000.0])\n', (425, 454), True, 'import numpy as np\n'), ((494, 528), 'numpy.array', 'np.array', (['[1000.0, 1000.0, 1000.0]'], {}), '([1000.0, 1000.0, 1000.0])\n', (502, 528), True, 'import numpy as np\n'), ((567, 601), 'numpy.array', 'np.array', (['[0.0001, 0.0001, 0.0001]'], {}), '([0.0001, 0.0001, 0.0001])\n', (575, 601), True, 'import numpy as np\n'), ((639, 673), 'numpy.array', 'np.array', (['[0.0001, 0.0001, 0.0001]'], {}), '([0.0001, 0.0001, 0.0001])\n', (647, 673), True, 'import numpy as np\n'), ((711, 742), 'numpy.array', 'np.array', (['[-0.05, -0.05, -0.05]'], {}), '([-0.05, -0.05, -0.05])\n', (719, 742), True, 'import numpy as np\n'), ((780, 808), 'numpy.array', 'np.array', (['[0.01, 0.01, 0.01]'], {}), '([0.01, 0.01, 0.01])\n', (788, 808), True, 'import numpy as np\n')] |
from itertools import combinations
from math import sqrt, log
import numpy as np
from random import uniform
from statsmodels.stats.power import GofChisquarePower
from typing import List
from beta_bernouilli_bandit import BetaBernouilliBandit
from thompson_sampling import ThompsonSamplingPolicy
from gen_preference_matrix import PreferenceMatrix
import pdb
class DoubleThompsonSamplingPolicy:
def __init__(self, preference_matrix: PreferenceMatrix, alpha: float = 0.001):
self.preference_matrix = preference_matrix
self.rewards_matrix = np.zeros(preference_matrix.shape)
self.alpha = alpha
self.timestep = 1
self.rewards_over_time = []
self.bandits = {}
self.num_actions = preference_matrix.shape[0]
for i in range(preference_matrix.shape[0]):
self.bandits[i] = {
j: BetaBernouilliBandit()
for j in range(i, preference_matrix.shape[0])
if j != i
}
self.upper_conf_bound = np.zeros((self.num_actions, self.num_actions))
self.lower_conf_bound = np.zeros((self.num_actions, self.num_actions))
self.strong_regret = 0
self.weak_regret = 0
def choose_actions(self):
first_action = self.choose_first_action()
second_action = self.choose_second_action(first_action)
self.update_borda_reward(first_action, second_action)
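        # Simulate the duel outcome (a note on the line below, not in the original):
        # first_action beats second_action with probability
        # preference_matrix[first_action][second_action].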
reward = (
1
if uniform(0, 1) < self.preference_matrix[first_action][second_action]
else 0
)
if first_action < second_action:
self.bandits[first_action][second_action].update_success_or_failure(reward)
else:
self.bandits[second_action][first_action].update_success_or_failure(
1 - reward
)
if not ((first_action == 0 and reward == 1) or (second_action == 0 and reward == 0)):
            # Regret accrues unless arm 0 (assumed here to be the Condorcet winner) was dueled and won.
self.strong_regret += self.get_epsilon(first_action)
self.strong_regret += self.get_epsilon(second_action)
self.weak_regret += min(self.get_epsilon(first_action), self.get_epsilon(second_action))
self.timestep += 1
return first_action, second_action
def choose_first_action(self):
upper_conf_bound = np.zeros((self.num_actions, self.num_actions))
lower_conf_bound = np.zeros((self.num_actions, self.num_actions))
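        # The bounds below are the empirical win rate of arm i over arm j, widened
        # (narrowed) by a sqrt(alpha * ln(t) / N_ij) confidence radius, where N_ij is
        # the number of duels between i and j so far; never-dueled pairs get the
        # widest possible bounds.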
for i in range(self.num_actions):
for j in range(self.num_actions):
if j == i:
upper_conf_bound[i][j] = 0.5
lower_conf_bound[i][j] = 0.5
continue
if j < i:
wins = self.bandits[j][i].losses
losses = self.bandits[j][i].wins
else:
wins = self.bandits[i][j].wins
losses = self.bandits[i][j].losses
if wins + losses == 0:
history = 1
cb = 1
else:
history = wins / (wins + losses)
cb = sqrt((self.alpha * log(self.timestep)) / (wins + losses))
upper_conf_bound[i][j] = history + cb
lower_conf_bound[i][j] = history - cb
self.upper_conf_bound = upper_conf_bound
self.lower_conf_bound = lower_conf_bound
# copeland_ub = (1 / (self.preference_matrix.shape[0] - 1)) * np.sum(
# upper_conf_bound, axis=1
# )
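        # Copeland upper bound: for each arm, count the opponents it could still
        # plausibly beat (UCB > 1/2); the candidate set is the arms attaining the
        # maximum count.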
copeland_ub = np.zeros(self.num_actions)
for i in range(0, self.num_actions):
copeland_score = 0
for j in range(0, self.num_actions):
if i == j:
                    continue
elif upper_conf_bound[i][j] > 0.5:
copeland_score += 1
copeland_ub[i] = copeland_score
candidates = np.argwhere(copeland_ub == np.amax(copeland_ub))
estimated_samples = np.zeros(self.rewards_matrix.shape)
for i in range(self.rewards_matrix.shape[0]):
for j in range(i + 1, self.num_actions):
estimated_samples[i][j] = self.bandits[i][j].draw()
estimated_samples[j][i] = 1 - estimated_samples[i][j]
        likely_wins = np.zeros(self.num_actions)
        for c in candidates.flatten():
            for j in range(self.num_actions):
                if estimated_samples[c][j] > 1 / 2:
                    likely_wins[c] += 1
        action = np.random.choice(
            np.argwhere(likely_wins == np.amax(likely_wins)).flatten()
        )  # break ties randomly
return action
def choose_second_action(self, first_action: int):
expected_samples = np.zeros(self.rewards_matrix.shape)
expected_samples[first_action][first_action] = 0.5
for i in range(self.num_actions):
if i == first_action:
continue
if i < first_action:
expected_samples[i][first_action] = self.bandits[i][first_action].draw()
else:
expected_samples[i][first_action] = (
1 - self.bandits[first_action][i].draw()
)
uncertain_pairs = np.zeros((self.num_actions, 1))
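        # Second arm: restrict to arms whose lower confidence bound against the
        # first arm is still below 1/2 (not yet ruled out), then take the one with
        # the largest sampled win probability over the first arm.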
for i in range(self.num_actions):
if i == first_action:
uncertain_pairs[i] = -1 # do not allow self-dueling.
if self.lower_conf_bound[i][first_action] < 1 / 2:
uncertain_pairs[i] = expected_samples[i][first_action]
action = np.argmax(uncertain_pairs)
return action
def update_borda_reward(self, first_action: int, second_action: int) -> None:
total_reward = 0
total_reward += self.preference_matrix.borda_score(first_action)
total_reward += self.preference_matrix.borda_score(second_action)
self.rewards_over_time.append(total_reward)
def return_preferences_from_duel_history(self):
        predicted_pref_matrix = np.zeros(self.rewards_matrix.shape)
        for i in self.bandits:
            for j in self.bandits[i]:
                total = self.bandits[i][j].wins + self.bandits[i][j].losses
                if total == 0:
                    continue  # pair never dueled; leave its estimate at 0
                predicted_pref_matrix[i][j] = self.bandits[i][j].wins / total
                predicted_pref_matrix[j][i] = self.bandits[i][j].losses / total
        return predicted_pref_matrix
    def get_power(self, effect_size: float, action1: int, action2: int) -> float:
        lo, hi = (action1, action2) if action1 < action2 else (action2, action1)
        power = GofChisquarePower().solve_power(
            effect_size=effect_size,
            nobs=self.bandits[lo][hi].wins + self.bandits[lo][hi].losses,
            alpha=0.05,
            n_bins=2,
        )
        return power
    def get_all_power(self, effect_size: float) -> List:
        powers = []
        for action1 in range(self.num_actions):
            for action2 in range(action1 + 1, self.num_actions):
                pair_effect_size = effect_size
                if pair_effect_size is None:
                    # Cohen's w for a two-bin test against the fair-coin null reduces to 2 * |p - 0.5|.
                    pair_effect_size = 2 * abs(
                        self.preference_matrix.data[action1][action2] - 0.5
                    )
                powers.append(
                    (self.get_power(pair_effect_size, action1, action2), action1, action2)
                )
        return powers
def get_epsilon(self, action):
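        # Per-arm regret gap: P(Condorcet winner beats `action`) - 1/2,
        # which is zero when `action` is the winner itself.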
best_arm = self.preference_matrix.condorcet_winner()
return self.preference_matrix[best_arm][action] - 0.5
if __name__ == "__main__":
pm = PreferenceMatrix(num_actions=4)
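    # Entry [i][j] of the matrix below is the probability that arm i beats arm j.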
pm.set_matrix_explicit(
np.array(
[
[0.5, 0.8, 0.6, 0.4],
[0.2, 0.5, 0.9, 0.3],
[0.4, 0.1, 0.5, 0.5],
[0.6, 0.7, 0.5, 0.5],
]
)
)
#pm.set_matrix_random_with_condorcet_winner(0.1)
print(pm.num_observations)
# sampler = DoubleThompsonSamplingPolicy(preference_matrix=pm, alpha=1e-32)
# actions_arr = {}
# for _ in range(100000):
# actions = sampler.choose_actions()
# if actions not in actions_arr:
# actions_arr[actions] = 1
# else:
# actions_arr[actions] += 1
# print(actions_arr)
# print(sampler.return_preferences_from_duel_history())
| [
"beta_bernouilli_bandit.BetaBernouilliBandit",
"statsmodels.stats.power.GofChisquarePower",
"numpy.argmax",
"random.uniform",
"numpy.zeros",
"numpy.amax",
"numpy.array",
"math.log",
"gen_preference_matrix.PreferenceMatrix"
] | [((8156, 8187), 'gen_preference_matrix.PreferenceMatrix', 'PreferenceMatrix', ([], {'num_actions': '(4)'}), '(num_actions=4)\n', (8172, 8187), False, 'from gen_preference_matrix import PreferenceMatrix\n'), ((578, 611), 'numpy.zeros', 'np.zeros', (['preference_matrix.shape'], {}), '(preference_matrix.shape)\n', (586, 611), True, 'import numpy as np\n'), ((1053, 1099), 'numpy.zeros', 'np.zeros', (['(self.num_actions, self.num_actions)'], {}), '((self.num_actions, self.num_actions))\n', (1061, 1099), True, 'import numpy as np\n'), ((1133, 1179), 'numpy.zeros', 'np.zeros', (['(self.num_actions, self.num_actions)'], {}), '((self.num_actions, self.num_actions))\n', (1141, 1179), True, 'import numpy as np\n'), ((2432, 2478), 'numpy.zeros', 'np.zeros', (['(self.num_actions, self.num_actions)'], {}), '((self.num_actions, self.num_actions))\n', (2440, 2478), True, 'import numpy as np\n'), ((2507, 2553), 'numpy.zeros', 'np.zeros', (['(self.num_actions, self.num_actions)'], {}), '((self.num_actions, self.num_actions))\n', (2515, 2553), True, 'import numpy as np\n'), ((3731, 3757), 'numpy.zeros', 'np.zeros', (['self.num_actions'], {}), '(self.num_actions)\n', (3739, 3757), True, 'import numpy as np\n'), ((4189, 4224), 'numpy.zeros', 'np.zeros', (['self.rewards_matrix.shape'], {}), '(self.rewards_matrix.shape)\n', (4197, 4224), True, 'import numpy as np\n'), ((4499, 4534), 'numpy.zeros', 'np.zeros', (['self.rewards_matrix.shape'], {}), '(self.rewards_matrix.shape)\n', (4507, 4534), True, 'import numpy as np\n'), ((4976, 5011), 'numpy.zeros', 'np.zeros', (['self.rewards_matrix.shape'], {}), '(self.rewards_matrix.shape)\n', (4984, 5011), True, 'import numpy as np\n'), ((5484, 5515), 'numpy.zeros', 'np.zeros', (['(self.num_actions, 1)'], {}), '((self.num_actions, 1))\n', (5492, 5515), True, 'import numpy as np\n'), ((5820, 5846), 'numpy.argmax', 'np.argmax', (['uncertain_pairs'], {}), '(uncertain_pairs)\n', (5829, 5846), True, 'import numpy as np\n'), ((6273, 6308), 'numpy.zeros', 'np.zeros', (['self.rewards_matrix.shape'], {}), '(self.rewards_matrix.shape)\n', (6281, 6308), True, 'import numpy as np\n'), ((8226, 8328), 'numpy.array', 'np.array', (['[[0.5, 0.8, 0.6, 0.4], [0.2, 0.5, 0.9, 0.3], [0.4, 0.1, 0.5, 0.5], [0.6, \n 0.7, 0.5, 0.5]]'], {}), '([[0.5, 0.8, 0.6, 0.4], [0.2, 0.5, 0.9, 0.3], [0.4, 0.1, 0.5, 0.5],\n [0.6, 0.7, 0.5, 0.5]])\n', (8234, 8328), True, 'import numpy as np\n'), ((892, 914), 'beta_bernouilli_bandit.BetaBernouilliBandit', 'BetaBernouilliBandit', ([], {}), '()\n', (912, 914), False, 'from beta_bernouilli_bandit import BetaBernouilliBandit\n'), ((1505, 1518), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1512, 1518), False, 'from random import uniform\n'), ((4136, 4156), 'numpy.amax', 'np.amax', (['copeland_ub'], {}), '(copeland_ub)\n', (4143, 4156), True, 'import numpy as np\n'), ((6891, 6910), 'statsmodels.stats.power.GofChisquarePower', 'GofChisquarePower', ([], {}), '()\n', (6908, 6910), False, 'from statsmodels.stats.power import GofChisquarePower\n'), ((7189, 7208), 'statsmodels.stats.power.GofChisquarePower', 'GofChisquarePower', ([], {}), '()\n', (7206, 7208), False, 'from statsmodels.stats.power import GofChisquarePower\n'), ((4808, 4828), 'numpy.amax', 'np.amax', (['likely_wins'], {}), '(likely_wins)\n', (4815, 4828), True, 'import numpy as np\n'), ((3293, 3311), 'math.log', 'log', (['self.timestep'], {}), '(self.timestep)\n', (3296, 3311), False, 'from math import sqrt, log\n')] |
import numpy as np
import pytest
import torch
from PIL import Image
from torchvision.transforms import transforms
from continuum.datasets import InMemoryDataset
from continuum.scenarios import TransformationIncremental
NB_CLASSES = 6
@pytest.fixture
def numpy_data():
nb_data = 100 # not too small to have all classes
x_train = []
y_train = []
x_train.append(
np.array([np.random.randint(100, size=(2, 2, 3)).astype(dtype=np.uint8)] * nb_data)
)
    y_train.append(np.random.randint(NB_CLASSES, size=nb_data))
x_train = np.concatenate(x_train)
y_train = np.concatenate(y_train)
return x_train, y_train.astype(int)
'''
Test the initialization with three tasks
'''
def test_init(numpy_data):
x, y = numpy_data
dummy = InMemoryDataset(x, y, train='train')
Trsf_0 = []
Trsf_1 = [transforms.RandomAffine(degrees=[45, 45])]
Trsf_2 = [transforms.RandomAffine(degrees=[90, 90])]
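    # degrees=[45, 45] (resp. [90, 90]) collapses the sampling range to a single
    # angle, so each "random" affine is deterministic and can be re-applied below
    # for an exact tensor comparison.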
list_transf = [Trsf_0, Trsf_1, Trsf_2]
scenario = TransformationIncremental(
cl_dataset=dummy, incremental_transformations=list_transf
)
ref_data = None
raw_ref_data = None
for task_id, taskset in enumerate(scenario):
samples, _, _ = taskset.get_random_samples(10)
# we need raw data to apply same transformation as the TransformationIncremental class
raw_samples, _, _ = taskset.get_raw_samples(range(10))
if task_id == 0:
ref_data = samples
raw_ref_data = raw_samples
else:
# we verify that data has changed
assert not torch.all(ref_data.eq(samples))
assert (raw_samples == raw_ref_data
).all() # raw data should be the same in this scenario
# we test transformation on one data point and verify if it is applied
trsf = list_transf[task_id][0]
raw_sample = Image.fromarray(raw_ref_data[0].astype("uint8"))
trsf_data = trsf(raw_sample)
trsf_data = transforms.ToTensor()(trsf_data)
assert torch.all(trsf_data.eq(samples[0]))
'''
Test the initialization with three tasks with degree range
'''
def test_init_range(numpy_data):
x, y = numpy_data
dummy = InMemoryDataset(x, y)
Trsf_0 = []
Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
list_transf = [Trsf_0, Trsf_1, Trsf_2]
scenario = TransformationIncremental(
cl_dataset=dummy, incremental_transformations=list_transf
)
@pytest.mark.parametrize("shared_label_space", [False, True])
def test_init_shared_label_space(numpy_data, shared_label_space):
x, y = numpy_data
dummy = InMemoryDataset(x, y)
Trsf_0 = []
Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
dummy_transf = [Trsf_0, Trsf_1, Trsf_2]
scenario = TransformationIncremental(
cl_dataset=dummy,
incremental_transformations=dummy_transf,
shared_label_space=shared_label_space
)
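    # Shared label space: every task reuses labels [0, NB_CLASSES); otherwise
    # task t is remapped to the disjoint range [t * NB_CLASSES, (t + 1) * NB_CLASSES).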
for task_id, taskset in enumerate(scenario):
assert taskset.nb_classes == NB_CLASSES
classes = taskset.get_classes()
if shared_label_space:
assert classes.max() == NB_CLASSES - 1
assert classes.min() == 0
else:
assert classes.max() == (NB_CLASSES * (task_id + 1)) - 1
assert classes.min() == (NB_CLASSES * task_id)
def test_get_task_transformation(numpy_data):
x, y = numpy_data
dummy = InMemoryDataset(x, y)
Trsf_0 = []
Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
dummy_transf = [Trsf_0, Trsf_1, Trsf_2]
base_transformations = [
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]
scenario = TransformationIncremental(
cl_dataset=dummy,
incremental_transformations=dummy_transf,
base_transformations=base_transformations
)
for task_id, taskset in enumerate(scenario):
# first task specific transformation then base_transformation
tot_transf_task = transforms.Compose(dummy_transf[task_id] + base_transformations)
# we compare the str representation of the composition
assert tot_transf_task.__repr__() == scenario.get_task_transformation(task_id).__repr__()
def test_init_fail2(numpy_data):
train = numpy_data
dummy = InMemoryDataset(*train)
# No transformation is set
with pytest.raises(TypeError):
scenario = TransformationIncremental(cl_dataset=dummy)
| [
"continuum.datasets.InMemoryDataset",
"continuum.scenarios.TransformationIncremental",
"torchvision.transforms.transforms.RandomAffine",
"torchvision.transforms.transforms.ToTensor",
"pytest.raises",
"numpy.random.randint",
"torchvision.transforms.transforms.Normalize",
"torchvision.transforms.transfo... | [((2556, 2616), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shared_label_space"""', '[False, True]'], {}), "('shared_label_space', [False, True])\n", (2579, 2616), False, 'import pytest\n'), ((560, 583), 'numpy.concatenate', 'np.concatenate', (['x_train'], {}), '(x_train)\n', (574, 583), True, 'import numpy as np\n'), ((598, 621), 'numpy.concatenate', 'np.concatenate', (['y_train'], {}), '(y_train)\n', (612, 621), True, 'import numpy as np\n'), ((777, 813), 'continuum.datasets.InMemoryDataset', 'InMemoryDataset', (['x', 'y'], {'train': '"""train"""'}), "(x, y, train='train')\n", (792, 813), False, 'from continuum.datasets import InMemoryDataset\n'), ((1005, 1094), 'continuum.scenarios.TransformationIncremental', 'TransformationIncremental', ([], {'cl_dataset': 'dummy', 'incremental_transformations': 'list_transf'}), '(cl_dataset=dummy, incremental_transformations=\n list_transf)\n', (1030, 1094), False, 'from continuum.scenarios import TransformationIncremental\n'), ((2241, 2262), 'continuum.datasets.InMemoryDataset', 'InMemoryDataset', (['x', 'y'], {}), '(x, y)\n', (2256, 2262), False, 'from continuum.datasets import InMemoryDataset\n'), ((2454, 2543), 'continuum.scenarios.TransformationIncremental', 'TransformationIncremental', ([], {'cl_dataset': 'dummy', 'incremental_transformations': 'list_transf'}), '(cl_dataset=dummy, incremental_transformations=\n list_transf)\n', (2479, 2543), False, 'from continuum.scenarios import TransformationIncremental\n'), ((2717, 2738), 'continuum.datasets.InMemoryDataset', 'InMemoryDataset', (['x', 'y'], {}), '(x, y)\n', (2732, 2738), False, 'from continuum.datasets import InMemoryDataset\n'), ((2931, 3060), 'continuum.scenarios.TransformationIncremental', 'TransformationIncremental', ([], {'cl_dataset': 'dummy', 'incremental_transformations': 'dummy_transf', 'shared_label_space': 'shared_label_space'}), '(cl_dataset=dummy, incremental_transformations=\n dummy_transf, shared_label_space=shared_label_space)\n', (2956, 3060), False, 'from continuum.scenarios import TransformationIncremental\n'), ((3568, 3589), 'continuum.datasets.InMemoryDataset', 'InMemoryDataset', (['x', 'y'], {}), '(x, y)\n', (3583, 3589), False, 'from continuum.datasets import InMemoryDataset\n'), ((3900, 4033), 'continuum.scenarios.TransformationIncremental', 'TransformationIncremental', ([], {'cl_dataset': 'dummy', 'incremental_transformations': 'dummy_transf', 'base_transformations': 'base_transformations'}), '(cl_dataset=dummy, incremental_transformations=\n dummy_transf, base_transformations=base_transformations)\n', (3925, 4033), False, 'from continuum.scenarios import TransformationIncremental\n'), ((4502, 4525), 'continuum.datasets.InMemoryDataset', 'InMemoryDataset', (['*train'], {}), '(*train)\n', (4517, 4525), False, 'from continuum.datasets import InMemoryDataset\n'), ((499, 542), 'numpy.random.randint', 'np.random.randint', (['NB_CLASSES'], {'size': 'nb_data'}), '(NB_CLASSES, size=nb_data)\n', (516, 542), True, 'import numpy as np\n'), ((845, 886), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[45, 45]'}), '(degrees=[45, 45])\n', (868, 886), False, 'from torchvision.transforms import transforms\n'), ((902, 943), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[90, 90]'}), '(degrees=[90, 90])\n', (925, 943), False, 'from torchvision.transforms import transforms\n'), ((2294, 2335), 
'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[40, 50]'}), '(degrees=[40, 50])\n', (2317, 2335), False, 'from torchvision.transforms import transforms\n'), ((2351, 2392), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[85, 95]'}), '(degrees=[85, 95])\n', (2374, 2392), False, 'from torchvision.transforms import transforms\n'), ((2770, 2811), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[40, 50]'}), '(degrees=[40, 50])\n', (2793, 2811), False, 'from torchvision.transforms import transforms\n'), ((2827, 2868), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[85, 95]'}), '(degrees=[85, 95])\n', (2850, 2868), False, 'from torchvision.transforms import transforms\n'), ((3621, 3662), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[40, 50]'}), '(degrees=[40, 50])\n', (3644, 3662), False, 'from torchvision.transforms import transforms\n'), ((3678, 3719), 'torchvision.transforms.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '[85, 95]'}), '(degrees=[85, 95])\n', (3701, 3719), False, 'from torchvision.transforms import transforms\n'), ((3804, 3825), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3823, 3825), False, 'from torchvision.transforms import transforms\n'), ((3835, 3877), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (3855, 3877), False, 'from torchvision.transforms import transforms\n'), ((4205, 4269), 'torchvision.transforms.transforms.Compose', 'transforms.Compose', (['(dummy_transf[task_id] + base_transformations)'], {}), '(dummy_transf[task_id] + base_transformations)\n', (4223, 4269), False, 'from torchvision.transforms import transforms\n'), ((4567, 4591), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4580, 4591), False, 'import pytest\n'), ((4612, 4655), 'continuum.scenarios.TransformationIncremental', 'TransformationIncremental', ([], {'cl_dataset': 'dummy'}), '(cl_dataset=dummy)\n', (4637, 4655), False, 'from continuum.scenarios import TransformationIncremental\n'), ((2014, 2035), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2033, 2035), False, 'from torchvision.transforms import transforms\n'), ((400, 438), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(2, 2, 3)'}), '(100, size=(2, 2, 3))\n', (417, 438), True, 'import numpy as np\n')] |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from scipy import special
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.rule_based_profiler.domain_builder.domain import Domain
from great_expectations.rule_based_profiler.parameter_builder import (
MultiBatchParameterBuilder,
)
from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
ParameterContainer,
build_parameter_container,
)
from great_expectations.rule_based_profiler.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.util import is_int, is_numeric
from great_expectations.validator.validation_graph import MetricConfiguration
from great_expectations.validator.validator import Validator
NP_EPSILON: np.float64 = np.finfo(float).eps
NP_SQRT_2: np.float64 = np.sqrt(2.0)
MAX_DECIMALS: int = 9
class NumericMetricRangeMultiBatchParameterBuilder(MultiBatchParameterBuilder):
"""
A Multi-Batch implementation for obtaining the range estimation bounds for a resolved (evaluated) numeric metric,
using domain_kwargs, value_kwargs, metric_name, and false_positive_rate (tolerance) as arguments.
This Multi-Batch ParameterBuilder is general in the sense that any metric that computes numbers can be accommodated.
On the other hand, it is specific in the sense that the parameter names will always have the semantics of numeric
ranges, which will incorporate the requirements, imposed by the configured false_positive_rate tolerances.
"""
RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS: set = {
"lower_bound",
"upper_bound",
}
def __init__(
self,
parameter_name: str,
metric_name: str,
metric_domain_kwargs: Optional[Union[str, dict]] = "$domain.domain_kwargs",
metric_value_kwargs: Optional[Union[str, dict]] = None,
false_positive_rate: Optional[Union[float, str]] = 0.0,
round_decimals: Optional[Union[int, str]] = False,
truncate_distribution: Optional[
Union[Dict[str, Union[Optional[int], Optional[float]]], str]
] = None,
data_context: Optional[DataContext] = None,
batch_request: Optional[Union[dict, str]] = None,
):
"""
Args:
parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration);
it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric)
metric_domain_kwargs: used in MetricConfiguration
metric_value_kwargs: used in MetricConfiguration
false_positive_rate: user-configured fraction between 0 and 1 -- "FP/(FP + TN)" -- where:
FP stands for "false positives" and TN stands for "true negatives"; this rate specifies allowed "fall-out"
(in addition, a helpful identity used in this method is: false_positive_rate = 1 - true_negative_rate).
round_decimals: user-configured non-negative integer indicating the number of decimals of the
rounding precision of the computed parameter values (i.e., min_value, max_value) prior to packaging them on
output. If omitted, then no rounding is performed, unless the computed value is already an integer.
truncate_distribution: user-configured directive for whether or not to allow the computed parameter values
(i.e., lower_bound, upper_bound) to take on values outside the specified bounds when packaged on output.
data_context: DataContext
batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
"""
super().__init__(
parameter_name=parameter_name,
data_context=data_context,
batch_request=batch_request,
)
self._metric_name = metric_name
self._metric_domain_kwargs = metric_domain_kwargs
self._metric_value_kwargs = metric_value_kwargs
self._false_positive_rate = false_positive_rate
self._round_decimals = round_decimals
if not truncate_distribution:
truncate_distribution = {
"lower_bound": None,
"upper_bound": None,
}
else:
truncate_distribution_keys: set = set(truncate_distribution.keys())
if (
not truncate_distribution_keys
<= NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS
):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Unrecognized truncate_distribution_keys key(s) in {self.__class__.__name__}:
"{str(truncate_distribution_keys - NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS)}" detected.
"""
)
self._truncate_distribution = truncate_distribution
def _build_parameters(
self,
parameter_container: ParameterContainer,
domain: Domain,
validator: Validator,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
):
"""
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
        Returns:
            a ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details
The algorithm operates according to the following steps:
1. Obtain batch IDs of interest using DataContext and BatchRequest (unless passed explicitly as argument). Note
that this particular BatchRequest was specified as part of configuration for the present ParameterBuilder class.
(This is in contrast to the BatchRequest specified in Checkpoint configuration, or in pipeline, notebook, etc.)
2. Set up metric_domain_kwargs and metric_value_kwargs (using configuration and/or variables and parameters).
3. Instantiate the Validator object corresponding to BatchRequest (with a temporary expectation_suite_name) in
order to have access to all Batch objects, on each of which the specified metric_name will be computed.
4. While looping through the available batch_ids:
4.1: Update the metric_domain_kwargs with the specific batch_id (the iteration variable of the loop).
4.2: Create the metric_configuration_arguments using the metric_domain_kwargs from the previous step.
4.3: Compute metric_value using the local Validator object (which has access to the required Batch objects).
        4.4: Ensure that the metric_value is numeric (ranges can be computed for numeric-valued metrics only).
4.5: Append the value of the computed metric to the list (one for each batch_id -- loop iteration variable).
5. Convert the list of floating point metric computation results to a numpy array (for further computations).
6. Compute the mean and the standard deviation of the metric (aggregated over all the gathered Batch objects).
7. Compute the number of standard deviations (as a floating point number rounded to the nearest highest integer)
needed to create the "band" around the mean so as to achieve the specified false_positive_rate (note that the
false_positive_rate of 0.0 would result in an infinite number of standard deviations, hence it is "nudged" by
a small quantity, "epsilon", above 0.0 if false_positive_rate of 0.0 is provided as argument in constructor).
(Please refer to "https://en.wikipedia.org/wiki/Normal_distribution" and references therein for background.)
8. Compute the "band" around the mean as the min_value and max_value (to be used in ExpectationConfiguration).
9. Set up the arguments for and call build_parameter_container() to store the parameter as part of "rule state".
"""
batch_ids_for_metrics_calculations: Optional[
List[str]
] = self.get_batch_ids_for_metrics_calculations(
domain=domain,
variables=variables,
parameters=parameters,
)
if not batch_ids_for_metrics_calculations:
raise ge_exceptions.ProfilerExecutionError(
message=f"Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers."
)
validator_for_metrics_calculations: Validator = (
self.get_validator_for_metrics_calculations(
validator=validator,
domain=domain,
variables=variables,
parameters=parameters,
)
)
# Obtain domain kwargs from rule state (i.e., variables and parameters); from instance variable otherwise.
metric_domain_kwargs: Optional[
Union[str, dict]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self._metric_domain_kwargs,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
# Obtain value kwargs from rule state (i.e., variables and parameters); from instance variable otherwise.
metric_value_kwargs: Optional[
Union[str, dict]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self._metric_value_kwargs,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
metric_values: Union[
np.ndarray,
List[Union[int, np.int32, np.int64, float, np.float32, np.float64]],
] = []
metric_domain_kwargs_with_specific_batch_id: Optional[
Dict[str, Any]
] = copy.deepcopy(metric_domain_kwargs)
metric_value: Union[int, np.int32, np.int64, float, np.float32, np.float64]
batch_id: str
for batch_id in batch_ids_for_metrics_calculations:
metric_domain_kwargs_with_specific_batch_id["batch_id"] = batch_id
metric_configuration_arguments: Dict[str, Any] = {
"metric_name": self._metric_name,
"metric_domain_kwargs": metric_domain_kwargs_with_specific_batch_id,
"metric_value_kwargs": metric_value_kwargs,
"metric_dependencies": None,
}
metric_value = validator_for_metrics_calculations.get_metric(
metric=MetricConfiguration(**metric_configuration_arguments)
)
if not is_numeric(value=metric_value):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Applicability of {self.__class__.__name__} is restricted to numeric-valued metrics \
(value of type "{str(type(metric_value))}" was computed).
"""
)
metric_values.append(metric_value)
# Obtain round_decimals directive from rule state (i.e., variables and parameters); from instance variable otherwise.
round_decimals: Optional[
Union[Any]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self._round_decimals,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
if round_decimals is None:
round_decimals = MAX_DECIMALS
elif not isinstance(round_decimals, int) or (round_decimals < 0):
raise ge_exceptions.ProfilerExecutionError(
message=f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a
positive integer, or must be omitted (or set to None).
"""
)
if all(
[
np.issubdtype(type(metric_value), np.integer)
for metric_value in metric_values
]
):
round_decimals = 0
# Obtain truncate_distribution directive from rule state (i.e., variables and parameters); from instance variable otherwise.
truncate_distribution: Dict[
str, Union[Optional[int], Optional[float]]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self._truncate_distribution,
expected_return_type=dict,
variables=variables,
parameters=parameters,
)
distribution_boundary: Optional[Union[int, float]]
if not all(
[
(
distribution_boundary is None
or is_numeric(value=distribution_boundary)
)
for distribution_boundary in truncate_distribution.values()
]
):
raise ge_exceptions.ProfilerExecutionError(
message=f"""The directive "truncate_distribution" for {self.__class__.__name__} must specify the
[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).
"""
)
metric_values = np.array(metric_values, dtype=np.float64)
mean: Union[np.ndarray, np.float64] = np.mean(metric_values)
std: Union[np.ndarray, np.float64] = np.std(metric_values)
# Obtain false_positive_rate from rule state (i.e., variables and parameters); from instance variable otherwise.
false_positive_rate: Union[
Any, str
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self._false_positive_rate,
expected_return_type=(int, float),
variables=variables,
parameters=parameters,
)
if not (0.0 <= false_positive_rate <= 1.0):
raise ge_exceptions.ProfilerExecutionError(
message=f"False-Positive Rate for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."
)
if np.isclose(false_positive_rate, 0.0):
false_positive_rate = false_positive_rate + NP_EPSILON
true_negative_rate: float = 1.0 - false_positive_rate
stds_multiplier: np.float64 = NP_SQRT_2 * special.erfinv(true_negative_rate)
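        # E.g., false_positive_rate=0.05 gives true_negative_rate=0.95 and
        # sqrt(2) * erfinv(0.95) ~= 1.96, the familiar two-sided 95% z-score,
        # so the band below is mean +/- 1.96 * std.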
min_value: float = mean - stds_multiplier * std
max_value: float = mean + stds_multiplier * std
if round_decimals == 0:
min_value = round(min_value)
max_value = round(max_value)
else:
min_value = round(min_value, round_decimals)
max_value = round(max_value, round_decimals)
lower_bound: Optional[Union[int, float]] = truncate_distribution.get(
"lower_bound"
)
upper_bound: Optional[Union[int, float]] = truncate_distribution.get(
"upper_bound"
)
if lower_bound is not None:
min_value = max(min_value, lower_bound)
if upper_bound is not None:
max_value = min(max_value, upper_bound)
parameter_values: Dict[str, Any] = {
f"$parameter.{self.parameter_name}": {
"value": {
"min_value": min_value,
"max_value": max_value,
},
"details": {
# Note: the "metric_domain_kwargs" value, used in "details", corresponds to the active Batch.
# While any information can be placed into the "details" dictionary, this judicious choice will
# allow for the relevant "details" to be used as "meta" in ExpectationConfiguration and render well,
# without overwhelming the user (e.g., if instead all "batch_id" values were captured in "details").
"metric_configuration": {
"metric_name": self._metric_name,
"metric_domain_kwargs": metric_domain_kwargs,
"metric_value_kwargs": metric_value_kwargs,
"metric_dependencies": None,
},
},
},
}
build_parameter_container(
parameter_container=parameter_container, parameter_values=parameter_values
)
| [
"copy.deepcopy",
"great_expectations.rule_based_profiler.parameter_builder.parameter_container.build_parameter_container",
"great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type",
"numpy.std",
"great_expectations.util.is_numeric",
"scipy.special.erfinv",
"numpy.finfo",... | [((915, 927), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (922, 927), True, 'import numpy as np\n'), ((871, 886), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (879, 886), True, 'import numpy as np\n'), ((9316, 9503), 'great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self._metric_domain_kwargs', 'expected_return_type': 'None', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self._metric_domain_kwargs, expected_return_type=\n None, variables=variables, parameters=parameters)\n', (9360, 9503), False, 'from great_expectations.rule_based_profiler.util import get_parameter_value_and_validate_return_type\n'), ((9760, 9946), 'great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self._metric_value_kwargs', 'expected_return_type': 'None', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self._metric_value_kwargs, expected_return_type=\n None, variables=variables, parameters=parameters)\n', (9804, 9946), False, 'from great_expectations.rule_based_profiler.util import get_parameter_value_and_validate_return_type\n'), ((10262, 10297), 'copy.deepcopy', 'copy.deepcopy', (['metric_domain_kwargs'], {}), '(metric_domain_kwargs)\n', (10275, 10297), False, 'import copy\n'), ((11577, 11757), 'great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self._round_decimals', 'expected_return_type': 'None', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self._round_decimals, expected_return_type=None,\n variables=variables, parameters=parameters)\n', (11621, 11757), False, 'from great_expectations.rule_based_profiler.util import get_parameter_value_and_validate_return_type\n'), ((12640, 12828), 'great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self._truncate_distribution', 'expected_return_type': 'dict', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self._truncate_distribution, expected_return_type=\n dict, variables=variables, parameters=parameters)\n', (12684, 12828), False, 'from great_expectations.rule_based_profiler.util import get_parameter_value_and_validate_return_type\n'), ((13542, 13583), 'numpy.array', 'np.array', (['metric_values'], {'dtype': 'np.float64'}), '(metric_values, dtype=np.float64)\n', (13550, 13583), True, 'import numpy as np\n'), ((13631, 13653), 'numpy.mean', 'np.mean', (['metric_values'], {}), '(metric_values)\n', (13638, 13653), True, 'import numpy as np\n'), ((13699, 13720), 'numpy.std', 'np.std', (['metric_values'], {}), '(metric_values)\n', (13705, 13720), True, 'import numpy as np\n'), ((13912, 14106), 'great_expectations.rule_based_profiler.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self._false_positive_rate', 'expected_return_type': '(int, float)', 'variables': 'variables', 'parameters': 'parameters'}), 
'(domain=domain,\n parameter_reference=self._false_positive_rate, expected_return_type=(\n int, float), variables=variables, parameters=parameters)\n', (13956, 14106), False, 'from great_expectations.rule_based_profiler.util import get_parameter_value_and_validate_return_type\n'), ((14422, 14458), 'numpy.isclose', 'np.isclose', (['false_positive_rate', '(0.0)'], {}), '(false_positive_rate, 0.0)\n', (14432, 14458), True, 'import numpy as np\n'), ((16539, 16644), 'great_expectations.rule_based_profiler.parameter_builder.parameter_container.build_parameter_container', 'build_parameter_container', ([], {'parameter_container': 'parameter_container', 'parameter_values': 'parameter_values'}), '(parameter_container=parameter_container,\n parameter_values=parameter_values)\n', (16564, 16644), False, 'from great_expectations.rule_based_profiler.parameter_builder.parameter_container import ParameterContainer, build_parameter_container\n'), ((8670, 8814), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers."""'}), "(message=\n f'Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers.'\n )\n", (8706, 8814), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((13252, 13496), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The directive "truncate_distribution" for {self.__class__.__name__} must specify the\n[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).\n"""'}), '(message=\n f"""The directive "truncate_distribution" for {self.__class__.__name__} must specify the\n[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).\n"""\n )\n', (13288, 13496), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((14239, 14389), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""False-Positive Rate for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."""'}), "(message=\n f'False-Positive Rate for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval.'\n )\n", (14275, 14389), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((14640, 14674), 'scipy.special.erfinv', 'special.erfinv', (['true_negative_rate'], {}), '(true_negative_rate)\n', (14654, 14674), False, 'from scipy import special\n'), ((11044, 11074), 'great_expectations.util.is_numeric', 'is_numeric', ([], {'value': 'metric_value'}), '(value=metric_value)\n', (11054, 11074), False, 'from great_expectations.util import is_int, is_numeric\n'), ((11990, 12183), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a\npositive integer, or must be omitted (or set to None).\n"""'}), '(message=\n f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a\npositive integer, or must be omitted (or set to None).\n"""\n )\n', (12026, 12183), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((10957, 11010), 'great_expectations.validator.validation_graph.MetricConfiguration', 'MetricConfiguration', ([], {}), '(**metric_configuration_arguments)\n', (10976, 11010), False, 'from great_expectations.validator.validation_graph import 
MetricConfiguration\n'), ((13075, 13114), 'great_expectations.util.is_numeric', 'is_numeric', ([], {'value': 'distribution_boundary'}), '(value=distribution_boundary)\n', (13085, 13114), False, 'from great_expectations.util import is_int, is_numeric\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8
from __future__ import print_function, division, unicode_literals
import argparse
import glob
import json
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Ubuntu']})
rc('font', **{'monospace': ['Ubuntu Mono']})
rc('axes', titlesize="11")
rc('axes', labelsize="9")
rc('xtick', labelsize="9")
rc('ytick', labelsize="9")
def parse_args():
parser = argparse.ArgumentParser("Plot tool")
parser.add_argument(
"-b", "--bench-mode",
help="The 'benchmark mode' part of the file prefix"
)
parser.add_argument(
"-g", "--gen-mode",
help="The 'generator mode' part of the file prefix"
)
args = parser.parse_args()
return args
def construct_color_map(keys):
prop_cycle = plt.rcParams['axes.prop_cycle']
color_cycle = cycle(prop_cycle.by_key()['color'])
colors = {}
for key in keys:
color = next(color_cycle)
colors[key] = color
return colors
def compute_stats(keys, data):
stats = {}
for key in keys:
entries = [entry for entry in data if entry["name"] == key]
final_values = np.array([entry["times"][-1] for entry in entries])
stats[key] = final_values
return stats
class ZBiasFreePlotter(object):
def __init__(self):
self.plot_calls = []
def add_plot(self, f, xs, ys, *args, **kwargs):
self.plot_calls.append((f, xs, ys, args, kwargs))
def draw_plots(self, chunk_size=512):
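        # Split every series into chunks and draw all chunks of all series in one
        # globally shuffled order, so no single series is systematically rendered
        # on top of the others (avoids z-order bias in dense scatter plots).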
scheduled_calls = []
for f, xs, ys, args, kwargs in self.plot_calls:
assert(len(xs) == len(ys))
index = np.arange(len(xs))
np.random.shuffle(index)
index_blocks = [index[i:i+chunk_size] for i in np.arange(len(index))[::chunk_size]]
for i, index_block in enumerate(index_blocks):
# Only attach a label for one of the chunks
if i != 0 and kwargs.get("label") is not None:
kwargs = kwargs.copy()
kwargs["label"] = None
scheduled_calls.append((f, xs[index_block], ys[index_block], args, kwargs))
np.random.shuffle(scheduled_calls)
for f, xs, ys, args, kwargs in scheduled_calls:
f(xs, ys, *args, **kwargs)
def set_share_axes(ax, ax_target):
# https://stackoverflow.com/a/51684195/1804173
ax_target._shared_x_axes.join(ax_target, ax)
ax_target.xaxis.set_tick_params(which='both', labelbottom=False, labeltop=False)
ax_target.xaxis.offsetText.set_visible(False)
def main():
args = parse_args()
bench_mode = args.bench_mode
gen_mode = args.gen_mode
files = glob.glob("results/{}_{}_*.json".format(bench_mode, gen_mode))
if False:
# Hack to display "within group" comparisons. Should we fully support that?
files = glob.glob("results/insert_*_ArrayStump_*.json".format(bench_mode, gen_mode))
def patch(data, fn):
if "avg" in fn:
data["name"] = data["name"] + "AVG"
if "asc" in fn:
data["name"] = data["name"] + "ASC"
if "dsc" in fn:
data["name"] = data["name"] + "DSC"
return data
data = [
patch(json.load(open(f)), f)
for f in sorted(files)
]
data = [
json.load(open(f))
for f in sorted(files)
]
keys = [entry["name"] for entry in data if entry["run"] == 1]
color_map = construct_color_map(keys)
stats = compute_stats(keys, data)
# import IPython; IPython.embed()
fig, axes = plt.subplots(3, 1, figsize=(11.5, 9.5))
set_share_axes(axes[1], axes[0])
bias_free_plotter1 = ZBiasFreePlotter()
bias_free_plotter2 = ZBiasFreePlotter()
line_spacing = 0.024
y_text = 0.91
fig.text(0.77, y_text + line_spacing, "Total elapsed times [ms]", fontsize=9, weight="bold")
for i, entry in enumerate(data):
name = entry["name"]
iters = np.array(entry["iters"])
times = np.array(entry["times"]) * 1000
color = color_map[name]
is_primary = entry["run"] == 1
if is_primary:
label = name
mean = stats[name].mean() * 1000
std = stats[name].std() * 1000
fig.text(0.77, y_text, name, fontsize=9)
fig.text(0.91, y_text, "{:.3f}".format(mean), fontsize=9, ha="right")
fig.text(0.97, y_text, "± {:6.3f}".format(std), fontsize=9, ha="right")
y_text -= line_spacing
else:
label = None
deltas_xs = iters[1:]
deltas_ys = (times[1:] - times[:-1]) / entry["measure_every"]
axes[0].plot(
iters, times, "-",
c=color, alpha=0.5, label=label,
)
if False:
axes[1].plot(
deltas_xs, deltas_ys,
"o", c=color, ms=0.4, alpha=0.8, label=label,
)
else:
for ax in axes[1:]:
bias_free_plotter1.add_plot(
ax.plot, deltas_xs, deltas_ys,
",", c=color, ms=1, alpha=1, label=label
)
bias_free_plotter2.add_plot(
ax.plot, deltas_xs, deltas_ys,
"o", c=color, ms=4, alpha=0.007,
)
bias_free_plotter1.draw_plots()
bias_free_plotter2.draw_plots()
axes[0].legend(loc="best", prop={'size': 9})
for ax in axes:
ax.grid(color="#DDDDDD")
ax.set_facecolor('#FCFEFF')
axes[0].set_title("Total time elapsed", fontsize=10)
axes[1].set_title("Delta times (semi-log)", fontsize=10)
axes[2].set_title("Delta times (log-log)", fontsize=10)
axes[0].set_ylabel("Time [ms]")
axes[1].set_ylabel("Time / op [ms]")
axes[1].set_xlabel("Operations")
axes[2].set_ylabel("Time / op [ms]")
axes[2].set_xlabel("Operations")
axes[1].set_yscale("log")
axes[2].set_xscale("log")
axes[2].set_yscale("log")
fig.tight_layout()
plt.subplots_adjust(right=0.75)
plt.savefig("results/{}_{}_comparison.png".format(bench_mode, gen_mode))
plt.show()
if __name__ == "__main__":
main()
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.array",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] | [((260, 314), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Ubuntu']})\n", (262, 314), False, 'from matplotlib import rc\n'), ((315, 359), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'monospace': ['Ubuntu Mono']})\n", (317, 359), False, 'from matplotlib import rc\n'), ((361, 387), 'matplotlib.rc', 'rc', (['"""axes"""'], {'titlesize': '"""11"""'}), "('axes', titlesize='11')\n", (363, 387), False, 'from matplotlib import rc\n'), ((388, 413), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelsize': '"""9"""'}), "('axes', labelsize='9')\n", (390, 413), False, 'from matplotlib import rc\n'), ((414, 440), 'matplotlib.rc', 'rc', (['"""xtick"""'], {'labelsize': '"""9"""'}), "('xtick', labelsize='9')\n", (416, 440), False, 'from matplotlib import rc\n'), ((441, 467), 'matplotlib.rc', 'rc', (['"""ytick"""'], {'labelsize': '"""9"""'}), "('ytick', labelsize='9')\n", (443, 467), False, 'from matplotlib import rc\n'), ((501, 537), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Plot tool"""'], {}), "('Plot tool')\n", (524, 537), False, 'import argparse\n'), ((3701, 3740), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(11.5, 9.5)'}), '(3, 1, figsize=(11.5, 9.5))\n', (3713, 3740), True, 'import matplotlib.pyplot as plt\n'), ((6126, 6157), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.75)'}), '(right=0.75)\n', (6145, 6157), True, 'import matplotlib.pyplot as plt\n'), ((6240, 6250), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6248, 6250), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1291), 'numpy.array', 'np.array', (["[entry['times'][-1] for entry in entries]"], {}), "([entry['times'][-1] for entry in entries])\n", (1248, 1291), True, 'import numpy as np\n'), ((2250, 2284), 'numpy.random.shuffle', 'np.random.shuffle', (['scheduled_calls'], {}), '(scheduled_calls)\n', (2267, 2284), True, 'import numpy as np\n'), ((4091, 4115), 'numpy.array', 'np.array', (["entry['iters']"], {}), "(entry['iters'])\n", (4099, 4115), True, 'import numpy as np\n'), ((1760, 1784), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (1777, 1784), True, 'import numpy as np\n'), ((4132, 4156), 'numpy.array', 'np.array', (["entry['times']"], {}), "(entry['times'])\n", (4140, 4156), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases colour models plotting examples.
"""
from numpy import array
from pprint import pprint
import colour
from colour.plotting import * # noqa
from colour.utilities.verbose import message_box
message_box('Colour Models Plots')
message_box('Plotting "RGB" colourspaces in "CIE 1931 Chromaticity Diagram".')
pprint(sorted(colour.RGB_COLOURSPACES.keys()))
colourspaces_CIE_1931_chromaticity_diagram_plot(
['sRGB', 'ACES RGB', 'Adobe RGB 1998'])
print('\n')
message_box(('Plotting a single custom "RGB" colourspace in '
'"CIE 1931 Chromaticity Diagram".'))
colour.RGB_COLOURSPACES['Awful RGB'] = colour.RGB_Colourspace(
'Awful RGB',
primaries=array([[0.1, 0.2],
[0.3, 0.15],
[0.05, 0.6]]),
whitepoint=(1 / 3, 1 / 3))
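# Note (added, not in the original script): 'primaries' are the CIE xy
# chromaticities of the R, G and B primaries and 'whitepoint' is the xy
# chromaticity the colourspace is balanced to (here the equal-energy
# illuminant E, x = y = 1/3).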
pprint(sorted(colour.RGB_COLOURSPACES.keys()))
colourspaces_CIE_1931_chromaticity_diagram_plot(['sRGB', 'Awful RGB'])
print('\n')
message_box('Plotting a single "RGB" colourspace transfer function.')
single_transfer_function_plot('sRGB')
print('\n')
message_box('Plotting multiple "RGB" colourspaces transfer functions.')
multi_transfer_function_plot(['sRGB', 'Rec. 709'])
| [
"colour.utilities.verbose.message_box",
"colour.RGB_COLOURSPACES.keys",
"numpy.array"
] | [((252, 286), 'colour.utilities.verbose.message_box', 'message_box', (['"""Colour Models Plots"""'], {}), "('Colour Models Plots')\n", (263, 286), False, 'from colour.utilities.verbose import message_box\n'), ((288, 366), 'colour.utilities.verbose.message_box', 'message_box', (['"""Plotting "RGB" colourspaces in "CIE 1931 Chromaticity Diagram"."""'], {}), '(\'Plotting "RGB" colourspaces in "CIE 1931 Chromaticity Diagram".\')\n', (299, 366), False, 'from colour.utilities.verbose import message_box\n'), ((521, 624), 'colour.utilities.verbose.message_box', 'message_box', (['"""Plotting a single custom "RGB" colourspace in "CIE 1931 Chromaticity Diagram"."""'], {}), '(\n \'Plotting a single custom "RGB" colourspace in "CIE 1931 Chromaticity Diagram".\'\n )\n', (532, 624), False, 'from colour.utilities.verbose import message_box\n'), ((979, 1048), 'colour.utilities.verbose.message_box', 'message_box', (['"""Plotting a single "RGB" colourspace transfer function."""'], {}), '(\'Plotting a single "RGB" colourspace transfer function.\')\n', (990, 1048), False, 'from colour.utilities.verbose import message_box\n'), ((1101, 1172), 'colour.utilities.verbose.message_box', 'message_box', (['"""Plotting multiple "RGB" colourspaces transfer functions."""'], {}), '(\'Plotting multiple "RGB" colourspaces transfer functions.\')\n', (1112, 1172), False, 'from colour.utilities.verbose import message_box\n'), ((381, 411), 'colour.RGB_COLOURSPACES.keys', 'colour.RGB_COLOURSPACES.keys', ([], {}), '()\n', (409, 411), False, 'import colour\n'), ((727, 772), 'numpy.array', 'array', (['[[0.1, 0.2], [0.3, 0.15], [0.05, 0.6]]'], {}), '([[0.1, 0.2], [0.3, 0.15], [0.05, 0.6]])\n', (732, 772), False, 'from numpy import array\n'), ((861, 891), 'colour.RGB_COLOURSPACES.keys', 'colour.RGB_COLOURSPACES.keys', ([], {}), '()\n', (889, 891), False, 'import colour\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import tempfile
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from flaky import flaky
from pydantic import PositiveInt
from gluonts.dataset.artificial import constant_dataset
from gluonts.dataset.common import Dataset
from gluonts.evaluation import backtest_metrics, Evaluator
from gluonts.model.naive_2 import Naive2Predictor
from gluonts.model.predictor import Predictor
from gluonts.model.seasonal_naive import SeasonalNaivePredictor
from gluonts.support.pandas import forecast_start
def generate_random_dataset(
num_ts: int, start_time: str, freq: str, min_length: int, max_length: int
) -> Dataset:
start_timestamp = pd.Timestamp(start_time, freq=freq)
for _ in range(num_ts):
ts_length = np.random.randint(low=min_length, high=max_length)
target = np.random.uniform(size=(ts_length,))
data = {"target": target, "start": start_timestamp}
yield data
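# Note (added): generate_random_dataset is a generator; test_predictor below
# wraps it in list(...) so the data can be iterated more than once.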
PREDICTION_LENGTH = PositiveInt(30)
SEASON_LENGTH = PositiveInt(210)
START_TIME = "2018-01-03 14:37:12" # That's a Wednesday
MIN_LENGTH = 300
MAX_LENGTH = 400
NUM_TS = 10
@pytest.mark.parametrize(
"predictor_cls", [SeasonalNaivePredictor, Naive2Predictor]
)
@pytest.mark.parametrize(
"freq", ["1min", "15min", "30min", "1H", "2H", "12H", "7D", "1W", "1M"]
)
def test_predictor(predictor_cls, freq: str):
predictor = predictor_cls(
freq=freq,
prediction_length=PREDICTION_LENGTH,
season_length=SEASON_LENGTH,
)
dataset = list(
generate_random_dataset(
num_ts=NUM_TS,
start_time=START_TIME,
freq=freq,
min_length=MIN_LENGTH,
max_length=MAX_LENGTH,
)
)
# get forecasts
forecasts = list(predictor.predict(dataset))
assert len(dataset) == NUM_TS
assert len(forecasts) == NUM_TS
# check forecasts are as expected
for data, forecast in zip(dataset, forecasts):
assert forecast.samples.shape == (1, PREDICTION_LENGTH)
ref = data["target"][
-SEASON_LENGTH : -SEASON_LENGTH + PREDICTION_LENGTH
]
assert forecast.start_date == forecast_start(data)
# specifically for the seasonal naive we can test the supposed result directly
if predictor_cls == SeasonalNaivePredictor:
assert np.allclose(forecast.samples[0], ref)
# CONSTANT DATASET TESTS:
dataset_info, constant_train_ds, constant_test_ds = constant_dataset()
CONSTANT_DATASET_FREQ = dataset_info.metadata.freq
CONSTANT_DATASET_PREDICTION_LENGTH = dataset_info.prediction_length
def seasonal_naive_predictor():
return (
SeasonalNaivePredictor,
dict(prediction_length=CONSTANT_DATASET_PREDICTION_LENGTH),
)
def naive_2_predictor():
return (
Naive2Predictor,
dict(prediction_length=CONSTANT_DATASET_PREDICTION_LENGTH),
)
@flaky(max_runs=3, min_passes=1)
@pytest.mark.parametrize(
"predictor_cls, parameters, accuracy",
[seasonal_naive_predictor() + (0.0,), naive_2_predictor() + (0.0,)],
)
def test_accuracy(predictor_cls, parameters, accuracy):
predictor = predictor_cls(freq=CONSTANT_DATASET_FREQ, **parameters)
agg_metrics, item_metrics = backtest_metrics(
test_dataset=constant_test_ds,
predictor=predictor,
evaluator=Evaluator(calculate_owa=True),
)
assert agg_metrics["ND"] <= accuracy
# SERIALIZATION/DESERIALIZATION TESTS:
@pytest.mark.parametrize(
"predictor_cls, parameters",
[seasonal_naive_predictor(), naive_2_predictor()],
)
def test_serialize_predictors(predictor_cls, parameters):
predictor = predictor_cls(freq=CONSTANT_DATASET_FREQ, **parameters)
with tempfile.TemporaryDirectory() as temp_dir:
predictor.serialize(Path(temp_dir))
predictor_exp = Predictor.deserialize(Path(temp_dir))
assert predictor == predictor_exp
| [
"numpy.random.uniform",
"pandas.Timestamp",
"flaky.flaky",
"tempfile.TemporaryDirectory",
"gluonts.support.pandas.forecast_start",
"numpy.allclose",
"pydantic.PositiveInt",
"gluonts.evaluation.Evaluator",
"pathlib.Path",
"numpy.random.randint",
"pytest.mark.parametrize",
"gluonts.dataset.artif... | [((1533, 1548), 'pydantic.PositiveInt', 'PositiveInt', (['(30)'], {}), '(30)\n', (1544, 1548), False, 'from pydantic import PositiveInt\n'), ((1565, 1581), 'pydantic.PositiveInt', 'PositiveInt', (['(210)'], {}), '(210)\n', (1576, 1581), False, 'from pydantic import PositiveInt\n'), ((1688, 1775), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""predictor_cls"""', '[SeasonalNaivePredictor, Naive2Predictor]'], {}), "('predictor_cls', [SeasonalNaivePredictor,\n Naive2Predictor])\n", (1711, 1775), False, 'import pytest\n'), ((1779, 1879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['1min', '15min', '30min', '1H', '2H', '12H', '7D', '1W', '1M']"], {}), "('freq', ['1min', '15min', '30min', '1H', '2H',\n '12H', '7D', '1W', '1M'])\n", (1802, 1879), False, 'import pytest\n'), ((3029, 3047), 'gluonts.dataset.artificial.constant_dataset', 'constant_dataset', ([], {}), '()\n', (3045, 3047), False, 'from gluonts.dataset.artificial import constant_dataset\n'), ((3462, 3493), 'flaky.flaky', 'flaky', ([], {'max_runs': '(3)', 'min_passes': '(1)'}), '(max_runs=3, min_passes=1)\n', (3467, 3493), False, 'from flaky import flaky\n'), ((1243, 1278), 'pandas.Timestamp', 'pd.Timestamp', (['start_time'], {'freq': 'freq'}), '(start_time, freq=freq)\n', (1255, 1278), True, 'import pandas as pd\n'), ((1327, 1377), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'min_length', 'high': 'max_length'}), '(low=min_length, high=max_length)\n', (1344, 1377), True, 'import numpy as np\n'), ((1395, 1431), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(ts_length,)'}), '(size=(ts_length,))\n', (1412, 1431), True, 'import numpy as np\n'), ((4277, 4306), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4304, 4306), False, 'import tempfile\n'), ((2729, 2749), 'gluonts.support.pandas.forecast_start', 'forecast_start', (['data'], {}), '(data)\n', (2743, 2749), False, 'from gluonts.support.pandas import forecast_start\n'), ((2909, 2946), 'numpy.allclose', 'np.allclose', (['forecast.samples[0]', 'ref'], {}), '(forecast.samples[0], ref)\n', (2920, 2946), True, 'import numpy as np\n'), ((3902, 3931), 'gluonts.evaluation.Evaluator', 'Evaluator', ([], {'calculate_owa': '(True)'}), '(calculate_owa=True)\n', (3911, 3931), False, 'from gluonts.evaluation import backtest_metrics, Evaluator\n'), ((4348, 4362), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (4352, 4362), False, 'from pathlib import Path\n'), ((4410, 4424), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (4414, 4424), False, 'from pathlib import Path\n')] |
import utils
import imageutils
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import kerasmodel
# load in Chessboard Calibration images
directory = "camera_cal/"
project_test_images = utils.Load_images_for_directory(directory)
def get_image_corners(img, nx, ny):
""" Gets the image corners for img
"""
# nx, ny = 9, 6
# Convert image to grayscale
gray = imageutils.convert_to_gray(img)
# Get Chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
return ret, corners
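# Note (added): cv2.findChessboardCorners returns ret == False whenever the
# full nx-by-ny grid of interior corners is not visible; callers below skip
# such images.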
def get_image_points(img, nx=9, ny=6):
""" Gets image points and object points for each image in imgs
Returns:
"""
# Prepare object point by creating a zeros array of the same size as the image
object_points = np.zeros((nx*ny, 3), np.float32)
object_points[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
ret, corners = get_image_corners(img, nx, ny)
if ret:
# Add image corners
image_points = corners
# Create an image copy to draw on
image_copy = img.copy()
# Draw found corners on the image
corner_image = cv2.drawChessboardCorners(image_copy,
(nx, ny),
corners,
ret)
return object_points, image_points, corner_image
else:
return np.array([]), np.array([]), np.array([])
def get_images_points(imgs, nx=9, ny=6):
""" Gets image points and object points for each image in imgs
Returns:
"""
images_object_points = []
images_points = []
corner_images = []
for img in imgs:
object_points, image_points, corner_image = get_image_points(img, nx, ny)
if object_points.size > 0:
# Add image corners
images_points.append(image_points)
# Add the prepared object points
images_object_points.append(object_points)
# Draw found corners on the image
corner_images.append(corner_image)
return images_object_points, images_points, corner_images
def calibrate_camera(images_object_points, images_points, images_shape):
""" Calibrates camera images
"""
# Calibrate camera using found corners
# image_shape = img.shape[1::-1]
ret, mtx, dist, rvecs, tvec = cv2.calibrateCamera(images_object_points,
images_points,
images_shape,
None,
None)
return ret, mtx, dist
def undistort_image(img, mtx, dist):
""" Undistorts image
mtx:
dist:
img:
"""
return cv2.undistort(img, mtx, dist, None, mtx)
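# Hedged usage sketch (added, not part of the original file): calibrate once
# from the chessboard images loaded above, then undistort a frame. The demo_*
# names are illustrative assumptions, not originals.
if __name__ == "__main__":
    demo_obj_pts, demo_img_pts, _ = get_images_points(project_test_images)
    if demo_obj_pts:  # only calibrate if at least one chessboard was detected
        demo_shape = project_test_images[0].shape[1::-1]
        _, demo_mtx, demo_dist = calibrate_camera(demo_obj_pts, demo_img_pts, demo_shape)
        demo_undistorted = undistort_image(project_test_images[0], demo_mtx, demo_dist)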
def undistort_images(imgs, images_object_points, images_points):
""" Calibrates and undistorts images
"""
# Array for Undistorted images
undistorted_images = []
# Calibrate camera using found corners
image_shape = imgs[0].shape[1::-1]
    ret, mtx, dist, rvecs, tvec = cv2.calibrateCamera(images_object_points,
                                                        images_points,
                                                        image_shape,
                                                        None,
                                                        None)
    for img in imgs:
        # Undistort each image with the calibration just computed
        undistorted_image = cv2.undistort(img, mtx, dist, None, mtx)
        undistorted_images.append(undistorted_image)
return undistorted_images
def warp_and_transform_image(undistorted_img, src, dest):
height, width = undistorted_img.shape[0], undistorted_img.shape[1]
M = cv2.getPerspectiveTransform(src, dest)
Minv = cv2.getPerspectiveTransform(dest, src)
warped = cv2.warpPerspective(undistorted_img, M, (width, height))
return warped, M, Minv
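# Note (added): M warps road pixels into the top-down ("bird's-eye") view and
# Minv is its inverse, used later by draw_drivable_area to project the detected
# lane back onto the original camera perspective.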
# Color thresholding
def other_color_thresholds(img, b_threshold=(145, 200), l_threshold=(215,255)):
# LAB color space
lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
binary_b = np.zeros_like(img[:,:,0])
B_channel = lab[:,:,2]
binary_b[(B_channel > b_threshold[0]) & (B_channel <= b_threshold[1])] = 1
# LUV color space
luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
L_channel = luv[:,:,0]
binary_l = np.zeros_like(img[:,:,0])
binary_l[(L_channel > l_threshold[0]) & (L_channel <= l_threshold[1])] = 1
# Combined threshold
binary_threshold = np.zeros_like(img[:,:,0])
binary_threshold[(binary_b == 1) | (binary_l == 1)] = 1
return binary_threshold
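# Note (added): the LAB B channel responds strongly to yellow lane paint and
# the LUV L channel to bright white paint, so OR-ing the two binaries keeps
# both lane colors while suppressing most of the road surface.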
def red_color_threshold(img, threshold=(200, 250)):
R_channel = img[:, :, 0]
binary_red = np.zeros_like(R_channel)
binary_red[(R_channel > threshold[0]) & (R_channel <= threshold[1])] = 1
return binary_red
def hLs_color_threshold(img, threshold=(90, 255)):
hls_image = imageutils.convert_to_hsl(img)
S_channel = hls_image[:, :, 2]
binary_S = np.zeros_like(S_channel)
binary_S[(S_channel > threshold[0]) & (S_channel <= threshold[1])] = 1
return binary_S
def combined_color_threshold(img, red_thresh, hls_thresh):
red_binary_threshold = red_color_threshold(img, red_thresh)
hls_binary_threshold = hLs_color_threshold(img, hls_thresh)
other_binary_thresholds = other_color_thresholds(img)
binary_threshold = np.zeros_like(red_binary_threshold)
binary_threshold[(red_binary_threshold == 1) |
(hls_binary_threshold == 1) |
(other_binary_thresholds == 1)] = 1
return binary_threshold
# Gradient Thresholding
def abs_sobel_thresh(gray_image, orient='x', sobel_kernel=3, thresh=(0, 255)):
# 1) Convert to grayscale
# gray_image = imageutils.convert_to_gray(img)
#
x, y = (1, 0) if orient == 'x' else (0, 1)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
sobel = cv2.Sobel(gray_image, cv2.CV_64F, x, y, ksize=sobel_kernel)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
# 6) Return this mask as your binary_output image
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def combined_abs_sobelxy_thresh(gray_image, sobel_kernel=3, thresh=(0, 255)):
gradx_binary = abs_sobel_thresh(gray_image, 'x', sobel_kernel, thresh)
grady_binary = abs_sobel_thresh(gray_image, 'y', sobel_kernel, thresh)
combined = np.zeros_like(gradx_binary)
combined[((gradx_binary == 1) & (grady_binary == 1))] = 1
return combined
def mag_sobel_thresh(gray_image, sobel_kernel=3, mag_thresh=(0, 255)):
# 1) Convert to grayscale
# gray_image = imageutils.convert_to_gray(img)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray_image, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray_image, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
abs_sobelxy = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobelx = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
scaled_sobely = np.uint8(255 * abs_sobely / np.max(abs_sobely))
scaled_sobelxy = np.uint8(255 * abs_sobelxy / np.max(abs_sobelxy))
# 5) Create a binary mask where mag thresholds are met
mag_binary = np.zeros_like(scaled_sobelxy)
mag_binary[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return mag_binary
def dir_sobel_thresh(gray_image, sobel_kernel=3, thresh=(0, np.pi/2)):
# 1) Convert to grayscale
# gray_image = imageutils.convert_to_gray(img)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray_image, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray_image, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
abs_sobelxy = np.sqrt(sobelx**2 + sobely**2)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
direction = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
dir_binary = np.zeros_like(direction)
dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
return dir_binary
def combined_sobel_mag_dir_thresh(gray_image, sobel_kernel=17, mag_thresh=(30, 100), dir_thresh=(0.7, 1.3)):
mag_binary = mag_sobel_thresh(gray_image, sobel_kernel, mag_thresh)
dir_binary = dir_sobel_thresh(gray_image, sobel_kernel, dir_thresh)
combined = np.zeros_like(dir_binary)
combined[((mag_binary == 1) & (dir_binary == 1))] = 1
return combined
def combined_sobel_thresh(img,
abs_kernel=3,
mag_dir_kernel=17,
abs_thresh=(200, 250),
mag_thresh=(30, 100),
dir_thresh=(0.7, 1.3)):
gray_image = imageutils.convert_to_gray(img)
combined_sobel = np.zeros_like(gray_image)
combined_binary_abs_sobel = combined_abs_sobelxy_thresh(gray_image, abs_kernel, abs_thresh)
combined_binary_mag_dir_sobel = combined_sobel_mag_dir_thresh(gray_image,
mag_dir_kernel,
mag_thresh,
dir_thresh)
combined_sobel[(combined_binary_abs_sobel == 1) | (combined_binary_mag_dir_sobel == 1)] = 1
return combined_sobel
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
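# Note (added): cv2.GaussianBlur requires a positive odd kernel_size (the
# pipeline below passes 11); an even size raises an OpenCV error.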
# Masking
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def distance_from_center(l_line, r_line, image_width):
    """ Placeholder, not implemented in this version. """
    return None
def convolution_sliding_window(binary_warped):
    """ Placeholder, not implemented in this version. """
    return None
def window_mask(width, height, img_ref, center, level, nonzeroy, nonzerox):
low_y = int(img_ref.shape[0] - (level + 1) * height)
high_y = int(img_ref.shape[0] - level * height)
low_x = max(0, int(center - width / 2))
high_x = min(int(center + width / 2), img_ref.shape[1])
# Output image
output = np.zeros_like(img_ref)
output[low_y:high_y, low_x:high_x] = 1
# Identify the nonzero pixels in x and y within the window
good_inds = ((nonzeroy >= low_y) & (nonzeroy < high_y) &
(nonzerox >= low_x) & (nonzerox < high_x)).nonzero()[0]
# # If you found > minpix pixels, recenter next window on their mean position
# if len(good_inds) > minpix:
# leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
return output, good_inds
def histogram_sliding_window(binary_warped):
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
height, width = binary_warped.shape[0], binary_warped.shape[1]
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
(0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
(0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
y_eval = np.max(ploty)
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)
** 1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])
** 2)**1.5) / np.absolute(2*right_fit[0])
print(left_curverad, right_curverad)
# Calculate the new radii of curvature
left_curvature = get_rad_curv(lefty, leftx)
right_curvature = get_rad_curv(righty, rightx)
    print('Left curvature: {}m, Right curvature: {}m'.format(left_curvature, right_curvature))
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, width)
plt.ylim(height, 0)
plt.show()
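# Compatibility note (added): np.int (used above) was removed in NumPy 1.24;
# on current NumPy those casts should be plain int(...) instead.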
def find_window_centroids(image, window_width, window_height, margin, previous_l_center=None, previous_r_center=None):
# Identify image height and width
height, width = image.shape[0], image.shape[1]
window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
if not previous_l_center and not previous_r_center:
# print('Yay')
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3 * height / 4):, :int(width / 2)],
axis=0) # *3 to get the bottom quarter
l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2
r_sum = np.sum(image[int(3 * height / 4):, int(width / 2):], axis=0)
r_center = np.argmax(np.convolve(window, r_sum))-window_width/2+int(image.shape[1]/2)
starting_level = 1
else:
print('Nay')
# Add what we found for the first layer
window_centroids.append((previous_l_center, previous_r_center))
l_center = previous_l_center
r_center = previous_r_center
starting_level = 0
# Add what we found for the first layer
window_centroids.append((l_center, r_center))
# Go through each other (save the last one) layer looking for max pixel locations
for level in range(starting_level, (int)(height / window_height)):
# convolve the window into the vertical slice of the image
# 720 - 160 second window : 720 - 80
image_layer = np.sum(image[int(height - (level + 1) * window_height) : int(height - level * window_height), :], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as an offset because convolution signal reference is at right side of window, not center of window
offset = window_width / 2
l_min_index = int(max(l_center + offset - margin, 0))
        l_max_index = int(min(l_center + offset + margin, width))
# x = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset
# if np.abs(x - l_center)
l_center = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset
# print(conv_signal[int(x)])
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center + offset - margin, 0))
r_max_index = int(min(r_center + offset + margin, width))
r_center = np.argmax(conv_signal[r_min_index:r_max_index]) + r_min_index - offset
# Add what we found for that layer
window_centroids.append((l_center, r_center))
return window_centroids
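# Note (added): np.convolve slides the all-ones window across the column sum of
# each horizontal slice, so the argmax of the convolution is the x position
# with the most hot pixels, i.e. the most likely lane-line center for that slice.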
def draw_lines_windows(warped, window_width, window_height, nonzerox, nonzeroy, window_centroids):
left_indices, right_indices = [], []
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0, len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask, l_inds = window_mask(window_width,
window_height,
warped,
window_centroids[level][0],
level,
nonzeroy,
nonzerox)
r_mask, r_inds = window_mask(window_width,
window_height,
warped,
window_centroids[level][1],
level,
nonzeroy,
nonzerox)
left_indices.append(l_inds)
right_indices.append(r_inds)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1))] = 255
r_points[(r_points == 255) | ((r_mask == 1))] = 255
# Draw the results
# add both left and right window pixels together
template = np.array(r_points + l_points, np.uint8)
zero_channel = np.zeros_like(template) # create a zero color channel
template = np.array(cv2.merge((zero_channel, template, zero_channel)),
np.uint8) # make window pixels green
# making the original road pixels 3 color channels
warpage = np.dstack((warped, warped, warped))*255
# overlay the orignal road image with window results
output = cv2.addWeighted(warpage, 1, template, 0.8, 0.0)
return output, np.concatenate(left_indices), np.concatenate(right_indices)
def polyfit_lines(leftx, lefty, rightx, righty):
# Fit a second order polynomial to each
# Quadratic cofficent A
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
return left_fit, right_fit
def compute_polyfit(left_fit, right_fit, out_img):
# Generate x and y values for plotting
height, width, _ = out_img.shape
ploty = np.linspace(0, height - 1, num=height)
left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
# plt.imshow(out_img)
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# plt.xlim(0, width)
# plt.ylim(height, 0)
return ploty, left_fitx, right_fitx
def get_rad_curv(y_vals, x_vals, ym_per_pix=30/720, xm_per_pix=3.7/700):
    # ym_per_pix / xm_per_pix convert from pixel space to meters; the defaults
    # assume ~30 m of road over 720 px vertically and a ~3.7 m lane over ~700 px.
fit_cr = np.polyfit(y_vals * ym_per_pix, x_vals * xm_per_pix, 2)
y_eval = np.max(y_vals)
return ((1 + (2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2 * fit_cr[0])
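# Worked formula (added): for a fit x = A*y**2 + B*y + C the radius of
# curvature at y is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|; fitting on values
# scaled by ym_per_pix / xm_per_pix makes R come out in meters.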
def get_curvature(lefty, leftx, righty, rightx):
# Calculate the new radii of curvature
left_curverad = get_rad_curv(lefty, leftx)
right_curverad = get_rad_curv(righty, rightx)
# print('Left curvature: {}, Right curvature: {}\n'.format(left_curverad, right_curverad))
# print("Difference between both line's curvatures {} ".format(np.abs(left_curverad - right_curverad)))
# print("ok {}, {}".format(left_curverad, right_curverad))
return np.average([left_curverad, right_curverad])
def get_vehicle_position(img, left_fitx, right_fitx, xm_per_pix=3.7/700):
height, width, _ = img.shape
    car_center_bottom = width / 2  # the camera is centered, so the car sits at the image center
lane_center = (left_fitx[height - 1] + right_fitx[height - 1]) / 2
# print((car_center_bottom - lane_center) * xm_per_pix)
# exit
return (car_center_bottom - lane_center) * xm_per_pix
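# Note (added): the returned offset is in meters and is positive when the car
# sits to the right of the lane center, negative when it sits to the left.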
def draw_drivable_area(warped, undist_image, ploty, left_fitx, right_fitx, Minv):
# ploty = np.linspace(0, height - 1, num=height)
# Create an image to draw the lines on
new_copy = np.copy(undist_image)
if left_fitx is None or right_fitx is None:
return undist_image
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.polylines(color_warp, np.int32([pts_left]),
isClosed=False, color=(255, 0, 255), thickness=20)
cv2.polylines(color_warp, np.int32([pts_right]),
isClosed=False, color=(0, 255, 255), thickness=20)
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (undist_image.shape[1], undist_image.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(new_copy, 1, newwarp, 0.7, 0)
return result
from collections import deque
class LaneLineFinder:
SAMPLE_FRAMES = 15
def __init__(self, mtx, dist, keras_model=None):
        self.previous_frames = deque(maxlen=self.SAMPLE_FRAMES)
        self.left_lane_fits = deque(maxlen=self.SAMPLE_FRAMES)
        self.right_lane_fits = deque(maxlen=self.SAMPLE_FRAMES)
        self.curvatures = deque(maxlen=self.SAMPLE_FRAMES)
        self.center_values = deque(maxlen=self.SAMPLE_FRAMES)
self.mtx = mtx
self.dist = dist
self.keras_model = keras_model
self.previous_image_centroids = None
self.drivable = deque(maxlen=self.SAMPLE_FRAMES)
def average_frame_sampling(self, frame, previous_frames):
previous_frames.append(frame)
if len(previous_frames) > 0:
frame = np.mean(previous_frames, axis=0, dtype=np.int32)
# line = tuple(map(tuple, line))
return frame
def process_image(self, image):
height, width, _ = image.shape
# Color Thresholds
red_thresh = (220, 250)
hls_thresh = (90, 255)
hls2_thresh = (170, 255)
# Gradient Thresholds
xy_threshold = (20, 100)
mag_threshold = (70, 100)
dir_threshold = (1.1, 1.3)
image_offset = 10
src = np.float32([[width * 0.45, height * 0.63] # Top left vertix 60% if the image's hight
, [width * 0.10, height * 0.95] # Bottom left
, [width * 0.94, height * 0.95] # Bottom right
, [width * 0.56, height * 0.63]]) # Top right vetrix
dest = np.float32([[image_offset, 0], # Top left
[image_offset, height], # Bottom left
[width - image_offset, height], # Bottom right
[width - image_offset, 0]]) # Top right
# Undistort image
undistorted_image = undistort_image(image, self.mtx, self.dist)
if not self.keras_model:
# Thresholding
# Color Thresholding
color_binary_threshold = combined_color_threshold(
undistorted_image, red_thresh, hls2_thresh)
# Gradient Thresholding (Sobel)
sobel_binary_threshold = combined_sobel_thresh(undistorted_image,
abs_kernel=3,
mag_dir_kernel=17,
abs_thresh=xy_threshold,
mag_thresh=mag_threshold,
dir_thresh=dir_threshold)
combined_color_gradient = np.zeros_like(color_binary_threshold)
combined_color_gradient[(color_binary_threshold == 1) |
(sobel_binary_threshold == 1)] = 1
image_to_warp = combined_color_gradient
# plt.imshow(image_to_warp)
# plt.show()
# print(image_to_warp.dtype)
# print(image_to_warp.shape)
# Perspective transform (Warp)
warped_image, M, Minv = warp_and_transform_image(image_to_warp, src, dest)
# Smooth the image (Gaussian blur)
warped_image = gaussian_blur(warped_image, 11)
# plt.imshow(warped_image, cmap="gray")
# plt.show()
# Detecting lane lines
# Identify the x and y positions of all nonzero pixels in the image
nonzero = warped_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# window settings
window_width = 100
window_height = height / 6 # Break image into 5 vertical layers since image height is 720
margin = 100 # How much to slide left and right for searching
# Calculate lanes centroids
# Get lanes centroids (Windows)
window_centroids = find_window_centroids(warped_image, window_width, window_height, margin)
# Draw detected windows
output, left_indices, right_indices = draw_lines_windows(warped_image,
window_width,
window_height,
nonzerox,
nonzeroy,
window_centroids)
# Extract left and right line pixel positions
leftx = nonzerox[left_indices]
lefty = nonzeroy[left_indices]
rightx = nonzerox[right_indices]
righty = nonzeroy[right_indices]
# Get poly fits for the lane lines
if len(leftx) > 0 and len(lefty) > 0 and len(rightx) > 0 and len(righty) > 0:
# Get the polyfits for the lane lines
left_fit, right_fit = polyfit_lines(leftx, lefty, rightx, righty)
# Color lane lines
# making the original road pixels 3 color channels
out_img = np.dstack((warped_image, warped_image, warped_image)) * 255
out_img[nonzeroy[left_indices], nonzerox[left_indices]] = [255, 0, 0]
out_img[nonzeroy[right_indices], nonzerox[right_indices]] = [0, 0, 255]
# Draw polyfit and lane lines
ploty, left_fitx, right_fitx = compute_polyfit(left_fit,
right_fit,
out_img)
# Get curvature
lane_curvature = self.average_curvature(get_curvature(lefty, leftx, righty, rightx))
# if len(left_fitx) > 0 and len(right_fitx) > 0
# Get car position
car_position = get_vehicle_position(out_img, left_fitx, right_fitx)
actual_size=3.7/700
# print(np.abs(leftx[0] - rightx[0]) * actual_size)
lane_dist_high = np.abs(leftx[0] - rightx[0])
lane_dist_low = np.abs(leftx[-1] - rightx[-1])
print(lane_dist_high)
print(lane_dist_low)
left_curverad = get_rad_curv(lefty, leftx)
right_curverad = get_rad_curv(righty, rightx)
print('left curv', left_curverad)
print('right curv', right_curverad)
if (lane_dist_low < 750) or (lane_dist_high < 750):
# print(lane_dist_high)
# print(lane_dist_low)
# print('LOW')
diff = left_curverad - right_curverad
if np.abs(diff) >= 200:
if diff < 0 and right_curverad > 380:
right_fitx = right_fitx
left_fitx = right_fitx - 900
elif diff > 0 and left_curverad > 380:
right_fitx = left_fitx + 900
left_fitx = left_fitx
else:
right_fitx, left_fitx = np.array([]), np.array([])
right_fitx = self.average_lane_sampling(right_fitx, self.right_lane_fits)
left_fitx = self.average_lane_sampling(left_fitx, self.left_lane_fits)
masked_lane_image = draw_drivable_area(
warped_image, image, ploty, left_fitx, right_fitx, Minv)
write_text_on_image(masked_lane_image, int(lane_curvature), car_position)
return masked_lane_image
else:
return image
else:
            predicted_image = self.keras_model.predict(undistorted_image).astype('uint8')
            height, width, _ = undistorted_image.shape
            # Get lane polyFit
            nonzero = predicted_image[:, :, 2].nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            curvature = self.average_curvature(get_rad_curv(nonzeroy, nonzerox) if len(nonzeroy) > 0 and len(nonzerox) > 0 else 0)
            new_copy = np.copy(undistorted_image)
            frame_to_return = predicted_image  # self.average_frame_sampling(image_to_warp, self.previous_frames)
write_text_on_image(new_copy, int(curvature))
return cv2.addWeighted(new_copy, 1, frame_to_return.astype('uint8')*255, 0.7, 0)
def average_lane_sampling(self, line_fit, previous_fits):
if line_fit.size > 0:
# print('append')
previous_fits.append(line_fit)
if len(previous_fits) > 0:
line_fit = np.mean(previous_fits, axis = 0, dtype=np.int32)
return line_fit
def average_curvature(self, curvature):
if curvature > 0:
self.curvatures.append(curvature)
if len(self.curvatures) > 0:
curvature = np.mean(self.curvatures, axis = 0, dtype=np.int32)
return curvature
def average_car_position(self, center):
self.center_values.append(center)
if len(self.center_values) > 0:
center = np.mean(self.center_values, axis = 0, dtype=np.int32)
return center
def write_text_on_image(img, curv, center=0.0):
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(img.shape[1] * 0.32), int(img.shape[0] * 0.10))
fontScale = 1
fontColor = (255,255,255)
lineType = 2
# img[:100, :] -= 5
bottomLeftCornerOfText2 = (int(img.shape[1] * 0.32), int(img.shape[0] * 0.20))
    curv = 'Straight' if curv > 3000 else curv
cv2.putText(img, 'Curvature is ({}) meters'.format(curv),
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
cv2.putText(img, 'The Car is ({}) meters of center'.format(round(center, 1)),
bottomLeftCornerOfText2,
font,
fontScale,
fontColor,
lineType) | [
"numpy.absolute",
"cv2.GaussianBlur",
"numpy.arctan2",
"numpy.sum",
"cv2.bitwise_and",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"imageutils.convert_to_hsl",
"numpy.polyfit",
"numpy.abs",
"numpy.ones",
"cv2.fillPoly",
"numpy.mean",
"cv2.rectangle",
"numpy.convolve",
"imageutils.co... | [((210, 252), 'utils.Load_images_for_directory', 'utils.Load_images_for_directory', (['directory'], {}), '(directory)\n', (241, 252), False, 'import utils\n'), ((403, 434), 'imageutils.convert_to_gray', 'imageutils.convert_to_gray', (['img'], {}), '(img)\n', (429, 434), False, 'import imageutils\n'), ((483, 530), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(nx, ny)', 'None'], {}), '(gray, (nx, ny), None)\n', (508, 530), False, 'import cv2\n'), ((792, 826), 'numpy.zeros', 'np.zeros', (['(nx * ny, 3)', 'np.float32'], {}), '((nx * ny, 3), np.float32)\n', (800, 826), True, 'import numpy as np\n'), ((2400, 2486), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['images_object_points', 'images_points', 'images_shape', 'None', 'None'], {}), '(images_object_points, images_points, images_shape, None,\n None)\n', (2419, 2486), False, 'import cv2\n'), ((2860, 2900), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (2873, 2900), False, 'import cv2\n'), ((3197, 3270), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['object_points', 'image_points', 'image_shape', 'None', 'None'], {}), '(object_points, image_points, image_shape, None, None)\n', (3216, 3270), False, 'import cv2\n'), ((3827, 3865), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dest'], {}), '(src, dest)\n', (3854, 3865), False, 'import cv2\n'), ((3877, 3915), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dest', 'src'], {}), '(dest, src)\n', (3904, 3915), False, 'import cv2\n'), ((3929, 3985), 'cv2.warpPerspective', 'cv2.warpPerspective', (['undistorted_img', 'M', '(width, height)'], {}), '(undistorted_img, M, (width, height))\n', (3948, 3985), False, 'import cv2\n'), ((4150, 4186), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LAB'], {}), '(img, cv2.COLOR_RGB2LAB)\n', (4162, 4186), False, 'import cv2\n'), ((4202, 4229), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (4215, 4229), True, 'import numpy as np\n'), ((4367, 4403), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LUV'], {}), '(img, cv2.COLOR_RGB2LUV)\n', (4379, 4403), False, 'import cv2\n'), ((4446, 4473), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (4459, 4473), True, 'import numpy as np\n'), ((4600, 4627), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (4613, 4627), True, 'import numpy as np\n'), ((4814, 4838), 'numpy.zeros_like', 'np.zeros_like', (['R_channel'], {}), '(R_channel)\n', (4827, 4838), True, 'import numpy as np\n'), ((5007, 5037), 'imageutils.convert_to_hsl', 'imageutils.convert_to_hsl', (['img'], {}), '(img)\n', (5032, 5037), False, 'import imageutils\n'), ((5088, 5112), 'numpy.zeros_like', 'np.zeros_like', (['S_channel'], {}), '(S_channel)\n', (5101, 5112), True, 'import numpy as np\n'), ((5478, 5513), 'numpy.zeros_like', 'np.zeros_like', (['red_binary_threshold'], {}), '(red_binary_threshold)\n', (5491, 5513), True, 'import numpy as np\n'), ((6021, 6080), 'cv2.Sobel', 'cv2.Sobel', (['gray_image', 'cv2.CV_64F', 'x', 'y'], {'ksize': 'sobel_kernel'}), '(gray_image, cv2.CV_64F, x, y, ksize=sobel_kernel)\n', (6030, 6080), False, 'import cv2\n'), ((6160, 6178), 'numpy.absolute', 'np.absolute', (['sobel'], {}), '(sobel)\n', (6171, 6178), True, 'import numpy as np\n'), ((6487, 6514), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (6500, 6514), True, 'import numpy as 
np\n'), ((6863, 6890), 'numpy.zeros_like', 'np.zeros_like', (['gradx_binary'], {}), '(gradx_binary)\n', (6876, 6890), True, 'import numpy as np\n'), ((7189, 7248), 'cv2.Sobel', 'cv2.Sobel', (['gray_image', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(gray_image, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (7198, 7248), False, 'import cv2\n'), ((7262, 7321), 'cv2.Sobel', 'cv2.Sobel', (['gray_image', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(gray_image, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (7271, 7321), False, 'import cv2\n'), ((7372, 7391), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (7383, 7391), True, 'import numpy as np\n'), ((7409, 7428), 'numpy.absolute', 'np.absolute', (['sobely'], {}), '(sobely)\n', (7420, 7428), True, 'import numpy as np\n'), ((7447, 7481), 'numpy.sqrt', 'np.sqrt', (['(sobelx ** 2 + sobely ** 2)'], {}), '(sobelx ** 2 + sobely ** 2)\n', (7454, 7481), True, 'import numpy as np\n'), ((7826, 7855), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobelxy'], {}), '(scaled_sobelxy)\n', (7839, 7855), True, 'import numpy as np\n'), ((8238, 8297), 'cv2.Sobel', 'cv2.Sobel', (['gray_image', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(gray_image, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (8247, 8297), False, 'import cv2\n'), ((8311, 8370), 'cv2.Sobel', 'cv2.Sobel', (['gray_image', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(gray_image, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (8320, 8370), False, 'import cv2\n'), ((8446, 8465), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (8457, 8465), True, 'import numpy as np\n'), ((8483, 8502), 'numpy.absolute', 'np.absolute', (['sobely'], {}), '(sobely)\n', (8494, 8502), True, 'import numpy as np\n'), ((8521, 8555), 'numpy.sqrt', 'np.sqrt', (['(sobelx ** 2 + sobely ** 2)'], {}), '(sobelx ** 2 + sobely ** 2)\n', (8528, 8555), True, 'import numpy as np\n'), ((8659, 8693), 'numpy.arctan2', 'np.arctan2', (['abs_sobely', 'abs_sobelx'], {}), '(abs_sobely, abs_sobelx)\n', (8669, 8693), True, 'import numpy as np\n'), ((8776, 8800), 'numpy.zeros_like', 'np.zeros_like', (['direction'], {}), '(direction)\n', (8789, 8800), True, 'import numpy as np\n'), ((9219, 9244), 'numpy.zeros_like', 'np.zeros_like', (['dir_binary'], {}), '(dir_binary)\n', (9232, 9244), True, 'import numpy as np\n'), ((9605, 9636), 'imageutils.convert_to_gray', 'imageutils.convert_to_gray', (['img'], {}), '(img)\n', (9631, 9636), False, 'import imageutils\n'), ((9658, 9683), 'numpy.zeros_like', 'np.zeros_like', (['gray_image'], {}), '(gray_image)\n', (9671, 9683), True, 'import numpy as np\n'), ((10310, 10362), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(kernel_size, kernel_size)', '(0)'], {}), '(img, (kernel_size, kernel_size), 0)\n', (10326, 10362), False, 'import cv2\n'), ((10640, 10658), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (10653, 10658), True, 'import numpy as np\n'), ((11040, 11087), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'vertices', 'ignore_mask_color'], {}), '(mask, vertices, ignore_mask_color)\n', (11052, 11087), False, 'import cv2\n'), ((11169, 11195), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (11184, 11195), False, 'import cv2\n'), ((11682, 11704), 'numpy.zeros_like', 'np.zeros_like', (['img_ref'], {}), '(img_ref)\n', (11695, 11704), True, 'import numpy as np\n'), ((12356, 12418), 'numpy.sum', 'np.sum', (['binary_warped[binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), 
'(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)\n', (12362, 12418), True, 'import numpy as np\n'), ((12707, 12738), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (12713, 12738), True, 'import numpy as np\n'), ((12754, 12785), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (12763, 12785), True, 'import numpy as np\n'), ((13024, 13066), 'numpy.int', 'np.int', (['(binary_warped.shape[0] // nwindows)'], {}), '(binary_warped.shape[0] // nwindows)\n', (13030, 13066), True, 'import numpy as np\n'), ((13190, 13210), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (13198, 13210), True, 'import numpy as np\n'), ((13226, 13246), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (13234, 13246), True, 'import numpy as np\n'), ((15343, 15373), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (15357, 15373), True, 'import numpy as np\n'), ((15396, 15427), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (15410, 15427), True, 'import numpy as np\n'), ((15691, 15718), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (15701, 15718), True, 'import numpy as np\n'), ((15735, 15764), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (15745, 15764), True, 'import numpy as np\n'), ((15821, 15887), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (15832, 15887), True, 'import numpy as np\n'), ((16209, 16222), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (16215, 16222), True, 'import numpy as np\n'), ((16761, 16780), 'matplotlib.pyplot.imshow', 'plt.imshow', (['out_img'], {}), '(out_img)\n', (16771, 16780), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16827), 'matplotlib.pyplot.plot', 'plt.plot', (['left_fitx', 'ploty'], {'color': '"""yellow"""'}), "(left_fitx, ploty, color='yellow')\n", (16793, 16827), True, 'import matplotlib.pyplot as plt\n'), ((16832, 16875), 'matplotlib.pyplot.plot', 'plt.plot', (['right_fitx', 'ploty'], {'color': '"""yellow"""'}), "(right_fitx, ploty, color='yellow')\n", (16840, 16875), True, 'import matplotlib.pyplot as plt\n'), ((16880, 16898), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'width'], {}), '(0, width)\n', (16888, 16898), True, 'import matplotlib.pyplot as plt\n'), ((16903, 16922), 'matplotlib.pyplot.ylim', 'plt.ylim', (['height', '(0)'], {}), '(height, 0)\n', (16911, 16922), True, 'import matplotlib.pyplot as plt\n'), ((16927, 16937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16935, 16937), True, 'import matplotlib.pyplot as plt\n'), ((17250, 17271), 'numpy.ones', 'np.ones', (['window_width'], {}), '(window_width)\n', (17257, 17271), True, 'import numpy as np\n'), ((20201, 20222), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (20214, 20222), True, 'import numpy as np\n'), ((20238, 20259), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (20251, 20259), True, 'import numpy as np\n'), ((21478, 21517), 'numpy.array', 'np.array', (['(r_points + l_points)', 'np.uint8'], {}), '(r_points + l_points, np.uint8)\n', (21486, 21517), True, 'import numpy as np\n'), ((21537, 21560), 'numpy.zeros_like', 'np.zeros_like', (['template'], {}), '(template)\n', (21550, 21560), True, 'import numpy as np\n'), ((21908, 
21955), 'cv2.addWeighted', 'cv2.addWeighted', (['warpage', '(1)', 'template', '(0.8)', '(0.0)'], {}), '(warpage, 1, template, 0.8, 0.0)\n', (21923, 21955), False, 'import cv2\n'), ((22173, 22200), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (22183, 22200), True, 'import numpy as np\n'), ((22217, 22246), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (22227, 22246), True, 'import numpy as np\n'), ((22424, 22462), 'numpy.linspace', 'np.linspace', (['(0)', '(height - 1)'], {'num': 'height'}), '(0, height - 1, num=height)\n', (22435, 22462), True, 'import numpy as np\n'), ((23105, 23160), 'numpy.polyfit', 'np.polyfit', (['(y_vals * ym_per_pix)', '(x_vals * xm_per_pix)', '(2)'], {}), '(y_vals * ym_per_pix, x_vals * xm_per_pix, 2)\n', (23115, 23160), True, 'import numpy as np\n'), ((23174, 23188), 'numpy.max', 'np.max', (['y_vals'], {}), '(y_vals)\n', (23180, 23188), True, 'import numpy as np\n'), ((23763, 23806), 'numpy.average', 'np.average', (['[left_curverad, right_curverad]'], {}), '([left_curverad, right_curverad])\n', (23773, 23806), True, 'import numpy as np\n'), ((24394, 24415), 'numpy.copy', 'np.copy', (['undist_image'], {}), '(undist_image)\n', (24401, 24415), True, 'import numpy as np\n'), ((24564, 24608), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (24573, 24608), True, 'import numpy as np\n'), ((24845, 24877), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (24854, 24877), True, 'import numpy as np\n'), ((25333, 25423), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(undist_image.shape[1], undist_image.shape[0])'], {}), '(color_warp, Minv, (undist_image.shape[1], undist_image.\n shape[0]))\n', (25352, 25423), False, 'import cv2\n'), ((25481, 25526), 'cv2.addWeighted', 'cv2.addWeighted', (['new_copy', '(1)', 'newwarp', '(0.7)', '(0)'], {}), '(new_copy, 1, newwarp, 0.7, 0)\n', (25496, 25526), False, 'import cv2\n'), ((1151, 1212), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['image_copy', '(nx, ny)', 'corners', 'ret'], {}), '(image_copy, (nx, ny), corners, ret)\n', (1176, 1212), False, 'import cv2\n'), ((3563, 3603), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (3576, 3603), False, 'import cv2\n'), ((12497, 12553), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (12506, 12553), True, 'import numpy as np\n'), ((12804, 12835), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (12813, 12835), True, 'import numpy as np\n'), ((14163, 14263), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (14176, 14263), False, 'import cv2\n'), ((14290, 14392), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (14303, 14392), False, 'import cv2\n'), ((16321, 16349), 'numpy.absolute', 'np.absolute', (['(2 * left_fit[0])'], {}), '(2 * left_fit[0])\n', (16332, 16349), True, 'import numpy as np\n'), ((16451, 16480), 
'numpy.absolute', 'np.absolute', (['(2 * right_fit[0])'], {}), '(2 * right_fit[0])\n', (16462, 16480), True, 'import numpy as np\n'), ((18900, 18932), 'numpy.convolve', 'np.convolve', (['window', 'image_layer'], {}), '(window, image_layer)\n', (18911, 18932), True, 'import numpy as np\n'), ((21616, 21665), 'cv2.merge', 'cv2.merge', (['(zero_channel, template, zero_channel)'], {}), '((zero_channel, template, zero_channel))\n', (21625, 21665), False, 'import cv2\n'), ((21798, 21833), 'numpy.dstack', 'np.dstack', (['(warped, warped, warped)'], {}), '((warped, warped, warped))\n', (21807, 21833), True, 'import numpy as np\n'), ((21975, 22003), 'numpy.concatenate', 'np.concatenate', (['left_indices'], {}), '(left_indices)\n', (21989, 22003), True, 'import numpy as np\n'), ((22005, 22034), 'numpy.concatenate', 'np.concatenate', (['right_indices'], {}), '(right_indices)\n', (22019, 22034), True, 'import numpy as np\n'), ((23268, 23294), 'numpy.absolute', 'np.absolute', (['(2 * fit_cr[0])'], {}), '(2 * fit_cr[0])\n', (23279, 23294), True, 'import numpy as np\n'), ((24957, 24977), 'numpy.int32', 'np.int32', (['[pts_left]'], {}), '([pts_left])\n', (24965, 24977), True, 'import numpy as np\n'), ((25078, 25099), 'numpy.int32', 'np.int32', (['[pts_right]'], {}), '([pts_right])\n', (25086, 25099), True, 'import numpy as np\n'), ((25199, 25213), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (25206, 25213), True, 'import numpy as np\n'), ((25709, 25741), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (25714, 25741), False, 'from collections import deque\n'), ((25773, 25805), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (25778, 25805), False, 'from collections import deque\n'), ((25838, 25870), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (25843, 25870), False, 'from collections import deque\n'), ((25898, 25930), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (25903, 25930), False, 'from collections import deque\n'), ((25961, 25993), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (25966, 25993), False, 'from collections import deque\n'), ((26151, 26183), 'collections.deque', 'deque', ([], {'maxlen': 'self.SAMPLE_FRAMES'}), '(maxlen=self.SAMPLE_FRAMES)\n', (26156, 26183), False, 'from collections import deque\n'), ((26832, 26972), 'numpy.float32', 'np.float32', (['[[width * 0.45, height * 0.63], [width * 0.1, height * 0.95], [width * 0.94,\n height * 0.95], [width * 0.56, height * 0.63]]'], {}), '([[width * 0.45, height * 0.63], [width * 0.1, height * 0.95], [\n width * 0.94, height * 0.95], [width * 0.56, height * 0.63]])\n', (26842, 26972), True, 'import numpy as np\n'), ((27161, 27279), 'numpy.float32', 'np.float32', (['[[image_offset, 0], [image_offset, height], [width - image_offset, height],\n [width - image_offset, 0]]'], {}), '([[image_offset, 0], [image_offset, height], [width -\n image_offset, height], [width - image_offset, 0]])\n', (27171, 27279), True, 'import numpy as np\n'), ((1442, 1454), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1450, 1454), True, 'import numpy as np\n'), ((1456, 1468), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1464, 1468), True, 'import numpy as np\n'), ((1470, 1482), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1478, 1482), True, 'import numpy as np\n'), 
((6291, 6308), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (6297, 6308), True, 'import numpy as np\n'), ((7591, 7609), 'numpy.max', 'np.max', (['abs_sobelx'], {}), '(abs_sobelx)\n', (7597, 7609), True, 'import numpy as np\n'), ((7659, 7677), 'numpy.max', 'np.max', (['abs_sobely'], {}), '(abs_sobely)\n', (7665, 7677), True, 'import numpy as np\n'), ((7729, 7748), 'numpy.max', 'np.max', (['abs_sobelxy'], {}), '(abs_sobelxy)\n', (7735, 7748), True, 'import numpy as np\n'), ((24508, 24529), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (24521, 24529), True, 'import numpy as np\n'), ((26342, 26390), 'numpy.mean', 'np.mean', (['previous_frames'], {'axis': '(0)', 'dtype': 'np.int32'}), '(previous_frames, axis=0, dtype=np.int32)\n', (26349, 26390), True, 'import numpy as np\n'), ((28294, 28331), 'numpy.zeros_like', 'np.zeros_like', (['color_binary_threshold'], {}), '(color_binary_threshold)\n', (28307, 28331), True, 'import numpy as np\n'), ((29166, 29186), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (29174, 29186), True, 'import numpy as np\n'), ((29210, 29230), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (29218, 29230), True, 'import numpy as np\n'), ((33701, 33721), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (33709, 33721), True, 'import numpy as np\n'), ((33745, 33765), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (33753, 33765), True, 'import numpy as np\n'), ((33921, 33947), 'numpy.copy', 'np.copy', (['undistorted_image'], {}), '(undistorted_image)\n', (33928, 33947), True, 'import numpy as np\n'), ((34447, 34493), 'numpy.mean', 'np.mean', (['previous_fits'], {'axis': '(0)', 'dtype': 'np.int32'}), '(previous_fits, axis=0, dtype=np.int32)\n', (34454, 34493), True, 'import numpy as np\n'), ((34711, 34759), 'numpy.mean', 'np.mean', (['self.curvatures'], {'axis': '(0)', 'dtype': 'np.int32'}), '(self.curvatures, axis=0, dtype=np.int32)\n', (34718, 34759), True, 'import numpy as np\n'), ((34934, 34985), 'numpy.mean', 'np.mean', (['self.center_values'], {'axis': '(0)', 'dtype': 'np.int32'}), '(self.center_values, axis=0, dtype=np.int32)\n', (34941, 34985), True, 'import numpy as np\n'), ((15132, 15165), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (15139, 15165), True, 'import numpy as np\n'), ((15245, 15279), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (15252, 15279), True, 'import numpy as np\n'), ((17854, 17880), 'numpy.convolve', 'np.convolve', (['window', 'l_sum'], {}), '(window, l_sum)\n', (17865, 17880), True, 'import numpy as np\n'), ((19453, 19500), 'numpy.argmax', 'np.argmax', (['conv_signal[l_min_index:l_max_index]'], {}), '(conv_signal[l_min_index:l_max_index])\n', (19462, 19500), True, 'import numpy as np\n'), ((19790, 19837), 'numpy.argmax', 'np.argmax', (['conv_signal[r_min_index:r_max_index]'], {}), '(conv_signal[r_min_index:r_max_index])\n', (19799, 19837), True, 'import numpy as np\n'), ((24718, 24747), 'numpy.vstack', 'np.vstack', (['[left_fitx, ploty]'], {}), '([left_fitx, ploty])\n', (24727, 24747), True, 'import numpy as np\n'), ((31814, 31842), 'numpy.abs', 'np.abs', (['(leftx[0] - rightx[0])'], {}), '(leftx[0] - rightx[0])\n', (31820, 31842), True, 'import numpy as np\n'), ((31876, 31906), 'numpy.abs', 'np.abs', (['(leftx[-1] - rightx[-1])'], {}), '(leftx[-1] - rightx[-1])\n', (31882, 31906), True, 'import numpy as np\n'), ((18007, 18033), 
'numpy.convolve', 'np.convolve', (['window', 'r_sum'], {}), '(window, r_sum)\n', (18018, 18033), True, 'import numpy as np\n'), ((24800, 24830), 'numpy.vstack', 'np.vstack', (['[right_fitx, ploty]'], {}), '([right_fitx, ploty])\n', (24809, 24830), True, 'import numpy as np\n'), ((30844, 30897), 'numpy.dstack', 'np.dstack', (['(warped_image, warped_image, warped_image)'], {}), '((warped_image, warped_image, warped_image))\n', (30853, 30897), True, 'import numpy as np\n'), ((32479, 32491), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (32485, 32491), True, 'import numpy as np\n'), ((32917, 32929), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (32925, 32929), True, 'import numpy as np\n'), ((32931, 32943), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (32939, 32943), True, 'import numpy as np\n')] |
import numpy as np
from distributions.mixture.basemixture import *
from distributions.exponential import Exponential
from scipy.stats import expon
class CensrdExpMix(BaseMix):
def __init__(self, s, t, x, xs=None, xt=None, ws=None, wt=None, wx=None):
self.s = s
self.t = t
self.x = x
if ws is None:
self.ws = np.ones(len(s))
else:
self.ws = ws
if wt is None:
self.wt = np.ones(len(t))
else:
self.wt = wt
if wx is None:
self.wx = np.ones(len(x))
else:
self.wx = wx
if xs is None:
self.xs = np.ones(len(s))*max(s)
else:
self.xs = xs
if xt is None:
self.xt = np.ones(len(t))*max(t)
else:
self.xt = xt
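        # Initial guesses: per-component exponential rate MLEs from the
        # uncensored samples, mixing weight from relative sample counts.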
self.lmb = len(self.t)/sum(self.t)
self.mu = len(self.s)/sum(self.s)
self.u = len(self.s)/(len(self.t)+len(self.s))
@staticmethod
def loglik_(mu, lmb, u, s, t, x):
n_s = len(s)
n_t = len(t)
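        # Exponential log-densities for the two component samples plus a
        # mixture-survival term for the censored observations x.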
return n_s*np.log(mu)-mu*sum(s)+n_t*np.log(lmb)\
-lmb*sum(t) + sum(np.log(u*np.exp(-mu*x)+(1-u)*np.exp(-lmb*x)))
def loglik(self, mu=None, lmb=None, u=None):
if mu is None:
return self.loglik_(self.mu, self.lmb, self.u, self.s, self.t, self.x)
else:
return self.loglik_(mu, lmb, u, self.s, self.t, self.x)
def loglik_prms(self, prms):
#TODO: move to parent class
[mu, lmb, u] = prms
return self.loglik(mu, lmb, u)
@staticmethod
def grad_(mu, lmb, u, s, t, x):
n_s = len(s)
n_t = len(t)
delmu = n_s/mu -sum(s) \
- u*sum(x*np.exp(-mu*x)/(u*np.exp(-mu*x)+(1-u)*np.exp(-lmb*x)))
dellmb = n_t/lmb -sum(t) \
- (1-u)*sum(x*np.exp(-lmb*x)/(u*np.exp(-mu*x)+(1-u)*np.exp(-lmb*x)))
delu = sum((np.exp(-mu*x)-np.exp(-lmb*x))/\
(u*np.exp(-mu*x)+(1-u)*np.exp(-lmb*x)))
return np.array([delmu, dellmb, delu])
def grad(self, mu=None, lmb=None, u=None):
if mu is None:
mu=self.mu; lmb=self.lmb; u=self.u
return CensrdExpMix.grad_(mu, lmb, u, self.s, self.t, self.x)
def grad_prm(self, prms):
#TODO: move to parent class
[mu, lmb, u] = prms
return self.grad(mu, lmb, u)
@staticmethod
def samples_(mu, lmb, u, n_samples, censor):
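        # Simulate the censored mixture: draw from each exponential
        # component, censor draws exceeding their censoring time, and return
        # the uncensored draws (s, t), the censored times x, and the
        # censoring thresholds (xs, xt) of the uncensored draws.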
t_len = int(n_samples*(1-u))
s_len = int(n_samples*u)
t_samples = np.random.exponential(1/lmb,size=t_len)
s_samples = np.random.exponential(1/mu,size=s_len)
if type(censor) == int or type(censor) == float:
t_censor = np.ones(t_len)*censor
s_censor = np.ones(s_len)*censor
elif type(censor) == np.ndarray:
t_censor = np.random.choice(censor, size=t_len)
s_censor = np.random.choice(censor, size=s_len)
x_censored = np.concatenate((t_censor[t_samples>t_censor],\
s_censor[s_samples>s_censor]),axis=0)
t = t_samples[t_samples<t_censor]
s = s_samples[s_samples<s_censor]
xt = t_censor[t_samples<t_censor]
xs = s_censor[s_samples<s_censor]
return s,t,x_censored, xs, xt
def samples(self, n_samples, censor):
        return CensrdExpMix.samples_(self.mu, self.lmb, self.u,
                                     n_samples, censor)
@staticmethod
def estimate_em_(s,t,x,xs,xt,ws=None,wt=None,wx=None,verbose=False):
if ws is None:
ws=np.ones(len(s)); wt=np.ones(len(t)); wx=np.ones(len(x))
#ns=len(s); nt=len(t);
ns=sum(ws); nt=sum(wt)
#mu=len(s)/sum(s); lmb=len(t)/sum(t)
mu=sum(ws)/sum(ws*s); lmb=sum(wt)/sum(wt*t)
mu_prev = mu
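        # EM iteration: refresh the mixing weight u from the components'
        # survival at the censoring points, compute the responsibility tau
        # of the mu-component for each censored point (E-step), then
        # re-estimate both rates (M-step).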
for tt in range(500):
lmb_sur = np.mean(np.exp(-lmb*xt*wt))
mu_sur = np.mean(np.exp(-mu*xs*ws))
u = ns*(1-lmb_sur)/(ns*(1-lmb_sur)+nt*(1-mu_sur))
tau = u*np.exp(-mu*x*wx)/(u*np.exp(-mu*x*wx)+\
(1-u)*np.exp(-lmb*x*wx))
mu = sum(ws)/(sum(s*ws)+sum(tau*x*wx))
lmb = sum(wt)/(sum(t*wt)+sum((1-tau)*x*wx))
if verbose and tt%100 == 0:
print("mu:" + str(mu) + ", lmb:"+str(lmb)+", u:"+str(u))
if(abs(mu_prev-mu)/mu_prev<1e-4):
break
mu_prev = mu
return mu, lmb, u
def estimate_em(self,verbose=False):
self.mu, self.lmb, self.u = self.estimate_em_(self.s,\
self.t, self.x, self.xs, self.xt,
self.ws, self.wt, self.wx, verbose)
@staticmethod
def u_from_lmb_mu(lmb, mu, xs, xt, ws, wt):
ns=sum(ws); nt=sum(wt)
lmb_sur = np.mean(np.exp(-lmb*xt*wt))
mu_sur = np.mean(np.exp(-mu*xs*ws))
u = ns*(1-lmb_sur)/(ns*(1-lmb_sur)+nt*(1-mu_sur))
return u
@staticmethod
def u_from_lmb_mu_simplified(mu, lmb, s, t, tau):
"""
Method u_from_lmb_mu for simple Ricks; without the hassles.
of weights on the data, variables censoring, etc.
(https://www.youtube.com/watch?v=CLqAFIMgpIU)
"""
ns=sum(s); nt=sum(t)
lmb_sur = np.exp(-lmb*tau); mu_sur = np.exp(-mu*tau)
u = ns*(1-lmb_sur)/(ns*(1-lmb_sur)+nt*(1-mu_sur))
return u
@staticmethod
def fit_censored_data(s,t,x_cen,censor):
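        # Fit each component's scale from its own censored sample, then
        # recover the mixing weight u by matching the observed censored
        # fraction to a mixture of the two survival probabilities.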
scale0 = Exponential.fit_censored_data(s, censor)
scale1 = Exponential.fit_censored_data(t, censor)
censor0_prob = 1- expon.cdf(censor, loc=0, scale=scale0)
censor1_prob = 1 - expon.cdf(censor, loc=0, scale=scale1)
u = (len(x_cen)/(len(s) + len(t) + len(x_cen)) - censor1_prob) / (censor0_prob - censor1_prob)
return 1/scale0, 1/scale1, u
from distributions.lomax import Lomax
def lomax_mix():
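    # Demo sketch: simulate a censored two-component Lomax mixture; draws
    # beyond the censoring point are only observed as the censor value.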
k1 = 1.1; lmb1 = 20
k2 = 0.1; lmb2 = 30
n_samples = 10000; u=0.3
censor = 8.0
t_len = int(n_samples*(1-u))
s_len = int(n_samples*u)
t_samples = Lomax.samples_(k1, lmb1, size=t_len)
s_samples = Lomax.samples_(k2, lmb2, size=s_len)
t = t_samples[t_samples<censor]
s = s_samples[s_samples<censor]
x_censored = np.ones(sum(t_samples>censor)+sum(s_samples>censor))
def tst_exponmix_censored_fit(size=100000):
import matplotlib.pyplot as plt
censor = 1.1
lmb0 = .7
lmb1 = 1
u = 0.33
track = []
for i in range(50):
s,t,x_cen,xs,xt = CensrdExpMix.samples_(lmb0,lmb1,u,size,censor)
lmb0_hat, lmb1_hat, u_hat = CensrdExpMix.fit_censored_data(s,t,x_cen, censor)
print("true lambda0 {}, lambda1 {}, mix {}; estimate lamda0 {}, lambda1 {}, mix {}".format(lmb0, lmb1, u, lmb0_hat, lmb1_hat, u_hat))
track.append((lmb0_hat, lmb1_hat, u))
plt.subplot(3, 1, 1)
plt.plot(list(map(lambda x:x[0], track)))
plt.subplot(3, 1, 2)
plt.plot(list(map(lambda x: x[1], track)))
plt.subplot(3, 1, 3)
plt.plot(list(map(lambda x: x[2], track)))
plt.title("Estimation for censored exponential mix. True params: λ1 = {}, λ2 = {}, u = {}".format(lmb0, lmb1, u))
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.random.exponential",
"numpy.ones",
"numpy.array",
"numpy.exp",
"scipy.stats.expon.cdf",
"numpy.random.choice",
"distributions.exponential.Exponential.fit_censored_data",
"numpy.concatenate",
"distributions.lomax.Lomax.s... | [((6039, 6075), 'distributions.lomax.Lomax.samples_', 'Lomax.samples_', (['k1', 'lmb1'], {'size': 't_len'}), '(k1, lmb1, size=t_len)\n', (6053, 6075), False, 'from distributions.lomax import Lomax\n'), ((6092, 6128), 'distributions.lomax.Lomax.samples_', 'Lomax.samples_', (['k2', 'lmb2'], {'size': 's_len'}), '(k2, lmb2, size=s_len)\n', (6106, 6128), False, 'from distributions.lomax import Lomax\n'), ((6822, 6842), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (6833, 6842), True, 'import matplotlib.pyplot as plt\n'), ((6893, 6913), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (6904, 6913), True, 'import matplotlib.pyplot as plt\n'), ((6965, 6985), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (6976, 6985), True, 'import matplotlib.pyplot as plt\n'), ((7155, 7165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7163, 7165), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2047), 'numpy.array', 'np.array', (['[delmu, dellmb, delu]'], {}), '([delmu, dellmb, delu])\n', (2024, 2047), True, 'import numpy as np\n'), ((2526, 2568), 'numpy.random.exponential', 'np.random.exponential', (['(1 / lmb)'], {'size': 't_len'}), '(1 / lmb, size=t_len)\n', (2547, 2568), True, 'import numpy as np\n'), ((2586, 2627), 'numpy.random.exponential', 'np.random.exponential', (['(1 / mu)'], {'size': 's_len'}), '(1 / mu, size=s_len)\n', (2607, 2627), True, 'import numpy as np\n'), ((2954, 3046), 'numpy.concatenate', 'np.concatenate', (['(t_censor[t_samples > t_censor], s_censor[s_samples > s_censor])'], {'axis': '(0)'}), '((t_censor[t_samples > t_censor], s_censor[s_samples >\n s_censor]), axis=0)\n', (2968, 3046), True, 'import numpy as np\n'), ((5239, 5257), 'numpy.exp', 'np.exp', (['(-lmb * tau)'], {}), '(-lmb * tau)\n', (5245, 5257), True, 'import numpy as np\n'), ((5266, 5283), 'numpy.exp', 'np.exp', (['(-mu * tau)'], {}), '(-mu * tau)\n', (5272, 5283), True, 'import numpy as np\n'), ((5438, 5478), 'distributions.exponential.Exponential.fit_censored_data', 'Exponential.fit_censored_data', (['s', 'censor'], {}), '(s, censor)\n', (5467, 5478), False, 'from distributions.exponential import Exponential\n'), ((5496, 5536), 'distributions.exponential.Exponential.fit_censored_data', 'Exponential.fit_censored_data', (['t', 'censor'], {}), '(t, censor)\n', (5525, 5536), False, 'from distributions.exponential import Exponential\n'), ((4776, 4798), 'numpy.exp', 'np.exp', (['(-lmb * xt * wt)'], {}), '(-lmb * xt * wt)\n', (4782, 4798), True, 'import numpy as np\n'), ((4821, 4842), 'numpy.exp', 'np.exp', (['(-mu * xs * ws)'], {}), '(-mu * xs * ws)\n', (4827, 4842), True, 'import numpy as np\n'), ((5563, 5601), 'scipy.stats.expon.cdf', 'expon.cdf', (['censor'], {'loc': '(0)', 'scale': 'scale0'}), '(censor, loc=0, scale=scale0)\n', (5572, 5601), False, 'from scipy.stats import expon\n'), ((5629, 5667), 'scipy.stats.expon.cdf', 'expon.cdf', (['censor'], {'loc': '(0)', 'scale': 'scale1'}), '(censor, loc=0, scale=scale1)\n', (5638, 5667), False, 'from scipy.stats import expon\n'), ((2705, 2719), 'numpy.ones', 'np.ones', (['t_len'], {}), '(t_len)\n', (2712, 2719), True, 'import numpy as np\n'), ((2750, 2764), 'numpy.ones', 'np.ones', (['s_len'], {}), '(s_len)\n', (2757, 2764), True, 'import numpy as np\n'), ((2836, 2872), 'numpy.random.choice', 'np.random.choice', (['censor'], {'size': 't_len'}), '(censor, size=t_len)\n', (2852, 2872), True, 'import numpy as 
np\n'), ((2896, 2932), 'numpy.random.choice', 'np.random.choice', (['censor'], {'size': 's_len'}), '(censor, size=s_len)\n', (2912, 2932), True, 'import numpy as np\n'), ((3843, 3865), 'numpy.exp', 'np.exp', (['(-lmb * xt * wt)'], {}), '(-lmb * xt * wt)\n', (3849, 3865), True, 'import numpy as np\n'), ((3892, 3913), 'numpy.exp', 'np.exp', (['(-mu * xs * ws)'], {}), '(-mu * xs * ws)\n', (3898, 3913), True, 'import numpy as np\n'), ((1915, 1930), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1921, 1930), True, 'import numpy as np\n'), ((1929, 1945), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1935, 1945), True, 'import numpy as np\n'), ((3993, 4013), 'numpy.exp', 'np.exp', (['(-mu * x * wx)'], {}), '(-mu * x * wx)\n', (3999, 4013), True, 'import numpy as np\n'), ((1109, 1120), 'numpy.log', 'np.log', (['lmb'], {}), '(lmb)\n', (1115, 1120), True, 'import numpy as np\n'), ((1964, 1979), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1970, 1979), True, 'import numpy as np\n'), ((1984, 2000), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1990, 2000), True, 'import numpy as np\n'), ((4013, 4033), 'numpy.exp', 'np.exp', (['(-mu * x * wx)'], {}), '(-mu * x * wx)\n', (4019, 4033), True, 'import numpy as np\n'), ((4058, 4079), 'numpy.exp', 'np.exp', (['(-lmb * x * wx)'], {}), '(-lmb * x * wx)\n', (4064, 4079), True, 'import numpy as np\n'), ((1084, 1094), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (1090, 1094), True, 'import numpy as np\n'), ((1161, 1176), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1167, 1176), True, 'import numpy as np\n'), ((1181, 1197), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1187, 1197), True, 'import numpy as np\n'), ((1725, 1740), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1731, 1740), True, 'import numpy as np\n'), ((1840, 1856), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1846, 1856), True, 'import numpy as np\n'), ((1742, 1757), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1748, 1757), True, 'import numpy as np\n'), ((1762, 1778), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1768, 1778), True, 'import numpy as np\n'), ((1858, 1873), 'numpy.exp', 'np.exp', (['(-mu * x)'], {}), '(-mu * x)\n', (1864, 1873), True, 'import numpy as np\n'), ((1878, 1894), 'numpy.exp', 'np.exp', (['(-lmb * x)'], {}), '(-lmb * x)\n', (1884, 1894), True, 'import numpy as np\n')] |
import numpy as np
import scipy.signal
import fastburg as burg
import soundfile as sf
import argparse
def ar_filter_offline(
samples,
pos,
dur,
n=4000, # AR order
    ns=4000, # number of samples to adapt on
):
"""`Freezes` signal using AR model and IIR filtering.
Parameters
----------
    samples : ndarray, shape (nb_samples,)
input signal of `ndim = 1`. Typically a mono audio signal
pos : int
Freeze position in samples
dur : int
Number of samples to extrapolate
n : int, optional
AR model order, defaults to 4000
ns : int, optional
Number of samples `[pos - ns]` that are used to identify the AR model,
defaults to 4000
Returns
-------
ndarray, shape=(nb_samples,)
Extrapolated samples
"""
# create buffer
outBuffer = np.zeros(dur, np.float)
# filter identification
a = np.real(
burg._arburg2(samples[pos - ns - 1:pos], n)[0]
)
# compute initial filter states
z = scipy.signal.lfiltic([1.0], a, samples[pos-(np.arange(1, n+1))])
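    # Extrapolate by running the all-pole AR synthesis filter (1 / A(z))
    # on a zero input; the output is the model's free-running prediction.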
outBuffer, z = scipy.signal.lfilter(
[1.0], a, np.zeros(dur, np.float), zi=z
)
return outBuffer
def ar_filter_block(
samples,
pos,
dur,
n=200,
ns=200,
nd=1024,
):
"""`Freezes` signal using AR model and IIR filtering.
Filtering is realised in a block wise fashion which might reduce
computational complexity.
Parameters
----------
    samples : ndarray, shape (nb_samples,)
input signal of `ndim = 1`. Typically a mono audio signal
pos : int
Freeze position in samples
dur : int
Number of samples to extrapolate
n : int, optional
AR model order, defaults to 200
ns : int, optional
Number of samples `[pos - ns]` that are used to identify the AR model,
defaults to 200
nd: int, optional
Block size in samples, defaults to 1024
Returns
-------
ndarray, shape=(nb_samples,)
Extrapolated samples
"""
# Number of extrapolation blocks in nl * nd
    nl = dur // nd  # integer division: nl sizes the buffer and drives range()
# create buffer
outBuffer = np.zeros(nd * nl, np.float)
# filter identification
a = np.real(burg._arburg2(samples[pos - ns - 1:pos], n)[0])
# compute initial filter states
z = scipy.signal.lfiltic([1], a, samples[pos-(np.arange(1, n+1))])
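    # The filter state z is carried across blocks, so the block-wise output
    # matches one-shot filtering while bounding per-call work.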
for x in range(nl):
y, z = scipy.signal.lfilter(
[1], a, np.zeros(nd, np.float), zi=z
)
outBuffer[x*nd:(x+1)*nd] = y
return outBuffer
def extrapolate(
signal,
pos,
dur,
n_signal=4000,
ns_signal=4000,
):
"""`Freezes` signal using AR model and IIR filtering.
Appends extrapolation to signal. Uses offline computation
Parameters
----------
signal : ndarray, shape (nb_samples,)
input signal of `ndim = 1`. Typically a mono audio signal
pos : int
Freeze position in samples
dur : int
Number of samples to extrapolate
n_signal : int, optional
AR model order, defaults to 4000
ns_signal : int, optional
Number of samples `[pos - ns]` that are used to identify the AR model,
defaults to 4000
Returns
-------
ndarray, shape=(nb_samples,)
Signal with extrapolated samples concatenated
"""
concealed = ar_filter_offline(signal, pos, dur, n=n_signal, ns=ns_signal)
return np.concatenate((signal[:pos], concealed))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Freeze Audio')
parser.add_argument(
'input', help='input file', type=str
)
parser.add_argument(
'output', help='output file', type=str
)
parser.add_argument(
'-n', '--n', type=int, default=4000,
help='Filter order')
parser.add_argument(
'-x', '--x', type=int, default=44100,
help='Extrapolation start sample')
parser.add_argument(
'-d', '--d', type=int, default=441000,
help='Duration of extrapolation in samples')
args = parser.parse_args()
samples, rate = sf.read(args.input, always_2d=True)
samples = np.squeeze(np.mean(samples, axis=1))
out = extrapolate(
samples,
pos=args.x,
dur=args.d,
n_signal=args.n,
ns_signal=args.n,
)
sf.write(args.output, out, samplerate=rate)
| [
"soundfile.read",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"soundfile.write",
"fastburg._arburg2",
"numpy.concatenate"
] | [((879, 902), 'numpy.zeros', 'np.zeros', (['dur', 'np.float'], {}), '(dur, np.float)\n', (887, 902), True, 'import numpy as np\n'), ((2189, 2216), 'numpy.zeros', 'np.zeros', (['(nd * nl)', 'np.float'], {}), '(nd * nl, np.float)\n', (2197, 2216), True, 'import numpy as np\n'), ((3468, 3509), 'numpy.concatenate', 'np.concatenate', (['(signal[:pos], concealed)'], {}), '((signal[:pos], concealed))\n', (3482, 3509), True, 'import numpy as np\n'), ((3552, 3603), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Freeze Audio"""'}), "(description='Freeze Audio')\n", (3575, 3603), False, 'import argparse\n'), ((4163, 4198), 'soundfile.read', 'sf.read', (['args.input'], {'always_2d': '(True)'}), '(args.input, always_2d=True)\n', (4170, 4198), True, 'import soundfile as sf\n'), ((4392, 4435), 'soundfile.write', 'sf.write', (['args.output', 'out'], {'samplerate': 'rate'}), '(args.output, out, samplerate=rate)\n', (4400, 4435), True, 'import soundfile as sf\n'), ((1180, 1203), 'numpy.zeros', 'np.zeros', (['dur', 'np.float'], {}), '(dur, np.float)\n', (1188, 1203), True, 'import numpy as np\n'), ((4224, 4248), 'numpy.mean', 'np.mean', (['samples'], {'axis': '(1)'}), '(samples, axis=1)\n', (4231, 4248), True, 'import numpy as np\n'), ((957, 1000), 'fastburg._arburg2', 'burg._arburg2', (['samples[pos - ns - 1:pos]', 'n'], {}), '(samples[pos - ns - 1:pos], n)\n', (970, 1000), True, 'import fastburg as burg\n'), ((2262, 2305), 'fastburg._arburg2', 'burg._arburg2', (['samples[pos - ns - 1:pos]', 'n'], {}), '(samples[pos - ns - 1:pos], n)\n', (2275, 2305), True, 'import fastburg as burg\n'), ((2500, 2522), 'numpy.zeros', 'np.zeros', (['nd', 'np.float'], {}), '(nd, np.float)\n', (2508, 2522), True, 'import numpy as np\n'), ((1099, 1118), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1108, 1118), True, 'import numpy as np\n'), ((2397, 2416), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (2406, 2416), True, 'import numpy as np\n')] |
import flash
import numpy as np
import pandas as pd
import torch
from autofe.feature_engineering.groupby import get_category_columns, get_numerical_columns
from autofe.get_feature import generate_cross_feature, get_cross_columns, get_groupby_total_data
from flash.tabular import TabularClassificationData, TabularClassifier
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
if __name__ == '__main__':
root_path = './data/processed_data/adult/'
test_datafile = root_path + 'test.csv'
train_data = pd.read_csv(root_path + 'train.csv')
len_train = len(train_data)
test_data = pd.read_csv(root_path + 'test.csv')
total_data = pd.concat([train_data, test_data]).reset_index(drop=True)
target_name = 'target'
cat_col_names = get_category_columns(total_data, target_name)
num_col_names = get_numerical_columns(total_data, target_name)
X_train = train_data.drop(target_name, axis=1)
y_train = train_data[target_name]
X_test = test_data.drop(target_name, axis=1)
y_test = test_data[target_name]
# tabnet
# 1. Create the DataModule
datamodule = TabularClassificationData.from_data_frame(
categorical_fields=cat_col_names,
numerical_fields=num_col_names,
target_fields=target_name,
train_data_frame=train_data,
val_data_frame=test_data,
batch_size=128,
)
# 2. Build the task
model = TabularClassifier.from_data(datamodule)
# 3. Create the trainer and train the model
trainer = flash.Trainer(max_epochs=10, gpus=torch.cuda.device_count())
trainer.fit(model, datamodule=datamodule)
# 4. Generate predictions from a CSV
preds_mat = model.predict(test_datafile)
preds_mat = np.array(preds_mat)
preds_prob = preds_mat[:, 1]
print(preds_mat.shape)
preds = np.argmax(preds_mat, axis=1)
acc = accuracy_score(y_test, preds)
auc = roc_auc_score(y_test, preds_prob)
f1 = f1_score(y_test, preds)
print(type(preds))
print(f'Accuracy: {acc}. F1: {f1}. ROC_AUC: {auc}')
# tabnet + groupby
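    # Feature augmentation: pairwise crosses of the categorical columns
    # plus per-group aggregates (min/max/sum/mean/std/count), one-hot
    # encoded before retraining the same TabNet pipeline.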
threshold = 0.9
k = 5
methods = ['min', 'max', 'sum', 'mean', 'std', 'count']
cross_col_names = get_cross_columns(cat_col_names)
total_data = generate_cross_feature(
total_data, crossed_cols=cross_col_names)
total_data_groupby = get_groupby_total_data(total_data, target_name,
threshold, k, methods)
total_data_groupby = pd.get_dummies(total_data_groupby).fillna(0)
total_data_groupby.to_csv(root_path + 'adult_groupby.csv', index=False)
cat_col_names = get_category_columns(total_data_groupby, target_name)
num_col_names = get_numerical_columns(total_data_groupby, target_name)
train_data = total_data_groupby.iloc[:len_train]
test_data = total_data_groupby.iloc[len_train:]
X_train = train_data.drop(target_name, axis=1)
y_train = train_data[target_name]
X_test = test_data.drop(target_name, axis=1)
y_test = test_data[target_name]
test_data.to_csv(test_datafile, index=None)
# tabnet
# 1. Create the DataModule
datamodule = TabularClassificationData.from_data_frame(
categorical_fields=cat_col_names,
numerical_fields=num_col_names,
target_fields=target_name,
train_data_frame=train_data,
val_data_frame=test_data,
batch_size=128,
)
# 2. Build the task
model = TabularClassifier.from_data(datamodule)
# 3. Create the trainer and train the model
trainer = flash.Trainer(max_epochs=10, gpus=torch.cuda.device_count())
trainer.fit(model, datamodule=datamodule)
# 4. Generate predictions from a CSV
preds_mat = model.predict(test_datafile)
preds_mat = np.array(preds_mat)
preds_prob = preds_mat[:, 1]
print(preds_mat.shape)
preds = np.argmax(preds_mat, axis=1)
acc = accuracy_score(y_test, preds)
auc = roc_auc_score(y_test, preds_prob)
f1 = f1_score(y_test, preds)
print(type(preds))
print(f'Accuracy: {acc}. F1: {f1}. ROC_AUC: {auc}')
| [
"flash.tabular.TabularClassifier.from_data",
"numpy.argmax",
"pandas.read_csv",
"pandas.get_dummies",
"sklearn.metrics.accuracy_score",
"autofe.feature_engineering.groupby.get_category_columns",
"autofe.feature_engineering.groupby.get_numerical_columns",
"autofe.get_feature.get_groupby_total_data",
... | [((527, 563), 'pandas.read_csv', 'pd.read_csv', (["(root_path + 'train.csv')"], {}), "(root_path + 'train.csv')\n", (538, 563), True, 'import pandas as pd\n'), ((612, 647), 'pandas.read_csv', 'pd.read_csv', (["(root_path + 'test.csv')"], {}), "(root_path + 'test.csv')\n", (623, 647), True, 'import pandas as pd\n'), ((771, 816), 'autofe.feature_engineering.groupby.get_category_columns', 'get_category_columns', (['total_data', 'target_name'], {}), '(total_data, target_name)\n', (791, 816), False, 'from autofe.feature_engineering.groupby import get_category_columns, get_numerical_columns\n'), ((837, 883), 'autofe.feature_engineering.groupby.get_numerical_columns', 'get_numerical_columns', (['total_data', 'target_name'], {}), '(total_data, target_name)\n', (858, 883), False, 'from autofe.feature_engineering.groupby import get_category_columns, get_numerical_columns\n'), ((1122, 1335), 'flash.tabular.TabularClassificationData.from_data_frame', 'TabularClassificationData.from_data_frame', ([], {'categorical_fields': 'cat_col_names', 'numerical_fields': 'num_col_names', 'target_fields': 'target_name', 'train_data_frame': 'train_data', 'val_data_frame': 'test_data', 'batch_size': '(128)'}), '(categorical_fields=cat_col_names,\n numerical_fields=num_col_names, target_fields=target_name,\n train_data_frame=train_data, val_data_frame=test_data, batch_size=128)\n', (1163, 1335), False, 'from flash.tabular import TabularClassificationData, TabularClassifier\n'), ((1419, 1458), 'flash.tabular.TabularClassifier.from_data', 'TabularClassifier.from_data', (['datamodule'], {}), '(datamodule)\n', (1446, 1458), False, 'from flash.tabular import TabularClassificationData, TabularClassifier\n'), ((1730, 1749), 'numpy.array', 'np.array', (['preds_mat'], {}), '(preds_mat)\n', (1738, 1749), True, 'import numpy as np\n'), ((1822, 1850), 'numpy.argmax', 'np.argmax', (['preds_mat'], {'axis': '(1)'}), '(preds_mat, axis=1)\n', (1831, 1850), True, 'import numpy as np\n'), ((1861, 1890), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1875, 1890), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((1901, 1934), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'preds_prob'], {}), '(y_test, preds_prob)\n', (1914, 1934), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((1944, 1967), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1952, 1967), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((2183, 2215), 'autofe.get_feature.get_cross_columns', 'get_cross_columns', (['cat_col_names'], {}), '(cat_col_names)\n', (2200, 2215), False, 'from autofe.get_feature import generate_cross_feature, get_cross_columns, get_groupby_total_data\n'), ((2233, 2297), 'autofe.get_feature.generate_cross_feature', 'generate_cross_feature', (['total_data'], {'crossed_cols': 'cross_col_names'}), '(total_data, crossed_cols=cross_col_names)\n', (2255, 2297), False, 'from autofe.get_feature import generate_cross_feature, get_cross_columns, get_groupby_total_data\n'), ((2333, 2403), 'autofe.get_feature.get_groupby_total_data', 'get_groupby_total_data', (['total_data', 'target_name', 'threshold', 'k', 'methods'], {}), '(total_data, target_name, threshold, k, methods)\n', (2355, 2403), False, 'from autofe.get_feature import generate_cross_feature, get_cross_columns, get_groupby_total_data\n'), ((2619, 2672), 
'autofe.feature_engineering.groupby.get_category_columns', 'get_category_columns', (['total_data_groupby', 'target_name'], {}), '(total_data_groupby, target_name)\n', (2639, 2672), False, 'from autofe.feature_engineering.groupby import get_category_columns, get_numerical_columns\n'), ((2693, 2747), 'autofe.feature_engineering.groupby.get_numerical_columns', 'get_numerical_columns', (['total_data_groupby', 'target_name'], {}), '(total_data_groupby, target_name)\n', (2714, 2747), False, 'from autofe.feature_engineering.groupby import get_category_columns, get_numerical_columns\n'), ((3139, 3352), 'flash.tabular.TabularClassificationData.from_data_frame', 'TabularClassificationData.from_data_frame', ([], {'categorical_fields': 'cat_col_names', 'numerical_fields': 'num_col_names', 'target_fields': 'target_name', 'train_data_frame': 'train_data', 'val_data_frame': 'test_data', 'batch_size': '(128)'}), '(categorical_fields=cat_col_names,\n numerical_fields=num_col_names, target_fields=target_name,\n train_data_frame=train_data, val_data_frame=test_data, batch_size=128)\n', (3180, 3352), False, 'from flash.tabular import TabularClassificationData, TabularClassifier\n'), ((3436, 3475), 'flash.tabular.TabularClassifier.from_data', 'TabularClassifier.from_data', (['datamodule'], {}), '(datamodule)\n', (3463, 3475), False, 'from flash.tabular import TabularClassificationData, TabularClassifier\n'), ((3747, 3766), 'numpy.array', 'np.array', (['preds_mat'], {}), '(preds_mat)\n', (3755, 3766), True, 'import numpy as np\n'), ((3839, 3867), 'numpy.argmax', 'np.argmax', (['preds_mat'], {'axis': '(1)'}), '(preds_mat, axis=1)\n', (3848, 3867), True, 'import numpy as np\n'), ((3878, 3907), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (3892, 3907), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((3918, 3951), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'preds_prob'], {}), '(y_test, preds_prob)\n', (3931, 3951), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((3961, 3984), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (3969, 3984), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((665, 699), 'pandas.concat', 'pd.concat', (['[train_data, test_data]'], {}), '([train_data, test_data])\n', (674, 699), True, 'import pandas as pd\n'), ((1555, 1580), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1578, 1580), False, 'import torch\n'), ((2477, 2511), 'pandas.get_dummies', 'pd.get_dummies', (['total_data_groupby'], {}), '(total_data_groupby)\n', (2491, 2511), True, 'import pandas as pd\n'), ((3572, 3597), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3595, 3597), False, 'import torch\n')] |