prompt stringlengths 19 1.03M | completion stringlengths 4 2.12k | api stringlengths 8 90 |
|---|---|---|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from dashboard_helpers import *
from textwrap import wrap
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MultipleLocator
import matplotlib.ticker as ticker
import math
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from scipy.signal import savgol_filter
from sklearn.metrics import r2_score
import streamlit as st
from helpers import *
def normeren(df, what_to_norm):
    """Add a normalised copy of each requested column to *df*.

    Depending on the module-level ``how_to_norm`` flag, each column in
    ``what_to_norm`` is scaled either so that its maximum equals
    ``scale_to_x`` ("max" mode) or relative to the value at the centre of
    the smoothing window (row ``WDW2 // 2``).

    Parameters
    ----------
    df : pandas.DataFrame
        Frame extended in place with ``<column>_normed`` columns.
    what_to_norm : list of str
        Names of the columns to normalise.

    Returns
    -------
    tuple
        ``(df, normed_columns)`` where ``normed_columns`` lists the
        generated column names.

    NOTE(review): relies on the module-level globals ``scale_to_x``,
    ``WDW2`` and ``how_to_norm`` (star-imported helpers) — confirm they
    are always defined before this function runs.
    """
    normed_columns = []
    for column in what_to_norm:
        maxvalue = df[column].max() / scale_to_x
        firstvalue = df[column].iloc[int(WDW2 / 2)] / scale_to_x
        name = f"{column}_normed"
        divisor = maxvalue if how_to_norm == "max" else firstvalue
        # Vectorised division replaces the original O(n) row-by-row
        # `.loc` loop; the arithmetic is identical.
        df[name] = df[column] / divisor
        normed_columns.append(name)
        print(f"{name} generated")
    return df, normed_columns
def smooth_columnlist(df, columnlist, t, WDW2, centersmooth):
""" _ _ _ """
c_smoothen = []
wdw_savgol = 7
#if __name__ = "covid_dashboard_rcsmit":
# global WDW2, centersmooth, show_scenario
# WDW2=7
# st.write(__name__)
# centersmooth = False
# show_scenario = False
if columnlist is not None:
if type(columnlist) == list:
columnlist_ = columnlist
else:
columnlist_ = [columnlist]
# print (columnlist)
for c in columnlist_:
print(f"Smoothening {c}")
if t == "SMA":
new_column = c + "_SMA_" + str(WDW2)
print("Generating " + new_column + "...")
df[new_column] = (
df.iloc[:, df.columns.get_loc(c)]
.rolling(window=WDW2, center=centersmooth)
.mean()
)
elif t == "savgol":
new_column = c + "_savgol_" + str(WDW2)
print("Generating " + new_column + "...")
df[new_column] = df[c].transform(lambda x: savgol_filter(x, WDW2, 2))
elif t == None:
new_column = c + "_unchanged_"
df[new_column] = df[c]
print("Added " + new_column + "...~")
else:
print("ERROR in smooth_columnlist")
st.stop()
c_smoothen.append(new_column)
return df, c_smoothen
def graph_daily_normed(
    df, what_to_show_day_l, what_to_show_day_r, how_to_smoothen, how_to_display, WDW2, centersmooth
):
    """Show graphs of the normalised versions of the requested columns.

    Both the left- and right-axis column lists are first smoothed with
    ``smooth_columnlist`` and then scaled with ``normeren``; the resulting
    column names are handed to ``graph_daily``.  Aborts the Streamlit run
    when nothing was selected for the left axis.
    """
    if what_to_show_day_l is None:
        st.warning("Choose something")
        st.stop()

    def _smooth_and_norm(columns):
        # Smooth first, then normalise; returns the generated column names.
        nonlocal df
        df, smoothed = smooth_columnlist(df, columns, how_to_smoothen, WDW2, centersmooth)
        df, normed = normeren(df, smoothed)
        return normed

    normed_left = _smooth_and_norm(what_to_show_day_l)
    normed_right = _smooth_and_norm(what_to_show_day_r)
    graph_daily(df, normed_left, normed_right, None, how_to_display)
def graph_day(df, what_to_show_l, what_to_show_r, how_to_smooth, title, t, WDW2, centersmooth, FROM,UNTIL):
""" _ _ _ """
#st.write(f"t = {t}")
df_temp = | pd.DataFrame(columns=["date"]) | pandas.DataFrame |
# coding: utf-8
# # Visualize Networks
# In[78]:
import pandas as pd
import igraph as ig
from timeUtils import clock, elapsed, getTimeSuffix, getDateTime, addDays, printDateTime, getFirstLastDay
from pandasUtils import castDateTime, castInt64, cutDataFrameByDate, convertToDate, isSeries, isDataFrame, getColData
from network import makeNetworkDir, distHash
#import geohash
import pygeohash as geohash
from haversine import haversine
from vertexData import vertex
from edgeData import edge
from networkCategories import categories
def getLoc(ghash):
    """Decode a geohash into a [latitude, longitude] pair rounded to 4 decimals."""
    latitude, longitude = geohash.decode_exactly(ghash)[:2]
    return [round(latitude, 4), round(longitude, 4)]
def getVertexViews(dn, vtxmetrics, homeMetrics, metric='HubScore'):
    """Build per-vertex colour, size and shape lists for plotting a network.

    Vertices are binned by quantile of the chosen centrality *metric* and
    coloured with a cubehelix palette; POI vertices are drawn as
    triangles and the home vertex as a rectangle.

    NOTE(review): ``vtxmetrics`` is accepted but never used in this body —
    confirm whether it can be dropped.
    """
    from numpy import tanh, amax
    from pandas import Series
    from seaborn import cubehelix_palette
    from seaborn import color_palette, light_palette
    g = dn.getNetwork()
    # Pick the per-vertex metric that drives colour and size.
    if metric == "HubScore":
        vertexData = Series(g.hub_score())
    elif metric == "Centrality":
        # NOTE(review): igraph exposes specific centrality methods
        # (closeness/betweenness/...); verify `centrality()` exists here.
        vertexData = Series(g.centrality())
    elif metric == "Degree":
        vertexData = Series(g.degree())
    else:
        raise ValueError("metric {0} was not recognized".format(metric))
    # Quantile edges used to bin vertices into colour bands.
    qvals = vertexData.quantile([0, 0.687, 0.955, 0.997, 1])
    cols = cubehelix_palette(n_colors=7, start=2.8, rot=.1)
    #cols = color_palette("OrRd", 7)
    #cols = cubehelix_palette(7)
    vcols = Series(vertexData.shape[0]*[0])
    for i in range(1, len(qvals)):
        # Band i-1 holds vertices between consecutive quantile edges.
        idx = (vertexData <= qvals.iloc[i]) & (vertexData > qvals.iloc[i-1])
        vcols[idx] = i-1
    vcols = [cols[i] for i in vcols.values]
    # Vertex 0 is highlighted with a fixed palette entry.
    vtx0ID = dn.getVertexID(0)
    vcols[vtx0ID] = cols[5]
    if metric == "HubScore":
        # Scale hub scores directly; never draw smaller than 1.
        vsize = [amax([30*x,1]) for x in vertexData]
    else:
        # Squash unbounded metrics into (0, 30) with tanh.
        vsize = [30*tanh(x/(0.5*vertexData.max())) for x in vertexData]
    vshape = ['circle' for i in range(len(vsize))]
    for v in g.vs:
        vtxID = v.index
        vertex = dn.vertices[vtxID]
        try:
            # Points of interest are drawn as downward triangles.
            if vertex.getAttrDataByKey("POI") != "Normal":
                vshape[vtxID] = 'triangle-down'
        except:
            # NOTE(review): bare except silently skips vertices without a
            # "POI" attribute — consider catching the specific exception.
            pass
    # The home vertex gets a rectangle and a minimum size of 10.
    homeID = homeMetrics['Vtx']
    vshape[homeID] = 'rectangle'
    vsize[homeID] = max(10, vsize[homeID])
    return vcols, vsize, vshape
def getEdgeViews(dn, edgemetrics):
from numpy import tanh, amax, linspace, power, log1p, log10, tanh
from pandas import Series
from seaborn import color_palette, light_palette
from seaborn import cubehelix_palette, hls_palette
g = dn.getNetwork()
edgeData = Series(g.es['weight'])
nEdges = edgeData.shape[0]
if nEdges <= 0:
return None, None
minmaxWeight = [0.0, 2.5]
print("Number of Edges: {0}".format(nEdges))
nRange=5
if nEdges > 100000:
minmaxWeight[1] = 2
nRange=6
weightSize = [power(x,11) for x in linspace(minmaxWeight[0], minmaxWeight[1], nRange)]
elif nEdges > 50000:
minmaxWeight[1] = 2
weightSize = [power(x,9) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 25000:
weightSize = [power(x,8) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 10000:
weightSize = [power(x,7) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 2000:
weightSize = [power(x,6) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 1000:
weightSize = [power(x,5) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 500:
weightSize = [power(x,4) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
elif nEdges > 100:
weightSize = [power(x,3) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
else:
weightSize = [power(x,2) for x in linspace(minmaxWeight[0], minmaxWeight[1], 5)]
scale = 2.5/amax(weightSize)
weightSize = [x*scale for x in weightSize]
#print(edgeData)
if nRange == 5:
qvals = edgeData.quantile([0, 0.687, 0.955, 0.997, 1])
elif nRange == 6:
qvals = edgeData.quantile([0, 0.687, 0.955, 0.997, 0.999, 1])
else:
raise ValueError("Did not reconigze range {0}".format(nRange))
#cols = cubehelix_palette(n_colors=5, start=2.8, rot=.1)
cols = color_palette("OrRd", 5)
cols_d = color_palette("OrRd_d", 5)
maxCol = hls_palette(8, l=.3, s=.8)
ecols = | Series(edgeData.shape[0]*[0]) | pandas.Series |
from re import I
import datetime as dt
import ib_insync
import pandas as pd
import backoff
from redis import Redis
from rq import Queue
from arctic.exceptions import OverlappingDataException
from ib_insync import Stock, IB, Index, Contract, Ticker, BarDataList
from dateutil.tz import tzlocal, gettz
from typing import Tuple, List, Optional, cast
from functools import reduce
from trader.common.data import TickData, ContractMetadata
from trader.common.logging_helper import setup_logging
from trader.common.helpers import dateify, day_iter, pdt
from trader.common.listener_helpers import Helpers
from trader.listeners.ibaiorx import IBAIORx, WhatToShow
logging = setup_logging(module_name='ibhistoryworker')
class IBHistoryWorker():
def __init__(self, ib_client: IB):
    """Store the connected ib_insync client used for all history requests."""
    self.ib_client = ib_client
def __handle_error(self, reqId, errorCode, errorString, contract):
    """ib_insync errorEvent callback: log everything except routine status codes.

    NOTE(review): declares ``global error_code`` but never assigns it, so
    the ``error_code`` checks elsewhere are never influenced by this
    handler — confirm whether an assignment was intended here.
    """
    global error_code
    # ignore the following:
    # ib error reqId: -1 errorCode 2104 errorString Market data farm connection is OK:usfarm.nj contract None
    # 2104/2106/2158 are "farm connection is OK" status messages, not real errors.
    if errorCode == 2104 or errorCode == 2158 or errorCode == 2106:
        return
    logging.warning('ib error reqId: {} errorCode {} errorString {} contract {}'.format(reqId,
                                                                                        errorCode,
                                                                                        errorString,
                                                                                        contract))
# @backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=240)
async def get_contract_history(
self,
contract: Contract,
what_to_show: WhatToShow,
bar_size: str,
start_date: dt.datetime,
end_date: dt.datetime,
filter_between_dates: bool = True,
tz_info: str = 'America/New_York'
) -> pd.DataFrame:
global has_error
error_code = 0
if self.__handle_error not in self.ib_client.errorEvent:
self.ib_client.errorEvent += self.__handle_error
if not self.ib_client.isConnected():
raise ConnectionError()
# 16 hours, 4am to 8pm
# duration_step_size = '57600 S'
# 24 hours
duration_step_size = '86400 S'
if bar_size == '1 day':
duration_step_size = '10 Y'
if bar_size == '1 hour':
duration_step_size = '4 Y'
if bar_size == '2 hours':
duration_step_size = '1 Y'
# we say that the 'end date' is the start of the day after
start_date = dateify(start_date, timezone=tz_info)
end_date_offset = dateify(end_date, timezone=tz_info) + dt.timedelta(days=1)
current_date = end_date_offset
local_tz = dt.datetime.now(dt.timezone.utc).astimezone().tzinfo
logging.info('get_contract_history {} {} {} {}'.format(contract.conId, str(what_to_show), pdt(start_date), pdt(end_date)))
bars: List[pd.DataFrame] = []
while current_date >= start_date:
result = await self.ib_client.reqHistoricalDataAsync(
contract,
endDateTime=current_date,
durationStr=duration_step_size,
barSizeSetting=bar_size,
whatToShow=str(what_to_show),
useRTH=False,
formatDate=1,
keepUpToDate=False,
)
# skip if 'no data' returned
if error_code > 0 and error_code != 162:
raise Exception('error_code: {}'.format(error_code))
df_result = ib_insync.util.df(result).set_index('date')
df_result['bar_size'] = bar_size
df_result.rename({'barCount': 'bar_count'}, inplace=True)
# arctic requires timezone to be set
df_result.index = pd.to_datetime(df_result.index) # type: ignore
df_result.index = df_result.index.tz_localize(local_tz) # type: ignore
df_result.index = df_result.index.tz_convert(tz_info)
df_result.sort_index(ascending=True, inplace=True)
# add to the bars list
bars.append(df_result)
pd_date = | pd.to_datetime(df_result.index[0]) | pandas.to_datetime |
import pandas as pd
import teradatasql
import logging
from execution_framework.utils.common_utils import separate_schema_table
from typing import Union
from pyhive import hive
logging.getLogger('pyhive').setLevel(logging.CRITICAL)
logger = logging.getLogger('DB UTILS')
def teradata_connection(user_name: str, password: str, host: str = '10.226.0.34',
                        database: str = 'dbi_min', **kwargs) -> teradatasql.TeradataConnection:
    """
    Open a connection to a Teradata database.

    :param user_name: Teradata user name
    :param password: Teradata password
    :param host: host Teradata runs on
    :param database: database to use by default
    :param kwargs: extra keyword arguments forwarded to ``teradatasql.connect``
    :return: an open Teradata connection
    """
    try:
        return teradatasql.connect(host=host, user=user_name, password=password,
                                   database=database, **kwargs)
    except Exception:
        # Log with traceback, then let the caller decide how to recover.
        logger.error("Can't connect to Teradata database, check traceback", exc_info=True)
        raise
def hive_connection(host: str = '10.4.88.31', port: int = 10000, database: str = 'dev_perm',
                    configuration: dict = None) -> hive.Connection:
    """
    Open a HiveServer2 connection with sensible default session settings.

    :param host: host HiveServer2 runs on
    :param port: port HiveServer2 runs on
    :param database: database to use by default
    :param configuration: Hive session settings; defaults are applied when None
    :return: an open Hive connection
    """
    if configuration is None:
        # Defaults: plain column names, compressed output, positional
        # group/order aliases and a large fetch size for faster reads.
        configuration = {'hive.resultset.use.unique.column.names': 'false',
                         'hive.exec.compress.output': 'true',
                         'hive.groupby.orderby.position.alias': 'true',
                         'hive.server2.thrift.resultset.default.fetch.size': '10000'}
    try:
        return hive.Connection(host=host, port=port, database=database, configuration=configuration)
    except Exception:
        logger.error(f"Can't create Hive Connection to {host} host and {port} port")
        raise
def execute_db_statement(database_connection: Union[teradatasql.TeradataConnection, hive.Connection],
                         statement: str) -> None:
    """
    Execute a single SQL statement on a Teradata or Hive connection.

    :param database_connection: open Hive or Teradata database connection
    :param statement: statement to be executed
    :return: None
    """
    # Cursor creation and statement execution are guarded separately so
    # the logs make the failing stage obvious.
    try:
        db_cursor = database_connection.cursor()
    except Exception:
        logger.error("Can't initialize the cursor Object", exc_info=True)
        raise
    try:
        logger.debug(f"Executing '{statement}'")
        db_cursor.execute(statement)
    except Exception:
        logger.error(f"Can't execute statement {statement}", exc_info=True)
        raise
def execute_store_procedure(database_connection: teradatasql.TeradataConnection, metadata_procedure: dict):
    """
    Execute a stored procedure in a Teradata database.

    :param database_connection: Teradata database connection
    :param metadata_procedure: dict with the procedure 'name' and an
        ordered 'parameters' mapping whose values are passed positionally
    :return:
    """
    sp_name = metadata_procedure['name']
    parameters = list(metadata_procedure['parameters'].values())
    logger.info(f"Executing {sp_name} procedure with parameter(s) {parameters}")
    try:
        # Create the cursor and invoke the procedure in one guarded step.
        database_connection.cursor().callproc(sp_name, parameters)
    except Exception:
        logger.error("Can't execute store procedure in Teradata", exc_info=True)
        raise
    logger.info(f"Store procedure executed successfully")
def check_data_exits_in_table(table_name: str, period_column: str, period: str,
                              database_connection: Union[teradatasql.TeradataConnection, hive.Connection]) -> bool:
    """
    Given a certain period, check if data with that period exists in table

    NOTE(review): the function name has a typo ("exits" for "exists"); it
    is kept for backward compatibility with existing callers.

    :param table_name: table name in Teradata
    :param period_column: period column name in table
    :param period: period to check in string format YYYY-MM-DD
    :param database_connection: Hive or Teradata database connection
    :return: True if data for the specific period exists
    """
    # Generate query to validate if data exists
    # SECURITY(review): table/column/period are interpolated directly into
    # SQL — only call this with trusted, internally generated values.
    validate_query = f"SELECT COUNT(*) total_rows FROM {table_name} WHERE {period_column}='{period}'"
    logger.debug(f'Executing this query to check if there is information : {validate_query}')
    # Execute query and save results in pandas dataframe
    result = read_query_to_df(database_connection, validate_query)
    # Check results: a non-zero count means the period is already loaded.
    number_rows = result['total_rows'][0]
    if number_rows != 0:
        logger.debug(f"Table '{table_name}' has {number_rows} rows in {period} period")
        return True
    else:
        return False
def read_query_to_df(database_connection: Union[teradatasql.TeradataConnection, hive.Connection], query: str,
arraysize: int = 10000) -> pd.DataFrame:
"""
Executes query in database and result in dataframe
:param database_connection: Hive or Teradata database connection
:param query: sql query to be executed in database
:param arraysize: cursor array size to fetch results
:return: dataframe with result of query execution
"""
# Create cursor
cursor = database_connection.cursor()
cursor.arraysize = arraysize
logger.debug(f'Cursor array size of connection set to {cursor.arraysize}')
# Execute query
logger.debug(f"Starting to execute '{query}'")
try:
cursor.execute(query)
except Exception:
logger.error("Can't execute query. Check if it is correct", exc_info=True)
raise
logger.debug("Finished query execution")
# Fetch all results
logger.debug("Starting to fetch query results")
results = cursor.fetchall()
logger.debug('Finished fetching all query results')
# Transform into dataframe
logger.debug('Transforming result into dataframe')
df_results = | pd.DataFrame(results, columns=[desc[0] for desc in cursor.description]) | pandas.DataFrame |
import os
os.system('pip install pandas')
os.system('pip install gluonts')
import pandas as pd
import pathlib
import gluonts
import numpy as np
import argparse
import json
import boto3
from mxnet import gpu, cpu
from mxnet.context import num_gpus
from gluonts.dataset.util import to_pandas
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.model.lstnet import LSTNetEstimator
from gluonts.model.seq2seq import MQCNNEstimator
from gluonts.model.transformer import TransformerEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions, backtest_metrics
from gluonts.evaluation import Evaluator
from gluonts.model.predictor import Predictor
from gluonts.dataset.common import ListDataset
from gluonts.trainer import Trainer
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from smdebug.mxnet import Hook
s3 = boto3.client("s3")
def uploadDirectory(model_dir, prefix, bucket):
    """Recursively upload every file under *model_dir* to S3.

    Each file is stored in *bucket* under ``prefix + filename``.  Note
    that the directory structure below *model_dir* is flattened: only the
    file's base name is appended to *prefix*.

    Parameters
    ----------
    model_dir : str
        Local directory whose files are uploaded.
    prefix : str
        Key prefix applied to every uploaded object.
    bucket : str
        Destination S3 bucket name.
    """
    # `_dirs` replaces the unused `dirs`; `file_name` avoids shadowing
    # the `file` builtin.
    for root, _dirs, files in os.walk(model_dir):
        for file_name in files:
            local_path = os.path.join(root, file_name)
            print(local_path)
            print(prefix + file_name)
            # Uses the module-level boto3 client `s3`.
            s3.upload_file(local_path, bucket, prefix + file_name)
def train(bucket, seq, algo, freq, prediction_length, epochs, learning_rate, hybridize, num_batches_per_epoch):
#create train dataset
df = pd.read_csv(filepath_or_buffer=os.environ['SM_CHANNEL_TRAIN'] + "/train.csv", header=0, index_col=0)
training_data = ListDataset([{"start": df.index[0],
"target": df.usage[:],
"item_id": df.client[:]}],
freq=freq)
#create test dataset
df = pd.read_csv(filepath_or_buffer=os.environ['SM_CHANNEL_TEST'] + "/test.csv", header=0, index_col=0)
test_data = ListDataset([{"start": df.index[0],
"target": df.usage[:],
"item_id": 'client_12'}],
freq=freq)
hook = Hook.create_from_json_file()
#determine estimators##################################
if algo == "DeepAR":
estimator = DeepAREstimator(freq=freq,
prediction_length=prediction_length,
context_length=1,
trainer=Trainer(ctx="cpu",
epochs=epochs,
learning_rate=learning_rate,
hybridize=hybridize,
num_batches_per_epoch=num_batches_per_epoch
))
#train the model
predictor = estimator.train(training_data=training_data)
print("DeepAR training is complete SUCCESS")
elif algo == "SFeedFwd":
estimator = SimpleFeedForwardEstimator(freq=freq,
prediction_length=prediction_length,
trainer=Trainer(ctx="cpu",
epochs=epochs,
learning_rate=learning_rate,
hybridize=hybridize,
num_batches_per_epoch=num_batches_per_epoch
))
#train the model
predictor = estimator.train(training_data=training_data)
print("training is complete SUCCESS")
elif algo == "lstnet":
# Needed for LSTNet ONLY
grouper = MultivariateGrouper(max_target_dim=6)
training_data = grouper(training_data)
test_data = grouper(test_data)
context_length = prediction_length
num_series = 1
skip_size = 1
ar_window = 1
channels = 4
estimator = LSTNetEstimator(freq=freq,
prediction_length=prediction_length,
context_length=context_length,
num_series=num_series,
skip_size=skip_size,
ar_window=ar_window,
channels=channels,
trainer=Trainer(ctx="cpu",
epochs=epochs,
learning_rate=learning_rate,
hybridize=hybridize,
num_batches_per_epoch=num_batches_per_epoch
))
#train the model
predictor = estimator.train(training_data=training_data)
print("training is complete SUCCESS")
elif algo == "seq2seq":
estimator = MQCNNEstimator(freq=freq,
prediction_length=prediction_length,
trainer=Trainer(ctx="cpu",
epochs=epochs,
learning_rate=learning_rate,
hybridize=hybridize,
num_batches_per_epoch=num_batches_per_epoch
))
#train the model
predictor = estimator.train(training_data=training_data)
print("training is complete SUCCESS")
else:
estimator = TransformerEstimator(freq=freq,
prediction_length=prediction_length,
trainer=Trainer(ctx="cpu",
epochs=epochs,
learning_rate=learning_rate,
hybridize=hybridize,
num_batches_per_epoch=num_batches_per_epoch
))
#train the model
predictor = estimator.train(training_data=training_data)
print("training is complete SUCCESS")
###################################################
#evaluate trained model on test data
forecast_it, ts_it = make_evaluation_predictions(test_data, predictor, num_samples=100)
print("EVALUATION is complete SUCCESS")
forecasts = list(forecast_it)
tss = list(ts_it)
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_data))
print("METRICS retrieved SUCCESS")
#bucket = "bwp-sandbox"
mainpref = "gluonts/blog-models/"
prefix = mainpref + str(seq) + "/"
agg_df = | pd.DataFrame(agg_metrics, index=[0]) | pandas.DataFrame |
from tqdm import tqdm
import pandas as pd
import numpy as np
import gc
import copy
def AutoComple(action,caction):
    """Back-fill skipped actionType steps in *caction*.

    Wherever consecutive high-type actions (type > 3) jump by more than
    one type within a small time window, intermediate rows are inserted
    with linearly interpolated timestamps.  The completed frame is
    written to 'insert_action2.csv'.

    NOTE(review): *action* and *caction* appear to be the same event log
    (one with parsed timestamps) — confirm they stay index-aligned.
    """
    action['actionTime'] = action['actionTime'].apply(lambda x: pd.to_datetime(x))
    # Only actions of type > 3 take part in the gap detection.
    aT4 = action[action['actionType'] > 3]
    temp1 = aT4.groupby(['userid'],as_index=True)['actionType'].diff()
    #print(temp1)
    #print(caction)
    # Per-user time gap between consecutive actions, in seconds.
    temp2 = action.groupby(['userid'],as_index=True)['actionTime'].diff()
    temp2 = pd.DataFrame(temp2)
    temp2['actionTime'] = temp2['actionTime'].apply(lambda x: x.seconds)
    # Keep only gaps of at most 21600 s (6 hours).
    temp2 = temp2[temp2['actionTime'] <= 21600]
    #print(temp2)
    temp2 = pd.DataFrame(temp2)
    temp1 =pd.DataFrame(temp1)
    # Rows where the action type jumped by more than 1 (a skipped step).
    diffd2 = temp1[temp1['actionType'] > 1]
    #print(diffd2)
    #indexs = list(diffd2.index)
    # Candidate insertion points: type jump AND small time gap.
    indexs = list(set(diffd2.index).intersection(set(temp2.index)))
    list.sort(indexs)
    #print(indexs)
    count = 0  # running index shift caused by rows inserted so far
    c = 0
    for index in tqdm(indexs):
        c += 1
        d = index               # original (unshifted) index
        index += count          # position in the growing frame
        if int(caction.loc[index-1, ['actionType']]) < 4:
            continue
        above = caction.loc[:index-1]
        below = caction.loc[index:]
        c = int(diffd2.loc[d, ['actionType']])  # size of the type jump
        count += c-1
        for i in range(1, c):
            # Insert the missing types with linearly interpolated times.
            action_type = i + int(caction.loc[index-1, ['actionType']])
            userid = int(caction.loc[index-1, ['userid']])
            time = (int(caction.loc[index,['actionTime']])-int(caction.loc[index-1,['actionTime']]))/c * i + int(caction.loc[index-1,['actionTime']])
            insertrow = pd.DataFrame([[userid, action_type,time]],columns=['userid','actionType','actionTime'])
            #print(insertrow)
            # NOTE(review): DataFrame.append is deprecated/removed in
            # modern pandas — confirm the pinned pandas version.
            above = above.append(insertrow, ignore_index=True)
            caction = above.append(below, ignore_index=True)
    caction.to_csv('insert_action2.csv',index=False)
def AutoComple1(action):
    """Insert a missing type-1 action before large time gaps.

    For every per-user gap of >= 21600 s whose current actionType is not
    1, a type-1 row is inserted, timed back from the current action by
    the user's (or, as a fallback, the global) average duration of
    type-1 -> current-type transitions.  The result is written to
    './new/test_insert_1.csv'.

    NOTE(review): if no matching transition exists anywhere, ``cou``
    stays 0 and ``total_time / cou`` raises ZeroDivisionError.
    """
    caction = copy.deepcopy(action)
    caction.dropna(subset=['actionType_time'], inplace=True)
    caction.reset_index(inplace=True)
    action['action_time'] = action['action_time'].apply(lambda x: pd.to_datetime(x))
    temp = action.groupby(['userid'], as_index=True)['action_time'].diff()
    temp = pd.DataFrame(temp)
    temp['action_time'] = temp['action_time'].apply(lambda x: x.seconds)
    temp = temp[temp['action_time'] >= 21600]  # keep gaps >= 21600 s (original note said "more than 2 hours", but 21600 s is 6 h — verify)
    indexs = list(temp.index)
    del temp
    gc.collect()
    list.sort(indexs)
    count = 0
    for index in tqdm(indexs):
        d = index
        index += count
        buquan = int(action.loc[index, ['actionType']])
        if buquan == 1:  # a large gap whose current actionType is not 1 needs back-filling
            continue
        userid1 = int(caction.loc[d, ['userid']])
        temp1 = caction[caction['userid'] == userid1]
        temp1.reset_index(inplace=True)
        total_time = 0
        cou = 0
        # Average this user's 1 -> buquan transition times (< 1200 s only).
        for i in range(len(temp1)-1):
            if int(temp1.loc[i,['actionType']]) == 1 and int(temp1.loc[i+1,['actionType']]) == buquan and int(temp1.loc[i,['actionType_time']]) < 1200:
                total_time += int(temp1.loc[i,['actionType_time']])
                cou += 1
        if total_time == 0:
            # Fallback: average over all users.
            for i in range(len(caction)-1):
                if int(caction.loc[i,['actionType']]) == 1 and int(caction.loc[i+1,['actionType']]) == buquan and int(caction.loc[i,['actionType_time']]) < 1200:
                    total_time += int(caction.loc[i,['actionType_time']])
                    cou += 1
        above = action.loc[:index - 1]
        below = action.loc[index:]
        count += 1  # after inserting one type-1 row, later indices shift by 1
        action_type = 1
        userid = int(caction.loc[index, ['userid']])
        time = int(action.loc[index, ['actionTime']]) - total_time / cou
        del action,temp1
        gc.collect()
        insertrow = pd.DataFrame([[userid, action_type, time, 1]], columns=['userid','actionType','actionTime', 'is_insert'])
        above = above.append(insertrow, ignore_index=True)
        action = above.append(below, ignore_index=True)
        del above, below, insertrow
        gc.collect()
    action.to_csv('./new/test_insert_1.csv')
def AutoComple2(action):
action['action_time'] = action['action_time'].apply(lambda x: pd.to_datetime(x))
temp = action.groupby(['userid'], as_index=True)['action_time'].diff()
temp = pd.DataFrame(temp)
temp['action_time'] = temp['action_time'].apply(lambda x: x.seconds)
temp = temp[temp['action_time'] >= 21600] #选出时间间隔大于2小时的操作
indexs = list(temp.index)
del temp
gc.collect()
list.sort(indexs)
count = 0
for index in tqdm(indexs):
index += count
buquan = int(action.loc[index, ['actionType']])
if buquan == 1: #间隔大 且当前actionType不为1的需要补全
continue
above = action.loc[:index - 1]
below = action.loc[index:]
count += 1 #补一行1之后索引加1
action_type = 1
userid = int(action.loc[index, ['userid']])
time = 0
del action
gc.collect()
insertrow = | pd.DataFrame([[userid, action_type, time, 1]], columns=['userid','actionType','actionTime', 'is_insert']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 19:20:26 2020
@authors: <NAME>, <NAME>, <NAME>
"""
import os
import sys
import random
import numpy as np
import pandas as pd
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
sys.path.append(module_path)
from matplotlib import pyplot as plt
from utils.file import load_from_json
import tensorflow.keras as ker
from misc_basicFuncs import getMaxIndex
# load configs
trans_configs = load_from_json("configs/athena-mnist.json")
model_configs = load_from_json("configs/model-mnist.json")
data_configs = load_from_json("configs/data-mnist.json")
verbose = 10 # print statements in this script
verModel = 0
activations = ["sigmoid","relu","elu"]
# set the activation for model training
activation = activations[2]
# set boolean to get individual evaluations or bulk for each category
getEachEval = True
getOverallEval = True
################################################################
def trainNewModel(inputData, trueData, epochs=7, verbose=2, active="relu"):
    """Build and train a small dense classifier on 28x28 inputs.

    Parameters
    ----------
    inputData : array-like of shape (n, 28, 28)
        Training images.
    trueData : array-like of shape (n,)
        Integer class labels (sparse, 10 classes).
    epochs : int
        Number of training epochs.
    verbose : int
        Keras verbosity level for ``fit``.
    active : str
        Activation function for the two hidden layers.

    Returns
    -------
    keras.Model
        The trained model.
    """
    model = ker.models.Sequential([
        ker.layers.Flatten(input_shape=(28, 28)),
        ker.layers.Dense(128, activation=active),
        ker.layers.Dense(128, activation=active),
        ker.layers.Dense(10)
    ])
    model.compile(optimizer='adam',
                  loss=ker.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # Bug fix: honour the `epochs` and `verbose` parameters instead of the
    # hard-coded `epochs=7` that silently ignored both.
    model.fit(inputData, trueData, epochs=epochs, verbose=verbose)
    return model
############################################################
# load data
cleanData = np.load(data_configs.get("bs_file"))
trueLabels = np.load(data_configs.get("label_file"))
ensPred = np.load("models/ensemPredic_benignInput_probs.npy")
ensPred_indexes = np.zeros(np.shape(ensPred)[0])
trueLabels_indexes = np.zeros(np.shape(trueLabels)[0])
# Convert probability vectors to class indices.
for i in range(np.shape(ensPred)[0]):
    pred = getMaxIndex(ensPred[i])
    # Bug fix: the true label must come from trueLabels, not from the
    # ensemble predictions (the old code made both arrays identical,
    # which would make any downstream accuracy comparison meaningless).
    trueLab = getMaxIndex(trueLabels[i])
    ensPred_indexes[i] = pred
    trueLabels_indexes[i] = trueLab
#Clean Nans and extrenious values from arrays
nans = np.argwhere(np.isnan(ensPred_indexes))
cleanData = np.delete(cleanData,nans,0)
ensPred_indexes = np.delete(ensPred_indexes,nans,0)
trueLabels_indexes = np.delete(trueLabels_indexes,nans,0)
# Train ML Model
model = trainNewModel(cleanData[:8000,:,:,0], ensPred_indexes[:8000],active=activation)
if(verbose>4): print("finished training model")
# create dataframe to save evaluation results
cols=["ae_type","label","accuracy","loss"]
results = | pd.DataFrame(columns=cols) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
    """Exercise DataFrame.shift: naive shifts, shift(0), freq/DateOffset
    shifts, PeriodIndex frames, and shifting along axis=1."""
    # naive shift
    shiftedFrame = datetime_frame.shift(5)
    tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
    shiftedSeries = datetime_frame["A"].shift(5)
    tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
    shiftedFrame = datetime_frame.shift(-5)
    tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
    shiftedSeries = datetime_frame["A"].shift(-5)
    tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
    # shift by 0 must be a no-op
    unshifted = datetime_frame.shift(0)
    tm.assert_frame_equal(unshifted, datetime_frame)
    # shift by DateOffset: moves the index, keeps the length
    shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
    assert len(shiftedFrame) == len(datetime_frame)
    shiftedFrame2 = datetime_frame.shift(5, freq="B")
    tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
    d = datetime_frame.index[0]
    shifted_d = d + offsets.BDay(5)
    # freq-shifting relabels the index, so compare the same row by label
    tm.assert_series_equal(
        datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
    )
    # shift int frame
    int_shifted = int_frame.shift(1)  # noqa
    # Shifting with PeriodIndex
    ps = tm.makePeriodFrame()
    shifted = ps.shift(1)
    unshifted = shifted.shift(-1)
    tm.assert_index_equal(shifted.index, ps.index)
    tm.assert_index_equal(unshifted.index, ps.index)
    tm.assert_numpy_array_equal(
        unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
    )
    shifted2 = ps.shift(1, "B")
    shifted3 = ps.shift(1, offsets.BDay())
    tm.assert_frame_equal(shifted2, shifted3)
    tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
    # an incompatible freq must raise
    msg = "does not match PeriodIndex freq"
    with pytest.raises(ValueError, match=msg):
        ps.shift(freq="D")
    # shift other axis
    # GH#6371
    df = DataFrame(np.random.rand(10, 5))
    expected = pd.concat(
        [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
        ignore_index=True,
        axis=1,
    )
    result = df.shift(1, axis=1)
    tm.assert_frame_equal(result, expected)
    # shift named axis ("columns" must behave like axis=1)
    df = DataFrame(np.random.rand(10, 5))
    expected = pd.concat(
        [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
        ignore_index=True,
        axis=1,
    )
    result = df.shift(1, axis="columns")
    tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
    """Shifting a boolean frame introduces NaN and upcasts to object dtype."""
    frame = DataFrame({"high": [True, False], "low": [False, False]})
    result = frame.shift(1)
    expected = DataFrame(
        np.array([[np.nan, np.nan], [True, False]], dtype=object),
        columns=["high", "low"],
    )
    tm.assert_frame_equal(result, expected)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
| tm.assert_frame_equal(rs, xp) | pandas._testing.assert_frame_equal |
import pandas as pd
import numpy as np
N_teams = 14
N_hitters = 9
N_SP = 8
N_RP = 4
budget = 260
frac_hitter_budget = 0.5
frac_pitcher_budget = 1 - frac_hitter_budget
def calcSGPHitters(df, cat_offsets):
    """Calculate Standings-Gain-Points (SGP) values for hitters.

    For the rate categories (AVG, OBP, SLG) a "weighted" differential is
    first computed by dropping each player into an average top team; then
    every category is converted to SGP using last year's thresholds and
    the supplied per-category offsets.  The frame is returned sorted by
    total SGP, descending.

    NOTE(review): assumes *df* already contains a 'wOBA' column (used
    only for the initial sort) — confirm upstream code provides it.
    """
    # Get the SGP replacement level headers from the matlab script (Get_SGP_thresholds_from_lastyeardata.m)
    sgp = load_sgp_thresh_last_year('H')
    # Sort the data
    df = df.sort_values(by='wOBA', ascending=False)
    # Keep only the top players for calculating averages for rate categories
    top_hitters = df.head(N_hitters * N_teams)
    # Calculate "wAVG": team AVG with this player added, minus average team AVG
    numer = (N_hitters - 1) * top_hitters['H'].mean() + df['H']
    denom = (N_hitters - 1) * top_hitters['AB'].mean() + df['AB']
    df['wAVG'] = numer/denom - top_hitters['AVG'].mean()
    # Calculate wOBP the same way (mean on-base events = PA * OBP)
    monbase = top_hitters['PA'].mean() * top_hitters['OBP'].mean()
    numer = (N_hitters - 1) * monbase + df['H'] + df['BB'] + df['HBP']
    denom = (N_hitters - 1) * top_hitters['PA'].mean() + df['PA']
    df['wOBP'] = numer/denom - top_hitters['OBP'].mean()
    # Calculate wSLG
    numer = (N_hitters - 1) * top_hitters['TB'].mean() + df['TB']
    denom = (N_hitters - 1) * top_hitters['AB'].mean() + df['AB']
    df['wSLG'] = numer/denom - top_hitters['SLG'].mean()
    # Now get the sgp by dividing by the values calculated from last year's totals
    # (sgp[cat][0] is presumably the SGP-per-unit denominator and
    # sgp[cat][1] the replacement level — confirm against
    # load_sgp_thresh_last_year.)
    for cat in ['AVG', 'OBP', 'SLG']:
        df['s' + cat] = df['w' + cat] / sgp[cat][0] - cat_offsets['s' + cat][0]
    for cat in ['HR', 'R', 'RBI', 'SB', 'TB']:
        df['s' + cat] = (df[cat] - sgp[cat][1]) / sgp[cat][0] - cat_offsets['s' + cat][0]
    # Sum up all of these entries to get the total SGP
    df['SGP'] = df[['sAVG', 'sOBP', 'sSLG', 'sHR',
                    'sR', 'sRBI', 'sSB', 'sTB']].sum(axis=1)
    # Now sort by total SGP descending
    df = df.sort_values(by='SGP', ascending=False)
    return df.reset_index(drop=True)
def calcPositionOffsets(cat_offsets, df):
    """Calculate the position offset values.

    Walks all hitters in descending SGP order and greedily assigns each to
    his scarcest eligible position (ties broken down the defensive
    spectrum). It doesn't actually matter which list a player lands on; the
    point is to establish replacement-level SGP per position.

    :param cat_offsets: per-category offset accumulator (updated in place
        by update_category_offsets and returned)
    :param df: hitters DataFrame sorted by 'SGP' descending, with a
        comma-separated 'position' eligibility column
    :return: tuple of (updated cat_offsets, Series of per-position offsets)
    """
    # Lower value = more premium defensive position; used to break ties.
    defensive_spectrum = {'C': 0, 'SS': 1, '2B': 2, '3B': 3, 'CF': 4, 'LF': 5, 'RF': 6, '1B': 7, 'U': 8}
    positions = list(defensive_spectrum.keys())
    # One (initially empty) ranked DataFrame per position.
    meta_ranked = {m: pd.DataFrame(columns=df.columns) for m in positions}
    for _, row in df.iterrows():
        # Loop over all positions this player is eligible at
        # Get the SGP of all players at each eligible position
        posrank = pd.Series(index=positions, dtype=float)
        for pos in row['position'].split(','):
            posrank[pos] = len(meta_ranked[pos])  # how many better players are already assigned to that position
        bestposits = list(posrank[posrank == posrank.min()].index)
        # In the case of ties, go down the defensive spectrum - sort is ascending so lower values are better
        bestpos = sorted(bestposits, key=lambda x: defensive_spectrum[x])[0]  # custom sorting
        # DataFrame.append was removed in pandas 2.0; concat the row instead
        # (the old call also passed ignore_index='True', a truthy string).
        meta_ranked[bestpos] = pd.concat(
            [meta_ranked[bestpos], row.to_frame().T], ignore_index=True
        )
    # TODO: Account for bench hitters?
    cat_offsets = update_category_offsets(cat_offsets, meta_ranked, positions)
    # Get the positional difference by looking at the value of the last player
    # TODO: These don't seem to be normalized correctly
    pos_offsets = pd.Series({pos: meta_ranked[pos]['SGP'][N_teams-1] for pos in positions})
    return cat_offsets, pos_offsets
def update_category_offsets(cat_offsets, meta_ranked, positions):
    """
    :param cat_offsets: Previous value
    :param meta_ranked: dictionary of dataframes
    :param positions: list of str
    :return: Updated value of cat_offsets

    Each category should hand out N_teams * (N_teams - 1) / 2 SGP in total;
    any surplus over that target is spread evenly across all active players
    and folded into the per-category offsets.
    """
    target_sgp = N_teams * (N_teams - 1) / 2
    categories = ['sAVG', 'sOBP', 'sSLG', 'sHR', 'sR', 'sRBI', 'sSB', 'sTB']
    differences = dict()
    for category in categories:
        # Total SGP assigned to starter slots (top N_teams at each position).
        assigned = sum(meta_ranked[pos][category][:N_teams].sum() for pos in positions)
        differences[category] = assigned - target_sgp
        # Divide the difference by the total number of active players since this is a per-player metric
        cat_offsets[category] += differences[category] / (N_teams * N_hitters)
    print('Updated offsets for each category. Should get progressively smaller: {}'.format(differences))
    return cat_offsets
def addPositions(udf, pos_offsets):
pos_offsets = pos_offsets.sort_values()
# Initialize
pos_offset_values = | pd.Series(index=udf.index) | pandas.Series |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
df_strategy,
categoricaldf_strategy,
)
from janitor.functions import expand_grid
@given(df=df_strategy())
def test_others_not_dict(df):
    """Raise Error if `others` is not a dictionary."""
    # A list is not a mapping of name -> values, so expand_grid must reject it.
    with pytest.raises(TypeError):
        df.expand_grid("frame", others=[2, 3])
@given(df=df_strategy())
def test_others_none(df):
    """Return DataFrame if no `others`, and df exists."""
    # With nothing to cross against, the input frame comes back unchanged.
    assert_frame_equal(df.expand_grid("df"), df)
def test_others_empty():
    """Return None if no `others`."""
    # The previous `assert (expand_grid(), None)` asserted a non-empty tuple,
    # which is always truthy (flake8 F631) and so could never fail.
    assert expand_grid() is None
@given(df=df_strategy())
def test_df_key(df):
    """Raise error if df exists and df_key is not supplied."""
    # A DataFrame without a key to register it under is ambiguous.
    with pytest.raises(KeyError):
        expand_grid(df, others={"y": [5, 4, 3, 2, 1]})
@given(df=df_strategy())
def test_df_key_hashable(df):
    """Raise error if df exists and df_key is not Hashable."""
    # A list cannot serve as a dictionary key for the frame.
    with pytest.raises(TypeError):
        expand_grid(df, df_key=["a"], others={"y": [5, 4, 3, 2, 1]})
def test_numpy_zero_d():
    """Raise ValueError if numpy array dimension is zero."""
    # An empty (size-0) integer array has no entries to expand.
    with pytest.raises(ValueError):
        expand_grid(others={"x": np.array([], dtype=int)})
def test_numpy_gt_2d():
    """Raise ValueError if numpy array dimension is greater than 2."""
    # A 3-D array cannot be laid out as columns of a 2-D frame.
    with pytest.raises(ValueError):
        expand_grid(others={"x": np.array([[[2, 3]]])})
def test_series_empty():
    """Raise ValueError if Series is empty."""
    # dtype given explicitly to avoid pandas' empty-Series dtype warning.
    with pytest.raises(ValueError):
        expand_grid(others={"x": pd.Series([], dtype=int)})
def test_dataframe_empty():
"""Raise ValueError if DataFrame is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": | pd.DataFrame([]) | pandas.DataFrame |
""" Test cases for misc plot functions """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
import pandas.plotting as plotting
@td.skip_if_mpl
def test_import_error_message():
    # GH-19810
    # Only runs when matplotlib is absent: calling .plot() should fail with a
    # helpful ImportError rather than an obscure secondary failure.
    df = DataFrame({"A": [1, 2]})

    with pytest.raises(ImportError, match="matplotlib is required for plotting"):
        df.plot()
def test_get_accessor_args():
    # Exercises the internal argument-normalisation helper of the plot accessor.
    func = plotting._core.PlotAccessor._get_call_args

    # Wrong data type is rejected up front.
    msg = "Called plot accessor for type list, expected Series or DataFrame"
    with pytest.raises(TypeError, match=msg):
        func(backend_name="", data=[], args=[], kwargs={})

    # Positional args beyond the allowed form are rejected.
    msg = "should not be called with positional arguments"
    with pytest.raises(TypeError, match=msg):
        func(backend_name="", data=Series(dtype=object), args=["line", None], kwargs={})

    # Positional x plus keyword y/kind are merged and returned normalised.
    x, y, kind, kwargs = func(
        backend_name="",
        data=DataFrame(),
        args=["x"],
        kwargs={"y": "y", "kind": "bar", "grid": False},
    )
    assert x == "x"
    assert y == "y"
    assert kind == "bar"
    assert kwargs == {"grid": False}

    x, y, kind, kwargs = func(
        backend_name="pandas.plotting._matplotlib",
        data=Series(dtype=object),
        args=[],
        kwargs={},
    )
    assert x is None
    assert y is None
    assert kind == "line"
    # NOTE(review): 24 is the number of defaulted kwargs the matplotlib backend
    # injects; this count is pinned to the current pandas version.
    assert len(kwargs) == 24
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
    """Smoke tests for the Series-based helpers in pandas.plotting."""

    def test_autocorrelation_plot(self):
        from pandas.plotting import autocorrelation_plot

        ser = tm.makeTimeSeries(name="ts")
        # Ensure no UserWarning when making plot
        with tm.assert_produces_warning(None):
            # Works with both a Series and a bare ndarray.
            _check_plot_works(autocorrelation_plot, series=ser)
            _check_plot_works(autocorrelation_plot, series=ser.values)

            ax = autocorrelation_plot(ser, label="Test")
        self._check_legend_labels(ax, labels=["Test"])

    @pytest.mark.parametrize("kwargs", [{}, {"lag": 5}])
    def test_lag_plot(self, kwargs):
        from pandas.plotting import lag_plot

        ser = tm.makeTimeSeries(name="ts")
        _check_plot_works(lag_plot, series=ser, **kwargs)

    def test_bootstrap_plot(self):
        from pandas.plotting import bootstrap_plot

        ser = tm.makeTimeSeries(name="ts")
        # size=10 keeps the resampling cheap.
        _check_plot_works(bootstrap_plot, series=ser, size=10)
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    @td.skip_if_no_scipy
    @pytest.mark.parametrize("pass_axis", [False, True])
    def test_scatter_matrix_axis(self, pass_axis):
        """scatter_matrix should honour a pre-created axes grid and label ticks."""
        scatter_matrix = plotting.scatter_matrix

        ax = None
        if pass_axis:
            _, ax = self.plt.subplots(3, 3)

        # Fixed RNG seed so the tick labels below are deterministic.
        with tm.RNGContext(42):
            df = DataFrame(np.random.randn(100, 3))

        # we are plotting multiples on a sub-plot
        with tm.assert_produces_warning(UserWarning, raise_on_extra_warnings=True):
            axes = _check_plot_works(
                scatter_matrix,
                filterwarnings="always",
                frame=df,
                range_padding=0.1,
                ax=ax,
            )
        axes0_labels = axes[0][0].yaxis.get_majorticklabels()

        # GH 5662
        expected = ["-2", "0", "2"]
        self._check_text_labels(axes0_labels, expected)
        self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)

        # Rescale one column and confirm the tick labels follow.
        df[0] = (df[0] - 2) / 3

        # we are plotting multiples on a sub-plot
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(
                scatter_matrix,
                filterwarnings="always",
                frame=df,
                range_padding=0.1,
                ax=ax,
            )
        axes0_labels = axes[0][0].yaxis.get_majorticklabels()
        expected = ["-1.0", "-0.5", "0.0"]
        self._check_text_labels(axes0_labels, expected)
        self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
    @pytest.mark.slow
    def test_andrews_curves(self, iris):
        """andrews_curves honours explicit colors, named colors and colormaps."""
        from matplotlib import cm

        from pandas.plotting import andrews_curves

        df = iris

        # Ensure no UserWarning when making plot
        with tm.assert_produces_warning(None):
            _check_plot_works(andrews_curves, frame=df, class_column="Name")

        # RGBA hex colors, one per class.
        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=rgba
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
        )

        # Named matplotlib colors.
        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=cnames
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
        )

        # Colormap sampled once per class.
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", colormap=cm.jet
        )
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        self._check_colors(
            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
        )

        # Repeat the same three checks on a random single-class frame.
        length = 10
        df = DataFrame(
            {
                "A": np.random.rand(length),
                "B": np.random.rand(length),
                "C": np.random.rand(length),
                "Name": ["A"] * length,
            }
        )

        _check_plot_works(andrews_curves, frame=df, class_column="Name")

        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=rgba
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
        )

        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=cnames
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
        )

        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", colormap=cm.jet
        )
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        self._check_colors(
            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
        )

        # Colors passed directly end up on the legend handles.
        colors = ["b", "g", "r"]
        df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
        ax = andrews_curves(df, "Name", color=colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, linecolors=colors)
    @pytest.mark.slow
    def test_parallel_coordinates(self, iris):
        """parallel_coordinates honours color lists, colormaps and axvlines."""
        from matplotlib import cm

        from pandas.plotting import parallel_coordinates

        df = iris

        ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name")
        # Baseline counts used by the axvlines check below.
        nlines = len(ax.get_lines())
        nxticks = len(ax.xaxis.get_ticklabels())

        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(
            parallel_coordinates, frame=df, class_column="Name", color=rgba
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
        )

        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        ax = _check_plot_works(
            parallel_coordinates, frame=df, class_column="Name", color=cnames
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
        )

        ax = _check_plot_works(
            parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet
        )
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        self._check_colors(
            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
        )

        # Without vertical axis lines, only the data lines should remain.
        ax = _check_plot_works(
            parallel_coordinates, frame=df, class_column="Name", axvlines=False
        )
        assert len(ax.get_lines()) == (nlines - nxticks)

        colors = ["b", "g", "r"]
        df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
        ax = parallel_coordinates(df, "Name", color=colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, linecolors=colors)
    # not sure if this is indicative of a problem
    @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning")
    def test_parallel_coordinates_with_sorted_labels(self):
        """For #15908"""
        from pandas.plotting import parallel_coordinates

        # Classes deliberately out of order (2, 3, 1) to exercise sort_labels.
        df = DataFrame(
            {
                "feat": list(range(30)),
                "class": [2 for _ in range(10)]
                + [3 for _ in range(10)]
                + [1 for _ in range(10)],
            }
        )
        ax = parallel_coordinates(df, "class", sort_labels=True)
        polylines, labels = ax.get_legend_handles_labels()
        # Pair each legend line's colour with its label, then order by label.
        color_label_tuples = zip(
            [polyline.get_color() for polyline in polylines], labels
        )
        ordered_color_label_tuples = sorted(color_label_tuples, key=lambda x: x[1])
        prev_next_tupels = zip(
            list(ordered_color_label_tuples[0:-1]), list(ordered_color_label_tuples[1:])
        )
        for prev, nxt in prev_next_tupels:
            # labels and colors are ordered strictly increasing
            assert prev[1] < nxt[1] and prev[0] < nxt[0]
    def test_radviz(self, iris):
        """radviz honours explicit colors, named colors and colormaps."""
        from matplotlib import cm

        from pandas.plotting import radviz

        df = iris

        # Ensure no UserWarning when making plot
        with tm.assert_produces_warning(None):
            _check_plot_works(radviz, frame=df, class_column="Name")

        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(radviz, frame=df, class_column="Name", color=rgba)
        # skip Circle drawn as ticks
        patches = [p for p in ax.patches[:20] if p.get_label() != ""]
        self._check_colors(patches[:10], facecolors=rgba, mapping=df["Name"][:10])

        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        # NOTE(review): the return value is not re-assigned to `ax` here (nor in
        # the colormap case below), so the patch checks that follow appear to
        # inspect the *previous* plot's axes — confirm whether this is intended.
        _check_plot_works(radviz, frame=df, class_column="Name", color=cnames)
        patches = [p for p in ax.patches[:20] if p.get_label() != ""]
        self._check_colors(patches, facecolors=cnames, mapping=df["Name"][:10])

        _check_plot_works(radviz, frame=df, class_column="Name", colormap=cm.jet)
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        patches = [p for p in ax.patches[:20] if p.get_label() != ""]
        self._check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10])

        colors = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]]
        df = DataFrame(
            {"A": [1, 2, 3], "B": [2, 1, 3], "C": [3, 2, 1], "Name": ["b", "g", "r"]}
        )
        ax = radviz(df, "Name", color=colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=colors)
    def test_subplot_titles(self, iris):
        """A list-valued `title` must match the number of subplots exactly."""
        df = iris.drop("Name", axis=1).head()
        # Use the column names as the subplot titles
        title = list(df.columns)

        # Case len(title) == len(df)
        plot = df.plot(subplots=True, title=title)
        assert [p.get_title() for p in plot] == title

        # Case len(title) > len(df)
        msg = (
            "The length of `title` must equal the number of columns if "
            "using `title` of type `list` and `subplots=True`"
        )
        with pytest.raises(ValueError, match=msg):
            df.plot(subplots=True, title=title + ["kittens > puppies"])

        # Case len(title) < len(df)
        with pytest.raises(ValueError, match=msg):
            df.plot(subplots=True, title=title[:2])

        # Case subplots=False and title is of type list
        msg = (
            "Using `title` of type `list` is not supported unless "
            "`subplots=True` is passed"
        )
        with pytest.raises(ValueError, match=msg):
            df.plot(subplots=False, title=title)

        # Case df with 3 numeric columns but layout of (2,2)
        plot = df.drop("SepalWidth", axis=1).plot(
            subplots=True, layout=(2, 2), title=title[:-1]
        )
        title_list = [ax.get_title() for sublist in plot for ax in sublist]
        # The unused fourth panel gets an empty title.
        assert title_list == title[:3] + [""]
def test_get_standard_colors_random_seed(self):
# GH17525
df = DataFrame(np.zeros((10, 10)))
# Make sure that the np.random.seed isn't reset by get_standard_colors
plotting.parallel_coordinates(df, 0)
rand1 = np.random.random()
| plotting.parallel_coordinates(df, 0) | pandas.plotting.parallel_coordinates |
# %%
import pandas as pd
# %%
class TokenHolder:
    """Loads ERO token holders and normalises raw (18-decimal) balances."""

    def process(self):
        # Keep only the address/balance columns from the export.
        holders = pd.read_csv("ero_holders.csv")[["address", "balance"]]
        # Convert from the smallest unit (wei-style) to whole tokens.
        holders["balance"] = holders["balance"].astype(float) / 10 ** 18
        return holders.rename(columns={"balance": "token_balance"})
class OriginHolder:
    """Loads ORIGIN holders; balances are whole-number counts."""

    def process(self):
        holders = pd.read_csv("origin_holders.csv")[["address", "balance"]]
        holders["balance"] = holders["balance"].astype(int)
        return holders.rename(columns={"balance": "origin_balance"})
class XmasHolder:
    """Loads XMAS holders; balances are whole-number counts."""

    def process(self):
        holders = pd.read_csv("xmas_holders.csv")[["address", "balance"]]
        holders["balance"] = holders["balance"].astype(int)
        return holders.rename(columns={"balance": "xmas_balance"})
class Collection1Holder:
    """Loads collection-1 holders; balances are whole-number counts."""

    def process(self):
        holders = pd.read_csv("collection1_holders.csv")[["address", "balance"]]
        holders["balance"] = holders["balance"].astype(int)
        return holders.rename(columns={"balance": "coll1_balance"})
class Collection2Holder:
    """Loads collection-2 holders; balances are whole-number counts."""

    def process(self):
        holders = pd.read_csv("collection2_holders.csv")[["address", "balance"]]
        holders["balance"] = holders["balance"].astype(int)
        return holders.rename(columns={"balance": "coll2_balance"})
# %%
# Build each holder table once; `process` reads the CSVs from the working dir.
token = TokenHolder().process()
origin = OriginHolder().process()
xmas = XmasHolder().process()
collection1 = Collection1Holder().process()
collection2 = Collection2Holder().process()
# %%
# Left-join keeps every token holder; origin_balance is NaN for non-holders.
origin_join = pd.merge(token, origin, on="address", how="left")
# Do NOT run this script end-to-end! The spreadsheet needs manual adjustment
# in between; execute it block by block.
import numpy as np
from numpy import *
import pandas as pd
df = pd.read_csv('data.csv',encoding='gbk')
# Data cleaning: the data was pre-adjusted on the EPS platform before import,
# so there are no erroneous, redundant or duplicated records; only table
# simplification and missing-value handling are needed.
df=df.dropna(how="all")
df=df.drop([0])#delete year (the first row holds year labels)
#for i in range(df.shape[0]):
# Since the analysis is not tied to specific regions, find rows with more
# than one missing value and delete them.
# Collect the rows with two or more missing values, then delete them.
todel = []
for i in range(df.shape[0]):
    missing = 0
    for j in range(df.shape[1]):
        if pd.isnull(df.iloc[i, j]):
            missing += 1
        if missing >= 2:
            # Record the index *label*, not the position: df.drop works on
            # labels, and labels no longer match positions after the
            # dropna/drop([0]) calls above (the old code appended `i`,
            # which deleted the wrong rows). Also avoids shadowing the
            # builtin `sum`.
            todel.append(df.index[i])
            break
df = df.drop(todel)
# Fill missing values via Lagrange interpolation.
from scipy.interpolate import lagrange
def ploy(s, n, k=6):
    """Interpolate the value at position n of Series s.

    Fits a Lagrange polynomial through up to k known values on each side of
    n (skipping n itself) and evaluates it at n.

    :param s: pandas Series with an integer (RangeIndex-like) index
    :param n: index label of the value to interpolate
    :param k: number of neighbours to use on each side (default 6)
    :return: interpolated value at n
    """
    # reindex (rather than s[label_list]) tolerates out-of-range labels near
    # the series edges: missing labels become NaN and are dropped below.
    # Modern pandas raises KeyError for missing labels in a plain list lookup,
    # which broke the original implementation at the boundaries.
    y = s.reindex(list(range(n - k, n)) + list(range(n + 1, n + 1 + k)))
    y = y[y.notnull()]  # keep only the known neighbours
    return lagrange(y.index, list(y))(n)
# Fill each missing cell via Lagrange interpolation over its neighbours.
# NOTE(review): `(df[i].isnull())[j]` and `df[i][j]` index by *label* j, which
# only matches the loop position if the index is still 0..len-1 — confirm the
# index was reset after the row drops above. `df[i][j] = ...` is also chained
# assignment, which is deprecated in modern pandas.
for i in df.columns:
    for j in range(len(df)):
        if (df[i].isnull())[j]:
            df[i][j]=ploy(df[i],j)
df.to_excel('data222.xls')
# Use the KMO test (and Bartlett's test) to judge whether factor analysis
# is appropriate for this dataset.
import numpy as np
import math as math
dataset = pd.read_csv('data222.csv', encoding='gbk')
# Drop the bookkeeping columns left over from the Excel round-trip.
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
    """Return the matrix of Pearson correlation coefficients for *data*.

    Bug fix: the original body ignored its argument and always used the
    module-level `dataset`. Call sites pass `dataset`, so their behaviour
    is unchanged, but the helper now works for any input.
    """
    return np.corrcoef(data)
dataset_corr = corr(dataset)  # Pearson product-moment correlation matrix
# The inverse was computed in MATLAB and imported here: np.linalg.inv gave
# wrong results due to numerical-precision issues.
tru = pd.read_csv('true.csv', encoding='gbk')
def kmo(dataset_corr, tr):
    """Compute the Kaiser-Meyer-Olkin (KMO) measure of sampling adequacy.

    :param dataset_corr: square correlation matrix (array-like)
    :param tr: inverse of the correlation matrix, as a DataFrame (computed
        externally in MATLAB for precision — see the note below)
    :return: the KMO statistic; values close to 1 favour factor analysis
    """
    corr_inv = tr  # originally np.linalg.inv, but precision problems corrupted the result, so the inverse is computed in MATLAB and imported
    nrow_inv_corr, ncol_inv_corr = dataset_corr.shape
    A = np.ones((nrow_inv_corr, ncol_inv_corr))  # all-ones matrix, to be filled with anti-image correlations
    for i in range(0, nrow_inv_corr, 1):
        for j in range(i, ncol_inv_corr, 1):
            # Anti-image (partial) correlation between variables i and j;
            # symmetric, so fill both triangles at once.
            A[i, j] = -(corr_inv.iloc[i, j]) / (math.sqrt(corr_inv.iloc[i, i] * corr_inv.iloc[j, j]))
            A[j, i] = A[i, j]
    dataset_corr = np.asarray(dataset_corr)
    kmo_num = np.sum(np.square(dataset_corr)) - np.sum(np.square(np.diagonal(A)))  # sum of squared correlations minus the squared diagonal
    kmo_denom = kmo_num + np.sum(np.square(A)) - np.sum(np.square(np.diagonal(A)))
    kmo_value = kmo_num / kmo_denom
    return kmo_value
print(kmo(dataset_corr, tru)) # kmo test
# NOTE(review): pd.read_excel dropped the `encoding` keyword in pandas >= 1.2;
# this line needs updating to run on modern pandas.
dataset = pd.read_excel('data222.xls',encoding='gbk')
import os
import sys
import pandas as pd
import numpy as np
import json
import MongoDBUtils as dbutils
import CSVUtils as csvutils
import datetime
# ===================== The following has been migrated into CSVUtils.py =======================
# def volStr2int(volStr):
# if volStr == '-':
# return 0
# elif volStr[-1] == "K":
# return int(float(volStr[:-1].replace(',', '')) * 1000)
# elif volStr[-1] == 'M':
# return int(float(volStr[:-1].replace(',', '')) * 1000000)
# elif volStr[-1] == 'B':
# return int(float(volStr[:-1].replace(',', '')) * 1000000000)
# else:
# return np.float64(volStr.replace(',', ''))
#
#
# def unknown2float(numStr):
# if type(numStr) == np.float64:
# return numStr
# elif type(numStr) == float:
# return numStr
# else:
# return np.float64(numStr.replace(',', ''))
#
#
# def str2date(dateStr):
# import datetime
# format_str = '%b %d, %Y'
# return datetime.datetime.strptime(dateStr, format_str)
#
#
# def percent2float(percentStr):
# return float(percentStr[:-1]) / 100
# ===================== The above has been migrated into CSVUtils.py =======================
def csv2db(csv_path, csv_name, etf_name):
# ===================== The following has been migrated into CSVUtils.py =======================
# # ====== 1. Initial Settings ======
# # csv_path = "../index/2014-2019"
# # # file_path = ".." # For PC
# # csv_name = "S&P 500 Historical Data.csv"
# csv_addr = os.path.join(csv_path, csv_name)
#
# # ====== 2. Parsing CSV to JSON ======
# csv_df = pd.DataFrame(pd.read_csv(csv_addr, sep=",", header=0, index_col=False))
#
# csv_df['Date'] = csv_df['Date'].apply(csvutils.str2date)
#
# csv_df['Price'] = csv_df['Price'].apply(csvutils.unknown2float)
# csv_df['Open'] = csv_df['Open'].apply(csvutils.unknown2float)
# csv_df['High'] = csv_df['High'].apply(csvutils.unknown2float)
# csv_df['Low'] = csv_df['Low'].apply(csvutils.unknown2float)
#
# csv_df['Vol'] = csv_df['Vol.'].apply(csvutils.volStr2int)
# csv_df.drop("Vol.", axis=1, inplace=True) # Since MongoDB does not accept column name with dot
#
# csv_df['Change'] = csv_df['Change %'].apply(csvutils.percent2float)
# csv_df.drop("Change %", axis=1, inplace=True) # Since MongoDB does not accept column name with space and symbol
# # print(csv_df)
# ===================== The above has been migrated into CSVUtils.py =======================
csv_df = csvutils.csv2df(csv_path, csv_name)
json_str = csv_df.to_json(orient='records')
json_list = json.loads(json_str)
for i, v in enumerate(json_list):
json_list[i]['Date'] = | pd.to_datetime(json_list[i]['Date'], unit='ms') | pandas.to_datetime |
"""
Functions for converting between data formats
"""
from typing import Optional
import numpy as np
import pandas as pd
from .checks import (
is_flat_dataset,
is_sklearn_dataset,
is_stacked_dataset,
is_timeseries_dataset,
)
from .exceptions import TimekeepCheckError
def convert_timeseries_input(func):
    """Decorator: convert *func*'s timeseries argument to an sklearn-style
    dataset before the call.

    The data array may arrive as the first positional argument (plain
    functions) or the second one (methods, where ``self`` comes first).
    """

    def inner(*args, **kwargs):
        # Locate the data argument: position 0 for functions, 1 for methods.
        pos = 0 if isinstance(args[0], np.ndarray) else 1
        data = args[pos]
        assert isinstance(data, np.ndarray)
        new_args = list(args)
        new_args[pos] = to_sklearn_dataset(data)
        return func(*new_args, **kwargs)

    return inner
def convert_output_to_timeseries(func):
    """Decorator: ensure *func*'s return value is a 3-axis timeseries dataset.

    3-axis output passes through untouched; 2-axis output is reshaped with
    `to_timeseries_dataset`; anything else is rejected.

    Raises
    ------
    TimekeepCheckError
        If the wrapped function returns data with neither 2 nor 3 axes.
    """

    def inner(*args, **kwargs):
        data = func(*args, **kwargs)
        if len(data.shape) == 3:
            return data

        # If it's not 2-dimensional, we can't handle it
        if not len(data.shape) == 2:
            raise TimekeepCheckError(
                "convert_output_to_timeseries: data has {} axes; "
                # Bug fix: report the *number* of axes as the message promises,
                # not the shape tuple (the old code formatted data.shape).
                "data must have 2 axes".format(len(data.shape))
            )
        return to_timeseries_dataset(data)

    return inner
def timeseries_transformer(cls):
    """
    Augment sklearn.TransformerMixin classes to accept timeseries datasets

    Wraps ``fit``, ``transform`` and ``fit_transform`` with
    ``convert_timeseries_input`` so callers may pass timeseries data.

    Parameters
    ----------
    cls : TransformerMixin
        The class to augment

    Returns
    -------
    TransformerMixin
        The input class, which now accepts timeseries datasets as input
    """
    for attr in ("fit", "transform", "fit_transform"):
        setattr(cls, attr, convert_timeseries_input(getattr(cls, attr)))
    return cls
# ----- Format conversion ----- #
def to_flat_dataset(data) -> pd.DataFrame:
    """
    Convert a tslearn timeseries or tsfresh stacked dataset
    to a tsfresh flat dataset

    A flat dataset is a DataFrame with columns for 'id' (of timeseries),
    'time' (at which value occurs) and a column for each of the
    timeseries parameters

    Parameters
    ----------
    data
        The data to have its format changed

    Returns
    -------
    pandas.DataFrame
        Data, now as a tsfresh flat dataset

    Raises
    ------
    ValueError
        If data is not a tslearn timeseries dataset,
        tsfresh stacked dataset or tsfresh flat dataset
    """
    try:
        is_flat_dataset(data)  # will raise TimekeepCheckError if not
        return data
    except TimekeepCheckError:
        pass

    try:
        is_stacked_dataset(data)  # will raise TimekeepCheckError if not
        # Get the id and time values for one "kind" of values
        flat_data = data.loc[
            data["kind"] == data.loc[0, "kind"], ["id", "time"]
        ].reset_index(drop=True)
        # Add the values as columns
        for col_name in np.unique(data["kind"]):
            data_subset = data.loc[
                data.loc[:, "kind"] == col_name, ["id", "time", "value"]
            ].rename(columns={"value": col_name})
            flat_data = flat_data.merge(data_subset, on=["id", "time"])
        return flat_data
    except TimekeepCheckError:
        pass

    try:
        is_timeseries_dataset(data)  # will raise TimekeepCheckError if not
        n, t, d = data.shape
        # data.reshape(n * t, d) lays rows out row-major: row k holds
        # data[k // t, k % t, :], i.e. series k // t at time k % t. The ids
        # must therefore be *repeated* (np.repeat), not tiled, to stay
        # aligned with the tiled time axis — np.tile (the previous code)
        # mislabelled every row beyond the first series.
        id_ = np.repeat(np.arange(n), t)
        time_ = np.tile(np.arange(t), n)
        values_ = data.reshape(n * t, d)
        df = pd.DataFrame({"id": id_, "time": time_})
        for value in range(d):
            df[str(value)] = values_[:, value]
        return df
    except TimekeepCheckError:
        pass

    raise ValueError(
        "Did not recognise data of type {}. Cannot convert to flat dataset".format(
            type(data)
        )
    )
def to_stacked_dataset(data) -> pd.DataFrame:
"""
Convert a tslearn timeseries or tsfresh flat dataset
to a tsfresh stacked dataset
A stacked dataset is a DataFrame with columns for 'id' (of timeseries),
'time' (at which value occurs), 'kind' (of value),
and 'value' (of timeseries parameter)
Parameters
----------
data
The data to have its format changed
Returns
-------
pandas.DataFrame
Data, now as a tsfresh stacked dataset
Raises
------
ValueError
If data is not a tslearn timeseries dataset,
tsfresh stacked dataset or tsfresh flat dataset
"""
try:
is_flat_dataset(data)
d = data.shape[1] - 2
id_ = np.tile(data["id"].to_numpy(), d)
time_ = np.tile(data["time"].to_numpy(), d)
kind_ = np.repeat(
np.array([col for col in data.columns if col not in ("time", "id")]),
data.shape[0],
)
values_ = (
data[[col for col in data.columns if col not in ("time", "id")]]
.to_numpy()
.flatten("F") # flatten Fortran (column-major) order
)
return pd.DataFrame({"id": id_, "time": time_, "kind": kind_, "value": values_})
except TimekeepCheckError:
pass
try:
is_stacked_dataset(data)
return data
except TimekeepCheckError:
pass
try:
is_timeseries_dataset(data) # will raise TimekeepCheckError if not
n, t, d = data.shape
id_ = np.tile(np.arange(n), t * d)
time_ = np.tile(np.arange(t), n * d)
kind_ = np.repeat(np.arange(d), n * t)
values_ = data.flatten() # check if this reshape is correct
return | pd.DataFrame({"id": id_, "time": time_, "kind": kind_, "value": values_}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
def plot_fclayer_models_test_set_results_lut():
    """Box-plot the relative LUT-estimation errors of each FCLayer model
    variant (general/specialized, full/partial dataset, with/without
    augmentation) against the synthesis ground truth.

    Reads the per-model result CSVs from ../test_set_results/FCLayer/ and
    writes the figure to the same directory.
    """
    # CSV base names, one per trained model variant.
    general_without_fu_model_file_name = "test_set_results_FCLayer_LUT_general_without_fully_unfolded_configs"
    general_with_fu_model_file_name = "test_set_results_FCLayer_LUT_general_with_fully_unfolded_configs"
    general_augmentation_model_file_name = "test_set_results_FCLayer_LUT_general_augmentation"
    specialized_model_file_name = "test_set_results_FCLayer_LUT_specialized"
    specialized_augmentation_model_file_name = "test_set_results_FCLayer_LUT_specialized_augmentation"
    general_without_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_without_fu_model_file_name
    general_with_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_with_fu_model_file_name
    general_augmentation_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_augmentation_model_file_name
    specialized_model_folder_path = "../test_set_results/FCLayer/%s.csv" % specialized_model_file_name
    specialized_augmentation_model_folder_path = "../test_set_results/FCLayer/%s.csv" % specialized_augmentation_model_file_name
    df_general_without_fu_model = pd.read_csv(general_without_fu_model_folder_path)
    df_general_with_fu_model = pd.read_csv(general_with_fu_model_folder_path)
    df_general_augmentation_model = pd.read_csv(general_augmentation_model_folder_path)
    df_specialized_model = pd.read_csv(specialized_model_folder_path)
    df_specialized_augmentation_model = pd.read_csv(specialized_augmentation_model_folder_path)
    #import pdb; pdb.set_trace()
    # One column per (estimator, model-variant) pair; GM/SM = general/specialized
    # model, FD/PD = full/partial dataset, AUG = augmented training data.
    df_plot = pd.DataFrame()
    df_plot['HLS GM (FD)'] = df_general_with_fu_model['hls_rel_error']
    df_plot['FINN GM (FD)'] = df_general_with_fu_model['finn_rel_error']
    df_plot['SVR GM (FD)'] = df_general_with_fu_model['svr_rel_error']
    df_plot['HLS GM (PD)'] = df_general_without_fu_model['hls_rel_error']
    df_plot['FINN GM (PD)'] = df_general_without_fu_model['finn_rel_error']
    df_plot['SVR GM (PD)'] = df_general_without_fu_model['svr_rel_error']
    #recheck if they are the same - hls and finn
    #df_plot['HLS (general model + aug)'] = df_general_augmentation_model['hls_rel_error']
    #df_plot['FINN (general model + aug)'] = df_general_augmentation_model['finn_rel_error']
    df_plot['SVR GM (PD + AUG)'] = df_general_augmentation_model['svr_rel_error']
    df_plot['HLS SM'] = df_specialized_model['hls_rel_error']
    df_plot['FINN SM'] = df_specialized_model['finn_rel_error']
    df_plot['SVR SM'] = df_specialized_model['svr_rel_error']
    #recheck if they are the same - hls and finn
    #df_plot['HLS (specialized model + aug)'] = df_specialized_augmentation_model['hls_rel_error']
    #df_plot['FINN (specialized model + aug)'] = df_specialized_augmentation_model['finn_rel_error']
    df_plot['SVR SM (PD + AUG)'] = df_specialized_augmentation_model['svr_rel_error']
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=True, return_type='dict', color=dict(boxes='black', whiskers='black', medians='r', caps='black'), patch_artist=True)
    # One fill colour per column, keyed by estimator type (HLS/FINN/SVR).
    colors = ['lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow']
    #import pdb; pdb.set_trace()
    for patch, color in zip(boxplot['means'], colors):
        patch.set_markeredgecolor('red')
        patch.set_markerfacecolor('red')
    for patch, color in zip(boxplot['boxes'], colors):
        patch.set_facecolor(color)
    plt.xticks(rotation = 45)
    plt.title('FCLayer - LUT estimation model - Test Set Results')
    plt.ylabel('Relative error [%]')
    fig.savefig('../test_set_results/FCLayer/test_set_results_plot_luts_with_fu_with_outliers.png', bbox_inches='tight')
def add_newest_finn_estimation_to_the_csv_file(filename):
    """Merge re-computed FINN BRAM estimates into an existing results CSV.

    Matches rows of the results file against the updated-estimate database on
    the layer-configuration columns, writes the new estimate into a
    'bram_new_finn_estimate' column (-1 where no match is found), recomputes
    the relative error against synthesis, and saves '<filename>_updated.csv'.

    :param filename: base name (without .csv) of the results file under
        ../test_set_results/FCLayer/
    """
    #finn estimate computed with the clasifier
    df_updated_finn_estimate = pd.read_csv("../test_set_results/updated_fclayer_database_finn_estimate.csv")
    filepath = "../test_set_results/FCLayer/%s.csv" % filename
    #the other csv file that needs to be updated
    df_initial = pd.read_csv(filepath)
    df_initial['bram_new_finn_estimate'] = -1
    #remove rows of df_updated_finn_estimate not found in df_initial
    #copy to df_initial finn_new_estimate
    # The specialized min_fu dataset lacks the act/mem_mode columns.
    if (filename == 'test_set_results_FCLayer_Total_BRAM_18K_specialized_min_fu'):
        parameters = ['mh', 'mw', 'pe', 'simd', 'wdt', 'idt']
    else:
        parameters = ['mh', 'mw', 'pe', 'simd', 'wdt', 'idt', 'act', 'mem_mode']
    # NOTE: O(rows_initial * rows_update) row-by-row matching; fine for these
    # result-file sizes, but a merge would scale better.
    for index1, row1 in df_initial[parameters].iterrows():
        if (filename == 'test_set_results_FCLayer_Total_BRAM_18K_specialized_min_fu'):
            df_temp = df_updated_finn_estimate.loc[(df_updated_finn_estimate.mh == row1['mh']) & (df_updated_finn_estimate.mw == row1['mw']) & (df_updated_finn_estimate.pe == row1['pe']) & (df_updated_finn_estimate.simd == row1['simd']) & (df_updated_finn_estimate.idt == row1['idt']) & (df_updated_finn_estimate.wdt == row1['wdt'])]
        else:
            df_temp = df_updated_finn_estimate.loc[(df_updated_finn_estimate.mh == row1['mh']) & (df_updated_finn_estimate.mw == row1['mw']) & (df_updated_finn_estimate.pe == row1['pe']) & (df_updated_finn_estimate.simd == row1['simd']) & (df_updated_finn_estimate.idt == row1['idt']) & (df_updated_finn_estimate.wdt == row1['wdt']) & (df_updated_finn_estimate.act == row1['act']) & (df_updated_finn_estimate.mem_mode == row1['mem_mode'])]
        if not df_temp.empty:
            df_initial.at[index1, 'bram_new_finn_estimate'] = int(df_temp.iloc[0]['BRAM_new'])
    # Guard against division by zero when synthesis reports 0 BRAMs.
    df_initial["Total_BRAM_18K_synth_denom"] = df_initial["Total_BRAM_18K synth"].apply(lambda x: 1 if x == 0 else x)
    df_initial["finn_rel_error_new"] = df_initial.apply(lambda x: (abs(x['bram_new_finn_estimate'] - x["Total_BRAM_18K synth"])/x["Total_BRAM_18K_synth_denom"])*100, axis=1)
    filepath_to_save = "../test_set_results/FCLayer/%s_updated.csv" % filename
    df_initial.to_csv(filepath_to_save, index = False, header=True)
    #import pdb; pdb.set_trace()
def plot_fclayer_models_test_set_results_bram():
    """Box plot of FCLayer BRAM estimation errors on the test set.

    Plots the HLS and SVR general models (GM), trained on the full data
    set (FD) and on the pruned data set (PD); outliers are hidden.  The
    figure is saved under ../test_set_results/FCLayer.

    Fix: the specialized-model CSV was previously loaded but never used
    (all of its plot columns were commented out), so the dead read was
    removed to avoid pointless I/O and a FileNotFoundError when the file
    is absent.
    """
    general_with_fu_model_file_name = "test_set_results_FCLayer_Total_BRAM_18K_general_plus_fu_updated"
    general_without_fu_model_file_name = "test_set_results_FCLayer_Total_BRAM_18K_general_min_fu_updated"
    general_with_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_with_fu_model_file_name
    general_without_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_without_fu_model_file_name
    df_general_with_fu_model = pd.read_csv(general_with_fu_model_folder_path)
    df_general_without_fu_model = pd.read_csv(general_without_fu_model_folder_path)
    df_plot = pd.DataFrame()
    # FINN estimates and the specialized model (SM) are intentionally
    # omitted from this particular figure.
    df_plot['HLS GM (FD)'] = df_general_with_fu_model['hls_rel_error']
    df_plot['SVR GM (FD)'] = df_general_with_fu_model['svr_rel_error']
    df_plot['HLS GM (PD)'] = df_general_without_fu_model['hls_rel_error']
    df_plot['SVR GM (PD)'] = df_general_without_fu_model['svr_rel_error']
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=False, return_type='dict',
                              color=dict(boxes='black', whiskers='black',
                                         medians='r', caps='black'),
                              patch_artist=True)
    # one color per box, alternating by estimator
    colors = ['lightskyblue', 'lightyellow', 'lightskyblue', 'lightyellow']
    for mean_marker, _ in zip(boxplot['means'], colors):
        mean_marker.set_markeredgecolor('red')
        mean_marker.set_markerfacecolor('red')
    for box, color in zip(boxplot['boxes'], colors):
        box.set_facecolor(color)
    plt.xticks(rotation=45)
    plt.title('FCLayer - BRAM estimation model - Test Set Results')
    plt.ylabel('Relative error [%] ')
    fig.savefig('../test_set_results/FCLayer/test_set_results_plot_bram_without_pd.png', bbox_inches='tight')
def plot_thresholding_models_test_set_results_lut():
    """Box plot of Thresholding-layer LUT estimation errors on the test set.

    Plots HLS / FINN / SVR errors for the general model (GM) on full (FD)
    and pruned (PD) data, and for the specialized model (SM) on pruned
    data; outliers are hidden.  The figure is saved under
    ../test_set_results/Thresholding.

    Fix: three result CSVs (the two augmentation variants and the
    full-data specialized model) were loaded but never used -- all their
    plot columns were commented out -- so the dead reads were removed to
    avoid pointless I/O and spurious file-not-found failures.
    """
    general_model_file_name = "test_set_results_Thresholding_LUT_general"
    general_min_fu_model_file_name = "test_set_results_Thresholding_LUT_general_min_fu"
    specialized_min_fu_model_file_name = "test_set_results_Thresholding_LUT_specialized_min_fu"
    general_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_model_file_name
    general_min_fu_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_min_fu_model_file_name
    specialized_min_fu_model_folder_path = "../test_set_results/Thresholding/%s.csv" % specialized_min_fu_model_file_name
    df_general_model = pd.read_csv(general_model_folder_path)
    df_general_min_fu_model = pd.read_csv(general_min_fu_model_folder_path)
    df_specialized_min_fu_model = pd.read_csv(specialized_min_fu_model_folder_path)
    df_plot = pd.DataFrame()
    df_plot['HLS GM (FD)'] = df_general_model['hls_rel_error']
    df_plot['FINN GM (FD)'] = df_general_model['finn_rel_error']
    df_plot['SVR GM (FD)'] = df_general_model['svr_rel_error']
    df_plot['HLS GM (PD)'] = df_general_min_fu_model['hls_rel_error']
    df_plot['FINN GM (PD)'] = df_general_min_fu_model['finn_rel_error']
    df_plot['SVR GM (PD)'] = df_general_min_fu_model['svr_rel_error']
    df_plot['HLS SM (PD)'] = df_specialized_min_fu_model['hls_rel_error']
    df_plot['FINN SM (PD)'] = df_specialized_min_fu_model['finn_rel_error']
    df_plot['SVR SM (PD)'] = df_specialized_min_fu_model['svr_rel_error']
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=False, return_type='dict',
                              color=dict(boxes='black', whiskers='black',
                                         medians='r', caps='black'),
                              patch_artist=True)
    # HLS / FINN / SVR color triple, repeated for each model group
    colors = ['lightskyblue', 'lightgreen', 'lightyellow'] * 3
    for mean_marker, _ in zip(boxplot['means'], colors):
        mean_marker.set_markeredgecolor('red')
        mean_marker.set_markerfacecolor('red')
    for box, color in zip(boxplot['boxes'], colors):
        box.set_facecolor(color)
    plt.xticks(rotation=45)
    plt.title('Thresholding Layer - LUT estimation model - Test Set Results')
    plt.ylabel('Relative error [%] ')
    fig.savefig('../test_set_results/Thresholding/test_set_results_LUT_plot_without_outliers_plus_fu.png', bbox_inches='tight')
def plot_thresholding_models_test_set_results_bram():
    """Box plot of Thresholding-layer BRAM estimation errors on the test set.

    Shows HLS / FINN / SVR relative errors of the general model trained on
    the full data set (FD) and on the pruned data set (PD); outliers are
    included.  The figure is saved under ../test_set_results/Thresholding.
    """
    csv_template = "../test_set_results/Thresholding/%s.csv"
    sources = (
        ("FD", "test_set_results_Thresholding_Total_BRAM_18K_general"),
        ("PD", "test_set_results_Thresholding_Total_BRAM_18K_general_min_fu"),
    )
    estimators = (("HLS", "hls_rel_error"),
                  ("FINN", "finn_rel_error"),
                  ("SVR", "svr_rel_error"))
    df_plot = pd.DataFrame()
    for suffix, csv_name in sources:
        df_results = pd.read_csv(csv_template % csv_name)
        for label, column in estimators:
            df_plot["%s GM (%s)" % (label, suffix)] = df_results[column]
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=True, return_type='dict',
                              color=dict(boxes='black', whiskers='black',
                                         medians='r', caps='black'),
                              patch_artist=True)
    # one color per estimator, repeated for the FD and PD groups
    box_colors = ['lightskyblue', 'lightgreen', 'lightyellow'] * 2
    for mean_marker, _ in zip(boxplot['means'], box_colors):
        mean_marker.set_markeredgecolor('red')
        mean_marker.set_markerfacecolor('red')
    for box, box_color in zip(boxplot['boxes'], box_colors):
        box.set_facecolor(box_color)
    plt.xticks(rotation=45)
    plt.title('Thresholding Layer - BRAM estimation model - Test Set Results')
    plt.ylabel('Relative error [%] ')
    fig.savefig('../test_set_results/Thresholding/test_set_results_BRAM_plot_with_outliers.png', bbox_inches='tight')
def plot_SWU_models_test_set_results_lut():
    """Box plot of Sliding Window Unit LUT estimation errors on the test set.

    Only the SVR models are shown: the general model and the two
    depthwise-specialized variants (dw=0, dw=1); outliers are hidden.
    The figure is saved under ../test_set_results/Sliding_Window_Unit.
    """
    csv_template = "../test_set_results/Sliding_Window_Unit/%s.csv"
    sources = (
        ("SVR GM", "test_set_results_Sliding_Window_Unit_LUT_general"),
        ("SVR SM (dw=0)", "test_set_results_Sliding_Window_Unit_LUT_specialized_dw_0"),
        ("SVR SM (dw=1)", "test_set_results_Sliding_Window_Unit_LUT_specialized_dw_1"),
    )
    df_plot = pd.DataFrame()
    for column_label, csv_name in sources:
        df_plot[column_label] = pd.read_csv(csv_template % csv_name)['svr_rel_error']
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=False, return_type='dict',
                              color=dict(boxes='black', whiskers='black',
                                         medians='r', caps='black'),
                              patch_artist=True)
    # SVR boxes all share the same color
    box_colors = ['lightyellow'] * 3
    for mean_marker, _ in zip(boxplot['means'], box_colors):
        mean_marker.set_markeredgecolor('red')
        mean_marker.set_markerfacecolor('red')
    for box, box_color in zip(boxplot['boxes'], box_colors):
        box.set_facecolor(box_color)
    plt.xticks(rotation=45)
    plt.title('Sliding Window Unit - LUT estimation model - Test Set Results')
    plt.ylabel('Relative error [%] ')
    fig.savefig('../test_set_results/Sliding_Window_Unit/test_set_results_LUT_plot_only_svr.png', bbox_inches='tight')
def plot_SWU_models_test_set_results_bram():
    """Box plot of Sliding Window Unit BRAM estimation errors on the test set.

    Shows HLS / FINN / SVR relative errors for the general model and the
    two depthwise-specialized variants (dw=0, dw=1); outliers are
    included.  The figure is saved under
    ../test_set_results/Sliding_Window_Unit.
    """
    csv_template = "../test_set_results/Sliding_Window_Unit/%s.csv"
    sources = (
        ("GM", "test_set_results_Sliding_Window_Unit_Total_BRAM_18K_general"),
        ("SM (dw=0)", "test_set_results_Sliding_Window_Unit_Total_BRAM_18K_specialized_dw_0"),
        ("SM (dw=1)", "test_set_results_Sliding_Window_Unit_Total_BRAM_18K_specialized_dw_1"),
    )
    estimators = (("HLS", "hls_rel_error"),
                  ("FINN", "finn_rel_error"),
                  ("SVR", "svr_rel_error"))
    df_plot = pd.DataFrame()
    for suffix, csv_name in sources:
        df_results = pd.read_csv(csv_template % csv_name)
        for label, column in estimators:
            df_plot["%s %s" % (label, suffix)] = df_results[column]
    fig = plt.figure(figsize=(20, 11))
    boxplot = df_plot.boxplot(showmeans=True, showfliers=True, return_type='dict',
                              color=dict(boxes='black', whiskers='black',
                                         medians='r', caps='black'),
                              patch_artist=True)
    # one color per estimator, repeated for each of the three model groups
    box_colors = ['lightskyblue', 'lightgreen', 'lightyellow'] * 3
    for mean_marker, _ in zip(boxplot['means'], box_colors):
        mean_marker.set_markeredgecolor('red')
        mean_marker.set_markerfacecolor('red')
    for box, box_color in zip(boxplot['boxes'], box_colors):
        box.set_facecolor(box_color)
    plt.xticks(rotation=45)
    plt.title('Sliding Window Unit - BRAM estimation model - Test Set Results')
    plt.ylabel('Relative error [%] ')
    fig.savefig('../test_set_results/Sliding_Window_Unit/test_set_results_BRAM_plot_with_outliers.png', bbox_inches='tight')
def plot_fclayer_target_processing_results():
prep_none_file_name = "test_set_results_FCLayer_LUT_general_preprocessing_none"
prep_log_file_name = "test_set_results_FCLayer_LUT_general_preprocessing_log"
prep_diff_file_name = "test_set_results_FCLayer_LUT_general_preprocessing_diff"
prep_none_folder_path = "../test_set_results/FCLayer/%s.csv" % prep_none_file_name
prep_log_folder_path = "../test_set_results/FCLayer/%s.csv" % prep_log_file_name
prep_diff_folder_path = "../test_set_results/FCLayer/%s.csv" % prep_diff_file_name
df_prep_none = pd.read_csv(prep_none_folder_path)
df_prep_log = pd.read_csv(prep_log_folder_path)
df_prep_diff = pd.read_csv(prep_diff_folder_path)
df_plot = | pd.DataFrame() | pandas.DataFrame |
from financeAPI.financeAPI_lib import FinanceAPI as fapi
from pathlib import Path
import json
import pandas as pd
class Stock:
n_stocks = 0
API = fapi()
    def __init__(self, symbol:str):
        """Create a Stock for *symbol* and load its data from the finance API.

        Note: construction performs network calls (via self._init_data()
        and self._load_chartHistory()), so the shared FinanceAPI client
        must be able to register a key -- see _init_data().
        """
        self.symbol = symbol
        # Per-period fundamentals, filled in later (empty frames here).
        self.fundamentalsAnnual = pd.DataFrame(columns=['time', 'year', 'revenue', 'grossProfit', 'operatingIncome', 'netProfit',
                                            'grossMargin', 'operatingMargin', 'profitMargin'])
        self.fundamentalsQuarter = pd.DataFrame(columns=['time', 'quarter', 'revenue', 'grossProfit', 'operatingIncome', 'netProfit',
                                            'grossMargin', 'operatingMargin', 'profitMargin'])
        self.growthAnnual = pd.DataFrame(columns=['time', 'revGrowth', 'profitGrowth'])
        self.growthQuarter = pd.DataFrame(columns=['time', 'revGrowth', 'profitGrowth'])
        # Initialized in self._init_data()
        # TODO maybe use DF_xxx instead of fundamentalsAnnual/Quarter & growthAnnual in future
        self.DF_fundamentalsAnnual = pd.DataFrame()
        self.DF_fundamentalsQuarter = pd.DataFrame()
        self.DF_growthAnnual = pd.DataFrame()
        #self.DF_growthQuarter = pd.DataFrame()
        # Raw profile/income-statement payloads fetched from the API.
        self.data = self._init_data()
        # One-row-per-day price history.
        self.chart_onePerDay = self._load_chartHistory(period='1d')
        # TODO tbd - daily charts with 1min, 5min, ..., 1h, 4h intervalls
        #self.chart_daily = self._load_chartHistory(XXX)
        # Class-wide instance counter (reported by Stock.counter()).
        Stock.n_stocks += 1
    @classmethod
    def counter(cls):
        """Return the number of Stock instances created so far."""
        return cls.n_stocks
    def _load_chartHistory(self, period='1d'):
        """Fetch the price chart for this symbol via the shared API client.

        :param period: only '1d' (daily chart) is implemented; any other
            value falls through and implicitly returns None -- presumably
            a placeholder for the intraday intervals mentioned in the
            TODO in __init__ (confirm before relying on it).
        :return: DataFrame built from the API's 'historical' records,
            or None for unsupported periods.
        """
        if period == '1d':
            hist = Stock.API.callAPI_financialModelingPrep_(self.symbol, call="dailychart")
            return pd.DataFrame(hist['historical'])
def _init_data(self, save=False):
data = {}
if Stock.API.key_registered == False:
# Check for API Key and load
if Path("financeAPI/Secret_Key.txt").is_file():
with open('financeAPI/Secret_Key.txt') as file:
key = file.read()
Stock.API.registerKey_(key)
else:
print("No file with API key found")
exit()
data['symbol'] = self.symbol
# self.metrics_data_(self.symbol)
# self.ratios_data_(self.symbol)
"""
profile data
"""
profile = Stock.API.callAPI_financialModelingPrep_(self.symbol, call="profile")
data["profile"] = {}
d_profile = {}
for k in profile.keys():
d_profile[k] = profile[k]
data["profile"] = d_profile
"""
income statement data
"""
data["income_statement"] = {}
# Annual
income_statement = Stock.API.callAPI_financialModelingPrep_(self.symbol, call="income_statement")
self.DF_fundamentalsAnnual = | pd.DataFrame(income_statement) | pandas.DataFrame |
"""
Wind Power Outputs
------------------
output functions for Wind Power component
"""
import os.path
import numpy as np
from pandas import DataFrame
from config import COMPONENT_NAME
import aaem.constants as constants
from aaem.components import comp_order, definitions
from datetime import datetime
## component summary
def component_summary (results, res_dir):
"""Creates the regional and communities summary for the component in provided
directory
Parameters
----------
results : dictionary
results from the model, dictionary with each community or project
as key
res_dir : path
location to save file
"""
communities_summary (results, res_dir)
save_regional_summary(create_regional_summary (results), res_dir)
def communities_summary (coms, res_dir):
    """Saves the component summary by community

    Writes one CSV row per parent community with the wind-power
    component's inputs, outputs and NPV economics, preceded by a
    commented header describing every column.

    Parameters
    ----------
    coms : dictionary
        results from the model, dictionary with each community or project
        as key
    res_dir : path
        location to save file
    """
    #~ return
    out = []
    for c in sorted(coms.keys()):
        # intertie children are reported under their parent, so skip them
        it = coms[c]['community data'].intertie
        if it is None:
            it = 'parent'
        if it == 'child':
            continue
        try:
            # ??? NPV or year one
            wind = coms[c][COMPONENT_NAME]
            start_yr = wind.comp_specs['start year']
            wind.get_diesel_prices()
            diesel_price = float(wind.diesel_prices[0].round(2))
            phase = wind.comp_specs['phase']
            average_load = wind.average_load
            existing_load = wind.cd['wind capacity']
            existing_solar = wind.cd['solar capacity']
            wind_class = float(wind.comp_specs['wind class'])
            proposed_load = wind.load_offset_proposed
            cap_fac = float(wind.comp_specs['capacity factor'])
            heat_rec_opp = wind.cd['heat recovery operational']
            try:
                #~ offset = wind.load_offset_proposed
                net_gen_wind = wind.net_generation_wind
                decbb = wind.diesel_equiv_captured
                loss_heat = wind.loss_heat_recovery
                electric_diesel_reduction=wind.electric_diesel_reduction
                diesel_red = wind.reduction_diesel_used
                levelized_cost = wind.levelized_cost_of_energy
                break_even = wind.break_even_cost
                eff = wind.cd["diesel generation efficiency"]
            except AttributeError:
                # component did not run to completion -- zero out outputs
                offset = 0
                net_gen_wind = 0
                decbb = 0
                electric_diesel_reduction=0
                loss_heat = 0
                diesel_red = 0
                levelized_cost = 0
                break_even = 0
                eff = wind.cd["diesel generation efficiency"]
            #~ try:
                #~ red_per_year = net_gen_wind / eff
            #~ except ZeroDivisionError:
                #~ red_per_year = 0
            name = c
            if name == 'Barrow':
                name = 'Utqiagvik (Barrow)'
            # one row, ordered to match `cols` below
            l = [name,
                 start_yr,
                 phase,
                 wind_class,
                 average_load,
                 proposed_load,
                 existing_load,
                 existing_solar,
                 cap_fac,
                 net_gen_wind,
                 decbb,
                 loss_heat,
                 heat_rec_opp,
                 diesel_red,
                 electric_diesel_reduction,
                 eff,
                 diesel_price,
                 break_even,
                 levelized_cost,
                 wind.get_NPV_benefits(),
                 wind.get_NPV_costs(),
                 wind.get_NPV_net_benefit(),
                 wind.irr,
                 wind.get_BC_ratio(),
                 wind.reason
                ]
            out.append(l)
        except (KeyError,AttributeError) as e:
            # communities without wind results are silently skipped
            #~ print e
            pass
    cols = ['Community',
            'Start Year',
            'Project phase',
            'Assumed Wind Class',
            'Average Diesel Load [kW]',
            'Wind Capacity Proposed [kW]',
            'Existing Wind Capacity [kW]',
            'Existing Solar Capacity [kW]',
            'Assumed Wind Class Capacity Factor [%]',
            'Net Proposed Wind Generation [kWh]',
            'Heating Oil Equivalent Captured by Secondary Load [gal]',
            'Loss of Recovered Heat from Genset [gal]',
            'Heat Recovery Operational',
            'Net in Heating Oil Consumption [gal]',
            'Wind Power Reduction in Utility Diesel Consumed per year [gal]',
            'Diesel Generator Efficiency','Diesel Price - year 1 [$/gal]',
            'Breakeven Diesel Price [$/gal]',
            'Levelized Cost Of Energy [$/kWh]',
            'Wind NPV benefits [$]',
            'Wind NPV Costs [$]',
            'Wind NPV Net benefit [$]',
            'Wind Internal Rate of Return',
            'Wind Benefit-cost ratio',
            'notes'
            ]
    data = DataFrame(out,columns = cols).set_index('Community')#.round(2)
    f_name = os.path.join(res_dir,
                COMPONENT_NAME.lower().replace(' ','_') + '_summary.csv')
    # write the commented header first, then append the data rows
    fd = open(f_name,'w')
    fd.write(("# wind summary\n"
        '# Breakdown by community of potential wind power improvements'
        '# \n'
        '# Community: ' + definitions.COMMUNITY + '\n'
        '# Start Year: ' + definitions.START_YEAR + '\n'
        '# Project phase: '+ definitions.PHASE + '\n'
        '# Assumed Wind Class: Wind power density class\n'
        '# Average Diesel Load [kW]: ' + definitions.DIESEL_LOAD +'\n'
        '# Wind Capacity Proposed [kW]: Proposed additional capacity for wind'
        ' generation in kilowatts.\n'
        '# Existing Wind Capacity [kW]: Existing wind generation capacity'
        ' in kilowatts.\n'
        '# Existing Solar Capacity [kW]: Existing solar generation capacity'
        ' in kilowatts.\n'
        '# Assumed Wind Class Capacity Factor [%]:\n'
        '# Net Proposed Wind Generation [kWh]: Proposed wind net generation'
        ' in kilowatt-hours.\n'
        '# Heating Oil Equivalent Captured by Secondary Load [gal]: \n'
        '# Loss of Recovered Heat from Genset [gal]: \n'
        '# Heat Recovery Operational: ' + definitions.HR_OP + '\n'
        '# Net in Heating Oil Consumption [gal]: \n'
        '# Wind Power Reduction in Utility Diesel Consumed per year [gal]: Estimated '
        'Reduction in utility diesel if wind power system is '
        'installed/upgraded. In gallons per year\n'
        '# Diesel Generator Efficiency: '+ definitions.GEN_EFF + ' \n'
        '# Diesel Price - year 1 [$\gal]: ' + definitions.PRICE_DIESEL + '\n'
        '# Breakeven Diesel Price [$/gal]: ' + definitions.BREAK_EVEN_COST_DIESEL + '\n'
        '# Levelized Cost Of Energy [$/kWh]:' + definitions.LCOE + '\n'
        '# Wind power NPV benefits [$]: '+ definitions.NPV_BENEFITS + '\n'
        '# Wind power NPV Costs [$]: ' + definitions.NPV_COSTS + '\n'
        '# Wind power NPV Net benefit [$]: ' + definitions.NPV_NET_BENEFITS + '\n'
        '# Wind power Internal Rate of Return: ' + definitions.IRR +'\n'
        '# Wind power Benefit-cost ratio: ' + definitions.NPV_BC_RATIO +'\n'
        '# notes: '+ definitions.NOTES +'\n'))
    fd.close()
    data.to_csv(f_name, mode='a')
def create_regional_summary (results):
"""Creates the regional summary
Parameters
----------
results : dictionary
results from the model, dictionary with each community or project
as key
Returns
-------
DataFrame
containing regional results
"""
#~ print "start"
regions = {}
for c in results:
c_region = results[c]['community data'].get_item('community','region')
comp = results[c][COMPONENT_NAME]
#~ print comp
bc_ratio = comp.get_BC_ratio()
bc_ratio = (not type(bc_ratio) is str) and (not np.isinf(bc_ratio))\
and (bc_ratio > 1)
#~ print bc_ratio ,comp.get_BC_ratio()
#~ return
capex = round(comp.get_NPV_costs(),0) if bc_ratio else 0
net_benefit = round(comp.get_NPV_net_benefit(),0) if bc_ratio else 0
displaced_fuel = \
round(comp.electric_diesel_reduction,0) if bc_ratio else 0
if (results[c]['community data'].intertie == 'child' or c.find('+') != -1) and not c.find('wind') != -1:
#~ print c
continue
if c_region in regions.keys():
## append entry
regions[c_region]['Number of communities/interties in region'] +=1
k = 'Number of communities with cost effective projects'
regions[c_region][k] += 1 if bc_ratio else 0
k = 'Investment needed for cost-effective projects ($)'
regions[c_region][k] += capex
k = 'Net benefit of cost-effective projects ($)'
regions[c_region][k] += net_benefit
k = 'Generation diesel displaced by cost-effective projects (gallons)'
regions[c_region][k] += displaced_fuel
else:
## set up "first" entry
regions[c_region] = {'Number of communities/interties in region':1}
k = 'Number of communities with cost effective projects'
regions[c_region][k] = 1 if bc_ratio else 0
k = 'Investment needed for cost-effective projects ($)'
regions[c_region][k] = capex
k = 'Net benefit of cost-effective projects ($)'
regions[c_region][k] = net_benefit
k = 'Generation diesel displaced by cost-effective projects (gallons)'
regions[c_region][k] = displaced_fuel
cols = ['Number of communities/interties in region',
'Number of communities with cost effective projects',
'Investment needed for cost-effective projects ($)',
'Net benefit of cost-effective projects ($)',
'Generation diesel displaced by cost-effective projects (gallons)']
try:
summary = | DataFrame(regions) | pandas.DataFrame |
from pandas import Series, DataFrame
from unittest.case import TestCase
from survey.questions import CountQuestion
class TestCountQuestion(TestCase):
def setUp(self) -> None:
data = | Series([3, 1, 4, 1, 5]) | pandas.Series |
"""Conditional Volatility Models
- VaR, halflife, GARCH, EWMA, Scholes-Williams Beta
- VIX, Bitcoin, St Louis Fed FRED
<NAME>
License: MIT
"""
import os
import numpy as np
import scipy
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import seaborn as sns
from finds.alfred import Alfred
from settings import settings
imgdir = os.path.join(settings['images'], 'ts')    # output directory for saved figures
alf = Alfred(api_key=settings['fred']['api_key'])  # FRED data client (finds.alfred)
# proportion of failures likelihood test
def kupiecLR(s, n, var):
"""Kupiec LR test (S violations in N trials) of VaR"""
p = 1 - var # e.g. var95 is 0.95
t = n - s # number of non-violations
num = np.log(1 - p)*(n - s) + np.log(p)*s
den = np.log(1 - (s/n))*(n - s) + np.log(s/n)*s
lr = -2 * (num - den)
return {'lr': lr, 'pvalue': 1 - scipy.stats.chi2.cdf(lr, df=1)}
def pof(X, pred, var=0.95):
    """Kupiec proportion-of-failures backtest of a VaR forecast.

    Standardizes returns X by the volatility forecast pred, counts
    exceedances of the VaR quantile, and runs the Kupiec LR test.
    Returns a dict with 'n', 's' and the kupiecLR results.
    """
    threshold = scipy.stats.norm.ppf(1 - var)   # e.g. -1.645 for VaR95
    standardized = X / pred
    result = {'n': len(standardized),
              's': np.sum(standardized < threshold)}
    result.update(kupiecLR(result['s'], result['n'], var))
    return result
# convert alpha to halflife
from pandas.api import types
def halflife(alpha):
    """Returns halflife from alpha = -ln(2)/ln(lambda), where lambda=1-alpha

    Applies elementwise (recursively) to list-like inputs.  Outside the
    open interval (0, 1): non-positive alpha maps to infinity, alpha >= 1
    maps to 0.
    """
    if types.is_list_like(alpha):
        return [halflife(a) for a in alpha]
    if 0 < alpha < 1:
        return -np.log(2) / np.log(1 - alpha)
    return 0 if alpha > 0 else np.inf
# Retrieve Bitcoin returns from FRED and plot EWMA volatility bands
# against next-day returns
z = scipy.stats.norm.ppf(0.05)   # 5% left-tail z-score for the VaR95 bands
alpha = [0.03, 0.06]             # EWMA smoothing constants (lambda = 1 - alpha)
series_id = 'CBBTCUSD'
X = alf(series_id, log=1, diff=1)[126:]   # daily log returns, first 126 obs dropped
X.index = pd.DatetimeIndex(X.index.astype(str), freq='infer')
Y = np.square(X)                 # squared returns feed the EWMA variance
ewma = [np.sqrt((Y.ewm(alpha=a).mean()).rename(series_id)) for a in alpha]
fig, ax = plt.subplots(num=1, clear=True, figsize=(10,6))
ax.plot(X.shift(-1), ls='-', lw=.5, c='grey')
ax.plot(z * ewma[0], lw=1, ls='-.', c='b')
ax.plot(z * ewma[1], lw=1, ls='--', c='r')
ax.set_title(alf.header(series_id))
ax.set_ylabel('Daily Returns and EWMA Volatility')
ax.legend([series_id] + [f"$\lambda$ = {1-a:.2f}" for a in alpha])
ax.plot(-z * ewma[0], lw=1, ls='-.', c='b')
ax.plot(-z * ewma[1], lw=1, ls='--', c='r')
plt.savefig(os.path.join(imgdir, 'ewma.jpg'))
plt.show()
# Retrieve SP500 and VIX data, compute EWMA volatility of SP500 returns
sp500 = alf('SP500', log=1, diff=1).dropna()
vix = alf('VIXCLS')
# NOTE(review): alpha=0.05 implies lambda=0.95, but the label says 0.94
# (the RiskMetrics convention) -- confirm which is intended
ewma = np.sqrt((np.square(sp500).ewm(alpha=0.05).mean()).rename('EWMA(0.94)'))
# VIX is divided by 100 and sqrt(252), i.e. converted to a daily decimal vol
mkt = pd.concat([sp500, ewma, (vix/100)/np.sqrt(252)], axis=1, join='inner')
mkt.index = pd.DatetimeIndex(mkt.index.astype(str), freq='infer')
mkt
# GARCH(1,1) fit via the R package rugarch (through rpy2)
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
from finds.pyR import PyR
rugarch_ro = importr('rugarch') # to use library rugarch
c_ = ro.r['c']
list_ = ro.r['list']
spec = ro.r['ugarchspec'](mean_model=list_(armaOrder=c_(2, 0),
                                           include_mean=False))
model = ro.r['ugarchfit'](spec, data=PyR(mkt['SP500'].values).ro)
ro.r['show'](model)
for which in [4, 5, 10, 11]:    # selected rugarch diagnostic plot numbers
    ro.r['plot'](model, which=which)
    PyR.savefig(os.path.join(imgdir, f'ugarch{which}.png'))
# Plot all, but for illustration only: the 3 "forecasts" are not comparable
# VIX is implied from 3-month options, GARCH full in-sample, EWMA is rolling avg
mkt['GARCH(1,1)'] = PyR(ro.r['sigma'](model)).values # fitted volatility values
var = 0.95 # VaR95
z = scipy.stats.norm.ppf(1 - var)
fig, ax = plt.subplots(num=1, clear=True, figsize=(10,7))
ax.plot(mkt['SP500'], ls='-', lw=.5, c='grey')
ax.plot(z * mkt.iloc[:,1], lw=1, ls='-.', c='blue')
ax.plot(z * mkt.iloc[:,2], lw=1, ls='--', c='green')
ax.plot(z * mkt.iloc[:,3], lw=1, ls=':', c='red')
ax.set_title('SP500')
ax.set_ylabel('Daily Returns and VaR')
ax.legend(mkt.columns)
plt.savefig(os.path.join(imgdir, 'var.jpg'))
plt.show()
# get all daily series of financial price categories from FRED
categories = {}
for category in [32255, 33913, 94]:
    c = alf.get_category(category)
    print(category, c['id'], c['name'])
    series = Series({s['id']: s['title'] for s in c['series']
                     if s['frequency'].startswith('Daily') and
                     'DISCONT' not in s['title']})
    categories.update({c['name']: series})
c = pd.concat(list(categories.values())).to_frame()
# NOTE(review): modern pandas spells these options "display.max_colwidth" /
# "display.max_rows" -- confirm the installed version accepts the short form
pd.set_option("max_colwidth", 80)
pd.set_option("max_rows", 100)
c
# Fit EWMA across a grid of alphas and backtest VaR
alphas = 1 - np.linspace(1, 0.91, 10)
results = {'pof': DataFrame(), 'n': DataFrame()}
from functools import partial
import pandas as pd
import numpy as np
from datacode.summarize import format_numbers_to_decimal_places
from datacode.formatters.stars import convert_to_stars
from datacode.psm.names import matched_var, t_stat_var, diff_var
from datacode.psm.typing import StrList
def matching_summary_stats(df: pd.DataFrame, matched_df: pd.DataFrame,
                           treated_var: str, describe_vars: StrList,
                           entity_var: str,
                           control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
    """Summary statistics comparing the matched sample to the full sample.

    Builds the mean/diff table on the matched sample (control column
    labeled as the matched group), then swaps in the control means from
    the full sample so the output shows Control, Treated, Matched, the
    treated-matched difference and its t-statistic.
    """
    summ = mean_and_diff_df_by_treatment(
        matched_df, treated_var, describe_vars, entity_var,
        control_name=matched_var, treated_name=treated_name,
    )
    full_sample_summ = mean_and_diff_df_by_treatment(
        df, treated_var, describe_vars, entity_var,
        control_name=control_name, treated_name=treated_name,
    )
    # control column comes from the full (unmatched) sample
    summ[control_name] = full_sample_summ[control_name]
    ordered_cols = [control_name, treated_name, matched_var, diff_var, t_stat_var]
    return summ[ordered_cols]
def mean_and_diff_df_by_treatment(df: pd.DataFrame, treated_var: str, describe_vars: StrList,
                                  entity_var: str, num_decimals: int = 2, coerce_ints: bool = True,
                                  control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
    """Means by treatment group plus the difference and its t-statistic.

    Computes mean, standard deviation and count per treatment group,
    derives the treated-control difference and t-stat, and returns the
    formatted (stringified, star-annotated) summary table.
    """
    # per-group mean/std/count tables share the same call signature
    stat_frames = {}
    for stat_name in ('mean', 'std', 'count'):
        stat_frames[stat_name] = _stat_df_by_treatment(
            df, treated_var, describe_vars, entity_var,
            stat=stat_name, control_name=control_name, treated_name=treated_name,
        )
    diff_t = _diff_and_t_df_from_mean_and_std_df(
        stat_frames['mean'], stat_frames['std'], stat_frames['count'],
        control_name=control_name, treated_name=treated_name,
    )
    combined = pd.concat([stat_frames['mean'], diff_t], axis=1)
    return _format_summary_df(combined, num_decimals=num_decimals, coerce_ints=coerce_ints)
def _format_summary_df(df: pd.DataFrame, float_format: str = '.2f', num_decimals: int = 2,
                       coerce_ints: bool = True,) -> pd.DataFrame:
    """Format the numeric summary table to fixed decimals with t-stat stars.

    Appends significance stars (from the t-stat column) to the diff
    column, then stringifies every cell to `num_decimals` places.

    NOTE(review): `float_format` is unused -- the formatter that used it
    is commented out below; kept only for interface compatibility.
    NOTE(review): mutates the diff column of the caller's frame in place
    before applymap creates the returned copy -- confirm callers do not
    reuse the input.
    """
    # num_formatter = lambda x: f'{x:{float_format}}' if isinstance(x, float) else x
    num_formatter = partial(format_numbers_to_decimal_places, decimals=num_decimals, coerce_ints=coerce_ints)
    df[diff_var] = df[diff_var].apply(num_formatter)
    df[diff_var] = df[diff_var] + df[t_stat_var].apply(
        lambda x: convert_to_stars(x) if isinstance(x, float) else x
    )
    df = df.applymap(num_formatter)
    return df
def _stat_df_by_treatment(df: pd.DataFrame, treated_var: str, describe_vars: StrList, entity_var: str ,
                          stat: str = 'mean',
                          control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
    """One statistic (mean/std/count) of `describe_vars` per treatment group.

    Returns a frame with one row per variable plus an entity-count row,
    and one column per treatment group (control first, then treated).
    """
    grouped = df.groupby(treated_var)[describe_vars]
    # resolve e.g. grouped.mean / grouped.std / grouped.count dynamically
    agg_func = getattr(grouped, stat)
    stat_df = agg_func().T
    # assumes exactly two treatment groups, ordered control then treated
    stat_df.columns = [control_name, treated_name]
    count_series = _count_series(
        df, treated_var, entity_var,
        control_name=control_name, treated_name=treated_name,
    )
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # concatenating the Series as a one-row frame is the equivalent.
    stat_df = pd.concat([stat_df, count_series.to_frame().T])
    return stat_df
def _diff_and_t_df_from_mean_and_std_df(mean_df: pd.DataFrame, std_df: pd.DataFrame, count_df: pd.DataFrame,
control_name: str = 'Control',
treated_name: str = 'Treated') -> pd.DataFrame:
diff = mean_df[treated_name] - mean_df[control_name]
standard_errors = (
(std_df[control_name] ** 2 ) /count_df[control_name] +
(std_df[treated_name] ** 2 ) /count_df[treated_name]
) ** 0.5
t_stats = diff /standard_errors
t_stats = t_stats.replace(np.inf, '')
df = | pd.DataFrame([diff, t_stats]) | pandas.DataFrame |
'''Utilities common to classifiers.'''
"""
Functions know about features and states.
The feature matrix df_X has columns that are
feature names, and index that are instances.
Trinary values are in the set {-1, 0, 1}.
The state Series ser_y has values that are
states, and indexes that are instances.
The state F-statistic for a gene quantifies
how well it distinguishes between states.
"""
from common_python import constants as cn
import collections
import copy
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import random
import scipy.stats
FRACTION = "fraction"  # column label for state-occurrence fractions
STATE = "state"  # column label for state values
# Previous, current, and next state in a time series
# p_dist - # indices to previous state
# n_dist - # indices to next state
AdjacentStates = collections.namedtuple(
    "AdjacentStates", "prv cur nxt p_dist n_dist")
#
# clf - classifier instance; features - features the classifier uses
ClassifierDescription = collections.namedtuple(
    "ClassifierDescription", "clf features")
#
# score - score for cross validations
# scores - list of scores
# clfs - list of classifiers
BinaryCrossValidateResult = collections.namedtuple(
    "BinaryCrossValidateResult",
    "score scores clfs")
def findAdjacentStates(ser_y, index):
    """
    Finds the states adjacent to the index provided, where
    the indices are in time serial order.

    :param pd.Series ser_y:
        index is in time series order
    :param object index: an index in ser_y
    :return AdjacentStates: previous/current/next state and the distances
        (in index positions) to the previous and next state (np.nan if none)
    """
    cur_state = ser_y[index]

    def findNewState(indices):
        """
        Scans ``indices`` (the first is the current position) and returns the
        first state different from the current one together with its distance.
        Returns (np.nan, np.nan) if no such state exists.
        """
        dist = -1  # Starting with current state
        ser = ser_y.loc[indices]
        # Fix: Series.iteritems was removed in pandas 2.0; items() is the
        # long-standing equivalent.
        for _, state in ser.items():
            dist += 1
            if state != cur_state:
                return state, dist
        return np.nan, np.nan
    #
    indices = ser_y.index.tolist()
    pos = indices.index(index)
    prv_indices = indices[:pos+1]
    nxt_indices = indices[pos:]
    prv_indices.reverse()  # So start after pos
    nxt_state, n_dist = findNewState(nxt_indices)
    prv_state, p_dist = findNewState(prv_indices)
    return AdjacentStates(prv=prv_state, cur=cur_state,
                          nxt=nxt_state, p_dist=p_dist, n_dist=n_dist)
def calcStateProbs(ser_y):
    """
    Calculates the fraction of instances observed in each state.

    :param pd.Series ser_y:
        index: instance
        value: state
    :return pd.Series:
        index: state
        value: float
    """
    # Duplicate the state column so groupby-count yields one count per state.
    df_counts = pd.DataFrame({STATE: ser_y, FRACTION: ser_y})
    counts = df_counts.groupby(STATE).count()
    return counts[FRACTION] / len(df_counts)
def aggregatePredictions(df_pred, threshold=0.8):
    """
    Aggregates probabilistic predictions, choosing the
    state with the largest probability, if it exceeds
    the threshold.

    :param pd.DataFrame df_pred:
        columns: state
        rows: instance
        values: float
    :param float threshold:
    :return pd.Series:
        index: instance
        values: state or np.nan if below threshold
    """
    columns = df_pred.columns
    values = []
    for _, row in df_pred.iterrows():
        row_list = row.tolist()
        max_prob = max(row_list)
        # Fix: the original built a thresholded copy but then iterated the
        # raw probabilities, so the threshold was never applied. Accept a
        # state only when its probability meets the threshold (per docstring).
        if max_prob >= threshold:
            values.append(columns[row_list.index(max_prob)])
        else:
            values.append(np.nan)
    return pd.Series(values, index=df_pred.index)
def makeOneStateSer(ser, state):
    """
    Builds a binary indicator Series: 1 where the value equals
    ``state`` and 0 elsewhere.

    :param pd.Series ser:
        index: instance
        value: int (state)
    :param int state: state to indicate
    :return pd.Series
    """
    # Boolean mask converted to 0/1 integers.
    return (ser == state).astype(int)
def makeFstatDF(df_X, ser_y, ser_weight=None):
    """
    Constructs the state F-statistic for gene features by state,
    one column per state (one-vs-rest).

    :param pd.DataFrame df_X:
        column: gene
        row: instance
        value: trinary
    :param pd.Series ser_y:
        row: instance
        value: state
    :param pd.Series ser_weight: weight for instances
        row: instance
        value: multiplier for instance
    :return pd.DataFrame:
        columns: state
        index: gene
        value: -log significance level
        ordered by descending magnitude of the best single-state value
    """
    SORT_KEY = "max"
    if ser_weight is None:
        # Default: every instance weighted equally.
        ser_weight = ser_y.copy()
        ser_weight.loc[:] = 1
    # Weight each instance (row) of the feature matrix.
    df_X_adj = df_X.mul(ser_weight, axis=0)
    # One-vs-rest F-statistic Series per state.
    per_state = {
        state: makeFstatSer(df_X_adj, makeOneStateSer(ser_y, state),
                            is_prune=False)
        for state in ser_y.unique()
    }
    df = pd.DataFrame()
    for state, ser in per_state.items():
        df[state] = ser
    # Rank genes by their best single-state statistic.
    df[SORT_KEY] = df.max(axis=1)
    df = df.sort_values(SORT_KEY, ascending=False)
    del df[SORT_KEY]
    return df
def makeFstatSer(df_X, ser_y,
                 is_prune=True, state_equ=None):
    """
    Constructs the state F-statistic for gene features.
    This statistic quantifies the variation of the
    gene expression between states to that within
    states.

    NOTE(review): this definition is dead code -- it is shadowed by an
    identical redefinition of makeFstatSer immediately below in this module;
    one of the two should be deleted.

    :param pd.DataFrame df_X:
        column: gene
        row: instance
        value: trinary
    :param pd.Series ser_y:
        row: instance
        value: state
    :param bool is_prune: removes nan and inf values
    :param dict state_equ: Provides for state equivalences
        key: state in ser_y
        value: new state
    :return pd.Series: F-statistic per gene, sorted descending
    """
    if state_equ is None:
        state_equ = {s: s for s in ser_y.unique()}
    # Construct the groups
    df_X = df_X.copy()
    df_X[STATE] = ser_y.apply(lambda v: state_equ[v])
    # Calculate aggregations
    dfg = df_X.groupby(STATE)
    dfg_std = dfg.std()
    dfg_mean = dfg.mean()
    dfg_count = dfg.count()
    # Calculate SSB (between-state sum of squares)
    ser_mean = df_X.mean()
    del ser_mean[STATE]
    df_ssb = dfg_count*(dfg_mean - ser_mean)**2
    ser_ssb = df_ssb.sum()
    # SSW (within-state). NOTE(review): uses std, not std**2 -- a true
    # within-group sum of squares would be (count-1)*variance; confirm intent.
    df_ssw = dfg_std*(dfg_count - 1)
    ser_ssw = df_ssw.sum()
    # Calculate the F-Statistic
    ser_fstat = ser_ssb/ser_ssw
    ser_fstat = ser_fstat.sort_values(ascending=False)
    if is_prune:
        # Drop genes with zero within-state variation (inf) or missing values.
        ser_fstat = ser_fstat[ser_fstat != np.inf]
        sel = ser_fstat.isnull()
        ser_fstat = ser_fstat[~sel]
    return ser_fstat
def makeFstatSer(df_X, ser_y,
                 is_prune=True, state_equ=None):
    """
    Constructs the state F-statistic for gene features.
    This statistic quantifies the variation of the
    gene expression between states to that within
    states.

    NOTE(review): this redefinition shadows an identical makeFstatSer defined
    immediately above in this module; the duplicate should be removed.

    :param pd.DataFrame df_X:
        column: gene
        row: instance
        value: trinary
    :param pd.Series ser_y:
        row: instance
        value: state
    :param bool is_prune: removes nan and inf values
    :param dict state_equ: Provides for state equivalences
        key: state in ser_y
        value: new state
    :return pd.Series: F-statistic per gene, sorted descending
    """
    if state_equ is None:
        state_equ = {s: s for s in ser_y.unique()}
    # Construct the groups
    df_X = df_X.copy()
    df_X[STATE] = ser_y.apply(lambda v: state_equ[v])
    # Calculate aggregations
    dfg = df_X.groupby(STATE)
    dfg_std = dfg.std()
    dfg_mean = dfg.mean()
    dfg_count = dfg.count()
    # Calculate SSB (between-state sum of squares)
    ser_mean = df_X.mean()
    del ser_mean[STATE]
    df_ssb = dfg_count*(dfg_mean - ser_mean)**2
    ser_ssb = df_ssb.sum()
    # SSW (within-state). NOTE(review): uses std, not std**2 -- a true
    # within-group sum of squares would be (count-1)*variance; confirm intent.
    df_ssw = dfg_std*(dfg_count - 1)
    ser_ssw = df_ssw.sum()
    # Calculate the F-Statistic
    ser_fstat = ser_ssb/ser_ssw
    ser_fstat = ser_fstat.sort_values(ascending=False)
    if is_prune:
        # Drop genes with zero within-state variation (inf) or missing values.
        ser_fstat = ser_fstat[ser_fstat != np.inf]
        sel = ser_fstat.isnull()
        ser_fstat = ser_fstat[~sel]
    return ser_fstat
def plotStateFstat(state, df_X, ser_y, is_plot=True):
    """
    Plots the -log significance level of the state F-statistic for the
    10 top-ranked genes.

    :param int/None state: state evaluated one-vs-rest; None evaluates all
        states jointly
    :param pd.DataFrame df_X:
        column: gene
        row: instance
        value: trinary
    :param pd.Series ser_y:
        row: instance
        value: state
    :param bool is_plot: Construct the plot
    """
    if state is None:
        state_equ = {s: s for s in ser_y.unique()}
    else:
        # One-vs-rest: collapse every other state into the pseudo-state -1.
        state_equ = {s: s if s==state else -1 for s
                     in ser_y.unique()}
    # NOTE(review): len(state_equ.values()) equals the number of original
    # states (dict values are not deduplicated), not the number of collapsed
    # states -- confirm the intended degrees of freedom for one-vs-rest.
    num_state = len(state_equ.values())
    ser_fstat = makeFstatSer(df_X, ser_y,
                             state_equ=state_equ)
    # Convert F values to -log significance levels via the F distribution
    # with (num_state-1, n-num_state) degrees of freedom.
    ser_sl = ser_fstat.apply(lambda v: -np.log(
        1 - scipy.stats.f.cdf(v, num_state-1,
                              len(df_X)-num_state)))
    indices = ser_sl.index[0:10]
    _ = plt.figure(figsize=(8, 6))
    _ = plt.bar(indices, ser_sl[indices])
    _ = plt.xticks(indices, indices, rotation=90)
    _ = plt.ylabel("-log(SL)")
    if state is None:
        _ = plt.title("All States")
    else:
        _ = plt.title("State: %d" % state)
    if state is not None:
        _ = plt.ylim([0, 1.4])
    if is_plot:
        plt.show()
def plotInstancePredictions(ser_y, ser_pred,
                            is_plot=True):
    """
    Plots the predicted states with text codings of
    the actual states.

    :param pd.Series ser_y: actual states
        States are numeric
    :param pd.Series ser_pred: predicted states
    :param bool is_plot: Produce the plot
    """
    min_state = ser_y.min()
    max_state = ser_y.max()
    plt.figure(figsize=(12, 8))
    # Seed scatter that establishes the initial axes extent.
    plt.scatter([-1,80], [-1,7])
    for obs in range(len(ser_y)):
        index = ser_pred.index[obs]
        # y position is the predicted state; the text label is the actual one.
        _ = plt.text(obs, ser_pred[index],
                     "%d" % ser_y[index], fontsize=16)
    plt.xlim([0, len(ser_y)])
    plt.ylim([-0.5+min_state, 0.5+max_state])
    _ = plt.xlabel("Observation", fontsize=18)
    _ = plt.ylabel("Predicted State", fontsize=18)
    if is_plot:
        plt.show()
def makeArrayDF(df, indices=None):
    """
    Converts (a row subset of) a DataFrame to a 2-d numpy array.
    A Series argument is promoted to a single-column DataFrame first.

    :param pd.DataFrame df:
    :param indices: row labels to keep; all rows if None
    :return ndarray:
    """
    if indices is None:
        indices = df.index
    frame = pd.DataFrame(df) if isinstance(df, pd.Series) else df
    return frame.loc[indices, :].to_numpy()
def makeArraySer(ser, indices=None):
    """
    Converts (a subset of) a Series to a 1-d numpy array.

    :param pd.Series ser:
    :param indices: labels to keep; all if None
    :return ndarray:
    """
    selected_labels = ser.index if indices is None else indices
    return ser.loc[selected_labels].to_numpy()
def makeArrays(df, ser, indices=None):
    """
    Converts a feature DataFrame and a target Series to numpy arrays
    over the same set of rows.

    :param pd.DataFrame df:
    :param pd.Series ser:
    :param indices: row labels; all rows of df if None
    :return ndarray, ndarray:
    """
    if indices is None:
        indices = df.index
    arr_X = makeArrayDF(df, indices=indices)
    arr_y = makeArraySer(ser, indices=indices)
    return arr_X, arr_y
def scoreFeatures(clf, df_X, ser_y,
                  features=None, train_idxs=None, test_idxs=None,
                  is_copy=True):
    """
    Fits the classifier on the training rows and returns its score on the
    test rows, restricted to the selected features. Missing index/feature
    arguments default to "use everything".

    :param Classifier clf: Exposes
        fit, score
    :param pd.DataFrame df_X:
        columns: features
        indicies: instances
    :param pd.Series ser_y:
        indices: instances
        values: classes
    :param list-object features: columns of df_X to use (all if None)
    :param list-object train_idxs: indices for training
    :param list-object test_idxs: indices for testing
    :param bool is_copy: fit a deep copy so the caller's classifier is untouched
    :return float: score for classifier using features
    """
    train_idxs = df_X.index if train_idxs is None else train_idxs
    test_idxs = df_X.index if test_idxs is None else test_idxs
    features = df_X.columns.tolist() if features is None else features
    if is_copy:
        clf = copy.deepcopy(clf)
    # Train on the training rows ...
    arr_X, arr_y = makeArrays(df_X[features], ser_y, indices=train_idxs)
    clf.fit(arr_X, arr_y)
    # ... then evaluate on the (possibly different) test rows.
    arr_X, arr_y = makeArrays(df_X[features], ser_y, indices=test_idxs)
    return clf.score(arr_X, arr_y)
def partitionByState(ser, holdouts=1):
    """
    Creates training and test indexes by randomly selecting
    `holdouts` indices for each state.

    :param pd.Series ser: Classes for instances
    :param int holdouts: number of holdouts for test
    :raises ValueError: if a class has too few instances to hold out
    :return list-object, list-object: train indices, test indices
    """
    classes = ser.unique().tolist()
    classes.sort()
    test_idxs = []
    for cls in classes:
        ser_cls = ser[ser == cls]
        # Need at least one non-held-out instance per class for training.
        if len(ser_cls) <= holdouts:
            raise ValueError(
                "Class %s has fewer than %d holdouts" %
                (cls, holdouts))
        idxs = random.sample(ser_cls.index.tolist(),
                             holdouts)
        test_idxs.extend(idxs)
    #
    train_idxs = list(set(ser.index).difference(test_idxs))
    return train_idxs, test_idxs
def getBinarySVMParameters(clf):
    """Returns the intercept and coefficient vector of a fitted binary SVM."""
    intercept = clf.intercept_[0]
    coefficients = clf.coef_[0]
    return intercept, coefficients
def predictBinarySVM(clf, values):
    """
    Does binary SVM prediction from the SVM coefficients.

    :param SVC clf:
    :param list/array values:
    :return int in [0, 1]: cn.NCLASS for a negative margin, else cn.PCLASS
    """
    intercept, coefs = getBinarySVMParameters(clf)
    # Signed distance from the separating hyperplane.
    svm_value = np.array(values).dot(coefs) + intercept
    return cn.NCLASS if svm_value < 0 else cn.PCLASS
def binaryMultiFit(clf, df_X, ser_y,
                   list_train_idxs=None):
    """
    Fits the classifier once per index set and averages the resulting
    intercepts and coefficient vectors. The classifier must expose an
    intercept_ and coef_ after fitting.

    :param Classifier clf:
    :param pd.DataFrame df_X:
        columns: features
        index: instances
    :param pd.Series ser_y:
        index: instances
        values: binary class values (0, 1)
    :param list-indexes list_train_idxs: index sets; all rows if None
    :return float, array:
        mean intercept, mean coefficient vector
    """
    if list_train_idxs is None:
        list_train_idxs = [df_X.index.to_list()]
    num_fits = len(list_train_idxs)
    total_coefs = np.zeros(len(df_X.columns))
    total_intercept = 0.0
    for train_idxs in list_train_idxs:
        arr_X, arr_y = makeArrays(df_X, ser_y,
                                  indices=train_idxs)
        clf.fit(arr_X, arr_y)
        total_coefs += clf.coef_[0]
        total_intercept += clf.intercept_[0]
    return total_intercept / num_fits, total_coefs / num_fits
def binaryCrossValidate(clf, df_X, ser_y,
                        partitions=None, num_holdouts=1, num_iteration=10):
    """
    Constructs a cross validated estimate of the score
    for a classifier trained on the features in df_X.

    :param Classifier clf:
    :param pd.DataFrame df_X:
        columns: features
        index: instances
    :param pd.Series ser_y:
        index: instances
        values: binary class values (0, 1)
    :param list-(list-index, list-index) partitions:
        list of pairs of indices. Pairs are train, test.
    :param int num_holdouts: holdouts used if
        constructing partitions
    :param int num_iteration: number of iterations in
        cross validations if constructing partitions
    :return BinaryCrossValidateResult: mean score, per-fold scores, fitted clfs
    """
    if partitions is None:
        # NOTE(review): `partitioner` is not defined in this module's visible
        # scope -- confirm it is defined/imported elsewhere in the file.
        partitions = [p for p in
                      partitioner(ser_y, num_iteration,
                                  num_holdout=num_holdouts)]
    scores = []
    features = df_X.columns.tolist()
    clfs = []
    for train_set, test_set in partitions:
        # Deep-copy so every fold trains a fresh, independent classifier.
        copy_clf = copy.deepcopy(clf)
        scores.append(scoreFeatures(copy_clf, df_X, ser_y,
                                    features=features, is_copy=False,
                                    train_idxs=train_set, test_idxs=test_set))
        clfs.append(copy_clf)
    result = BinaryCrossValidateResult(
        score=np.mean(scores), scores=scores, clfs=clfs)
    return result
def correlatePredictions(clf_desc1, clf_desc2,
df_X, ser_y, partitions):
"""
Estimates the correlation of predicted classifications
between two classifiers.
:param ClassifierDescription clf_desc1:
:param ClassifierDescription clf_desc2:
:param pd.DataFrame df_X:
columns: features
index: instances
:param pd.Series ser_y:
index: instances
values: binary class values (0, 1)
:param list-(list-index, list-index) partitions:
list of pairs of indices. Pairs are train, test.
:return float: correlation of two predictions
"""
def predict(clf_desc, train_idxs, test_idxs):
df_X_sub = | pd.DataFrame(df_X[clf_desc.features]) | pandas.DataFrame |
'''
Author:<NAME>
<EMAIL>'''
# Import required libraries
import pathlib
import dash
import numpy as np
from dash.dependencies import Input, Output, State, ClientsideFunction
import dash_core_components as dcc
import dash_html_components as html
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
from dateutil.relativedelta import *
from datetime import datetime
from controls import TYPE_COLORS,PORTS_COLORS,FLEET
from choropleth_map_emission import choropleth_map, sum_by_hexagon
##DataFrames
from data_filtering import processed_data
import pandas as pd
import geopandas as gpd
import os
import requests
## Databases: static input data loaded once at app start-up.
panama_ports=gpd.read_file("data/Panama_ports.geojson")
canal,ports=processed_data(FLEET)
gatun=pd.read_csv("data/draught_restr_data.csv")
em=pd.read_csv("data/emissions_type_monthly.csv")
em["dt_pos_utc"]=pd.to_datetime(em["dt_pos_utc"])
# Keep only polygon geometries of the canal area (used for spatial filtering).
pol=gpd.read_file("data/Panama_Canal.geojson")[["Name","geometry"]]
pol=pol[pol.geometry.apply(lambda x: x.geom_type=="Polygon")]
##Transform to datetime. Preferred over read_csv's parse_dates, which is less flexible.
canal["time_at_entrance"]=pd.to_datetime(canal["time_at_entrance"])
ports["initial_service"]=pd.to_datetime(ports["initial_service"])
gatun["Date"]=pd.to_datetime(gatun["Date"])
##Ports marker color
panama_ports=panama_ports.assign(color="#F9A054")
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
app = dash.Dash(
    __name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
app.title = 'Panama Maritime Stats'
server = app.server  # exposed for production WSGI servers (e.g. gunicorn)
# Create global chart template
MAPBOX_TOKEN = os.environ.get('MAPBOX_TOKEN', None)
# Base layout shared by the mapbox emissions figure.
layout_map = dict(
    autosize=True,
    paper_bgcolor='#30333D',
    plot_bgcolor='#30333D',
    margin=dict(l=10, r=10, b=10, t=10),
    hovermode="closest",
    font=dict(family="HelveticaNeue", size=17, color="#B2B2B2"),
    legend=dict(font=dict(size=10), orientation="h"),
    mapbox=dict(
        accesstoken=MAPBOX_TOKEN,
        style='mapbox://styles/gabrielfuenmar/ckhs87tuj2rd41amvifhb26ad',
        center=dict(lon=-79.55, lat=8.93),
        zoom=9,
    ),
    showlegend=False,
)
# Base layout shared by the cartesian figures.
layout = dict(
    legend=dict(bgcolor='rgba(0,0,0,0)', font=dict(size=14, family="HelveticaNeue")),
    font_family="HelveticaNeue",
    font_color="#B2B2B2",
    title_font_family="HelveticaNeue",
    title_font_color="#B2B2B2",
    title_font_size=20,
    paper_bgcolor='#21252C',
    plot_bgcolor='#21252C',
    xaxis=dict(gridcolor="rgba(178, 178, 178, 0.1)", title_font_size=15,
               tickfont_size=14, title_font_family="HelveticaNeue", tickfont_family="HelveticaNeue"),
    yaxis=dict(gridcolor="rgba(178, 178, 178, 0.1)", title_font_size=15, tickfont_size=14,
               title_font_family="HelveticaNeue", tickfont_family="HelveticaNeue")
)
##Modebar configuration on graphs
config = {"displaylogo": False, 'modeBarButtonsToRemove': ['autoScale2d']}
##Annotation position shared by the figure footnotes
annotation_layout = dict(
    xref="x domain",
    yref="y domain",
    x=0.25,
    y=-0.35)
# Create app layout: header row, filter side-bar, KPI cards + emissions map,
# and two rows of figures. Component ids are wired to the callbacks below.
app.layout = html.Div(
    [
        dcc.Store(id="aggregate_data"),
        # empty Div to trigger javascript file for graph resizing
        html.Div(id="output-clientside"),
        html.Div(
            [
                html.Div(
                    [
                        html.A(html.Img(
                            src=app.get_asset_url("mtcc_logo_v3.png"),
                            id="plotly-image",
                            style={
                                "height": "160px",
                                "width": "auto",
                                "margin-bottom": "0px",
                                "text-align": "center"
                            },
                        ),
                            href="https://mtcclatinamerica.com/")
                    ],
                    className="one-half column",
                ),
                html.Div(
                    [
                        html.Div(
                            [
                                html.H3(
                                    "Panama Maritime Statistics",
                                    style={"margin-bottom": "0px"},
                                ),
                                html.H5(
                                    "Efficiency and Sustainability Indicators", style={"margin-top": "0px"}
                                ),
                            ]
                        )
                    ],
                    className="one-half column",
                    id="title",
                ),
                html.Div(
                    [
                        html.Button("Refresh", id="refresh-button"),
                        html.A(
                            html.Button("Developer", id="home-button"),
                            href="https://gabrielfuentes.org",
                        )
                    ],
                    className="one-third column",
                    id="button",
                    style={
                        "text-align": "center"},
                ),
            ],
            id="header",
            className="row flex-display",
            style={"margin-bottom": "15px"},
        ),
        html.Div(
            [
                # Left column: all user filters.
                html.Div(
                    [
                        html.P("Date Filter",
                               className="control_label",
                               ),
                        html.Div([html.P(id="date_from"),
                                  html.P(id="date_to")], className="datecontainer"),
                        # Slider values are month offsets from Dec 2018 (see callbacks).
                        dcc.RangeSlider(
                            id="year_slider",
                            min=0,
                            max=20,
                            value=[0, 20],
                            marks={
                                0: "Dec 2018",
                                5: "May 2019",
                                10: "Oct 2019",
                                15: "Mar 2020",
                                20: "Aug 2020"},
                            allowCross=False,
                            className="dcc_control",
                        ),
                        html.P("Vessel Type:", className="control_label"),
                        dcc.Dropdown(
                            id='types-dropdown',
                            options=[{'label': row, 'value': row} \
                                     for row in sorted(FLEET)],
                            placeholder="All", multi=True,
                            className="dcc_control"),
                        html.P("Port:", className="control_label"),
                        dcc.Dropdown(
                            id='ports-dropdown',
                            options=[{'label': row, 'value': row} \
                                     for row in sorted(ports[~ports.port_name.isin(["Pacific - PATSA", "Colon2000"])]\
                                                       .dropna(subset=["port_name"]).port_name.unique()) + ["Panama Canal South", "Panama Canal North"]],
                            placeholder="All", multi=True,
                            className="dcc_control"),
                        html.P(
                            "Vessel Size (GT)",
                            className="control_label",
                        ),
                        html.Div([html.P(id="size_from"),
                                  html.P(id="size_to")], className="datecontainer"),
                        dcc.RangeSlider(
                            id="size_slider",
                            min=400,
                            max=170000,
                            value=[400, 170000],
                            step=8500,
                            marks={
                                400: "400",
                                35000: "35k",
                                70000: "70k",
                                105000: "105k",
                                140000: "140k",
                                170000: "170k"},
                            allowCross=False,
                            className="dcc_control",
                        ),
                    ],
                    className="pretty_container four columns",
                    id="cross-filter-options",
                ),
                # Right column: KPI mini-cards plus the emissions map.
                html.Div(
                    [
                        html.Div(
                            [
                                html.Div(
                                    [html.H6(id="waitingText"), html.P("Waiting Average")],
                                    id="waiting",
                                    className="mini_container",
                                ),
                                html.Div(
                                    [html.H6(id="opsText"), html.P("Operations")],
                                    id="ops",
                                    className="mini_container",
                                ),
                                html.Div(
                                    [html.H6(id="serviceText"), html.P("Service Average")],
                                    id="service_m",
                                    className="mini_container",
                                ),
                                html.Div(  ##### Hardcoded for the time being. Build a scraper.
                                    [html.H6(["15.24 m"], id="draughtText"), html.P("Canal Max Draught TFW")],
                                    id="draught",
                                    className="mini_container",
                                ),
                            ],
                            id="info-container",
                            className="row container-display",
                        ),
                        html.Div([
                            html.Div(
                                [
                                    html.Div([html.H5("Emissions Review"),
                                              html.H6(id="month_map", style={"color": "white"})],
                                             style={"display": "flex", "flex-direction": "row", "justify-content": "space-between"}),
                                    dcc.Graph(animate=False, config=config, id="map_in"),
                                    html.P(["Grid size"], id="grid_size", className="control_label"),
                                    # Hexagon grid resolution: marks 1-5 map to values 4-8.
                                    dcc.Slider(
                                        id="zoom_slider",
                                        min=4,
                                        max=8,
                                        value=8,
                                        marks={
                                            4: {'label': '1'}, 5: {'label': '2'}, 6: {'label': '3'},
                                            7: {'label': '4'}, 8: {'label': '5'}},
                                        className="dcc_control",
                                        included=False),
                                    dcc.RadioItems(
                                        id='selector', options=[{'label': "CO2 emissions", 'value': "co2"},
                                                                {'label': "CH4 emissions", 'value': "ch4"}],
                                        value="co2", labelStyle={'display': 'inline-block'}),
                                ],
                                id="emissionsMapContainer",
                                className="pretty_container eight columns",
                            )
                        ],
                            className="row flex-display",
                        ),
                    ],
                    id="right-column",
                    className="eight columns",
                ),
            ],
            className="row flex-display",
        ),
        html.Div(
            [
                html.Div(
                    [dcc.Graph(id="service_graph", config=config)],
                    className="pretty_container six columns",
                ),
                html.Div(
                    [dcc.Graph(id="waiting_graph", config=config)],
                    className="pretty_container six columns",
                )
            ],
            className="row flex-display",
        ),
        html.Div(
            [
                html.Div(
                    [dcc.Graph(id="draught_graph", config=config)],
                    className="pretty_container six columns",
                ),
                html.Div(
                    [dcc.Graph(id="ratio_graph", config=config)],
                    className="pretty_container six columns",
                ),
            ],
            className="row flex-display",
        ),
    ],
    id="mainContainer",
    style={"display": "flex", "flex-direction": "column"},
)
def upper_text_p1(fr="01-01-2019",to="18-11-2020",ports_sel=["All"],
                  type_vessel=["All"],size=["All"],text_bar=True,*args):
    """Filters canal and port operations by date/port/type/size and returns
    either the KPI values (text_bar=True: waiting mean, number of operations,
    service mean) or the waiting/service/draught-ratio figures
    (text_bar=False: fig_waiting, fig_service, draught_fig).

    NOTE(review): the list defaults (ports_sel/type_vessel/size) are mutable
    but only read, never mutated, so they are harmless here.
    """
    date_from=pd.to_datetime(fr)
    date_to=pd.to_datetime(to)
    # Canal transits restricted to direct transits in the selected window.
    canal_in=canal[(canal.time_at_entrance.between(date_from,date_to))&(canal.direct_transit_boolean==True)].\
        copy()
    ports_in=ports[ports.initial_service.between(date_from,date_to)].\
        copy()
    # Harmonise both sources to the same column set keyed by day.
    canal_in=canal_in.assign(day=canal_in.time_at_entrance.dt.date)
    canal_in=canal_in[["day","waiting_time","service_time","port_name","draught_ratio","StandardVesselType","GT"]]
    canal_in["day"]=pd.to_datetime(canal_in.day)
    ports_in=ports_in.assign(day=ports_in.initial_service.dt.date)
    ports_in=ports_in[["day","waiting_time","service_time","port_name","draught_ratio","StandardVesselType","GT"]]
    ports_in["day"]=pd.to_datetime(ports_in.day)
    df_in=pd.concat([ports_in,canal_in],axis=0)
    # Apply the optional filters ("All" means no filtering).
    if "All" not in ports_sel:
        df_in=df_in[df_in.port_name.isin(ports_sel)]
    if "All" not in size:
        df_in=df_in[df_in.GT.between(size[0],size[1])]
    if "All" not in type_vessel:
        df_in=df_in[df_in["StandardVesselType"].isin(type_vessel)]
    if text_bar is True: ##Row at top with summary values
        waiting_mean=df_in.waiting_time.mean()
        ops=df_in.shape[0]
        service_mean=df_in.service_time.mean()
        return waiting_mean,ops,service_mean
    else: ###Graphs on waiting, service time and draught ratio
        ##Fig ratio: weekly mean draught ratio per vessel type.
        df_in=df_in[df_in.day>pd.to_datetime("01-01-2019")]
        df_in=df_in.reset_index(drop=True)
        series_grouped=[]
        for name,row in df_in.\
                groupby([df_in.day.dt.isocalendar().week,df_in.day.dt.year,"StandardVesselType"]):
            # Rebuild a date from (year, ISO week) -> first weekday of that week.
            series_grouped.append([pd.to_datetime(str(name[1])+"-"+str(name[0])+"-1",format='%Y-%W-%w'),name[2],row.draught_ratio.mean()])
        series_grouped=pd.DataFrame(series_grouped,columns=["day","StandardVesselType","draught_ratio"]).sort_values(by=["day"])
        draught_fig = go.Figure()
        for val in series_grouped["StandardVesselType"].unique():
            series_in=series_grouped[series_grouped["StandardVesselType"]==val]
            draught_fig.add_trace(go.Scatter(
                name=val,
                mode="markers+lines",
                x=series_in.day,y=series_in.draught_ratio,
                line=dict(shape="spline", width=1, color=TYPE_COLORS[val]),
                marker=dict(symbol="diamond-open")))
        draught_fig.update_layout(layout,legend=dict(x=1),title_text="<b>Draught Ratio per vessel type</b>",
                                  xaxis=dict(title_text="Date"),yaxis=dict(title_text="Ratio"),)
        draught_fig.add_annotation(annotation_layout,text="*AIS draft/min(maxTFWD, max Allowable draft)")
        ##Service and waiting time distributions per port. A port needs more
        ##than 25 observations; values outside the 5-95 percentile band
        ##(and waiting <= 1h / service <= 0h) are discarded.
        labels_w=[]
        remove_w=[]
        waiting=[]
        for name,row in df_in.groupby("port_name"):
            if len(row.waiting_time.dropna().tolist())>25:
                labels_w.append(name)
                wa_li=row.waiting_time[(row.waiting_time>1)&(row.waiting_time<row.waiting_time.quantile(0.95))&\
                                       (row.waiting_time>row.waiting_time.quantile(0.05))]
                waiting.append(wa_li.dropna().tolist())
            else:
                remove_w.append(name)
        labels_s=[]
        remove_s=[]
        service=[]
        for name,row in df_in.groupby("port_name"):
            if len(row.service_time.dropna().tolist())>25:
                labels_s.append(name)
                se_li=row.service_time[(row.service_time>0)&(row.service_time<row.service_time.quantile(0.95))&\
                                       (row.service_time>row.service_time.quantile(0.05))]
                service.append(se_li.dropna().tolist())
            else:
                remove_s.append(name)
        ##Figs of waiting and service time (annotated placeholder figure
        ##when no port passed the observation threshold).
        if len(labels_w)>0:
            fig_waiting = ff.create_distplot(waiting, labels_w,histnorm="probability density",colors=list(PORTS_COLORS.values()),show_rug=False,show_curve=False)
        else:
            fig_waiting=go.Figure()
            fig_waiting.add_annotation(x=2,y=5,xref="x",yref="y",text="max=5",showarrow=True,
                                       font=dict(family="Courier New, monospace",size=16, color="#ffffff"),align="center",
                                       arrowhead=2, arrowsize=1, arrowwidth=2,arrowcolor="#636363", ax=20,ay=-30,bordercolor="#c7c7c7",
                                       borderwidth=2,borderpad=4,bgcolor="#ff7f0e",opacity=0.8)
        if len(labels_s)>0:
            fig_service = ff.create_distplot(service, labels_s,histnorm="probability density",colors=list(PORTS_COLORS.values()),show_rug=False,show_curve=False)
        else:
            fig_service=go.Figure()
            fig_service.add_annotation(x=2,y=5,xref="x",yref="y",text="max=5",showarrow=True,
                                       font=dict(family="Courier New, monospace",size=16, color="#ffffff"),align="center",
                                       arrowhead=2, arrowsize=1, arrowwidth=2,arrowcolor="#636363", ax=20,ay=-30,bordercolor="#c7c7c7",
                                       borderwidth=2,borderpad=4,bgcolor="#ff7f0e",opacity=0.8)
        ###Service and Waiting Graphs Layout
        fig_waiting.update_layout(layout,yaxis=dict(zeroline=True,linecolor='white',title_text="Density"),
                                  xaxis=dict(title_text="Hours"),
                                  legend=dict(x=0.6),title_text="<b>Waiting Time</b>")
        fig_waiting.add_annotation(annotation_layout,text="*Results from inbuilt method by Fuentes, Sanchez-Galan and Diaz")
        fig_waiting.update_traces(marker_line_color='rgb(8,48,107)',
                                  marker_line_width=1.5, opacity=0.6)
        fig_service.update_layout(layout,yaxis=dict(zeroline=True,linecolor="white",title_text="Density"),
                                  xaxis=dict(title_text="Hours"),
                                  legend=dict(x=0.6),title_text="<b>Service Time</b>")
        fig_service.add_annotation(annotation_layout,text="*Results from inbuilt method by Fuentes, Sanchez-Galan and Diaz")
        fig_service.update_traces(marker_line_color='rgb(8,48,107)',
                                  marker_line_width=1.5, opacity=0.6)
        return fig_waiting,fig_service,draught_fig
def lake_draught(fr="01-01-2015",to="18-11-2020",*args):
    """Builds the dual-axis figure relating Gatun Lake depth (secondary y)
    to the canal's maximum-draught restriction (primary y) over time."""
    gatun_in=gatun.copy()
    date_from=pd.to_datetime(fr)
    date_to=pd.to_datetime(to)
    gatun_in=gatun_in[gatun_in.Date.between(date_from,date_to)]
    # Day label (d/m/Y) used as the categorical x axis.
    gatun_in=gatun_in.assign(day=gatun_in.Date.dt.day.astype(str)+"/"+gatun_in.Date.dt.month.astype(str)+"/"+gatun_in.Date.dt.year.astype(str))
    lake_fig=make_subplots(specs=[[{"secondary_y": True}]])
    lake_fig.add_trace(go.Scatter(
        name="Gatun Lake Depth",
        mode="lines",
        x=gatun_in.day,y=gatun_in.gatun_depth,
        line=dict(shape="spline", width=2,color="#6671FD")),secondary_y=True)
    # Only the rows where a draught-restriction change was announced.
    lake_fig.add_trace(go.Scatter(
        name="Draught Change",
        mode="lines",
        x=gatun_in[gatun_in.Change.notnull()]["day"],y=gatun_in[gatun_in.Change.notnull()]["Change"],
        line=dict(shape="spline", width=2,color="#3ACC95"),
        marker=dict(symbol="diamond-open")),secondary_y=False)
    lake_fig.add_trace(go.Scatter(
        name="Max draught",
        mode="lines",
        x=gatun_in.day,y=gatun_in.Overall,
        line=dict(shape="spline", width=2,color="#F9A054")),secondary_y=False)
    ##Layout update
    lake_fig.update_layout(layout,title_text="<b>Gatun Lake and Draught Restriction Relation</b>",
                           xaxis=dict(title_text="Date",nticks=6),
                           legend=dict(x=0.6,y=1))
    # Set y-axes titles; ranges padded slightly around the data extent.
    lake_fig.update_yaxes(title_text="Max Draught (m)", secondary_y=False,showgrid=False,
                          range=[gatun_in.Overall.min()*0.99,gatun_in.Overall.max()*1.05])
    lake_fig.update_yaxes(title_text="Lake Depth (m)", secondary_y=True,gridcolor="rgba(178, 178, 178, 0.1)",
                          title_font_size=15,tickfont_size=14,
                          title_font_family="HelveticaNeue",tickfont_family="HelveticaNeue",
                          range=[gatun_in.gatun_depth.min()*0.99,gatun_in.gatun_depth.max()*1.05])
    lake_fig.add_annotation(annotation_layout,text="*Values sourced by the Panama Canal Authority Maritime Services Platform")
    return lake_fig
def emissions_map(ghg,res,fr="01-01-2018",to="30-08-2020",lat=None,lon=None,zoom=None,type_vessel=[],size=[]):
    """Builds the hexagon-aggregated emissions map for gas ``ghg`` at
    resolution ``res`` over the given date window and filters.

    NOTE(review): the list defaults (type_vessel/size) are mutable but only
    passed through, never mutated here -- confirm sum_by_hexagon does not
    mutate them either.
    """
    emissions_in=em.copy()
    date_fr=pd.to_datetime(fr)
    date_to=pd.to_datetime(to)
    df_aggreg=sum_by_hexagon(emissions_in,res,pol,date_fr,date_to,vessel_type=type_vessel,gt=size)
    ##Update layout
    # NOTE: mutates the module-level layout_map in place so the user's
    # current viewport (center/zoom) is preserved between redraws.
    if lat is not None:
        layout_map["mapbox"]["center"]["lon"]=lon
        layout_map["mapbox"]["center"]["lat"]=lat
        layout_map["mapbox"]["zoom"]=zoom
    if df_aggreg.shape[0]>0:
        heatmap=choropleth_map(ghg,df_aggreg,layout_map)
    else:
        # Empty selection: placeholder scatter keeps the map rendered.
        heatmap=go.Figure(data=go.Scattermapbox(lat=[0],lon=[0]),layout=layout_map)
    return heatmap
##Upper Row: KPI cards and textual echoes of the selected filters.
@app.callback(
    [
        Output("waitingText", "children"),
        Output("opsText", "children"),
        Output("serviceText", "children"),
        Output("date_from","children"),
        Output("date_to","children"),
        Output("size_from","children"),
        Output("size_to","children"),
    ],
    [Input("ports-dropdown", "value"),
     Input("types-dropdown","value"),
     Input('year_slider', 'value'),
     Input('size_slider', 'value'),
     ],
)
def update_row1(ports_val,types_val,date,size_val):
    """Refreshes the KPI row (waiting average, operation count, service
    average) plus the date/size range labels for the current filters."""
    # Empty dropdown selections mean "no filter".
    if not ports_val:
        ports_val=["All"]
    if not types_val:
        types_val=["All"]
    # Slider steps are month offsets from Dec 2018; snap the end to month-end.
    date_fr=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[0])
    date_to=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[1])
    date_to=date_to+ relativedelta(day=31)
    if date[0]==0:
        date_fr=pd.to_datetime("31-12-2018")
    date_fr=date_fr.strftime('%d-%m-%Y')
    date_to=date_to.strftime('%d-%m-%Y')
    waiting,ops,service=upper_text_p1(fr=date_fr,to=date_to,ports_sel=ports_val,type_vessel=types_val,size=size_val)
    return "{:.1f}".format(waiting)+ " hours", format(ops,","), "{:.1f}".format(service) + " hours",\
        date_fr, date_to ,format(size_val[0],","),format(size_val[1],",")
@app.callback(
    [
        Output("service_graph", "figure"),
        Output("waiting_graph", "figure"),
        Output("ratio_graph", "figure"),
    ],
    [Input("ports-dropdown", "value"),
     Input("types-dropdown","value"),
     Input('year_slider', 'value'),
     Input('size_slider', 'value'),
     ],
)
def update_graphs(ports_val,types_val,date,size_val):
    """Rebuilds the service, waiting and draught-ratio figures for the
    currently selected filters."""
    # Empty dropdown selections mean "no filter".
    if not ports_val:
        ports_val=["All"]
    if not types_val:
        types_val=["All"]
    # Slider steps are month offsets from Dec 2018; snap the end to month-end.
    date_fr=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[0])
    date_to=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[1])
    date_to=date_to+ relativedelta(day=31)
    if date[0]==0:
        date_fr=pd.to_datetime("31-12-2018")
    date_fr=date_fr.strftime('%d-%m-%Y')
    date_to=date_to.strftime('%d-%m-%Y')
    # Fix: upper_text_p1(text_bar=False) returns (waiting, service, ratio);
    # the original unpacked them as (service, waiting, ratio), which wired
    # the waiting figure to the service graph and vice versa.
    waiting_g,service_g,ratio_g=upper_text_p1(fr=date_fr,to=date_to,ports_sel=ports_val,
                                              type_vessel=types_val,size=size_val,text_bar=False)
    return service_g,waiting_g,ratio_g
@app.callback(
    Output("draught_graph", "figure"),
    [ Input('year_slider', 'value'),
      ],
)
def update_gatun(date):
    """Refreshes the Gatun Lake depth vs draught-restriction figure for the
    selected date window."""
    # Slider steps are month offsets from Dec 2018; snap the end to month-end.
    date_fr=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[0])
    date_to=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[1])
    date_to=date_to+ relativedelta(day=31)
    # Slider endpoints clamp to the dataset's actual coverage limits.
    if date[0]==0:
        date_fr="30-12-2018"
    if date[1]==20:
        date_to="18-11-2020"
    lake_g=lake_draught(fr=date_fr,to=date_to)
    return lake_g
@app.callback(
Output("map_in", "figure"),
[Input("selector","value"),
Input("zoom_slider","value"),
Input('year_slider', 'value'),
Input("types-dropdown","value"),
],
[State("map_in","relayoutData")]
)
def update_emissions_map(ghg_t,resol,date,types_val,relay):
date_fr= | pd.to_datetime("01-01-2019 00:00") | pandas.to_datetime |
import vaex
from geovaex import GeoDataFrame
import pygeos as pg
import pyproj
import numpy as np
import pandas as pd
import uuid
import warnings
from .distribution import Distribution
from .report import Report
from .plots import heatmap, map_choropleth
from .clustering import Clustering
from .heatmap import Heatmap
def custom_formatwarning(msg, *args, **kwargs):
    """Render a warning as just its message (drops file/line/category info)."""
    return "{}\n".format(msg)
# Install module-wide so all warnings use the compact format.
warnings.formatwarning = custom_formatwarning
@vaex.register_dataframe_accessor('profiler', override=True)
class Profiler(object):
    """Vector profiler class.

    Attributes:
        df (object): The vector (geo)dataframe.
        _count (dict): Cached per-column non-missing counts (None until count() runs).
        _categorical (list): A list of categorical fields.
        _has_geometry (bool): True when df is a geovaex GeoDataFrame.
    """
    def __init__(self, df):
        """Initiates the Profiler class.

        Parameters:
            df (object): The vector (geo)dataframe.
        """
        self.df = df
        self._count = None  # lazily filled by count()
        self._categorical = None
        self._has_geometry = isinstance(df, GeoDataFrame)
    def mbr(self):
        """Returns the Minimum Bounding Rectangle of the dataframe geometry.

        The MBR is computed in the native CRS and reprojected to WGS84.

        Returns:
            (string) The WKT representation of the MBR, or None for a
            non-spatial dataframe.
        """
        if not self._has_geometry:
            warnings.warn('DataFrame is not spatial.')
            return None
        total_bounds = self.df.geometry.total_bounds()
        # Reproject the bounds geometry to EPSG:4326 (lon/lat).
        # NOTE(review): relies on self.crs, which is not set in __init__ --
        # presumably provided elsewhere on the accessor; confirm.
        transformer = pyproj.Transformer.from_crs(self.crs, "EPSG:4326", always_xy=True)
        coords = pg.get_coordinates(total_bounds)
        new_coords = transformer.transform(coords[:, 0], coords[:, 1])
        transformed = pg.set_coordinates(total_bounds, np.array(new_coords).T)
        return pg.to_wkt(transformed)
    @property
    def featureCount(self):
        """Property containing the original length of features in the dataframe."""
        # NOTE(review): relies on vaex's private `_length_original`
        # (presumably the length before any filtering/selection) — verify
        # on vaex upgrades.
        return self.df._length_original
def count(self):
"""Counts the length of dataframe and caches the value in the corresponding object attribute.
Returns:
(int) The number of features.
"""
if self._count is not None:
return self._count
self._count = {col: self.df.count(col, array_type='list') for col in self.df.get_column_names(virtual=False)}
return self._count
    def convex_hull(self, chunksize=50000, max_workers=None):
        """Returns the convex hull of all geometries in the dataframe.
        Parameters:
            chunksize (int): The chunksize (number of features) for each computation.
            max_workers (int): The number of workers to be used, if None equals to number of available cores.
        Returns:
            (string) The WKT representation of convex hull in WGS84, or None
            if the dataframe has no geometry.
        """
        if not self._has_geometry:
            warnings.warn('DataFrame is not spatial.')
            return None
        hull = self.df.geometry.convex_hull_all(chunksize=chunksize, max_workers=max_workers)
        # Reproject the hull from the native CRS to WGS84 before exporting.
        transformer = pyproj.Transformer.from_crs(self.crs, "EPSG:4326", always_xy=True)
        coords = pg.get_coordinates(hull)
        new_coords = transformer.transform(coords[:, 0], coords[:, 1])
        transformed = pg.set_coordinates(hull, np.array(new_coords).T)
        return pg.to_wkt(transformed)
    def thumbnail(self, file=None, maxpoints=100000, **kwargs):
        """Creates a thumbnail of the dataset.
        Parameters:
            file (string): The full path to save the thumbnail. If None, the thumbnail is not saved to file.
            maxpoints (int): The maximum number of points to used for the thumbnail.
        Raises:
            Exception: if cannot write to filesystem.
        Returns:
            (string) base64 encoded png, or None when saved to file or when
            the dataframe has no geometry.
        """
        # Imported lazily to avoid the rendering dependency at module load.
        from .static_map import StaticMap
        if not self._has_geometry:
            warnings.warn('DataFrame is not spatial.')
            return None
        static_map = StaticMap(**kwargs)
        # Downsample large datasets so rendering stays tractable.
        df = self.df.sample(n=maxpoints) if maxpoints < len(self.df) else self.df.copy()
        df = df.to_geopandas_df()
        static_map.addGeometries(df)
        if (file is not None):
            static_map.toFile(file)
        else:
            return static_map.base64()
    @property
    def crs(self):
        """Returns the native CRS (proj4 object) of the dataframe.

        Returns None (with a warning) for non-spatial dataframes.
        """
        if not self._has_geometry:
            warnings.warn('DataFrame is not spatial.')
            return None
        return self.df.geometry.crs
    @property
    def short_crs(self):
        """Returns the short (string) representation of the dataframe's CRS.

        Returns None (with a warning) for non-spatial dataframes.
        """
        if not self._has_geometry:
            warnings.warn('DataFrame is not spatial.')
            return None
        return self.df.geometry.crs.to_string()
    def attributes(self):
        """The attributes of the (geo)dataframe.

        Only physical (non-virtual) columns are reported.
        Returns:
            (list) The attributes of the df.
        """
        return self.df.get_column_names(virtual=False)
def data_types(self):
"""Calculates the datatype of each attribute.
Returns:
(dict) The datatype of each attribute.
"""
datatypes = {col: self.df.data_type(col) for col in self.df.get_column_names(virtual=False)}
for col in datatypes:
try:
datatypes[col] = datatypes[col].__name__
except:
datatypes[col] = datatypes[col].name
return datatypes
    def categorical(self, min_frac=0.01, sample_length=10000):
        """Checks whether each attribute holds categorical data, using a sample.
        Parameters:
            min_frac (float): The minimum fraction of unique values, under which the attribute is considered categorical.
            sample_length (int): The length of sample to be used.
        Returns:
            (list): A list of the categorical attributes.
        """
        # Return the cached result when available.
        if self._categorical is not None:
            return self._categorical
        df = self.df.to_vaex_df() if isinstance(self.df, GeoDataFrame) else self.df
        # Work on a sample to keep nunique() affordable on large datasets.
        df = df.sample(n=sample_length) if sample_length < len(df) else df
        categorical = []
        for col in df.get_column_names(virtual=False):
            nunique = df[col].nunique()
            # Heuristic: few distinct values relative to size => categorical.
            if nunique/df[col].count() <= min_frac:
                categorical.append(col)
                # self.df.ordinal_encode(col, inplace=True)
        self._categorical = categorical
        return self._categorical
def distribution(self, attributes=None, n_obs=5, dropmissing=True):
"""Creates the distribution of values for each attribute.
By default, it calculates the distribution only for the categorical attributes,
returning the 5 most frequent values for each attribute, dropping the missing values.
Parameters:
attributes (list): A list of the attributes to create the distribution.
n_obs (int): The number of most frequent values to return.
dropmissing (bool): Whether to drop missing values or not.
Returns:
(object) A distribution object.
"""
attributes = self.categorical() if attributes is None else attributes
return Distribution(self.df, attributes, n_obs)
    def distribution_ml(self, attributes=None, n_obs=5, dropmissing=True):
        """Creates the distribution of values for each attribute using machine learning techniques.
        By default, it calculates the distribution only for the categorical attributes,
        returning the 5 most frequent values for each attribute, dropping the missing values.
        Same as distribution(), but delegates with method='ml'.
        Parameters:
            attributes (list): A list of the attributes to create the distribution.
            n_obs (int): The number of most frequent values to return.
            dropmissing (bool): Whether to drop missing values or not.
        Returns:
            (object) A distribution object.
        """
        attributes = self.categorical() if attributes is None else attributes
        return Distribution(self.df, attributes, n_obs, dropmissing=dropmissing, method='ml')
    def get_sample(self, n_obs=None, frac=None, method="first", bbox=None, random_state=None):
        """Creates a sample of the dataframe.
        Parameters:
            n_obs (int): The number of features contained in the sample.
            frac (float): The fraction of the total number of features contained in the sample. It overrides n_obs.
            method (string): The method it will be used to extract the sample. One of: first, last, random.
            bbox (list): The desired bounding box of the sample.
            random_state (int): Seed or RandomState for reproducability, when None a random seed it chosen.
        Raises:
            Exception: When `method` is not one of first, random, last.
        Returns:
            (object): A sample dataframe.
        """
        df = self.df
        if bbox is not None:
            # Spatial pre-filter; ignored (with a warning) for non-spatial data.
            if not self._has_geometry:
                warnings.warn('DataFrame is not spatial.')
            else:
                df = self.df.within(pg.box(*bbox))
        length = len(df)
        if n_obs is None and frac is None:
            # Default sample: 5% of the features, capped at 100000.
            n_obs = min(round(0.05*length), 100000)
        if (method == "first"):
            if frac is not None:
                n_obs = round(frac*length)
            sample = df.head(n_obs)
        elif (method == "random"):
            # NOTE(review): both n_obs and frac are forwarded; presumably the
            # underlying sample() resolves precedence — verify.
            sample = df.sample(n=n_obs, frac=frac, random_state=random_state)
        elif (method == "last"):
            if frac is not None:
                n_obs = round(frac*length)
            sample = df.tail(n_obs)
        else:
            raise Exception('ERROR: Method %s not supported' % (method))
        return sample
def quantiles(self):
"""Calculates the 5, 25, 50, 75, 95 quantiles.
Returns:
(object) A pandas dataframe with the calculated values.
"""
columns=[5, 25, 50, 75, 95]
df = pd.DataFrame(columns=columns)
for col in self.df.get_column_names(virtual=False):
quantiles = []
try:
for percentage in columns:
percentage = float(percentage)
quantiles.append(self.df.percentile_approx(col, percentage=percentage))
except:
pass
else:
row = pd.DataFrame([quantiles], columns=columns, index=[col])
df = df.append(row)
return df
def distinct(self, attributes=None, n_obs=50):
"""Retrieves the distinct values for each attribute.
By default, it retrieves all distinct values only for the categorical attributes in the dataframe.
Parameters:
attributes (list): A list of the attributes to create the distribution.
n_obs (int): If given, only the first n_obs values for each attribute are returned.
Returns:
(dict) A dictionary with the list of distinct values for each attribute.
"""
attributes = self.categorical() if attributes is None else attributes
if n_obs is None:
distinct = {col: self.df.unique(col, dropna=True).tolist() for col in attributes}
else:
distinct = {col: self.df.unique(col, dropna=True)[0:n_obs].tolist() for col in attributes}
return distinct
def recurring(self, attributes=None, n_obs=5):
"""Retrieves the most frequent values of each attribute.
By default, it calculates the most frequent values only for the categorical attributes,
returning the top 5.
Parameters:
attributes (list): A list of the attributes to create the distribution.
n_obs (int): The maximum number of most frequent values for each attribute.
Returns:
(dict) A dictionary with the list of most frequent values for each attribute.
"""
attributes = self.categorical() if attributes is None else attributes
return {col: self.df[col].value_counts(dropna=True).index[:n_obs].tolist() for col in attributes}
def statistics(self):
"""Calculates general descriptive statistics for each attribute.
The statistics calculated are: minimum and maximum value, mean, median, standard deviation
and the sum of all values, in case the attribute contains numerical values.
Returns:
(object) A pandas dataframe with the statistics.
"""
columns = ['min', 'max', 'mean', 'median', 'std', 'sum']
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from sim.Bus import Bus
from sim.Route import Route
from sim.Busstop import Bus_stop
from sim.Passenger import Passenger
import matplotlib.pyplot as plt
pd.options.mode.chained_assignment = None
def getBusRoute(data):
    """Build Bus objects from a GTFS feed (trips.txt / stop_times.txt).

    Parameters:
        data (str): Name of the dataset folder under ./data/.

    Returns:
        dict: route_id -> list of Bus objects, keeping only routes with
        more than one trip.
    """
    my_path = os.path.abspath(os.path.dirname(__file__))
    path = my_path + "/data/" + data + "/"
    _path_trips = path + 'trips.txt'
    _path_st = path + 'stop_times.txt'
    trips = pd.DataFrame(pd.read_csv(_path_trips))
    stop_times = pd.DataFrame(pd.read_csv(_path_st))
    stop_times.dropna(subset=['arrival_time'], inplace=True)
    bus_routes = {}
    trip_ids = set(stop_times['trip_id'])
    # Restrict to a single (randomly picked) service day when the feed
    # provides service ids; best-effort, so failures are ignored.
    try:
        service_id = trips.iloc[np.random.randint(0, trips.shape[0])]['service_id']
        trips = trips[trips['service_id'] == service_id]
    except Exception:
        pass
    # each route_id may correspond to multiple trip_id
    for trip_id in trip_ids:
        # A completely same route indicates the same shape_id in trip file, but this field is not 100% provided by opendata
        try:
            if 'shape_id' in trips.columns:
                route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['shape_id'])
                block_id = ''
                direction = ''
            else:
                route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['route_id'])
                block_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['block_id'])
                direction = str(trips[trips['trip_id'] == trip_id].iloc[0]['trip_headsign'])
        except Exception:
            # Trip not present in (the filtered) trips table.
            continue
        trip = stop_times[stop_times['trip_id'] == trip_id]
        # Arrival times come in either time-only or full-datetime format.
        try:
            trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format='%H:%M:%S')
        except Exception:
            trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format="%Y-%m-%d %H:%M:%S")
        trip = trip.sort_values(by='arrival_time')
        trip_dist = trip.iloc[:]['shape_dist_traveled'].to_list()
        if len(trip_dist) <= 0 or np.isnan(trip_dist[0]):
            continue
        # Schedule as seconds after midnight, per stop.
        schedule = ((trip.iloc[:]['arrival_time'].dt.hour * 60 + trip.iloc[:]['arrival_time'].dt.minute) * 60 +
                    trip.iloc[:]['arrival_time'].dt.second).to_list()
        if len(schedule) <= 2 or np.isnan(schedule[0]):
            continue
        b = Bus(id=trip_id, route_id=route_id, stop_list=trip.iloc[:]['stop_id'].to_list(),
                dispatch_time=schedule[0], block_id=block_id, dir=direction)
        b.left_stop = []
        # Initial speed from the first segment (distance / time).
        b.speed = (trip_dist[1] - trip_dist[0]) / (schedule[1] - schedule[0])
        b.c_speed = b.speed
        for i in range(len(trip_dist)):
            # A stop visited twice on the same trip gets an "_<position>"
            # suffix so distances/schedules stay unambiguous.
            if str(b.stop_list[i]) in b.stop_dist:
                b.left_stop.append(str(b.stop_list[i]) + '_' + str(i))
                b.stop_dist[str(b.stop_list[i]) + '_' + str(i)] = trip_dist[i]
                b.schedule[str(b.stop_list[i]) + '_' + str(i)] = schedule[i]
            else:
                b.left_stop.append(str(b.stop_list[i]))
                b.stop_dist[str(b.stop_list[i])] = trip_dist[i]
                b.schedule[str(b.stop_list[i])] = schedule[i]
        b.stop_list = b.left_stop[:]
        b.set()
        if route_id in bus_routes:
            bus_routes[route_id].append(b)
        else:
            bus_routes[route_id] = [b]
    # Do not consider the route with only 1 trip
    bus_routes_ = {}
    for k, v in bus_routes.items():
        if len(v) > 1:
            bus_routes_[k] = v
    return bus_routes_
def getStopList(data, read=0):
    """Build Bus_stop objects from a GTFS feed and attach passenger demand.

    Parameters:
        data (str): Name of the dataset folder under ./data/.
        read (int): 0 loads aggregated hourly demand rates ("macro"),
            anything else instantiates individual Passenger objects ("micro").

    Returns:
        tuple: (stop_list dict of stop_id -> Bus_stop, number of passengers).
    """
    my_path = os.path.abspath(os.path.dirname(__file__))
    path = my_path + "/data/" + data + "/"
    _path_stops = path + 'stops.txt'
    _path_st = path + 'stop_times.txt'
    _path_trips = path + 'trips.txt'
    stops = pd.DataFrame(pd.read_csv(_path_stops))
    stop_times = pd.DataFrame(pd.read_csv(_path_st))
    trips = pd.DataFrame(pd.read_csv(_path_trips))
    stop_list = {}
    # Keep one record per stop (the one with the largest travelled distance),
    # ordered along the route.
    select_stops = pd.merge(stops, stop_times, on=['stop_id'], how='left')
    select_stops = select_stops.sort_values(by='shape_dist_traveled', ascending=False)
    select_stops = select_stops.drop_duplicates(subset='stop_id', keep="first").sort_values(by='shape_dist_traveled',
                                                                                            ascending=True)
    for i in range(select_stops.shape[0]):
        stop = Bus_stop(id=str(select_stops.iloc[i]['stop_id']), lat=select_stops.iloc[i]['stop_lat'],
                        lon=select_stops.iloc[i]['stop_lon'])
        stop.loc = select_stops.iloc[i]['shape_dist_traveled']
        try:
            stop.next_stop = str(select_stops.iloc[i + 1]['stop_id'])
        except IndexError:
            # Last stop on the route has no successor.
            stop.next_stop = None
        stop_list[str(select_stops.iloc[i]['stop_id'])] = stop
    _path_demand = path + 'demand.csv'
    pax_num = 0
    try:
        demand = pd.DataFrame(pd.read_csv(_path_demand))
    except Exception:
        print('No available demand file')
        return stop_list, 0
    # Ride start times come in either time-only or full-datetime format.
    try:
        demand['Ride_Start_Time'] = pd.to_datetime(demand['Ride_Start_Time'], format='%H:%M:%S')
    except Exception:
        demand['Ride_Start_Time'] = pd.to_datetime(demand['Ride_Start_Time'], format="%Y-%m-%d %H:%M:%S")
    demand['Ride_Start_Time_sec'] = (demand.iloc[:]['Ride_Start_Time'].dt.hour * 60 + demand.iloc[:][
        'Ride_Start_Time'].dt.minute) * 60 + demand.iloc[:]['Ride_Start_Time'].dt.second
    demand.dropna(subset=['ALIGHTING_STOP_STN'], inplace=True)
    demand = demand[demand.ALIGHTING_STOP_STN != demand.BOARDING_STOP_STN]
    demand = demand.sort_values(by='Ride_Start_Time_sec')
    for stop_id, stop in stop_list.items():
        demand_by_stop = demand[demand['BOARDING_STOP_STN'] == int(stop_id)]
        # macro demand setting
        if read == 0:
            t = 0
            while t < 24:
                d = demand_by_stop[(demand_by_stop['Ride_Start_Time_sec'] >= t * 3600) & (
                        demand_by_stop['Ride_Start_Time_sec'] < (t + 1) * 3600)]
                stop.dyna_arr_rate.append(d.shape[0] / 3600.)
                for dest_id in stop_list.keys():
                    # Fixed: the boolean mask was built from the full `demand`
                    # frame and applied to the subset `d`, which forced a
                    # (deprecated) reindexing. Filter `d` directly instead.
                    od = d[d['ALIGHTING_STOP_STN'] == int(dest_id)]
                    if od.empty:
                        continue
                    if dest_id not in stop.dest:
                        stop.dest[dest_id] = [0 for _ in range(24)]
                    stop.dest[dest_id][t] = od.shape[0] / 3600.
                t += 1
        else:
            # micro demand setting
            for i in range(demand_by_stop.shape[0]):
                pax = Passenger(id=demand_by_stop.iloc[i]['TripID'], origin=stop_id,
                                plan_board_time=float(demand_by_stop.iloc[i]['Ride_Start_Time_sec']))
                pax.dest = str(int(demand_by_stop.iloc[i]['ALIGHTING_STOP_STN']))
                pax.realcost = float(demand_by_stop.iloc[i]['Ride_Time']) * 60.
                pax.route = str(demand_by_stop.iloc[i]['Srvc_Number']) + '_' + str(
                    int(demand_by_stop.iloc[i]['Direction']))
                stop.pax[pax.id] = pax
                pax_num += 1
    return stop_list, pax_num
def demand_analysis(engine=None):
    """Export the hourly boarding demand per stop to 'demand.csv'.

    Builds a 24 x n_stops matrix (rows = hours of day, columns = stop
    positions) suitable for a stacked area chart.

    Parameters:
        engine: Simulation engine exposing `busstop_list` (stop_id -> stop
            with a `pax` dict of passengers). When None, nothing is written.

    Returns:
        None
    """
    if engine is not None:
        stop_list = list(engine.busstop_list.keys())
        # Map stop id -> column position (was a manual counter loop).
        stop_hash = {stop_id: pos for pos, stop_id in enumerate(stop_list)}
        # Single pass over passengers instead of 24 scans of every stop;
        # bucket each passenger by planned boarding hour (dead `- 0` removed).
        demand = [np.zeros(len(stop_list)) for _ in range(24)]
        for s in stop_list:
            for pax in engine.busstop_list[s].pax.values():
                hour = int(pax.plan_board_time / 3600)
                if 0 <= hour < 24:
                    demand[hour][stop_hash[s]] += 1
        df = pd.DataFrame(demand, columns=[str(i) for i in range(len(stop_list))])
        df.to_csv('demand.csv')
    return None
def sim_validate(engine, data):
actual_onboard = []
sim_onboard = []
sim_travel_cost = []
actual_travel_cost = []
for pid, pax in engine.pax_list.items():
actual_onboard.append(pax.plan_board_time)
sim_onboard.append(pax.onboard_time)
sim_travel_cost.append(abs(pax.onboard_time - pax.alight_time))
actual_travel_cost.append(pax.realcost)
actual_onboard = np.array(actual_onboard)
sim_onboard = np.array(sim_onboard)
actual_travel_cost = np.array(actual_travel_cost)
sim_travel_cost = np.array(sim_travel_cost)
print('Boarding RMSE:%g' % (np.sqrt(np.mean((actual_onboard - sim_onboard) ** 2))))
print('Travel RMSE:%g' % (np.sqrt(np.mean((actual_travel_cost - sim_travel_cost) ** 2))))
sim_comp = | pd.DataFrame() | pandas.DataFrame |
"""
CompPySS
Python implementation of the Comparative Proteomic Analysis Software Suite (CompPASS)
developed by Dr. <NAME> for defining the human deubiquitinating enzyme interaction
landscape (Sowa, Mathew E., et al 2009). Based on the R packages cRomppass (David
Nusinow) and SMAD (<NAME>).
"""
from functools import partial
import math
import numpy as np
import pandas as pd
def preprocess(df: pd.DataFrame) -> pd.DataFrame:
    """Sets DataFrame index, drops rows with spectral_count of 0, and only keep the
    maximum spectral count for bait-prey pairs if there are duplicates within a given
    replicate.
    """
    indexed = df.set_index(['bait', 'prey'])
    observed = indexed.loc[lambda frame: frame.spectral_count > 0]
    return observed.groupby(['prey', 'bait', 'replicate']).agg('max')
def entropy(s: pd.Series) -> float:
    """Calculates the Shannon entropy for a list of values. To avoid taking the log of
    zero, a fractional pseudocount of 1/(# of values) is added to each value.
    """
    pseudocount = 1 / len(s)
    p = (s + pseudocount) / (s.sum() + 1)
    return sum(-p * np.log2(p))
def calculate_aggregate_stats(df: pd.DataFrame) -> pd.DataFrame:
    """Calculates the following aggregate statistics from an input dataframe:
    ave_psm: mean of the PSM values for each bait-prey pair across replicates.
    p: number of replicates in which each bait-prey pair was detected.
    entropy: Shannon entropy.
    """
    by_pair = df.groupby(['bait', 'prey'])
    # Named aggregation keeps the output column order: ave_psm, p, entropy.
    return by_pair.agg(
        ave_psm=('spectral_count', 'mean'),
        p=('spectral_count', 'count'),
        entropy=('spectral_count', entropy),
    )
def mean_(s: pd.Series, n: int) -> float:
    """Calculates the mean of a series. Denominator is the total number of unique baits,
    so ave_psm for bait-prey pairs that were not detected are counted as zeroes."""
    bait_mask = prey_equals_bait(s)
    non_bait_total = s.loc[~bait_mask].sum()
    return non_bait_total / n
def extend_series_with_zeroes(s: pd.Series, n: int) -> pd.Series:
"""Extends Series to n elements by filling in missing elements with zero values.
'prey' level of the multindex is preserved. This is used for calculating the
standard deviation.
"""
prey = s.index.get_level_values('prey')[0]
n_to_extend = n - len(s)
extension = pd.Series(0, index=((prey, None) for _ in range(n_to_extend)))
return | pd.concat([s, extension]) | pandas.concat |
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from divmachines.utility.helper import check_random_state
def _get_cv(cv):
    """Resolve a cross-validator by key from the CROSS_VALIDATOR registry."""
    try:
        return CROSS_VALIDATOR[cv]
    except KeyError:
        raise ValueError("Consistent Cross Validator must be provided")
class CrossValidator(metaclass=ABCMeta):
    """
    Base class of the Data Partitioning strategy or
    Cross Validation strategy.

    Subclasses implement `_iter_indices_mask`, yielding one boolean test
    mask per split over the (possibly shuffled) index array.
    """

    def __init__(self):
        pass

    @abstractmethod
    def _iter_indices_mask(self, x, y, indices):
        raise NotImplementedError

    def split(self, x, y):
        """
        Data partitioning function,
        it returns the training and the test indexes
        Parameters
        ----------
        x: ndarray
            training samples
        y: ndarray
            target value for samples
        Returns
        -------
        train_index : ndarray
            The training set indices for that split.
        test_index : ndarray
            The testing set indices for that split.
        """
        indices = np.arange(len(x))
        for mask in self._iter_indices_mask(x, y, indices):
            yield indices[~mask], indices[mask]
class KFold(CrossValidator):
    """
    K-fold cross validation strategy
    It divides the dataset into k independent fold
    and at each iteration considers one fold as the
    test set and the remaining folds as the training sets
    Parameters
    ----------
    folds: int, optional
        Number of fold to use for the k-fold cross validation.
        Minimum is 2 an default is 3.
    shuffle: boolean, optional
        Whether to shuffle the data before splitting into batches.
        By default it shuffle the data
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self, folds=3, shuffle=True, random_state=None):
        super(KFold, self).__init__()
        if folds < 2:
            raise ValueError("Number of folds too low, minimum value is 2")
        self._folds = folds
        self._shuffle = shuffle
        self._random_state = random_state

    def _iter_indices_mask(self, x, y, indices):
        # Yield one boolean test mask per fold over the (shuffled) indices.
        if self._shuffle:
            check_random_state(self._random_state).shuffle(indices)
        n_splits = self._folds
        # `np.int`/`np.bool` aliases were removed in NumPy 1.24; use the
        # Python builtins instead.
        fold_sizes = np.full(n_splits, len(x) // n_splits, dtype=int)
        # Spread the remainder over the first folds.
        fold_sizes[:len(x) % n_splits] += 1
        current = 0
        mask = np.zeros(len(indices), dtype=bool)
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            copy_mask = np.copy(mask)
            copy_mask[indices[start:stop]] = True
            current = stop
            yield copy_mask
class LeaveOneOut(CrossValidator):
    """
    Leave-One-Out cross-validator
    Provides train/test indices to split data in train/test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.
    Parameters
    ----------
    shuffle: boolean, optional
        Whether to shuffle the data before splitting into batches.
        By default it shuffle the data
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self, shuffle=True, random_state=None):
        super(LeaveOneOut, self).__init__()
        self._shuffle = shuffle
        self._random_state = random_state

    def _iter_indices_mask(self, x, y, indices):
        # One split per sample: mark exactly one position as the test set.
        if self._shuffle:
            check_random_state(self._random_state).shuffle(indices)
        # `np.bool` alias was removed in NumPy 1.24; use the builtin.
        mask = np.zeros(len(indices), dtype=bool)
        for i in indices:
            new_mask = mask.copy()
            new_mask[i] = True
            yield new_mask
class NaiveHoldOut(CrossValidator):
"""
Naive Hold-Out cross-validator
Provides train/test indices to split data in train/test sets.
The partitioning is performed by randomly withholding some ratings
for some of the users.
The naive hold-out cross validation removes from the test set all
the user that are not present in the train set.
The classifiers could not handle the cold start problem.
Parameters
----------
ratio: float, optional
Ratio between the train set .
For instance, 0.7 means that the train set is 70% of the
original dataset, while the test set is 30% of it.
Default is 80% for the train set and 20% for the test set.
times: int, optional
Number of times to run Hold-out cross validation.
Higher values of it result in less variance in the result score.
Default is 10.
user_idx: int, optional
Indicates the user column index in the transaction data.
Default is 0.
item_idx: int, optional
Indicates the item column index in the transaction data
Default is 1.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
"""
    def __init__(self,
                 ratio=0.8,
                 times=10,
                 user_idx=0,
                 item_idx=1,
                 random_state=None):
        super(NaiveHoldOut, self).__init__()
        # Number of hold-out repetitions and the train-set fraction.
        self._times = times
        self._ratio = ratio
        # Column positions of user and item ids in the transaction data.
        self._user_idx = user_idx
        self._item_idx = item_idx
        self._random_state = random_state
def split(self, x, y):
data = | pd.DataFrame(x) | pandas.DataFrame |
import numpy as np
import pandas as pd
from hics.slice_similarity import continuous_similarity_matrix, categorical_similarity_matrix
from hics.slice_selection import select_by_similarity
class ScoredSlices:
def __init__(self, categorical, continuous, to_keep=5, threshold=None):
self.continuous = {feature: pd.DataFrame(columns=['to_value', 'from_value'])
for feature in continuous}
self.categorical = {feature['name']: pd.DataFrame(columns=feature['values'])
for feature in categorical}
self.scores = pd.Series()
self.to_keep = to_keep
if threshold is None:
self.threshold = ScoredSlices.default_threshold(len(categorical) + len(continuous))
else:
self.threshold = threshold
def add_slices(self, slices):
if isinstance(slices, dict):
self.add_from_dict(slices)
else:
self.add_from_object(slices)
def add_from_object(self, slices):
self.scores = self.scores.append(pd.Series(slices.scores)).sort_values(ascending=False, inplace=False)
for feature, df in slices.continuous.items():
self.continuous[feature] = pd.concat([self.continuous[feature], df], ignore_index=True)
self.continuous[feature] = self.continuous[feature].loc[self.scores.index, :].reset_index(drop=True)
for feature, df in slices.categorical.items():
self.categorical[feature] = pd.concat([self.categorical[feature], df], ignore_index=True)
self.categorical[feature] = self.categorical[feature].loc[self.scores.index, :].reset_index(drop=True)
self.scores.reset_index(drop=True, inplace=True)
def add_from_dict(self, slices):
new_scores = pd.Series(slices['scores'])
self.scores = self.scores.append(new_scores, ignore_index=True).sort_values(ascending=False, inplace=False)
for feature in self.continuous:
content = pd.DataFrame(slices['features'][feature])
self.continuous[feature] = pd.concat([self.continuous[feature], content], ignore_index=True)
self.continuous[feature] = self.continuous[feature].loc[self.scores.index, :].reset_index(drop=True)
for feature in self.categorical:
content = pd.DataFrame(slices['features'][feature], columns=self.categorical[feature].columns)
self.categorical[feature] = pd.concat([self.categorical[feature], content], ignore_index=True)
self.categorical[feature] = self.categorical[feature].loc[self.scores.index, :].reset_index(drop=True)
self.scores.reset_index(drop=True, inplace=True)
def select_slices(self, similarity):
indices = list(range(len(similarity)))
selected = []
for i in range(self.to_keep):
if not indices:
break
selected.append(indices[0])
selection = indices[0]
indices = [index for index in indices if similarity[selection, index] < self.threshold]
return selected
    def reduce_slices(self):
        """Prune stored slices to at most `to_keep` mutually dissimilar ones.

        Pairwise similarity is the elementwise product of the continuous and
        categorical similarity matrices (all-ones when a kind is absent).
        """
        if self.continuous:
            continuous_similarity = continuous_similarity_matrix(self.continuous)
        else:
            continuous_similarity = np.ones((len(self.scores), len(self.scores)))
        if self.categorical:
            categorical_similarity = categorical_similarity_matrix(self.categorical)
        else:
            categorical_similarity = np.ones((len(self.scores), len(self.scores)))
        similarity = continuous_similarity * categorical_similarity
        selected = self.select_slices(similarity)
        # Keep only the selected rows everywhere, renumbering from 0.
        if self.categorical:
            self.categorical = {key: df.loc[selected, :].reset_index(drop=True)
                                for key, df in self.categorical.items()}
        if self.continuous:
            self.continuous = {key: df.loc[selected, :].reset_index(drop=True)
                               for key, df in self.continuous.items()}
        self.scores = self.scores.loc[selected].reset_index(drop=True)
def to_dict(self):
continuous_dict = {name: df.to_dict(orient='list') for name, df in self.continuous.items()}
categorical_dict = {name: df.to_dict(orient='list') for name, df in self.categorical.items()}
scores_list = self.scores.tolist()
return {'continuous': continuous_dict, 'categorical': categorical_dict, 'scores': scores_list, 'to_keep': self.to_keep, 'threshold': self.threshold}
def to_output(self, name_mapping=None):
if name_mapping is None:
name_mapping = ScoredSlices.default_name_mapping
result = []
for index, value in self.scores.iteritems():
current_result = {'deviation': value, 'features': {}}
if self.continuous:
for feature, df in self.continuous.items():
current_result['features'][name_mapping(feature)] = df.loc[index, :].to_dict()
if self.categorical:
for feature, df in self.categorical.items():
selected_values = df.columns[df.loc[index, :] == 1].astype(float).tolist() # TODO: remove that bullshit
current_result['features'][name_mapping(feature)] = selected_values
result.append(current_result)
return result
@staticmethod
def default_threshold(dimensions):
return pow(0.6, dimensions)
@staticmethod
def from_dict(dictionary):
continuous = {name: pd.DataFrame(description)
for name, description in dictionary['continuous'].items()}
categorical = {name: | pd.DataFrame(description) | pandas.DataFrame |
import contextlib
import json
import gzip
import io
import logging
import os.path
import pickle
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import pandas
COMMON_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'common-primitives')
# NOTE: This insertion should appear before any code attempting to resolve or load primitives,
# so the git submodule version of `common-primitives` is looked at first.
sys.path.insert(0, COMMON_PRIMITIVES_DIR)
TEST_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'data', 'primitives')
sys.path.insert(0, TEST_PRIMITIVES_DIR)
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.no_split import NoSplitDatasetSplitPrimitive
from common_primitives.random_forest import RandomForestClassifierPrimitive
from common_primitives.train_score_split import TrainScoreDatasetSplitPrimitive
from test_primitives.random_classifier import RandomClassifierPrimitive
from test_primitives.fake_score import FakeScorePrimitive
from d3m import cli, index, runtime, utils
from d3m.container import dataset as dataset_module
from d3m.contrib.primitives.compute_scores import ComputeScoresPrimitive
from d3m.metadata import base as metadata_base, pipeline as pipeline_module, pipeline_run as pipeline_run_module, problem as problem_module
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
PROBLEM_DIR = os.path.join(TEST_DATA_DIR, 'problems')
DATASET_DIR = os.path.join(TEST_DATA_DIR, 'datasets')
PIPELINE_DIR = os.path.join(TEST_DATA_DIR, 'pipelines')
class TestCLIRuntime(unittest.TestCase):
    def setUp(self):
        # Fresh temporary working directory for each test's outputs.
        self.test_dir = tempfile.mkdtemp()
    def tearDown(self):
        # Remove the per-test temporary directory and everything inside it.
        shutil.rmtree(self.test_dir)
    @classmethod
    def setUpClass(cls):
        """Register the local test/common primitives under their d3m paths
        so pipelines can resolve them without a full primitive index."""
        to_register = {
            'd3m.primitives.data_transformation.dataset_to_dataframe.Common': DatasetToDataFramePrimitive,
            'd3m.primitives.classification.random_forest.Common': RandomForestClassifierPrimitive,
            'd3m.primitives.classification.random_classifier.Test': RandomClassifierPrimitive,
            'd3m.primitives.data_transformation.column_parser.Common': ColumnParserPrimitive,
            'd3m.primitives.data_transformation.construct_predictions.Common': ConstructPredictionsPrimitive,
            'd3m.primitives.evaluation.no_split_dataset_split.Common': NoSplitDatasetSplitPrimitive,
            'd3m.primitives.evaluation.compute_scores.Test': FakeScorePrimitive,
            'd3m.primitives.evaluation.train_score_dataset_split.Common': TrainScoreDatasetSplitPrimitive,
            # We do not have to load this primitive, but loading it here prevents the package from loading all primitives.
            'd3m.primitives.evaluation.compute_scores.Core': ComputeScoresPrimitive,
        }
        # To hide any logging or stdout output.
        with utils.silence():
            for python_path, primitive in to_register.items():
                index.register_primitive(python_path, primitive)
    def _call_cli_runtime(self, arg):
        """Invoke the d3m CLI with output silenced and return captured log records.

        A dummy warning is logged first so that ``assertLogs`` never fails for
        lack of records; that first record is stripped from the result.
        """
        logger = logging.getLogger('d3m.runtime')
        with utils.silence():
            with self.assertLogs(logger=logger) as cm:
                # So that at least one message is logged.
                logger.warning("Debugging.")
                cli.main(arg)
        # We skip our "debugging" message.
        return cm.records[1:]
def _call_cli_runtime_without_fail(self, arg):
try:
return self._call_cli_runtime(arg)
except Exception as e:
self.fail(traceback.format_exc())
def _assert_valid_saved_pipeline_runs(self, pipeline_run_save_path):
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
try:
pipeline_run_module.validate_pipeline_run(pipeline_run_dict)
except Exception as e:
self.fail(traceback.format_exc())
def _validate_previous_pipeline_run_ids(self, pipeline_run_save_path):
ids = set()
prev_ids = set()
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
ids.add(pipeline_run_dict['id'])
if 'previous_pipeline_run' in pipeline_run_dict:
prev_ids.add(pipeline_run_dict['previous_pipeline_run']['id'])
self.assertTrue(
prev_ids.issubset(ids),
'Some previous pipeline run ids {} are not in the set of pipeline run ids {}'.format(prev_ids, ids)
)
def test_fit_multi_input(self):
    """Fit a multi-input pipeline (with a problem doc) and validate the saved runs."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    # The same dataset is supplied twice because the pipeline takes two inputs.
    arg = [
        '',
        'runtime',
        'fit',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--problem',
        os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
        '--expose-produced-outputs',
        self.test_dir,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._assert_standard_output_metadata()
def test_fit_without_problem(self):
    """Fit a multi-input pipeline without a problem doc; check outputs and predictions."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
    output_csv_path = os.path.join(self.test_dir, 'output.csv')
    arg = [
        '',
        'runtime',
        'fit',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
        '--save',
        fitted_pipeline_path,
        '--expose-produced-outputs',
        self.test_dir,
        '--output',
        output_csv_path,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    # Every intermediate step output must have been exposed into the test directory.
    self.assertEqual(utils.list_files(self.test_dir), [
        'fitted-pipeline',
        'output.csv',
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
        'steps.2.produce/data.csv',
        'steps.2.produce/metadata.json'
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=11225, outputs_path='outputs.0/data.csv')
    self._assert_prediction_sum(prediction_sum=11225, outputs_path='output.csv')
def test_produce_without_problem(self):
    """Fit and save a pipeline without a problem doc, then produce from the saved fit."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-no-problem-pipeline')
    output_csv_path = os.path.join(self.test_dir, 'output.csv')
    # First fit and save the fitted pipeline.
    arg = [
        '',
        'runtime',
        'fit',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
        '--save',
        fitted_pipeline_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    # Then produce using the fitted pipeline saved above.
    arg = [
        '',
        'runtime',
        'produce',
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--output',
        output_csv_path,
        '--fitted-pipeline',
        fitted_pipeline_path,
        '--expose-produced-outputs',
        self.test_dir,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self.assertEqual(utils.list_files(self.test_dir), [
        'fitted-no-problem-pipeline',
        'output.csv',
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
        'steps.2.produce/data.csv',
        'steps.2.produce/metadata.json'
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
    self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_fit_produce_without_problem(self):
    """fit-produce in one CLI invocation without a problem doc."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    output_csv_path = os.path.join(self.test_dir, 'output.csv')
    arg = [
        '',
        'runtime',
        'fit-produce',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
        '--output',
        output_csv_path,
        '--expose-produced-outputs',
        self.test_dir,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self.assertEqual(utils.list_files(self.test_dir), [
        'output.csv',
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
        'steps.2.produce/data.csv',
        'steps.2.produce/metadata.json'
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    # fit-produce writes two runs (fit + produce); the produce run must reference the fit run.
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
    self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_nonstandard_fit_without_problem(self):
    """Fit a non-standard (multi-output) pipeline without a problem doc."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
    arg = [
        '',
        'runtime',
        'fit',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
        '--save',
        fitted_pipeline_path,
        '--expose-produced-outputs',
        self.test_dir,
        '--not-standard-pipeline',
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    # A non-standard pipeline exposes a second output ('outputs.1').
    self.assertEqual(utils.list_files(self.test_dir), [
        'fitted-pipeline',
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'outputs.1/data.csv',
        'outputs.1/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=10710, outputs_path='outputs.0/data.csv')
    self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_produce_without_problem(self):
    """Fit a non-standard pipeline, then produce from the saved fitted pipeline."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
    # First fit and save the fitted (non-standard) pipeline.
    arg = [
        '',
        'runtime',
        'fit',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
        '--save',
        fitted_pipeline_path,
        '--not-standard-pipeline'
    ]
    self._call_cli_runtime_without_fail(arg)
    # Then produce using the fitted pipeline saved above.
    arg = [
        '',
        'runtime',
        'produce',
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--fitted-pipeline',
        fitted_pipeline_path,
        '--expose-produced-outputs',
        self.test_dir,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self.assertEqual(utils.list_files(self.test_dir), [
        'fitted-pipeline',
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'outputs.1/data.csv',
        'outputs.1/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json'
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
    self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_fit_produce_without_problem(self):
    """fit-produce a non-standard pipeline in one CLI invocation."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    arg = [
        '',
        'runtime',
        'fit-produce',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
        '--expose-produced-outputs',
        self.test_dir,
        '--not-standard-pipeline',
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self.assertEqual(utils.list_files(self.test_dir), [
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'outputs.1/data.csv',
        'outputs.1/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    # The produce run must reference the fit run it was chained with.
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
    self._assert_nonstandard_output(outputs_name='outputs.1')
def test_fit_produce_multi_input(self):
    """fit-produce a multi-input pipeline with a problem doc."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    arg = [
        '',
        'runtime',
        'fit-produce',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--problem',
        os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
        '--expose-produced-outputs',
        self.test_dir,
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self.assertEqual(utils.list_files(self.test_dir), [
        'outputs.0/data.csv',
        'outputs.0/metadata.json',
        'pipeline_run.yml',
        'steps.0.produce/data.csv',
        'steps.0.produce/metadata.json',
        'steps.1.produce/data.csv',
        'steps.1.produce/metadata.json',
        'steps.2.produce/data.csv',
        'steps.2.produce/metadata.json',
    ])
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    self._assert_standard_output_metadata()
    self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
def test_fit_score(self):
    """fit-score with a problem doc; training-data accuracy must be perfect."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    arg = [
        '',
        'runtime',
        'fit-score',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--problem',
        os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--score-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
        '--scores',
        os.path.join(self.test_dir, 'scores.csv'),
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    # Scoring on the training set must yield perfect accuracy.
    dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
    self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
    self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_fit_score_without_problem(self):
    """fit-score without a problem doc, using a custom (fake) scoring pipeline."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    arg = [
        '',
        'runtime',
        'fit-score',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--score-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
        '--scoring-pipeline',
        os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
        # this argument has no effect: the fake scoring pipeline ignores requested metrics
        '--metric',
        'F1_MACRO',
        '--metric',
        'ACCURACY',
        '--scores',
        os.path.join(self.test_dir, 'scores.csv'),
        '-O',
        pipeline_run_save_path,
    ]
    logging_records = self._call_cli_runtime_without_fail(arg)
    # The unused metric hyper-parameters are reported exactly once in the log.
    self.assertEqual(len(logging_records), 1)
    self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
    self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
    self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
@staticmethod
def _get_iris_dataset_path():
    """Path to the iris test dataset document."""
    dataset_doc = os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json')
    return dataset_doc
@staticmethod
def _get_iris_problem_path():
    """Path to the iris test problem document."""
    problem_doc = os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json')
    return problem_doc
@staticmethod
def _get_random_forest_pipeline_path():
    """Path to the random-forest classifier test pipeline."""
    pipeline_doc = os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml')
    return pipeline_doc
@staticmethod
def _get_no_split_data_pipeline_path():
    """Path to the no-split data-preparation test pipeline."""
    pipeline_doc = os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml')
    return pipeline_doc
@staticmethod
def _get_train_test_split_data_pipeline_path():
    """Path to the train/test-split data-preparation test pipeline."""
    pipeline_doc = os.path.join(PIPELINE_DIR, 'data-preparation-train-test-split.yml')
    return pipeline_doc
def _get_pipeline_run_save_path(self):
return os.path.join(self.test_dir, 'pipeline_run.yml')
def _get_predictions_path(self):
return os.path.join(self.test_dir, 'predictions.csv')
def _get_scores_path(self):
return os.path.join(self.test_dir, 'scores.csv')
def _get_pipeline_rerun_save_path(self):
return os.path.join(self.test_dir, 'pipeline_rerun.yml')
def _get_rescores_path(self):
return os.path.join(self.test_dir, 'rescores.csv')
def _fit_iris_random_forest(
self, *, predictions_path=None, fitted_pipeline_path=None, pipeline_run_save_path=None
):
if pipeline_run_save_path is None:
pipeline_run_save_path = self._get_pipeline_run_save_path()
arg = [
'',
'runtime',
'fit',
'--input',
self._get_iris_dataset_path(),
'--problem',
self._get_iris_problem_path(),
'--pipeline',
self._get_random_forest_pipeline_path(),
'-O',
pipeline_run_save_path
]
if predictions_path is not None:
arg.append('--output')
arg.append(predictions_path)
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def _fit_iris_random_classifier_without_problem(self, *, fitted_pipeline_path):
    """Fit the random-classifier pipeline on iris without supplying a problem doc."""
    run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    cli_args = [
        '', 'runtime', 'fit',
        '--input', os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline', os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
        '-O', run_save_path,
    ]
    if fitted_pipeline_path is not None:
        cli_args += ['--save', fitted_pipeline_path]
    self._call_cli_runtime_without_fail(cli_args)
def test_fit(self):
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
self._fit_iris_random_forest(
fitted_pipeline_path=fitted_pipeline_path, pipeline_run_save_path=pipeline_run_save_path
)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
self.assertTrue(os.path.isfile(pipeline_run_save_path))
def test_evaluate(self):
    """Evaluate with a data-preparation pipeline and two requested metrics."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    scores_path = os.path.join(self.test_dir, 'scores.csv')
    arg = [
        '',
        'runtime',
        'evaluate',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--problem',
        os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
        '--data-pipeline',
        os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
        '--scores',
        scores_path,
        '--metric',
        'ACCURACY',
        '--metric',
        'F1_MACRO',
        '-O',
        pipeline_run_save_path
    ]
    self._call_cli_runtime_without_fail(arg)
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    # evaluate adds a 'fold' column to the scores table.
    dataframe = pandas.read_csv(scores_path)
    self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
    self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0], ['F1_MACRO', 1.0, 1.0, 0, 0]])
def test_evaluate_without_problem(self):
    """Evaluate without a problem doc, using a custom (fake) scoring pipeline."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    scores_path = os.path.join(self.test_dir, 'scores.csv')
    arg = [
        '',
        'runtime',
        'evaluate',
        '--input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--pipeline',
        os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
        '--data-pipeline',
        os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
        '--scoring-pipeline',
        os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
        # this argument has no effect: the fake scoring pipeline ignores requested metrics
        '--metric',
        'ACCURACY',
        '--scores',
        scores_path,
        '-O',
        pipeline_run_save_path
    ]
    logging_records = self._call_cli_runtime_without_fail(arg)
    # The unused metric hyper-parameters are reported exactly once in the log.
    self.assertEqual(len(logging_records), 1)
    self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
    dataframe = pandas.read_csv(scores_path)
    self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
    self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0]])
def test_score(self):
    """Score a previously fitted pipeline; metric order in the output follows the CLI."""
    pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
    fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
    self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
    self.assertTrue(os.path.isfile(fitted_pipeline_path))
    scores_path = os.path.join(self.test_dir, 'scores.csv')
    arg = [
        '',
        'runtime',
        'score',
        '--fitted-pipeline',
        fitted_pipeline_path,
        '--test-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--score-input',
        os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
        '--scores',
        scores_path,
        '--metric',
        'F1_MACRO',
        '--metric',
        'ACCURACY',
        '-O',
        pipeline_run_save_path,
    ]
    self._call_cli_runtime_without_fail(arg)
    self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
    self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
    dataframe = pandas.read_csv(scores_path)
    self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
    self.assertEqual(dataframe.values.tolist(), [['F1_MACRO', 1.0, 1.0, 0], ['ACCURACY', 1.0, 1.0, 0]])
def test_score_without_problem_without_metric(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_classifier_without_problem(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
'--scores',
scores_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = | pandas.read_csv(scores_path) | pandas.read_csv |
#!/usr/bin/env python3
"""Create trendbargraphs of the data for various periods."""
import argparse
from datetime import datetime as dt
import sqlite3 as s3
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import constants
# Database file and table names for the room sensors (RHT) and aircos (AC).
DATABASE = constants.TREND['database']
TABLE_RHT = constants.TREND['sql_table_rht']
TABLE_AC = constants.TREND['sql_table_ac']
# Mappings/lists describing the known rooms, sensor devices and airco units.
ROOMS = constants.ROOMS
DEVICE_LIST = constants.DEVICES
AIRCO_LIST = constants.AIRCO
# Filled in at runtime from the command line; DEBUG enables verbose tracing.
OPTION = ""
DEBUG = False
def fetch_data(hours_to_fetch=48, aggregation=1):
    """Fetch RHT and AC data and combine them into a single dict of dataframes."""
    rht_frames = fetch_data_rht(hours_to_fetch=hours_to_fetch, aggregation=aggregation)
    ac_frames = fetch_data_ac(hours_to_fetch=hours_to_fetch, aggregation=aggregation)
    # Move the outside temperature from the Daikin data to the table with the
    # other temperature sensors.
    for frame in ac_frames.values():
        if 'T(out)' in frame:
            rht_frames['temperature']['T(out)'] = frame['T(out)']
            frame.drop(['T(out)'], axis=1, inplace=True, errors='ignore')
    combined = {}
    combined.update(rht_frames)
    combined.update(ac_frames)
    return combined
def fetch_data_ac(hours_to_fetch=48, aggregation=1):
    """
    Query the database to fetch the requested airco (AC) data.

    :param hours_to_fetch: (int) number of hours of data to fetch
    :param aggregation: (int) number of minutes to aggregate per datapoint
    :return: (dict) dataframes keyed 'temperature_ac' and 'compressor'
    """
    df_cmp = None  # collated compressor frequencies, one column per airco
    df_t = None    # collated temperatures (actual and target per airco)
    if DEBUG:
        print("*** fetching AC ***")
    for airco in AIRCO_LIST:
        airco_id = airco['name']
        where_condition = f" (sample_time >= datetime(\'now\', \'-{hours_to_fetch + 1} hours\'))" \
                          f" AND (room_id LIKE \'{airco_id}\')"
        s3_query = f"SELECT * FROM {TABLE_AC} WHERE {where_condition}"
        if DEBUG:
            print(s3_query)
        with s3.connect(DATABASE) as con:
            df = pd.read_sql_query(s3_query,
                                   con,
                                   parse_dates='sample_time',
                                   index_col='sample_epoch'
                                   )
        # coerce all data columns to numeric so resampling/interpolation works
        for c in df.columns:
            if c not in ['sample_time']:
                df[c] = pd.to_numeric(df[c], errors='coerce')
        df.index = pd.to_datetime(df.index, unit='s').tz_localize("UTC").tz_convert("Europe/Amsterdam")
        # resample to monotonic timeline
        df = df.resample(f'{aggregation}min').mean()
        df = df.interpolate(method='slinear')
        # remove temperature target values for samples when the AC is turned off.
        df.loc[df.ac_power == 0, 'temperature_target'] = np.nan
        # conserve memory; we don't need these.
        df.drop(['ac_mode', 'ac_power', 'room_id'], axis=1, inplace=True, errors='ignore')
        df_cmp = collate(df_cmp, df,
                         columns_to_drop=['temperature_ac', 'temperature_target', 'temperature_outside'],
                         column_to_rename='cmp_freq',
                         new_name=airco_id
                         )
        if df_t is None:
            # first airco: keep 'temperature_outside' so it can be reported once
            df = collate(None, df,
                         columns_to_drop=['cmp_freq'],
                         column_to_rename='temperature_ac',
                         new_name=airco_id
                         )
            df_t = collate(df_t, df,
                           columns_to_drop=[],
                           column_to_rename='temperature_target',
                           new_name=f'{airco_id}_tgt'
                           )
        else:
            # subsequent aircos: drop the duplicate outside temperature
            df = collate(None, df,
                         columns_to_drop=['cmp_freq', 'temperature_outside'],
                         column_to_rename='temperature_ac',
                         new_name=airco_id
                         )
            df_t = collate(df_t, df,
                           columns_to_drop=[],
                           column_to_rename='temperature_target',
                           new_name=f'{airco_id}_tgt'
                           )
    # create a new column containing the max value of both aircos, then remove the airco_ columns
    df_cmp['cmp_freq'] = df_cmp[['airco0', 'airco1']].apply(np.max, axis=1)
    df_cmp.drop(['airco0', 'airco1'], axis=1, inplace=True, errors='ignore')
    if DEBUG:
        print(df_cmp)
    # rename the outside-temperature column to something shorter, or drop it
    # when the user did not ask for it on the command line
    if OPTION.outside:
        df_t.rename(columns={'temperature_outside': 'T(out)'}, inplace=True)
    else:
        df_t.drop(['temperature_outside'], axis=1, inplace=True, errors='ignore')
    if DEBUG:
        print(df_t)
    ac_data_dict = {'temperature_ac': df_t, 'compressor': df_cmp}
    return ac_data_dict
def fetch_data_rht(hours_to_fetch=48, aggregation=1):
    """
    Query the database to fetch the requested room temperature/humidity (RHT) data.

    :param hours_to_fetch: (int) number of hours of data to fetch
    :param aggregation: (int) number of minutes to aggregate per datapoint
    :return: (dict) dataframes keyed 'temperature', 'humidity' and 'voltage'
    """
    if DEBUG:
        print("*** fetching RHT ***")
    df_t = df_h = df_v = None
    for device in DEVICE_LIST:
        room_id = device[1]
        where_condition = f" (sample_time >= datetime(\'now\', \'-{hours_to_fetch + 1} hours\'))" \
                          f" AND (room_id LIKE \'{room_id}\')"
        s3_query = f"SELECT * FROM {TABLE_RHT} WHERE {where_condition}"
        if DEBUG:
            print(s3_query)
        with s3.connect(DATABASE) as con:
            df = pd.read_sql_query(s3_query,
                                   con,
                                   parse_dates='sample_time',
                                   index_col='sample_epoch'
                                   )
        # conserve memory; we dont need the room_id repeated in every row.
        df.drop('room_id', axis=1, inplace=True, errors='ignore')
        # coerce all data columns to numeric so resampling/interpolation works
        for c in df.columns:
            if c not in ['sample_time']:
                df[c] = pd.to_numeric(df[c], errors='coerce')
        df.index = pd.to_datetime(df.index, unit='s').tz_localize("UTC").tz_convert("Europe/Amsterdam")
        # resample to monotonic timeline
        df = df.resample(f'{aggregation}min').mean()
        df = df.interpolate(method='slinear')
        # map the sensor's room id to a friendly name; fall back to the raw id
        try:
            new_name = ROOMS[room_id]
        except KeyError:
            new_name = room_id
        df.drop('sample_time', axis=1, inplace=True, errors='ignore')
        df_t = collate(df_t, df,
                       columns_to_drop=['voltage', 'humidity'],
                       column_to_rename='temperature',
                       new_name=new_name
                       )
        df_h = collate(df_h, df,
                       columns_to_drop=['temperature', 'voltage'],
                       column_to_rename='humidity',
                       new_name=new_name
                       )
        df_v = collate(df_v, df,
                       columns_to_drop=['temperature', 'humidity'],
                       column_to_rename='voltage',
                       new_name=new_name
                       )
    if DEBUG:
        print(f"TEMPERATURE\n", df_t)
        print(f"HUMIDITY\n", df_h)
        print(f"VOLTAGE\n", df_v)
    rht_data_dict = {'temperature': df_t, 'humidity': df_h, 'voltage': df_v}
    return rht_data_dict
def collate(prev_df, data_frame, columns_to_drop=[], column_to_rename='', new_name='room_id'):
# drop the 'columns_to_drop'
for col in columns_to_drop:
data_frame = data_frame.drop(col, axis=1, errors='ignore')
# rename the 'column_to_rename'
data_frame.rename(columns={f'{column_to_rename}': new_name}, inplace=True)
# collate both dataframes
if prev_df is not None:
data_frame = | pd.merge(prev_df, data_frame, left_index=True, right_index=True, how='left') | pandas.merge |
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
    """Identity stand-in used to patch matrix symmetrization in tests."""
    result = x
    return result
class SimpleExample(object):
    """Shared test fixtures: small customer datasets and their expected groupings."""

    def __init__(self):
        # Six customers; several rows are near-duplicate spellings of the same company.
        self.customers_df = pd.DataFrame(
            [
                ('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
                ('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
                ('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
                ('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
                ('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
                ('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
            ],
            columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
        )
        # Same as customers_df plus one extra near-duplicate row ('HyperStartup Inc.').
        self.customers_df2 = pd.DataFrame(
            [
                ('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
                ('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
                ('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
                ('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
                ('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
                ('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
                ('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
            ],
            columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
        )
        self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
        self.one_string = pd.Series(['BB0'])
        self.two_strings = pd.Series(['Hyper', 'Hyp'])
        self.whatever_series_1 = pd.Series(['whatever'])
        # Expected matches when zero-similarity pairs are included in the result.
        self.expected_result_with_zeroes = pd.DataFrame(
            [
                (1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
                (0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
                (2, 'Hyper Startup Inc.', 0., 'whatever', 0),
                (3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
                (4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
                (5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
            ],
            columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
        )
        # Expected group representatives using the 'centroid' strategy.
        self.expected_result_centroid = pd.Series(
            [
                'Mega Enterprises Corporation',
                'Hyper Startup Inc.',
                'Hyper Startup Inc.',
                'Hyper Startup Inc.',
                'Hyper Hyper Inc.',
                'Mega Enterprises Corporation'
            ],
            name='group_rep_Customer Name'
        )
        # Same as above, with the representative's original index included.
        self.expected_result_centroid_with_index_col = pd.DataFrame(
            [
                (0, 'Mega Enterprises Corporation'),
                (2, 'Hyper Startup Inc.'),
                (2, 'Hyper Startup Inc.'),
                (2, 'Hyper Startup Inc.'),
                (4, 'Hyper Hyper Inc.'),
                (0, 'Mega Enterprises Corporation')
            ],
            columns=['group_rep_index', 'group_rep_Customer Name']
        )
        # Expected group representatives using the 'first' strategy.
        self.expected_result_first = pd.Series(
            [
                'Mega Enterprises Corporation',
                'Hyper Startup Incorporated',
                'Hyper Startup Incorporated',
                'Hyper Startup Incorporated',
                'Hyper Hyper Inc.',
                'Mega Enterprises Corporation'
            ],
            name='group_rep_Customer Name'
        )
class StringGrouperConfigTest(unittest.TestCase):
    """Unit tests for the StringGrouperConfig value object."""

    def test_config_defaults(self):
        """Empty initialisation should set default values"""
        cfg = StringGrouperConfig()
        self.assertEqual(cfg.min_similarity, DEFAULT_MIN_SIMILARITY)
        self.assertEqual(cfg.max_n_matches, None)
        self.assertEqual(cfg.regex, DEFAULT_REGEX)
        self.assertEqual(cfg.ngram_size, DEFAULT_NGRAM_SIZE)
        self.assertEqual(cfg.number_of_processes, DEFAULT_N_PROCESSES)
        self.assertEqual(cfg.ignore_case, DEFAULT_IGNORE_CASE)

    def test_config_immutable(self):
        """Configurations should be immutable"""
        cfg = StringGrouperConfig()
        with self.assertRaises(Exception) as _:
            cfg.min_similarity = 0.1

    def test_config_non_default_values(self):
        """Explicitly passed values should override the defaults"""
        cfg = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
        self.assertEqual(0.1, cfg.min_similarity)
        self.assertEqual(100, cfg.max_n_matches)
        self.assertEqual(1, cfg.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
    """Tests whether automatic blocking yields the same matches as manual blocking.

    Forces an OverflowError when the combined input Series length exceeds a
    given OverflowThreshold, which triggers automatic splitting of the
    Series/matrices into smaller blocks when n_blocks=None.
    """
    sort_cols = ['right_index', 'left_index']

    def fix_row_order(df):
        # Canonicalize row order so results from different blockings compare equal.
        return df.sort_values(sort_cols).reset_index(drop=True)

    simple_example = SimpleExample()
    # BUG FIX: the column selector was a corrupted placeholder ('<NAME>');
    # the fixture's company-name column is 'Customer Name'.
    df1 = simple_example.customers_df2['Customer Name']
    # first do manual blocking
    sg = StringGrouper(df1, min_similarity=0.1)
    pd.testing.assert_series_equal(sg.master, df1)
    self.assertEqual(sg.duplicates, None)
    matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
    self.assertEqual(sg._config.n_blocks, (1, 1))

    # Custom wrapper for this StringGrouper instance's _build_matches()
    # method, later used to mock _build_matches(). Defined here because
    # _build_matches() is a non-static method and needs access to sg.
    def mock_build_matches(OverflowThreshold,
                           real_build_matches=sg._build_matches):
        def wrapper(left_matrix,
                    right_matrix,
                    nnz_rows=None,
                    sort=True):
            if (left_matrix.shape[0] + right_matrix.shape[0]) > \
                    OverflowThreshold:
                raise OverflowError
            return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
        return wrapper

    def do_test_with(OverflowThreshold):
        nonlocal sg  # allows reference to sg, as sg will be modified below
        sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
        sg.clear_data()
        matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
        pd.testing.assert_series_equal(sg.master, df1)
        pd.testing.assert_frame_equal(matches, matches_auto)
        self.assertEqual(sg._config.n_blocks, None)
        # _build_matches is called more than once if and only if a split
        # occurred (more than one pair of matrix-blocks was multiplied).
        if len(sg._left_Series) + len(sg._right_Series) > \
                OverflowThreshold:
            self.assertGreater(sg._build_matches.call_count, 1)
        else:
            self.assertEqual(sg._build_matches.call_count, 1)

    # test auto blocking by forcing an OverflowError when the combined
    # Series' lengths exceed each threshold
    do_test_with(OverflowThreshold=100)  # does not trigger auto blocking
    do_test_with(OverflowThreshold=10)
    do_test_with(OverflowThreshold=5)
    do_test_with(OverflowThreshold=3)
    do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
    """Manual blocking with several right-block counts must reproduce the
    result of an unblocked match_strings call on the same single DataFrame."""
    order_cols = ['right_index', 'left_index']

    def canonical(frame):
        # Deterministic row order so frames can be compared directly.
        return frame.sort_values(order_cols).reset_index(drop=True)

    example = SimpleExample()
    df1 = example.customers_df2['<NAME>']
    baseline = canonical(match_strings(df1, min_similarity=0.1))
    for right_blocks in (2, 3, 4):
        blocked = canonical(
            match_strings(df1, n_blocks=(1, right_blocks), min_similarity=0.1))
        pd.testing.assert_frame_equal(baseline, blocked)
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
from datetime import datetime as dt
from datetime import timedelta
import glob
from scipy.stats import gamma
import sys
sys.path.insert(0,'model') # I hate this too but it allows everything to use the same helper functions.
from helper_functions import read_in_NNDSS
#Code taken from read_in_cases from Reff_functions. Preprocessing was not helpful for this situation.
def read_cases_lambda(case_file_date):
    """Read in NNDSS data and keep only the columns needed downstream.

    Returns a DataFrame with date_inferred, STATE, imported and local columns.
    """
    full_df = read_in_NNDSS(case_file_date)
    return full_df[['date_inferred', 'STATE', 'imported', 'local']]
def tidy_cases_lambda(interim_data, remove_territories=True):
    """Clean the interim NNDSS case table and melt it into a linelist.

    Parameters
    ----------
    interim_data : pd.DataFrame
        Columns date_inferred, STATE, imported, local.
    remove_territories : bool
        If True, drop NT and ACT rows.

    Returns
    -------
    pd.DataFrame
        Columns date_inferred, STATE, SOURCE, n_cases; only rows with a
        non-zero case count, index reset so later joins work.
    """
    # Remove non-existent notification dates
    interim_data = interim_data[~np.isnat(interim_data.date_inferred)]

    # Filter out territories
    if remove_territories:
        df_linel = interim_data[(interim_data['STATE'] != 'NT')
                                & (interim_data['STATE'] != 'ACT')]
    else:
        # BUG FIX: df_linel was previously undefined on this path, raising
        # NameError whenever remove_territories=False.
        df_linel = interim_data

    # Melt down so that imported and local are no longer columns. Allows
    # multiple draws for infection date. i.e. create linelist data
    df_linel = df_linel.melt(id_vars=['date_inferred', 'STATE'],
                             var_name='SOURCE', value_name='n_cases')

    # Drop zero-count rows and reset index or the joining doesn't work
    df_linel = df_linel[df_linel.n_cases != 0]
    df_linel = df_linel.reset_index(drop=True)
    return df_linel
##gamma draws take arguments (shape, scale)
def draw_inf_dates(df_linelist, shape_rd=2.77, scale_rd=3.17, offset_rd=0,
                   shape_inc=5.807, scale_inc=0.948, offset_inc=1,nreplicates=1):
    """Impute infection dates for each notification in the linelist.

    For every notified case, draws a reporting delay and an incubation period
    from offset-gamma distributions, sums them, rounds up to whole days and
    subtracts the result from the notification date. Repeated nreplicates
    times, giving one extra column ('0', '1', ...) per replicate.

    Args:
        df_linelist: output of tidy_cases_lambda; must contain 'date_inferred'.
        shape_rd, scale_rd, offset_rd: reporting-delay gamma parameters.
        shape_inc, scale_inc, offset_inc: incubation-period gamma parameters
            (Lauer et al 2020).
        nreplicates: number of infection-date samples drawn per case.

    Returns:
        df_linelist with nreplicates extra columns of imputed infection dates.
    """
    notification_dates = df_linelist['date_inferred']
    nsamples = notification_dates.shape[0]

    # DEFINE DELAY DISTRIBUTION
    # mean_rd = 5.47
    # sd_rd = 4.04
    #scale_rd = shape_rd/(scale_rd)**2
    #shape_rd = shape_rd/scale_rd

    # DEFINE INCUBATION PERIOD DISTRIBUTION
    # Taken from Lauer et al 2020
    # mean_inc = 5.5 days
    # sd_inc = 1.52
    #scale_inc = (scale_inc)**2/shape_inc #scale**2 = var / shape
    #shape_inc =(scale_inc)**2/scale_inc**2

    #Draw from distributions - these are long vectors
    inc_period = offset_inc+np.random.gamma(shape_inc, scale_inc, size = (nsamples*nreplicates))
    rep_delay = offset_rd+np.random.gamma(shape_rd, scale_rd, size = (nsamples*nreplicates))

    #infection date is id_nd_diff days before notification date. This is also a long vector.
    id_nd_diff = inc_period + rep_delay

    #Minutes aren't included in df. Take the ceiling because the day runs from 0000 to 2359. This can still be a long vector.
    whole_day_diff = np.ceil(id_nd_diff)
    time_day_diffmat = whole_day_diff.astype('timedelta64[D]').reshape((nsamples, nreplicates))

    #Vector must be coerced into a nsamples by nreplicates array. Then each column must be subtracted from notification_dates.
    #Subtract days off of notification dates.
    notification_mat = np.tile(notification_dates, (nreplicates,1)).T #notification_dates is repeated as a column nreplicates times.
    infection_dates = notification_mat - time_day_diffmat

    #Make infection dates into a dataframe
    datecolnames = [*map(str,range(nreplicates))]
    infdates_df = pd.DataFrame(infection_dates,columns = datecolnames)

    #Uncomment this if theres errors
    #print([df_linelist.shape, infdates_df.shape])

    #Combine infection dates and original dataframe; verify_integrity guards
    #against index misalignment between the two frames.
    df_inf = pd.concat([df_linelist, infdates_df], axis=1, verify_integrity=True)
    return(df_inf)
def index_by_infection_date(infections_wide):
    """Reshape the wide replicate table into case counts indexed by
    (STATE, INFECTION_DATE, SOURCE), one column per replicate, with every
    day between the first and last infection date present (zero-filled).

    Args:
        infections_wide: output of draw_inf_dates; the replicate date
            columns start at position 4.

    Returns:
        DataFrame indexed by (STATE, INFECTION_DATE, SOURCE) whose columns
        are per-replicate case counts.
    """
    datecolnames = [*infections_wide.columns[4:]]
    # Seed the combined table with the first replicate's counts.
    df_combined = infections_wide[['STATE','SOURCE',datecolnames[0],'n_cases']].groupby(['STATE', datecolnames[0],'SOURCE']).sum()

    #For each column (cn=column number): concatenate each sample as a column.
    for cn in range(1,len(datecolnames)):
        df_addin = infections_wide[['STATE','SOURCE',datecolnames[cn],'n_cases']].groupby(['STATE', datecolnames[cn],'SOURCE']).sum()
        df_combined = pd.concat([df_combined,df_addin], axis=1, ignore_index = True)

    #NaNs are inserted for missing values when concatenating. If it's missing, there were zero infections
    df_combined[np.isnan(df_combined)]=0

    #Rename the index.
    df_combined.index.set_names(["STATE","INFECTION_DATE","SOURCE"], inplace=True)
    #return(df_combined)

    ##INCLUDE ALL DAYS WITH ZERO INFECTIONS IN THE INDEX AS WELL.
    # Reindex to include days with zero total infections.
    local_infs = df_combined.xs('local',level='SOURCE')
    imported_infs = df_combined.xs('imported',level='SOURCE')
    statelist = [*df_combined.index.get_level_values('STATE').unique()]

    #Should all states have the same start date? Current code starts from the first case in each state.
    #For the same start date:
    local_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
    imported_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))

    #Determine start date as the first infection date for all.
    #start_date = np.datetime64("2020-02-01")
    start_date = df_combined.index.get_level_values('INFECTION_DATE').min()

    #Determine end dates as the last infected date by state.
    index_only = df_combined.index.to_frame()
    index_only = index_only.reset_index(drop=True)
    maxdates = index_only['INFECTION_DATE'].max()

    # Reindex each state's local counts onto the full date range.
    for aus_state in statelist:
        state_data = local_infs.xs(aus_state, level='STATE')
        #start_date = state_data.index.min()
        #dftest.index=dftest.reindex(alldates, fill_value=0)
        alldates = pd.date_range(start_date, maxdates) #All days from start_date to the last infection day.
        local_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)

    # Same for imported counts.
    for aus_state in statelist:
        state_data = imported_infs.xs(aus_state, level='STATE')
        alldates = pd.date_range(start_date, maxdates)
        imported_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)

    #Convert dictionaries to data frames
    df_local_inc_zeros = pd.concat(local_statedict)
    df_local_inc_zeros['SOURCE']='local'
    df_imp_inc_zeros = pd.concat(imported_statedict)
    df_imp_inc_zeros['SOURCE']='imported'

    #Merge dataframes and reindex; level_0/level_1 are the state and date
    #levels produced by concat-with-dict above.
    df_inc_zeros = pd.concat([df_local_inc_zeros, df_imp_inc_zeros])

    df_inc_zeros = df_inc_zeros.reset_index()
    df_inc_zeros= df_inc_zeros.groupby(['level_0',"level_1","SOURCE"]).sum()
    df_inc_zeros.index = df_inc_zeros.index.rename(['STATE','INFECTION_DATE',"SOURCE"])
    return(df_inc_zeros)
def generate_lambda(infection_dates, shape_gen=3.64/3.07, scale_gen=3.07,
                    trunc_days=21,shift=0, offset=1):
    """
    Given array of infection_dates (N_dates by N_samples), where values are possible
    number of cases infected on this day, generate the force of infection Lambda_t,
    a N_dates-tau by N_samples array.

    Default generation interval parameters taken from Ganyani et al 2020.
    """
    from scipy.stats import gamma

    # Discretise the generation-interval gamma pdf at shifted bin midpoints.
    midpoints = [t + shift for t in range(trunc_days + 1)]
    pdf_at_mids = gamma.pdf(midpoints, a=shape_gen, scale=scale_gen)
    pdf_at_mids = pdf_at_mids / sum(pdf_at_mids)  # renormalise the pdf

    # Shift the weights right by `offset` days, zero-padding the front.
    ws = np.zeros(trunc_days)
    ws[offset:] = pdf_at_mids[:trunc_days - offset]

    # Convolve each sample column with the weights; 'valid' trims the edges.
    n_days, n_samples = infection_dates.shape
    lambda_t = np.zeros(shape=(n_days - trunc_days + 1, n_samples))
    for col in range(n_samples):
        lambda_t[:, col] = np.convolve(infection_dates[:, col], ws, mode='valid')
    return lambda_t
def lambda_all_states(df_infection, **kwargs):
    """
    Use generate_lambda on every state.

    Args:
        df_infection: DataFrame indexed by (STATE, INFECTION_DATE, SOURCE);
            sources are summed together before computing the force of infection.
        **kwargs: forwarded to generate_lambda.

    Returns:
        dict mapping state name -> lambda array.
    """
    # The source-summed table is identical for every state, so compute it
    # once instead of redoing the groupby inside the loop (was O(states)
    # redundant groupbys).
    df_total_infections = df_infection.groupby(['STATE', 'INFECTION_DATE']).agg(sum)
    statelist = [*df_infection.index.get_level_values('STATE').unique()]
    lambda_dict = {}
    for state in statelist:
        lambda_dict[state] = generate_lambda(
            df_total_infections.loc[state].values,
            **kwargs
        )
    return lambda_dict
def Reff_from_case(cases_by_infection, lamb, prior_a=1, prior_b=5, tau=7, samples=1000):
    r"""
    Using Cori et al. 2013, given case incidence by date of infection and the
    force of infection \Lambda_t on day t, estimate the effective reproduction
    number at time t with smoothing parameter \tau.

    cases_by_infection: A T by N array, for T days and N samples
    lamb : A (T-20) by N array, for T days and N samples
    Returns the posterior gamma parameters (a, b) and one draw R per cell.
    """
    # Rolling tau-day sums computed from cumulative sums.
    cum_cases = np.cumsum(cases_by_infection, axis=0)
    # Drop the first 20 incidences (generation interval length) so the case
    # and lambda arrays line up.
    cum_cases = cum_cases[20:, :]
    cum_lambda = np.cumsum(lamb, axis=0)
    window_cases = cum_cases[tau:, :] - cum_cases[:-tau, :]
    window_lambda = cum_lambda[tau:, :] - cum_lambda[:-tau, :]

    # Gamma posterior: shape = prior + cases, scale = 1/(1/prior_b + lambda).
    a = prior_a + window_cases
    b = 1 / (1 / prior_b + window_lambda)
    R = np.random.gamma(a, b)  # (shape, scale) parameterisation

    # NOTE(review): R should arguably be emptied when there are too few cases
    # (original TODO). Inputs are T-tau by N; output is T-tau by N.
    return a, b, R
def generate_summary(samples, dates_by='rows'):
    """
    Summarise an array of samples (T by N) with mean, std and the
    5/25/50/75/95 percentiles.

    If dates_by == 'rows' the rows index dates and statistics are taken
    across columns; otherwise across rows.

    Returns:
        dict of 1-D arrays keyed by statistic name.
    """
    axis = 1 if dates_by == 'rows' else 0
    levels = (0.05, 0.25, 0.5, 0.75, 0.95)
    bottom, lower, median, upper, top = np.quantile(samples, levels, axis=axis)
    return {
        'mean': np.mean(samples, axis=axis),
        'std': np.std(samples, axis=axis),
        'bottom': bottom,
        'lower': lower,
        'median': median,
        'upper': upper,
        'top': top,
    }
def plot_Reff(Reff:dict, dates=None, ax_arg=None, truncate=None, **kwargs):
    """
    Given summary statistics of Reff as a dictionary, plot the distribution over time.

    Args:
        Reff: dict from generate_summary (keys mean/lower/upper/bottom/top).
        dates: x-axis values; defaults to 0..len(mean)-1.
        ax_arg: optional (fig, ax) pair to draw into.
        truncate: optional (start, stop) slice applied to dates and series.
        **kwargs: forwarded to ax.plot (e.g. label).

    Returns:
        (fig, ax) tuple.
    """
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-poster')
    from datetime import datetime as dt

    if ax_arg is None:
        fig, ax = plt.subplots(figsize=(12,9))
    else:
        fig, ax = ax_arg
    # NOTE(review): ax._get_lines.prop_cycler is private Matplotlib API
    # (removed in Matplotlib 3.8) — confirm the pinned version supports it.
    color_cycle = ax._get_lines.prop_cycler
    curr_color = next(color_cycle)['color']
    if dates is None:
        dates = range(len(Reff['mean']))
    if truncate is None:
        # Mean line with the lower/upper and bottom/top quantile bands.
        ax.plot(dates, Reff['mean'], color= curr_color, **kwargs)
        ax.fill_between(dates, Reff['lower'],Reff['upper'], alpha=0.4, color = curr_color)
        ax.fill_between(dates, Reff['bottom'],Reff['top'], alpha=0.4, color= curr_color)
    else:
        ax.plot(dates[truncate[0]:truncate[1]], Reff['mean'][truncate[0]:truncate[1]], color= curr_color, **kwargs)
        ax.fill_between(dates[truncate[0]:truncate[1]], Reff['lower'][truncate[0]:truncate[1]],
                        Reff['upper'][truncate[0]:truncate[1]],
                        alpha=0.4, color = curr_color)
        ax.fill_between(dates[truncate[0]:truncate[1]], Reff['bottom'][truncate[0]:truncate[1]],
                        Reff['top'][truncate[0]:truncate[1]],
                        alpha=0.4, color= curr_color)
    #plt.legend()

    #grid line at R_eff =1
    ax.set_yticks([1],minor=True,)
    ax.set_yticks([0,2,3],minor=False)
    ax.set_yticklabels([0,2,3],minor=False)
    ax.yaxis.grid(which='minor',linestyle='--',color='black',linewidth=2)
    ax.tick_params(axis='x', rotation = 90)

    return fig, ax
def plot_all_states(R_summ_states,df_interim, dates,
start='2020-03-01',end='2020-08-01',save=True, date =None, tau = 7,
nowcast_truncation=-10):
"""
Plot results over time for all jurisdictions.
dates: dictionary of (region, date) pairs where date holds the relevant
dates for plotting cases by inferred symptom-onset
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
states = df_interim.STATE.unique().tolist()
states.remove('NT')
states.remove('ACT')
date_filter = | pd.date_range(start=start,end=end) | pandas.date_range |
import numpy as np
import pandas as pd
from collections import Counter
from IPython.display import display
def cluster_acc(y_true, cluster):
    """Greedy clustering accuracy.

    Clusters (largest first) each claim the majority class among the classes
    not yet claimed by a bigger cluster; points outside their cluster's
    claimed class count as errors.

    Returns:
        accuracy in [0, 1].
    """
    frame = pd.DataFrame({'y_true': y_true, 'cluster': cluster})
    unclaimed = set(y_true)
    # Cluster codes ordered by descending size (stable sort keeps tie order).
    ordered_codes = [code for code, _ in
                     sorted(Counter(cluster).items(), key=lambda x: x[1], reverse=True)]
    misassigned = dict()
    # display(frame)
    for code in ordered_codes:
        # Per-class counts within this cluster.
        class_counts = frame.loc[frame['cluster'] == code, 'y_true'].value_counts().to_dict()
        total_in_cluster = sum(class_counts.values())
        # Only classes not yet claimed by a larger cluster are eligible.
        class_counts = {c: class_counts[c] for c in unclaimed if c in class_counts}
        if class_counts:
            majority = max(class_counts, key=class_counts.get)
            misassigned[code] = total_in_cluster - class_counts[majority]
            unclaimed.remove(majority)
        else:
            # No eligible class left: every point in this cluster is wrong.
            misassigned[code] = total_in_cluster
    return 1 - (sum(misassigned.values()) / len(frame))
def purity(y_true, cluster):
compare_df = | pd.DataFrame({'y_true': y_true, 'cluster': cluster}) | pandas.DataFrame |
import pandas as pd
from itertools import product
from .sourcehooks import SourceHook
from .agency import Agency
from .lrseg import Lrseg
from .sector import Sector
class LoadSource(SourceHook):
def __init__(self, sourcedata=None, metadata=None):
    """ Methods for querying CAST data related to Load Sources

    Args:
        sourcedata: CAST source-data object shared with the helper hooks.
        metadata: CAST metadata object shared with the helper hooks.
    """
    SourceHook.__init__(self, sourcedata=sourcedata, metadata=metadata)
    # Sibling hooks used by the composite queries below.
    self.agency = Agency(sourcedata=sourcedata, metadata=metadata)
    self.lrseg = Lrseg(sourcedata=sourcedata, metadata=metadata)
    self.sector = Sector(sourcedata=sourcedata, metadata=metadata)
def all_names(self, astype=pd.Series):
    """Every load source short name, returned as `astype` (default pd.Series)."""
    name_column = 'loadsourceshortname'
    return self.grab_sourcetbl_column(tbl='TblLoadSource', col=name_column, astype=astype)
def all_ids(self, astype=pd.Series):
    """Every load source id, returned as `astype` (default pd.Series)."""
    id_column = 'loadsourceid'
    return self.grab_sourcetbl_column(tbl='TblLoadSource', col=id_column, astype=astype)
def ids_from_names(self, names=None):
    """Map load source short names to their loadsourceids."""
    return self._map_using_sourcetbl(names, tbl='TblLoadSource',
                                     tocol='loadsourceid',
                                     fromcol='loadsourceshortname')
def names_from_ids(self, ids=None):
    """Map loadsourceids to their short names."""
    return self._map_using_sourcetbl(ids, tbl='TblLoadSource',
                                     tocol='loadsourceshortname',
                                     fromcol='loadsourceid')
def loadsourcegroupids_from(self, sectorids=None, loadsourceids=None):
    """Convert either sectorids or loadsourceids to loadsourcegroupids.

    Exactly one keyword must be given (DataFrames are collapsed to a truthy
    flag before the exclusivity check); raises ValueError otherwise.
    """
    supplied = [True if isinstance(arg, pd.DataFrame) else arg
                for arg in (sectorids, loadsourceids)]
    if self.checkOnlyOne(supplied) is False:
        raise ValueError('One and only one keyword argument must be specified')

    if sectorids is not None:
        return self.__loadsourcegroupids_from_sectorids(getfrom=sectorids)
    elif loadsourceids is not None:
        return self.__loadsourcegroupids_from_loadsourceids(getfrom=loadsourceids)
def __loadsourcegroupids_from_sectorids(self, getfrom=None):
    """Map a sectorid table/series to loadsourcegroupids."""
    sector_table = self.forceToSingleColumnDataFrame(getfrom, colname='sectorid')
    return self.singleconvert(sourcetbl='TblLoadSourceGroupSector',
                              toandfromheaders=['loadsourcegroupid', 'sectorid'],
                              fromtable=sector_table, toname='loadsourcegroupid')
def __loadsourcegroupids_from_loadsourceids(self, getfrom=None):
    """Map a loadsourceid table/series to loadsourcegroupids."""
    loadsource_table = self.forceToSingleColumnDataFrame(getfrom, colname='loadsourceid')
    return self.singleconvert(sourcetbl='TblLoadSourceGroupLoadSource',
                              toandfromheaders=['loadsourcegroupid', 'loadsourceid'],
                              fromtable=loadsource_table, toname='loadsourcegroupid')
def fullnames_from_shortnames(self, shortnames):
    """Map load source short names to their full names."""
    return self._map_using_sourcetbl(shortnames, tbl='TblLoadSource',
                                     tocol='loadsource',
                                     fromcol='loadsourceshortname')
def shortnames_from_fullnames(self, fullnames, use_order_of_sourcetbl=True):
    """Map load source full names to their short names.

    NOTE(review): use_order_of_sourcetbl is accepted but currently unused;
    it is kept for backward compatibility with existing callers.
    """
    return self._map_using_sourcetbl(fullnames, tbl='TblLoadSource',
                                     tocol='loadsourceshortname',
                                     fromcol='loadsource')
def loadsourceids_from(self, sectorids=None):
    """Map sectorids to the loadsourceids belonging to those sectors."""
    return self._map_using_sourcetbl(sectorids, tbl='TblLoadSource',
                                     tocol='loadsourceid',
                                     fromcol='sectorid')
def single_loadsourcegroupid_from_loadsourcegroup_name(self, loadsourcegroupname):
    """Return the loadsourcegroupid(s) matching a group name, as a list."""
    groups = self.source.TblLoadSourceGroup  # get relevant source data
    name_matches = groups['loadsourcegroup'] == loadsourcegroupname
    return groups['loadsourcegroupid'][name_matches].tolist()
def sourceLrsegAgencyIDtable_from_lrsegAgencySectorids(self, lrsegagencyidtable=None, sectorids=None):
    """Get the load sources present (whether zero acres or not) in the specified lrseg-agency-sectors

    Args:
        lrsegagencyidtable: DataFrame with 'lrsegid' and 'agencyid' columns.
        sectorids: sector ids used to restrict the load sources.

    Returns:
        DataFrame of (lrsegid, agencyid, loadsourceid, unitid) rows, with
        Agriculture/Septic load sources restricted to NONFED agency parcels.
    """
    # get relevant source data
    TblLandRiverSegmentAgencyLoadSource = self.source.TblLandRiverSegmentAgencyLoadSource

    # use [lrseg, agency] to get loadsourceids
    columnmask = ['lrsegid', 'agencyid', 'loadsourceid', 'unitid']
    tblloadsourceids1 = TblLandRiverSegmentAgencyLoadSource.loc[:, columnmask].merge(lrsegagencyidtable,
                                                                                    how='inner')
    # use sectors/loadsourcegroups to get loadsourceids
    tblloadsourceids2 = self.loadsourceids_from(sectorids=sectorids)

    # get the intersection of these two loadsourceid tables
    tblsubset = tblloadsourceids1.merge(tblloadsourceids2, how='inner')

    # ** LoadSources that are of the Ag or Septic sector only go on NONFED agency parcels **
    # So, first, identify the LoadSources that are in the Ag or Septic sector
    sectoridsToRemove = self.sector.ids_from_names(['Agriculture', 'Septic'])
    loadsourceids = list(self.loadsourceids_from(sectorids=sectoridsToRemove)['loadsourceid'])
    # Then, get the NONFED agencyid
    agencyidsToRemove = list(self.agency.ids_from_names(['NONFED'])['agencyid'])
    # Then, extract only the rows that aren't those loadsources and not NONFED
    tblreturn = tblsubset[~((tblsubset['loadsourceid'].isin(loadsourceids)) &
                            (~tblsubset['agencyid'].isin(agencyidsToRemove)))]
    return tblreturn
    # BUG FIX: removed an unreachable `return tblsubset` that followed the
    # return above (dead code implying the Ag/Septic filter was optional).
def sourceCountyAgencyIDtable_from_sourceLrsegAgencyIDtable(self, sourceAgencyLrsegIDtable=None):
    """Roll an lrseg-level (agency, loadsource) table up to county level,
    dropping duplicate (county, agency, loadsource) triples."""
    # Map each lrseg to its county via the source lookup table.
    lrseg_to_county = self.source.TblLandRiverSegment.loc[:, ['lrsegid', 'countyid']]
    merged = lrseg_to_county.merge(sourceAgencyLrsegIDtable, how='inner')
    merged.drop_duplicates(subset=['countyid', 'agencyid', 'loadsourceid'], inplace=True)
    return merged.loc[:, ['countyid', 'agencyid', 'loadsourceid']]
def loadsourceids_from_lrsegid_agencyid_sectorid(self, lrsegids=None, agencyids=None, sectorids=None):
    """Get the load sources present (whether zero acres or not) in the specified lrseg-agency-sectors

    Args:
        lrsegids: DataFrame with an 'lrsegid' column.
        agencyids: DataFrame with an 'agencyid' column.
        sectorids: sector ids used to restrict the load sources.

    Returns:
        single-column DataFrame of the matching 'loadsourceid' values.
    """
    # get relevant source data
    TblLandRiverSegmentAgencyLoadSource = self.source.TblLandRiverSegmentAgencyLoadSource

    # Generate all combinations of the lrseg, agency, sectors
    combos = list(product(lrsegids['lrsegid'], agencyids['agencyid']))
    combos = pd.DataFrame(combos, columns=['lrsegid', 'agencyid'])

    # use [lrseg, agency] to get loadsourceids
    columnmask = ['lrsegid', 'agencyid', 'loadsourceid', 'unitid']
    tblloadsourceids1 = TblLandRiverSegmentAgencyLoadSource.loc[:, columnmask].merge(combos, how='inner')

    # use sectors/loadsourcegroups to get loadsourceids
    tblloadsourceids2 = self.loadsourceids_from(sectorids=sectorids)

    # get the intersection of these two loadsourceid tables
    tblloadsourceids = tblloadsourceids1.merge(tblloadsourceids2, how='inner')

    return tblloadsourceids.loc[:, ['loadsourceid']]
def loadsources_from_lrseg_agency_sector(self, lrsegs=None, agencies=None, sectors=None):
"""Get the load sources present (whether zero acres or not) in the specified lrseg-agency-sectors
"""
# get relevant source data
TblLandUsePreBmp = self.source.TblLandUsePreBmp # use this to find load sources with >0 acres
TblLandRiverSegmentAgencyLoadSource = self.source.TblLandRiverSegmentAgencyLoadSource
TblLoadSource = self.source.TblLoadSource
TblLoadSourceGroupLoadSource = self.source.TblLoadSourceGroupLoadSource
# Convert names to IDs
lrsegids = self.lrseg.ids_from_names(names=lrsegs)
agencyids = self.agency.ids_from_names(agencycodes=agencies)
sectorids = self.sector.ids_from_names(names=sectors)
# Generate all combinations of the lrseg, agency, sectors
combos = list(product(lrsegids, agencyids))
combos = pd.DataFrame(combos, columns=['lrsegid', 'agencyid'])
# use [lrseg, agency] to get loadsourceids
columnmask = ['lrsegid', 'agencyid', 'loadsourceid', 'unitid']
tblloadsourceids1 = TblLandRiverSegmentAgencyLoadSource.loc[:, columnmask].merge(combos, how='inner')
# use sectors/loadsourcegroups to get loadsourceids
sectorid_df = | pd.DataFrame(sectorids, columns=['sectorid']) | pandas.DataFrame |
import uuid
from pprint import pformat
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import pandas as pd
from .utils import add_default_to_data
from .utils import create_db_folders
from .utils import dump_cached_schema
from .utils import dump_db
from .utils import generate_hash_id
from .utils import get_db_path
from .utils import load_cached_schema
from .utils import load_db
from .utils import validate_data_with_schema
from .utils import validate_query_data
from .utils import validate_schema
from .utils import validate_update_data
from onstrodb.errors.common_errors import DataDuplicateError
from onstrodb.errors.common_errors import DataError
from onstrodb.errors.schema_errors import SchemaError
# Type aliases shared across the DB API.
DBDataType = Dict[str, object]  # one record: column name -> value
SchemaDictType = Dict[str, Dict[str, object]]  # column name -> column spec
GetType = Union[Dict[str, Union[Dict[str, object], str]], None]  # query result shape
class OnstroDb:
"""The main API for the DB"""
def __init__(self, db_name: str, schema: Optional[SchemaDictType] = None,
             db_path: Optional[str] = None, allow_data_duplication: bool = False,
             in_memory: bool = False) -> None:
    """Create or load a database instance.

    Args:
        db_name: name of the database; also names its storage folder.
        schema: optional mapping of column name -> column spec.
        db_path: optional parent folder overriding the default location.
        allow_data_duplication: if True, duplicate records are stored under
            salted hash ids instead of raising DataDuplicateError.
        in_memory: if True, the DB is never persisted to disk.
    """
    self._db_name = db_name
    self._schema = schema
    self._data_dupe = allow_data_duplication
    self._in_memory = in_memory

    # db variables
    self._db: pd.DataFrame = None
    self._db_path: str = get_db_path(db_name)

    if db_path:
        self._db_path = f"{db_path}/{self._db_name}"

    # validate the user defined schema
    self._validate_schema()

    # meta data about the db
    if self._schema:
        self._columns = list(self._schema.keys())

    # start the loading sequence
    self._load_initial_schema()
    self._reload_db()
def __repr__(self) -> str:
return pformat(self._to_dict(self._db), indent=4, width=80, sort_dicts=False)
def __len__(self) -> int:
return len(self._db.index)
def add(self, values: List[Dict[str, object]], get_hash_id: bool = False) -> Union[None, List[str]]:
    """Adds a list of values to the DB

    Each record is schema-validated, filled with defaults for missing
    columns and stored under a content hash.

    Args:
        values: list of records (column name -> value).
        get_hash_id: if True, return the hash ids of the newly added records.

    Raises:
        DataError: when a record does not comply with the schema.
        DataDuplicateError: on duplicate records when duplication is disallowed.
    """
    new_data: List[Dict[str, object]] = []
    new_hashes: List[str] = []
    # NOTE(review): when self._schema is unset the loop body is skipped and
    # the supplied records are silently dropped — confirm this is intended.
    for data in values:
        if self._schema:
            if validate_data_with_schema(data, self._schema):
                data = add_default_to_data(data, self._schema)
                # Include the not-yet-committed hashes so duplicates within
                # this same batch are detected too.
                hash_id = self._get_hash(
                    [str(i) for i in data.values()], list(self._db.index) + new_hashes)
                new_data.append(data)
                new_hashes.append(hash_id)
            else:
                raise DataError(
                    f"The data {data!r} does not comply with the schema")

    new_df = pd.DataFrame(new_data, new_hashes)
    try:
        # verify_integrity makes pandas reject duplicate index entries.
        self._db = pd.concat([self._db, new_df],
                             verify_integrity=not self._data_dupe)
    except ValueError:
        raise DataDuplicateError(
            "The data provided, contains duplicate values") from None
    if get_hash_id:
        return new_hashes
    return None
def get_by_query(self, query: Dict[str, object]) -> GetType:
"""Get values from the DB. queries must comply with the schema and must be of length 1"""
if self._schema:
if validate_query_data(query, self._schema):
key = list(query)[0]
filt = self._db[key] == query[key]
return self._to_dict(self._db.loc[filt])
return None
def get_by_hash_id(self, hash_id: str) -> GetType:
"""Get values from the DB based on their hash ID"""
if hash_id in self._db.index:
return self._to_dict(self._db.loc[hash_id])
return {}
def get_hash_id(self, condition: Dict[str, object]) -> List[str]:
"""Returns a hash id or a list of ids that matches all the conditions"""
# the validate_update_method can be used as the same verification style is required here.
if self._schema:
if validate_update_data(condition, self._schema):
return list(self._db.loc[(self._db[list(condition)]
== pd.Series(condition)).all(axis=1)].index)
return []
def get_all(self) -> GetType:
"""Return the entire DB in a dict representation"""
return self._to_dict(self._db)
def update_by_query(self, query: Dict[str, object], update_data: DBDataType) -> Dict[str, str]:
    """Update the records in the DB with a query

    Every row matching the single-key query gets update_data applied, and
    its hash id is recomputed from the new contents. Work happens on a deep
    copy; the real DB is swapped in only if re-hashing succeeds.

    Returns:
        mapping of old hash id -> new hash id ({} if nothing was updated).
    """
    u_db = self._db.copy(deep=True)
    if self._schema:
        if validate_query_data(query, self._schema) and validate_update_data(update_data, self._schema):
            q_key = list(query)[0]
            q_val = query[q_key]
            filt = u_db[q_key] == q_val
            for key, val in update_data.items():
                u_db.loc[filt, key] = val
            # update the indexes
            new_vals = u_db.loc[filt].to_dict("index")
            new_idx = self._verify_and_get_new_idx(
                new_vals, list(u_db.index))
            if new_idx:
                new_df = self._update_hash_id(new_idx, u_db)
                self._db = new_df.copy(deep=True)
                del [u_db, new_df]
            return new_idx
    return {}
def update_by_hash_id(self, hash_id: str, update_data: DBDataType) -> Dict[str, str]:
    """Update the records in the DB using their hash id

    Applies update_data to the single row stored under hash_id and
    recomputes its hash from the new contents. Work happens on a deep copy;
    the real DB is swapped in only if re-hashing succeeds.

    Returns:
        mapping of old hash id -> new hash id ({} if nothing was updated).
    """
    u_db = self._db.copy(deep=True)
    if hash_id in u_db.index:
        if self._schema:
            if validate_update_data(update_data, self._schema):
                for key, val in update_data.items():
                    u_db.loc[hash_id, key] = val
                # update the indexes
                new_vals = pd.DataFrame(
                    u_db.loc[hash_id].to_dict(), index=[hash_id]).to_dict("index")
                new_idx = self._verify_and_get_new_idx(
                    new_vals, list(u_db.index))
                if new_idx:
                    new_df = self._update_hash_id(new_idx, u_db)
                    self._db = new_df.copy(deep=True)
                    del [u_db, new_df]
                return new_idx
    return {}
def delete_by_query(self, query: Dict[str, object]) -> None:
"""Delete the records from the db that complies to the query"""
if self._schema:
if validate_query_data(query, self._schema):
key = list(query)[0]
filt = self._db[key] != query[key]
self._db = self._db.loc[filt]
def delete_by_hash_id(self, hash_id: str) -> None:
"""Delete the a records from thr DB based on their hash_id"""
ids = list(self._db.index)
if hash_id in ids:
self._db = self._db.drop(hash_id)
def raw_db(self) -> pd.DataFrame:
"""Returns the in in memory representation of the DB"""
return self._db.copy(deep=True)
def purge(self) -> None:
"""Removes all the data from the runtime instance of the db"""
self._db = self._db.iloc[0:0]
def commit(self) -> None:
"""Store the current db in a file"""
if isinstance(self._db, pd.DataFrame):
if not self._in_memory:
dump_db(self._db, self._db_path, self._db_name)
def _get_hash(self, values: List[str], hash_list: List[str]) -> str:
    """returns the hash id based on the dupe value

    With duplication disallowed the hash is purely content-based. With
    duplication allowed, collisions against hash_list are retried with a
    random salt until unique; hash_list is appended to as a side effect.
    """
    def gen_dupe_hash(extra: int = 0) -> str:
        # Salt the content hash with a random int after a collision.
        if extra:
            hash_ = generate_hash_id(values + [str(extra)])
        else:
            hash_ = generate_hash_id(values)
        if hash_ in hash_list:
            return gen_dupe_hash(uuid.uuid4().int)
        else:
            hash_list.append(hash_)
            return hash_

    if not self._data_dupe:
        return generate_hash_id(values)
    else:
        return gen_dupe_hash()
def _update_hash_id(self, new_hashes: Dict[str, str], _df: pd.DataFrame) -> pd.DataFrame:
"""Updates the hash to the new hashes """
for idx, hash_ in new_hashes.items():
_df.rename(index={idx: hash_}, inplace=True)
return _df
def _verify_and_get_new_idx(self, new_vals: Dict[str, Dict[str, object]], hash_list: List[str]) -> Dict[str, str]:
    """verify whether the updated is not a duplicate of an existing data

    Re-hashes each updated record and checks the result against the DB
    index, the sibling updated rows and the hashes assigned so far.

    Returns:
        mapping of old hash id -> new hash id.

    Raises:
        DataDuplicateError: on a collision when duplication is disallowed.
    """
    new_hashes: Dict[str, str] = {}
    idxs = list(new_vals)
    for k, v in new_vals.items():
        hash_ = self._get_hash(
            list(map(str, v.values())), hash_list)
        # A clash with the DB, with another updated row, or with an already
        # assigned new hash counts as duplication (unless allowed).
        if hash_ in self._db.index or (hash_ in idxs and k != hash_) or hash_ in new_hashes.values():
            if not self._data_dupe:
                new_hashes.clear()
                raise DataDuplicateError(
                    "The updated data is a duplicate of an existing data in the DB")
            else:
                new_hashes[k] = hash_
        else:
            new_hashes[k] = hash_
    return new_hashes
def _to_dict(self, _df: Union[pd.DataFrame, pd.Series]) -> Dict[str, Union[Dict[str, object], str]]:
"""Returns the dict representation of the DB based on
the allow_data_duplication value
"""
if isinstance(_df, pd.DataFrame):
return _df.to_dict("index")
else:
return _df.to_dict()
def _validate_schema(self) -> None:
if self._schema:
validate_schema(self._schema)
def _reload_db(self) -> None:
    """Reload the pandas DataFrame backing the DB.

    Loads the persisted frame from disk when available; otherwise starts an
    empty frame with the schema's columns.
    """
    if not self._in_memory:
        data = load_db(self._db_path, self._db_name)
        if isinstance(data, pd.DataFrame):
            self._db = data
        else:
            # NOTE(review): self._columns only exists when a schema was
            # supplied — confirm _load_initial_schema guarantees one here.
            self._db = pd.DataFrame(columns=self._columns)
    # NOTE(review): for in_memory instances self._db stays None until data
    # is added — confirm callers handle that.
# Imports: standard library
import os
import re
import logging
from abc import ABC
from typing import Any, Set, Dict, List, Tuple, Optional
from datetime import datetime
# Imports: third party
import h5py
import numpy as np
import pandas as pd
import unidecode
# Imports: first party
from ml4c3.utils import get_unix_timestamps
from definitions.edw import EDW_FILES, MED_ACTIONS
from definitions.icu import ALARMS_FILES, ICU_SCALE_UNITS
from definitions.globals import TIMEZONE
from ingest.icu.data_objects import (
Event,
Procedure,
Medication,
StaticData,
Measurement,
BedmasterAlarm,
BedmasterSignal,
)
from tensorize.bedmaster.bedmaster_stats import BedmasterStats
from tensorize.bedmaster.match_patient_bedmaster import PatientBedmasterMatcher
# pylint: disable=too-many-branches, dangerous-default-value
class Reader(ABC):
    """
    Parent class for our Readers class.

    As an abstract class, it can't be directly instanced. Its children
    should be used instead.
    """

    @staticmethod
    def _ensure_contiguous(data: np.ndarray) -> np.ndarray:
        """Return `data` as a C-contiguous array of a uniform dtype (int,
        float or bytes), transliterating exotic characters when needed.
        Empty arrays are returned unchanged."""
        if len(data) == 0:
            return data
        # Prefer a numeric dtype; fall back to a bytes dtype otherwise.
        try:
            data = data.astype(float)
            target_dtype = int if all(value.is_integer() for value in data) else float
        except ValueError:
            target_dtype = "S"
        try:
            data = np.ascontiguousarray(data, dtype=target_dtype)
        except (UnicodeEncodeError, SystemError):
            # Transliterate non-ASCII characters and retry as bytes.
            logging.info("Unknown character. Not ensuring contiguous array.")
            cleaned = [unidecode.unidecode(str(element)) for element in data]
            data = np.ascontiguousarray(cleaned, dtype="S")
        except ValueError:
            logging.exception(
                f"Unknown method to convert np.ndarray of "
                f"{target_dtype} objects to numpy contiguous type.",
            )
            raise
        return data
class EDWReader(Reader):
"""
Implementation of the Reader for EDW data.
Usage:
>>> reader = EDWReader('MRN')
>>> hr = reader.get_measurement('HR')
"""
def __init__(
    self,
    path: str,
    mrn: str,
    csn: str,
    med_file: str = EDW_FILES["med_file"]["name"],
    move_file: str = EDW_FILES["move_file"]["name"],
    adm_file: str = EDW_FILES["adm_file"]["name"],
    demo_file: str = EDW_FILES["demo_file"]["name"],
    vitals_file: str = EDW_FILES["vitals_file"]["name"],
    lab_file: str = EDW_FILES["lab_file"]["name"],
    surgery_file: str = EDW_FILES["surgery_file"]["name"],
    other_procedures_file: str = EDW_FILES["other_procedures_file"]["name"],
    transfusions_file: str = EDW_FILES["transfusions_file"]["name"],
    events_file: str = EDW_FILES["events_file"]["name"],
    medhist_file: str = EDW_FILES["medhist_file"]["name"],
    surghist_file: str = EDW_FILES["surghist_file"]["name"],
    socialhist_file: str = EDW_FILES["socialhist_file"]["name"],
):
    """
    Init EDW Reader.

    All file arguments are base names; each is expanded to
    <path>/<mrn>/<csn>/<name>.csv via infer_full_path.

    :param path: absolute path of files.
    :param mrn: MRN of the patient.
    :param csn: CSN of the patient visit.
    :param med_file: file containing the medicines data from the patient.
                     Inferred from defaults if not given.
    :param move_file: file containing the movements of the patient
                     (admission, transfer and discharge) from the patient.
    :param adm_file: file containing the admission data from the patient.
    :param demo_file: file containing the demographic data from the patient.
    :param vitals_file: file containing the vital signals from the patient.
    :param lab_file: file containing the laboratory signals from the patient.
    :param surgery_file: file containing the surgeries performed to the patient.
    :param other_procedures_file: file containing procedures performed to
                     the patient.
    :param transfusions_file: file containing the transfusions performed to
                     the patient.
    :param events_file: file containing the events during the patient stay.
    :param medhist_file: file containing the medical history information of
                     the patient.
    :param surghist_file: file containing the surgical history information of
                     the patient.
    :param socialhist_file: file containing the social history information of
                     the patient.
    """
    self.path = path
    self.mrn = mrn
    self.csn = csn

    self.move_file = self.infer_full_path(move_file)
    self.demo_file = self.infer_full_path(demo_file)
    self.vitals_file = self.infer_full_path(vitals_file)
    self.lab_file = self.infer_full_path(lab_file)
    self.med_file = self.infer_full_path(med_file)
    self.adm_file = self.infer_full_path(adm_file)
    self.surgery_file = self.infer_full_path(surgery_file)
    self.other_procedures_file = self.infer_full_path(other_procedures_file)
    self.transfusions_file = self.infer_full_path(transfusions_file)
    self.events_file = self.infer_full_path(events_file)
    self.medhist_file = self.infer_full_path(medhist_file)
    self.surghist_file = self.infer_full_path(surghist_file)
    self.socialhist_file = self.infer_full_path(socialhist_file)
    self.timezone = TIMEZONE
def infer_full_path(self, file_name: str) -> str:
    """
    Build the full path to a patient data file from MRN and CSN.

    Used if a file is not specified on the input. The ".csv"
    extension is appended when missing.

    :param file_name: <str> base file name; expected options include:
        'medications.csv', 'demographics.csv', 'labs.csv',
        'flowsheet.csv', 'admission-vitals.csv',
        'surgery.csv', 'procedures.csv', 'transfusions.csv'
    :return: <str> the inferred path: <path>/<mrn>/<csn>/<file_name>
    """
    if not file_name.endswith(".csv"):
        file_name = f"{file_name}.csv"
    full_path = os.path.join(self.path, self.mrn, self.csn, file_name)
    return full_path
def list_vitals(self) -> List[str]:
    """
    List all the vital signs taken from the patient.

    Measurements recorded outside the admission window are excluded.

    :return: <List[str]> List with all the available vital signals
        from the patient
    """
    name_col = EDW_FILES["vitals_file"]["columns"][0]
    time_col = EDW_FILES["vitals_file"]["columns"][3]
    admit_col = EDW_FILES["adm_file"]["columns"][3]
    discharge_col = EDW_FILES["adm_file"]["columns"][4]
    admission = pd.read_csv(self.adm_file)
    start = admission[admit_col].values[0]
    stop = admission[discharge_col].values[0]
    # Keep only measurements taken after admission (and before
    # discharge, when a discharge date exists).
    vitals = pd.read_csv(self.vitals_file)
    vitals = vitals[vitals[time_col] >= start]
    if str(stop) != "nan":
        vitals = vitals[vitals[time_col] <= stop]
    return list(vitals[name_col].astype("str").str.upper().unique())
def list_labs(self) -> List[str]:
    """
    List all the lab measurements taken from the patient.

    :return: <List[str]> List with all the available lab measurements
        from the patient.
    """
    name_col = EDW_FILES["lab_file"]["columns"][0]
    labs = pd.read_csv(self.lab_file)
    names = labs[name_col].astype("str").str.upper()
    return list(names.unique())
def list_medications(self) -> List[str]:
    """
    List all the medications given to the patient.

    Only rows whose status is one of MED_ACTIONS are considered.

    :return: <List[str]> List with all the medications on
        the patient's record
    """
    name_col = EDW_FILES["med_file"]["columns"][0]
    status_col = EDW_FILES["med_file"]["columns"][1]
    meds = pd.read_csv(self.med_file)
    active = meds[meds[status_col].isin(MED_ACTIONS)]
    return list(active[name_col].astype("str").str.upper().unique())
def list_surgery(self) -> List[str]:
    """
    List all the types of surgery performed on the patient.

    :return: <List[str]> surgery types found in the surgery file.
    """
    return self._list_procedures(self.surgery_file, "surgery_file")
def list_other_procedures(self) -> List[str]:
    """
    List all the types of procedures performed on the patient.

    :return: <List[str]> procedure types found in the procedures file.
    """
    return self._list_procedures(
        self.other_procedures_file, "other_procedures_file",
    )
def list_transfusions(self) -> List[str]:
    """
    List all the transfusion types that have been done on the patient.

    :return: <List[str]> transfusion types found in the transfusions
        file.
    """
    return self._list_procedures(self.transfusions_file, "transfusions_file")
@staticmethod
def _list_procedures(file_name, file_key) -> List[str]:
    """
    Filter and list all the completed procedures in the given file.

    Rows whose status is not 'Complete'/'Completed', or that miss a
    start or end date, are dropped.
    """
    columns = EDW_FILES[file_key]["columns"]
    name_col, status_col, start_col, end_col = columns
    frame = pd.read_csv(file_name)
    frame = frame[frame[status_col].isin(["Complete", "Completed"])]
    frame = frame.dropna(subset=[start_col, end_col])
    return list(frame[name_col].astype("str").str.upper().unique())
def list_events(self) -> List[str]:
    """
    List all the event types during the patient stay.

    :return: <List[str]> List with all the event types.
    """
    name_col, _ = EDW_FILES["events_file"]["columns"]
    events = pd.read_csv(self.events_file)
    return list(events[name_col].astype("str").str.upper().unique())
def get_static_data(self) -> StaticData:
    """
    Get the static data from the EDW csv files (admission + demographics).

    Gathers the patient's movements, weight, height, admission
    information, demographics, end-of-stay data, local time offsets
    and medical/surgical/social history.

    :return: <StaticData> wrapped information
    """
    movement_df = pd.read_csv(self.move_file)
    admission_df = pd.read_csv(self.adm_file)
    demographics_df = pd.read_csv(self.demo_file)
    # Obtain patient's movement (location and when they move)
    department_id = np.array(movement_df["DepartmentID"], dtype=int)
    department_nm = np.array(movement_df["DepartmentDSC"], dtype="S")
    room_bed = np.array(movement_df["BedLabelNM"], dtype="S")
    move_time = np.array(movement_df["TransferInDTS"], dtype="S")
    # Convert weight from ounces to pounds
    weight = float(admission_df["WeightPoundNBR"].values[0]) / 16
    # Convert height from feet & inches to meters
    height = self._convert_height(admission_df["HeightTXT"].values[0])
    admin_type = admission_df["HospitalAdmitTypeDSC"].values[0]
    # Find possible diagnosis at admission; join unique diagnoses
    # with "; ", or fall back to "UNKNOWN" when none recorded.
    diag_info = admission_df["AdmitDiagnosisTXT"].dropna().drop_duplicates()
    if list(diag_info):
        diag_info = diag_info.astype("str")
        admin_diag = diag_info.str.cat(sep="; ")
    else:
        admin_diag = "UNKNOWN"
    admin_date = admission_df["HospitalAdmitDTS"].values[0]
    birth_date = demographics_df["BirthDTS"].values[0]
    race = demographics_df["PatientRaceDSC"].values[0]
    sex = demographics_df["SexDSC"].values[0]
    end_date = admission_df["HospitalDischargeDTS"].values[0]
    # Check whether it exists a deceased date or not
    end_stay_type = (
        "Alive"
        if str(demographics_df["DeathDTS"].values[0]) == "nan"
        else "Deceased"
    )
    # Find local time, if patient is still in hospital, take today's date
    # ([:-1] trims the trailing character of the raw timestamp string
    # before parsing — presumably a format suffix; TODO confirm.)
    if str(end_date) != "nan":
        offsets = self._get_local_time(admin_date[:-1], end_date[:-1])
    else:
        today_date = datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f")
        offsets = self._get_local_time(admin_date[:-1], today_date)
    offsets = list(set(offsets))  # Take unique local times
    local_time = np.empty(0)
    for offset in offsets:
        local_time = np.append(local_time, f"UTC{int(offset/3600)}:00")
    local_time = local_time.astype("S")
    # Find medical, surgical and social history of patient
    medical_hist = self._get_med_surg_hist("medhist_file")
    surgical_hist = self._get_med_surg_hist("surghist_file")
    tobacco_hist, alcohol_hist = self._get_social_hist()
    return StaticData(
        department_id,
        department_nm,
        room_bed,
        move_time,
        weight,
        height,
        admin_type,
        admin_diag,
        admin_date,
        birth_date,
        race,
        sex,
        end_date,
        end_stay_type,
        local_time,
        medical_hist,
        surgical_hist,
        tobacco_hist,
        alcohol_hist,
    )
def get_med_doses(self, med_name: str) -> Medication:
    """
    Get all the doses of the input medication given to the patient.

    :param med_name: <string> name of the medicine (upper case)
    :return: <Medication> wrapped list of medication doses
    :raises ValueError: if the medication is not in the file.
    """
    (
        signal_column,
        status_column,
        time_column,
        route_column,
        weight_column,
        dose_column,
        dose_unit_column,
        infusion_column,
        infusion_unit_column,
        duration_column,
        duration_unit_column,
    ) = EDW_FILES["med_file"]["columns"]
    source = EDW_FILES["med_file"]["source"]
    med_df = pd.read_csv(self.med_file)
    med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
    med_df = med_df.sort_values(time_column)
    if med_name not in med_df[signal_column].astype("str").str.upper().unique():
        raise ValueError(f"{med_name} was not found in {self.med_file}.")
    idx = np.where(med_df[signal_column].astype("str").str.upper() == med_name)[0]
    # Route and weight-based flag are taken from the first occurrence.
    route = np.array(med_df[route_column])[idx[0]]
    wt_base_dose = (
        bool(1) if np.array(med_df[weight_column])[idx[0]] == "Y" else bool(0)
    )
    # No duration recorded: treat rows as point-in-time doses.
    if med_df[duration_column].isnull().values[idx[0]]:
        start_date = self._get_unix_timestamps(np.array(med_df[time_column])[idx])
        action = np.array(med_df[status_column], dtype="S")[idx]
        if (
            np.array(med_df[status_column])[idx[0]] in [MED_ACTIONS[0]]
            or med_df[infusion_column].isnull().values[idx[0]]
        ):
            dose = np.array(med_df[dose_column], dtype="S")[idx]
            units = np.array(med_df[dose_unit_column])[idx[0]]
        else:
            dose = np.array(med_df[infusion_column])[idx]
            units = np.array(med_df[infusion_unit_column])[idx[0]]
    else:
        # Duration recorded: expand each row into a (start, stop) pair,
        # appending a 0-dose "Stopped" marker at the computed end time.
        dose = np.array([])
        units = np.array(med_df[infusion_unit_column])[idx[0]]
        start_date = np.array([])
        action = np.array([])
        for _, row in med_df.iloc[idx, :].iterrows():
            dose = np.append(dose, [row[infusion_column], 0])
            time = self._get_unix_timestamps(np.array([row[time_column]]))[0]
            # Convert the recorded duration to seconds.
            conversion = 1
            if row[duration_unit_column] == "Seconds":
                conversion = 1
            elif row[duration_unit_column] == "Minutes":
                conversion = 60
            elif row[duration_unit_column] == "Hours":
                conversion = 3600
            start_date = np.append(
                start_date,
                [time, time + float(row[duration_column]) * conversion],
            )
            action = np.append(action, [row[status_column], "Stopped"])
    dose = self._ensure_contiguous(dose)
    start_date = self._ensure_contiguous(start_date)
    action = self._ensure_contiguous(action)
    return Medication(
        med_name,
        dose,
        units,
        start_date,
        action,
        route,
        wt_base_dose,
        source,
    )
def get_vitals(self, vital_name: str) -> Measurement:
    """
    Get the vital signal from the EDW csv file 'flowsheet'.

    Measurements recorded outside the admission window are excluded.

    :param vital_name: <string> name of the signal
    :return: <Measurement> wrapped measurement signal
    """
    time_col = EDW_FILES["vitals_file"]["columns"][3]
    admit_col = EDW_FILES["adm_file"]["columns"][3]
    discharge_col = EDW_FILES["adm_file"]["columns"][4]
    admission = pd.read_csv(self.adm_file)
    start = admission[admit_col].values[0]
    stop = admission[discharge_col].values[0]
    # Discard measurements taken outside the admission window.
    vitals = pd.read_csv(self.vitals_file)
    vitals = vitals[vitals[time_col] >= start]
    if str(stop) != "nan":
        vitals = vitals[vitals[time_col] <= stop]
    return self._get_measurements(
        "vitals_file",
        vitals,
        vital_name,
        self.vitals_file,
    )
def get_labs(self, lab_name: str) -> Measurement:
    """
    Get the lab measurement from the EDW csv file 'labs'.

    :param lab_name: <string> name of the signal
    :return: <Measurement> wrapped measurement signal
    """
    labs = pd.read_csv(self.lab_file)
    return self._get_measurements("lab_file", labs, lab_name, self.lab_file)
def get_surgery(self, surgery_type: str) -> Procedure:
    """
    Get all the surgeries of the input type performed on the patient.

    :param surgery_type: <string> type of surgery
    :return: <Procedure> wrapped list of surgeries of the input type
    """
    return self._get_procedures("surgery_file", self.surgery_file, surgery_type)
def get_other_procedures(self, procedure_type: str) -> Procedure:
    """
    Get all the procedures of the input type performed on the patient.

    :param procedure_type: <string> type of procedure
    :return: <Procedure> wrapped list of procedures of the input type
    """
    return self._get_procedures(
        "other_procedures_file", self.other_procedures_file, procedure_type,
    )
def get_transfusions(self, transfusion_type: str) -> Procedure:
    """
    Get all transfusions of the input type done to the patient.

    :param transfusion_type: <string> Type of transfusion.
    :return: <Procedure> Wrapped list of transfusions of the input type.
    """
    return self._get_procedures(
        "transfusions_file", self.transfusions_file, transfusion_type,
    )
def get_events(self, event_type: str) -> Event:
    """
    Get all the events of the input type during the patient stay.

    :param event_type: <string> Type of event.
    :return: <Event> Wrapped list of events of the input type.
    :raises ValueError: if the event type is not in the events file.
    """
    name_col, time_col = EDW_FILES["events_file"]["columns"]
    events = pd.read_csv(self.events_file)
    events = events.dropna(subset=[time_col]).sort_values([time_col])
    names = events[name_col].astype("str").str.upper()
    if event_type not in names.unique():
        raise ValueError(f"{event_type} was not found in {self.events_file}.")
    rows = np.where(names == event_type)[0]
    time = self._get_unix_timestamps(np.array(events[time_col])[rows])
    return Event(event_type, self._ensure_contiguous(time))
def _get_local_time(self, init_date: str, end_date: str) -> np.ndarray:
    """
    Obtain the UTC offsets (in seconds) at the given start/end dates.

    Two values are returned because a daylight-saving shift may occur
    while the patient is in the hospital.

    :param init_date: <str> String with initial date.
    :param end_date: <str> String with end date.
    :return: <np.ndarray> [offset_at_init, offset_at_end] in seconds.
    """
    fmt = "%Y-%m-%d %H:%M:%S.%f"
    offsets = [
        self.timezone.utcoffset(  # type: ignore
            datetime.strptime(moment, fmt),
            is_dst=True,
        ).total_seconds()
        for moment in (init_date, end_date)
    ]
    return np.array(offsets, dtype=float)
def _get_unix_timestamps(self, time_stamps: np.ndarray) -> np.ndarray:
    """
    Convert readable time stamps to unix time stamps.

    :param time_stamps: <np.ndarray> Array with all readable time stamps.
    :return: <np.ndarray> Array with Unix time stamps.
    :raises ValueError: if the array contains non-datetime values.
    """
    try:
        arr_timestamps = pd.to_datetime(time_stamps)
    except pd.errors.ParserError as error:
        raise ValueError("Array contains non datetime values.") from error
    # Convert readable local timestamps in local seconds timestamps
    local_timestamps = (
        np.array(arr_timestamps, dtype=np.datetime64)
        - np.datetime64("1970-01-01T00:00:00")
    ) / np.timedelta64(1, "s")
    # Find local time shift to UTC ([:-1] trims the trailing character
    # of the raw timestamp string — presumably a suffix; TODO confirm).
    if not (pd.isnull(local_timestamps[0]) or pd.isnull(local_timestamps[-1])):
        offsets = self._get_local_time(time_stamps[0][:-1], time_stamps[-1][:-1])
    else:
        # NaN endpoints: two (almost surely distinct) random offsets
        # force the element-wise branch below, where NaNs propagate.
        offsets = np.random.random(2)  # pylint: disable=no-member
    # Compute unix timestamp (2 different methods: 1st ~5000 times faster)
    if offsets[0] == offsets[1]:
        # Single constant offset for the whole array: vectorized shift.
        unix_timestamps = local_timestamps - offsets[0]
    else:
        # Offset changes within the range (DST shift): resolve the
        # offset per element via the timezone object.
        unix_timestamps = np.empty(np.size(local_timestamps))
        for idx, val in enumerate(local_timestamps):
            if not pd.isnull(val):
                ntarray = datetime.utcfromtimestamp(val)
                offset = self.timezone.utcoffset(  # type: ignore
                    ntarray,
                    is_dst=True,
                )
                unix_timestamps[idx] = val - offset.total_seconds()  # type: ignore
            else:
                unix_timestamps[idx] = val
    return unix_timestamps
def _get_med_surg_hist(self, file_key: str) -> np.ndarray:
    """
    Read the medical or surgical history table and return its rows
    formatted as strings.

    :param file_key: <str> Key name indicating the desired file:
        either 'medhist_file' or 'surghist_file'.
    :return: <np.ndarray> one formatted string per unique history
        entry ("ID: ...; DESCRIPTION: ...; COMMENTS: ...; DATE: ...").
    """
    if file_key == "medhist_file":
        hist_df = pd.read_csv(self.medhist_file)
    else:
        hist_df = pd.read_csv(self.surghist_file)
    hist_df = (
        hist_df[EDW_FILES[file_key]["columns"]].fillna("UNKNOWN").drop_duplicates()
    )
    info_hist = []
    for _, row in hist_df.iterrows():
        id_num, name, comment, date = row
        info_hist.append(
            f"ID: {id_num}; DESCRIPTION: {name}; "
            f"COMMENTS: {comment}; DATE: {date}",
        )
    return self._ensure_contiguous(np.array(info_hist))
def _get_social_hist(self) -> Tuple:
    """
    Read the social history table and return the patient's tobacco
    and alcohol status.

    :return: <Tuple> (tobacco_hist, alcohol_hist) formatted strings.
    """
    social = pd.read_csv(self.socialhist_file)
    social = social[EDW_FILES["socialhist_file"]["columns"]].drop_duplicates()
    merged = []
    # Join the unique non-null values of each column; "NONE" when empty.
    for column in social:
        values = social[column].drop_duplicates().dropna()
        if list(values):
            merged.append(values.astype(str).str.cat(sep=" - "))
        else:
            merged.append("NONE")
    tobacco_hist = f"STATUS: {merged[0]}; COMMENTS: {merged[1]}"
    alcohol_hist = f"STATUS: {merged[2]}; COMMENTS: {merged[3]}"
    return tobacco_hist, alcohol_hist
def _get_measurements(self, file_key: str, data, measure_name: str, file_name: str):
    """
    Build a Measurement for the given signal from a pre-loaded table.

    :param file_key: <str> key into EDW_FILES with column names/source.
    :param data: <pd.DataFrame> table already read from file_name.
    :param measure_name: <str> signal name (upper case).
    :param file_name: <str> origin file; only used in error messages.
    :return: <Measurement> wrapped measurement signal.
    :raises ValueError: if the signal is not present in the table.
    """
    (
        signal_column,
        result_column,
        units_column,
        time_column,
        additional_columns,
    ) = EDW_FILES[file_key]["columns"]
    source = EDW_FILES[file_key]["source"]
    # Drop repeated values and sort
    data = data[
        [signal_column, result_column, units_column, time_column]
        + additional_columns
    ].drop_duplicates()
    data = data.sort_values(time_column)
    if measure_name not in data[signal_column].astype("str").str.upper().unique():
        raise ValueError(f"{measure_name} was not found in {file_name}.")
    idx = np.where(data[signal_column].astype("str").str.upper() == measure_name)[0]
    value = np.array(data[result_column])[idx]
    time = self._get_unix_timestamps(np.array(data[time_column])[idx])
    # Units are taken from the first occurrence of the signal.
    units = np.array(data[units_column])[idx[0]]
    value = self._ensure_contiguous(value)
    time = self._ensure_contiguous(time)
    data_type = "Numerical"
    additional_data = {}
    for col in additional_columns:
        col_data = np.array(data[col])[idx]
        # Columns named *DTS hold timestamps: convert to unix time.
        if "DTS" in col:
            col_data = self._get_unix_timestamps(col_data)
        col_data = self._ensure_contiguous(col_data)
        additional_data[col] = col_data
    return Measurement(
        measure_name,
        source,
        value,
        time,
        units,
        data_type,
        additional_data,
    )
def _get_procedures(
    self,
    file_key: str,
    file_name: str,
    procedure_type: str,
) -> Procedure:
    """
    Read completed procedures of the given type from a procedures file.

    :param file_key: <str> key into EDW_FILES with column names/source.
    :param file_name: <str> path of the csv file to read.
    :param procedure_type: <str> procedure type (upper case).
    :return: <Procedure> wrapped procedures of the requested type.
    :raises ValueError: if the type is not present in the file.
    """
    name_col, status_col, start_col, end_col = EDW_FILES[file_key]["columns"]
    source = EDW_FILES[file_key]["source"]
    frame = pd.read_csv(file_name)
    # Keep completed procedures with both start and end dates.
    frame = frame[frame[status_col].isin(["Complete", "Completed"])]
    frame = frame.dropna(subset=[start_col, end_col])
    frame = frame.sort_values([start_col, end_col])
    names = frame[name_col].astype("str").str.upper()
    if procedure_type not in names.unique():
        raise ValueError(f"{procedure_type} was not found in {file_name}.")
    rows = np.where(names == procedure_type)[0]
    start = self._get_unix_timestamps(np.array(frame[start_col])[rows])
    end = self._get_unix_timestamps(np.array(frame[end_col])[rows])
    return Procedure(
        procedure_type,
        source,
        self._ensure_contiguous(start),
        self._ensure_contiguous(end),
    )
@staticmethod
def _convert_height(height_i):
    """
    Convert a height string like ``5' 10"`` (feet & inches) to meters.

    :param height_i: height text; a missing value ("nan") yields np.nan.
    :return: <float> height in meters, or np.nan when missing.
    """
    if str(height_i) == "nan":
        return np.nan
    # Drop the trailing inch mark, then split feet from inches.
    feet, inches = height_i[:-1].split("' ")
    return float(feet) * 0.3048 + float(inches) * 0.0254
class BedmasterReader(h5py.File, Reader):
    """
    Implementation of the Reader for Bedmaster data.

    Wraps a Bedmaster ``.mat`` (HDF5) file and exposes its corrected
    vital-sign and waveform signals.

    Usage:
    >>> reader = BedmasterReader('file.mat')
    >>> hr = reader.get_vs('HR')
    """
def __init__(
    self,
    file: str,
    scaling_and_units: Dict[str, Dict[str, Any]] = ICU_SCALE_UNITS,
    summary_stats: Optional[BedmasterStats] = None,
):
    """
    Open a Bedmaster .mat file for reading.

    :param file: path to the .mat file.
    :param scaling_and_units: mapping from signal name to its scaling
        factor and units.
    :param summary_stats: optional stats accumulator; when given,
        per-file and per-signal statistics are recorded on it.
    """
    super().__init__(file, "r")
    # Latest segment/timespan seen per signal type; used for
    # inter-bundle correction across consecutive files.
    self.max_segment = {
        "vs": {"segmentNo": 0, "maxTime": -1, "signalName": ""},
        "wv": {"segmentNo": 0, "maxTime": -1, "signalName": ""},
    }
    # Correction parameters computed by get_interbundle_correction.
    self.interbundle_corr: Dict[str, Optional[Dict]] = {
        "vs": None,
        "wv": None,
    }
    self.scaling_and_units: Dict[str, Dict[str, Any]] = scaling_and_units
    self.summary_stats = summary_stats
    if self.summary_stats:
        self.summary_stats.add_file_stats("total_files")
def _update_max_segment(self, sig_name, sig_type, max_time):
    """
    Record the signal holding the segment with the latest timespan.

    Needed for inter-bundle correction.

    :param sig_name: <str> name of the new candidate signal
    :param sig_type: <str> wv or vs
    :param max_time: <int> latest timespan for that signal
    """
    if sig_type == "vs":
        packet = self["vs_packet"]
    else:
        packet = self["wv_time_original"]
    entry = self.max_segment[sig_type]
    entry["maxTime"] = max_time
    entry["segmentNo"] = packet[sig_name]["SegmentNo"][-1][0]
    entry["signalName"] = sig_name
def get_interbundle_correction(self, previous_max):
    """
    Calculate interbundle correction parameters from previous bundle maxs.

    Based on the signal with maximum time from the previous bundle,
    it calculates the 'maxTime': the last timespan that is overlapped
    with the previous bundle, and 'timeCorr': the time shifting to be
    applied on this bundle.

    Parameters are stored on attribute 'interbundle_corr'.

    :param previous_max: <Dict> dict with the max timespans info from
        the previous bundle. Same format as the 'max_segment'
        attribute.
    """

    def _ib_corr(previous_max, segments, time):
        # Returns {'maxTime', 'timeCorr'} when this bundle's segments
        # overlap the previous bundle's last segment; None otherwise.
        ib_corr = None
        overlap_idx = np.where(segments[()] == previous_max["segmentNo"])[0]
        if overlap_idx.size > 0:  # Bundles overlap
            last_overlap_idx = overlap_idx[-1]
            # Clamp: the segment array can be longer than the time array.
            if last_overlap_idx >= len(time):
                last_overlap_idx = len(time) - 1
            last_overlap_time = time[last_overlap_idx][0]
            time_corr = previous_max["maxTime"] - last_overlap_time
            ib_corr = {"maxTime": last_overlap_time, "timeCorr": time_corr}
        return ib_corr

    vs_corr = None
    last_max_vs = previous_max["vs"]["signalName"]
    if self.contains_group("vs"):
        if last_max_vs in self["vs"].keys():
            vs_corr = _ib_corr(
                previous_max=previous_max["vs"],
                segments=self["vs_packet"][last_max_vs]["SegmentNo"],
                time=self["vs_time_corrected"][last_max_vs]["res_vs"],
            )
    wv_corr = None
    last_max_wv = previous_max["wv"]["signalName"]
    if self.contains_group("wv"):
        if last_max_wv in self["wv"].keys():
            wv_corr = _ib_corr(
                previous_max=previous_max["wv"],
                segments=self["wv_time_original"][last_max_wv]["SegmentNo"],
                time=self["wv_time_corrected"][last_max_wv]["res_wv"],
            )
    self.max_segment = previous_max
    self.interbundle_corr["vs"] = vs_corr
    self.interbundle_corr["wv"] = wv_corr
def apply_ibcorr(self, signal: BedmasterSignal):
    """
    Apply inter-bundle correction on a given signal.

    The correction is applied based on the 'interbundle_corr'
    attribute, which is updated using the method
    'get_interbundle_correction'.

    The correction cuts the values overlapping between this bundle
    and the previous one. In addition, it shifts the timespans so that
    the first timespan on this bundle is the continuation of the last
    timespan of the previous bundle.

    Note that this shifting will occur until a dataevent 1 or 5 is found.

    :param signal: <BedmasterSignal> a Bedmaster signal.
    """
    source = "vs" if signal._source_type == "vitals" else "wv"
    if not self.interbundle_corr[source]:
        return
    overlap_idx = np.where(
        signal.time <= self.interbundle_corr[source]["maxTime"],  # type: ignore
    )[0]
    if overlap_idx.size > 0:
        first_non_ol_idx = overlap_idx[-1] + 1
        signal.time = signal.time[first_non_ol_idx:]
        signal.time_corr_arr = signal.time_corr_arr[first_non_ol_idx:]
        # Waveforms hold several samples per timespan, so the value
        # cut point is the sum of samples over the removed timespans.
        value_cut_idx = (
            first_non_ol_idx
            if source == "vs"
            else np.sum(signal.samples_per_ts[:first_non_ol_idx])
        )
        signal.value = signal.value[value_cut_idx:]
        signal.samples_per_ts = signal.samples_per_ts[first_non_ol_idx:]
        if signal.source == "waveform":
            # Re-derive sample frequencies starting from the first
            # non-overlapped timespan.
            signal.sample_freq = self.get_sample_freq_from_channel(
                channel=signal.channel,
                first_idx=first_non_ol_idx,
            )
    corr_to_apply = self.interbundle_corr[source]["timeCorr"]  # type: ignore
    if corr_to_apply:
        de_idx = np.where(signal.time_corr_arr == 1)[0]
        if de_idx.size > 0:  # Contains data events
            # Shift only up to the first data event (time reset).
            first_event = de_idx[0]
            signal.time[:first_event] = signal.time[:first_event] + corr_to_apply
        else:
            signal.time = signal.time + corr_to_apply
    if self.summary_stats and overlap_idx.size > 0:
        if signal.value.size > 0:
            self.summary_stats.add_signal_stats(
                signal.name,
                "overlapped_points",
                first_non_ol_idx,
                source=signal.source,
            )
def contains_group(self, group_name: str) -> bool:
    """
    Check whether the .mat file contains the given HDF5 group.
    """
    return group_name in self.keys() and isinstance(
        self[group_name],
        h5py.Group,
    )
def list_vs(self) -> List[str]:
    """
    Get JUST the names of the vital signals contained in the .mat
    file (not their values).

    :return: <List[str]> vital signal names; empty when the file has
        no 'vs' group.
    """
    if self.contains_group("vs"):
        return list(self["vs"].keys())
    logging.warning(f"No BM vitalsign found on file {self.filename}.")
    if self.summary_stats:
        self.summary_stats.add_file_stats("missing_vs")
    return []
def list_wv(self) -> Dict[str, str]:
    """
    Get the names of the waveform signals contained in the .mat file.

    The format is {wv_name: channel}, where `channel` is the input
    channel where the signal enters. If a channel contains no
    waveform or contains multiple waveforms, it is ignored.

    :return: <Dict[str, str]> waveform name -> input channel.
    """
    waveforms: Dict[str, str] = {}
    if not self.contains_group("wv"):
        logging.warning(f"No BM waveform found on file {self.filename}.")
        if self.summary_stats:
            self.summary_stats.add_file_stats("missing_wv")
        return waveforms
    for channel in self["wv"].keys():
        name = self.get_wv_from_channel(channel)
        if name:
            waveforms[name] = channel
    return waveforms
def format_data(self, data) -> np.ndarray:
    """
    Format multidimensional data into 1D arrays.

    :param data: <np.array> Data to be formatted
    :return: <np.array> formatted data
    """
    # Column vector [[0], [1]] -> transpose to a row.
    if data.shape[1] == 1:
        data = data.T
    # Row vector [[0, 1]] -> plain 1D array.
    if data.shape[0] == 1:
        data = data[0]
    # Anything still 2D holds unicode-encoded values: orient rows
    # along the long axis and decode to a 1D array.
    if data.ndim == 2:
        if data.shape[0] < data.shape[1]:
            data = data.T
        data = self.decode_data(data)
    return data
@staticmethod
def decode_data(data: np.ndarray) -> np.ndarray:
    """
    Decode data stored as unicode identifiers and return a 1D array.

    Example:
    >>> data  # 2D array with unicode codes for '0','.','2'
    array([[48, 46, 50],
           [48, 46, 50],
           [48, 46, 50],
           [48, 46, 50]])
    >>> BedmasterReader.decode_data(data)
    array([0.2, 0.2, 0.2, 0.2])

    :param data: <np.ndarray> Data to decode
    :return: <np.ndarray> decoded data
    """

    def _decode(row):
        text = "".join(chr(code) for code in row).strip()
        # "X"/"None" are missing-value markers in the raw data.
        return np.nan if text in ("X", "None") else text

    data = np.apply_along_axis(_decode, 1, data)
    try:
        data = data.astype(float)
        # Collapse to int when every value is a whole number.
        if all(x.is_integer() for x in data):
            dtype = int  # type: ignore
        else:
            dtype = float  # type: ignore
    except ValueError:
        dtype = "S"  # type: ignore  # non-numeric: keep as byte strings
    return data.astype(dtype)
def get_vs(self, signal_name: str) -> Optional[BedmasterSignal]:
    """
    Get the corrected vs signal from the .mat file.

    1. Gets the signal and its metadata from the .mat file
    2. Applies corrections on the signal
    3. Wraps the corrected signal and its metadata on a BedmasterDataObject

    :param signal_name: <string> name of the signal
    :return: <BedmasterSignal> wrapped corrected signal, or None when
        the signal is defective or fully overlapped by previous bundles.
    :raises ValueError: if the signal is not in the file, or its values
        are still multidimensional after formatting.
    """
    if signal_name not in self["vs"].keys():
        raise ValueError(
            f"In bedmaster_file {self.filename}, the signal {signal_name} "
            "was not found.",
        )
    # Get values and time
    values = self["vs"][signal_name][()]
    if values.ndim == 2:
        values = self.format_data(values)
    if values.dtype.char == "S":
        logging.warning(
            f"{signal_name} on .mat file {self.filename}, has unexpected "
            "string values.",
        )
        return None
    if values.ndim >= 2:
        raise ValueError(
            f"Signal {signal_name} on file: {self.filename}. The values"
            f"of the signal have higher dimension than expected (>1) after"
            f"being formatted. The signal is probably in a bad format so it "
            f"won't be written.",
        )
    time = np.transpose(self["vs_time_corrected"][signal_name]["res_vs"][:])[0]
    # Get the occurrence of event 1 and 5 (time resets).
    de_1 = self["vs_time_corrected"][signal_name]["data_event_1"]
    de_5 = self["vs_time_corrected"][signal_name]["data_event_5"]
    # Builtin `bool` instead of `np.bool`: the alias (identical to the
    # builtin) was removed in NumPy 1.24.
    events = (de_1[:] | de_5[:]).astype(bool)
    # Get scaling factor and units
    if signal_name in self.scaling_and_units:
        scaling_factor = self.scaling_and_units[signal_name]["scaling_factor"]
        units = self.scaling_and_units[signal_name]["units"]
    else:
        scaling_factor = 1
        units = "UNKNOWN"
    # Samples per timespan: vitals always carry one sample per timespan.
    samples_per_ts = np.array([1] * len(time))
    signal = BedmasterSignal(
        name=signal_name,
        source="vitals",
        channel=signal_name,
        value=self._ensure_contiguous(values),
        time=self._ensure_contiguous(time),
        units=units,
        sample_freq=np.array([(0.5, 0)], dtype="float,int"),
        scale_factor=scaling_factor,
        time_corr_arr=events,
        samples_per_ts=self._ensure_contiguous(samples_per_ts),
    )
    # Apply inter-bundle correction
    if self.interbundle_corr["vs"]:
        self.apply_ibcorr(signal)
    if signal.time.size == 0:
        logging.info(
            f"Signal {signal} on .mat file {self.filename} doesn't contain new "
            f"information (only contains overlapped values from previous bundles). "
            f"It won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "total_overlap_bundles",
                source=signal.source,
            )
        return None
    # Compress time_corr_arr
    signal.time_corr_arr = np.packbits(np.transpose(signal.time_corr_arr)[0])
    # Update the max segment time (for inter-bundle correction)
    max_time = time[-1]
    if max_time > self.max_segment["vs"]["maxTime"]:
        self._update_max_segment(signal_name, "vs", max_time)
    # Quality check on data
    if not signal.time.shape[0] == signal.value.shape[0]:
        logging.warning(
            f"Something went wrong with signal {signal.name} on file: "
            f"{self.filename}. Time vector doesn't have the same length than "
            f"values vector. The signal won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "defective_signal",
                source=signal.source,
            )
        return None
    if not signal.samples_per_ts.shape[0] == signal.time.shape[0]:
        # Fixed log message: this check compares samples_per_ts against
        # the time vector (the previous message was a copy-paste of the
        # values-vector check above).
        logging.warning(
            f"Something went wrong with signal {signal.name} on file: "
            f"{self.filename}. Time vector doesn't have the same length than "
            f"'samples_per_ts' vector. The signal won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "defective_signal",
                source=signal.source,
            )
        return None
    if self.summary_stats:
        self.summary_stats.add_from_signal(signal)
    return signal
def get_wv(
    self,
    channel_n: str,
    signal_name: Optional[str] = None,
) -> Optional[BedmasterSignal]:
    """
    Get the corrected wv signal from the .mat file.

    1. Gets the signal and its metadata from the .mat file
    2. Applies corrections on the signal
    3. Wraps the corrected signal and its metadata on a BedmasterDataObject

    :param channel_n: <string> channel where the signal is
    :param signal_name: <string> name of the signal; inferred from the
        channel label when not given.
    :return: <BedmasterSignal> wrapped corrected signal, or None when
        the signal is defective or completely overlapped.
    :raises ValueError: if the channel is not in the file, or the
        values are still multidimensional after formatting.
    """
    if channel_n not in self["wv"].keys():
        raise ValueError(
            f"In bedmaster_file {self.filename}, the signal {channel_n} was "
            "not found.",
        )
    if not signal_name:
        signal_name = self.get_wv_from_channel(channel_n)
        if not signal_name:
            signal_name = "?"
    values = np.array(np.transpose(self["wv"][channel_n][:])[0])
    if values.ndim == 2:
        values = self.format_data(values)
    if values.ndim >= 2:
        raise ValueError(
            f"Something went wrong with signal {signal_name} "
            f"on file: {self.filename}. Dimension of values "
            f"formatted values is higher than expected (>1).",
        )
    time = np.transpose(self["wv_time_corrected"][channel_n]["res_wv"][:])[0]
    # Get scaling factor and units
    scaling_factor, units = self.get_scaling_and_units(channel_n, signal_name)
    # Get the occurrence of event 1 and 5 (time resets).
    # BUG FIX: cast the OR result, not only de_5 — previously
    # `de_1[:] | de_5[:].astype(np.bool)` bound the cast to de_5 alone,
    # unlike the equivalent expression in get_vs. Also use the builtin
    # `bool`: the `np.bool` alias was removed in NumPy 1.24.
    time_reset_events = (de_1_or_de_5 := (
        self["wv_time_corrected"][channel_n]["data_event_1"][:]
        | self["wv_time_corrected"][channel_n]["data_event_5"][:]
    )).astype(bool) if True else de_1_or_de_5
    # Get sample frequency
    sample_freq = self.get_sample_freq_from_channel(channel_n)
    # Get samples per timespan
    samples_per_ts = self["wv_time_original"][channel_n]["Samples"][()]
    if samples_per_ts.ndim == 2:
        samples_per_ts = self.format_data(samples_per_ts)
    signal = BedmasterSignal(
        name=signal_name,
        source="waveform",
        channel=channel_n,
        value=values[:],
        time=time[:],
        units=units,
        sample_freq=sample_freq,
        scale_factor=scaling_factor,
        time_corr_arr=time_reset_events,
        samples_per_ts=samples_per_ts,
    )
    # Apply inter-bundle correction
    if self.interbundle_corr["wv"]:
        self.apply_ibcorr(signal)
    if signal.time.size == 0:
        logging.info(
            f"In bedmaster_file {self.filename}, {signal} is completely "
            "overlapped, it won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "total_overlap_bundles",
                source=signal.source,
            )
        return None
    # Add the rest of events and compress the array
    tc_len = len(signal.time_corr_arr)
    de_2 = self["wv_time_corrected"][channel_n]["data_event_2"]
    de_3 = self["wv_time_corrected"][channel_n]["data_event_3"]
    de_4 = self["wv_time_corrected"][channel_n]["data_event_4"]
    events = signal.time_corr_arr | de_2[-tc_len:] | de_3[-tc_len:] | de_4[-tc_len:]
    events = np.packbits(np.transpose(events)[0])
    signal.time_corr_arr = events
    # Update the max segment time (for inter-bundle correction)
    max_time = time[-1]
    if max_time > self.max_segment["wv"]["maxTime"]:
        self._update_max_segment(channel_n, "wv", max_time)
    # Quality check on data
    if not signal.time.shape[0] == signal.samples_per_ts.shape[0]:
        logging.warning(
            f"Something went wrong with signal: "
            f"{signal.name} on file: {self.filename}. "
            f"Time vector doesn't have the same length than "
            f"'samples_per_ts' vector. The signal won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "defective_signal",
                source=signal.source,
            )
        return None
    if not signal.value.shape[0] == np.sum(signal.samples_per_ts):
        logging.warning(
            f"Something went wrong with signal: "
            f"{signal.name} on file: {self.filename} "
            f"'samples_per_ts' vector's sum isn't equal to "
            f"values vector's length. This seems an error on the primitive "
            f".stp file. The signal won't be written.",
        )
        if self.summary_stats:
            self.summary_stats.add_signal_stats(
                signal.name,
                "defective_signal",
                source=signal.source,
            )
        return None
    if self.summary_stats:
        self.summary_stats.add_from_signal(signal)
    return signal
def get_wv_from_channel(self, channel: str) -> Optional[str]:
    """Decode the waveform signal name stored in a channel's Label array.

    Reads at most ~10 evenly spaced label samples, strips spaces (32) and
    NULs (0), and decodes the remaining character codes into a name.
    Returns None when the channel mixes several labels or has no label.
    """
    label_path = f"wv_time_original/{channel}/Label"
    n_cols = self[label_path].shape[-1]
    # Sample roughly 10 evenly spaced columns instead of loading them all.
    if n_cols < 10:
        signals = self[label_path][:]
    else:
        signals = self[label_path][..., range(0, n_cols, n_cols // 10)]
    signals = np.unique(signals.T, axis=0)
    # Drop space (32) and NUL (0) padding characters.
    signals = signals[(signals != 32) & (signals != 0)]
    # NOTE(review): boolean masking above always yields a 1-D array, so this
    # mixed-signal branch looks unreachable as written -- confirm intent.
    if signals.ndim > 1:
        logging.warning(
            f"Channel {channel} on file {self.filename} "
            f"is a mix of different signals: {signals}. "
            f"This situation is not supported. "
            f"The channel will be ignored.",
        )
        if self.summary_stats:
            self.summary_stats.add_file_stats("multiple_label_signal")
        return None
    if signals.size == 0:
        logging.warning(
            f"The signal on channel {channel} on file {self.filename} "
            f"has no name. It is probably an empty signal or a badly "
            f"recorded one. It won't be written to the tensorized file.",
        )
        if self.summary_stats:
            self.summary_stats.add_file_stats("no_label_signal")
        return None
    return "".join(chr(code) for code in signals)
def get_sample_freq_from_channel(self, channel: str, first_idx=0):
    """Return the sample-frequency change points of a channel.

    :param channel: channel group name under "wv_time_original".
    :param first_idx: index of the first SampleRate entry to consider.
    :return: structured array of (sample_freq, start_index) pairs, one per
        run of constant sample frequency; [(nan, 0)] when no data exists.
    """
    rates = self["wv_time_original"][channel]["SampleRate"][first_idx:].T[0]
    if rates.shape[0] <= 0:
        logging.info(
            f"The signal on channel {channel} on file {self.filename} has an "
            f"incorrect sample frequency format. Either it doesn't have sample "
            f"frequency or it has an incongruent one. Sample frequency will be set "
            f"to Nan for this signal.",
        )
        return np.array([(np.nan, 0)], dtype="float,int")
    # Positions just before each rate change; -1 stands for the initial run.
    change_points = np.concatenate([[-1], np.where(rates[:-1] != rates[1:])[0]])
    return np.fromiter(
        ((rates[idx + 1], idx + 1) for idx in change_points),
        dtype="float,int",
    )
def get_scaling_and_units(self, channel_n, signal_name):
    """Return (scaling_factor, units) for a signal.

    Uses the cached entry in self.scaling_and_units when present; otherwise
    parses the channel's "Cal" calibration string (number + unit). Falls
    back to (0.0, "UNKNOWN") when the entry is missing or unparsable.
    """
    if signal_name in self.scaling_and_units:
        entry = self.scaling_and_units[signal_name]
        return float(entry["scaling_factor"]), entry["units"]
    try:
        raw = self["wv_time_original"][channel_n]["Cal"][()]
        raw = self.decode_data([raw.T[0]])[0].decode("utf-8")
        # Split "0.025uV" into ["0.025", "uV"]; anything else is malformed.
        parts = [piece for piece in re.split(r"(\d*\.?\d+)", raw) if piece]
        if len(parts) != 2:
            raise ValueError
        scaling_factor, units = parts
    except (KeyError, ValueError):
        logging.warning(
            f"Scaling factor or units not found "
            f"for signal {signal_name} on file {self.filename}. They will "
            f"be set to units: UNKNOWN, scaling_factor: 0.",
        )
        scaling_factor = 0
        units = "UNKNOWN"
    return float(scaling_factor), units
class BedmasterAlarmsReader(Reader):
"""
Implementation of the Reader for Bedmaster Alarms data.
"""
def __init__(
    self,
    alarms_path: str,
    edw_path: str,
    mrn: str,
    csn: str,
    adt: str,
    move_file: str = EDW_FILES["move_file"]["name"],
):
    """
    Init Bedmaster Alarms Reader.

    :param alarms_path: Absolute path of Bedmaster alarms directory.
    :param edw_path: Absolute path of edw directory.
    :param mrn: MRN of the patient.
    :param csn: CSN of the patient visit.
    :param adt: Path to adt table.
    :param move_file: File containing the movements of the patient
                      (admission, transfer and discharge) from the patient.
                      Can be inferred if None.
    """
    self.alarms_path = alarms_path
    self.edw_path = edw_path
    self.mrn = mrn
    self.csn = csn
    self.adt = adt
    if not move_file.endswith(".csv"):
        move_file = f"{move_file}.csv"
    # Per-visit movement file lives under <edw>/<mrn>/<csn>/.
    self.move_file = os.path.join(edw_path, mrn, csn, move_file)
    self.alarms_dfs = self._get_alarms_dfs()
def list_alarms(self) -> List[str]:
    """
    List all the Bedmaster alarms registered from the patient.

    :return: <List[str]> List with all the registered Bedmaster alarms
             from the patient
    """
    name_column = ALARMS_FILES["columns"][3]
    found: Set[str] = set()
    for frame in self.alarms_dfs:
        # Upper-case alarm names so duplicates differing in case collapse.
        found.update(frame[name_column].astype("str").str.upper())
    return list(found)
def get_alarm(self, alarm_name: str) -> BedmasterAlarm:
    """
    Get the Bedmaster alarms data from the Bedmaster Alarms .csv files.

    :param alarm_name: upper-cased alarm name to collect.
    :return: <BedmasterAlarm> wrapped information.
    """
    dates = np.array([])
    durations = np.array([])
    # BUG FIX: level used to be assigned only when the alarm was found at
    # least once, raising NameError for an absent alarm; default to None.
    level = None
    date_column, level_column, alarm_name_column, duration_column = ALARMS_FILES[
        "columns"
    ][1:5]
    first = True
    for alarm_df in self.alarms_dfs:
        idx = np.where(
            alarm_df[alarm_name_column].astype("str").str.upper() == alarm_name,
        )[0]
        dates = np.append(dates, np.array(alarm_df[date_column])[idx])
        durations = np.append(durations, np.array(alarm_df[duration_column])[idx])
        if len(idx) > 0 and first:
            # Take the level from the first occurrence only.
            first = False
            level = np.array(alarm_df[level_column])[idx[0]]
    dates = self._ensure_contiguous(dates)
    durations = self._ensure_contiguous(durations)
    return BedmasterAlarm(
        name=alarm_name,
        start_date=dates,
        duration=durations,
        level=level,
    )
def _get_alarms_dfs(self) -> List[pd.core.frame.DataFrame]:
    """
    List all the Bedmaster alarms data frames containing data for the given
    patient.

    :return: <List[pd.core.frame.DataFrame]> List with all the Bedmaster alarms
             data frames containing data for the given patient.
    """
    # Prefer the per-visit movement file; otherwise filter the full ADT
    # table down to this MRN + CSN.
    if os.path.isfile(self.move_file):
        movement_df = pd.read_csv(self.move_file)
    else:
        adt_df = pd.read_csv(self.adt)
        movement_df = adt_df[adt_df["MRN"] == self.mrn]
        movement_df = movement_df[movement_df["PatientEncounterID"] == self.csn]
    department_nm = np.array(movement_df["DepartmentDSC"], dtype=str)
    room_bed = np.array(movement_df["BedLabelNM"], dtype=str)
    transfer_in = np.array(movement_df["TransferInDTS"], dtype=str)
    transfer_out = np.array(movement_df["TransferOutDTS"], dtype=str)
    alarms_dfs = []
    # One pass per stay segment (department + bed + time window).
    for i, dept in enumerate(department_nm):
        move_in = self._get_unix_timestamp(transfer_in[i])
        move_out = self._get_unix_timestamp(transfer_out[i])
        if dept in ALARMS_FILES["names"]:
            names = ALARMS_FILES["names"][dept]
        else:
            logging.warning(
                f"Department {dept} is not found in ALARMS_FILES['names'] "
                "in ml4c3/definitions.py. No alarms data will be searched for this "
                "department. Please, add this information to "
                "ALARMS_FILES['names'].",
            )
            continue
        # Normalize the bed label to the short code used in the alarms CSVs.
        # NOTE(review): assumes either a trailing 3-char code, or (when it
        # contains a letter) chars [-5:-2] plus the final char -- confirm
        # against the alarms export format.
        bed = room_bed[i][-3:]
        if any(s.isalpha() for s in bed):
            bed = room_bed[i][-5:-2] + room_bed[i][-1]
        for csv_name in names:
            if not os.path.isfile(
                os.path.join(self.alarms_path, f"bedmaster_alarms_{csv_name}.csv"),
            ):
                continue
            alarms_df = pd.read_csv(
                os.path.join(self.alarms_path, f"bedmaster_alarms_{csv_name}.csv"),
                low_memory=False,
            )
            # Keep only alarms for this bed that started within the stay.
            alarms_df = alarms_df[alarms_df["Bed"].astype(str) == bed]
            alarms_df = alarms_df[alarms_df["AlarmStartTime"] >= move_in]
            alarms_df = alarms_df[alarms_df["AlarmStartTime"] <= move_out]
            if len(alarms_df.index) > 0:
                alarms_dfs.append(alarms_df)
    return alarms_dfs
@staticmethod
def _get_unix_timestamp(time_stamp_str: str) -> int:
"""
Convert readable time stamps to unix time stamps.
:param time_stamps: <str> String with readable time stamps.
:return: <int> Integer Unix time stamp.
"""
try:
time_stamp = | pd.to_datetime(time_stamp_str) | pandas.to_datetime |
import requests
import pandas as pd
import numpy as np
from credential import API_KEY
# Directory holding the previously scraped, normalized CSV tables.
target_dir = '../csv_data/'
# Core movie table plus link tables (movie<->genre/company/country/language)
# and their corresponding info (lookup) tables.
movies = pd.read_csv(f'{target_dir}movies.csv')
df_genres = pd.read_csv(f'{target_dir}genres.csv')
df_genre_info = pd.read_csv(f'{target_dir}genre_info.csv')
df_companies = pd.read_csv(f'{target_dir}companies.csv')
df_company_info = pd.read_csv(f'{target_dir}company_info.csv')
df_countries = pd.read_csv(f'{target_dir}countries.csv')
df_country_info = pd.read_csv(f'{target_dir}country_info.csv')
df_spoken_languages = pd.read_csv(f'{target_dir}spoken_languages.csv')
df_language_info = pd.read_csv(f'{target_dir}language_info.csv')
df_people = pd.read_csv(f'{target_dir}people.csv')
df_person = pd.read_csv(f'{target_dir}person.csv')
# Ids already present in the info tables, used to decide whether a
# genre/company/country/language needs a new info row.
# NOTE(review): these are one-time snapshots -- they are not updated when
# new ids are appended during the scrape below, so a brand-new id appearing
# in two movies would get two info rows.
seen_genre_id = df_genre_info['genre_id'].values
seen_company_id = df_company_info['company_id'].values
seen_country_id = df_country_info['country_id'].values
seen_language_id = df_language_info['language_id'].values
def request_upcoming(pageNum):
    """Fetch one page of TMDB "upcoming" movies and return the parsed JSON."""
    endpoint = f"https://api.themoviedb.org/3/movie/upcoming?api_key={API_KEY}&language=en-US&page={pageNum}"
    # GET the page and decode the JSON payload.
    response = requests.get(url=endpoint)
    return response.json()
def request_movie(mid):
    """Fetch the TMDB movie detail record for movie id *mid* as parsed JSON."""
    endpoint = f"https://api.themoviedb.org/3/movie/{mid}?api_key={API_KEY}&language=en-US"
    # GET the movie record and decode the JSON payload.
    response = requests.get(url=endpoint)
    return response.json()
# Column order for the upcoming-movies table (kept from the original schema).
UPCOMING_COLUMNS = [
    'mid', 'adult', 'budget', 'language_id', 'overview', 'popularity',
    'poster_path', 'runtime', 'status', 'tagline', 'title', 'video',
    'vote_average', 'vote_count', 'year', 'revenue'
]

# Buffer new rows in plain lists and concatenate once at the end:
# DataFrame.append was removed in pandas 2.0 and per-row appends are
# quadratic in any case.
upcoming_rows = []
genre_rows = []
genre_info_rows = []
company_rows = []
company_info_rows = []
country_rows = []
country_info_rows = []
spoken_language_rows = []
language_info_rows = []

# Local, mutable copies of the known-id snapshots so an id first seen during
# this run is added to its info table exactly once (the original arrays were
# computed before the loop and never updated).
known_genre_ids = set(seen_genre_id)
known_company_ids = set(seen_company_id)
known_country_ids = set(seen_country_id)
known_language_ids = set(seen_language_id)

i = 0  # number of successfully processed movies (progress counter)
for page in range(1, 6):  # 20 upcoming movies per page, so getting 100
    upcomings = request_upcoming(page)['results']
    for upcoming in upcomings:
        try:
            mid = upcoming['id']
        except (KeyError, TypeError):
            continue
        # From movie API
        movie = request_movie(mid)
        try:
            _ymd = movie['release_date']
            row = {
                'mid': mid,
                'adult': movie['adult'],
                'budget': movie['budget'],
                'language_id': movie['original_language'],
                'overview': movie['overview'],
                'popularity': movie['popularity'],
                'poster_path': movie['poster_path'],
                'runtime': movie['runtime'],
                # status should be 'Upcoming'
                'status': 'Upcoming',
                'tagline': movie['tagline'],
                'title': movie['title'],
                'video': movie['video'],
                'vote_average': movie['vote_average'],
                'vote_count': movie['vote_count'],
                'year': _ymd.split('-')[0],
                'revenue': movie['revenue'],
            }
        except (KeyError, AttributeError):
            # Skip movies with missing fields (e.g. API error payloads).
            continue
        upcoming_rows.append(row)
        i += 1
        # 1. Genres
        for genre_dict in movie['genres']:
            genre_rows.append({'mid': mid, 'genre_id': genre_dict['id']})
            if genre_dict['id'] not in known_genre_ids:
                known_genre_ids.add(genre_dict['id'])
                genre_info_rows.append({
                    'genre_id': genre_dict['id'],
                    'genre_name': genre_dict['name'],
                })
        # 2. Companies
        for company_dict in movie['production_companies']:
            company_rows.append({'mid': mid, 'company_id': company_dict['id']})
            if company_dict['id'] not in known_company_ids:
                known_company_ids.add(company_dict['id'])
                company_info_rows.append({
                    'company_id': company_dict['id'],
                    'company_name': company_dict['name'],
                    'country_id': company_dict['origin_country'],
                })
        # 3. Countries
        for country_dict in movie['production_countries']:
            country_rows.append({'mid': mid, 'country_id': country_dict['iso_3166_1']})
            if country_dict['iso_3166_1'] not in known_country_ids:
                known_country_ids.add(country_dict['iso_3166_1'])
                country_info_rows.append({
                    'country_id': country_dict['iso_3166_1'],
                    'country_name': country_dict['name'],
                })
        # 4. Spoken_languages
        for spoken_language_dict in movie['spoken_languages']:
            spoken_language_rows.append({
                'mid': mid,
                'language_id': spoken_language_dict['iso_639_1'],
            })
            if spoken_language_dict['iso_639_1'] not in known_language_ids:
                known_language_ids.add(spoken_language_dict['iso_639_1'])
                language_info_rows.append({
                    'language_id': spoken_language_dict['iso_639_1'],
                    'language_name': spoken_language_dict['english_name'],
                })
        # 5. People --> Can't load casts of upcoming movies from tmdb API
        if i % 20 == 0:
            print("Iteration: ", i)

# Materialize the buffered rows into the module-level tables.
df_upcoming = pd.DataFrame(upcoming_rows, columns=UPCOMING_COLUMNS)

def _extend(df, rows):
    """Append buffered row dicts to an existing table (no-op when empty)."""
    if not rows:
        return df
    return pd.concat([df, pd.DataFrame(rows)], ignore_index=True)

df_genres = _extend(df_genres, genre_rows)
df_genre_info = _extend(df_genre_info, genre_info_rows)
df_companies = _extend(df_companies, company_rows)
df_company_info = _extend(df_company_info, company_info_rows)
df_countries = _extend(df_countries, country_rows)
df_country_info = _extend(df_country_info, country_info_rows)
df_spoken_languages = _extend(df_spoken_languages, spoken_language_rows)
df_language_info = _extend(df_language_info, language_info_rows)
###########
# Cleaning empty values, consistent data types
##########
"""
Rule:
- if a field is numerical, -1 represents N.A.
- if a field is non-numerical, ""(empty string) reprsents N.A
"""
# Fill na with -1
df_genres['genre_id'] = df_genres['genre_id'].fillna(-1)
# NOTE(review): only genre_id is cast here, while df_companies below casts
# both mid and the id column -- confirm df_genres['mid'] is already integer.
df_genres = df_genres.astype({'genre_id': 'int32'})
df_companies['company_id'] = df_companies['company_id'].fillna(-1)
df_companies = df_companies.astype({'mid':'int32', 'company_id': 'int32'})
# Non-numerical id columns: empty string marks N.A.
df_company_info['country_id'] = df_company_info['country_id'].fillna("")
df_countries['country_id'] = df_countries['country_id'].fillna("")
df_spoken_languages['language_id'] = df_spoken_languages['language_id'].fillna("")
# Concatenate upcomings to movies
df_movies = pd.concat([movies, df_upcoming])
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
# Column headers exactly as they appear in the Byonic "Spectra" sheet.
# The embedded "\n" are part of the real header strings -- do not change.
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column_name = "Modification Type(s)"
observed_mz = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
# Residues that can carry N-/O-glycans.
selected_aa = {"N", "S", "T"}
# Regex patterns as raw strings: "\d" etc. in plain strings trigger
# invalid-escape SyntaxWarning/DeprecationWarning on modern Python.
regex_glycan_number_pattern = r"\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
# Byonic peptide with flanking residues, e.g. ".N[+203]GT.".
regex_pattern = r"\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
# UniProt accession with optional "-N" isoform suffix.
uniprot_regex = re.compile(r"(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
# One glycan block, e.g. "HexNAc(2)" -> ("HexNAc", "2").
glycan_regex = re.compile(r"(\w+)\((\d+)\)")
def filter_U_only(df):
    """Return True unless the group's only glycan label is "U" (unoccupied)."""
    uniques = df["Glycans"].unique()
    only_unoccupied = len(uniques) == 1 and bool(np.isin(uniques, "U").any())
    return not only_unoccupied
def filter_with_U(df):
    """Return True when the group has several glycan labels including "U"."""
    uniques = df["Glycans"].unique()
    return len(uniques) > 1 and bool(np.isin(uniques, "U").any())
def get_mod_value(amino_acid):
    """Return the signed mass of the residue's first modification (0 if none).

    Assumes the modification value string starts with an explicit "+" or
    "-" sign character.
    """
    if not amino_acid.mods:
        return 0
    raw = amino_acid.mods[0].value
    magnitude = float(raw[1:])
    return magnitude if raw.startswith("+") else -magnitude
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
    """Read a FASTA file into a dict mapping header (without ">") to sequence.

    :param fasta_file_path: path to the FASTA file.
    :param selected: optional collection of headers to keep; each header is
        matched as selected_prefix + header. Records not in the collection
        are skipped entirely.
    :param selected_prefix: prefix prepended to headers when matching.
    :return: dict of header -> concatenated sequence.
    """
    result = {}
    current_seq = None  # None while outside any (selected) record
    with open(fasta_file_path, "rt") as fasta_file:
        for line in fasta_file:
            line = line.strip()
            if line.startswith(">"):
                header = line[1:]
                if selected and selected_prefix + header not in selected:
                    # BUG FIX: previously an unselected header left
                    # current_seq pointing at the previous record, so this
                    # record's sequence lines were appended to the wrong
                    # entry.
                    current_seq = None
                    continue
                result[header] = ""
                current_seq = header
            elif current_seq is not None:
                # Sequence lines before any header are ignored instead of
                # raising KeyError.
                result[current_seq] += line
    return result
class Result:
    """Wrapper around a glycosylation quantification table.

    The wrapped DataFrame has a "Glycans" and a "Value" column, plus either
    "Position" alone (site-level) or "Peptides" + "Position" (peptide-level).
    "U" in "Glycans" marks an unoccupied site.
    """

    def __init__(self, df):
        self.df = df
        self.empty = df.empty

    def calculate_proportion(self, occupancy=True):
        """Return a copy with values normalized to per-group proportions.

        :param occupancy: when False, "U" (unoccupied) rows are dropped
            before normalizing.
        """
        data = self.df.copy()
        if not occupancy:
            data = data[data["Glycans"] != "U"]
        keys = ["Peptides", "Position"] if "Peptides" in data.columns else ["Position"]
        for _, grp in data.groupby(keys):
            denominator = grp["Value"].sum()
            for row_idx, row in grp.iterrows():
                data.at[row_idx, "Value"] = row["Value"] / denominator
        return data

    def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
        """Return the table indexed for summary output, values renamed to *name*.

        :param df: table to summarize; defaults to the wrapped table.
        :param trust_byonic: index by (Position, Glycans) instead of
            (Peptides, Glycans, Position).
        :param occupancy: when False, drop "U" rows first.
        """
        if df is None:
            df = self.df
        if not occupancy:
            df = df[df["Glycans"] != "U"]
        if trust_byonic:
            index_cols = ["Position", "Glycans"]
        else:
            index_cols = ["Peptides", "Glycans", "Position"]
        summary = df.set_index(index_cols)
        summary.rename(columns={"Value": name}, inplace=True)
        return summary
class GlypnirOComponent:
def __init__(self, filename, area_filename, replicate_id, condition_id, protein_name, minimum_score=0, trust_byonic=False, legacy=False):
    """Load and pre-filter one Byonic output + area file pair.

    :param filename: path to a Byonic .xlsx output, or an already-loaded
        "Spectra" DataFrame.
    :param area_filename: path to the area file (.xlsx or tab-separated),
        or an already-loaded DataFrame with "First Scan" and "Area".
    :param replicate_id: replicate identifier for this component.
    :param condition_id: condition identifier for this component.
    :param protein_name: protein to keep; passed to str.contains, so regex
        metacharacters in the name are significant.
    :param minimum_score: minimum Byonic score a PSM needs to be kept.
    :param trust_byonic: if True, downstream steps use Byonic's per-site
        glycan assignments instead of peptide-level totals.
    :param legacy: kept for backward compatibility.
    """
    if isinstance(filename, pd.DataFrame):
        data = filename.copy()
    else:
        data = pd.read_excel(filename, sheet_name="Spectra")
    if isinstance(area_filename, pd.DataFrame):
        file_with_area = area_filename
    elif area_filename.endswith("xlsx"):
        file_with_area = pd.read_excel(area_filename)
    else:
        file_with_area = pd.read_csv(area_filename, sep="\t")
    # Join PSMs to their quantified areas via the scan number.
    data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract(r"scan=(\d+)", expand=False))
    data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
    # BUG FIX: the original sorted into self.data and then immediately
    # overwrote self.data from the unsorted frame, discarding the sort.
    data = data.sort_values(by=["Area"], ascending=False)
    self.protein_name = protein_name
    self.replicate_id = replicate_id
    self.condition_id = condition_id
    self.data = data[data["Area"].notnull()]
    self.data = self.data[
        (self.data["Score"] >= minimum_score)
        & (self.data[protein_column_name].str.contains(protein_name))
    ]
    # Drop decoy (reversed) protein matches.
    self.data = self.data[~self.data[protein_column_name].str.contains(">Reverse")]
    self.empty = len(self.data.index) == 0
    self.row_to_glycans = {}
    self.glycan_to_row = {}
    self.trust_byonic = trust_byonic
    self.legacy = legacy
    self.sequon_glycosites = set()
    self.glycosylated_seq = set()
def calculate_glycan(self, glycan):
    """Sum the mass of a glycan composition string like "HexNAc(2)Hex(3)".

    Each "Name(count)" block contributes glycan_block_dict[Name] * count;
    malformed blocks that do not match glycan_regex are silently skipped.
    """
    total = 0
    buffer = ""
    for ch in glycan:
        buffer += ch
        if ch == ")":
            match = glycan_regex.search(buffer)
            if match:
                block_name = match.group(1)
                count = match.group(2)
                total += glycan_block_dict[block_name] * int(count)
            buffer = ""
    return total
def process(self):
    """Annotate every PSM row with sequence and glycosylation information.

    For each row:
    - parse the Byonic peptide string (".X...Y." with flanking residues),
    - record the stripped sequence, its start ("origin_start") and
      "Ending Position",
    - when trust_byonic is set, match each reported glycan's computed mass
      (rounded to 3 decimals) to the modified residue carrying it, filling
      per-site columns ("1_position", "2_position", ...), the ordered
      "position_to_glycan" list and a "glycoprofile" summary string,
    - otherwise normalize the glycan list and flag the peptide as
      glycosylated.
    """
    for i, r in self.data.iterrows():
        search = sequence_regex.search(r[sequence_column_name])
        if not search:
            # BUG FIX: Sequence(search.group(0)) previously ran before this
            # check and raised AttributeError on non-matching rows.
            continue
        seq = Sequence(search.group(0))
        stripped_seq = seq.to_stripped_string()
        glycans = []
        if pd.notnull(r[glycans_column_name]):
            glycans = r[glycans_column_name].split(",")
        self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
        origin_seq = r[starting_position_column_name] - 1
        self.data.at[i, "origin_start"] = origin_seq
        self.data.at[i, "Ending Position"] = r[starting_position_column_name] + len(self.data.at[i, "stripped_seq"])
        self.data.at[i, "position_to_glycan"] = ""
        if self.trust_byonic:
            # Map each glycan's mass (rounded to 3 decimals) back to its
            # composition string so modified residues can be matched below.
            glycan_dict = {}
            if glycans:
                self.row_to_glycans[i] = np.sort(glycans)
                for g in glycans:
                    glycan_dict[str(round(self.calculate_glycan(g), 3))] = g
                    self.glycan_to_row[g] = i
            glycosylation_count = 1
            glycan_reordered = []
            glycosylated_site = []
            # Skip the flanking residues at both ends of the ".X...Y." match.
            for aa in range(1, len(seq) - 1):
                if not seq[aa].mods:
                    continue
                mod_value = float(seq[aa].mods[0].value)
                mod_key = str(round(mod_value, 3))
                if mod_key in glycan_dict:
                    seq[aa].extra = "Glycosylated"
                    # -2: one for the leading flanking residue, one for the
                    # 1-based starting position.
                    pos = int(r[starting_position_column_name]) + aa - 2
                    self.sequon_glycosites.add(pos + 1)
                    position = "{}_position".format(str(glycosylation_count))
                    self.data.at[i, position] = seq[aa].value + str(pos + 1)
                    glycosylated_site.append(self.data.at[i, position] + "_" + str(round(mod_value)))
                    glycosylation_count += 1
                    glycan_reordered.append(glycan_dict[mod_key])
            if glycan_reordered:
                self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
            self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
        else:
            if pd.notnull(r[glycans_column_name]):
                # Sort the glycan list so identical compositions compare
                # equal regardless of reported order.
                glycans = r[glycans_column_name].split(",")
                glycans.sort()
                self.data.at[i, glycans_column_name] = ",".join(glycans)
                self.data.at[i, "glycosylation_status"] = True
                self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False):
    """Aggregate the processed PSM table into summed areas and wrap in Result.

    In trust_byonic mode the output is per glycosylation site ("Position",
    "Glycans", "Value"); sites covered by a peptide but carrying no glycan
    are reported as "U" (unoccupied). Otherwise output is per peptide
    ("Peptides", "Position", "Glycans", "Value"). For each unique ion
    (sequence/charge/glycoprofile/mass group) only the highest-area PSM is
    kept before summing.

    :param max_sites: kept for backward compatibility; currently unused.
    :param combine_d_u: kept for backward compatibility; currently unused.
    :param splitting_sites: kept for backward compatibility; currently unused.
    """
    rows = []
    temp = self.data.sort_values(["Area", "Score"], ascending=False)
    temp[glycans_column_name] = temp[glycans_column_name].fillna("None")
    if self.trust_byonic:
        seq_glycosites = sorted(self.sequon_glycosites)
        for _, g in temp.groupby(["stripped_seq", "z", "glycoprofile", observed_mz]):
            # Highest-area PSM represents this unique ion.
            unique_row = g.loc[g["Area"].idxmax()]
            # Known glycosylation sites covered by this peptide.
            seq_within = []
            for n in seq_glycosites:
                if unique_row[starting_position_column_name] <= n < unique_row["Ending Position"]:
                    seq_within.append(
                        unique_row["stripped_seq"][n - unique_row[starting_position_column_name]] + str(n))
            glycosylation_count = 0
            glycans = unique_row["position_to_glycan"].split(",")
            for col in unique_row.index:
                if col.endswith("_position") and pd.notnull(unique_row[col]):
                    pos = unique_row[col]
                    rows.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
                    # Occupied: remove from the covered-site list.
                    seq_within.remove(pos)
                    glycosylation_count += 1
            # Covered sites with no glycan attached count as unoccupied.
            for s in seq_within:
                rows.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
        if rows:
            result = pd.DataFrame(rows)
            out = result.groupby(["Position", "Glycans"]).agg(np.sum).reset_index()
        else:
            # BUG FIX: the empty frame previously used "Values"; every other
            # path (and Result.calculate_proportion) expects "Value".
            out = pd.DataFrame([], columns=["Position", "Glycans", "Value"])
    else:
        for key, g in temp.groupby(["stripped_seq", "z", glycans_column_name, starting_position_column_name, observed_mz]):
            unique_row = g.loc[g["Area"].idxmax()]
            glycan = key[2] if unique_row[glycans_column_name] != "None" else "U"
            rows.append({"Peptides": key[0], "Glycans": glycan, "Value": unique_row["Area"], "Position": key[3]})
        result = pd.DataFrame(rows)
        out = result.groupby(["Peptides", "Position", "Glycans"]).agg(np.sum).reset_index()
    return Result(out)
class GlypnirO:
def __init__(self, trust_byonic=False, get_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data = pd.DataFrame([])
self.get_uniprot = get_uniprot
def add_component(self, filename, area_filename, replicate_id, sample_id):
component = GlypnirOComponent(filename, area_filename, replicate_id, sample_id)
    def add_batch_component(self, component_list, minimum_score, protein=None, combine_uniprot_isoform=True, legacy=False):
        """Load every component described by ``component_list``.

        NOTE(review): this method contains ``yield`` and is therefore a
        generator in *both* branches; nothing runs until it is iterated —
        confirm callers iterate it even when ``protein`` is given.

        :param component_list: job description accepted by load_dataframe
            (list of dicts, DataFrame, or a .txt/.csv/.xlsx path).
        :param minimum_score: score threshold forwarded to GlypnirOComponent.
        :param protein: if given, analyse only this protein (one component
            per input row); otherwise every protein found in each file.
        :param combine_uniprot_isoform: group isoforms under their parent
            UniProt accession ("master_id") instead of the raw protein name.
        :param legacy: forwarded to GlypnirOComponent.
        """
        self.load_dataframe(component_list)
        # [accession, raw protein name] pairs collected for the UniProt table.
        protein_list = []
        if protein is not None:
            self.components["Protein"] = pd.Series([protein]*len(self.components.index), index=self.components.index)
            for i, r in self.components.iterrows():
                comp = GlypnirOComponent(r["filename"], r["area_filename"], r["replicate_id"], condition_id=r["condition_id"], protein_name=protein, minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
                self.components.at[i, "component"] = comp
                print("{} - {}, {} peptides has been successfully loaded".format(r["condition_id"], r["replicate_id"], str(len(comp.data.index))))
        else:
            components = []
            for i, r in self.components.iterrows():
                data = pd.read_excel(r["filename"], sheet_name="Spectra")
                protein_id_column = protein_column_name
                if combine_uniprot_isoform:
                    protein_id_column = "master_id"
                    # Derive master accession + isoform number for every row,
                    # leaving decoy (">Reverse") and contaminant rows as-is.
                    for i2, r2 in data.iterrows():
                        search = uniprot_regex.search(r2[protein_column_name])
                        if not r2[protein_column_name].startswith(">Reverse") and not r2[protein_column_name].endswith("(Common contaminant protein)"):
                            if search:
                                data.at[i2, "master_id"] = search.groupdict(default="")["accession"]
                                if not self.get_uniprot:
                                    protein_list.append([search.groupdict(default="")["accession"], r2[protein_column_name]])
                                if search.groupdict(default="")["isoform"] != "":
                                    data.at[i2, "isoform"] = int(search.groupdict(default="")["isoform"][1:])
                                else:
                                    data.at[i2, "isoform"] = 1
                            else:
                                data.at[i2, "master_id"] = r2[protein_column_name]
                                data.at[i2, "isoform"] = 1
                        else:
                            data.at[i2, "master_id"] = r2[protein_column_name]
                            data.at[i2, "isoform"] = 1
                # The area table may be Excel or tab-separated text.
                if r["area_filename"].endswith("xlsx"):
                    file_with_area = pd.read_excel(r["area_filename"])
                else:
                    file_with_area = pd.read_csv(r["area_filename"], sep="\t")
                # One component per protein group; decoys/contaminants skipped.
                for index, g in data.groupby([protein_id_column]):
                    u = index
                    if not u.startswith(">Reverse") and not u.endswith("(Common contaminant protein)"):
                        comp = GlypnirOComponent(g, file_with_area, r["replicate_id"],
                                                 condition_id=r["condition_id"], protein_name=u,
                                                 minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
                        if not comp.empty:
                            components.append({"filename": r["filename"], "area_filename": r["area_filename"], "condition_id": r["condition_id"], "replicate_id": r["replicate_id"], "Protein": u, "component": comp})
                yield i, r
                print(
                    "{} - {} peptides has been successfully loaded".format(r["condition_id"],
                                                                           r["replicate_id"]))
            self.components = pd.DataFrame(components, columns=list(self.components.columns) + ["component", "Protein"])
            if not self.get_uniprot:
                protein_df = pd.DataFrame(protein_list, columns=["Entry", "Protein names"])
                self.uniprot_parsed_data = protein_df
            #print(self.uniprot_parsed_data)
def load_dataframe(self, component_list):
if type(component_list) == list:
self.components = pd.DataFrame(component_list)
elif type(component_list) == pd.DataFrame:
self.components = component_list
elif type(component_list) == str:
if component_list.endswith(".txt"):
self.components = pd.read_csv(component_list, sep="\t")
elif component_list.endswith(".csv"):
self.components = pd.read_csv(component_list)
elif component_list.endswith(".xlsx"):
self.components = | pd.read_excel(component_list) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 28 14:18:51 2022
@author: Yang
"""
"""
This is some tools for analyzing the simulation result from AtomECS (SimECS).
"""
import scipy.constants as cts
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.rc('text', usetex = True)
#plt.rcParams['font.family'] = "Times New Roman"
import CONST
def get_T_Rho(FileName, cap_r, r_max, dr):
    """Temperature and radial-density statistics for one AtomECS frame.

    Reads a .trj dump, restricts to atoms inside a sphere of radius cap_r,
    derives the temperature from the COM-corrected velocity spread and
    delegates density/PSD numbers to get_Rho.
    Returns (N_cloud, T, R, rho_shell, rho_e, psd_e, psd_mean, psd_max).
    """
    mass = CONST.M_RB
    kb = CONST.KB
    # Load the dump (9 header lines) and name the columns.
    frame = pd.read_table(FileName, skiprows=9, sep=' ', skipinitialspace=True)
    frame.columns = ['id', 'atom', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'speed', 'vxy', 't']
    # Keep only atoms within the capture radius around the origin.
    inside = frame.x ** 2 + frame.y ** 2 + frame.z ** 2 <= cap_r ** 2
    cloud = frame[inside]
    # Velocities relative to the cloud's centre-of-mass motion.
    vx, vy, vz = get_velocities(cloud)
    vx = vx - np.mean(vx)
    vy = vy - np.mean(vy)
    vz = vz - np.mean(vz)
    v2 = vx ** 2 + vy ** 2 + vz ** 2
    # Equipartition: <(1/2) m v^2> = (3/2) kB T.
    T = ((0.5 * mass * v2) / (1.5 * kb)).mean()
    # NOTE(review): get_Rho's third return value is its *mean* density, so
    # the name rho_e below actually carries rho_mean — confirm intent.
    R, rho_shell, rho_e, psd_e, psd_mean, psd_max = get_Rho(frame, cap_r, r_max, dr, T)
    return (len(cloud), T, R, np.array(rho_shell), rho_e, psd_e, psd_mean, psd_max)
def get_Rho(trj, cap_r, r_max, dr, T):
    """Radial density profile plus density / phase-space-density summaries.

    Returns (R, rho_shell, rho_mean, psd_e, psd_mean, psd_max): shell outer
    radii, per-shell number densities, the mean density, and phase-space
    densities for the 1/e-radius, mean and peak densities at temperature T.
    """
    mass = CONST.M_RB
    kb = CONST.KB
    planck = CONST.H
    # Atoms inside the capture sphere define the cloud used for N counts.
    core = trj[(trj.x) ** 2 + (trj.y) ** 2 + (trj.z) ** 2 <= cap_r ** 2]
    # COM-centred coordinates of *all* atoms in the frame.
    xs, ys, zs = get_coord(trj, use_com=True)
    radii = np.sqrt(xs ** 2 + ys ** 2 + zs ** 2)
    # RMS radius and the equivalent uniform-sphere radius.
    rg = np.sqrt(np.mean(radii ** 2))
    r_sphere = np.sqrt(5 / 3) * rg
    # Histogram atom counts into spherical shells of thickness dr.
    nbins = int(r_max / dr)
    counts, R = np.histogram(radii, bins=nbins, range=(0, r_max), density=False)
    R = np.delete(R, 0)  # keep shell *outer* edges only
    # Shell volumes normalise counts into number densities.
    shell_volumes = (4 / 3) * np.pi * (R ** 3 - (R - dr) ** 3)
    rho_shell = counts / shell_volumes
    # Characteristic densities: 1/e radius, uniform-sphere mean, and peak.
    e_radius = get_eradius(rho_shell, dr)
    rho_e = len(core) / calc_volume(e_radius)
    rho_mean = len(core) / calc_volume(r_sphere)
    rho_max = np.max(rho_shell)
    # Thermal de Broglie wavelength at temperature T.
    lamb_da = planck / np.sqrt(2 * np.pi * mass * kb * T)
    psd_e = rho_e * lamb_da ** 3
    psd_mean = rho_mean * lamb_da ** 3
    psd_max = rho_max * lamb_da ** 3
    # NOTE(review): rho_e is computed above but the tuple's third slot
    # returns rho_mean; callers bind that slot to a name "rho_e" — confirm.
    return (np.array(R), np.array(rho_shell), rho_mean, psd_e, psd_mean, psd_max)
def get_coord(trj, use_com):
    """Return (x, y, z) arrays from table columns 2-4, optionally COM-centred.

    :param trj: trajectory frame whose positional columns 2, 3, 4 hold x, y, z.
    :param use_com: when True, shift each axis by its mean so the centre of
        mass sits at the origin.
    """
    xcol = np.array(trj.iloc[:, 2])
    ycol = np.array(trj.iloc[:, 3])
    zcol = np.array(trj.iloc[:, 4])
    if use_com == True:
        return (xcol - np.mean(xcol), ycol - np.mean(ycol), zcol - np.mean(zcol))
    # No centring requested: raw coordinates.
    return (xcol, ycol, zcol)
def get_velocities(trj):
    """Return the vx, vy, vz columns of a trajectory frame as a tuple."""
    return trj["vx"], trj["vy"], trj["vz"]
def calc_volume(r):
    """Volume of a sphere of radius r."""
    sphere_volume = (4 / 3) * np.pi * r ** 3
    return sphere_volume
def get_eradius(rho_shell, dr):
    """Radius (shell index * dr) where the density is closest to peak/e."""
    target = np.max(rho_shell) / np.e
    # argmin returns the first index at minimal |rho - target|, matching the
    # tie-breaking of min() over the index range.
    idx = int(np.argmin(np.abs(np.asarray(rho_shell) - target)))
    return idx * dr
def get_instant_laser_intersection(timestep, frequency, lx0 = 0.0, ly0 = 0.0, lz0 = 0.0):
    """Instantaneous laser-intersection point for a sinusoidally swept beam.

    :param timestep: simulation step (converted to seconds via 1e-6 s/step).
    :param frequency: sweep frequency in Hz.
    :param lx0: static sweep-centre offset on x (metres).
    :param ly0: static sweep-centre offset on y (metres).
    :param lz0: static sweep-centre offset on z (metres).
    :return: (lx, ly, lz) position in metres.
    """
    # 0.2 mm sweep amplitude, identical phase on all three axes.
    displacement = 0.0002 * np.sin(frequency * 2 * np.pi * 1e-6 * timestep)
    # BUG FIX: lx0/ly0/lz0 were accepted but never used, so any requested
    # offsets were silently ignored.  They now shift the sweep centre;
    # default calls (offsets of 0.0) behave exactly as before.
    lx = lx0 + displacement
    ly = ly0 + displacement
    lz = lz0 + displacement
    return (lx, ly, lz)
def get_Ti(FileName):
    """Initial temperature of a .trj velocity dump, in K rounded to 3 dp.

    Temperature follows from equipartition over the three velocity
    components: T = <(1/2) m v^2> / ((3/2) kB).
    """
    m_Rb = 86.909*cts.value('atomic mass constant')
    Kb = cts.value('Boltzmann constant')
    trj = pd.read_table(FileName, skiprows=9, sep=' ', skipinitialspace=True)
    trj.columns = ['id','atom','x','y','z','vx','vy','vz','speed','vxy']
    # BUG FIX: the components were previously read with iloc columns 6, 7, 8,
    # which under the column names above are vy, vz and speed (vx is column
    # 5), so vx was dropped and |v| double-counted.  Select by name instead.
    vxi = np.array(trj['vx'])
    vyi = np.array(trj['vy'])
    vzi = np.array(trj['vz'])
    speedsi2 = vxi**2 + vyi**2 + vzi**2
    Ti = ((0.5*m_Rb*speedsi2)/(1.5*Kb)).mean()
    return (round(Ti,3))
def trj_analysis (features, pre_directory, tot_steps, d_step, cap_r, r_max, dr, output_dir, dt, skipfirstframe):
# Initiate lists for storing calculated data
Tini = [] # initial temperature
Nini = [] # initial number
TFinals = [] # final temperature
NFinals = [] # final number
PSDFinalsE = [] # final psd with e_radius determined density
PSDFinalsMean = [] # final psd with mean density
PSDFinalsMax = [] # final psd wihm max density
for feature, tot_step in zip(features, tot_steps):
if feature == '':
feature = '.'
# defining the path
directory = pre_directory + feature +'/trjs/'
STEP = []
T = []
Natom = []
RHO = []
RHOE = []
PSDE = []
PSDMEAN = []
PSDMAX = []
# initialize the step number based on skipping the first step or not
if skipfirstframe == True:
step = d_step
else:
step = 0
if step == 0:
filename = '1.trj' # because we don't have "0.trj"
else:
filename = str(step) + '.trj'
counter = 0
#print(step, tot_step)
while step <= tot_step:
# print(counter)
num, temp, R, rho, rho_mean, psd_e, psd_mean, psd_max = get_T_Rho(directory + filename, cap_r, r_max, dr)
# accumulating rho values
if len(RHO) == 0:
RHO = rho
else:
RHO += rho
# append values to the storing lists
STEP.append(step)
T.append(temp)
Natom.append(num)
RHOE.append(rho_mean)
PSDE.append(psd_e)
PSDMEAN.append(psd_mean)
PSDMAX.append(psd_max)
step += d_step
filename = str(step) + '.trj'
counter += 1
RHO = RHO/counter
NFinals.append(num)
TFinals.append(temp)
PSDFinalsE.append(psd_e)
PSDFinalsMean.append(psd_mean)
PSDFinalsMax.append(psd_max)
| pd.DataFrame(RHO, R*1000) | pandas.DataFrame |
from pathlib import Path
import albumentations as ab
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.utils import resample
from tensorflow.python.keras.preprocessing.image import random_zoom
from self_supervised_3d_tasks.data.generator_base import DataGeneratorBase
from self_supervised_3d_tasks.data.make_data_generator import get_data_generators_internal, make_cross_validation
class KaggleGenerator(DataGeneratorBase):
    """Batch generator for the Kaggle retinopathy image data set.

    Loads images referenced by a dataset table, scales pixels to [0, 1],
    optionally augments each image (random zoom + flips) and encodes the
    0-4 severity grade either as-is or, with ``multilabel``, as a
    cumulative ("ordinal") multi-hot vector.
    """

    # Cumulative multi-hot encoding of the severity grades 0-4.
    _ORDINAL = {
        0: [1, 0, 0, 0, 0],
        1: [1, 1, 0, 0, 0],
        2: [1, 1, 1, 0, 0],
        3: [1, 1, 1, 1, 0],
        4: [1, 1, 1, 1, 1],
    }

    def __init__(
            self,
            data_path,
            file_list,
            dataset_table,
            batch_size=8,
            shuffle=False,
            suffix=".jpeg",
            pre_proc_func=None,
            multilabel=False,
            augment=False):
        # Per-instance options; batching/shuffling is driven by the base class.
        self.augment = augment
        self.multilabel = multilabel
        self.suffix = suffix
        self.dataset = dataset_table
        self.base_path = Path(data_path)
        super().__init__(file_list, batch_size, shuffle, pre_proc_func)

    def load_image(self, index):
        """Load one image by table row index as float32 scaled to [0, 1]."""
        stem = self.base_path / self.dataset.iloc[index][0]
        pixels = np.array(Image.open(stem.with_suffix(self.suffix)), dtype="float32")
        return pixels / 255.0

    def data_generation(self, list_files_temp):
        """Assemble one (images, labels) batch for the given row indices."""
        images = []
        labels = []
        for row_index in list_files_temp:
            image = self.load_image(row_index)
            label = self.dataset.iloc[row_index][1]
            if self.augment:
                # Random zoom followed by random horizontal/vertical flips.
                image = random_zoom(image, zoom_range=(0.85, 1.15), channel_axis=2, row_axis=0, col_axis=1, fill_mode='constant', cval=0.0)
                image = ab.HorizontalFlip()(image=image)["image"]
                image = ab.VerticalFlip()(image=image)["image"]
            if self.multilabel:
                # Grades outside 0-4 pass through unchanged, as before.
                label = np.array(self._ORDINAL.get(label, label))
            images.append(image)
            labels.append(label)
        return np.stack(images), np.stack(labels)
def __prepare_dataset(csv_file, sample_classes_uniform, shuffle_before_split):
dataset = | pd.read_csv(csv_file) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def main():
# 导入数据
# 订单与商品
prior = | pd.read_csv('D://A//data//instacart//order_products__prior.csv') | pandas.read_csv |
# TODO: decide whether to include MAX PV and MAX STC or the percentage of area usage
import pandas as pd
import os
def create_decentral_overview(components_csv_data):
    """Build the per-building overview of installed vs. maximum capacities.

    For every building ID found in the components table (first "_"-separated
    token of each ID, excluding "pv"/"central" tokens), the invested and
    maximum capacities of the decentral technologies are collected into one
    row.

    :param components_csv_data: components.csv result table with at least
        the columns "ID", "investment/kW" and "max. invest./kW".
    :return: DataFrame; first row holds the units, one row per building after.
    """
    # defining columns of the sheet including decentralized components
    decentral_columns = ["Building", "PV 1", "Max. PV 1", "PV 2", "Max. PV 2", "PV 3", "Max. PV 3", "PV 4", "Max. PV 4",
                         "PV 5", "Max. PV 5", "Installed PV", "Max. PV", "STC 1", "Max. STC 1", "STC 2", "Max. STC 2",
                         "STC 3", "Max. STC 3", "STC 4", "Max. STC 4", "STC 5", "Max. STC 5", "Installed STC",
                         "Max. STC", "Gasheating-System", "ASHP", "GCHP", "Battery-Storage", "Heat Transformer",
                         "Electric Heating"]
    # defining units for decentral components
    decentral_columns_units = {"Building": "", "PV 1": "(kW)", "Max. PV 1": "(kW)", "PV 2": "(kW)", "Max. PV 2": "(kW)",
                               "PV 3": "(kW)", "Max. PV 3": "(kW)", "PV 4": "(kW)", "Max. PV 4": "(kW)",
                               "PV 5": "(kW)", "Max. PV 5": "(kW)", "Installed PV": "(kW)", "Max. PV": "(kW)",
                               "STC 1": "(kW)", "Max. STC 1": "(kW)", "STC 2": "(kW)", "Max. STC 2": "(kW)",
                               "STC 3": "(kW)", "Max. STC 3": "(kW)", "STC 4": "(kW)", "Max. STC 4": "(kW)",
                               "STC 5": "(kW)", "Max. STC 5": "(kW)", "Installed STC": "(kW)", "Max. STC": "(kW)",
                               "Gasheating-System": "(kW)", "ASHP": "(kW)", "GCHP": "(kW)", "Battery-Storage": "(kWh)",
                               "Heat Transformer": "(kW)", "Electric Heating": "(kW)"}
    decentral_components = pd.DataFrame(columns=decentral_columns)
    # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed
    # in 2.0 — this module requires pandas < 2.0 as written.
    decentral_components = decentral_components.append(pd.Series(decentral_columns_units), ignore_index=True)
    # creating a list to reduce the number of rows
    decentral_components_list = ["_1_pv", "_2_pv", "_3_pv", "_4_pv", "_5_pv", "_gasheating_transformer",
                                 "_ashp_transformer", "_gchp_transformer", "_battery_storage", "_district_heat_link",
                                 "_electricheating_transformer", "_1_solarthermal_source_collector",
                                 "_2_solarthermal_source_collector", "_3_solarthermal_source_collector",
                                 "_4_solarthermal_source_collector", "_5_solarthermal_source_collector"]
    decentral_components_from_csv = []
    for comp in components_csv_data["ID"]:
        i = comp.split("_")
        if "pv" not in i[0]:
            if "central" not in i[0]:
                decentral_components_from_csv.append(i[0])
    # deduplicate building IDs (iteration order of a set is arbitrary)
    decentral_components_from_csv = set(decentral_components_from_csv)
    # import investment values from components.csv
    for i in decentral_components_from_csv:
        installed_power = []
        for comp in decentral_components_list:
            # investment values of pv
            variable_central = (components_csv_data.loc[components_csv_data["ID"].str.contains(str(i) + comp)]
                                ["investment/kW"]).values
            variable_central = float(variable_central[0]) if variable_central.size > 0 else 0
            installed_power.append(variable_central)
        maximums_pv = []
        maximums_st = []
        installed_pv = 0.0
        installed_st = 0.0
        for roofnum in range(5):
            # max values for each pv system
            maximum_pv = (components_csv_data.loc[components_csv_data["ID"].str.contains(
                str(i) + "_" + str(roofnum+1) + "_pv_source")]["max. invest./kW"]).values
            maximum_st = (components_csv_data.loc[components_csv_data["ID"].str.contains(
                str(i) + "_" + str(roofnum+1) + "_solarthermal_source_collector")]["max. invest./kW"]).values
            if maximum_pv.size > 0:
                maximums_pv.append(float(maximum_pv[0]))
            else:
                maximums_pv.append(0)
            if maximum_st.size > 0:
                maximums_st.append(float(maximum_st[0]))
            else:
                maximums_st.append(0)
            installed_pv_roof = (components_csv_data.loc[components_csv_data["ID"].str.contains(str(i)
                                 + decentral_components_list[roofnum])]["investment/kW"]).values
            installed_pv_roof = float(installed_pv_roof[0]) if installed_pv_roof.size > 0 else 0
            installed_pv = installed_pv + installed_pv_roof
            installed_st_roof = (components_csv_data.loc[components_csv_data["ID"].str.contains(
                str(i) + decentral_components_list[-(5-roofnum)])]["investment/kW"]).values
            installed_st_roof = float(installed_st_roof[0]) if installed_st_roof.size > 0 else 0
            installed_st = installed_st + installed_st_roof
        max_total_pv = sum(maximums_pv)
        max_total_st = sum(maximums_st)
        # dict to append the values
        # installed_power indices (see decentral_components_list above):
        # 0-4 PV roofs 1-5, 5 gas heating, 6 ASHP, 7 GCHP, 8 battery,
        # 9 district-heat link, 10 electric heating, 11-15 STC roofs 1-5
        decentral_components_dict = {"Building": str(i),
                                     "PV 1": installed_power[0], "Max. PV 1": maximums_pv[0],
                                     "PV 2": installed_power[1], "Max. PV 2": maximums_pv[1],
                                     "PV 3": installed_power[2], "Max. PV 3": maximums_pv[2],
                                     "PV 4": installed_power[3], "Max. PV 4": maximums_pv[3],
                                     "PV 5": installed_power[4], "Max. PV 5": maximums_pv[4],
                                     "Installed PV": installed_pv, "Max. PV": max_total_pv,
                                     "STC 1": installed_power[11], "Max. STC 1": maximums_st[0],
                                     "STC 2": installed_power[12], "Max. STC 2": maximums_st[1],
                                     "STC 3": installed_power[13], "Max. STC 3": maximums_st[2],
                                     "STC 4": installed_power[14], "Max. STC 4": maximums_st[3],
                                     "STC 5": installed_power[15], "Max. STC 5": maximums_st[4],
                                     "Installed STC": installed_st, "Max. STC": max_total_st,
                                     "Gasheating-System": installed_power[5], "ASHP": installed_power[6],
                                     "GCHP": installed_power[7], "Battery-Storage": installed_power[8],
                                     "Heat Transformer": installed_power[9],
                                     "Electric Heating": installed_power[10]}
        decentral_components = decentral_components.append(pd.Series(decentral_components_dict), ignore_index=True)
    return decentral_components
def create_central_overview(components_csv_data):
    """Collect the invested capacity of every central component.

    A component counts as central when its ID starts with "central", ends
    with "transformer", "storage" or "link" and contains no repeated
    "_"-separated name part.

    :param components_csv_data: components.csv result table with at least
        the columns "ID" and "investment/kW".
    :return: DataFrame with the columns ["label", "investment"].
    """
    # defining columns of the sheet including centralized components
    central_columns = ["label", "investment"]
    central_components = []
    for comp in components_csv_data["ID"]:
        k = comp.split("_")
        if k[0] == "central":
            if k[-1] in ["transformer", "storage", "link"]:
                # duplicated name parts mark combined/auxiliary rows
                if len(k) == len(set(k)):
                    central_components.append(comp)
    rows = []
    for comp in central_components:
        # investment value of the central component; note that str.contains
        # does a regex substring match, so IDs must not contain regex
        # metacharacters
        variable_central = (components_csv_data.loc[components_csv_data["ID"].str.contains(comp)]["investment/kW"]).values
        variable_central = float(variable_central[0]) if variable_central.size > 0 else 0
        rows.append({"label": comp, "investment": variable_central})
    # FIX: build the frame in one go instead of row-wise DataFrame.append,
    # which was deprecated in pandas 1.4 and removed in pandas 2.0.
    central_values = pd.DataFrame(rows, columns=central_columns)
    return central_values
def urban_district_upscaling_post_processing(components: str):
    """Summarise a components.csv result file into overview.xlsx.

    Reads the optimisation result table, replaces the "---" placeholders
    with 0, builds the decentral and central overview tables and writes
    both as sheets of overview.xlsx next to this module.

    :param components: path to the components.csv result file.
    """
    raw = pd.read_csv(components)
    # "---" marks unused entries in the result file; treat them as zero.
    table = raw.replace(to_replace="---", value=0)
    decentral = create_decentral_overview(table)
    central = create_central_overview(table)
    # Write both sheets next to this module.
    target = os.path.dirname(__file__) + "/overview.xlsx"
    writer = pd.ExcelWriter(target,
                            engine="xlsxwriter")
    decentral.to_excel(writer, "decentral_components", index=False)
    central.to_excel(writer, "central_components", index=False)
    print("Overview created.")
    writer.save()
if __name__ == "__main__":
# csv which contains the exportable data
components_csv_data = | pd.read_csv("components.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
from easy_base import EasySklearn
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
class EasySklearnClustering(EasySklearn):
def __init__(self):
EasySklearn.__init__(self)
self.n_clusters = 4
@property
def default_models_(self):
return {
'KM': {'clf': KMeans(n_clusters=self.n_clusters, random_state=9),
'param': {}}
}
@property
def default_models_name_(self):
return [model for model in self.default_models_]
# Plot the clustering result
def plot_cluster(self, X, clf):
clf.fit(X)
n_class = clf.cluster_centers_.shape[0]
new_df = | pd.DataFrame(X) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Telecom Churn Case Study
# With 21 predictor variables we need to predict whether a particular customer will switch to another telecom provider or not. In telecom terminology, this is referred to as churning and not churning, respectively.
# ### Step 1: Importing and Merging Data
# --- Data loading & merge: three per-customer tables joined on customerID ---
# NOTE: bare expressions like .head()/.shape only display output when run as
# a notebook; as a plain script they are no-ops.
# Suppressing Warnings
import warnings
warnings.filterwarnings('ignore')
# Importing Pandas and NumPy
import pandas as pd, numpy as np
# Importing all datasets
churn_data = pd.read_csv("churn_data.csv")
churn_data.head()
customer_data = pd.read_csv("customer_data.csv")
customer_data.head()
internet_data = pd.read_csv("internet_data.csv")
internet_data.head()
# #### Combining all data files into one consolidated dataframe
# Merging on 'customerID'
df_1 = pd.merge(churn_data, customer_data, how='inner', on='customerID')
# Final dataframe with all predictor variables
telecom = pd.merge(df_1, internet_data, how='inner', on='customerID')
# ### Step 2: Inspecting the Dataframe
telecom.OnlineBackup.value_counts()
# Let's see the head of our master dataset
telecom.head()
# Let's check the dimensions of the dataframe
telecom.shape
# let's look at the statistical aspects of the dataframe
telecom.describe()
# Let's see the type of each column
telecom.info()
# ### Step 3: Data Preparation
# #### Converting some binary variables (Yes/No) to 0/1
# +
# List of variables to map
varlist = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents']
# Defining the map function
def binary_map(x):
    """Map a Yes/No Series to 1/0 (any other value becomes NaN)."""
    yes_no = {'Yes': 1, 'No': 0}
    return x.map(yes_no)
# --- One-hot encoding of categorical predictors ---
# Applying the function to the binary telecom columns
telecom[varlist] = telecom[varlist].apply(binary_map)
# -
telecom.head()
# #### For categorical variables with multiple levels, create dummy features (one-hot encoded)
# +
# Creating a dummy variable for some of the categorical variables and dropping the first one.
dummy1 = pd.get_dummies(telecom[['Contract', 'PaymentMethod', 'gender', 'InternetService']], drop_first=True)
# Adding the results to the master dataframe
telecom = pd.concat([telecom, dummy1], axis=1)
# -
telecom.head()
# +
# Creating dummy variables for the remaining categorical variables and dropping the level with big names.
# NOTE: the positional axis argument `.drop([...], 1)` is deprecated in
# modern pandas; keyword form would be axis=1.
# Creating dummy variables for the variable 'MultipleLines'
ml = pd.get_dummies(telecom['MultipleLines'], prefix='MultipleLines')
# Dropping MultipleLines_No phone service column
ml1 = ml.drop(['MultipleLines_No phone service'], 1)
#Adding the results to the master dataframe
telecom = pd.concat([telecom,ml1], axis=1)
# Creating dummy variables for the variable 'OnlineSecurity'.
# NOTE: the throwaway name "os" would shadow the os module if it were imported.
os = pd.get_dummies(telecom['OnlineSecurity'], prefix='OnlineSecurity')
os1 = os.drop(['OnlineSecurity_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,os1], axis=1)
# Creating dummy variables for the variable 'OnlineBackup'.
ob = pd.get_dummies(telecom['OnlineBackup'], prefix='OnlineBackup')
ob1 = ob.drop(['OnlineBackup_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,ob1], axis=1)
# Creating dummy variables for the variable 'DeviceProtection'.
dp = pd.get_dummies(telecom['DeviceProtection'], prefix='DeviceProtection')
dp1 = dp.drop(['DeviceProtection_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,dp1], axis=1)
# Creating dummy variables for the variable 'TechSupport'.
ts = pd.get_dummies(telecom['TechSupport'], prefix='TechSupport')
ts1 = ts.drop(['TechSupport_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,ts1], axis=1)
# Creating dummy variables for the variable 'StreamingTV'.
st =pd.get_dummies(telecom['StreamingTV'], prefix='StreamingTV')
st1 = st.drop(['StreamingTV_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,st1], axis=1)
# Creating dummy variables for the variable 'StreamingMovies'.
sm = pd.get_dummies(telecom['StreamingMovies'], prefix='StreamingMovies')
sm1 = sm.drop(['StreamingMovies_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,sm1], axis=1)
# -
telecom.head()
# #### Dropping the repeated variables
# We have created dummies for the below variables, so we can drop them
telecom = telecom.drop(['Contract','PaymentMethod','gender','MultipleLines','InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
                        'TechSupport', 'StreamingTV', 'StreamingMovies'], 1)
# Drop rows whose TotalCharges could not be parsed (see exploration below).
telecom = telecom.loc[~telecom.index.isin([488, 753, 936, 1082, 1340, 3331, 3826, 4380, 5218, 6670, 6754])]
# --- Type conversion, outlier/missing-value checks and train/test split ---
# +
# telecom['TotalCharges'].sample(40)
# telecom['TotalCharges'].str.replace('.', '', 1).str.contains('\D',regex=True).sum()
# telecom[telecom['TotalCharges'].str.replace('.', '', 1).str.contains('\D',regex=True)].TotalCharges.index
# -
#The variable was imported as a string; we need to convert it to float
telecom['TotalCharges'] = telecom['TotalCharges'].str.strip().astype('float64')
telecom.info()
# Now you can see that you have all variables as numeric.
# #### Checking for Outliers
# Checking for outliers in the continuous variables
num_telecom = telecom[['tenure','MonthlyCharges','SeniorCitizen','TotalCharges']]
# Checking outliers at 25%, 50%, 75%, 90%, 95% and 99%
num_telecom.describe(percentiles=[.25, .5, .75, .90, .95, .99])
# From the distribution shown above, you can see that there are no outliers in your data. The numbers are gradually increasing.
# #### Checking for Missing Values and Imputing Them
# Adding up the missing values (column-wise)
telecom.isnull().sum()
# It means that 11/7043 = 0.001561834 i.e 0.1%, best is to remove these observations from the analysis
# Checking the percentage of missing values
round(100*(telecom.isnull().sum()/len(telecom.index)), 2)
# Removing NaN TotalCharges rows
telecom = telecom[~np.isnan(telecom['TotalCharges'])]
# Checking percentage of missing values after removing the missing values
round(100*(telecom.isnull().sum()/len(telecom.index)), 2)
# Now we don't have any missing values
# ### Step 4: Test-Train Split
from sklearn.model_selection import train_test_split
# +
# Putting feature variable to X
X = telecom.drop(['Churn','customerID'], axis=1)
X.head()
# +
# Putting response variable to y
y = telecom['Churn']
y.head()
# -
# Splitting the data into train and test (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=100)
# --- Feature scaling and correlation-driven feature pruning ---
# ### Step 5: Feature Scaling
from sklearn.preprocessing import StandardScaler
# +
# NOTE: the scaler is fitted on the training split only (no test leakage here).
scaler = StandardScaler()
X_train[['tenure','MonthlyCharges','TotalCharges']] = scaler.fit_transform(X_train[['tenure','MonthlyCharges','TotalCharges']])
X_train.head()
# -
### Checking the Churn Rate
churn = (sum(telecom['Churn'])/len(telecom['Churn'].index))*100
churn
# We have almost 27% churn rate
# ### Step 6: Looking at Correlations
# Importing matplotlib and seaborn
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Let's see the correlation matrix
plt.figure(figsize = (20,10))        # Size of the figure
sns.heatmap(telecom.corr(),annot = True)
plt.show()
# #### Dropping highly correlated dummy variables
# Each "_No" dummy is the complement of its "_Yes" twin, hence redundant.
X_test = X_test.drop(['MultipleLines_No','OnlineSecurity_No','OnlineBackup_No','DeviceProtection_No','TechSupport_No',
                       'StreamingTV_No','StreamingMovies_No'], 1)
X_train = X_train.drop(['MultipleLines_No','OnlineSecurity_No','OnlineBackup_No','DeviceProtection_No','TechSupport_No',
                       'StreamingTV_No','StreamingMovies_No'], 1)
# #### Checking the Correlation Matrix
# After dropping highly correlated variables now let's check the correlation matrix again.
plt.figure(figsize = (20,10))
sns.heatmap(X_train.corr(),annot = True)
plt.show()
# --- Baseline GLM fit and RFE-based feature selection ---
# ### Step 7: Model Building
# Let's start by splitting our data into a training set and a test set.
# #### Running Your First Training Model
import statsmodels.api as sm
# Logistic regression model (binomial GLM with an explicit intercept column)
logm1 = sm.GLM(y_train,(sm.add_constant(X_train)), family = sm.families.Binomial())
logm1.fit().summary()
# ### Step 8: Feature Selection Using RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
from sklearn.feature_selection import RFE
rfe = RFE(logreg, 15)             # running RFE with 15 variables as output
rfe = rfe.fit(X_train, y_train)
rfe.support_
list(zip(X_train.columns, rfe.support_, rfe.ranking_))
col = X_train.columns[rfe.support_]
X_train.columns[~rfe.support_]
# ##### Assessing the model with StatsModels
X_train_sm = sm.add_constant(X_train[col])
logm2 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm2.fit()
res.summary()
# Getting the predicted values on the train set
y_train_pred = res.predict(X_train_sm)
y_train_pred[:10]
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred[:10]
# ##### Creating a dataframe with the actual churn flag and the predicted probabilities
y_train_pred_final = pd.DataFrame({'Churn':y_train.values, 'Churn_Prob':y_train_pred})
y_train_pred_final['CustID'] = y_train.index
y_train_pred_final.head()
# --- Iterative evaluation: 0.5-cutoff predictions, confusion matrix, and
# --- VIF-driven removal of PhoneService and TotalCharges ---
# ##### Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
# +
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
# Let's see the head
y_train_pred_final.head()
# -
from sklearn import metrics
# Confusion matrix
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
print(confusion)
# +
# Predicted     not_churn    churn
# Actual
# not_churn        3270      365
# churn            579       708
# -
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# #### Checking VIFs
# Check for the VIF values of the feature variables.
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# There are a few variables with high VIF. It's best to drop these variables as they aren't helping much with prediction and unnecessarily making the model complex. The variable 'PhoneService' has the highest VIF. So let's start by dropping that.
col = col.drop('PhoneService', 1)
col
# Let's re-run the model using the selected variables
X_train_sm = sm.add_constant(X_train[col])
logm3 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm3.fit()
res.summary()
y_train_pred = res.predict(X_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final['Churn_Prob'] = y_train_pred
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
y_train_pred_final.head()
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# So overall the accuracy hasn't dropped much.
# ##### Let's check the VIFs again
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# Let's drop TotalCharges since it has a high VIF
col = col.drop('TotalCharges')
col
# Let's re-run the model using the selected variables
X_train_sm = sm.add_constant(X_train[col])
logm4 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm4.fit()
res.summary()
y_train_pred = res.predict(X_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final['Churn_Prob'] = y_train_pred
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
y_train_pred_final.head()
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# The accuracy is still practically the same.
# ##### Let's now check the VIFs again
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# All variables have a good value of VIF. So we need not drop any more variables and we can proceed with making predictions using this model only
# Let's take a look at the confusion matrix again
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
confusion
# Actual/Predicted     not_churn    churn
#        not_churn        3269      366
#        churn            595       692
# Let's check the overall accuracy.
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted)
# ## Metrics beyond simply accuracy
TP = confusion[1,1] # true positive
TN = confusion[0,0] # true negatives
FP = confusion[0,1] # false positives
FN = confusion[1,0] # false negatives
# Let's see the sensitivity of our logistic regression model
TP / float(TP+FN)
# Let us calculate specificity
TN / float(TN+FP)
# Calculate false postive rate - predicting churn when customer does not have churned
print(FP/ float(TN+FP))
# positive predictive value
print (TP / float(TP+FP))
# Negative predictive value
print (TN / float(TN+ FN))
# ### Step 9: Plotting the ROC Curve
# An ROC curve demonstrates several things:
#
# - It shows the tradeoff between sensitivity and specificity (any increase in sensitivity will be accompanied by a decrease in specificity).
# - The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test.
# - The closer the curve comes to the 45-degree diagonal of the ROC space, the less accurate the test.
def draw_roc( actual, probs ):
    """Plot an ROC curve, with the AUC shown in the legend.

    Parameters
    ----------
    actual : array-like
        True binary class labels.
    probs : array-like
        Predicted probabilities (scores) for the positive class.
    """
    false_pos_rate, true_pos_rate, _ = metrics.roc_curve( actual, probs, drop_intermediate = False )
    area = metrics.roc_auc_score( actual, probs )
    roc_label = 'ROC curve (area = %0.2f)' % area
    plt.figure(figsize=(5, 5))
    plt.plot( false_pos_rate, true_pos_rate, label=roc_label )
    # 45-degree diagonal = performance of a random classifier
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate or [1 - True Negative Rate]')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
    return None
# Compute ROC inputs on the training predictions and draw the curve.
fpr, tpr, thresholds = metrics.roc_curve( y_train_pred_final.Churn, y_train_pred_final.Churn_Prob, drop_intermediate = False )
list(zip(fpr,tpr,thresholds))
draw_roc(y_train_pred_final.Churn, y_train_pred_final.Churn_Prob)
# ### Step 10: Finding Optimal Cutoff Point
# Optimal cutoff probability is that prob where we get balanced sensitivity and specificity
# Let's create columns with different probability cutoffs
# One indicator column per candidate cutoff 0.0, 0.1, ..., 0.9.
numbers = [float(x)/10 for x in range(10)]
for i in numbers:
    y_train_pred_final[i]= y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > i else 0)
y_train_pred_final.head()
# +
# Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.
cutoff_df = pd.DataFrame( columns = ['prob','accuracy','sensi','speci'])
from sklearn.metrics import confusion_matrix
# TP = confusion[1,1] # true positive
# TN = confusion[0,0] # true negatives
# FP = confusion[0,1] # false positives
# FN = confusion[1,0] # false negatives
num = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for i in num:
    cm1 = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final[i] )
    total1=sum(sum(cm1))
    accuracy = (cm1[0,0]+cm1[1,1])/total1
    speci = cm1[0,0]/(cm1[0,0]+cm1[0,1])
    sensi = cm1[1,1]/(cm1[1,0]+cm1[1,1])
    cutoff_df.loc[i] =[ i ,accuracy,sensi,speci]
print(cutoff_df)
# -
# Let's plot accuracy sensitivity and specificity for various probabilities.
cutoff_df.plot.line(x='prob', y=['accuracy','sensi','speci'])
plt.show()
# #### From the curve above, 0.3 is the optimum point to take it as a cutoff probability.
# +
y_train_pred_final['final_predicted'] = y_train_pred_final.Churn_Prob.map( lambda x: 1 if x > 0.3 else 0)
y_train_pred_final.head()
# -
# Let's check the overall accuracy.
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.final_predicted)
confusion2 = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.final_predicted )
confusion2
# Re-derive the four confusion-matrix cells at the 0.3 cutoff.
TP = confusion2[1,1] # true positive
TN = confusion2[0,0] # true negatives
FP = confusion2[0,1] # false positives
FN = confusion2[1,0] # false negatives
# Let's see the sensitivity of our logistic regression model
TP / float(TP+FN)
# Let us calculate specificity
TN / float(TN+FP)
# Calculate false positive rate - predicting churn when customer has not churned
print(FP/ float(TN+FP))
# Positive predictive value
print (TP / float(TP+FP))
# Negative predictive value
print (TN / float(TN+ FN))
#
#
#
#
# ## Precision and Recall
# - **_Looking at the confusion matrix again_**
# NOTE: this uses the 0.5-cutoff column `predicted`, not `final_predicted`.
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
confusion
# ##### Precision
# TP / (TP + FP)
confusion[1,1]/(confusion[0,1]+confusion[1,1])
# ##### Recall
# TP / (TP + FN)
confusion[1,1]/(confusion[1,0]+confusion[1,1])
# Using sklearn utilities for the same
from sklearn.metrics import precision_score, recall_score
# ?precision_score
precision_score(y_train_pred_final.Churn, y_train_pred_final.predicted)
recall_score(y_train_pred_final.Churn, y_train_pred_final.predicted)
# ### Precision and recall tradeoff
from sklearn.metrics import precision_recall_curve
# NOTE(review): the next line is a no-op bare expression (leftover notebook cell); safe to remove.
y_train_pred_final.Churn, y_train_pred_final.predicted
p, r, thresholds = precision_recall_curve(y_train_pred_final.Churn, y_train_pred_final.Churn_Prob)
# Precision (green) and recall (red) as functions of the threshold.
plt.plot(thresholds, p[:-1], "g-")
plt.plot(thresholds, r[:-1], "r-")
plt.show()
# ### Step 11: Making predictions on the test set
# Transform only (no fit) — assumes `scaler` was fit on the training split earlier; confirm upstream.
X_test[['tenure','MonthlyCharges','TotalCharges']] = scaler.transform(X_test[['tenure','MonthlyCharges','TotalCharges']])
X_test = X_test[col]
X_test.head()
X_test_sm = sm.add_constant(X_test)
# Making predictions on the test set
y_test_pred = res.predict(X_test_sm)
y_test_pred[:10]
# Converting y_test_pred (an array) to a dataframe
y_pred_1 = pd.DataFrame(y_test_pred)
# Let's see the head
y_pred_1.head()
# Converting y_test to dataframe
y_test_df = pd.DataFrame(y_test)
import os
import numpy as np
from numpy.core.defchararray import index
import pandas as pd
import networkx as nx
import json
import util
# --- Configuration ----------------------------------------------------------
interval = 2 # minutes
dataname = 'KL'
version = 'v1'
# Final dataset name, e.g. 'KL_v1'.
dataname = '%s_%s' % (dataname, version)
# NOTE(review): hard-coded absolute Windows paths — consider making these configurable.
input_dir = 'D:/Gabby/OneDrive/WORK/COD/PRE/DataParse/hkgovDataAPI/data/irnAvgSpeed/%s'%version
outputdir = 'D:/Gabby/OneDrive/WORK/COD/PRE/Bigscity-LibCity/raw_data/%s/' % (dataname)
# Create the output directory if it does not exist yet.
util.ensure_dir(outputdir)
# Path prefix for the .geo / .rel output files written below.
outputdir_name = outputdir + dataname
def load_json_as_dict(filename):
    """Read a JSON file and return its parsed content.

    Parameters
    ----------
    filename : str
        Path of the JSON file to read.

    Returns
    -------
    dict
        Parsed JSON content (a dict for all files read by this script).
    """
    # Explicit encoding so reads do not depend on the OS locale (Windows cp125x vs utf-8).
    with open(filename, 'r', encoding='utf-8') as fp:
        return json.load(fp)
# NOTE(review): this reassigns `input_dir` set above, and mixes \ and / separators
# with unescaped backslashes — prefer a raw string or pathlib.
input_dir = 'D:\Data\Pre\Traffic\HK_Gov_road\cralwed_ss7049b\data/road_network\strategyRoadNetwork\generated/%s/' % dataname
strategy_CENTERLINE_df = pd.read_csv(input_dir + '/strategy_CENTERLINE_df.csv')
edge_weight1_dict = load_json_as_dict(input_dir + 'edge_weight1_dict.json')
ROUTE_idx_dict = load_json_as_dict(input_dir + 'ROUTE_idx_dict.json')
idx_ROUTE_dict = load_json_as_dict(input_dir + 'idx_ROUTE_dict.json')
# ROUTE_ID: keep only known routes and remap route ids to integer indices.
strategy_CENTERLINE_df.ROUTE_ID = strategy_CENTERLINE_df.ROUTE_ID.astype(str)
strategy_CENTERLINE_df = strategy_CENTERLINE_df[strategy_CENTERLINE_df.ROUTE_ID.isin(ROUTE_idx_dict.keys())]
strategy_CENTERLINE_df.replace({'ROUTE_ID': ROUTE_idx_dict}, inplace=True)
# Build the LibCity .geo table (one row per road segment).
features = ['ROUTE_ID', 'SHAPE_Length', 'ELEVATION', 'TRAVEL_DIRECTION']
rename_features = ['geo_id', 'length', 'elevation', 'direction']
geo = strategy_CENTERLINE_df[features]
# NOTE(review): `geo` is a slice of strategy_CENTERLINE_df; assigning a column here
# may raise SettingWithCopyWarning — consider .copy() first.
geo['type'] = 'LineString'
geo.rename(columns=dict(zip(features, rename_features)), inplace=True)
geo.to_csv(outputdir_name+'.geo', index=False)
# DG = nx.read_gpickle(input_dir + 'strategy_roadnetwork_attr.gpkl')
# edgesView = list(DG.edges.data())
# edge_df = pd.DataFrame(edgesView, columns=['origin_id', 'destination_id', 'weight_dict'])
# edge_df['link_weight'] = edge_df['weight_dict'].apply(pd.Series, index=['weight'])
# Build the LibCity .rel table from the precomputed shortest-path distances.
# Keys come from JSON, so source/destination are strings here.
rel = []
rel_id = 0
all_pairs_dijkstra_path_length = load_json_as_dict(input_dir + 'all_pairs_dijkstra_path_length.json')
for source in all_pairs_dijkstra_path_length:
    for destination in all_pairs_dijkstra_path_length[source]:
        print([source, destination, all_pairs_dijkstra_path_length[source][destination]])
        rel.append([rel_id, 'geo', source, destination, all_pairs_dijkstra_path_length[source][destination]])
        rel_id += 1
rel_df = pd.DataFrame(rel, columns=['rel_id', 'type', 'origin_id', 'destination_id', 'dist'])
# rel_df = edge_df[['origin_id', 'destination_id', 'link_weight']]
# rel_df['type'] = 'geo'
# rel_df['rel_id'] = range(rel_df.shape[0])
# rel_df = rel_df[['rel_id', 'type', 'origin_id', 'destination_id', 'link_weight']]
rel_df.to_csv(outputdir_name+'.rel', index=False)
print('rel_df shape: ', rel_df.shape)
# Load the formatted average-speed dataframe (one week of data).
fmt_df_dir = 'D:/Gabby/OneDrive/WORK/COD/PRE/DataParse/hkgovDataAPI/data/irnAvgSpeed/'
fmt_irnAvgSpeed_df_filename = fmt_df_dir + 'irnAvgSpeed_20210908_20210914.pkl.gz'
fmt_irnAvgSpeed_df1 = pd.read_pickle(fmt_irnAvgSpeed_df_filename, compression='gzip')
"""
Empirical dynamic modelling (EDM) toolkit
Work in progress
"""
# load standard libraries
import time as _time
# load 3rd party libraries
import numpy as _np
import pandas as _pd
import xarray as _xr
import matplotlib.pyplot as _plt
#from deprecated import deprecated
from multiprocessing import cpu_count as _cpu_count
from joblib import Parallel as _Parallel
from joblib import delayed as _delayed
from scipy.stats import binned_statistic_dd as _binned_statistic_dd
# load my external libraries
from johnspythonlibrary2.Plot import finalizeSubplot as _finalizeSubplot, finalizeFigure as _finalizeFigure, subTitle as _subtitle
###################################################################################
#%% signal generation
# various generated signals to test code in this library
# load my external signal generation functions
from johnspythonlibrary2.Process.SigGen import lorentzAttractor, tentMap, saved_lorentzAttractor#, coupledHarmonicOscillator, predatorPrey,
def twoSpeciesWithBidirectionalCausality(N,tau_d=0,IC=None,plot=False,params=None):
    """
    Coupled two equation system with bi-directional causality.
    Eq. 1 in Ye 2015

    Parameters
    ----------
    N : int
        Number of time steps to generate.
    tau_d : int
        Time delay (in steps) in the coupling of x into y.
    IC : list of two floats, optional
        Initial conditions [x0, y0].  Default is [0.2, 0.4].
    plot : bool
        True - plot both generated signals.
    params : dict, optional
        Model coefficients.  Default is
        {'Ax':3.78, 'Ay':3.77, 'Bxy':0.07, 'Byx':0.08}.

    Returns
    -------
    x, y : xarray.core.dataarray.DataArray
        The two coupled signals, each with an integer 't' coordinate of length N.

    Reference
    ---------
    Eq. 1 in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4592974/

    Examples
    --------
    Examples 1 and 2::

        N=3000
        twoSpeciesWithBidirectionalCausality(N,plot=True)
        twoSpeciesWithBidirectionalCausality(N,tau_d=2,plot=True)
    """
    # Mutable objects (list/dict) must not be used as default argument values;
    # resolve the defaults here instead (behavior is unchanged for callers).
    if IC is None:
        IC = [0.2, 0.4]
    if params is None:
        params = {'Ax': 3.78, 'Ay': 3.77, 'Bxy': 0.07, 'Byx': 0.08}

    # Pad the arrays by tau_d so the delayed x term is defined from the first step.
    x=_np.ones(N+tau_d,dtype=float)*IC[0]
    y=_np.ones(N+tau_d,dtype=float)*IC[1]
    for i in range(tau_d,N+tau_d-1):
        x[i+1]=x[i]*(params['Ax']-params['Ax']*x[i]-params['Bxy']*y[i])
        y[i+1]=y[i]*(params['Ay']-params['Ay']*y[i]-params['Byx']*x[i-tau_d])

    # Trim the padding and wrap in labelled DataArrays.
    x=_xr.DataArray( x[tau_d:],
                     dims=['t'],
                     coords={'t':_np.arange(_np.shape(x[tau_d:])[0])},
                     attrs={'units':"au",
                            'standard_name': 'Amplitude'})
    x.t.attrs={'units':'au'}
    y=_xr.DataArray( y[tau_d:],
                     dims=['t'],
                     coords={'t':_np.arange(_np.shape(y[tau_d:])[0])},
                     attrs={'units':"au",
                            'standard_name': 'Amplitude'})
    y.t.attrs={'units':'au'}

    if plot==True:
        fig,ax=_plt.subplots()
        x.plot(ax=ax,label='x')
        y.plot(ax=ax,label='y')
        ax.legend()

    return x,y
###################################################################################
#%% sub-functions
def applyForecast(s,Py,edm_map,T,plot=False):
    """
    The forecasting method. Combines weights with correct indices (keys) to get the forecast

    Parameters
    ----------
    s : xarray.core.dataarray.DataArray
        complete signal. first half (sx) and second half (sy) of the data
    Py : xarray.core.dataarray.DataArray
        Second half (sy) signal converted to time lagged state space
    edm_map : dict
        Map produced by createMap(): edm_map['keys'] holds the time indices of the
        nearest neighbors and edm_map['weights'] their normalized weights.
    T : int
        number of time steps in which to forecast into the future.
    plot : bool
        optional plot of results

    Returns
    -------
    results : xarray.core.dataarray.DataArray
        Forecast values with dims ('t','future'), where 'future' runs from 0 to T
        steps ahead of each time point.

    Examples
    --------
    Example 1::

        import numpy as np
        import matplotlib.pyplot as plt; plt.close('all')
        import pandas as pd

        N=1000
        ds=lorentzAttractor(N=N)
        s=ds.x.copy()
        s['t']=np.arange(0,N)
        sx=s[:N//2]
        sy=s[N//2:]
        E=3
        tau=1
        knn=E+1
        Px=convertToTimeLaggedSpace(sx,E=E,tau=tau)
        Py=convertToTimeLaggedSpace(sy,E=E,tau=tau)
        Py['t']=Py.t+N//2
        edm_map=createMap(Px,Py,knn=knn)
        results=applyForecast(s,Py,edm_map,T=10,plot=True)
    """
    # initialize a matrix for the forecast results.
    # The last T points are dropped so every forecast horizon stays in range.
    index=Py.t.values[:-T]
    results=_xr.DataArray(dims=['t','future'],
                          coords={'t':index,
                                  'future':_np.arange(0,T+1)})
    # perform forecast: for each horizon, look up the neighbor values shifted
    # `future` steps ahead and combine them with the map's weights.
    shape=edm_map['keys'].sel(t=index).shape
    for a in results.transpose():
        y=s.sel(t=edm_map['keys'].sel(t=index).values.reshape(-1)+a.future.values).values.reshape(shape)
        results.loc[:,a.future.values] = (edm_map['weights'].sel(t=index)*y).sum(axis=1).values
    if plot==True:
        # contruct actual future data matrix to use with the pearson correlation below
        dfTActual=_xr.DataArray( _np.zeros(results.shape),
                                 dims=results.dims,
                                 coords=results.coords)
        for fut in results.future.data:
            dfTActual.loc[:,fut]=Py.sel(delay=0,t=(dfTActual.t.data+fut)).data
        fig,ax=_plt.subplots(T+1,sharex=True,sharey=True)
        # correlation between forecast and actual, per forecast horizon
        rho=_xr.DataArray(dims=['T'],
                          coords={'T':_np.arange(T+1)})
        for Ti in rho.coords['T'].data:
            print(Ti)
            rho.loc[Ti]=calcCorrelationCoefficient(dfTActual.sel(future=Ti), results.sel(future=Ti))
        # one subplot per horizon, actual vs. forecast
        for i, Ti in enumerate(range(0,1+T)):
            dfTActual.sel(future=Ti).plot(ax=ax[i])
            results.sel(future=Ti).plot(ax=ax[i])
            ax[i].set_title('')
            ax[i].set_xlabel('')
            _subtitle(ax[i],'T=%d, rho=%.3f'%(Ti,rho.sel(T=Ti).data))
        ax[0].set_title('N=%d'%len(s))
        _finalizeFigure(fig,h_pad=0)
    return results
def calc_EDM_time(N,E,tau,dt=int(1)):
    """
    Calculates the effective "time basis" used in the time-lagged state space.

    The first (E-1)*tau samples cannot be embedded (their delayed copies would
    fall before the start of the record), so the basis starts at (E-1)*tau.

    Parameters
    ----------
    N : int
        Number of points in the original time series data
    E : int
        Dimensionality in the time-lagged basis
    tau : int
        Time step between dimensional terms, E, used in the time-lagged basis
    dt : float
        Time step in time series data. Default is 1.

    Returns
    -------
    numpy.ndarray of floats
        The effective "time basis" used in the time-lagged state space
    """
    first_valid_sample = (E - 1) * tau
    sample_numbers = _np.arange(first_valid_sample, N, dtype=type(dt))
    return sample_numbers * dt
def calc_EMD_delay(E,tau):
    """
    Calculates the "time basis" offsets (delays) used in the time-lagged state space.

    The offsets run from -(E-1)*tau up to 0 in steps of tau, i.e. the most
    delayed copy of the signal comes first and the undelayed copy last.

    Parameters
    ----------
    E : int
        Dimensionality in the time-lagged basis
    tau : int
        Time step between dimensional terms, E, used in the time-lagged basis

    Returns
    -------
    numpy.ndarray of ints
        The "time basis" offsets (delays) used in the time-lagged state space
    """
    earliest_offset = -(E - 1) * tau
    return _np.arange(earliest_offset, 1, tau, dtype=int)
def calcWeights(radii,method='exponential'):
    """
    Calculates weights used with the findNearestNeighbors() function.

    Parameters
    ----------
    radii : numpy.ndarray or xarray.core.dataarray.DataArray
        2D array of neighbor distances, one row per query point.
    method : str
        'exponential' - (Default) exponential down-weighting by distance.
        'uniform' - equal weights for all neighbors.

    Returns
    -------
    xarray.core.dataarray.DataArray
        Weights with the same shape as radii; each row sums to 1.

    Example
    -------
    Example 1::

        # create data
        x=_np.arange(0,10+1)
        y=_np.arange(100,110+1)
        X,Y=_np.meshgrid(x,y)
        X=X.reshape((-1,1))
        Y=Y.reshape((-1,1))
        A=_np.concatenate((X,Y),axis=1)

        # points to investigate
        B=[[5.1,105.1],[8.9,102.55],[3.501,107.501]]

        numberOfNearestPoints=5
        points,indices,radii=findNearestNeighbors(A,B,numberOfNearestPoints=numberOfNearestPoints)
        weights=calcWeights(radii)
        print(weights)

        for i in range(len(B)):
            fig,ax=_plt.subplots()
            ax.plot(X.reshape(-1),Y.reshape(-1),'.',label='original data')
            ax.plot(B[i][0],B[i][1],'x',label='point of interest')
            ax.plot(points[i][:,0],points[i][:,1],label='%d nearest neighbors\nwith weights shown'%numberOfNearestPoints,marker='o',linestyle='', markerfacecolor="None")
            plt.legend()
    """
    if type(radii) in [_np.ndarray]:
        radii=_xr.DataArray(radii)
    if method =='exponential':
        # Exponentially down-weight each neighbor by its distance; the nearest
        # neighbor's radius sets the length scale per row.  Rows are then
        # normalized to sum to 1.
        # (A legacy variant using numpy multi-dim indexing on DataArrays was
        # removed here: it sat in an unreachable `else` branch and relied on
        # deprecated '[:,None]' indexing of xarray objects.)
        weights=_np.exp(-radii/_np.array(radii.min(axis=1)).reshape(-1,1))
        weights=weights/_np.array(weights.sum(axis=1)).reshape(-1,1)
    elif method =='uniform':
        # Equal weight, 1/knn, for every neighbor.
        weights=_np.ones(radii.shape)/radii.shape[1]
    else:
        raise Exception('Incorrect weighting method provided')
    return _xr.DataArray(weights)
def calcCorrelationCoefficient(data,fit,plot=False):
    """
    Pearson correlation coefficient.

    Note that pearson correlation is rho=sqrt(r^2)=r and allows for a value from
    1 (perfectly correlated) to 0 (no correlation) to -1 (perfectly anti-correlated)

    Parameters
    ----------
    data : numpy.ndarray or xarray.core.dataarray.DataArray
        Reference signal.
    fit : numpy.ndarray or xarray.core.dataarray.DataArray
        Signal to correlate against the reference (e.g. a reconstruction).
    plot : bool
        True - plot both signals with rho in the title.

    Returns
    -------
    rho : float
        Pearson correlation coefficient; NaN when either signal has zero variance.

    Reference
    ---------
    * Eq. 22 in https://mathworld.wolfram.com/CorrelationCoefficient.html

    Examples
    --------
    Example 1::

        ## Test for positive correlation.  Simple.
        import numpy as np
        f=2e3
        t=np.arange(0,1e-3,2e-6)
        data=np.sin(2*np.pi*f*t)
        fit=data+(np.random.rand(len(t))-0.5)*0.1
        calcCorrelationCoefficient(data,fit,plot=True)

    Example 3::

        ## Test for negative correlation.  Opposite of Example 1.
        import numpy as np
        f=2e3
        t=np.arange(0,1e-3,2e-6)
        y1=np.sin(2*np.pi*f*t)
        y2=-y1+(np.random.rand(len(t))-0.5)*0.1
        calcCorrelationCoefficient(y1,y2,plot=True)

    Example 4::

        ## Test for divide by zero warning
        import numpy as np
        f=2e3
        t=np.arange(0,1e-3,2e-6)
        data=np.sin(2*np.pi*f*t)
        fit=np.zeros(data.shape)
        calcCorrelationCoefficient(data,fit,plot=True)

    Example 5::

        ## Test for divide by nan
        import numpy as np
        f=2e3
        t=np.arange(0,1e-3,2e-6)
        data=np.sin(2*np.pi*f*t)
        fit=np.zeros(data.shape)*np.nan
        calcCorrelationCoefficient(data,fit,plot=True)
    """
    # Normalize both inputs to DataArrays.  NOTE(review): when `data` is an
    # ndarray, `fit` is assumed to be an ndarray of the same length too.
    if type(data)==_np.ndarray:
        data=_xr.DataArray(data,
                           dims=['t'],
                           coords={'t':_np.arange(data.shape[0])})
        fit=_xr.DataArray(fit,
                          dims=['t'],
                          coords={'t':_np.arange(fit.shape[0])})
    elif type(data)==_xr.core.dataarray.DataArray:
        pass
    else:
        raise Exception('Improper data type')
    # NOTE(review): 'if True' looks like leftover refactoring scaffolding.
    if True:
        y=data.data
        f=fit.data
        # NaN-tolerant sums of squares (Eq. 22 of the reference above).
        # SSxy=((f-f.mean())*(y-y.mean())).sum()
        # SSxx=((f-f.mean())**2).sum()
        # SSyy=((y-y.mean())**2).sum()
        SSxy=_np.nansum( (f-_np.nanmean(f))*(y-_np.nanmean(y)) )
        SSxx=_np.nansum((f-_np.nanmean(f))**2)
        SSyy=_np.nansum((y-_np.nanmean(y))**2)
    if _np.sqrt(SSxx*SSyy)!=0:
        rho=SSxy/_np.sqrt(SSxx*SSyy) # r-squared value #TODO this line occassionally returns a RuntimeWarning. Fix. "RuntimeWarning: invalid value encountered in double_scalars"
        # rho[i]=SSxy**2/(SSxx*SSyy) # r-squared value
    else: # TODO possibly add a divide by nan or by inf case?
        # Zero variance in either signal: correlation is undefined, return NaN.
        rho=_np.nan # recently added this case for divide by zero. i'm leaving this comment here until i'm sure the fix is bug free
    if plot==True:
        fig,ax=_plt.subplots()
        ax.plot(y,label='Original data')
        ax.plot(f,label='Reconstructed data')
        ax.legend()
        ax.set_title('Rho = %.3f'%(rho))
    return rho
def check_dataArray(x,resetTimeIndex=False):
    """
    Takes in an input signal (xarray.DataArray) and makes sure it meets the various requirements used through this library.
    This function either corrects any issues it finds or throws an error.

    Parameters
    ----------
    x : Ideally numpy array or xarray.DataArray
        Input signal
    resetTimeIndex : bool
        True - resets time coordinate to [0,1,2,3,...,N-1]

    Returns
    -------
    x : xarray.core.dataarray.DataArray
        Output signal with the correct variable name for the time series data.

    Examples
    --------
    Example 1::

        # standard
        dt=0.1
        t=np.arange(1000)*dt
        y=_xr.DataArray(    _np.sin(0.00834*2*np.pi*t),
                            dims=['t'],
                            coords={'t':t})
        check_dataArray(y)

    Example 2::

        # numpy array
        dt=0.1
        t=np.arange(1000)*dt
        check_dataArray(_np.sin(0.00834*2*np.pi*t))

    Example 3::

        # other input types
        # NOTE(review): lists and pandas Series are rejected by this function
        # (they raise the 'Input should be an xarray.DataArray' exception).
        dt=0.1
        t=np.arange(1000)*dt
        y=_xr.DataArray(    _np.sin(0.00834*2*np.pi*t),
                            dims=['t'],
                            coords={'t':t})
        check_dataArray([y])
        check_dataArray( _pd.Series(_np.sin(0.00834*2*np.pi*t)))

    Example 4::

        # time data named incorrectly
        dt=0.1
        t=np.arange(1000)*dt
        y=_xr.DataArray(    _np.sin(0.00834*2*np.pi*t),
                            dims=['time'],
                            coords={'time':t})
        check_dataArray(y)

    Example 5::

        # time data named incorrectly again
        dt=0.1
        t=np.arange(1000)*dt
        y=_xr.DataArray(    _np.sin(0.00834*2*np.pi*t),
                            dims=['Time'],
                            coords={'Time':t})
        check_dataArray(y)
    """
    # if input is a numpy array, convert it to an xarray.DataArray structure
    if type(x) == _np.ndarray:
        x=_xr.DataArray(x,
                        dims=['t'],
                        coords={'t':_np.arange(0,_np.shape(x)[0])})
    # make sure data is an xarray.DataArray structure
    elif type(x) not in [_xr.core.dataarray.DataArray]:
        raise Exception('Input should be an xarray.DataArray. Instead, %s encountered.'%(str(type(x))))
    # make sure time dimension is present and named correctly
    if x.coords._names==set(): # if no coordinate is present
        # no coordinate at all: attach a default integer 't' coordinate
        x=_xr.DataArray( x,
                         dims=['t'],
                         coords={'t':_np.arange(x.shape[0])})
    elif 'time' in x.dims:
        # rename a 'time' dimension to the library-standard 't'
        x=x.rename({'time':'t'})
    elif 't' not in x.dims:
        raise Exception('time or t dimension not in signal')
    if resetTimeIndex==True:
        # replace whatever time coordinate exists with 0..N-1 integers
        x['t']=_np.arange(x.t.shape[0]).astype(int)
    return x
def convertToTimeLaggedSpace( s,
                              E,
                              tau,
                              fuse=False):
    """
    Convert input to time lagged space using the embedded dimension, E,
    and time step, tau.

    Parameters
    ----------
    s : xarray.DataArray or list of xarray.DataArray
        Input signal(s) to convert to time-lagged space.  If multiple signals are provided, the signals are "fused" together.
    E : int
        Dimensional parameter
    tau : int
        Time lag parameter
    fuse : bool
        True - fuses input signals

    Returns
    -------
    P : xarray.DataArray or list of xarray.DataArray
        Dataframe containing the input signal(s) converted to time-lagged space.
        Signals are fused if fuse=True

    Example
    -------
    Example 1::

        s=_xr.DataArray(_np.arange(0,100),
                        dims=['t'],
                        coords={'t':_np.arange(100,200)})
        P=convertToTimeLaggedSpace(s, E=5, tau=1)

    Example 2::

        s1=_xr.DataArray(_np.arange(0,100))
        s2=_xr.DataArray(_np.arange(1000,1100))
        s=[s1,s2]
        P12=convertToTimeLaggedSpace(s, E=5, tau=1)

    Example 3::

        s=_xr.DataArray(_np.arange(0,100))
        P=convertToTimeLaggedSpace(s, E=5, tau=5)

    Example 4::

        # fusion example
        N=1000
        ds=lorentzAttractor(N=N,plot='all')
        s=[ds.x,ds.z]
        E=3
        tau=2
        fuse=True
        P=convertToTimeLaggedSpace(s, E=E, tau=tau,fuse=fuse)
    """
    # make sure input is a list
    if type(s) != list:
        s=[s]
    # process each input
    Plist=[]
    for si in s:
        # check input data
        si=check_dataArray(si)
        # initialize empty dataArray: rows are embeddable time points,
        # columns are the delay offsets -(E-1)*tau ... 0
        index=calc_EDM_time(si.shape[0],E=E,tau=tau,dt=1).astype(int)
        columns=calc_EMD_delay(E,tau)
        P=_xr.DataArray(dims=['t','delay'],
                        coords={'t':index,
                                'delay':columns})
        # populate dataarray one columns at a time. #TODO Is there a way to do this without a for loop?
        for i,ii in enumerate(columns):
            P.loc[:,ii]=si[index+ii].values
        Plist.append(P)
    if fuse==True:
        # stack the embedded signals along the delay axis and renumber it 0..E*len(s)-1
        P=_xr.concat(Plist,dim='delay')
        P['delay']=_np.arange(P.shape[1])
        Plist=[P]
    # return P for a single input or a list of P for multiple inputs
    if _np.shape(Plist)[0]==1:
        return Plist[0]
    else:
        return Plist
def createMap(PA1,PA2,knn,weightingMethod='exponential'):
    """
    Creates an SMI map from PA1 to PA2.

    Parameters
    ----------
    PA1 : xarray.core.dataarray.DataArray
        Input signal, s1A, converted to time lagged state space
    PA2 : xarray.core.dataarray.DataArray
        Input signal, s2A, converted to time lagged state space
    knn : None or int
        Number of nearest neighbors.  Default (None) is E+1.
    weightingMethod : str
        'exponential' - (Default).  Exponential weighting of nearest neighbors.

    Returns
    -------
    edm_map : dict with keys ('keys' and 'weights')
        edm_map['keys'] contains the keys (indices) associated with the map
        edm_map['weights'] contains the weights associated with the map

    Examples
    --------
    Example 1::

        import numpy as np
        import matplotlib.pyplot as plt
        import pandas as pd

        N=100
        s=tentMap(N=N)
        sx=s[:N//2]
        sy=s[N//2:]
        E=3
        knn=E+1
        tau=2
        PA1,PA2=convertToTimeLaggedSpace([sx,sy],E=E,tau=tau)

        edm_map=createMap(PA1,PA2,knn=knn)

        index=N//2+5
        index=8

        fig,ax=plt.subplots()
        sx.plot(ax=ax,label='Training data',color='k')
        sy.plot(ax=ax,label='Test data',color='blue')
        plt.plot(PA2.sel(t=index).delay.values+PA2.sel(t=index).t.values+N//2+1,PA2.sel(t=index).values,'r',marker='x',label='Points in question',linewidth=2)
        for j,i in enumerate(edm_map['keys'].sel(t=index).values):
            print(j,i)
            if j==0:
                label='nearest neighbors'
            else:
                label=''
            plt.plot(   PA1.sel(t=i).t+PA1.sel(t=i).delay+1,
                        PA1.sel(t=i).values,'g',marker='.',label=label,linewidth=2)
    """
    # For each point in PA2, find its knn nearest neighbors among the points of PA1.
    coordinates, indices, radii=findNearestNeighbors(PA1.values,PA2.values,numberOfNearestPoints=knn)
    # Shift the 0-based row indices so they refer to PA1's time coordinate.
    keysOfNearestNeighbors=_xr.DataArray(indices+PA1.t[0].values,
                                         dims=['t','shift'],
                                         coords={'t':PA2.t.values,
                                                 'shift':_np.arange(knn)})
    radii[radii==0]=_np.nan # temporary code. if a perfect match occurs (i.e. radii=0), then an error will occur. This should take care of that.
    radii=_xr.DataArray(radii,
                        dims=['t','shift'],
                        coords={'t':PA2.t.values,
                                'shift':_np.arange(knn)})
    # Convert the neighbor distances into normalized weights.
    weights=calcWeights(radii,method=weightingMethod)
    return {'keys':keysOfNearestNeighbors,'weights':weights}
def findNearestNeighbors(X,Y,numberOfNearestPoints=1,plot=False):
    """
    Find the nearest neighbors in X to each point in Y.

    Parameters
    ----------
    X : numpy.ndarray
        Reference points, shape (n_samples, n_dims).
    Y : numpy.ndarray
        Query points, shape (n_queries, n_dims).
    numberOfNearestPoints : int
        Number of neighbors to return per query point.
    plot : bool
        True - plot the neighbors of the first query point (2D data only).

    Returns
    -------
    points : numpy.ndarray
        Coordinates of the neighbors, shape (n_queries, k, n_dims).
    indices : numpy.ndarray
        Row indices into X of the neighbors, shape (n_queries, k).
    radii : numpy.ndarray
        Euclidean distances to the neighbors, shape (n_queries, k).

    Examples
    --------
    Example 1::

        # create data
        a=_np.arange(0,10+1)
        b=_np.arange(100,110+1)
        A,B=_np.meshgrid(a,b)
        A=A.reshape((-1,1))
        B=B.reshape((-1,1))
        X=_np.concatenate((A,B),axis=1)

        # points to investigate
        Y=np.array([[5.1,105.1],[8.9,102.55],[2,107]])

        numberOfNearestPoints=5

        # one at a time
        for y in Y:
            y=y.reshape(1,-1)
            points,indices,radii=findNearestNeighbors(X,y,numberOfNearestPoints=numberOfNearestPoints,plot=True)

        # or all at once
        points,indices,radii=findNearestNeighbors(X,Y,numberOfNearestPoints=numberOfNearestPoints,plot=False)

    Example 2::

        t=np.linspace(0,0.2,200)
        y=np.sin(2*np.pi*53.5*t[0:200])
        p=y[100:103:2]
        y=_xr.DataArray(y[0:100])

        E=2
        tau=2
        A=convertToTimeLaggedSpace(y,E=E,tau=tau)
        offset=A.t[0].values
        A=A.values
        B=p.reshape(1,-1)
        knn=6
        points,indices,radii=findNearestNeighbors(A,B,knn)
        points=points.reshape(knn,-1)
        indices=indices.reshape(-1)+offset
        radii=radii.reshape(-1)

        fig,ax=plt.subplots()
        y.plot(ax=ax,marker='.')
        ax.plot(np.arange(100,103,2),p,color='tab:blue',marker='x',label='point in question')
        for i in range(radii.shape[0]):
            if i==0:
                label='nearest neighbors'
            else:
                label=''
            ax.plot(    np.arange(indices[i],indices[i]-(E-1)*tau-1,-tau)[::-1],
                        #np.arange(indices[i],indices[i]+E),
                        points[i,:],
                        color='tab:orange',marker='x',label=label)
        ax.legend()
    """
    # Local import keeps sklearn an optional dependency for the module.
    from sklearn.neighbors import NearestNeighbors

    neigh = NearestNeighbors(n_neighbors=numberOfNearestPoints)
    neigh.fit(X)
    radii,indices=neigh.kneighbors(Y)
    points=X[indices]

    # optional plot. plots the results of only the first fit point
    if plot==True:
        i=0
        fig,ax=_plt.subplots()
        ax.plot(X[:,0],X[:,1],'.',label='original data')
        ax.plot(Y[i,0],Y[i,1],'x',label='point of interest')
        ax.plot(points[i,:,0],points[i,:,1],label='%d nearest neighbors'%numberOfNearestPoints,marker='o',linestyle='', markerfacecolor="None")
        _plt.legend()

    return points, indices, radii
def printTime(string,start):
    """Print an optional label, then the seconds elapsed since *start* (a time.time() value)."""
    if string != '':
        print('%s' % (string))
    elapsed = _time.time() - start
    print('%.3fs' % (elapsed))
def reconstruct(sx,edm_map,time_basis=None, sy=None,plot=False):
    """
    Performs EDM reconstruction on sx using mapping information (keys,weights).

    Note that this is only SMI reconstruction if sx is not the same signal used to create the map (keys,weights).  See the SMIReconstruction function below for details.

    Parameters
    ----------
    sx : xarray.core.dataarray.DataArray
        Input signal to use for the reconstruction
    edm_map : dict
        Map produced by createMap(): edm_map['keys'] holds neighbor indices and
        edm_map['weights'] the corresponding weights.
    time_basis : None or array
        If sy has a non-standard time basis, you can specify it here.
        Default = None, which assumes sx and sy are sequential datasets.
    sy : xarray.core.dataarray.DataArray
        Optional sy signal.  If provided and plot==True, it will be plotted to be compared with sy_recon

    Returns
    -------
    sy_recon : xarray.core.dataarray.DataArray
        Reconstructed signal of sy

    Examples
    --------
    Example 1::

        # standard two signal example
        N=100
        s=tentMap(N=N)
        sx,sy,s=splitData(s)
        E=3
        knn=E+1
        tau=2
        Px,Py=convertToTimeLaggedSpace([sx,sy],E=E,tau=tau)
        edm_map=createMap(Px,Py,knn=knn)
        sy_recon=reconstruct(   sx,
                                edm_map,
                                sy=sy,
                                plot=True)

    Example 2::

        # standard two signal example where I've changed the time basis on the second signal
        N=100
        s=tentMap(N=N)
        sx,sy,s=splitData(s)
        sy['t']=_np.arange(N//2+10,N+10)
        E=3
        knn=E+1
        tau=2
        Px,Py=convertToTimeLaggedSpace([sx,sy],E=E,tau=tau)
        edm_map=createMap(Px,Py,knn=knn)
        sy_recon=reconstruct(   sx,
                                edm_map,
                                time_basis=Py.t+sy.t[0],
                                sy=sy,
                                plot=True)

    Example 3::

        # signal fusion example
        import matplotlib.pyplot as plt; plt.close('all')

        N=500
        ds=lorentzAttractor(N=N,removeFirstNPoints=500)
        sx=[ds.x[:N//2],ds.y[:N//2]]
        sy=[ds.x[N//2:],ds.y[N//2:]]

        E=3
        tau=2
        knn=E+1 # simplex method

        ## convert to time-lagged space
        P1A=convertToTimeLaggedSpace(sx,E=E,tau=tau,fuse=True)
        P1B=convertToTimeLaggedSpace(sy,E=E,tau=tau,fuse=True)

        ## Create map from s1A to s1B
        edm_map=createMap(P1A,P1B,knn)

        recon=reconstruct(  sx[0],
                            edm_map,
                            time_basis=P1B.t+sy[0].t[0],
                            sy=sy[0],
                            plot=True)

        # time_basis=P1B.t+sy[0].t[0]
        # sx=sx[0]
        # sy=sy[0]
    """
    # check input type
    sx=check_dataArray(sx,resetTimeIndex=True)
    # perform reconstruction: weighted sum of sx values at the neighbor indices
    t=edm_map['keys'].t.values
    shape=edm_map['keys'].shape
    temp=sx.sel(t=edm_map['keys'].values.reshape(-1)).values.reshape(shape)
    sy_recon=_xr.DataArray((temp*edm_map['weights'].values).sum(axis=1),
                           dims=['t'],
                           coords={'t':t})
    if plot==True:
        fig,(ax1,ax2)=_plt.subplots(1,2,sharex=True)
        sx.plot(ax=ax1,color='k',label='x')
        sy_recon.plot(ax=ax2,color='g',label='y reconstruction')
        if type(sy)!=type(None):
            # overlay the actual sy and report the correlation with the reconstruction
            sy=check_dataArray(sy,resetTimeIndex=True)
            sy.plot(ax=ax2,color='b',label='y actual')
            rho=calcCorrelationCoefficient(sy.where(sy_recon.t==sy.t), sy_recon)
            ax2.set_title('rho=%.3f'%rho)
            ax2.legend()
        ax1.legend()
    # restore time basis
    if type(time_basis)==type(None):
        # default: place the reconstruction immediately after sx in time
        sy_recon['t']=edm_map['keys'].t+sx.t[-1]+1
    else:
        sy_recon['t']=time_basis
    return sy_recon
def splitData(s,split='half',reset_indices=True):
    """
    Split the signal *s* into a leading and a trailing section.

    Parameters
    ----------
    s : xarray.core.dataarray.DataArray
        Input signal (anything accepted by check_dataArray()).
    split : int or str='half'
        Number of points for the first section; the remainder goes into the
        second.  'half' (default) splits evenly, trimming one trailing point
        when the length is odd.
    reset_indices : bool
        True (default) - renumber the leading coordinate/index to 0..N-1
        before splitting; the rest of this library expects that layout.

    Returns
    -------
    sX, sY, s : same dtype as the (normalised) input
        First section, second section, and the (possibly trimmed) full signal.
    """
    # Normalise the input to an xarray.DataArray with a 't' coordinate.
    s = check_dataArray(s)

    # Optionally replace the leading coordinate/index with 0..N-1.
    if reset_indices == True:
        if type(s) == _xr.core.dataarray.DataArray:
            s[s.dims[0]] = _np.arange(0, s.shape[0])
        elif type(s) == _pd.core.series.Series:
            s.index = _np.arange(0, s.shape[0])

    # Work out how many points go into the first section.
    if split == 'half':
        # Trim the last sample when the length is odd so both halves match.
        if _np.mod(s.shape[0], 2) == 1:
            s = s[:-1]
        n_first = s.shape[0] // 2
    elif type(split) == int:
        n_first = split
    else:
        raise Exception('Improperly defined value for split. Should be an integer.')

    return s[0:n_first], s[n_first:], s
def apply_M_map(M, Px, time, fill_NaN=True, interp_method='nearest'):
    """
    Evaluate the binned map M at the state-space points Px to reconstruct a signal.

    Parameters
    ----------
    M : xarray.core.dataarray.DataArray
        Binned map produced by bin_embedded_data().  Every dimension is assumed
        to share the same (bin-center) coordinate.
    Px : xarray.core.dataarray.DataArray
        Points in time-lagged state space at which to evaluate the map.
    time : array-like
        Time coordinate to attach to the returned series (one entry per row of Px).
    fill_NaN : bool
        True - linearly interpolate over NaNs in the result (points that landed
        outside the map or in empty bins).
    interp_method : str
        Interpolation method passed to scipy.interpolate.interpn
        (e.g. 'nearest' or 'linear').

    Returns
    -------
    result : xarray.core.dataarray.DataArray
        Reconstructed series with dimension 't'.
    """
    from scipy.interpolate import interpn
    # All of M's dims share one coordinate, so reuse it for every axis.
    # NOTE(review): the name says edges, but these are the bin-center
    # coordinates attached by bin_embedded_data — confirm.
    bin_edges=M.coords[M.dims[0]].data
    # points=(bin_edges,bin_edges,bin_edges)
    points = [bin_edges for i in range(len(M.dims))]
    # bounds_error=False returns NaN for out-of-range points instead of raising.
    result = _xr.DataArray(interpn(points,M.data,Px.values, method=interp_method, bounds_error=False ),
                           dims='t',
                           coords=[time])
    if fill_NaN == True:
        # Fill NaNs (out-of-bounds / empty-bin hits) by linear interpolation in time.
        result.data=_pd.Series(result).interpolate(method='linear').values
    return result
def bin_embedded_data(Px, Py_noisy, m, verbose=False, plot=False):
    """
    Bins Py_noisy[:,-1] into a matrix, M, defined by the coordinates of Px

    Parameters
    ----------
    Px : xarray.core.dataarray.DataArray
        signal x in time lagged state space.
    Py_noisy : xarray.core.dataarray.DataArray
        signal y_noisy in time lagged state space
    m : int
        length of single side of matrix, M (e.g. mxm or mxmxm)
    verbose : bool, optional
        Optional printouts for debugging
    plot : bool, optional
        Optional plot of M, but only if E==2

    Returns
    -------
    M : xarray.core.dataarray.DataArray
        Matrix with Py_noisy[:-1] binned inside and with coordinates set my Px
    indices : numpy.ndarray
        The index of each Py_noisy[:-1] that was placed in M.  Has the same dimensions as Py_noisy and Px
    """
    if Px.shape[1] != Py_noisy.shape[1]:
        raise Exception('Dimensionality (E) of Px and Py do not match. I.e. %d != %d'%(Px.shape[1],Py_noisy.shape[1]))
    else:
        # Determine dimensionality, E
        E=Px.shape[1]

    def create_bins(Px, m):
        # Build m bin centers spanning Px's range (padded by two steps on each
        # side) and m+1 bin edges at the midpoints between consecutive centers.
        temp=_np.linspace(Px.min(),Px.max(),m+2)
        temp=_np.linspace(Px.min()-(temp[1]-temp[0])*2,Px.max()+(temp[1]-temp[0])*2,m+2)
        bin_edges=(temp[:-1]+temp[1:])/2
        bin_centers=temp[1:-1]
        return bin_edges, bin_centers

    # create grid for matrix, M — the same bins are reused for every axis.
    bin_edges,bin_centers = create_bins(Px, m)
    bins=[]
    sample=[]
    dims=[]
    coords=[]
    for i in range(E):
        bins.append(bin_edges)
        sample.append(Px[:,i].values)
        dims.append('x%d'%(i+1))
        coords.append(bin_centers)

    # Mean of the target values (Py_noisy's zero-delay column) within each E-dim bin.
    M, _, indices=_binned_statistic_dd( sample,  ## binned_statistic_dd doesn't seem to work with E>=5
                                        values=Py_noisy[:,-1].values,
                                        # values=Py_noisy[:,0].values,
                                        statistic='mean',
                                        bins=bins,
                                        expand_binnumbers=True)
    M=_xr.DataArray(M,
                    dims=dims,
                    coords=coords,
                    attrs={'long_name':'z'})

    if verbose==True:
        # Fraction of bins that received no samples (and are therefore NaN).
        print('M is %.1f%% nan'%(_np.isnan(M).data.sum()/M.shape[0]**E * 100) )

    if (plot==True or plot=='all') and E==2:
        fig,ax=_plt.subplots()
        M.plot(ax=ax)

    return M, indices
###################################################################################
#%% Main functions
def reconstruction_original(x, y, E=2, tau=1, knn=None, plot=True):
    """
    Split x and y into halves and reconstruct the second half of y from the
    second half of x via SMI reconstruction.

    Examples
    --------
    Example::

        import pandas as pd
        import matplotlib.pyplot as plt; plt.close('all')
        import numpy as np
        import xarray as xr

        N=1000000
        dt=0.05
        ds=saved_lorentzAttractor(N=N,dt=dt,removeMean=True,normalize=True, removeFirstNPoints=1000)
        x=ds.x
        y=ds.z

        E=2
        tau=3
        reconstruction_original(x,y,E=E,tau=tau,plot=True)

        E=3
        tau=7
        reconstruction_original(x,y,E=E,tau=tau,plot=True)
    """
    # Coerce both inputs to DataArrays before any processing.
    x = check_dataArray(x)
    y = check_dataArray(y)
    # Each signal is split into a training half (A) and a test half (B).
    x_train, x_test, _ = splitData(x)
    y_train, y_test, _ = splitData(y)
    # Delegate the actual reconstruction (and optional plotting) to SMI.
    return SMIReconstruction(
        da_s1A=x_train,
        da_s1B=x_test,
        da_s2A=y_train,
        da_s2B=y_test,
        E=E,
        tau=tau,
        knn=knn,
        plot=plot,
    )
def reconstruction_binned(x, y, m=100, E=2, tau=1, plot=True, interp_method='nearest'):
    """
    Reconstruct the second half of y from the second half of x using a binned
    (histogram) map trained on the first halves of both signals.

    Parameters
    ----------
    x : xarray.core.dataarray.DataArray
        Source signal used to build and apply the map.
    y : xarray.core.dataarray.DataArray
        Target signal to be reconstructed.
    m : int
        Number of bins along each side of the E-dimensional map.
    E : int
        Dimensionality of the time-lagged state space.
    tau : int
        Time-lag step parameter.
    plot : bool
        If True, plot the reconstruction against the original.
    interp_method : str
        Interpolation method passed to apply_M_map ('nearest' or 'linear').

    Returns
    -------
    syB_recon : xarray.core.dataarray.DataArray
        Reconstruction of the second half of y.
    rho : float
        Pearson correlation between the reconstruction and the actual data.

    Examples
    --------
    Example::

        import pandas as pd
        import matplotlib.pyplot as plt; plt.close('all')
        import numpy as np
        import xarray as xr

        N=1000000
        dt=0.05
        ds=saved_lorentzAttractor(N=N,dt=dt,removeMean=True,normalize=True, removeFirstNPoints=1000)
        x=ds.x
        y=ds.z

        E=2
        tau=3
        m=40
        reconstruction_binned(x,y,m=m,E=E,tau=tau,plot=True)

        E=3
        tau=7
        m=11
        reconstruction_binned(x,y,m=m,E=E,tau=tau,plot=True, interp_method='linear')
        reconstruction_binned(x,y,m=m,E=E,tau=tau,plot=True, interp_method='nearest')
    """
    # check input type
    x = check_dataArray(x)
    y = check_dataArray(y)
    # split signals into training (A) and test (B) halves
    sxA, sxB, sx = splitData(x)
    syA, syB, sy = splitData(y)
    # convert signals to time lagged state space
    PxA, PyA, PxB = convertToTimeLaggedSpace([sxA, syA, sxB], E, tau)
    # build map, M, from the training halves
    M, _ = bin_embedded_data(PxA, PyA, m=m, verbose=True)
    # reconstruct the test half of y from the test half of x
    syB_recon = apply_M_map(M, PxB, time=sxB.t.data[(E - 1) * tau:], interp_method=interp_method)
    # score the reconstruction against the actual data (previously only
    # computed inside the plot branch)
    rho = calcCorrelationCoefficient(syB[(E - 1) * tau:], syB_recon)
    if plot == True:
        fig, ax = _plt.subplots(2, 1, sharex=True)
        sy.plot(ax=ax[0], label='original')
        sy.plot(ax=ax[1], label='original')
        syB_recon.plot(ax=ax[1], label='recon')
        ax[0].set_title('rho=%.3f' % (rho))
        ax[0].legend()
        ax[1].legend()
        _finalizeFigure(fig, figSize=[6, 4])
    # BUG FIX: this function previously computed the reconstruction but
    # returned None; return the result like the sibling functions
    # (upsample, denoise_signal) do.
    return syB_recon, rho
def upsample(x, y_undersampled, sampling_factor, m=100, E=2, tau=1, plot=True, y_orig=None, interp_method='nearest'):
    """
    Upsample y_undersampled to the sample rate of x using a binned map built
    from the undersampled pair (x_undersampled, y_undersampled).

    Parameters
    ----------
    x : xarray.core.dataarray.DataArray
        Fully-sampled source signal.
    y_undersampled : xarray.core.dataarray.DataArray
        Target signal sampled every `sampling_factor` points of x's grid.
    sampling_factor : int
        Ratio between x's sample rate and y_undersampled's.
    m : int
        Number of bins along each side of the E-dimensional map.
    E : int
        Dimensionality of the time-lagged state space.
    tau : int
        Time-lag step parameter (applied to the undersampled grid).
    plot : bool or 'all'
        If True, plot the result; 'all' additionally plots the maps (E==2,
        requires y_orig).
    y_orig : xarray.core.dataarray.DataArray, optional
        Ground-truth fully-sampled y, used only for scoring/plotting.
    interp_method : str
        Interpolation method passed to apply_M_map.

    Returns
    -------
    y_upsampled : xarray.core.dataarray.DataArray
        y reconstructed on x's time grid.
    rho : float or None
        Pearson correlation against y_orig, or None when y_orig is not given.

    Examples
    --------
    Example 1::

        import pandas as pd
        import matplotlib.pyplot as plt; plt.close('all')
        import numpy as np
        import xarray as xr

        N=1000000
        dt=0.05
        ds=saved_lorentzAttractor(N=N,dt=dt,removeMean=True,normalize=True, removeFirstNPoints=1000)
        x=ds.x
        y=ds.z

        E=2
        tau=1
        m=50
        sampling_factor=10
        y_undersampled=(y.copy()+np.random.normal(0,0.1,size=y.shape))[::sampling_factor]
        y_orig=y
        y_upsampled,rho=upsample(x,y_undersampled,sampling_factor=sampling_factor,m=m,E=E,tau=tau,plot=True,y_orig=y_orig)

        E=3
        tau=2
        m=10
        sampling_factor=10
        y_undersampled=(y.copy()+np.random.normal(0,0.1,size=y.shape))[::sampling_factor]
        y_orig=y
        y_upsampled,rho=upsample(x,y_undersampled,sampling_factor=sampling_factor,m=m,E=E,tau=tau,plot=True,y_orig=y_orig)
    """
    # check input type
    x = check_dataArray(x)
    y_undersampled = check_dataArray(y_undersampled)
    # intentionally undersample x data so that its indices match y_undersampled
    x_undersampled = x.loc[y_undersampled.t.data]
    # convert signals to time lagged state space
    Px = convertToTimeLaggedSpace(x, E, tau * sampling_factor)
    Px_undersampled, Py_undersampled = convertToTimeLaggedSpace([x_undersampled, y_undersampled], E, tau)
    # add values to E-dimensioned matrix, M
    M, _ = bin_embedded_data(Px_undersampled, Py_undersampled, m=m, verbose=True)
    # optional intermediate plot; requires y_orig (previously crashed when
    # y_orig was left at its default of None)
    if plot == 'all' and E == 2 and y_orig is not None:
        Py_orig = convertToTimeLaggedSpace(y_orig, E, tau * sampling_factor)
        M_full, _ = bin_embedded_data(Px, Py_orig, m=m, verbose=False)
        fig, ax = _plt.subplots(1, 2)
        M.plot(ax=ax[0])
        M_full.plot(ax=ax[1])
        ax[0].set_aspect('equal')
        ax[1].set_aspect('equal')
    # apply M to Px to get y_upsampled
    y_upsampled = apply_M_map(M, Px, time=x.t.data[(E - 1) * tau * sampling_factor:], fill_NaN=False, interp_method=interp_method)
    # put the known-good undersampled y values back into y_upsampled
    y_upsampled.loc[y_undersampled.t.data[(E - 1) * tau:]] = y_undersampled.loc[y_undersampled.t.data[(E - 1) * tau:]]
    # linear interpolation to fill in any remaining NaN values
    y_upsampled.data = _pd.Series(y_upsampled).interpolate(method='linear').values
    # BUG FIX: rho was previously computed unconditionally, crashing with a
    # TypeError whenever the optional y_orig was not supplied.
    if y_orig is not None:
        rho = calcCorrelationCoefficient(y_orig[(E - 1) * tau * sampling_factor:], y_upsampled)
    else:
        rho = None
    # plot results (needs y_orig for the reference trace and rho)
    if plot == True and y_orig is not None:
        fig, ax = _plt.subplots()
        if len(x) > 10000:
            y_orig[:10000].plot(ax=ax, label='y original')
            y_undersampled[:10000 // sampling_factor].plot(ax=ax, label='y undersampled', linestyle='', marker='.')
            y_upsampled[:10000].plot(ax=ax, label='y upsampled')
        else:
            y_orig.plot(ax=ax, label='y original')
            y_undersampled.plot(ax=ax, label='y undersampled', linestyle='', marker='x')
            y_upsampled.plot(ax=ax, label='y upsampled')
        ax.legend()
        ax.set_title('rho = %.3f, E=%d, tau=%d, m=%d' % (rho, E, tau, m))
    return y_upsampled, rho
def denoise_signal(x,y_noisy,m=100,E=2,tau=1,plot=True,y_orig=None):
    """
    Denoise y_noisy by bin-averaging it in x's time-lagged state space and
    mapping the averaged values back onto the time axis.

    Examples
    --------
    Example 1::

        import pandas as pd
        import matplotlib.pyplot as plt; plt.close('all')
        import numpy as np
        import xarray as xr

        N=1000000
        dt=0.05
        # ds=lorentzAttractor(N=N,dt=dt,removeMean=True,normalize=True, removeFirstNPoints=1000)
        ds=saved_lorentzAttractor(N=N,dt=dt,removeMean=True,normalize=True, removeFirstNPoints=1000)
        x=ds.x
        y=ds.z
        np.random.seed(0)
        y_noisy=y.copy()+np.random.normal(0,1.0,size=y.shape)
        if False:
            fig,ax=_plt.subplots()
            y_noisy.plot(ax=ax)
            y.plot(ax=ax)
        y_orig=y

        E=2
        tau=3
        m=40
        denoise_signal(x,y_noisy,m=m,E=E,tau=tau,plot='all',y_orig=y_orig)

        E=3
        tau=4
        m=11
        denoise_signal(x,y_noisy,m=m,E=E,tau=tau,plot=True,y_orig=y_orig)
    """
    # check input type
    x=check_dataArray(x)
    y_noisy=check_dataArray(y_noisy)
    # convert signals to time lagged state space
    Px,Py_noisy=convertToTimeLaggedSpace([x,y_noisy], E, tau)
    # create matrix, M, that maps Px to Py_noisy; the per-bin averaging is
    # what smooths out the noise
    M, indices = bin_embedded_data(Px, Py_noisy, m=m, plot=plot, verbose=True)
    # apply Px to M to get y_filtered
    # NOTE(review): the else-branch below is a disabled alternative that reads
    # M directly through the bin indices instead of calling apply_M_map; it is
    # kept for reference only and never executes.
    if True:
        y_filt=apply_M_map(M, Px, time = y_noisy.t.data[(E-1)*tau:],fill_NaN=False)
    else:
        ind=[]
        for i in range(E):
            ind.append(indices[i,:]-1)
        # index
        y_filt=_xr.DataArray( M.data[ind],
                              dims='t',
                              coords= [y_noisy.t.data[(E-1)*tau:]],
                              # coords= [y_noisy.t.data[:-(E-1)*tau]],
                              )
    #
    if plot==True or plot=='all':
        # NOTE(review): this branch dereferences y_orig, so plotting requires
        # y_orig despite its default of None -- TODO confirm callers always
        # pass it when plot is truthy.
        lw=1.5
        rho=calcCorrelationCoefficient(y_orig[(E-1)*tau:], y_filt)
        # rho=calcCorrelationCoefficient(y_orig[:-(E-1)*tau], y_filt)
        fig,ax=_plt.subplots()
        y_noisy.plot(ax=ax,label='y+noise',linewidth=lw)
        y_orig.plot(ax=ax,label='y original',linewidth=lw)
        y_filt.plot(ax=ax,label='y filtered',linewidth=lw)
        ax.legend()
        ax.set_title('rho = %.3f, E=%d, tau=%d, m=%d'%(rho,E,tau,m))
    return y_filt
def forecast(s, E, T, tau=1, knn=None, plot=False, weightingMethod=None):
    """
    Create a map of s[first half] to s[second half] and forecast up to T steps into the future.

    Parameters
    ----------
    s : xarray.core.dataarray.DataArray
        Input signal
    E : int
        EDM dimensionality. 2 to 10 are typical values.
    T : int
        Number of time steps into the future to forecast. 1<T<10 is typical.
    tau : int
        EDM time step parameter. tau>=1.
    knn : int, str, or None
        Number of nearest neighbors for SMI search. None, 0, or 'simplex'
        selects the simplex default (E+1); 'smap' uses half the signal length.
    plot : bool, optional
        Plot results
    weightingMethod : NoneType or str
        None defaults to "exponential" weighting
        "exponential" weighting

    Returns
    ----------
    rho : xarray.core.dataarray.DataArray
        Pearson correlation value for each value of 0 to T.
    results : xarray.core.dataarray.DataArray
        Forecasted values for each step into the future.
    future_actual : xarray.core.dataarray.DataArray
        Actual values aligned with each forecast step.

    Examples
    --------
    Example 1::

        N=1000
        s=tentMap(N=N)
        E=3
        T=10
        tau=1
        knn=E+1
        rho,results,future_actual=forecast(s,E,T,tau,knn,True)
    """
    # check input
    s = check_dataArray(s)
    # initialize parameters
    N = s.shape[0]
    # Idiom fix: compare with None via `is`, not `==` (PEP 8), before the
    # string/int comparisons.
    if knn is None or knn == 'simplex' or knn == 0:
        knn = E + 1
    elif knn == 'smap':
        knn = s.shape[0] // 2 - E + 1
    if weightingMethod is None:
        weightingMethod = 'exponential'
    print("N=%d, E=%d, T=%d, tau=%d, knn=%d, weighting=%s" % (N, E, T, tau, knn, weightingMethod))
    # prep data: split the signal in half and embed both halves
    sX, sY, s = splitData(s)
    dfX, dfY = convertToTimeLaggedSpace([sX, sY], E, tau)
    # do forecast
    edm_map = createMap(dfX, dfY, knn, weightingMethod=weightingMethod)
    results = applyForecast(s, dfY, edm_map, T, plot=False)
    # contruct actual future data matrix to use with the pearson correlation below
    future_actual = _xr.DataArray(_np.zeros(results.shape),
                                  dims=results.dims,
                                  coords=results.coords)
    for fut in results.future.data:
        future_actual.loc[:, fut] = dfY.sel(delay=0, t=(future_actual.t.data + fut)).data
    # calculate pearson correlation for each step into the future
    rho = _xr.DataArray(dims=['future'],
                        coords={'future': results.future})
    for fut in rho.future.data:
        rho.loc[fut] = calcCorrelationCoefficient(future_actual.sel(future=fut), results.sel(future=fut))
    # optional plot
    if plot == True:
        fig, ax = _plt.subplots()
        rho.plot(ax=ax, marker='.')
        _finalizeSubplot(ax,
                         xlabel='Steps into the future',
                         ylabel='Correlation',
                         ylim=[-0.01, 1.01],
                         xlim=[0, rho.future.data[-1]],
                         legendOn=False,
                         title="N=%d, E=%d, T=%d, tau=%d, knn=%d, weighting=%s" % (N, E, T, tau, knn, weightingMethod))
        fig, ax = _plt.subplots(results.future.shape[0], sharex=True)
        for i, fut in enumerate(results.future.data):
            future_actual.sel(future=fut).plot(ax=ax[i])
            results.sel(future=fut).plot(ax=ax[i])
            ax[i].set_title('')
            ax[i].set_xlabel('')
            ax[i].tick_params(axis='both', direction='in')
            _subtitle(ax=ax[i], string='Steps = %d, rho = %.3f' % (fut, rho.sel(future=fut).data))
        _finalizeFigure(fig)
    return rho, results, future_actual
def SMIReconstruction( da_s1A,
                       da_s1B,
                       da_s2A,
                       E,
                       tau,
                       da_s2B=None,
                       knn=None,
                       plot=False,
                       s1Name='s1',
                       s2Name='s2',
                       A='A',
                       B='B',
                       printStepInfo=False):
    """
    SMI reconstruction.

    Parameters
    ----------
    da_s1A : xarray.core.dataarray.DataArray
        signal s1A (top left)
    da_s1B : xarray.core.dataarray.DataArray
        signal s1B (top right)
    da_s2A : xarray.core.dataarray.DataArray
        signal s2A (bottom left)
    E : int
        dimensionality of time-lagged phase space
    tau : int
        time step parameter
    da_s2B : xarray.core.dataarray.DataArray
        signal s2B (bottom right)
    knn : int
        number of nearest neighbors. None is default = E+1
    plot : bool
        (Optional) plot

    Returns
    -------
    sB2_recon : xarray.core.dataarray.DataArray
        Reconstructed sB2 signal
    rho : float
        Correlation value between sB2 and sB2_reconstruction. Value is between 0 and where 1 is perfect agreement.

    Notes
    -----
    * This algorithm is based on https://doi.org/10.1088%2F1361-6595%2Fab0b1f

    Examples
    --------
    Example 1::

        import pandas as pd
        import matplotlib.pyplot as plt; plt.close('all')
        import numpy as np

        N=10000
        T=1
        ds1=lorentzAttractor(N=N,ICs={ 'x0':-9.38131377,
                                       'y0':-8.42655716 ,
                                       'z0':29.30738524},)
        t=np.linspace(0,T+T/N,N+1)
        da_s1A=ds1.x
        da_s2A=ds1.z

        ds2=lorentzAttractor(N=N,ICs={ 'x0':-9.38131377/2,
                                       'y0':-8.42655716/2 ,
                                       'z0':29.30738524/3},)
        da_s1B=ds2.x
        da_s2B=ds2.z

        E=4
        knn=E+1
        tau=1

        sB2_recon,rho=SMIReconstruction( da_s1A,
                                         da_s1B,
                                         da_s2A,
                                         E,
                                         tau,
                                         da_s2B=da_s2B,
                                         plot=True,
                                         s1Name='Lorentz-x',
                                         s2Name='Lorentz-z',
                                         A='IC1',
                                         B='IC2')

    Example 2::

        ## signal fusion case. Use both x and y to reconstruct z
        import matplotlib.pyplot as plt; plt.close('all')

        N=2000
        ds1=lorentzAttractor(N=N)
        da_s1A=[ds1.x[:N//2],ds1.y[:N//2]]
        da_s1B=[ds1.x[N//2:],ds1.y[N//2:]]
        da_s2A=ds1.z[:N//2]
        da_s2B=ds1.z[N//2:]

        E=3
        tau=1

        da_s1A=ds1.x[:N//2]
        da_s1B=ds1.x[N//2:]
        da_s2A=ds1.z[:N//2]
        da_s2B=ds1.z[N//2:]

        sB2_recon,rho=SMIReconstruction( da_s1A=da_s1A,
                                         da_s2A=da_s2A,
                                         da_s1B=da_s1B,
                                         E=E,
                                         tau=tau,
                                         da_s2B=da_s2B,
                                         plot=True,
                                         s1Name='x only',
                                         s2Name='z',
                                         A='IC1',
                                         B='IC2')

        da_s1A=[ds1.x[:N//2],ds1.y[:N//2]]
        da_s1B=[ds1.x[N//2:],ds1.y[N//2:]]
        da_s2A=ds1.z[:N//2]
        da_s2B=ds1.z[N//2:]

        sB2_recon,rho=SMIReconstruction( da_s1A=da_s1A,
                                         da_s2A=da_s2A,
                                         da_s1B=da_s1B,
                                         E=E,
                                         tau=tau,
                                         da_s2B=da_s2B,
                                         plot=True,
                                         s1Name='x and y fusion',
                                         s2Name='z',
                                         A='IC1',
                                         B='IC2')
    """
    # NOTE(review): these blocks mutate the *caller's* DataArrays in place by
    # overwriting their 't' coordinate with 0..N-1 -- confirm callers do not
    # rely on the original time coordinates afterwards.
    # reset the index to integers
    if type(da_s1A)==list:
        for da_temp in da_s1A:
            da_temp['t']=_np.arange(da_temp.t.shape[0])
    else:
        da_s1A['t']=_np.arange(da_s1A.t.shape[0])
    if type(da_s1B)==list:
        for da_temp in da_s1B:
            da_temp['t']=_np.arange(da_temp.t.shape[0])
    else:
        da_s1B['t']=_np.arange(da_s1B.t.shape[0])
    da_s2A['t']=_np.arange(da_s2A.t.shape[0])
    # NOTE(review): bare except silently tolerates da_s2B=None here; consider
    # narrowing to (TypeError, AttributeError) or an explicit None check.
    try:
        da_s2B['t']=_np.arange(da_s2B.t.shape[0])
    except:
        pass

    # define number of nearest neighbors if not previously defined
    if type(knn)==type(None) or knn==0:
        knn=E+1 # simplex method

    if printStepInfo==True:
        print("E = %d, \ttau = %d, \tknn = %d"%(E,tau,knn),end='')

    ## convert to time-lagged space
    # a list input for s1 means multiple signals are fused into one embedding
    if type(da_s1A)==list:
        fuse=True
    else:
        fuse=False
    P1A=convertToTimeLaggedSpace(da_s1A, E, tau, fuse=fuse)
    P1B=convertToTimeLaggedSpace(da_s1B, E, tau, fuse=fuse)
    # P2A=convertToTimeLaggedSpace(da_s2A, E, tau)

    ## Create map from s1A to s1B
    edm_map=createMap(P1A,P1B,knn)

    ## apply map to s2A to get reconstructed s2Bs.s
    s2B_recon=reconstruct(da_s2A,edm_map)
    # realign the reconstruction's time coordinate with the embedded index --
    # presumably P1A.t matches the reconstruction length; TODO confirm.
    s2B_recon['t']=P1A.t

    ## calc rho
    # the first (E-1)*tau samples are consumed by the embedding, so they are
    # trimmed from the reference signal before correlating
    rho=calcCorrelationCoefficient(da_s2B[(E-1)*tau:],s2B_recon)
    if printStepInfo==True:
        print(", \trho = %.3f"%rho)

    ## optional plot
    if (plot==True or plot=='all') and type(da_s2B) != type(None):
        ## sanity check map by reconstructing s1B from s1A
        if fuse==True:
            s1B_recon=reconstruct(da_s1A[0],edm_map)
        else:
            s1B_recon=reconstruct(da_s1A,edm_map)
        s1B_recon['t']=P1A.t
        if fuse==True:
            rho_s1B=calcCorrelationCoefficient(da_s1B[0][(E-1)*tau:],s1B_recon)
        else:
            rho_s1B=calcCorrelationCoefficient(da_s1B[(E-1)*tau:],s1B_recon)
        # 2x2 grid: s1 (top row) and s2 (bottom row), A (left) and B (right)
        fig=_plt.figure()
        ax1 = _plt.subplot(221)
        ax2 = _plt.subplot(222, sharex = ax1)
        ax3 = _plt.subplot(223, sharex = ax1)
        ax4 = _plt.subplot(224, sharex = ax2)
        ax=[ax1,ax2,ax3,ax4]
        if fuse==True:
            da_s1A[0].plot(ax=ax[0],label='original')
            da_s1B[0].plot(ax=ax[1],label='original')
        else:
            da_s1A.plot(ax=ax[0],label='original')
            da_s1B.plot(ax=ax[1],label='original')
        s1B_recon.plot(ax=ax[1],label='recon')
        _subtitle(ax[1], 'rho=%.3f'%(rho_s1B))
        da_s2A.plot(ax=ax[2],label='original')
        da_s2B.plot(ax=ax[3],label='original')
        s2B_recon.plot(ax=ax[3],label='recon')
        _subtitle(ax[3], 'rho=%.3f'%(rho))
        ax[1].legend()
        ax[3].legend()
        _finalizeFigure(fig,figSize=[6,4])

    return s2B_recon,rho
def ccm( s1A,
         s1B,
         s2A,
         E,
         tau,
         s2B=None,
         knn=None,
         plot=False,
         removeOffset=False):
    """
    Cross correlation map

    Maps in both directions (s1->s2 and s2->s1) and returns the Pearson
    correlation of each reconstruction.

    Parameters
    ----------
    s1A : xarray.core.dataarray.DataArray
        signal s1A (top left)
    s1B : xarray.core.dataarray.DataArray
        signal s1B (top right)
    s2A : xarray.core.dataarray.DataArray
        signal s2A (bottom left)
    E : int
        dimensionality of time-lagged phase space
    tau : int
        time step parameter
    s2B : xarray.core.dataarray.DataArray
        signal s2B (bottom right). Required despite the None default.
    knn : int
        number of nearest neighbors. None is default = E+1
    plot : bool
        (Optional) plot
    removeOffset : bool
        (Optional) subtract each signal's mean before mapping

    Returns
    -------
    rho_1to2 : float
        Correlation of the s1-map reconstruction of s2B.
    rho_2to1 : float
        Correlation of the s2-map reconstruction of s1B.

    Raises
    ------
    ValueError
        If s2B is not provided.

    Examples
    --------
    Example1::

        #TODO. The results of this example looks identical?? Error? Investigate.

        # lorentz equations
        N=1000
        ds=lorentzAttractor(N=N,plot=False)
        x=ds.x
        z=ds.z

        # add noise
        x+=_np.random.normal(0,x.std()/1,N)

        # prep data
        s1A=x[0:N//2]
        s1B=x[N//2:N]
        s2A=x[0:N//2]
        s2B=x[N//2:N]

        # call function
        E=3
        tau=1
        knn=None
        rho=ccm(s1A,s1B,s2A,s2B=s2B,E=E,tau=tau,plot=True,knn=knn)
    """
    # s2B is used by both mapping directions; fail fast with a clear message
    # instead of an obscure crash inside check_dataArray (previously the
    # None default was accepted and then blew up downstream).
    if s2B is None:
        raise ValueError('s2B is a required argument for ccm()')
    # check data input
    s1A = check_dataArray(s1A, resetTimeIndex=False)
    s2A = check_dataArray(s2A, resetTimeIndex=False)
    s1B = check_dataArray(s1B, resetTimeIndex=False)
    s2B = check_dataArray(s2B, resetTimeIndex=False)
    # define number of nearest neighbors if not previously defined
    # (idiom fix: `knn is None` instead of `type(knn)==type(None)`)
    if knn is None or knn == 0:
        knn = E + 1  # simplex method
    # remove offset
    if removeOffset == True:
        s1A = s1A.copy() - s1A.mean()
        s1B = s1B.copy() - s1B.mean()
        s2A = s2A.copy() - s2A.mean()
        s2B = s2B.copy() - s2B.mean()
    # convert to time-lagged space
    P1A = convertToTimeLaggedSpace(s1A, E, tau)
    P1B = convertToTimeLaggedSpace(s1B, E, tau)
    P2A = convertToTimeLaggedSpace(s2A, E, tau)
    P2B = convertToTimeLaggedSpace(s2B, E, tau)

    ## A to B
    edm_map = createMap(P1A.copy(), P1B.copy(), knn=knn)
    s2B_recon = reconstruct(s2A.copy(), edm_map)
    rho_1to2 = calcCorrelationCoefficient(s2B[(E - 1) * tau:], s2B_recon, plot=False)

    ## B to A
    edm_map = createMap(P2A.copy(), P2B.copy(), knn)
    s1B_recon = reconstruct(s1A.copy(), edm_map)
    rho_2to1 = calcCorrelationCoefficient(s1B[(E - 1) * tau:], s1B_recon, plot=False)

    if plot == True:
        # scatter the reconstructions against the originals plus a y=x line
        fig, ax = _plt.subplots(1, 2, sharex=True, sharey=True)
        ax[1].plot(s1B[(E - 1) * tau:], s1B_recon, linestyle='', marker='.')
        ax[0].plot(s2B[(E - 1) * tau:], s2B_recon, linestyle='', marker='.')
        ax[0].set_aspect('equal')
        ax[1].set_aspect('equal')
        ax[0].plot([s2B.min().data, s2B.max().data], [s2B.min().data, s2B.max().data])
        ax[1].plot([s1B.min().data, s1B.max().data], [s1B.min().data, s1B.max().data])
        ax[0].set_title('s1 to s2 CCM')
        ax[1].set_title('s2 to s1 CCM')

    return rho_1to2, rho_2to1
def SMIParameterScan(s1A,s2A,s1B,ERange,tauRange,s2B=None,plot=False,numberCPUs=_cpu_count()-1):
"""
Parameter scan for SMIReconstruction()
Parameters
----------
s1A : xarray.core.dataarray.DataArray
signal s1A (top left)
s1B : xarray.core.dataarray.DataArray
signal s1B (top right)
s2A : xarray.core.dataarray.DataArray
signal s2A (bottom left)
ERange : numpy.ndarray of dtype int
E values to scan through
tauRange : numpy.ndarray of dtype int
tau values to scan through
s2B : xarray.core.dataarray.DataArray
signal s2B (bottom right)
plot : bool
(Optional) plot
numberCPUs : int
Number of cpus to dedicate for this scan. Default is the total number minus 1.
Returns
----------
results : xarray.core.dataarray.DataArray
Pearson correlation results for each value of E and tau. 2D array with coordinates E and tau.
Examples
--------
Example 1 ::
import numpy as np
N=10000
dt=0.025
# solve Lorentz equations with one set of ICs
ds_A=lorentzAttractor(N=N,dt=dt)
s1A=ds_A.x
s2A=ds_A.z
# solve Lorentz equations with a second set of ICs
ds_B=lorentzAttractor( N=N,
dt=dt,
ICs={ 'x0':-9.38131377/2,
'y0':-8.42655716/2 ,
'z0':29.30738524/3})
s1B=ds_B.x
s2B=ds_B.z
# perform reconstruction with a parameter scan of E and tau
ERange=np.arange(2,12+1,1)
tauRange=np.arange(1,100+1,2)
results=SMIParameterScan(s1A=s1A,s2A=s2A,s1B=s1B, s2B=s2B,ERange=ERange,tauRange=tauRange,plot=True)
fig=_plt.gcf()
fig.savefig("SMIReconstruction_example_results.png",dpi=150)
"""
# check input signals
s1A=check_dataArray(s1A)
s2A=check_dataArray(s2A)
s1B=check_dataArray(s1B)
if type(s2B)!= type(None):
s2B=check_dataArray(s2B)
# SMIReconstruction function, designed for parallel processing.
def doStuff(E,tau):
out2,rho=SMIReconstruction( da_s1A=s1A,
da_s2A=s2A,
da_s1B=s1B,
E=E,
tau=tau,
da_s2B=s2B,
plot=True)
return E, tau, rho
# Create unique list of each pair of (E,tau)
X,Y=_np.meshgrid(ERange,tauRange)
X=X.reshape(-1)
Y=Y.reshape(-1)
# Do SMI scan and format results in a dataarray
results = _Parallel(n_jobs=numberCPUs)(_delayed(doStuff)(E,tau) for E,tau in zip(X,Y))
results = | _pd.DataFrame(results,columns=['E','tau','rho']) | pandas.DataFrame |
# %% Imports
import pandas as pd
import re
import sys
import numpy as np
sys.path.append("../")
from metrics.metric_participants import (ComputeMetrics, print_metrics)
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from category_encoders import TargetEncoder
from sklearn.linear_model import QuantileRegressor
from sklego.preprocessing import ColumnSelector
from sklearn.preprocessing import StandardScaler
import random
from eda.checker import check_train_test
from tools.postprocessing import clip_first_month, postprocess_predictions, postprocess_submissions
random.seed(0)
sales_train = pd.read_csv("../data/data_raw/sales_train.csv")
df_full = pd.read_csv("../data/split.csv")
df_region = pd.read_csv("../data/data_raw/regions.csv")
regions_hcps = pd.read_csv("../data/data_raw/regions_hcps.csv")
activity_features = | pd.read_csv("../data/features/activity_features.csv") | pandas.read_csv |
import pandas as pd
import plotly.graph_objects as go
from EnergyIntensityIndicators.utilities import lmdi_utilities
from EnergyIntensityIndicators.utilities.dataframe_utilities \
import DFUtilities as df_utils
class AdditiveLMDI:
    def __init__(self, output_directory, energy_data, energy_shares,
                 base_year, end_year, total_label, lmdi_type='LMDI-I'):
        """Store the inputs needed for an additive LMDI decomposition.

        Args:
            output_directory: Directory for generated output files.
            energy_data: Energy consumption data (DataFrame).
            energy_shares: Shares of total energy for each category
                in the level of aggregation (DataFrame).
            base_year: Base year of the decomposition.
            end_year: Final year of the decomposition.
            total_label: Name of the aggregation of categories.
            lmdi_type: 'LMDI-I' (default) or 'LMDI-II'.
        """
        self.energy_data = energy_data
        self.energy_shares = energy_shares
        self.total_label = total_label
        self.lmdi_type = lmdi_type
        self.end_year = end_year
        self.base_year = base_year
        self.output_directory = output_directory
def log_mean_divisia_weights(self):
"""Calculate log mean weights for the additive model where T=t, 0 = t - 1
Args:
energy_data (dataframe): energy consumption data
energy_shares (dataframe): Shares of total energy for
each category in level of aggregation total_label (str):
Name of aggregation of categories in level of aggregation
lmdi_type (str, optional): 'LMDI-I' or 'LMDI-II'.
Defaults to 'LMDI-I' because it is
'consistent in aggregation and perfect
in decomposition at the subcategory level'
(Ang, B.W., 2015. LMDI decomposition approach: A guide for
implementation. Energy Policy 86, 233-238.).
"""
print(f'ADDITIVE LMDI TYPE: {self.lmdi_type}')
if not self.lmdi_type:
self.lmdi_type = 'LMDI-I'
print(f'ADDITIVE LMDI TYPE: {self.lmdi_type}')
log_mean_shares_labels = [f"log_mean_shares_{col}" for
col in self.energy_shares.columns]
log_mean_weights = | pd.DataFrame(index=self.energy_data.index) | pandas.DataFrame |
#!/usr/bin/env python
"""Hacky script to generate HTML plots of feature distributions."""
import pandas as pd
import numpy as np
import argparse
import os
from bokeh.plotting import figure, show
from bokeh.palettes import Set1
from bokeh.io import save
from bokeh.layouts import gridplot
from bokeh.resources import CDN
from scipy.stats import ks_2samp
def fetch_and_split_data(infile_name, cutoff):
"""
Reads data from a CSV file, performs some sanity checks, and then
returns to dataframes which have been split based on the 'score' column
at the prefined cutoff ratio. If predictions are rank-tied, the high-risk
group might be larger than specified by cutoff.
Args:
infile_name (str): File name of a CSV file. This function expects
the CSV to contain at 'score' column.
cutoff (float): Ratio of entities that should be considered
high-risk. Needs to be in [0,1].
Returns (pd.DataFrame, pd.DataFrame): The input data, split by score.
"""
if not infile_name:
raise ValueError("Input CSV file is required.")
if cutoff < 0. or cutoff > 1.0:
raise ValueError("cutoff must be in [0, 1].")
# fetch the data
df = | pd.read_csv(infile_name) | pandas.read_csv |
import sdi_utils.gensolution as gs
from sdi_utils import set_logging
import sdi_utils.textfield_parser as tfp
from sklearn.linear_model import LinearRegression
import pandas as pd
EXAMPLE_ROWS = 5
# The SAP Data Intelligence runtime injects a global `api` object. When this
# script runs outside that runtime (e.g. local testing), `api` is undefined,
# so a mock with the same surface is installed instead.
try:
    api
except NameError:
    class api:

        class Message:
            # Minimal stand-in for the Data Intelligence message object.
            def __init__(self,body = None,attributes = ""):
                self.body = body
                self.attributes = attributes

        def send(port,msg) :
            # Mock send: print the message parts to stdout and echo it back.
            if isinstance(msg,api.Message) :
                print('Port: ', port)
                print('Attributes: ', msg.attributes)
                print('Body: ', str(msg.body))
            else :
                print(str(msg))
            return msg

        def call(config,msg):
            # Mock operator invocation: install the config, then run process().
            api.config = config
            return process(msg)

        def set_port_callback(port, callback) :
            # Mock port wiring: build a small test DataFrame, wrap it in a
            # Message, set a default config, and invoke the callback directly.
            df = pd.DataFrame(
                {'icol': [1, 1, 3, 3, 3], 'col2': [1, 2, 3, 4, 5], 'col3': [2, 3, 4, 5, 6],
                 'col4': [5, 6.5, 7.5, 8, 9], 'col5': [6, 6.7, 8.2, 9, 10.1]})
            default_msg = api.Message(attributes={'format': 'pandas', 'name': 'DF_name'},body = df)
            api.config.regression_cols = "col2,col3,col4"
            api.config.prediction_col = "col5"
            return callback(default_msg)

        class config:
            ## Meta data describing the operator and its configurable params.
            config_params = dict()
            version = '0.0.17'
            tags = {'': '', 'pandas': ''}
            operator_description = "Train Linear Regression"
            operator_description_long = "Using Scikit Learn module to train a linear regression model."
            add_readme = dict()
            add_readme["References"] = r"""[ScitLearn Linear Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html)"""
            # Each config value has a matching config_params entry used by the
            # Data Intelligence UI to render the operator configuration form.
            prediction_col = 'None'
            config_params['prediction_col'] = {'title': 'Prediction Column', 'description': 'Prediction column', 'type': 'string'}
            regression_cols = 'None'
            config_params['regression_cols'] = {'title': 'Regression Columns', 'description': 'Regression columns', 'type': 'string'}
            segment_cols = 'None'
            config_params['segment_cols'] = {'title': 'Segment Columns', 'description': 'Segment Columns', 'type': 'string'}
def process(msg) :
logger, log_stream = set_logging('DEBUG')
# start custom process definition
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
att_dict = dict()
att_dict['config'] = dict()
###### start of doing calculation
# segment columns
att_dict['config']['segment_cols'] = api.config.segment_cols
segment_cols = tfp.read_list(api.config.segment_cols)
# regression columns
att_dict['config']['regression_cols'] = api.config.regression_cols
regression_cols = tfp.read_list(api.config.regression_cols)
if not regression_cols:
logger.error('No Regression Columns - mandatory data')
raise ValueError('No Regression Columns - mandatory data')
# prediction column
att_dict['config']['prediction_col'] = api.config.prediction_col
prediction_col = tfp.read_value(api.config.prediction_col)
if not prediction_col:
raise ValueError('No Predicition Column - mandatory data')
training_cols = regression_cols + [prediction_col]
model = LinearRegression(fit_intercept=True)
def fit(x):
model.fit(x[regression_cols], x[prediction_col])
return pd.Series([model.coef_, model.intercept_], index=['coef', 'intercept'])
if segment_cols:
coef_df = df.groupby(segment_cols)[training_cols].apply(fit).reset_index()
else:
model.fit(df[regression_cols], df[prediction_col])
coef_df = | pd.Series([model.coef_, model.intercept_], index=['coef', 'intercept']) | pandas.Series |
import requests
import pandas as pd
import io
import json
from django.utils.timezone import make_aware
from django.db.utils import IntegrityError
from django.core.cache import cache
from vid.models import CountyMetrics, EntireUS
from covid_tracker.settings import APP_URL
api_key = '776e4ec57ee346d6a0a2a4abb6b006a8'
act_now_api = f'https://api.covidactnow.org/v2/counties.timeseries.json?apiKey={api_key}'
nyt_timeseries = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
nyt_live = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/live/us-counties.csv'
nyt_all = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv'
focus_fips = {'42101': 'philly',
'42111': 'somerset',
'53033': 'king',
'17043': 'dupage',
'17089': 'kane',
'40027': 'cleveland',
'40109': 'oklahoma',
'06037': 'los angeles'}
def retrieve_nyt():
    """Stream the NYT county timeseries CSV and return the focus counties.

    Returns:
        tuple(pandas.DataFrame, int): The filtered dataframe (focus_fips
        counties only, UTC dates, NaN replaced with None) and the number of
        ~750 KB chunks received; the chunk count is reused by load_metrics()
        to size its database write batches.
    """
    r = requests.get(nyt_timeseries, stream=True)
    size = 0
    content = io.BytesIO()
    num_chunks = 0
    # Stream in ~750 KB chunks; the free Heroku dyno has a ~1 MB buffer
    # limit (see load_metrics docstring), so the response is not read whole.
    for chunk in r.iter_content(750000):
        size += len(chunk)
        content.write(chunk)
        num_chunks += 1
    content.seek(0)
    print('received nyt data')
    # fips must stay a string to preserve leading zeros (e.g. '06037').
    nyt_data = pd.read_csv(content,
                           encoding='utf8',
                           sep=",",
                           parse_dates=['date'],
                           dtype={'fips': str})
    # Replace NaN with None -- presumably so missing values insert as SQL
    # NULL through the Django ORM; verify against CountyMetrics fields.
    nyt_data = nyt_data.where(pd.notnull(nyt_data), None)
    # keep only the FIPS that i care about
    nyt_data = nyt_data[nyt_data['fips'].isin(list(focus_fips.keys()))]
    nyt_data['date'] = pd.to_datetime(nyt_data['date'], format="%Y-%m-%d", utc=True)
    print('read nyt data')
    return nyt_data, num_chunks
def retrieve_single_actnow(fips):
    """Download the CovidActNow timeseries for one county.

    Args:
        fips (str): County FIPS code (must be a key of focus_fips).

    Returns:
        pandas.DataFrame: Columns date (UTC), testPositivityRatio,
        infectionRate, population, fips.
    """
    url = f'https://api.covidactnow.org/v2/county/{fips}.timeseries.json?apiKey={api_key}'
    payload = json.loads(requests.get(url).content.decode('utf-8'))
    frame = pd.DataFrame(payload['metricsTimeseries'])
    frame['date'] = pd.to_datetime(frame['date'], format="%Y-%m-%d", utc=True)
    frame = frame[['date', 'testPositivityRatio', 'infectionRate']]
    frame['population'] = payload['population']
    frame['fips'] = fips
    print(f'CovidActNow data for {focus_fips[fips]} successfully downloaded')
    return frame
def retrieve_all_actnow():
    """Download CovidActNow metrics for every county in focus_fips.

    Returns:
        pandas.DataFrame: Concatenated per-county metrics with a fresh
        integer index (matches the old append(ignore_index=True) behavior).
    """
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; collect the per-county frames and concatenate once (this
    # also avoids re-copying the accumulated frame on every iteration).
    county_frames = [retrieve_single_actnow(fips=fips) for fips in focus_fips]
    if not county_frames:
        # Preserve the old behavior of returning an empty frame when there
        # are no focus counties.
        return pd.DataFrame()
    return pd.concat(county_frames, ignore_index=True)
# noinspection DuplicatedCode
def load_metrics():
"""
load case/death time series from nyt github csv.
Free heroku dyno has buffer limit of 1mb so request is broken into chunks
free heroku postgres db has limit of only 10k rows, so
only includes the locations in focus_fips.
"""
CountyMetrics.objects.all().delete()
nyt_data, num_chunks = retrieve_nyt()
actnow_metrics = retrieve_all_actnow()
all_data = pd.merge(nyt_data, actnow_metrics, on=['date', 'fips'], how='left')
chunk_size = round(len(all_data) / (num_chunks + 1))
current_index = 0
while current_index < len(all_data) - 1:
end_index = current_index + chunk_size
if end_index > len(all_data) - 1:
chunks_dataframe = all_data.iloc[current_index:]
else:
chunks_dataframe = all_data[current_index:end_index]
current_index += chunk_size
metrics_objects = []
for index, row in chunks_dataframe.iterrows():
metrics_objects.append(CountyMetrics(date=row['date'],
county=row['county'],
state=row['state'],
fips=row['fips'],
cases=row['cases'],
deaths=row['deaths'],
population=row['population'],
testPositivityRatio=row['testPositivityRatio'],
infectionRate=row['infectionRate']
))
print(f'created metrics objects in pd ending at index {end_index} of {len(all_data)}')
try:
CountyMetrics.objects.bulk_create(metrics_objects)
print('All metrics data successfully imported')
except IntegrityError:
print('Metrics data failed to import')
def load_live_nyt():
nyt_live_data = requests.get(nyt_live).content
nyt_live_data = pd.read_csv(io.BytesIO(nyt_live_data),
encoding='utf8',
sep=",",
parse_dates=['date'],
dtype={'fips': str})
nyt_live_data = nyt_live_data.where( | pd.notnull(nyt_live_data) | pandas.notnull |
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
    """_check_index rejects missing and non-unique index columns."""
    with pytest.raises(
        ColumnNotPresentError,
        match="Specified index column `foo` not found in dataframe",
    ):
        _check_index(dataframe=sample_df, index="foo")

    # Uniqueness is only validated for pandas; Dask frames skip the check.
    if isinstance(sample_df, pd.DataFrame):
        with pytest.raises(LookupError, match="Index column must be unique"):
            _check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
    """_check_logical_types validates the mapping type and its keys."""
    with pytest.raises(TypeError, match="logical_types must be a dictionary"):
        _check_logical_types(sample_df, logical_types="type")

    mapping_with_unknown_cols = {
        "full_name": None,
        "age": None,
        "birthday": None,
        "occupation": None,
    }
    expected = re.escape(
        "logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
    )
    with pytest.raises(ColumnNotPresentError, match=expected):
        _check_logical_types(sample_df, mapping_with_unknown_cols)
def test_check_time_index_errors(sample_df):
    """_check_time_index rejects a time index column that does not exist."""
    with pytest.raises(
        ColumnNotPresentError,
        match="Specified time index column `foo` not found in dataframe",
    ):
        _check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
    """ww.init() attaches a TableSchema to the dataframe."""
    assert sample_df.ww.schema is None
    sample_df.ww.init()
    assert isinstance(sample_df.ww.schema, TableSchema)


def test_accessor_schema_property(sample_df):
    """The public schema property returns an equal copy, not the internal object."""
    sample_df.ww.init()

    assert sample_df.ww._schema is not sample_df.ww.schema
    assert sample_df.ww._schema == sample_df.ww.schema


def test_set_accessor_name(sample_df):
    """Name access/assignment requires init; afterwards it round-trips."""
    df = sample_df.copy()
    error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    with pytest.raises(WoodworkNotInitError, match=error):
        df.ww.name
    with pytest.raises(WoodworkNotInitError, match=error):
        df.ww.name = "name"

    df.ww.init()

    assert df.ww.name is None
    df.ww.name = "name"
    assert df.ww.schema.name == "name"
    assert df.ww.name == "name"


def test_rename_init_with_name(sample_df):
    """A name passed to init() can be reassigned later."""
    df = sample_df.copy()
    df.ww.init(name="name")
    assert df.ww.name == "name"
    df.ww.name = "new_name"
    assert df.ww.schema.name == "new_name"
    assert df.ww.name == "new_name"


def test_name_error_on_init(sample_df):
    """Non-string names are rejected at init time."""
    err_msg = "Table name must be a string"
    with pytest.raises(TypeError, match=err_msg):
        sample_df.ww.init(name=123)


def test_name_error_on_update(sample_df):
    """Non-string names are rejected on assignment after init."""
    sample_df.ww.init()
    err_msg = "Table name must be a string"
    with pytest.raises(TypeError, match=err_msg):
        sample_df.ww.name = 123


def test_name_persists_after_drop(sample_df):
    """The table name carries over to dataframes returned by ww.drop."""
    df = sample_df.copy()
    df.ww.init()

    df.ww.name = "name"
    assert df.ww.name == "name"

    dropped_df = df.ww.drop(["id"])
    assert dropped_df.ww.name == "name"
    assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
    """Metadata access/assignment requires init; afterwards it round-trips."""
    df = sample_df.copy()
    error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    with pytest.raises(WoodworkNotInitError, match=error):
        df.ww.metadata
    with pytest.raises(WoodworkNotInitError, match=error):
        df.ww.metadata = {"new": "metadata"}

    df.ww.init()

    assert df.ww.metadata == {}
    df.ww.metadata = {"new": "metadata"}
    assert df.ww.schema.metadata == {"new": "metadata"}
    assert df.ww.metadata == {"new": "metadata"}


def test_set_metadata_after_init_with_metadata(sample_df):
    """Metadata supplied at init can be replaced wholesale afterwards."""
    df = sample_df.copy()
    df.ww.init(table_metadata={"new": "metadata"})

    assert df.ww.metadata == {"new": "metadata"}
    df.ww.metadata = {"new": "new_metadata"}
    assert df.ww.schema.metadata == {"new": "new_metadata"}
    assert df.ww.metadata == {"new": "new_metadata"}


def test_metadata_persists_after_drop(sample_df):
    """Table metadata carries over to dataframes returned by ww.drop."""
    df = sample_df.copy()
    df.ww.init()

    df.ww.metadata = {"new": "metadata"}
    assert df.ww.metadata == {"new": "metadata"}

    dropped_df = df.ww.drop(["id"])
    assert dropped_df.ww.metadata == {"new": "metadata"}
    assert dropped_df.ww.schema.metadata == {"new": "metadata"}


def test_metadata_error_on_init(sample_df):
    """Non-dict metadata is rejected at init time."""
    err_msg = "Table metadata must be a dictionary."
    with pytest.raises(TypeError, match=err_msg):
        sample_df.ww.init(table_metadata=123)


def test_metadata_error_on_update(sample_df):
    """Non-dict metadata is rejected on assignment after init."""
    sample_df.ww.init()
    err_msg = "Table metadata must be a dictionary."
    with pytest.raises(TypeError, match=err_msg):
        sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
    """physical_types maps every column to its logical type's backing dtype."""
    sample_df.ww.init(logical_types={"age": "Categorical"})

    assert isinstance(sample_df.ww.physical_types, dict)
    assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
    for k, v in sample_df.ww.physical_types.items():
        logical_type = sample_df.ww.columns[k].logical_type
        if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
            # Koalas cannot store some primary dtypes and falls back to a backup
            assert v == logical_type.backup_dtype
        else:
            assert v == logical_type.primary_dtype


def test_accessor_separation_of_params(sample_df):
    # mix up order of accessor and schema params
    schema_df = sample_df.copy()
    schema_df.ww.init(
        name="test_name",
        index="id",
        semantic_tags={"id": "test_tag"},
        time_index="signup_date",
    )

    # The "index" standard tag is added alongside the user-supplied tag
    assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
    assert schema_df.ww.index == "id"
    assert schema_df.ww.time_index == "signup_date"
    assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
    """init_with_full_schema attaches an existing schema object verbatim.

    Verified against two derived dataframes (head and a .loc selection):
    the exact schema instance is reused and any extra typing parameters
    are ignored.
    """
    schema_df = sample_df.copy()
    schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
    schema = schema_df.ww._schema

    head_df = schema_df.head(2)
    assert head_df.ww.schema is None
    head_df.ww.init_with_full_schema(schema=schema)

    # The exact same schema object is attached, not a copy
    assert head_df.ww._schema is schema
    assert head_df.ww.name == "test_schema"
    assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}

    # Renamed from `iloc_df`: this selection uses label-based .loc, not .iloc
    loc_df = schema_df.loc[[2, 3]]
    assert loc_df.ww.schema is None
    loc_df.ww.init_with_full_schema(schema=schema)

    assert loc_df.ww._schema is schema
    assert loc_df.ww.name == "test_schema"
    assert loc_df.ww.semantic_tags["id"] == {"index", "test_tag"}

    # Extra parameters do not take effect
    assert isinstance(loc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
    """Every public accessor method raises WoodworkNotInitError before init.

    Discovers public methods via reflection and calls each one with
    representative arguments, expecting the not-initialized error.
    """
    methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
    public_methods = [
        method
        for method in dir(sample_df.ww)
        if is_public_method(WoodworkTableAccessor, method)
    ]
    public_methods = [
        method for method in public_methods if method not in methods_to_exclude
    ]
    # Arguments for each public method; None means call with no arguments.
    # Bug fix: "describe" was listed twice — a duplicate dict-literal key
    # silently discards the earlier entry, so the duplicate is removed.
    method_args_dict = {
        "add_semantic_tags": [{"id": "new_tag"}],
        "describe": None,
        "pop": ["id"],
        "describe_dict": None,
        "drop": ["id"],
        "get_valid_mi_columns": None,
        "mutual_information": None,
        "mutual_information_dict": None,
        "remove_semantic_tags": [{"id": "new_tag"}],
        "rename": [{"id": "new_id"}],
        "reset_semantic_tags": None,
        "select": [["Double"]],
        "set_index": ["id"],
        "set_time_index": ["signup_date"],
        "set_types": [{"id": "Integer"}],
        "to_disk": ["dir"],
        "to_dictionary": None,
        "value_counts": None,
        "infer_temporal_frequencies": None,
    }
    error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    for method in public_methods:
        func = getattr(sample_df.ww, method)
        method_args = method_args_dict[method]
        with pytest.raises(WoodworkNotInitError, match=error):
            if method_args:
                func(*method_args)
            else:
                func()
def test_accessor_init_errors_properties(sample_df):
    """Every public accessor property raises WoodworkNotInitError before init."""
    excluded = ["iloc", "loc", "schema", "_dataframe"]
    accessor_props = [
        attr
        for attr in dir(sample_df.ww)
        if is_property(WoodworkTableAccessor, attr) and attr not in excluded
    ]
    expected_error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    for attr in accessor_props:
        with pytest.raises(WoodworkNotInitError, match=expected_error):
            getattr(sample_df.ww, attr)
def test_init_accessor_with_schema_errors(sample_df):
    """init_with_full_schema validates the schema type and column coverage."""
    schema_df = sample_df.copy()
    schema_df.ww.init()
    schema = schema_df.ww.schema

    iloc_df = schema_df.iloc[:, :-1]
    assert iloc_df.ww.schema is None

    error = "Provided schema must be a Woodwork.TableSchema object."
    with pytest.raises(TypeError, match=error):
        iloc_df.ww.init_with_full_schema(schema=int)

    # Schema describes a column ('ip_address') the sliced dataframe lacks
    error = (
        "Woodwork typing information is not valid for this DataFrame: "
        "The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
    )
    with pytest.raises(ValueError, match=error):
        iloc_df.ww.init_with_full_schema(schema=schema)


def test_accessor_with_schema_parameter_warning(sample_df):
    """Typing parameters passed alongside a full schema are ignored with a warning."""
    schema_df = sample_df.copy()
    schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
    schema = schema_df.ww.schema

    head_df = schema_df.head(2)

    warning = (
        "A schema was provided and the following parameters were ignored: index, "
        "time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
    )
    with pytest.warns(ParametersIgnoredWarning, match=warning):
        head_df.ww.init_with_full_schema(
            index="ignored_id",
            time_index="ignored_time_index",
            logical_types={"ignored": "ltypes"},
            already_sorted=True,
            semantic_tags={"ignored_id": "ignored_test_tag"},
            use_standard_tags={"id": True, "age": False},
            schema=schema,
        )

    # Schema values win over the ignored parameters
    assert head_df.ww.name == "test_schema"
    assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
    """__getattr__ defers to the schema after init and errors cleanly otherwise."""
    schema_df = sample_df.copy()

    # We can access attributes on the Accessor class before the schema is initialized
    assert schema_df.ww.schema is None

    error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    with pytest.raises(WoodworkNotInitError, match=error):
        schema_df.ww.index

    schema_df.ww.init()

    assert schema_df.ww.name is None
    assert schema_df.ww.index is None
    assert schema_df.ww.time_index is None

    assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)

    # Unknown attributes raise AttributeError even after init
    error = re.escape("Woodwork has no attribute 'not_present'")
    with pytest.raises(AttributeError, match=error):
        sample_df.ww.init()
        sample_df.ww.not_present


def test_getitem(sample_df):
    """ww[...] subsets columns/series while carrying typing information along."""
    df = sample_df
    df.ww.init(
        time_index="signup_date",
        index="id",
        name="df_name",
        logical_types={"age": "Double"},
        semantic_tags={"age": {"custom_tag"}},
    )
    assert list(df.columns) == list(df.ww.schema.columns)

    subset = ["id", "signup_date"]
    df_subset = df.ww[subset]
    pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
    assert subset == list(df_subset.ww._schema.columns)
    # index and time index survive because both columns are in the subset
    assert df_subset.ww.index == "id"
    assert df_subset.ww.time_index == "signup_date"

    subset = ["age", "email"]
    df_subset = df.ww[subset]
    pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
    assert subset == list(df_subset.ww._schema.columns)
    # index/time index columns were dropped, so the subset schema loses them
    assert df_subset.ww.index is None
    assert df_subset.ww.time_index is None
    assert isinstance(df_subset.ww.logical_types["age"], Double)
    assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}

    # Empty selection yields an empty but valid schema
    subset = df.ww[[]]
    assert len(subset.ww.columns) == 0
    assert subset.ww.index is None
    assert subset.ww.time_index is None

    # A single string key returns a typed series
    series = df.ww["age"]
    pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
    assert isinstance(series.ww.logical_type, Double)
    assert series.ww.semantic_tags == {"custom_tag", "numeric"}

    series = df.ww["id"]
    pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
    assert isinstance(series.ww.logical_type, Integer)
    assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
    """ww[...] requires init first."""
    error = re.escape(
        "Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
    )
    with pytest.raises(WoodworkNotInitError, match=error):
        sample_df.ww["age"]


def test_getitem_invalid_input(sample_df):
    """ww[...] raises ColumnNotPresentError for unknown column selections."""
    df = sample_df
    df.ww.init()

    # List selection reports the missing subset of names
    error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
    with pytest.raises(ColumnNotPresentError, match=error_msg):
        df.ww[["email", 2, 1]]

    # Scalar selection reports the single missing name
    error_msg = "Column with name 'invalid_column' not found in DataFrame"
    with pytest.raises(ColumnNotPresentError, match=error_msg):
        df.ww["invalid_column"]
def test_accessor_equality(sample_df):
    """ww equality compares both schema and (for pandas) underlying data."""
    # Confirm equality with same schema and same data
    schema_df = sample_df.copy()
    schema_df.ww.init()

    copy_df = schema_df.ww.copy()
    assert schema_df.ww == copy_df.ww

    # Confirm not equal with different schema but same data
    copy_df.ww.set_time_index("signup_date")
    assert schema_df.ww != copy_df.ww

    # Confirm not equal with same schema but different data - only pandas
    loc_df = schema_df.ww.loc[:2, :]
    if isinstance(sample_df, pd.DataFrame):
        assert schema_df.ww != loc_df
    else:
        # Dask/Koalas comparisons skip the (lazy/expensive) data check
        assert schema_df.ww == loc_df


def test_accessor_shallow_equality(sample_df):
    """deep=False compares schemas only; deep=True also compares data."""
    metadata_table = sample_df.copy()
    metadata_table.ww.init(table_metadata={"user": "user0"})

    diff_metadata_table = sample_df.copy()
    diff_metadata_table.ww.init(table_metadata={"user": "user2"})

    # Metadata differences only matter for deep comparison
    assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
    assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)

    schema = metadata_table.ww.schema
    diff_data_table = metadata_table.ww.loc[:2, :]
    same_data_table = metadata_table.ww.copy()

    assert diff_data_table.ww.schema.__eq__(schema, deep=True)
    assert same_data_table.ww.schema.__eq__(schema, deep=True)

    assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
    assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)

    assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
    if isinstance(sample_df, pd.DataFrame):
        # Only pandas actually compares data contents on deep equality
        assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
    """A string column of datetimes can serve as time index on init."""
    time_index_df.ww.init(name="schema", index="id", time_index="times")
    assert time_index_df.ww.name == "schema"
    assert time_index_df.ww.index == "id"
    assert time_index_df.ww.time_index == "times"
    # The time index column ends up typed as Datetime
    assert isinstance(
        time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
    )


def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
    """Numeric values convert to Datetime; non-datetime strings do not."""
    schema_df = time_index_df.copy()
    schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})

    error_msg = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_msg):
        time_index_df.ww.init(
            name="schema", time_index="strs", logical_types={"strs": Datetime}
        )

    assert schema_df.ww.time_index == "ints"
    assert schema_df["ints"].dtype == "datetime64[ns]"


def test_accessor_with_numeric_time_index(time_index_df):
    """Numeric columns are valid time indexes at init and via set_time_index."""
    # Set a numeric time index on init
    schema_df = time_index_df.copy()
    schema_df.ww.init(time_index="ints")
    date_col = schema_df.ww.columns["ints"]
    assert schema_df.ww.time_index == "ints"
    assert isinstance(date_col.logical_type, Integer)
    assert date_col.semantic_tags == {"time_index", "numeric"}

    # Specify logical type for time index on init
    schema_df = time_index_df.copy()
    schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
    date_col = schema_df.ww.columns["ints"]
    assert schema_df.ww.time_index == "ints"
    assert isinstance(date_col.logical_type, Double)
    assert date_col.semantic_tags == {"time_index", "numeric"}

    # A string column explicitly converted to Double also qualifies
    schema_df = time_index_df.copy()
    schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
    date_col = schema_df.ww.columns["strs"]
    assert schema_df.ww.time_index == "strs"
    assert isinstance(date_col.logical_type, Double)
    assert date_col.semantic_tags == {"time_index", "numeric"}

    # Non-numeric, non-datetime logical types are rejected
    error_msg = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_msg):
        time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})

    error_msg = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_msg):
        time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})

    # Set numeric time index after init
    schema_df = time_index_df.copy()
    schema_df.ww.init(logical_types={"ints": "Double"})
    assert schema_df.ww.time_index is None

    schema_df.ww.set_time_index("ints")
    date_col = schema_df.ww.columns["ints"]
    assert schema_df.ww.time_index == "ints"
    assert isinstance(date_col.logical_type, Double)
    assert date_col.semantic_tags == {"numeric", "time_index"}


def test_numeric_time_index_dtypes(numeric_time_index_df):
    """set_time_index works across Integer, Double, and IntegerNullable columns."""
    numeric_time_index_df.ww.init(time_index="ints")
    assert numeric_time_index_df.ww.time_index == "ints"
    assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
    assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}

    numeric_time_index_df.ww.set_time_index("floats")
    assert numeric_time_index_df.ww.time_index == "floats"
    assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
    assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}

    numeric_time_index_df.ww.set_time_index("with_null")
    assert numeric_time_index_df.ww.time_index == "with_null"
    assert isinstance(
        numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
    )
    assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
        "time_index",
        "numeric",
    }


def test_accessor_init_with_invalid_string_time_index(sample_df):
    """A plain-text column cannot be used as a time index."""
    error_msg = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_msg):
        sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
    """Logical types may be given as strings (snake_case or CamelCase)."""
    logical_types = {"full_name": "natural_language", "age": "Double"}
    schema_df = sample_df.copy()
    schema_df.ww.init(name="schema", logical_types=logical_types)
    assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
    assert isinstance(schema_df.ww.columns["age"].logical_type, Double)

    logical_types = {
        "full_name": "NaturalLanguage",
        "age": "IntegerNullable",
        "signup_date": "Datetime",
    }
    schema_df = sample_df.copy()
    schema_df.ww.init(
        name="schema", logical_types=logical_types, time_index="signup_date"
    )
    assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
    assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
    assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
    """Inferred dtypes for integer-like columns with various null markers."""
    df = pd.DataFrame(
        {
            "ints_no_nans": pd.Series([1, 2]),
            "ints_nan": pd.Series([1, np.nan]),
            "ints_NA": pd.Series([1, pd.NA]),
            "ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
        }
    )
    # Repeat rows so type inference has enough samples to work with
    df = df.loc[df.index.repeat(5)].reset_index(drop=True)
    df.ww.init()

    assert df["ints_no_nans"].dtype == "int64"
    assert df["ints_nan"].dtype == "float64"
    assert df["ints_NA"].dtype == "category"
    assert df["ints_NA_specified"].dtype == "Int64"


def test_bool_dtype_inference_on_init():
    """Inferred dtypes for boolean-like columns with various null markers."""
    df = pd.DataFrame(
        {
            "bools_no_nans": pd.Series([True, False]),
            "bool_nan": pd.Series([True, np.nan]),
            "bool_NA": pd.Series([True, pd.NA]),
            "bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
        }
    )
    df = df.loc[df.index.repeat(5)].reset_index(drop=True)
    df.ww.init()

    assert df["bools_no_nans"].dtype == "bool"
    assert df["bool_nan"].dtype == "category"
    assert df["bool_NA"].dtype == "category"
    assert df["bool_NA_specified"].dtype == "boolean"


def test_str_dtype_inference_on_init():
    """Short string columns are inferred as categorical regardless of nulls."""
    df = pd.DataFrame(
        {
            "str_no_nans": pd.Series(["a", "b"]),
            "str_nan": pd.Series(["a", np.nan]),
            "str_NA": pd.Series(["a", pd.NA]),
            "str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
        }
    )
    df = df.loc[df.index.repeat(5)].reset_index(drop=True)
    df.ww.init()

    assert df["str_no_nans"].dtype == "category"
    assert df["str_nan"].dtype == "category"
    assert df["str_NA"].dtype == "category"
    assert df["str_NA_specified"].dtype == "category"


def test_float_dtype_inference_on_init():
    """Inferred dtypes for float-like columns with various null markers."""
    df = pd.DataFrame(
        {
            "floats_no_nans": pd.Series([1.1, 2.2]),
            "floats_nan": pd.Series([1.1, np.nan]),
            "floats_NA": pd.Series([1.1, pd.NA]),
            "floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
        }
    )
    df = df.loc[df.index.repeat(5)].reset_index(drop=True)
    df.ww.init()

    assert df["floats_no_nans"].dtype == "float64"
    assert df["floats_nan"].dtype == "float64"
    # pd.NA forces object dtype, which gets inferred as categorical
    assert df["floats_NA"].dtype == "category"
    assert df["floats_nan_specified"].dtype == "float64"


def test_datetime_dtype_inference_on_init():
    """Datetime columns keep datetime64[ns] regardless of null marker."""
    df = pd.DataFrame(
        {
            "date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
            "date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
            "date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
            "date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
            "date_NA_specified": pd.Series(
                [pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
            ),
        }
    )
    df.ww.init()

    assert df["date_no_nans"].dtype == "datetime64[ns]"
    assert df["date_nan"].dtype == "datetime64[ns]"
    assert df["date_NA"].dtype == "datetime64[ns]"
    assert df["date_NaT"].dtype == "datetime64[ns]"
    assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
    """Datetime(datetime_format=...) parses non-standard date strings."""
    df = pd.DataFrame(
        {
            "index": [0, 1, 2],
            "dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
            "ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
            "mdy_special": pd.Series(
                ["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
            ),
        }
    )
    df.ww.init(
        name="df_name",
        logical_types={
            "ymd_special": Datetime(datetime_format="%Y~%m~%d"),
            "mdy_special": Datetime(datetime_format="%m~%d~%Y"),
            "dates": Datetime,
        },
        time_index="ymd_special",
    )

    assert df["dates"].dtype == "datetime64[ns]"
    assert df["ymd_special"].dtype == "datetime64[ns]"
    assert df["mdy_special"].dtype == "datetime64[ns]"

    assert df.ww.time_index == "ymd_special"
    assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
    assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
    assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)

    df.ww.set_time_index("mdy_special")
    assert df.ww.time_index == "mdy_special"

    df = pd.DataFrame(
        {
            "mdy_special": pd.Series(
                ["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
            ),
        }
    )
    # Repeat rows so type inference has enough samples to work with
    df = df.loc[df.index.repeat(5)].reset_index(drop=True)

    # Without a format, the odd date strings are inferred as categorical
    df.ww.init()
    assert df["mdy_special"].dtype == "category"

    # Supplying the format via set_types converts the column to datetimes
    df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
    assert df["mdy_special"].dtype == "datetime64[ns]"

    df.ww.set_time_index("mdy_special")
    assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
    assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
    """Datetime differences keep timedelta64[ns] regardless of null marker."""
    df = pd.DataFrame(
        {
            "delta_no_nans": (
                pd.Series([pd.to_datetime("2020-09-01")] * 2)
                - pd.to_datetime("2020-07-01")
            ),
            "delta_nan": (
                pd.Series([pd.to_datetime("2020-09-01"), np.nan])
                - pd.to_datetime("2020-07-01")
            ),
            "delta_NaT": (
                pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
                - pd.to_datetime("2020-07-01")
            ),
            "delta_NA_specified": (
                pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
                - pd.to_datetime("2020-07-01")
            ),
        }
    )
    df.ww.init()

    assert df["delta_no_nans"].dtype == "timedelta64[ns]"
    assert df["delta_nan"].dtype == "timedelta64[ns]"
    assert df["delta_NaT"].dtype == "timedelta64[ns]"
    assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
    """Categorical-family logical types coerce series to their category dtype."""
    column_name = "test_series"
    series_variants = [
        pd.Series(["a", "b", "c"], name=column_name),
        pd.Series(["a", None, "c"], name=column_name),
        pd.Series(["a", np.nan, "c"], name=column_name),
        pd.Series(["a", pd.NA, "c"], name=column_name),
        pd.Series(["a", pd.NaT, "c"], name=column_name),
    ]
    logical_types = [
        Categorical,
        CountryCode,
        Ordinal(order=["a", "b", "c"]),
        PostalCode,
        SubRegionCode,
    ]
    for variant in series_variants:
        variant = variant.astype("object")
        for ltype in logical_types:
            # Normalize classes to instances so equality checks are uniform
            if isclass(ltype):
                ltype = ltype()
            df = pd.DataFrame(variant)
            df.ww.init(logical_types={column_name: ltype})
            assert df.ww.columns[column_name].logical_type == ltype
            assert df[column_name].dtype == ltype.primary_dtype
def test_sets_object_dtype_on_init(latlong_df):
    """LatLong columns keep object dtype and normalize their element format."""
    for column_name in latlong_df.columns:
        ltypes = {
            column_name: LatLong,
        }
        df = latlong_df.loc[:, [column_name]]
        df.ww.init(logical_types=ltypes)
        assert isinstance(df.ww.columns[column_name].logical_type, LatLong)
        assert df[column_name].dtype == LatLong.primary_dtype
        df_pandas = to_pandas(df[column_name])
        # Koalas stores latlong values as lists rather than tuples
        expected_val = (3, 4)
        if _is_koalas_dataframe(latlong_df):
            expected_val = [3, 4]
        assert df_pandas.iloc[-1] == expected_val


def test_sets_string_dtype_on_init():
    """String-backed logical types coerce series to their string dtype."""
    column_name = "test_series"
    series_list = [
        pd.Series(["a", "b", "c"], name=column_name),
        pd.Series(["a", None, "c"], name=column_name),
        pd.Series(["a", np.nan, "c"], name=column_name),
        pd.Series(["a", pd.NA, "c"], name=column_name),
    ]

    logical_types = [
        Address,
        Filepath,
        PersonFullName,
        IPAddress,
        NaturalLanguage,
        PhoneNumber,
        URL,
    ]

    for series in series_list:
        series = series.astype("object")
        for logical_type in logical_types:
            ltypes = {
                column_name: logical_type,
            }
            df = pd.DataFrame(series)
            df.ww.init(logical_types=ltypes)
            assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
            assert df[column_name].dtype == logical_type.primary_dtype


def test_sets_boolean_dtype_on_init():
    """Boolean logical types coerce series to bool/boolean dtypes."""
    column_name = "test_series"
    series_list = [
        pd.Series([True, False, True], name=column_name),
        pd.Series([True, None, True], name=column_name),
        pd.Series([True, np.nan, True], name=column_name),
        pd.Series([True, pd.NA, True], name=column_name),
    ]

    logical_types = [Boolean, BooleanNullable]
    for series in series_list:
        for logical_type in logical_types:
            # Non-nullable Boolean cannot represent missing values; skip
            if series.isnull().any() and logical_type == Boolean:
                continue
            series = series.astype("object")
            ltypes = {
                column_name: logical_type,
            }
            df = pd.DataFrame(series)
            df.ww.init(logical_types=ltypes)
            assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
            assert df[column_name].dtype == logical_type.primary_dtype


def test_sets_int64_dtype_on_init():
    """Integer logical types coerce series to int64/Int64 dtypes."""
    column_name = "test_series"
    series_list = [
        pd.Series([1, 2, 3], name=column_name),
        pd.Series([1, None, 3], name=column_name),
        pd.Series([1, np.nan, 3], name=column_name),
        pd.Series([1, pd.NA, 3], name=column_name),
    ]

    logical_types = [Integer, IntegerNullable, Age, AgeNullable]
    for series in series_list:
        series = series.astype("object")
        for logical_type in logical_types:
            # Non-nullable integer types cannot represent missing values; skip
            if series.isnull().any() and logical_type in [Integer, Age]:
                continue
            ltypes = {
                column_name: logical_type,
            }
            df = pd.DataFrame(series)
            df.ww.init(logical_types=ltypes)
            assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
            assert df[column_name].dtype == logical_type.primary_dtype


def test_sets_float64_dtype_on_init():
    """Float logical types coerce series to float64 dtype."""
    column_name = "test_series"
    series_list = [
        pd.Series([1.1, 2, 3], name=column_name),
        pd.Series([1.1, None, 3], name=column_name),
        pd.Series([1.1, np.nan, 3], name=column_name),
    ]

    logical_types = [Double, AgeFractional]
    for series in series_list:
        series = series.astype("object")
        for logical_type in logical_types:
            ltypes = {
                column_name: logical_type,
            }
            df = pd.DataFrame(series)
            df.ww.init(logical_types=ltypes)
            assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
            assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_datetime64_dtype_on_init():
column_name = "test_series"
series_list = [
| pd.Series(["2020-01-01", "2020-01-02", "2020-01-03"], name=column_name) | pandas.Series |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
# NOTE(review): pytables tests carry the "single" mark — presumably to run
# serially, since HDF5 files do not tolerate concurrent access; confirm
# against the project's pytest configuration.
pytestmark = pytest.mark.single


def test_format_type(setup_path):
    """get_storer reports the format each object was written with."""
    df = DataFrame({"A": [1, 2]})

    with ensure_clean_path(setup_path) as path:
        with HDFStore(path) as store:
            store.put("a", df, format="fixed")
            store.put("b", df, format="table")

            assert store.get_storer("a").format_type == "fixed"
            assert store.get_storer("b").format_type == "table"


def test_format_kwarg_in_constructor(setup_path):
    """HDFStore rejects a ``format`` keyword passed to its constructor."""
    # GH 13291
    msg = "format is not a defined argument for HDFStore"

    with tm.ensure_clean(setup_path) as path:
        with pytest.raises(ValueError, match=msg):
            HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with | HDFStore(path) | pandas.HDFStore |
import argparse
import os
import tempfile
import textwrap
from pathlib import Path
from typing import Dict
import pandas as pd
import pytest
from google.api_core.exceptions import BadRequest
from IPython.core.error import UsageError
from pytest_mock.plugin import MockerFixture
from bqtestmagic import BigQueryTest, SQLTestMagic, label
class TestSQLTestMagic:
    """Tests for the SQL cell-magic entry point."""

    @pytest.fixture
    def bqtest(self) -> SQLTestMagic:
        return SQLTestMagic()

    def test_fetch_query_result_as_dataframe_if_target_is_bigquery(
        self, mocker: MockerFixture, bqtest: SQLTestMagic
    ):
        """sql() delegates to BigQueryTest.test and returns its dataframe."""
        df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
        mocker.patch("google.cloud.bigquery.Client")
        mock = mocker.patch("bqtestmagic.BigQueryTest.test", return_value=df)

        actual = bqtest.sql("BigQuery", "SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4")

        mock.assert_called_once()
        pd.testing.assert_frame_equal(df, actual)

    def test_raise_error_if_target_is_not_bigquery(self, bqtest: SQLTestMagic):
        """Only the "BigQuery" target is supported."""
        with pytest.raises(UsageError):
            bqtest.sql("other", "SELECT 1 col1")

    @pytest.mark.parametrize(
        ("line", "query", "csv_file", "sql_file", "project", "reliable", "labels"),
        [
            # No options: everything defaults
            ("BigQuery", "SELECT 1 col1", None, None, None, False, {}),
            # All options supplied on the magic line
            (
                "BigQuery --csv_file=a.csv --sql_file=b.sql --project=my-project --reliable --labels a=b",  # noqa: E501
                "SELECT 1 col1",
                Path("a.csv"),
                Path("b.sql"),
                "my-project",
                True,
                {"a": "b"},
            ),
        ],
    )
    def test_parse_argstring(
        self,
        mocker: MockerFixture,
        bqtest: SQLTestMagic,
        line: str,
        query: str,
        csv_file: Path,
        sql_file: Path,
        project: str,
        reliable: bool,
        labels: Dict[str, str],
    ):
        """Magic-line options are parsed and forwarded to BigQueryTest.test."""
        client = mocker.patch("google.cloud.bigquery.Client")
        mock = mocker.patch("bqtestmagic.BigQueryTest.test")

        bqtest.sql(line, query)

        client.assert_called_once_with(project)
        mock.assert_called_once_with(
            query=query,
            csv_file=csv_file,
            sql_file=sql_file,
            reliable=reliable,
            labels=labels,
        )

    class TestClose:
        """Client cleanup behavior after running a query."""

        def test_close_bigquery_client_if_it_has_close_attribute(
            self, mocker: MockerFixture, bqtest: SQLTestMagic
        ):
            client = mocker.Mock(spec=["close"])
            mocker.patch("google.cloud.bigquery.Client", return_value=client)

            bqtest.sql("BigQuery", "SELECT 1 col1")

            client.close.assert_called_once_with()

        def test_no_close_if_bigquery_client_does_not_have_close_attribute(
            self, mocker: MockerFixture, bqtest: SQLTestMagic
        ):
            # spec=[] yields a mock without a close attribute at all
            client = mocker.Mock(spec=[])
            mocker.patch("google.cloud.bigquery.Client", return_value=client)

            bqtest.sql("BigQuery", "SELECT 1 col1")

            assert hasattr(client, "close") is False
class TestLabel:
    """label() parses "key=value" strings into (key, value) tuples."""

    def test_failed(self):
        # A string without "=" is not a valid label
        with pytest.raises(argparse.ArgumentTypeError):
            label("abc")

    def test_success(self):
        assert label("abc=def") == ("abc", "def")
class TestBigQueryTest:
    @pytest.fixture
    def bigquery_test(self) -> BigQueryTest:
        # No real client needed: the tests patch all BigQuery interactions
        return BigQueryTest(None)

    class TestTest:
        """Argument validation for BigQueryTest.test."""

        def test_raise_error_if_both_csv_file_and_sql_file_are_set(
            self, bigquery_test: BigQueryTest
        ):
            # csv_file and sql_file are mutually exclusive comparison modes
            with pytest.raises(ValueError):
                bigquery_test.test(
                    query="SELECT 1",
                    csv_file=Path("set.csv"),
                    sql_file=Path("set.sql"),
                    reliable=False,
                    labels={},
                )
    class TestDataframeQueryResultsToDataframe:
        """test() with no comparison files just returns the query results."""

        def test_no_tests_if_both_csv_file_and_sql_file_are_not_set(
            self,
            mocker: MockerFixture,
            capfd: pytest.CaptureFixture,
            bigquery_test: BigQueryTest,
        ):
            df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
            mocker.patch("google.cloud.bigquery.Client", return_value=None)
            download_query_results_to_dataframe = mocker.patch(
                "bqtestmagic.BigQueryTest.download_query_results_to_dataframe",
                return_value=df,
            )

            query = "SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4"
            actual = bigquery_test.test(
                query=query, csv_file=None, sql_file=None, reliable=False, labels={}
            )

            download_query_results_to_dataframe.assert_called_once_with(query, {})
            pd.testing.assert_frame_equal(actual, df)
            # Nothing is printed when no comparison was requested
            assert capfd.readouterr() == ("", "")
class TestPrintsWhetherQueryResultsAreEqualToCSVFileIfCSVFileIsSetAndSQLFileIsNotSet: # noqa: E501
def test_success(
self,
mocker: MockerFixture,
capfd: pytest.CaptureFixture,
bigquery_test: BigQueryTest,
):
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
mocker.patch("google.cloud.bigquery.Client", return_value=None)
download_query_results_to_dataframe = mocker.patch(
"bqtestmagic.BigQueryTest.download_query_results_to_dataframe",
return_value=df,
)
query = "SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4"
with tempfile.NamedTemporaryFile("w") as f:
f.write("col1,col2\n1,3\n2,4")
f.seek(0)
actual = bigquery_test.test(
query=query,
csv_file=Path(f.name),
sql_file=None,
reliable=False,
labels={},
)
download_query_results_to_dataframe.assert_called_once_with(
query, {}
)
pd.testing.assert_frame_equal(actual, df)
assert capfd.readouterr() == ("✓\n", "")
def test_failure(
self,
mocker: MockerFixture,
capfd: pytest.CaptureFixture,
bigquery_test: BigQueryTest,
):
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
mocker.patch("google.cloud.bigquery.Client", return_value=None)
download_query_results_to_dataframe = mocker.patch(
"bqtestmagic.BigQueryTest.download_query_results_to_dataframe",
return_value=df,
)
query = "SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4"
with tempfile.NamedTemporaryFile("w") as f:
f.write("col1,col2\n0,0\n0,0")
f.seek(0)
actual = bigquery_test.test(
query=query,
csv_file=Path(f.name),
sql_file=None,
reliable=False,
labels={},
)
download_query_results_to_dataframe.assert_called_once_with(
query, {}
)
pd.testing.assert_frame_equal(actual, df)
assert capfd.readouterr() == ("✕\n", "")
def test_query_validation_if_sql_file_is_set_and_not_reliable(
self,
mocker: MockerFixture,
bigquery_test: BigQueryTest,
):
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
mocker.patch("google.cloud.bigquery.Client", return_value=None)
mocker.patch(
"bqtestmagic.BigQueryTest.download_query_results_to_dataframe",
return_value=df,
)
mocker.patch(
"bqtestmagic.BigQueryTest.query_to_check_that_two_query_results_match", # noqa: E501
return_value=False,
)
validate_query = mocker.patch("bqtestmagic.BigQueryTest.validate_query")
unreliable_query = "SELECT col1, col1 + 3 col2 FROM UNNEST([1, 2]) col1"
with tempfile.NamedTemporaryFile("w") as f:
f.write(unreliable_query)
f.seek(0)
bigquery_test.test(
query="SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4",
csv_file=None,
sql_file=Path(f.name),
reliable=False,
labels={},
)
validate_query.assert_called_once_with(unreliable_query)
class TestPrintThatTwoQueryResultsAreEqualIfCsvFileIsNotSetAndSqlFileIsSet:
def test_success(
self,
mocker: MockerFixture,
capfd: pytest.CaptureFixture,
bigquery_test: BigQueryTest,
):
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
mocker.patch("google.cloud.bigquery.Client", return_value=None)
download_query_results_to_dataframe = mocker.patch(
"bqtestmagic.BigQueryTest.download_query_results_to_dataframe",
return_value=df,
)
query = "SELECT 1 col1, 3 col2 UNION ALL SELECT 2, 4"
with tempfile.NamedTemporaryFile("w") as f:
f.write("SELECT col1, col1 + 2 col2 FROM UNNEST([1, 2]) col1")
f.seek(0)
mocker.patch(
"bqtestmagic.BigQueryTest.query_to_check_that_two_query_results_match", # noqa: E501
return_value=True,
)
actual = bigquery_test.test(
query=query,
csv_file=None,
sql_file=Path(f.name),
reliable=True,
labels={},
)
download_query_results_to_dataframe.assert_called_once_with(
query, {}
)
| pd.testing.assert_frame_equal(actual, df) | pandas.testing.assert_frame_equal |
# Author: <NAME> <EMAIL>
import os, shutil
import subprocess
import numpy as np
import pandas as pd
import mpi4py.MPI
import itertools
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import xml.etree.ElementTree as ET
# ##############################################################################
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def mkdir(dir = None):
    '''
    Creates the given directory (including missing parents) if it does not
    already exist. A value of None is a no-op.

    Parameters
    ----------
    dir : char
        Directory
    '''
    if not dir is None:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() guard and is a no-op for existing directories.
        os.makedirs(dir, exist_ok=True)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def clean_up(path):
    """Delete the directory tree rooted at *path*; silently skip missing paths."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def create_simulation_environment(
        STICS_path = None,
        STICS_bin = 'JavaSticsCmd.exe',
        STICS_local_path = None,
        link_items = None,
        copy_items = None):
    """Build a fresh per-process STICS workspace under STICS_local_path.

    Large read-only items (the binary distribution) are symlinked from
    STICS_path; mutable workspace inputs are copied so each MPI rank can
    modify its own files. Any previous workspace at STICS_local_path is
    removed first.

    NOTE(review): the default copy_items use Windows-style '\\' separators —
    confirm this is only run on Windows hosts.
    """
    # default values
    if link_items is None:
        link_items = ['bin',STICS_bin]
    if copy_items is None:
        copy_items = [
            'config','grape\\CLIMAISJ.2011','grape\\CLIMAISJ.2012','grape\\CLIMAISJ.2013','grape\\CLIMAISJ.2014','grape\\CLIMAISJ_sta.xml',
            'grape\\mais_ini.xml','grape\\mais.lai','grape\\Mais_tec.xml','grape\\prof.mod','grape\\sols.xml','grape\\usms.xml', 'grape\\var.mod', 'grape\\rap.mod',
        ]
    # clean up directory, just to be sure
    clean_up(STICS_local_path)
    # link and copy files
    for item in link_items + copy_items:
        # check if the parent directory exists
        dirname = os.path.dirname(os.path.join(STICS_local_path, item)) # obtain the corresponding local directory name where the item was located
        if not dirname == '' and not os.path.exists(dirname):
            print(' %Status: directory {0} does not exist and will be created.'.format(dirname))
            mkdir(dir = dirname) # create STICS_local_path directory and subdirectory as workspace
        if item in link_items:
            os.symlink(os.path.join(STICS_path, item), os.path.join(STICS_local_path,item)) # create a symbolic link from source (1st arg) to destination (2nd arg)
        elif item in copy_items:
            if os.path.isfile(os.path.join(STICS_path, item)):
                shutil.copy(os.path.join(STICS_path, item), os.path.join(STICS_local_path, item)) # if the item is a file, copy it to destination folder
            else:
                shutil.copytree(os.path.join(STICS_path, item), os.path.join(STICS_local_path, item)) # if the item is a directory, copy the whole directory tree to the destination folder
    #shutil.copy(os.path.join(STICS_path, STICS_bin), os.path.join(STICS_local_path,STICS_bin)) # copy cmd binary file from source (1st arg) to destionation (2nd arg)
    # add plant dir to hold plant parameter files
    mkdir(os.path.join(STICS_local_path, 'plant'))
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (in percent).

    Assumes y_true contains no zeros (division by the true values).
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ##############################################################################
comm = mpi4py.MPI.COMM_WORLD  # MPI communicator for the multi-process calibration run
# 1. User-specific session
# =============================================================================
Root_path = "G:\SpatialModellingSRC\STICS_PIK_Cluster" # Define the root path
CurrentDir = os.getcwd() # Specify the current working directory where EF function has been stored
STICS_path = os.path.join(Root_path, 'STICS_V91') # Specify the STICS main directory
STICS_bin = 'JavaSticsCmd.exe' # Specify STICS running command directory
STICS_workspace = 'grape' # Specify the STICS workspace folder name
start_year=2011
end_year=2014
Study_years=np.arange(start_year,end_year+1,1)  # inclusive range of simulated years
Climate_input=["CLIMAISJ."+str(int(year)) for year in Study_years]  # one climate file per year
STICS_workspace_files = Climate_input+['CLIMAISJ_sta.xml', # Concatenate climate input files with other essential input files
  'mais_ini.xml','mais.lai','Mais_tec.xml','prof.mod','sols.xml','usms.xml', 'var.mod', 'rap.mod'] # 'mais.lai',
STICS_workdir = os.path.join(STICS_path, 'multiprocess') # Create a folder that enable running the MPI multiprocess
variety = 'Touriga_Nacional' # Specify the name of variety node that is appearing in the plant .xml file
STICS_plant_default=os.path.join(STICS_path, 'plant', 'vine_TN_plt_default.xml') # Default plant file for variety TN used to read default configuration every time
STICS_plant_file='vine_TN_plt.xml' # Given a plant name
fname_out = os.path.join(Root_path,'cali_{0}.dat'.format(variety.lower())) # Define the output file name
# 2 Supply observational data
Observation_file = os.path.join(STICS_path, 'Observation.xlsx') # Specify the observational file that contain the measurements of variables
LoadOB=pd.ExcelFile(Observation_file)
ListofSheets=LoadOB.sheet_names # Obtain the information on excel sheets
Ob_dataframe=pd.read_excel(LoadOB,sheet_name=ListofSheets[0])
import os
import serial, pandas, argparse
import numpy as np
import os.path as osp
from time import sleep
def receive_vector(start_marker, end_marker):
    """Read one marker-framed, comma-separated integer vector from the
    module-global serial port ``ser``.

    Bytes are discarded until *start_marker* (byte value) is seen, then the
    payload is accumulated until *end_marker*.

    Returns
    -------
    (list[int] | None, str)
        The parsed vector (or None when the payload is malformed) and the
        raw payload string.
    """
    chunks = []
    x = 'z'  # sentinel so the first ord() comparison is well-defined
    # skip everything up to the start marker
    while ord(x) != start_marker:
        x = ser.read()
    # collect payload bytes until the end marker
    while ord(x) != end_marker:
        if ord(x) != start_marker:
            # accumulate in a list and join once: avoids quadratic string
            # concatenation on long vectors
            chunks.append(x.decode("utf-8"))
        x = ser.read()
    msg = ''.join(chunks)
    try:
        v = [int(item) for item in msg.split(',')]
    except Exception as e:  # malformed payload (e.g. garbled serial data)
        print(e)
        v = None
    return v, msg
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parse args')
    parser.add_argument('-p', '--port', help='Serial port', default='/dev/ttyACM0')
    parser.add_argument('-r', '--rate', help='Baud rate', default=115200, type=int)
    parser.add_argument('-s', '--start', help='Start marker', default=60, type=int)   # '<'
    parser.add_argument('-e', '--end', help='End marker', default=62, type=int)       # '>'
    parser.add_argument('-n', '--nvectors', help='Number of vectors to record', default=10000, type=int)
    parser.add_argument('-f', '--fpath', help='File path', default='data/adxl_fan/shake/shake.csv')
    args = parser.parse_args()
    # give the device time to reset after the port is about to be opened
    sleep(5)
    # record the data
    ser = serial.Serial(args.port, args.rate)
    data = []
    n = 0
    while n < args.nvectors:
        x, msg = receive_vector(args.start, args.end)
        if x is not None:
            print(n, x)
            data.append(x)
            n += 1
        else:
            # malformed frame: show the raw payload and retry (n not advanced)
            print(msg)
    ser.close()
    X = np.array(data)
    # save the data: create every missing directory on the target path,
    # parents first (dirs is collected leaf-to-root, hence reversed below)
    fpath = osp.dirname(args.fpath)
    dirs = []
    while fpath != '':
        dirs.append(fpath)
        fpath = osp.dirname(fpath)
    for dir in dirs[::-1]:
        if not osp.isdir(dir):
            os.mkdir(dir)
    # NOTE(review): the DataFrame below appears unused — a trailing save call
    # (e.g. .to_csv(args.fpath)) may have been lost; confirm against VCS.
    pandas.DataFrame(X)
from datetime import datetime, date, timedelta
import os
from sys import exit
import pandas as pd
import time
from selenium import webdriver
from datetime import date, timedelta
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
from time import gmtime, strftime
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
ROW_PER_OFFSET = 25  # booking.com lists 25 results per page of search output
global my_options  # NOTE(review): 'global' at module level is a no-op; kept as-is
my_options = Options()
my_options.add_argument("--incognito")
my_options.add_argument("--ignore-certificate-errors")
my_options.add_experimental_option('excludeSwitches', ['enable-logging'])  # silence Chrome DevTools logging
def create_url(city, datein, dateout=None, offset=0, people=2, no_sleep=1, currency='JPY'):
    """Build a booking.com search-results URL for *city*.

    *datein* must be an ISO 'YYYY-MM-DD' string; a ValueError is raised for a
    malformed date and the process exits when the date lies in the past.
    When *dateout* is omitted, checkout is *no_sleep* nights after check-in.
    """
    # validate the date format first (YYYY-MM-DD)
    try:
        datetime.strptime(datein, "%Y-%m-%d")
    except ValueError:
        raise ValueError('Incorrect data format, should be YYYY-MM-DD')
    # a past check-in date makes no sense for a search
    if (date.fromisoformat(datein) - date.today()).days < 0:
        print('Error. The date you selected is in the past.')
        exit()
    checkin = date.fromisoformat(datein)
    if dateout is None:
        checkout = checkin + timedelta(days=no_sleep)
    else:
        checkout = date.fromisoformat(dateout)
    return (
        "https://www.booking.com/searchresults.html"
        f"?checkin_month={checkin.month}"
        f"&checkin_monthday={checkin.day}&checkin_year={checkin.year}"
        f"&checkout_month={checkout.month}"
        f"&checkout_monthday={checkout.day}&checkout_year={checkout.year}"
        f"&group_adults={people}"
        f"&group_children=0&order=price&ss={city}%2C%20&offset={offset}"
        f"&language=en-us&selected_currency={currency}"
    )
def next_page(booking_url, input_offset, currency='JPY'):
    """Return *booking_url* with its result offset set to *input_offset*.

    The query string is parsed into key/value pairs, the offset is replaced
    (or added), and the currency plus map/UI flags are appended.
    """
    params = {}
    for fragment in booking_url.split('&'):
        key, _, value = fragment.partition('=')
        params[key] = value
    params['offset'] = input_offset
    rebuilt = '&'.join('{}={}'.format(key, value) for key, value in params.items())
    # prices default to the local currency, so pin the requested one
    rebuilt = rebuilt + '&selected_currency={}'.format(currency)
    return rebuilt + '#map_closed' + '&top_ufis=1'
def format_url(booking_url, datein, dateout=None, currency='JPY', no_sleep=1):
    """Rewrite *booking_url* with new check-in/check-out dates.

    The URL's query parameters are re-parsed, the checkin_*/checkout_* fields
    are overwritten (added when absent), location hints are blanked, and the
    requested currency is appended.

    Raises ValueError for a malformed *datein*; exits when *datein* is past.
    """
    # validate the date format first (YYYY-MM-DD)
    try:
        datetime.strptime(datein, "%Y-%m-%d")
    except ValueError:
        raise ValueError('Incorrect data format, should be YYYY-MM-DD')
    if (date.fromisoformat(datein) - date.today()).days < 0:
        print('Error. The date you selected is in the past.')
        exit()
    formated_datein = date.fromisoformat(datein)
    if dateout is None:
        formated_dateout = formated_datein + timedelta(days=no_sleep)
    else:
        # BUG FIX: formated_dateout was never assigned when a dateout string
        # was supplied, which raised NameError below.
        formated_dateout = date.fromisoformat(dateout)
    attributes, values = [], []
    for e in booking_url.split('&'):
        attributes.append(e.partition('=')[0])
        values.append(e.partition('=')[2])
    post_list = dict(zip(attributes, values))
    post_list['checkin_year'], post_list['checkin_month'], post_list['checkin_monthday'] = formated_datein.year, formated_datein.month, formated_datein.day
    post_list['checkout_year'], post_list['checkout_month'], post_list['checkout_monthday'] = formated_dateout.year, formated_dateout.month, formated_dateout.day
    # blank the destination hints so booking.com re-resolves the search
    post_list['ssne'] = ''
    post_list['ssne_untouched'] = ''
    post_list['dest_id'] = ''
    joined = []
    for key, value in post_list.items():
        joined.append('{}={}'.format(key, value))
    final_link = '&'.join(joined)
    return final_link + '&selected_currency={}'.format(currency)
def clean(input: str) -> int:
    """Extract the largest page number from booking.com pagination text.

    The text looks like '1\\n2\\n3\\n…\\n28\\nShowing 1 - 25'; the trailing
    summary is stripped and the maximum numeric page is returned.
    """
    input = input.removesuffix('Showing 1 - 25')
    tokens = input.replace('\n', ',').replace('…', '').split(',')
    # BUG FIX: int(max(tokens)) compared the numbers as STRINGS, so '9'
    # outranked '28'. Compare numerically, skipping empty fragments.
    return max(int(t) for t in tokens if t.strip().isdigit())
def get_number_pages(web_driver: WebDriver) -> int:
    """Return the number of result pages shown in the pagination widget,
    or 0 when the widget cannot be located within 5 seconds."""
    try:
        all_pages = WebDriverWait(web_driver, timeout=5).until(expected_conditions.visibility_of_element_located((By.CSS_SELECTOR,'[data-testid="pagination"]')))
    except TimeoutException:
        print('Cannot find number of pages')
        return 0
    else:
        # clean() parses the widget text and returns the highest page number
        return clean(all_pages.text)
def get_hotel_name(hotel_driver: WebDriver):
    """Return the stripped hotel title text from a result card, or the
    placeholder 'Hotel name unknown' when the element cannot be found."""
    try:
        hotel_name = WebDriverWait(hotel_driver, timeout=5).until(expected_conditions.visibility_of_element_located((By.CSS_SELECTOR, '[data-testid="title"]')))
    except TimeoutException:
        print("Exception has been thrown. ")
        print('Cannot find hotel name element')
        return 'Hotel name unknown'
    else:
        return hotel_name.text.strip()
def get_hotel_price(hotel: WebDriver):
    """Return the room price as a plain digit string (JPY symbol and
    thousands separators removed), or '' when the element is absent."""
    try:
        price = WebDriverWait(hotel, timeout=1).until(expected_conditions.visibility_of_element_located((By.CLASS_NAME,'bui-price-display__value')))
    except TimeoutException:
        print("Exception has been thrown. ")
        print('Cannot find hotel price element')
        return ""
    else:
        return price.text.replace('¥', '').replace(',', '').strip()
def get_hotel_price_simple(hotel_driver: WebDriver):
    """Return the displayed (possibly discounted) price text of a result
    card, or 'hotel price unknown' when the element cannot be located."""
    try:
        price = WebDriverWait(hotel_driver, timeout=3).until(expected_conditions.visibility_of_element_located((By.CSS_SELECTOR,'[data-testid="price-and-discounted-price"]')))
    except TimeoutException:
        print("Exception has been thrown. ")
        print('Cannot find hotel price element')
        # (dead `price = ''` assignment removed — the placeholder below is
        # returned directly)
        return 'hotel price unknown'
    else:
        return price.text.strip()
def get_hotel_details_link(hotel_driver: WebDriver):
    """Return the href of the result card's title link (the hotel detail
    page URL), or None when the link element is absent."""
    try:
        link = WebDriverWait(hotel_driver, timeout=1).until(expected_conditions.visibility_of_element_located((By.CSS_SELECTOR,'[data-testid="title-link"]')))
    except TimeoutException:
        print("Exception has been thrown. ")
        print('Cannot find link element.')
        return None
    else:
        return link.get_attribute('href')
def get_max_occupancy_room_type(hotel_driver: WebDriver):
    """Return ``(room_type, max_occupancy)`` parsed from the room summary
    text, or ``("", "")`` when the element cannot be located.

    The summary text has the shape '<room type> Max people: <n>'; dashes,
    bullets and newlines are stripped before splitting.
    """
    try:
        room_element = WebDriverWait(hotel_driver, timeout=3).until(expected_conditions.visibility_of_element_located((By.CLASS_NAME,'room_link')))
    except TimeoutException:
        print("Exception has been thrown. ")
        print('Cannot find link element.')
        return "", ""
    else:
        # BUG FIX: the string processing below was applied to the WebElement
        # itself (which has no .replace); extract its text first.
        room_string = room_element.text
        room_string = room_string.replace('Max people', ' Max people').replace(
            '–', '').replace('-', '').replace('\n', '').replace('•', '').replace('•', '')
        splitted_room_string = room_string.split('Max people:')
        if ' '.join(splitted_room_string) == room_string:
            room_type, max_occupancy = room_string, ""
        elif len(splitted_room_string) < 2:
            room_type = room_string
            max_occupancy = ""
        else:
            room_type, max_occupancy = splitted_room_string[0].strip(), splitted_room_string[1].strip()
        return room_type, max_occupancy
def url_parser(website_url):
    """Debug helper: print *website_url*'s query parameters as a dict."""
    params = {}
    for fragment in website_url.split('&'):
        key, _, value = fragment.partition('=')
        params[key] = value
    print(params)
def generate_file(dest, datein, path, hotels_dict):
    """
    This function filters the hotels retrieved in the process. It first removes all the "Capsule Hotels" and "Hostels". Then, it separates them
    into two sheets : Stay Sakura's hotels, and the others that do not belong to them.

    Parameters: dest/datein feed the file name, path the output directory,
    hotels_dict maps hotel_id -> row values in `hotel_columns` order.
    Returns the path of the written .xlsx workbook.
    """
    #Create columns name for each data type.
    hotel_columns = ['hotel_name', 'room_type', 'room_size',
                     'room_price', 'max_occupancy', 'meal_info','remaining_rooms']
    hotels = pd.DataFrame.from_dict(
        hotels_dict, orient='index', columns=hotel_columns)
    hotels['checkin_date'] = datein
    # First remove all Hostels and Capsule hotels
    filtered = hotels.drop(hotels[hotels['hotel_name'].str.contains(
        'Hostel |HOSTEL |hostel|capsule|Capsule|CAPSULE')].index)
    #SEPARATE DATAFRAMES & GENERATE TWO SHEETS
    stayjap_hotels = filtered[filtered['hotel_name'].str.contains(
        '[A-a]sakusa [Y-y]okozuna |art deco |ART DECO|[E-e]do [N-n]o [M-m]ai|[T-t]okyo [A-a]sakusa [T-t]ownhouse|[A-a]rt [D-d]eco|[H-h]yaku [K-k]ura|HYAKU KURA')]
    different_hotels = filtered.drop(filtered[filtered['hotel_name'].str.contains(
        '[A-a]sakusa [Y-y]okozuna |art deco |ART DECO|[E-e]do [N-n]o [M-m]ai|[T-t]okyo [A-a]sakusa [T-t]ownhouse|[A-a]rt [D-d]eco')].index)
    # Generating the name of the file so it will be unique
    date_time_obj = datetime.now()
    time_stamp = date_time_obj.strftime('%H_%M_%S')
    file_name = '_'.join([dest, datein, time_stamp])
    # Saving under a path :
    saving_path = '{}\\{}.xlsx'.format(path, file_name)
    # Write each dataframe to a different worksheet.
    # NOTE(review): the writer re-formats the same path instead of reusing
    # saving_path — equivalent today, but keep the two in sync.
    with pd.ExcelWriter('{}\\{}.xlsx'.format(path, file_name), engine='xlsxwriter') as writer:
        different_hotels.to_excel(writer, sheet_name='other_hotels')
        stayjap_hotels.to_excel(writer, sheet_name='stay_sakura_hotels')
    return saving_path
def traverse_dates(start_date, interval_in_days=30):
    """Print *interval_in_days* consecutive ISO dates starting at
    *start_date* (an ISO 'YYYY-MM-DD' string)."""
    current = date.fromisoformat(start_date)
    one_day = timedelta(days=1)
    for _ in range(interval_in_days):
        print(current.isoformat())
        current = current + one_day
def get_room_type(hotel):
    """Return the room-type label text of a rate-table row, or None when
    the element is absent."""
    try:
        # single EAFP lookup instead of the previous find-then-find-again
        return hotel.find_element(By.CLASS_NAME, 'hprt-roomtype-link').text
    except NoSuchElementException:
        return None
def get_room_size(hotel):
    """Return the room-size text of a rate-table row, or None when the
    element is absent."""
    try:
        # single EAFP lookup instead of the previous find-then-find-again
        return hotel.find_element(By.CSS_SELECTOR, "[data-name-en='room size']").text
    except NoSuchElementException:
        return None
def get_choices(hotel):
    """
    With this function, we fetch the option for the meal. There are 3 types : All Inclusive, Breakfast & Dinner included and Breakfast only.
    Returns the raw conditions text, or "No info" when the cell is absent.
    :return -> str
    """
    try:
        x = hotel.find_element(By.CLASS_NAME,'hprt-table-cell-conditions')
    except NoSuchElementException:
        return "No info"
    return x.text
def get_max_occupancy(hotel):
    """
    This function fetches the maximum number of persons per room.

    Returns the last integer appearing in the occupancy text, or "" when
    the element is absent or contains no number.
    """
    try:
        # single EAFP lookup instead of the previous find-then-find-again
        string = hotel.find_element(
            By.CLASS_NAME, 'hprt-occupancy-occupancy-info').get_attribute("innerText")
    except NoSuchElementException:
        return ""
    occupancies = [int(s) for s in string.split() if s.isdigit()]
    # guard: text without any number previously raised IndexError
    return occupancies[-1] if occupancies else ""
def get_remaining_rooms(hotel):
    """
    This function fetches the remaining rooms for a type of room.

    Returns the scarcity banner text (e.g. 'Only 2 rooms left'), or None
    when the element is absent.
    """
    try:
        # single EAFP lookup instead of the previous find-then-find-again
        return hotel.find_element(By.CLASS_NAME, 'top_scarcity').text
    except NoSuchElementException:
        return None
def get_hotel_details(hotel_id, hotel_name, web_driver, hotel_link, date_in, verbosity=False):
    """
    This function is the one that opens a new tab for each hotel, fetches all the rooms it offers (with its type,surface, price and occupancy).

    Returns (next_hotel_id, {row_id: [name, type, size, price, occupancy,
    choices, remaining, checkin]}). Rate-table rows omit repeated cells, so
    type/size/remaining fall back to the value from the previous row.
    """
    # Open a new window
    web_driver.execute_script("window.open('');")
    # Switch to the new window and open new URL
    web_driver.switch_to.window(web_driver.window_handles[1])
    web_driver.get(hotel_link)
    web_driver.implicitly_wait(3)
    hotel_info = []
    hotel = {}
    # carry-over defaults for merged rate-table cells
    default_type = None
    default_size = None
    default_remaining_rooms = None
    table = web_driver.find_elements(By.CLASS_NAME,'js-rt-block-row')
    # NOTE(review): find_elements returns a (possibly empty) list, never
    # None, so the else branch below is effectively dead code.
    if table is not None:
        for row in table:
            room_type = get_room_type(row)
            if room_type is None:
                room_type = default_type
            else:
                default_type = room_type
            max_occupancy = get_max_occupancy(row)
            room_price = get_hotel_price(row)
            room_choices = get_choices(row)
            room_choices = room_choices.rstrip('\n').replace('•','').rstrip('\n')
            room_size = get_room_size(row)
            remaining_rooms = get_remaining_rooms(row)
            if remaining_rooms is None:
                remaining_rooms = default_remaining_rooms
            else:
                default_remaining_rooms = remaining_rooms
            if room_size is None:
                room_size = default_size
            else:
                default_size = room_size
            hotel_info = [hotel_name, room_type, room_size,
                          room_price, max_occupancy, room_choices, remaining_rooms, date_in]
            if verbosity:
                print(hotel_info)
            hotel[hotel_id] = hotel_info
            hotel_id += 1
        if verbosity:
            print('[~] Finished retrieving details for {}'.format(hotel_name))
        # close the detail tab and return focus to the results window
        web_driver.switch_to.window(web_driver.window_handles[-1])
        web_driver.close()
        web_driver.switch_to.window(web_driver.window_handles[0])
        return hotel_id, hotel
    else:
        print('Not available')
def generate_file_date(dest, path: str, hotels_dict):
    """
    This function filters the hotels retrieved in the process. It first removes all the "Capsule Hotels" and "Hostels". Then, it separates them
    into two sheets : Stay Sakura's hotels, and the others that do not belong to them.

    Unlike generate_file(), rows carry their own checkin_date column and the
    file name omits the date. NOTE(review): the with-block body is not
    visible in this excerpt — confirm both sheets are written as in
    generate_file().
    """
    #Create columns name for each data type.
    hotel_columns = ['hotel_name', 'room_type', 'room_size',
                     'room_price', 'max_occupancy', 'meal_and_refundability','remaining_rooms','checkin_date']
    hotels = pd.DataFrame.from_dict(
        hotels_dict, orient='index', columns=hotel_columns)
    # First remove all Hostels and Capsule hotels
    filtered = hotels.drop(hotels[hotels['hotel_name'].str.contains(
        'Hostel |HOSTEL |hostel|capsule|Capsule|CAPSULE')].index)
    #SEPARATE DATAFRAMES & GENERATE TWO SHEETS
    stayjap_hotels = filtered[filtered['hotel_name'].str.contains(
        '[A-a]sakusa [Y-y]okozuna |art deco |ART DECO|[E-e]do [N-n]o [M-m]ai|[T-t]okyo [A-a]sakusa [T-t]ownhouse|[A-a]rt [D-d]eco|[H-h]yaku [K-k]ura|HYAKU KURA')]
    different_hotels = filtered.drop(filtered[filtered['hotel_name'].str.contains(
        '[A-a]sakusa [Y-y]okozuna |art deco |ART DECO|[E-e]do [N-n]o [M-m]ai|[T-t]okyo [A-a]sakusa [T-t]ownhouse|[A-a]rt [D-d]eco')].index)
    # Generating the name of the file so it will be unique
    date_time_obj = datetime.now()
    time_stamp = date_time_obj.strftime('%H_%M_%S')
    file_name = '_'.join([dest, time_stamp])
    path = path.replace('/','\\')
    # Saving under a path :
    saving_path = '{}\{}.xlsx'.format(path, file_name)
    # Write each dataframe to a different worksheet.
    with pd.ExcelWriter(saving_path, engine='xlsxwriter') as writer:
import clickhouse_driver
import numpy as np
import pandas as pd
from qaenv import (clickhouse_ip, clickhouse_password, clickhouse_port,
clickhouse_user)
from QUANTAXIS.QAData import (QA_DataStruct_Day, QA_DataStruct_Future_day,
QA_DataStruct_Future_min,
QA_DataStruct_Index_day, QA_DataStruct_Index_min,
QA_DataStruct_Min, QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min)
from QUANTAXIS.QAUtil import QA_util_get_real_date
def promise_list(x):
    """Wrap a bare string in a one-element list; pass anything else through."""
    if isinstance(x, str):
        return [x]
    return x
def stock_format(code):
    """Append the exchange suffix: '.XSHG' for Shanghai codes (leading '6'),
    '.XSHE' for Shenzhen otherwise."""
    suffix = '.XSHG' if code.startswith('6') else '.XSHE'
    return code + suffix
class QACKClient():
def __init__(self, host=clickhouse_ip, port=clickhouse_port, database='quantaxis', user=clickhouse_user, password=clickhouse_password):
    # Persistent ClickHouse connection. A very large insert_block_size plus
    # wire compression suits bulk market-data transfers; credentials default
    # to the qaenv environment module.
    self.client = clickhouse_driver.Client(host=host, port=port, database=database, user=user, password=password,
                                           settings={
                                               'insert_block_size': 100000000},
                                           compression=True)
def execute(self, sql):
    """Run *sql* against ClickHouse and return the result as a DataFrame."""
    run_query = self.client.query_dataframe
    return run_query(sql)
def to_qfq(self, res):
    """Convert a raw minute-bar DataStruct to forward-adjusted (qfq) prices.

    Joins per-(date, code) adjustment factors onto the minute bars (missing
    factors default to 1) and scales OHLC — plus limit columns when present.
    Returns a QA_DataStruct_Stock_min flagged as 'qfq'.
    """
    u = res.data.reset_index()
    u = u.assign(date=u.datetime.apply(lambda x: x.date()))
    u = u.set_index(['date', 'code'], drop=False)
    codelist = u.index.levels[1].unique().tolist()
    start = u.index.levels[0][0]
    end = u.index.levels[0][-1]
    adjx = self.get_stock_adj(codelist, start, end)
    if adjx is None:
        data = u.set_index(['datetime', 'code'])
    else:
        adjx = adjx.reset_index()
        adjx = adjx.assign(code=adjx.order_book_id).set_index(
            ['date', 'code']).adj
        data = u.join(adjx).set_index(['datetime', 'code']).fillna(1)
    for col in ['open', 'high', 'low', 'close']:
        data[col] = data[col] * data['adj']
    try:
        # BUG FIX: low_limit was previously computed from the already-adjusted
        # high_limit column; adjust each limit column from its own values.
        data['low_limit'] = data['low_limit'] * data['adj']
        data['high_limit'] = data['high_limit'] * data['adj']
    except:  # limit columns absent in some tables — keep best-effort behaviour
        pass
    return QA_DataStruct_Stock_min(data.sort_index(), if_fq='qfq')
def make_ret5(self, data):
    """Forward 5-bar open-to-open return per code.

    For each code (index level 1) computes open.pct_change(5).shift(-5),
    i.e. the return realised over the NEXT five bars, aligned to the
    current bar. Returns a Series named 'ret5'.
    """
    r = data.groupby(level=1).open.apply(
        lambda x: x.pct_change(5).shift(-5))
    r.name = 'ret5'
    return r
def make_ret_adjust(self, data):
    """One-period open-price return per code (index level 1)."""
    per_code_open = data.groupby(level=1)['open']
    return per_code_open.apply(lambda series: series.pct_change())
def get_stock_industry(self, code, start, end):
    """CITIC industry classification rows for *code* between start and end.

    Bare codes get an exchange suffix ('6*' -> .XSHG, else .XSHE); start/end
    are snapped to real trading dates first. Returns the raw DataFrame,
    de-duplicated on (date, order_book_id).
    """
    codex = []
    if isinstance(code, list):
        pass
    else:
        code = [code]
    start = QA_util_get_real_date(start)
    end = QA_util_get_real_date(end)
    for coder in code:
        if '.' in coder:
            codex.append(coder)
        else:
            codex.append(
                coder+'.XSHG' if coder[0] == '6' else coder+'.XSHE')
    res = self.execute("SELECT * FROM quantaxis.citis_industry WHERE ((`date` >= '{}')) AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(
        start, end, "'{}'".format("','".join(codex)))).drop_duplicates(['date', 'order_book_id'])
    return res
def get_index_weight(self, code, start, end):
    """Index constituent weights between start and end (monthly data).

    Dates are snapped to trading days, then truncated to the first of the
    month ('YYYY-MM-' + '01') since weights are stored monthly.
    NOTE(review): the suffix mapping here ('6*' -> .XSHE else .XSHG) is the
    inverse of get_stock_industry's — it looks intentional for index codes
    (e.g. 000300 -> .XSHG), but confirm.
    """
    codex = []
    if isinstance(code, list):
        pass
    else:
        code = [code]
    start = QA_util_get_real_date(start)
    end = QA_util_get_real_date(end)
    start = start[0:8]+'01'
    end = end[0:8]+'01'
    for coder in code:
        if '.' in coder:
            codex.append(coder)
        else:
            codex.append(
                coder+'.XSHE' if coder[0] == '6' else coder+'.XSHG')
    res = self.execute("SELECT * FROM quantaxis.index_weight WHERE ((`date` >= '{}')) AND (`date` <= '{}') AND (`index_code` IN ({}))".format(
        start, end, "'{}'".format("','".join(codex)))).drop_duplicates(['date', 'order_book_id'])
    return res
def get_stock_adj(self, code, start, end):
    """Price adjustment factors per (date, order_book_id).

    Bare codes get an exchange suffix ('6*' -> .XSHG, else .XSHE); dates are
    snapped to trading days. Returns a DataFrame indexed and sorted by
    (date, order_book_id).
    """
    codex = []
    if isinstance(code, list):
        pass
    else:
        code = [code]
    start = QA_util_get_real_date(start)
    end = QA_util_get_real_date(end)
    for coder in code:
        if '.' in coder:
            codex.append(coder)
        else:
            codex.append(
                coder+'.XSHG' if coder[0] == '6' else coder+'.XSHE')
    res = self.execute("SELECT * FROM quantaxis.stock_adj WHERE ((`date` >= '{}')) AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(
        start, end, "'{}'".format("','".join(codex)))).drop_duplicates(['date', 'order_book_id'])
    #res = res.assign(code = res.code.apply(lambda x: x[0:6]))
    return res.set_index(['date', 'order_book_id']).sort_index()
def get_stock_day_qfq(self, codelist, start, end):
    """Daily OHLCV bars forward-adjusted (qfq) by the stock_adj factors.

    Missing adjustment factors default to 1; OHLC and the limit-up/-down
    columns are scaled. Returns a DataFrame indexed by (date, order_book_id).
    """
    codelist = promise_list(codelist)
    adjx = self.get_stock_adj(codelist, start, end)
    columns_raw = ['date', 'order_book_id', 'num_trades', 'limit_up',
                   'limit_down', 'open', 'high', 'low', 'close', 'volume', 'total_turnover']
    u = self.execute("SELECT * FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) \
        AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['date', 'order_book_id'])
    u = u.set_index(['date', 'order_book_id'], drop=False).sort_index()
    data = u.join(adjx).set_index(
        ['date', 'order_book_id']).sort_index().fillna(1)
    for col in ['open', 'high', 'low', 'close']:
        data[col] = data[col] * data['adj']
    try:
        data['limit_up'] = data['limit_up'] * data['adj']
        data['limit_down'] = data['limit_down'] * data['adj']
    except:  # limit columns absent in some snapshots — best-effort
        pass
    return data.sort_index()
def get_stock_min_qfq_with_fields(self, codelist, start, end, fields):
    """Fetch selected 1-minute bar fields and forward-adjust price columns.

    Bare codes get an exchange suffix; requested price columns among
    open/high/low/close are scaled by the per-day adjustment factor
    (missing factors default to 1). Returns a DataFrame of *fields*
    indexed by (datetime, code).
    """
    codelist = promise_list(codelist)
    fields = promise_list(fields)
    if 'XS' not in codelist[0]:
        codelist = pd.Series(codelist).apply(
            lambda x: x+'.XSHE' if x[0] != '6' else x+'.XSHG').tolist()
    # (dead `columns_raw = [...].extend(fields)` removed — list.extend
    # returns None and the variable was never used)
    res = self.client.query_dataframe("SELECT datetime, order_book_id, {} FROM quantaxis.stock_cn_1min WHERE ((`datetime` >= '{}')) \
        AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(','.join(fields),
        start, end, "'{}'".format("','".join(codelist)))).drop_duplicates(['datetime', 'order_book_id'])
    u = res.assign(datetime=pd.to_datetime(res.datetime), code=res.order_book_id)
    u = u.assign(date=u.datetime.apply(lambda x: x.date()))
    u = u.set_index(['date', 'code'], drop=False)
    codelist = u.index.levels[1].unique().tolist()
    start = u.index.levels[0][0]
    end = u.index.levels[0][-1]
    adjx = self.get_stock_adj(codelist, start, end)
    if adjx is None:
        data = u.set_index(['datetime', 'code'])
    else:
        adjx = adjx.reset_index()
        adjx = adjx.assign(code=adjx.order_book_id).set_index(
            ['date', 'code']).adj
        data = u.join(adjx).set_index(['datetime', 'code']).fillna(1)
    for col in ['open', 'high', 'low', 'close']:
        if col in fields:
            data[col] = data[col] * data['adj']
    try:
        # BUG FIX: low_limit was previously derived from the already-adjusted
        # high_limit column; scale each limit column from its own values.
        data['low_limit'] = data['low_limit'] * data['adj']
        data['high_limit'] = data['high_limit'] * data['adj']
    except:  # limit columns absent unless requested — best-effort
        pass
    return data.loc[:, fields].sort_index()
def get_stock_day_qfq_with_fields(self, codelist, start, end, fields=None):
    """Fetch selected daily-bar fields and forward-adjust price columns.

    Requested columns among open/high/low/close (and limit_up/limit_down
    when present) are scaled by the adjustment factor. Returns a DataFrame
    of *fields* indexed by (date, order_book_id).
    """
    codelist = promise_list(codelist)
    fields = promise_list(fields)
    adjx = self.get_stock_adj(codelist, start, end)
    # (dead `columns_raw = [...].extend(fields)` removed — list.extend
    # returns None and the variable was never used)
    # debug trace of the generated SQL, kept for parity with prior behaviour
    print("SELECT date, order_book_id, {} FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) \
        AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(','.join(fields),
        start, end, "'{}'".format("','".join(codelist))))
    u = self.execute("SELECT date, order_book_id, {} FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) \
        AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(','.join(fields),
        start, end, "'{}'".format("','".join(codelist)))).drop_duplicates(['date', 'order_book_id'])
    u = u.set_index(['date', 'order_book_id'], drop=False).sort_index()
    data = u.join(adjx).set_index(
        ['date', 'order_book_id']).sort_index().fillna(1)
    for col in ['open', 'high', 'low', 'close']:
        if col in fields:
            data[col] = data[col] * data['adj']
    try:
        data['limit_up'] = data['limit_up'] * data['adj']
        data['limit_down'] = data['limit_down'] * data['adj']
    except:  # limit columns absent unless requested — best-effort
        pass
    return data.loc[:, fields].sort_index()
def get_stock_day_date(self, code, start, end):
    """List the trading dates present in the daily table for one code,
    sorted ascending and indexed by order_book_id."""
    # debug trace of the generated SQL
    print("SELECT order_book_id, date FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) AND (`date` <= '{}') AND (`order_book_id` == '{}')".format(start, end, code))
    u = self.execute("SELECT order_book_id, date FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) \
        AND (`date` <= '{}') AND (`order_book_id` == '{}')".format(start, end, code)).drop_duplicates()
    u = u.set_index('order_book_id').sort_values('date')
    return u
def get_stock_min_datetime(self, code, start, end):
u = self.execute("SELECT order_book_id, datetime FROM quantaxis.stock_cn_1min WHERE ((`datetime` >= '{}')) \
AND (`datetime` <= '{}') AND (`order_book_id` == '{}')".format(start, end, code)).drop_duplicates().set_index('order_book_id').sort_values('datetime')
return u
#def get_stock_day_qfq()
    def get_stock_day_qfq_adv(self, codelist, start, end):
        """Forward-adjusted daily bars wrapped in a QA_DataStruct_Stock_day.

        ``amount`` is aliased from ``total_turnover`` and ``code`` from
        ``order_book_id`` to match the QUANTAXIS column convention.
        """
        res = self.get_stock_day_qfq(codelist, start, end).reset_index()
        return QA_DataStruct_Stock_day(res.assign(amount=res.total_turnover, code=res.order_book_id).set_index(['date', 'code']), if_fq='qfq')
    def get_stock_min_qfq_adv(self, codelist, start, end):
        """Minute bars with forward adjustment applied via ``to_qfq``."""
        return self.to_qfq(self.get_stock_min(codelist, start, end))
def get_stock_list(self):
return self.client.query_dataframe('select * from stock_cn_codelist').query('status=="Active"')
    def get_fund_list(self):
        """Return the full fund code table (no status filtering, unlike stocks)."""
        return self.client.query_dataframe('select * from fund_cn_codelist')
    def get_etf_components(self, etf, start, end):
        """Fetch ETF constituent records for ``etf`` between start and end.

        Returns one row per (stock_code, trading_date, order_book_id) with the
        creation/redemption basket columns listed in ``columns_raw``.
        """
        codelist = promise_list(etf)
        columns_raw = ['stock_code', 'stock_amount', 'cash_substitute',
                       'cash_substitute_proportion', 'fixed_cash_substitute', 'order_book_id',
                       'trading_date']
        return self.client.query_dataframe("SELECT * FROM quantaxis.etf_components WHERE ((`trading_date` >= '{}')) \
                AND (`trading_date` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['stock_code', 'trading_date', 'order_book_id'])
    def get_stock_day(self, codelist, start, end):
        """Fetch raw (unadjusted) daily stock bars as a QA_DataStruct_Stock_day.

        Bare 6-digit codes are suffixed with the exchange first:
        '6xxxxx' -> .XSHG (Shanghai), anything else -> .XSHE (Shenzhen).
        """
        codelist = promise_list(codelist)
        # Only the first element is inspected -- assumes the list is homogeneous
        # (all suffixed or all bare). TODO confirm with callers.
        if 'XS' not in codelist[0]:
            codelist = pd.Series(codelist).apply(
                lambda x: x+'.XSHE' if x[0] != '6' else x+'.XSHG').tolist()
        columns_raw = ['date', 'order_book_id', 'num_trades', 'limit_up',
                       'limit_down', 'open', 'high', 'low', 'close', 'volume', 'total_turnover']
        res = self.client.query_dataframe("SELECT * FROM quantaxis.stock_cn_day WHERE ((`date` >= '{}')) \
                AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['date', 'order_book_id'])
        # `amount` is QUANTAXIS' name for turnover; `code` mirrors order_book_id.
        return QA_DataStruct_Stock_day(res.assign(date=pd.to_datetime(res.date), code=res.order_book_id, amount=res.total_turnover).set_index(['date', 'code']).sort_index())
    def get_stock_min(self, codelist, start, end):
        """Fetch raw 1-minute stock bars as a QA_DataStruct_Stock_min.

        Bare codes get an exchange suffix first ('6xxxxx' -> .XSHG, else .XSHE).
        """
        codelist = promise_list(codelist)
        # assumes all codes are either suffixed or bare -- only the first is checked
        if 'XS' not in codelist[0]:
            codelist = pd.Series(codelist).apply(
                lambda x: x+'.XSHE' if x[0] != '6' else x+'.XSHG').tolist()
        columns_raw = ['datetime', 'order_book_id', 'open',
                       'high', 'low', 'close', 'volume', 'total_turnover']
        res = self.client.query_dataframe("SELECT * FROM quantaxis.stock_cn_1min WHERE ((`datetime` >= '{}')) \
                AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['datetime', 'order_book_id'])
        # type='1min' tags the bar frequency for the QUANTAXIS data structure.
        return QA_DataStruct_Stock_min(res.assign(datetime=pd.to_datetime(res.datetime), code=res.order_book_id, amount=res.total_turnover, type='1min',).set_index(['datetime', 'code']).sort_index())
    def get_stock_min_close(self, codelist, start, end):
        """Fetch only the 1-minute close column, indexed by (datetime, order_book_id)."""
        codelist = promise_list(codelist)
        # assumes all codes are either suffixed or bare -- only the first is checked
        if 'XS' not in codelist[0]:
            codelist = pd.Series(codelist).apply(
                lambda x: x+'.XSHE' if x[0] != '6' else x+'.XSHG').tolist()
        columns_raw = ['datetime', 'order_book_id', 'close']
        res = self.client.query_dataframe("SELECT datetime, order_book_id, close FROM quantaxis.stock_cn_1min WHERE ((`datetime` >= '{}')) \
                AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['datetime', 'order_book_id'])
        return res.assign(datetime=pd.to_datetime(res.datetime)).set_index(['datetime', 'order_book_id']).sort_index()
    def get_future_day(self, codelist, start, end):
        """Fetch daily futures bars as a QA_DataStruct_Future_day.

        QUANTAXIS aliases: amount<-total_turnover, position<-open_interest,
        price<-settlement.
        """
        codelist = promise_list(codelist)
        columns_raw = ['date', 'order_book_id', 'limit_up', 'limit_down', 'open_interest',
                       'prev_settlement', 'settlement', 'open', 'high', 'low', 'close',
                       'volume', 'total_turnover']
        res = self.client.query_dataframe("SELECT * FROM quantaxis.future_cn_day WHERE ((`date` >= '{}')) \
                AND (`date` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['date', 'order_book_id'])
        return QA_DataStruct_Future_day(res.assign(date=pd.to_datetime(res.date),
                                                   code=res.order_book_id,
                                                   amount=res.total_turnover,
                                                   position=res.open_interest,
                                                   price=res.settlement).set_index(['date', 'code']))
    def get_future_min(self, codelist, start, end):
        """Fetch 1-minute futures bars as a QA_DataStruct_Future_min.

        QUANTAXIS aliases: position<-open_interest, tradetime<-trading_date,
        price<-close, amount<-total_turnover.
        """
        codelist = promise_list(codelist)
        columns_raw = ['datetime', 'order_book_id', 'open_interest', 'trading_date',
                       'open', 'high', 'low', 'close',
                       'volume', 'total_turnover']
        res = self.client.query_dataframe("SELECT * FROM quantaxis.future_cn_1min WHERE ((`datetime` >= '{}')) \
                AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['datetime', 'order_book_id'])
        return QA_DataStruct_Future_min(res.assign(datetime=pd.to_datetime(res.datetime),
                                                   position=res.open_interest,
                                                   code=res.order_book_id,
                                                   tradetime=res.trading_date,
                                                   price=res.close,
                                                   type='1min',
                                                   amount=res.total_turnover).set_index(['datetime', 'code']).sort_index())
    def get_stock_tick(self, codelist, start, end):
        """Fetch level-1 tick snapshots (5 bid/ask levels) for stocks.

        NOTE(review): unlike the bar loaders, no drop_duplicates is applied
        here -- confirm whether duplicate ticks are possible upstream.
        """
        codelist = promise_list(codelist)
        # .map(str) tolerates numeric codes before the suffix check.
        if 'XS' not in codelist[0]:
            codelist = pd.Series(codelist).map(str).apply(
                lambda x: x+'.XSHE' if x[0] != '6' else x+'.XSHG').tolist()
        columns_raw = ['datetime', 'trading_date', 'order_book_id', 'open', 'last', 'high',
                       'low', 'prev_close', 'volume', 'total_turnover', 'limit_up',
                       'limit_down', 'a1', 'a2', 'a3', 'a4', 'a5', 'b1', 'b2', 'b3', 'b4',
                       'b5', 'a1_v', 'a2_v', 'a3_v', 'a4_v', 'a5_v', 'b1_v', 'b2_v', 'b3_v',
                       'b4_v', 'b5_v', 'change_rate']
        res = self.client.query_dataframe("SELECT * FROM quantaxis.stock_cn_tick WHERE ((`datetime` >= '{}')) \
                AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw]
        return res.assign(datetime=pd.to_datetime(res.datetime), code=res.order_book_id, amount=res.total_turnover).set_index(['datetime', 'code']).sort_index()
def get_index_tick(self, codelist, start, end):
codelist = promise_list(codelist)
columns_raw = ['datetime', 'trading_date', 'order_book_id', 'open', 'last', 'high',
'low', 'prev_close', 'volume', 'total_turnover', 'limit_up',
'limit_down', 'a1', 'a2', 'a3', 'a4', 'a5', 'b1', 'b2', 'b3', 'b4',
'b5', 'a1_v', 'a2_v', 'a3_v', 'a4_v', 'a5_v', 'b1_v', 'b2_v', 'b3_v',
'b4_v', 'b5_v', 'change_rate']
res = self.client.query_dataframe("SELECT * FROM quantaxis.index_cn_tick WHERE ((`datetime` >= '{}')) \
AND (`datetime` <= '{}') AND (`order_book_id` IN ({}))".format(start, end, "'{}'".format("','".join(codelist)))).loc[:, columns_raw].drop_duplicates(['datetime', 'order_book_id'])
return res.assign(datetime= | pd.to_datetime(res.datetime) | pandas.to_datetime |
import pandas
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import preprocessing
from setlist import setlist
import sys
import os
path = os.getcwd()
# Remove the trailing 'complete_model' directory to reach the project root.
# The original used path.strip('complete_model'), but str.strip removes any
# of those *characters* from both ends (not the substring), which can mangle
# paths; remove the suffix explicitly instead.
if path.endswith('complete_model'):
    path = path[:-len('complete_model')]
sys.path.append(path)
from helper import svm,misc_helper
class train_test_generator:
def generate(self):
dataFrame = pandas.read_csv('../../CSV_Data/master_dataset.csv')
feature_columns = list(dataFrame.columns.values)[0:-1]
features,target = misc_helper.split_feature_target(dataFrame)
train,test,train_target,test_target = train_test_split(features,target,test_size = 0.2,stratify=target)
train,test = misc_helper.get_scaled_data(train,test)
#Initial Datasets
train = pandas.DataFrame(train,columns=feature_columns)
train.to_csv('datasets/train.csv',index=False)
train_target = pandas.DataFrame(train_target,columns=['label'])
train_target.to_csv('datasets/train_target.csv',index=False)
test = pandas.DataFrame(test,columns=feature_columns)
test.to_csv('datasets/test.csv',index=False)
test_target = pandas.DataFrame(test_target,columns=['label'])
test_target.to_csv('datasets/test_target.csv',index=False)
#
train_target_sets = train_target.copy(deep=True)
test_target_sets = test_target.copy(deep=True)
for i in range(len(setlist)):
train_target_sets['label'][train_target['label'].isin(setlist[i])] = str(i)
train_target_sets.to_csv('datasets/train_target_sets.csv',index=False)
for i in range(len(setlist)):
test_target_sets['label'][test_target['label'].isin(setlist[i])] = str(i)
test_target_sets.to_csv('datasets/test_target_sets.csv',index=False)
#Diving into sets
train_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
train_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
for index,row in train.iterrows():
setIndex = int(train_target_sets['label'][index])
if setIndex < len(train_sets_features):
train_sets_features[setIndex].append(row)
train_sets_targets[setIndex].append(train_target['label'][index])
for index,row in test.iterrows():
setIndex = int(test_target_sets['label'][index])
if setIndex < len(test_sets_features):
test_sets_features[setIndex].append(row)
test_sets_targets[setIndex].append(test_target['label'][index])
for i in range(len(train_sets_features)):
df = pandas.DataFrame(train_sets_features[i],columns=feature_columns)
df.to_csv('datasets/train_set_'+str(i),index=False)
df = | pandas.DataFrame(train_sets_targets[i],columns=['label']) | pandas.DataFrame |
import argparse
import logging
import os
import sys
import numpy as np
import pandas as pd
"""initialize"""
| pd.set_option("max_colwidth", 60) | pandas.set_option |
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, BatchNormalization, Dense, Input
from keras.models import Model
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import ARDRegression, Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
def root_mean_squared_error(y_true, y_pred):
    """RMSE computed with Keras backend ops, usable directly as a Keras loss."""
    squared_error = K.square(y_true - y_pred)
    return K.sqrt(K.mean(squared_error))
if __name__ == "__main__":
NUM_FOLDS = 50
SEED = 1000
shigeria_pred1 = np.load("shigeria_pred1.npy")
shigeria_pred2 = np.load("shigeria_pred2.npy")
shigeria_pred3 = np.load("shigeria_pred3.npy")
shigeria_pred4 = np.load("shigeria_pred4.npy")
shigeria_pred5 = np.load("shigeria_pred5.npy")
shigeria_pred6 = np.load("shigeria_pred6.npy")
shigeria_pred7 = np.load("shigeria_pred7.npy")
shigeria_pred8 = np.load("shigeria_pred8.npy")
shigeria_pred9 = np.load("shigeria_pred9.npy")
shigeria_pred10 = np.load("shigeria_pred10.npy")
upura_pred = np.load("upura_pred.npy")
takuoko_exp085 = np.load("takuoko_exp085.npy")
takuoko_exp096 = np.load("takuoko_exp096.npy")
takuoko_exp105 = np.load("takuoko_exp105.npy")
takuoko_exp108 = np.load("takuoko_exp108.npy")
takuoko_exp184 = np.load("takuoko_exp184.npy")
X_train_svd = np.load("X_train_all.npy")
X_test_svd = np.load("X_test_all.npy")
train_idx = np.load("train_idx.npy", allow_pickle=True)
svd1 = TruncatedSVD(n_components=3, n_iter=10, random_state=42)
svd1.fit(X_train_svd)
X_train_svd = svd1.transform(X_train_svd)
X_test_svd = svd1.transform(X_test_svd)
X_test = pd.DataFrame(
{
"shigeria_pred1": shigeria_pred1.reshape(-1),
"shigeria_pred2": shigeria_pred2.reshape(-1),
"shigeria_pred3": shigeria_pred3.reshape(-1),
"shigeria_pred4": shigeria_pred4.reshape(-1),
"shigeria_pred5": shigeria_pred5.reshape(-1),
"shigeria_pred6": shigeria_pred6.reshape(-1),
"shigeria_pred7": shigeria_pred7.reshape(-1),
"shigeria_pred8": shigeria_pred8.reshape(-1),
"shigeria_pred9": shigeria_pred9.reshape(-1),
"shigeria_pred10": shigeria_pred10.reshape(-1),
"upura": upura_pred,
"takuoko_exp085": takuoko_exp085,
"takuoko_exp096": takuoko_exp096,
"takuoko_exp105": takuoko_exp105,
"takuoko_exp108": takuoko_exp108,
"takuoko_exp184": takuoko_exp184,
}
)
X_test = pd.concat(
[
X_test,
pd.DataFrame(
X_test_svd, columns=[f"svd_{c}" for c in range(X_test_svd.shape[1])]
),
],
axis=1,
)
# upura oof
pred_val000 = pd.read_csv("../input/commonlit-oof/pred_val000.csv")
# shigeria oof
andrey_df = pd.read_csv("../input/commonlitstackingcsv/roberta_base_itpt.csv")
andrey_df2 = pd.read_csv("../input/commonlitstackingcsv/attention_head_nopre.csv")
andrey_df3 = pd.read_csv("../input/commonlitstackingcsv/attention_head_itpt.csv")
andrey_df4 = pd.read_csv(
"../input/d/shigeria/bayesian-commonlit/np_savetxt_andrey4.csv"
)
andrey_df5 = pd.read_csv("../input/commonlitstackingcsv/mean_pooling_last1.csv")
andrey_df6 = pd.read_csv(
"../input/commonlitstackingcsv/attention_head_cls_last3s.csv"
)
andrey_df7 = pd.read_csv(
"../input/commonlitstackingcsv/mean_pooling_cls_last3s.csv"
)
andrey_df8 = pd.read_csv(
"../input/commonlitstackingcsv/attention_head_cls_last4s.csv"
)
andrey_df9 = pd.read_csv("../input/commonlitstackingcsv/electra_large_nopre.csv")
andrey_df10 = pd.read_csv(
"../input/commonlitstackingcsv/attention_head_mean_pooling_cls_last3s.csv"
)
# takuoko oof
pred_val085 = | pd.read_csv("../input/commonlit-oof/pred_val085.csv") | pandas.read_csv |
from stix_shifter.stix_transmission.src.modules.cloudsql import cloudsql_connector
from stix_shifter.stix_transmission.src.modules.base.base_status_connector import Status
import pandas as pd
from unittest.mock import patch
import json
import unittest
@patch('ibmcloudsql.SQLQuery.__init__', autospec=True)
@patch('ibmcloudsql.SQLQuery.logon', autospec=True)
class TestCloudSQLConnection(unittest.TestCase, object):
def test_is_async(self, mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
check_async = module.Connector(connection, config).is_async
assert check_async
@patch('ibmcloudsql.SQLQuery.get_jobs')
def test_ping_endpoint(self, mock_ping_response, mock_api_client_logon,
mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = '[{"job_id": "placeholder", "status": "placeholder",\
"user_id": "placeholder", "statement": "placeholder",\
"resultset_location": "placeholder", "submit_time": "placeholder",\
"end_time": "placeholder", "error": "placeholder", error_message": "placeholder"}]'
mock_ping_response.return_value = mocked_return_value
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
ping_response = module.Connector(connection, config).ping()
assert ping_response is not None
assert ping_response['success']
    @patch('ibmcloudsql.SQLQuery.submit_sql')
    def test_query_response(self, mock_query_response, mock_api_client_logon,
                            mock_api_client):
        """create_query_connection should surface the submitted job id as search_id."""
        mock_api_client_logon.return_value = None
        mock_api_client.return_value = None
        # submit_sql returns the CloudSQL job id for the new query.
        mocked_return_value = '108cb8b0-0744-4dd9-8e35-ea8311cd6211'
        mock_query_response.return_value = mocked_return_value
        module = cloudsql_connector
        config = {
            "auth": {
                "bxapikey": "placeholder"
            },
            "client_info": "placeholder"
        }
        connection = {
            "instance_crn": "placeholder",
            "target_cos": "placeholder"
        }
        query = '{"query":"SELECT target.id from cos://us-geo/at-data/rest.1*.json STORED AS JSON c"}'
        query_response = module.Connector(connection, config).create_query_connection(query)
        assert query_response is not None
        assert 'search_id' in query_response
        assert query_response['search_id'] == "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
    @patch('ibmcloudsql.SQLQuery.get_job', autospec=True)
    def test_status_response(self, mock_status_response,
                             mock_api_client_logon, mock_api_client):
        """A 'completed' CloudSQL job should map to Status.COMPLETED."""
        mock_api_client_logon.return_value = None
        mock_api_client.return_value = None
        # get_job returns a dict parsed from the service's JSON job record.
        mocked_return_value = json.loads('{"status": "completed", "end_time": "2018-08-28T15:51:24.899Z", "submit_time": "2018-08-28T15:51:19.899Z"}')
        mock_status_response.return_value = mocked_return_value
        module = cloudsql_connector
        config = {
            "auth": {
                "bxapikey": "placeholder"
            },
            "client_info": "placeholder"
        }
        connection = {
            "instance_crn": "placeholder",
            "target_cos": "placeholder"
        }
        search_id = "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
        status_response = module.Connector(connection, config).create_status_connection(search_id)
        assert status_response is not None
        assert status_response['success']
        assert 'status' in status_response
        assert status_response['status'] == Status.COMPLETED.value
    @patch('stix_shifter.stix_transmission.src.modules.cloudsql.cloudsql_results_connector.CloudSQLResultsConnector.records', autospec=True)
    def test_results_response(self, mock_results_response,
                              mock_api_client_logon, mock_api_client):
        """create_results_connection should return the mocked result rows as data."""
        mock_api_client_logon.return_value = None
        mock_api_client.return_value = None
        # One-row result set. NOTE(review): DataFrame.append was removed in
        # pandas >= 2.0 -- this fixture needs pd.concat if pandas is upgraded.
        mocked_return_value = pd.DataFrame(columns=['id'])
        mocked_return_value = mocked_return_value.append([{'id': 'crn:v1:bluemix:public:iam-identity::a/::apikey:1234'}], ignore_index=True)
        mock_results_response.return_value = mocked_return_value
        module = cloudsql_connector
        config = {
            "auth": {
                "bxapikey": "placeholder"
            },
            "client_info": "placeholder"
        }
        connection = {
            "instance_crn": "placeholder",
            "target_cos": "placeholder"
        }
        search_id = "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
        offset = 0
        length = 1
        results_response = module.Connector(connection, config).create_results_connection(search_id, offset, length)
        assert results_response is not None
        assert results_response['success']
        assert 'data' in results_response
        assert len(results_response['data']) > 0
    @patch('ibmcloudsql.SQLQuery.delete_result', autospec=True)
    def test_delete_response(self, mock_delete_response,
                             mock_api_client_logon, mock_api_client):
        """delete_query_connection should succeed when the stored result is removed."""
        mock_api_client_logon.return_value = None
        mock_api_client.return_value = None
        # delete_result reports which COS objects were deleted. NOTE(review):
        # DataFrame.append was removed in pandas >= 2.0 (use pd.concat).
        mocked_return_value = pd.DataFrame(columns=['Deleted Object'])
        mocked_return_value = mocked_return_value.append([{'Deleted Object': 'result/jobid=9b0f77b1-74e6-4953-84df-e0571a398ef7/part-00000-17db0efc-f563-45c2-9d12-560933cd01b6-c000-attempt_20181016200828_0024_m_000000_0.csv'}], ignore_index=True)
        mock_delete_response.return_value = mocked_return_value
        module = cloudsql_connector
        config = {
            "auth": {
                "bxapikey": "placeholder"
            },
            "client_info": "placeholder"
        }
        connection = {
            "instance_crn": "placeholder",
            "target_cos": "placeholder"
        }
        search_id = "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
        delete_response = module.Connector(connection, config).delete_query_connection(search_id)
        assert delete_response is not None
        assert delete_response['success']
@patch('ibmcloudsql.SQLQuery.submit_sql')
@patch('ibmcloudsql.SQLQuery.get_job', autospec=True)
@patch('ibmcloudsql.SQLQuery.get_result', autospec=True)
def test_query_flow(self, mock_results_response, mock_status_response,
mock_query_response, mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
results_mock = | pd.DataFrame(columns=['id']) | pandas.DataFrame |
from collections import defaultdict
def infinite_defaultdict():
    """Return a defaultdict whose missing keys recursively create nested dicts.

    Defined with ``def`` rather than a lambda assignment (PEP 8 E731); the
    public name and call signature are unchanged.
    """
    return defaultdict(infinite_defaultdict)
import pandas as pd
# Experiment sweep parameters: vector sizes and thread counts to benchmark.
VECTOR_SIZE = [10, 100, 1000, 10000, 100000]
THREADS = [1, 2, 4, 8, 10, 16, 20]
#REPLICATES = range(1, 31)
REPLICATES = [1]
# 5-D panel factory keyed by (replicate, vector_size, threads, function_name,
# metric). NOTE: pd.core.panelnd / Panel4D were removed in pandas 0.25 -- this
# module requires an older pandas.
Panel5D = pd.core.panelnd.create_nd_panel_factory(
    klass_name='Panel5D',
    orders=['replicate', 'vector_size', 'threads', 'function_name', 'metric'],
    slices={'vector_size': 'vector_size', 'threads': 'threads',
            'function_name': 'function_name',
            'metric': 'metric'},
    slicer=pd.core.panel4d.Panel4D,
    aliases={'major': 'function_name', 'minor': 'metric'},
    stat_axis=2)
# NOTE(review): this second assignment silently OVERWRITES the factory above
# with the generic axis names from the pandas documentation example
# ('cool', 'labels', 'items', ...). Probably only one definition was intended
# -- confirm which one downstream code expects.
Panel5D = pd.core.panelnd.create_nd_panel_factory(
    klass_name = 'Panel5D',
    orders = [ 'cool', 'labels','items','major_axis','minor_axis'],
    slices = { 'labels' : 'labels', 'items' : 'items',
               'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
    slicer = pd.core.panel4d.Panel4D,
    aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
    stat_axis = 2)
def prepare_panels(expname):
header = ['%Time', 'Exclusive msec', 'Inclusive total msec', '#Call', '#Subrs', 'Inclusive usec/call', 'Name']
replicate_panels = defaultdict(dict)
for r in REPLICATES:
data = defaultdict(dict)
for v in VECTOR_SIZE:
for t in THREADS:
tauprofile = "../workdir/{}/r{}/{}/{}/tauprofile".format(expname, r, v, t)
header_started = None
tv_data = defaultdict(list)
with open(tauprofile, 'r') as f:
for line in f:
if line.startswith('-----') and header_started is None:
header_started = True
elif line.startswith('-----') and header_started:
break
for line in f:
function_data = line[:-1].strip().split()
if len(function_data) > len(header):
function_data = function_data[:len(header) - 1] + [" ".join(function_data[len(header) - 1:])]
for metric, value in zip(header, function_data):
if not value.isalpha():
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
tv_data[metric].append(value)
data[v][t] = pd.DataFrame(tv_data, index=tv_data['Name'])
replicate_panels[r] = | pd.Panel4D(data) | pandas.Panel4D |
from sqlalchemy import create_engine
import pandas as pd
import datetime
import config
import pmdarima as pm
import numpy as np
import arch
import statistics
import traceback
# Show every DataFrame column when printing (debug-friendly, module-wide).
pd.set_option('display.max_columns', None)
def initializer(symbol):
    """Load ~1 year of daily data for ``symbol`` and derive the forecast window.

    Returns:
        tuple: (df, tomorrow, date_range, period, engine) where ``df`` is the
        price history sorted ascending by date, ``date_range`` spans the
        business days from tomorrow through the Friday ending the 5-step
        horizon, and ``period`` is the length of that range.
    """
    # Get Data
    engine = create_engine(config.psql)
    num_data_points = 255
    # Pull ~45% more calendar days than trading days needed to cover
    # weekends/holidays before the LIMIT kicks in.
    one_year_ago = (datetime.datetime.utcnow().date() - datetime.timedelta(days=num_data_points * 1.45)).strftime("%Y-%m-%d")
    query = f"select distinct * from stockdata_hist where symbol = '{symbol}' and tdate > '{one_year_ago}' AND (CAST(tdate AS TIME) = '20:00') limit {num_data_points}"
    df = pd.read_sql_query(query, con=engine).sort_values(by='tdate', ascending=True)
    # Get Forecast Range
    steps = 5
    today = df['tdate'].iloc[-1]
    end_prediction_date = today + datetime.timedelta(days=steps)
    # Roll forward to the Friday on/after the end of the prediction window.
    end_friday = end_prediction_date + datetime.timedelta((4 - end_prediction_date.weekday()) % 7)
    tomorrow = today + datetime.timedelta(days=1)
    date_range = pd.date_range(tomorrow, end_friday, freq="B")
    # Reuse the range instead of rebuilding an identical one just for its length.
    period = len(date_range)
    return df, tomorrow, date_range, period, engine
def arima(symbol, df, period, date_range):
    """Fit a seasonal ARIMA(1,1,1)(0,0,1)[7] on closes and forecast ``period`` steps.

    Args:
        symbol: ticker, copied into the 'uticker' column.
        df: history with 'tdate', 'tick_close' and 'vwap' columns (mutated:
            'tdate' is parsed and set as index in place).
        period: number of business days to forecast.
        date_range: the matching forecast dates (same length as ``period``).

    Returns:
        DataFrame with forecast, confidence bounds and percent diff vs last price.
    """
    df['tdate'] = pd.to_datetime(df['tdate'])
    df.set_index(df['tdate'], inplace=True)
    y = df['tick_close']
    # Model ARIMA parameters
    # model = pm.auto_arima(y, error_action='ignore', trace=True,
    #                       suppress_warnings=True, maxiter=10,
    #                       seasonal=True, m=50)
    # print(type(model))
    # print("get params:")
    # print(model.get_params()['order'])
    # print(type(model.get_params()['order']))
    # print(model.summary())
    # Fixed orders (found offline with auto_arima above): weekly seasonality m=7.
    m = 7
    order = (1, 1, 1)
    sorder = (0, 0, 1, m)
    model = pm.arima.ARIMA(order, seasonal_order=sorder,
                           start_params=None, method='lbfgs', maxiter=50,
                           suppress_warnings=True, out_of_sample_size=0, scoring='mse',
                           scoring_args=None, trend=None, with_intercept=True)
    model.fit(y)
    # Forecast
    forecasts = model.predict(n_periods=period, return_conf_int=True) # predict N steps into the future
    # forecasts[1] is the (n, 2) confidence-interval array -> low/high columns.
    flatten = forecasts[1].tolist()
    results_df = pd.DataFrame(flatten, columns=['arima_low', 'arima_high'])
    results_df['arima_forecast'] = forecasts[0]
    results_df['tdate'] = date_range
    results_df['uticker'] = symbol
    results_df['arima_order'] = f"{order} {sorder}"
    # NOTE(review): Series[-1] is positional fallback indexing on a
    # datetime-indexed Series -- deprecated in newer pandas; .iloc[-1] is safer.
    results_df['last_price'] = df['tick_close'][-1]
    results_df['last_vwap'] = df['vwap'][-1]
    results_df['arima_diff'] = (results_df['arima_forecast']-results_df['last_price'])/results_df['last_price']
    results_df = results_df[['uticker', 'tdate', 'arima_low', 'arima_forecast', 'arima_high', 'arima_order', 'last_price', 'last_vwap', 'arima_diff']]
    return results_df
def garch_model(df, period, date_range):
df = df.sort_index(ascending=True)
df['tdate'] = | pd.to_datetime(df['tdate']) | pandas.to_datetime |
import pandas as pd
def generate_train(playlists, n_samples=1000, random_state=None):
    """Split challenge-style test playlists out of the full playlist set.

    For each of ten length categories, ``n_samples`` playlists whose
    ``num_tracks`` falls within the category's inclusive range are sampled
    without replacement and removed from the remaining (training) pool, so no
    playlist appears in two categories.

    Args:
        playlists: DataFrame with at least 'pid' and 'num_tracks' columns.
        n_samples: playlists drawn per category (default 1000, the original
            hard-coded value).
        random_state: optional seed forwarded to ``DataFrame.sample`` for
            reproducible splits.

    Returns:
        tuple: (remaining_playlists, cat_pids) where cat_pids maps category
        name -> list of sampled pids.

    Raises:
        ValueError: if a category has fewer than ``n_samples`` eligible rows.
    """
    # define category range (inclusive num_tracks bounds per challenge category)
    cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
             'cat6': (40, 100), 'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
    cat_pids = {}
    for cat, interval in cates.items():
        lo, hi = interval
        eligible = playlists[(playlists['num_tracks'] >= lo) & (playlists['num_tracks'] <= hi)]
        df = eligible.sample(n=n_samples, random_state=random_state)
        cat_pids[cat] = list(df.pid)
        # Drop the sampled rows so later categories cannot re-pick them.
        playlists = playlists.drop(df.index)
    playlists = playlists.reset_index(drop=True)
    return playlists, cat_pids
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
if cat == 'cat2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
if cat == 'cat3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
if cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
if cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = | pd.concat([df_eval_itr, df_itr]) | pandas.concat |
"""
Tests the financial data structures
"""
import unittest
import os
import numpy as np
import pandas as pd
from mlfinlab.multi_product.etf_trick import get_futures_roll_series
class TestETFTrick(unittest.TestCase):
"""
Test the various financial data structures:
1. Dollar bars
2. Volume bars
3. Tick bars
"""
def setUp(self):
"""
Set the file path for the tick data csv
"""
project_path = os.path.dirname(__file__)
path = project_path + '/test_data'
self.open_df_path = '{}/open_df.csv'.format(path)
self.close_df_path = '{}/close_df.csv'.format(path)
self.open_df = | pd.read_csv(self.open_df_path, usecols=['date', 'spx']) | pandas.read_csv |
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...models.geosim import GeoSim
from ...core.trajectorydataframe import TrajDataFrame
def global_variables():
    """Build the toy 2x2 spatial tessellation and social graph used by the tests."""
    # Four adjacent rectangular tiles near Turin, each as a closed lon/lat ring.
    tile_rings = [[[7.481, 45.184],
                   [7.481, 45.216],
                   [7.526, 45.216],
                   [7.526, 45.184],
                   [7.481, 45.184]],
                  [[7.481, 45.216],
                   [7.481, 45.247],
                   [7.526, 45.247],
                   [7.526, 45.216],
                   [7.481, 45.216]],
                  [[7.526, 45.184],
                   [7.526, 45.216],
                   [7.571, 45.216],
                   [7.571, 45.184],
                   [7.526, 45.184]],
                  [[7.526, 45.216],
                   [7.526, 45.247],
                   [7.571, 45.247],
                   [7.571, 45.216],
                   [7.526, 45.216]]]
    polygons = [shapely.geometry.Polygon(ring) for ring in tile_rings]
    tessellation = gpd.GeoDataFrame(geometry=polygons, crs="EPSG:4326")
    # Promote the positional index to the tile-id column expected by skmob.
    tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
    social_graph = [[0, 1], [0, 2], [0, 3], [1, 3], [2, 4]]
    return tessellation, social_graph
tessellation, social_graph = global_variables()
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', [social_graph, 'random'])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
# First test set: CORRECT arguments, no ERRORS expected (#test: 4)
def test_geosim_generate_success(start_date, end_date, spatial_tessellation,
                                 social_graph, n_agents, random_state, show_progress):
    """With valid arguments GeoSim.generate must return a TrajDataFrame."""
    model = GeoSim()
    synthetic = model.generate(start_date, end_date, social_graph=social_graph,
                               spatial_tessellation=spatial_tessellation,
                               n_agents=n_agents, random_state=random_state,
                               show_progress=show_progress)
    assert isinstance(synthetic, TrajDataFrame)
# Second test set: WRONG arguments, expected to FAIL
# test 2.1: wrong n_agents (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [-2,-1,0])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_n_agents(start_date, end_date, spatial_tessellation,
                               social_graph, n_agents, random_state, show_progress):
    """A non-positive n_agents must make generate() raise ValueError."""
    GeoSim().generate(start_date, end_date, social_graph=social_graph,
                      spatial_tessellation=spatial_tessellation,
                      n_agents=n_agents, random_state=random_state,
                      show_progress=show_progress)
# test 2.2: end_date prior to start_date (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_dates(start_date, end_date, spatial_tessellation,
                            social_graph, n_agents, random_state, show_progress):
    """end_date earlier than start_date must make generate() raise ValueError."""
    geosim = GeoSim()
    tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
                          spatial_tessellation=spatial_tessellation,
                          n_agents = n_agents, random_state=random_state,
                          show_progress=show_progress)
# test 2.3: wrong type for the spatial_tessellation (#test: 5)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', ["", None, [], "tessellation", [1,2,3]])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_geosim_wrong_tex_type(start_date, end_date, spatial_tessellation,
                               social_graph, n_agents, random_state, show_progress):
    """Non-GeoDataFrame spatial_tessellation values must raise TypeError."""
    model = GeoSim()
    model.generate(start_date, end_date, social_graph=social_graph,
                   spatial_tessellation=spatial_tessellation,
                   n_agents=n_agents, random_state=random_state,
                   show_progress=show_progress)
# test 2.4: #of tiles in spatial_tessellation < 2 (#test: 2)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [pd.DataFrame(),tessellation[:1]])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_tiles_num(start_date, end_date, spatial_tessellation,
                                social_graph, n_agents, random_state, show_progress):
    """A tessellation with fewer than two tiles must raise ValueError."""
    model = GeoSim()
    model.generate(start_date, end_date, social_graph=social_graph,
                   spatial_tessellation=spatial_tessellation,
                   n_agents=n_agents, random_state=random_state,
                   show_progress=show_progress)
# test 2.5: wrong social_graph type (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', [None, False, 24])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_geosim_wrong_social_graph_type(start_date, end_date, spatial_tessellation,
                                        social_graph, n_agents, random_state, show_progress):
    """social_graph values of an unsupported type must raise TypeError."""
    model = GeoSim()
    model.generate(start_date, end_date, social_graph=social_graph,
                   spatial_tessellation=spatial_tessellation,
                   n_agents=n_agents, random_state=random_state,
                   show_progress=show_progress)
# test 2.6: correct social_graph type with wrong value (#test: 2)
@pytest.mark.parametrize('start_date', [ | pd.to_datetime('2020/01/01 08:00:00') | pandas.to_datetime |
"""records by year"""
import datetime
import psycopg2.extras
import pandas as pd
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
    """Describe this plotter for the autoplot framework.

    Returns a dict with the ``data`` flag, a human-readable
    ``description`` and the list of user-facing ``arguments``.
    """
    description = """This chart plots the number of daily maximum
    high temperatures, minimum low temperatures and precipitation records
    set by year. Ties are not included. The algorithm sets the records based
    on the first year of data and then iterates over each sequential year
    and sets the new daily records. A general model of the number of new
    records to be set each year would be 365 / (number of years). So you would
    expect to set 365 records the first year, 183 the second, and so on...
    """
    station_arg = dict(type='station', name='station', default='IATDSM',
                      label='Select Station:', network='IACLIMATE')
    return {'data': True,
            'description': description,
            'arguments': [station_arg]}
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
table = "alldata_%s" % (station[:2],)
sts = ctx['_nt'].sts[station]['archive_begin']
if sts is None:
raise NoDataFound("Station metadata unknown.")
syear = sts.year if sts.month == 1 and sts.day == 1 else (sts.year + 1)
syear = max(syear, 1893)
eyear = datetime.datetime.now().year
cursor.execute("""
SELECT sday, year, high, low, precip, day from """+table+"""
where station = %s and sday != '0229'
and year >= %s ORDER by day ASC
""", (station, syear))
hrecords = {}
hyears = [0]*(eyear - syear)
lrecords = {}
lyears = [0]*(eyear - syear)
precords = {}
pyears = [0]*(eyear - syear)
expect = [0]*(eyear - syear)
# hstraight = 0
for row in cursor:
sday = row[0]
year = row[1]
high = row[2]
low = row[3]
precip = row[4]
if year == syear:
hrecords[sday] = high
lrecords[sday] = low
precords[sday] = precip
continue
if precip > precords[sday]:
precords[sday] = row['precip']
pyears[year - syear - 1] += 1
if high > hrecords[sday]:
hrecords[sday] = row['high']
hyears[year - syear - 1] += 1
# hstraight += 1
# if hstraight > 3:
# print hstraight, sday, row[4]
# else:
# hstraight = 0
if low < lrecords[sday]:
lrecords[sday] = low
lyears[year - syear - 1] += 1
years = range(syear + 1, eyear+1)
for i, year in enumerate(years):
expect[i] = 365.0/float(year - syear + 1)
df = pd.DataFrame(dict(expected=pd.Series(expect),
highs=pd.Series(hyears),
lows=pd.Series(lyears),
precip= | pd.Series(pyears) | pandas.Series |
import os
import pandas as pd
from plotly.subplots import make_subplots
from datetime import timedelta,datetime,date
import plotly.graph_objects as go
import plotly.express as px
import processor._load_intraday as load_intraday
import news._news_yh as news_yh
import news._news_sa as news_sa
class TwitterPlot:
"""This is a class using plotly to plot graph
"""
def __init__(self, key_word_):
    """Remember the tracked keyword and precompute today's output folder."""
    # Ticker / search keyword this plotter instance is about.
    self.key_word = key_word_
    # Today's date as an ISO string (YYYY-MM-DD); used to bucket output per day.
    self.today = str(date.today())
    # Windows-style directory where today's sentiment graphs are written.
    self.saveaddr = f'data\\senti_graph\\{self.today}'
def plot_senti1(self, hourly_ohlc, all_sentis, earning_release_within):
    """Plot four stacked subplots sharing one x-axis -- OHLC price, volume,
    publication counts, and positive/negative sentiment scores -- shade the
    first earning-release hour, then show the figure and save it as a PNG
    under ``self.saveaddr``.

    NOTE(review): ``earning_release_within.index[0]`` is read unconditionally,
    so this assumes the earnings frame is non-empty -- confirm with callers.
    """
    # self define earning
    # earning_release_within.index =pd.DataFrame()
    # earning_release_within[pd.to_datetime('2020-02-02 16:00:00')]=0
    # plot it with plotly
    fig = make_subplots(rows=4, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0, row_heights=[1.5, 1, 1, 1])
    fig.add_trace(go.Ohlc(
        x=hourly_ohlc.index,
        open=hourly_ohlc.open,
        high=hourly_ohlc.high,
        low=hourly_ohlc.low,
        close=hourly_ohlc.close,
        name="Intraday stock price"),
        row=1, col=1)
    fig.add_trace(go.Bar(
        x=hourly_ohlc.index,
        y=hourly_ohlc.volume,
        name="Intraday volume",
        marker_color="lightslategray"),
        row=2, col=1)
    # Disabled earnings trace kept for reference (bare string, never executed):
    '''
    #PLOT the earning
    fig.add_trace(go.Scatter(
        x=earning_release_within.index,
        y=earning_release_within.Surprise,
        name="Earning Event",
        marker_color="green"),
        row=3, col=1)
    '''
    # plot the sentiment counts
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.All_counts,
        name="Publication count",
        marker_color="orange"),
        row=3, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Positive,
        name="Positive score",
        marker_color="green"),
        row=4, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Negative,
        name="Negative score",
        marker_color="red"),
        row=4, col=1)
    # Ohlc traces enable a range slider by default; hide it.
    fig.update(layout_xaxis_rangeslider_visible=False)
    # mark the weekends in the plot
    wkds_list = TwitterPlot.mark_weekend(all_sentis.index)
    # Disabled all-weekend shading kept for reference (bare string, never executed):
    """
    # try to include all the weekends in the graph
    shapes=[]
    for wkds in wkds_list:
        shapes.append(
            dict(
                type="rect",
                # x-reference is assigned to the x-values
                xref="x",
                # y-reference is assigned to the plot paper [0,1]
                yref="paper",
                x0=wkds[0],
                y0=0,
                x1=wkds[1],
                y1=1,
                fillcolor="LightSalmon",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
        )
    """
    fig.update_layout(
        shapes=[
            # 1st highlight earning: shade one hour from the first release.
            dict(
                type="rect",
                xref="x",
                yref="paper",
                x0=earning_release_within.index[0],
                y0=0,
                x1=earning_release_within.index[0]+timedelta(hours=1),
                y1=1,
                fillcolor="darkviolet",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
            # last weekends in all dates we have (currently disabled)
            # dict(
            #     type="rect",
            #     # x-reference is assigned to the x-values
            #     xref="x",
            #     # y-reference is assigned to the plot paper [0,1]
            #     yref="paper",
            #     x0=wkds_list[-1][0],
            #     y0=0,
            #     x1=wkds_list[-1][1],
            #     y1=1,
            #     fillcolor="LightSalmon",
            #     opacity=0.5,
            #     layer="below",
            #     line_width=0,
            # ),
        ]
    )
    # title
    fig.update_layout(height=600, width=1200,
                      title_text=f"{self.key_word} intraday twitter sentiment and earnings info")
    fig.show()
    # save the plot
    if not os.path.exists(self.saveaddr): os.mkdir(self.saveaddr)
    fig.write_image(f'{self.saveaddr}\\{self.key_word}.png')
def plot_senti2(self, all_sentis, earning_release_within):
    """Plot three stacked subplots -- publication counts, the earning
    surprise series, and positive/negative sentiment scores -- shade the
    first earning hour and the last weekend, then show and save the figure.

    NOTE(review): indexes ``earning_release_within.index[0]`` and
    ``wkds_list[-1]`` unconditionally -- assumes both are non-empty.
    """
    # plot it with plotly
    fig = make_subplots(rows=3, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0, row_heights=[2, 1, 1])
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.All_counts,
        name="Publication count",
        marker_color="lightslategray"),
        row=1, col=1)
    fig.add_trace(go.Scatter(
        x=earning_release_within.index,
        y=earning_release_within.Surprise,
        name="Earning Event",
        marker_color="green"),
        row=2, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Positive,
        name="Positive score",
        marker_color="green"),
        row=3, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Negative,
        name="Negative score",
        marker_color="red"),
        row=3, col=1)
    # mark the weekends in the plot
    wkds_list = TwitterPlot.mark_weekend(all_sentis.index)
    fig.update_layout(
        shapes=[
            # 1st highlight earning: shade one hour from the first release.
            dict(
                type="rect",
                xref="x",
                yref="paper",
                x0=earning_release_within.index[0],
                y0=0,
                x1=earning_release_within.index[0]+timedelta(hours=1),
                y1=1,
                fillcolor="darkviolet",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
            # last weekends in all dates we have
            dict(
                type="rect",
                # x-reference is assigned to the x-values
                xref="x",
                # y-reference is assigned to the plot paper [0,1]
                yref="paper",
                x0=wkds_list[-1][0],
                y0=0,
                x1=wkds_list[-1][1],
                y1=1,
                fillcolor="LightSalmon",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
        ]
    )
    # title
    fig.update_layout(height=600, width=1200,
                      title_text="{0} intraday twitter sentiment".format(self.key_word))
    fig.show()
    if not os.path.exists(self.saveaddr): os.mkdir(self.saveaddr)
    fig.write_image(f'{self.saveaddr}\\{self.key_word}.png')
def plot_senti3(self, hourly_ohlc, all_sentis):
    """Plot four stacked subplots -- OHLC price, volume, publication counts,
    and positive/negative sentiment scores -- shade the last weekend in the
    data range, then show and save the figure.

    NOTE(review): reads ``wkds_list[-1]`` unconditionally -- assumes
    ``mark_weekend`` returned at least one span.
    """
    # plot it with plotly
    fig = make_subplots(
        rows=4, cols=1,
        shared_xaxes=True,
        vertical_spacing=0,
        row_heights=[1.5, 1, 1, 1])
    fig.add_trace(go.Ohlc(
        x=hourly_ohlc.index,
        open=hourly_ohlc.open,
        high=hourly_ohlc.high,
        low=hourly_ohlc.low,
        close=hourly_ohlc.close,
        name="Intraday stock price"),
        row=1, col=1)
    fig.add_trace(go.Bar(
        x=hourly_ohlc.index,
        y=hourly_ohlc.volume,
        name="Intraday volume",
        marker_color="lightslategray"),
        row=2, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.All_counts,
        name="Publication count",
        marker_color="orange"),
        row=3, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Positive,
        name="Positive score",
        marker_color="green"),
        row=4, col=1)
    fig.add_trace(go.Bar(
        x=all_sentis.index,
        y=all_sentis.Negative,
        name="Negative score",
        marker_color="red"),
        row=4, col=1)
    # Ohlc traces enable a range slider by default; hide it.
    fig.update(layout_xaxis_rangeslider_visible=False)
    wkds_list = TwitterPlot.mark_weekend(all_sentis.index)
    fig.update_layout(
        shapes=[
            # last weekends in all dates we have
            dict(
                type="rect",
                # x-reference is assigned to the x-values
                xref="x",
                # y-reference is assigned to the plot paper [0,1]
                yref="paper",
                x0=wkds_list[-1][0],
                y0=0,
                x1=wkds_list[-1][1],
                y1=1,
                fillcolor="LightSalmon",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
        ]
    )
    fig.update_layout(height=600, width=1200,
                      title_text="{0} intraday twitter sentiment and earnings info".format(self.key_word))
    fig.show()
    if not os.path.exists(self.saveaddr): os.mkdir(self.saveaddr)
    fig.write_image(f'{self.saveaddr}\\{self.key_word}.png')
def plot_senti4(self, all_sentis):
    """Render publication counts (top) and positive/negative sentiment
    counts (bottom) as two stacked bar subplots, shade the most recent
    weekend in the data, then display the figure and save it as a PNG.
    """
    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0, row_heights=[1, 1])
    # (trace, target row) pairs -- added in the same order as before.
    bar_specs = [
        (go.Bar(x=all_sentis.index, y=all_sentis.All_counts,
                name="Publication count", marker_color="lightslategray"), 1),
        (go.Bar(x=all_sentis.index, y=all_sentis.Positive,
                name="Positive count", marker_color="green"), 2),
        (go.Bar(x=all_sentis.index, y=all_sentis.Negative,
                name="Negative count", marker_color="red"), 2),
    ]
    for bar, target_row in bar_specs:
        fig.add_trace(bar, row=target_row, col=1)
    # Shade the last weekend contained in the data range.
    weekend_spans = TwitterPlot.mark_weekend(all_sentis.index)
    last_weekend = weekend_spans[-1]
    fig.update_layout(
        shapes=[
            dict(
                type="rect",
                xref="x",       # x bounds are data values
                yref="paper",   # y bounds span the full plot height [0, 1]
                x0=last_weekend[0],
                y0=0,
                x1=last_weekend[1],
                y1=1,
                fillcolor="LightSalmon",
                opacity=0.5,
                layer="below",
                line_width=0,
            ),
        ]
    )
    fig.update_layout(height=600, width=1200,
                      title_text="{0} intraday twitter sentiment".format(self.key_word))
    fig.show()
    if not os.path.exists(self.saveaddr):
        os.mkdir(self.saveaddr)
    fig.write_image(f'{self.saveaddr}\\{self.key_word}.png')
def plot_preopen_senti(self, senti_result):
    """Plot pre-opening sentiment: the user-weighted score on top and the
    positive/negative counts below; the figure is saved (not shown) under
    ``data\\preopen\\<today>``.
    """
    # plot preopening sentiment
    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0, row_heights=[1, 1])
    fig.add_trace(go.Bar(
        x=senti_result.index,
        y=senti_result.user_score,
        name="User Weighted Score",
        marker_color="lightslategray"),
        row=1, col=1)
    fig.add_trace(go.Bar(
        x=senti_result.index,
        y=senti_result.Positive,
        name="Positive count",
        marker_color="green"),
        row=2, col=1)
    fig.add_trace(go.Bar(
        x=senti_result.index,
        y=senti_result.Negative,
        name="Negative count",
        marker_color="red"),
        row=2, col=1)
    fig.update_layout(height=600, width=1200,
                      title_text=f"{self.key_word} pre-opening twitter sentiment")
    # fig.show()
    # save the graph
    # NOTE(review): os.mkdir fails if the parent 'data\\preopen' folder does
    # not exist -- os.makedirs(saveaddr, exist_ok=True) would be more robust.
    saveaddr = f'data\\preopen\\{self.today}'
    if not os.path.exists(saveaddr): os.mkdir(saveaddr)
    fig.write_image(f'{saveaddr}\\{self.key_word}.png')
@staticmethod
def plot_topics(topic, topic_heat):
    '''
    Line plot of how often ``topic`` is mentioned over time, with
    horizontal dash-dot reference lines at the series' historical
    25th, 50th and 75th percentiles; shows the figure and saves it
    as a PNG under data\\macro\\visual.
    '''
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=topic_heat.index, y=topic_heat.values,
                             mode='lines',
                             name='Topic Mentioned Times',
                             line=dict(color='black', width=1)))
    ## add horizontal quantile line (25th percentile)
    fig.add_shape(type="line", name='25th Percentile',
                  x0=topic_heat.index[0], y0=topic_heat.quantile(0.25),
                  x1=topic_heat.index[-1], y1=topic_heat.quantile(0.25),
                  line=dict(
                      color="LightSeaGreen",
                      width=2,
                      dash="dashdot",
                  )
                  )
    # add 75% line
    fig.add_shape(type="line", name='75th Percentile',
                  x0=topic_heat.index[0], y0=topic_heat.quantile(0.75),
                  x1=topic_heat.index[-1], y1=topic_heat.quantile(0.75),
                  line=dict(
                      color="LightSeaGreen",
                      width=2,
                      dash="dashdot",
                  )
                  )
    # add median
    fig.add_shape(type="line", name='Median',
                  x0=topic_heat.index[0], y0=topic_heat.quantile(0.5),
                  x1=topic_heat.index[-1], y1=topic_heat.quantile(0.5),
                  line=dict(
                      color="lightslategray",
                      width=2,
                      dash="dashdot",
                  )
                  )
    # title
    fig.update_layout(height=600, width=1200,
                      title_text=f"Topic: {topic}")
    fig.update_layout(showlegend=True)
    fig.show()
    # save plot
    fig.write_image(f'data\\macro\\visual\\topic{topic}.png')
@staticmethod
def plot_topicswprice(topic, topic_heat, pricer):
    '''
    Plot a topic's mention counts over time (top) alongside a price series
    labelled "QQQ/IWM" (bottom) in two stacked subplots, then show and
    save the figure.

    Fix: this function takes no ``self`` but was declared WITHOUT
    ``@staticmethod``, unlike its sibling ``plot_topics``. Calling it on a
    TwitterPlot *instance* silently bound the instance object to ``topic``.
    The decorator makes instance and class calls behave identically, and
    class-level calls are unchanged.
    '''
    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0, row_heights=[1, 1])
    fig.add_trace(go.Scatter(x=topic_heat.index, y=topic_heat.values,
                             mode='lines',
                             name='Topic Mentioned Times',
                             line=dict(color='red', width=1),
                             ),
                  row=1, col=1)
    fig.add_trace(go.Scatter(x=pricer.index, y=pricer.values,
                             mode='lines',
                             name='QQQ/IWM',
                             line=dict(color='black', width=1),
                             ),
                  row=2, col=1)
    # title
    fig.update_layout(height=600, width=1200,
                      title_text=f"Topic: {topic}")
    fig.update_layout(showlegend=True)
    fig.show()
    # save plot
    fig.write_image(f'data\\macro\\visual\\topic{topic}.png')
@staticmethod
def get_earning_within(ticker,all_sentiments):
"""search the earning events within the analysis date and 1 week after
"""
past_earning_time = news_sa.get_earning_news('NFLX','revenue')
earning_release_within = | pd.DataFrame(columns=["EstimatedEPS","ReportedEPS","Surprise"]) | pandas.DataFrame |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
    self,
    obj,
    window=None,
    min_periods: Optional[int] = None,
    center: Optional[bool] = False,
    win_type: Optional[str] = None,
    axis: Axis = 0,
    on: Optional[str] = None,
    closed: Optional[str] = None,
    **kwargs
):
    """Store the windowing parameters on the instance and validate them."""
    # Any extra keyword arguments are copied straight onto the instance.
    self.__dict__.update(kwargs)
    self.obj = obj
    self.on = on
    self.closed = closed
    self.window = window
    self.min_periods = min_periods
    self.center = center
    self.win_type = win_type
    self.win_freq = None
    # Resolve an axis label/number through the wrapped object; None stays None.
    self.axis = obj._get_axis_number(axis) if axis is not None else None
    self.validate()
@property
def _constructor(self):
    # Class used by _shallow_copy to rebuild this object.
    return Window

@property
def is_datetimelike(self) -> Optional[bool]:
    # Overridden by subclasses; the base window has no notion of this.
    return None

@property
def _on(self):
    # Overridden by subclasses that support the ``on`` column.
    return None

@property
def is_freq_type(self) -> bool:
    # True when the window was specified as a frequency/offset.
    return self.win_type == "freq"
def validate(self):
    """Validate center/min_periods/closed; raise ValueError on bad input."""
    if not (self.center is None or is_bool(self.center)):
        raise ValueError("center must be a boolean")
    if not (self.min_periods is None or is_integer(self.min_periods)):
        raise ValueError("min_periods must be an integer")
    allowed_closed = ("right", "both", "left", "neither")
    if self.closed is not None and self.closed not in allowed_closed:
        raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")
def _create_blocks(self):
    """
    Split data into blocks & return conformed data.
    """
    obj = self._selected_obj

    # filter out the on from the object: the 'on' column drives the window
    # but must not itself be aggregated.
    if self.on is not None:
        if obj.ndim == 2:
            obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
    blocks = obj._to_dict_of_blocks(copy=False).values()

    return blocks, obj
def _gotitem(self, key, ndim, subset=None):
    """
    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : str / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    # create a new object to prevent aliasing
    if subset is None:
        subset = self.obj
    self = self._shallow_copy(subset)
    self._reset_cache()
    if subset.ndim == 2:
        # NOTE(review): precedence is (is_scalar(key) and key in subset)
        # or is_list_like(key) -- list-like keys are accepted without a
        # membership check.
        if is_scalar(key) and key in subset or is_list_like(key):
            self._selection = key
    return self
def __getattr__(self, attr):
    """Fall back to internal names first, then treat ``attr`` as a column
    selection on the wrapped object."""
    if attr in self._internal_names_set:
        return object.__getattribute__(self, attr)
    if attr in self.obj:
        return self[attr]

    raise AttributeError(
        "%r object has no attribute %r" % (type(self).__name__, attr)
    )
def _dir_additions(self):
    # Surface the wrapped object's extra dir() entries (e.g. column names).
    return self.obj._dir_additions()

def _get_window(self, other=None):
    # Base implementation ignores ``other``; subclasses may use it.
    return self.window

@property
def _window_type(self) -> str:
    # Class name, used in repr and error messages.
    return self.__class__.__name__

def __repr__(self) -> str:
    """
    Provide a nice str repr of our rolling object.
    """
    # Only attributes that were actually set are shown.
    attrs = (
        "{k}={v}".format(k=k, v=getattr(self, k))
        for k in self._attributes
        if getattr(self, k, None) is not None
    )
    return "{klass} [{attrs}]".format(
        klass=self._window_type, attrs=",".join(attrs)
    )

def __iter__(self):
    # Iteration over window objects is intentionally unsupported.
    url = "https://github.com/pandas-dev/pandas/issues/11704"
    raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
    """
    Return index as an ndarray.

    Returns
    -------
    None or ndarray
        i8 values of the 'on' index for frequency-based windows,
        otherwise None.
    """
    if self.is_freq_type:
        return self._on.asi8
    return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
    """Convert input to numpy arrays for Cython routines"""
    if values is None:
        values = getattr(self._selected_obj, "values", self._selected_obj)

    # GH #12373 : rolling functions error on float32 data
    # make sure the data is coerced to float64
    if is_float_dtype(values.dtype):
        values = ensure_float64(values)
    elif is_integer_dtype(values.dtype):
        values = ensure_float64(values)
    elif needs_i8_conversion(values.dtype):
        # datetime/timedelta-like values are rejected outright.
        raise NotImplementedError(
            "ops for {action} for this "
            "dtype {dtype} are not "
            "implemented".format(action=self._window_type, dtype=values.dtype)
        )
    else:
        # Last resort: attempt the coercion and translate failures into
        # a TypeError for the caller.
        try:
            values = ensure_float64(values)
        except (ValueError, TypeError):
            raise TypeError(
                "cannot handle this type -> {0}" "".format(values.dtype)
            )

    # Always convert inf to nan
    values[np.isinf(values)] = np.NaN

    return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
    """
    Wrap a single result ndarray back into a Series/DataFrame matching
    the shape and index of the conformed object.
    """
    if obj is None:
        obj = self._selected_obj
    index = obj.index

    if isinstance(result, np.ndarray):

        # coerce if necessary: timedelta blocks were computed in ns i8
        # space and must be converted back.
        if block is not None:
            if is_timedelta64_dtype(block.values.dtype):
                from pandas import to_timedelta

                result = to_timedelta(result.ravel(), unit="ns").values.reshape(
                    result.shape
                )

        if result.ndim == 1:
            from pandas import Series

            return Series(result, index, name=obj.name)

        return type(obj)(result, index=index, columns=block.columns)
    return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
    """
    Wrap the results.

    Parameters
    ----------
    results : list of ndarrays
    blocks : list of blocks
    obj : conformed data (may be resampled)
    exclude: list of columns to exclude, default to None
    """
    from pandas import Series, concat
    from pandas.core.index import ensure_index

    final = []
    for result, block in zip(results, blocks):

        result = self._wrap_result(result, block=block, obj=obj)
        # A 1-dim result means a Series input; return it directly.
        if result.ndim == 1:
            return result
        final.append(result)

    # if we have an 'on' column
    # we want to put it back into the results
    # in the same location
    columns = self._selected_obj.columns
    if self.on is not None and not self._on.equals(obj.index):

        name = self._on.name
        final.append(Series(self._on, index=obj.index, name=name))

        if self._selection is not None:

            selection = ensure_index(self._selection)

            # need to reorder to include original location of
            # the on column (if its not already there)
            if name not in selection:
                columns = self.obj.columns
                indexer = columns.get_indexer(selection.tolist() + [name])
                columns = columns.take(sorted(indexer))

    # exclude nuisance columns so that they are not reindexed
    if exclude is not None and exclude:
        columns = [c for c in columns if c not in exclude]

        if not columns:
            raise DataError("No numeric types to aggregate")

    # Nothing survived aggregation: return an empty float frame.
    if not len(final):
        return obj.astype("float64")
    return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
    """
    Center the result in the window by shifting it left by half the
    window size along ``self.axis``.
    """
    if self.axis > result.ndim - 1:
        # NOTE(review): "larger then" is a typo in this user-facing
        # message ("than"); left untouched here since it is runtime text.
        raise ValueError(
            "Requested axis is larger then no. of argument " "dimensions"
        )

    offset = _offset(window, True)
    if offset > 0:
        if isinstance(result, (ABCSeries, ABCDataFrame)):
            result = result.slice_shift(-offset, axis=self.axis)
        else:
            # ndarray path: drop the first ``offset`` entries along axis.
            lead_indexer = [slice(None)] * result.ndim
            lead_indexer[self.axis] = slice(offset, None)
            result = np.copy(result[tuple(lead_indexer)])
    return result
def aggregate(self, func, *args, **kwargs):
    """Delegate to the generic _aggregate machinery; fall back to a
    row-wise apply when it cannot handle ``func``."""
    result, how = self._aggregate(func, *args, **kwargs)
    if result is None:
        return self.apply(func, raw=False, args=args, kwargs=kwargs)
    return result

# Alias matching the Series/DataFrame API.
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
    """Validate the window argument: explicit weights pass through,
    integer windows must be positive and require a valid scipy win_type."""
    super().validate()

    window = self.window
    if isinstance(window, (list, tuple, np.ndarray)):
        # An explicit weights array needs no further validation here.
        pass
    elif is_integer(window):
        if window <= 0:
            raise ValueError("window must be > 0 ")
        # scipy generates the weights for named window types.
        import_optional_dependency(
            "scipy", extra="Scipy is required to generate window weight."
        )
        import scipy.signal as sig

        if not isinstance(self.win_type, str):
            raise ValueError("Invalid win_type {0}".format(self.win_type))
        if getattr(sig, self.win_type, None) is None:
            raise ValueError("Invalid win_type {0}".format(self.win_type))
    else:
        raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
    """
    Provide validation for our window type, return the window
    we have already been validated.
    """
    window = self._get_window()
    if isinstance(window, (list, tuple, np.ndarray)):
        # Explicit weights were passed: use them as-is (as float).
        return com.asarray_tuplesafe(window).astype(float)
    elif is_integer(window):
        import scipy.signal as sig

        # the below may pop from kwargs
        def _validate_win_type(win_type, kwargs):
            # Required extra parameters per window type; they are popped
            # from kwargs and appended to the scipy window spec tuple.
            arg_map = {
                "kaiser": ["beta"],
                "gaussian": ["std"],
                "general_gaussian": ["power", "width"],
                "slepian": ["width"],
                "exponential": ["tau"],
            }

            if win_type in arg_map:
                win_args = _pop_args(win_type, arg_map[win_type], kwargs)
                if win_type == "exponential":
                    # exponential window requires the first arg (center)
                    # to be set to None (necessary for symmetric window)
                    win_args.insert(0, None)

                return tuple([win_type] + win_args)

            return win_type

        def _pop_args(win_type, arg_names, kwargs):
            # Pop each required argument; missing ones raise ValueError.
            msg = "%s window requires %%s" % win_type
            all_args = []
            for n in arg_names:
                if n not in kwargs:
                    raise ValueError(msg % n)
                all_args.append(kwargs.pop(n))
            return all_args

        win_type = _validate_win_type(self.win_type, kwargs)
        # GH #15662. `False` makes symmetric window, rather than periodic.
        return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
    """
    Applies a moving window of type ``window_type`` on the data.

    Parameters
    ----------
    mean : bool, default True
        If True computes weighted mean, else weighted sum

    Returns
    -------
    y : same type as input argument
    """
    window = self._prep_window(**kwargs)
    center = self.center

    blocks, obj = self._create_blocks()
    block_list = list(blocks)

    results = []
    exclude = []
    # Indices of non-numeric blocks that must be dropped from the output.
    skipped = []
    for i, b in enumerate(blocks):
        try:
            values = self._prep_values(b.values)

        except (TypeError, NotImplementedError):
            if isinstance(obj, ABCDataFrame):
                exclude.extend(b.columns)
                # BUG FIX: previously ``del block_list[i]`` ran here while
                # enumerating ``blocks``; after the first deletion, index
                # ``i`` no longer lined up with ``block_list``, so a second
                # failing block removed the WRONG entry. Record the index
                # and filter once after the loop instead.
                skipped.append(i)
                continue
            else:
                raise DataError("No numeric types to aggregate")

        if values.size == 0:
            results.append(values.copy())
            continue

        offset = _offset(window, center)
        additional_nans = np.array([np.NaN] * offset)

        def f(arg, *args, **kwargs):
            # When centering, pad with NaNs so the shifted result aligns.
            minp = _use_window(self.min_periods, len(window))
            return libwindow.roll_window(
                np.concatenate((arg, additional_nans)) if center else arg,
                window,
                minp,
                avg=mean,
            )

        result = np.apply_along_axis(f, self.axis, values)

        if center:
            result = self._center_window(result, window)
        results.append(result)

    # Drop the skipped (non-numeric) blocks, preserving the order of the rest.
    block_list = [b for i, b in enumerate(block_list) if i not in skipped]
    return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
    see_also=_agg_see_also_doc,
    examples=_agg_examples_doc,
    versionadded="",
    klass="Series/DataFrame",
    axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
    # delegate to the shared _aggregate machinery; a None result means
    # `arg` could not be dispatched there (e.g. a plain callable), so it
    # is applied directly to this window object
    result, how = self._aggregate(arg, *args, **kwargs)
    if result is None:
        # these must apply directly
        result = arg(self)
    return result

# alias kept for API parity with Series/DataFrame.agg
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
    # numpy-compat shim: reject unexpected positional/keyword arguments
    nv.validate_window_func("sum", args, kwargs)
    # weighted rolling sum (mean=False)
    return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
    # numpy-compat shim: reject unexpected positional/keyword arguments
    nv.validate_window_func("mean", args, kwargs)
    # weighted rolling mean (mean=True)
    return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
    """
    Provide the groupby facilities.
    """

    def __init__(self, obj, *args, **kwargs):
        parent = kwargs.pop("parent", None)  # noqa
        groupby = kwargs.pop("groupby", None)
        if groupby is None:
            # called directly on a groupby object: unwrap it
            groupby, obj = obj, obj.obj
        self._groupby = groupby
        # mark the groupby and its grouper as mutated so cached group
        # results are not reused downstream
        self._groupby.mutated = True
        self._groupby.grouper.mutated = True
        super().__init__(obj, *args, **kwargs)

    # fixed: this line was garbled in the source; dispatch `count` to the
    # underlying groupby implementation
    count = GroupByMixin._dispatch("count")
import pandas as pd
import pybedtools
import xarray as xr
import numpy as np
import dask
import warnings
import joblib
import subprocess
import pathlib
import yaml
import pyBigWig
from pybedtools import BedTool
from concurrent.futures import ProcessPoolExecutor, as_completed
from .region_ds_utilities import update_region_ds_config
from .utilities import determine_engine, obj_to_str, write_ordered_chunks
import os
from ALLCools.utilities import parse_chrom_size
os.environ["NUMEXPR_MAX_THREADS"] = "16"
def _bigwig_over_bed(bed: pd.DataFrame, path, value_type="mean", dtype="float32"):
    """Compute one bigWig summary statistic per region of `bed`.

    `bed` rows start with (chrom, start, end); returns a Series of
    `dtype` values, NaN where the bigWig query fails.
    """
    with pyBigWig.open(path, "r") as bw:

        def _stat_one(row, t=value_type):
            chrom, start, end, *_ = row
            try:
                return bw.stats(chrom, start, end, type=t)[0]
            except RuntimeError:
                # region error or chrom missing from the bigWig:
                # surface NaN and let the caller decide
                return np.NaN

        return bed.apply(_stat_one, t=value_type, axis=1).astype(dtype)
def _region_bed_sorted(bed_path, g, bed_sorted):
    """Load a BED file, drop chroms absent from genome file `g`, and
    return a BedTool (sorted unless `bed_sorted` says it already is)."""
    chrom_sizes = parse_chrom_size(g)
    bed_df = pd.read_csv(bed_path, sep="\t", index_col=None, header=None)
    # keep only rows whose chrom (first column) exists in the genome
    keep = bed_df.iloc[:, 0].isin(chrom_sizes.keys())
    bed = BedTool.from_dataframe(bed_df.loc[keep])
    return bed if bed_sorted else bed.sort(g=g)
def _bed_intersection(bed: pybedtools.BedTool, path, g, region_index, bed_sorted):
    """Intersect `bed` with the BED file at `path`; collect the names of
    regions with >= 20% overlap.

    NOTE(review): this function appears truncated in this chunk — the tail
    presumably maps `regions_idx` through `region_index`; verify against
    the full file.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        query_bed = _region_bed_sorted(path, g, bed_sorted)
        try:
            df = bed.intersect(
                query_bed, wa=True, f=0.2, g=g, sorted=True
            ).to_dataframe()
            if df.shape[0] == 0:
                regions_idx = pd.Series([])
            else:
                regions_idx = df["name"]
        except pd.errors.EmptyDataError:
            # fixed: this line was garbled in the source; an empty
            # intersection yields an empty Series, matching the
            # zero-row branch above
            regions_idx = pd.Series([])
import pandas as pd

# fixed: this line was garbled in the source; load the dataset from CSV
dataset = pd.read_csv('FixDataBind.csv')
# -*- coding: utf-8 -*-
# author: <NAME>
# Email: <EMAIL>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import generators
from __future__ import with_statement
import re
from bs4 import BeautifulSoup
from concurrent import futures
import os
import sys
import traceback
import time
import datetime
import pandas as pd
import requests
import json
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from fake_useragent import UserAgent
from openpyxl import load_workbook
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
############ Global variable initialization ##############
HEADERS = dict()
# number of concurrent worker threads (None lets the executor decide)
NUM_THREADS = None
# city selection: display name -> lianjia.com subdomain slug
city_dict = {
    "成都": "cd",
    "北京": "bj",
    "上海": "sh",
    "广州": "gz",
    "深圳": "sz",
    "南京": "nj",
    "合肥": "hf",
    "杭州": "hz",
}
# whether to print HTTP errors (enabled by passing 'true' as argv[1])
PRINT = True if ((len(sys.argv) > 1) and (sys.argv[1] == 'true')) else False
# fake User-Agent library initialization
ua = UserAgent()
# no proxy is used
proxies = None
# output directory for the daily snapshots
WORKPATH="/home/frank/workspace/lianjia/data"
# city whose listings are scraped (Beijing)
CITY = city_dict["北京"]
def get_bs_obj_from_url(http_url):
    """HTTP GET wrapper: fetch `http_url` and return a BeautifulSoup tree.

    Retries with a 1 s pause between attempts; gives up and returns None
    after 11 consecutive failures.
    """
    HEADERS["User-Agent"] = ua.random
    for _ in range(11):
        try:
            if PRINT:
                print("正在获取 {}".format(http_url))
            r = requests.get(http_url, headers=HEADERS, proxies=proxies, timeout=3)
            return BeautifulSoup(r.text, "lxml")
        except Exception as e:
            if PRINT:
                print(e)
            time.sleep(1)
    return None
def is_number(s):
    """Return True if `s` can be converted to a float (e.g. "12.3", "nan").

    Robustness fix: also catches TypeError so that None / non-string
    inputs report False instead of crashing.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
def esf_mkdir(path):
    """Create directory `path` (and parents) if missing.

    Returns True when the directory was created, False when it already
    existed. Trailing backslashes and surrounding whitespace are stripped.
    """
    cleaned = path.strip().rstrip("\\")
    if os.path.exists(cleaned):
        print("{} already exist.".format(cleaned))
        return False
    os.makedirs(cleaned)
    print("{} create successfully.".format(cleaned))
    return True
def get_district_from_city(city):
    """Scrape the district URL slugs for `city` from its ershoufang page."""
    print("---get {} districts---".format(city))
    http_url = "http://{}.lianjia.com".format(city) + "/ershoufang"
    bs_obj = get_bs_obj_from_url(http_url)
    districts = []
    # district links live in the data-role="ershoufang" navigation block
    for a in bs_obj.find("div", {"data-role": "ershoufang"}).find_all("a"):
        if a.attrs['href'].startswith("/ershoufang"):
            # "/ershoufang/<slug>/" -> "<slug>"
            districts.append(a.attrs["href"].replace("/ershoufang/", "")[:-1])
    print("---total {} districts---".format(len(districts)))
    return districts
def get_district_name_from_city(city):
    """Scrape the human-readable district names for `city`."""
    print("---get {} districts---".format(city))
    http_url = "http://{}.lianjia.com".format(city) + "/ershoufang"
    bs_obj = get_bs_obj_from_url(http_url)
    names = []
    # same navigation block as get_district_from_city, but keep link text
    for a in bs_obj.find("div", {"data-role": "ershoufang"}).find_all("a"):
        if a.attrs['href'].startswith("/ershoufang"):
            names.append(a.get_text())
    print("---total {} districts---".format(len(names)))
    return names
def get_esf_from_district(city, district):
    """Collect all listing ids for one district, scanning price bands 1..8.

    Returns a (possibly empty) list of listing-id strings.
    """
    http_url = "http://{}.lianjia.com/ershoufang/{}".format(city, district)
    bs_obj = get_bs_obj_from_url(http_url)
    esf_list = []
    try:
        total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
    except Exception as e:
        #try again
        try:
            bs_obj = get_bs_obj_from_url(http_url)
            total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
        except Exception as e:
            # second fetch failed too: give up on this district
            return esf_list
    print("---district {} total ershoufang numbers: {}---".format(district, total_esf_num))
    if total_esf_num == 0:
        print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
        return esf_list
    # iterate price bands p1..p8 — presumably to keep each query under the
    # site's pagination cap; TODO confirm
    for price in range(1, 9):
        esf_list_partial = get_esf_id_in_price(city, district, price)
        if esf_list_partial is not None and len(esf_list_partial) > 0:
            esf_list += esf_list_partial
    print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
    return esf_list
def get_esf_id_in_price(city, district, price):
    """Collect all listing ids for one district within price band `price`.

    Result pages are fetched concurrently; pages that yield nothing are
    retried once sequentially afterwards.
    """
    http_url = "http://{}.lianjia.com/ershoufang/{}/p{}".format(city, district, price)
    bs_obj = get_bs_obj_from_url(http_url)
    total_esf_num = 0
    try:
        total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
    except Exception as e:
        print(" price {} get error.".format(price))
        pass
    #print("------price {} total : {}---".format(price, total_esf_num))
    esf_list = []
    if total_esf_num == 0:
        print(" price {} finish---done.".format(price))
        return esf_list
    try:
        # total page count is stored as JSON in the pager's page-data attr
        page_box = bs_obj.find("div", {"class": "page-box house-lst-page-box"})
        total_pages = int(json.loads(page_box.attrs["page-data"])["totalPage"])
    except Exception as e:
        print(" price {} page get error.".format(price))
        return esf_list
    with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
        future_list = []
        for page_no in range(1, total_pages + 1):
            future_list.append(executor.submit(get_esf_id_in_page, city, district, price, page_no))
        fail_list = []
        count = 0
        # NOTE(review): future.result() is unpacked as a 2-tuple; confirm
        # get_esf_id_in_page always returns one, even on fetch failure.
        for future in futures.as_completed(future_list):
            page_no, esf_list_partial = future.result()
            if esf_list_partial is None or len(esf_list_partial) == 0:
                fail_list.append(page_no)
            else:
                esf_list += esf_list_partial
                count += 1
            sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
        # retry failed pages one by one, sequentially
        for page_no in fail_list:
            _, esf_list_partial = get_esf_id_in_page(city, district, price, page_no)
            if esf_list_partial is not None and len(esf_list_partial) > 0:
                esf_list += esf_list_partial
                count += 1
            sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
    print("---done.")
    return esf_list
def get_esf_id_in_page(city, district, price, page_no):
    """Return (page_no, listing-id list) for one result page.

    Returns (page_no, None) when the page could not be fetched, so that
    callers' ``page_no, esf_list = future.result()`` unpacking still works.
    """
    http_url = "http://{}.lianjia.com/ershoufang/{}/pg{}p{}".format(city, district, page_no, price)
    bs_obj = get_bs_obj_from_url(http_url)
    if bs_obj is None:
        print("get ershoufang id, price {} page {} is none".format(price, page_no))
        # BUG FIX: previously returned a bare None, which raised TypeError
        # in callers that unpack the result as a (page_no, esf_list) tuple
        return page_no, None
    esf_list = []
    for li in bs_obj.find_all("li", {"class": "clear"}):
        esf_url = str(li.find("div", {"class": "title"}).find("a").attrs["href"])
        # the numeric listing id is embedded in the detail-page URL
        esf_list.append("".join(filter(str.isdigit, esf_url)))
    return page_no, esf_list
def get_esf_of_city(city):
    """Collect listing ids for every district of `city`, deduplicated while
    preserving first-seen order."""
    esf_list = []
    for district in get_district_from_city(city):
        esf_list += get_esf_from_district(city, district)
    # dict.fromkeys keeps first-occurrence order; the previous
    # sorted(set(...), key=esf_list.index) was equivalent but O(n^2)
    return list(dict.fromkeys(esf_list))
def get_esf_info(city, esf_id):
    """Scrape one second-hand listing's detail page.

    Returns (esf_id, df) where df is a single-row DataFrame indexed by the
    listing id, or an empty DataFrame on any failure (404 page, non-numeric
    price/area, or parse error).
    """
    http_url = "https://{}.lianjia.com/ershoufang/{}.html".format(city, esf_id)
    bs_obj = get_bs_obj_from_url(http_url)
    df = pd.DataFrame()
    if bs_obj is not None:
        try:
            # a styled 404 block means the listing was taken down
            test = bs_obj.find("div", {"class": "icon-404 icon fl"})
            if test is not None:
                return esf_id, df
            total_price = bs_obj.find("span", {"class": "total"}).get_text()
            if not is_number(total_price):
                return esf_id, df
            unit_price = bs_obj.find("div", {"class": "unitPrice"}).get_text().replace("元/平米", "")
            huxing = bs_obj.find("div", {"class": "room"}).find("div", {"class": "mainInfo"}).get_text()
            xiaoqu = bs_obj.find("div", {"class": "communityName"}).find("a").get_text()
            area_info = bs_obj.find("div", {"class": "areaName"}).find_all("a")
            chengqu = area_info[0].get_text()
            quyu = area_info[1].get_text()
            base_info = bs_obj.find("div", {"class": "newwrap baseinform"})
            # Basic attributes: parsed by splitting the concatenated text of
            # the "base" panel on the Chinese field labels
            base = base_info.find("div", {"class": "base"}).get_text()
            louceng = None if "所在楼层" not in base else base.split("所在楼层")[1].split("(")[0]
            zonglouceng = None if "所在楼层" not in base else base.split("(共")[1].split("层")[0]
            jianzhumianji = None if "建筑面积" not in base else base.split("建筑面积")[1].split("㎡")[0]
            if not is_number(jianzhumianji):
                return esf_id, df
            huxingjiegou = None if "户型结构" not in base else base.split("户型结构")[1].split("\n")[0]
            if "套内面积" not in base:
                taoneimianji = None
            elif "暂无数据" in base.split("套内面积")[1].split("\n")[0]:
                # "no data" placeholder counts as missing
                taoneimianji = None
            else:
                taoneimianji = base.split("套内面积")[1].split("㎡")[0]
            jianzhuleixing = None if "建筑类型" not in base else base.split("建筑类型")[1].split("\n")[0]
            chaoxiang = None if "房屋朝向" not in base else base.split("房屋朝向")[1].split("\n")[0]
            jianzhujiegou = None if "建筑结构" not in base else base.split("建筑结构")[1].split("\n")[0]
            zhuangxiu = None if "装修情况" not in base else base.split("装修情况")[1].split("\n")[0]
            tihubili = None if "梯户比例" not in base else base.split("梯户比例")[1].split("\n")[0]
            gongnuan = None if "供暖方式" not in base else base.split("供暖方式")[1].split("\n")[0]
            dianti = None if "配备电梯" not in base else base.split("配备电梯")[1].split("\n")[0]
            chanquan = None if "产权年限" not in base else base.split("产权年限")[1].split("\n")[0]
            yongshui = "商水" if base_info.find(text="商水") is not None else "民水"
            yongdian = "商电" if base_info.find(text="商电") is not None else "民电"
            # Transaction attributes: same split-on-label parsing
            trans = base_info.find("div", {"class": "transaction"}).get_text()
            guapaishijian = None if "挂牌时间" not in trans else trans.split("挂牌时间")[1].strip().split("\n")[0]
            jiaoyiquanshu = None if "交易权属" not in trans else trans.split("交易权属")[1].strip().split("\n")[0]
            fangwuyongtu = None if "房屋用途" not in trans else trans.split("房屋用途")[1].strip().split("\n")[0]
            fangwunianxian = None if "房屋年限" not in trans else trans.split("房屋年限")[1].strip().split("\n")[0]
            chanquansuoshu = None if "产权所属" not in trans else trans.split("产权所属")[1].strip().split("\n")[0]
            diyaxinxi = None if "抵押信息" not in trans else trans.split("抵押信息")[1].strip().split("\n")[0]
            df = pd.DataFrame(index=[esf_id], data=[[http_url, chengqu, quyu, xiaoqu,
                                                     huxing, total_price, unit_price, jianzhumianji,
                                                     taoneimianji, chaoxiang, louceng, zonglouceng,
                                                     huxingjiegou, jianzhuleixing, jianzhujiegou,
                                                     fangwuyongtu, jiaoyiquanshu, fangwunianxian,
                                                     guapaishijian, zhuangxiu, tihubili, gongnuan,
                                                     dianti, chanquan, yongshui, yongdian,
                                                     chanquansuoshu, diyaxinxi]],
                              columns=["URL", "城区", "片区", "小区",
                                       "户型", "总价", "单价", "建筑面积",
                                       "套内面积", "朝向", "楼层", "总楼层",
                                       "户型结构", "建筑类型", "建筑结构",
                                       "房屋用途", "交易权属", "房屋年限",
                                       "挂牌时间", "装修", "梯户比例", "供暖",
                                       "配备电梯", "产权", "用水", "用电",
                                       "产权所属", "抵押信息"])
        except Exception as e:
            print("[E]: get_esf_info, esf_id =", esf_id, e)
            traceback.print_exc()
            pass
    return esf_id, df
def get_esf_info_from_esf_list(city, esf_list):
    """Fetch detail rows for every listing id, concurrently, into one DataFrame.

    Ids whose first fetch yields no data are retried once sequentially.
    NOTE(review): ``DataFrame.append`` was removed in pandas 2.0; this code
    requires an older pandas.
    """
    df_esf_info = pd.DataFrame()
    count = 0
    pct = 0
    with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
        future_list = []
        for esf in esf_list:
            future_list.append(executor.submit(get_esf_info, city, esf))
        fail_list = []
        #print(" ")
        for future in futures.as_completed(future_list):
            esf, df_info_partial = future.result()
            if len(df_info_partial) == 0:
                fail_list.append(esf)
            else:
                df_esf_info = df_esf_info.append(df_info_partial)
                count += 1
            sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
        # sequential second pass for ids that failed in the thread pool
        for esf in fail_list:
            _, df_info_partial = get_esf_info(city, esf)
            if len(df_info_partial) > 0:
                df_esf_info = df_esf_info.append(df_info_partial)
                count += 1
            sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
    print(" ")
    return df_esf_info
def compare_two_list(new_esf_list, old_esf_list):
    """Diff two listing-id lists.

    Returns (add_list, remove_list, same_list):
      add_list    -- ids only in new_esf_list (new listings)
      remove_list -- ids only in old_esf_list (delisted)
      same_list   -- ids present in both, in new_esf_list order
    """
    # set membership makes this O(n + m); the original `in list`
    # scans were O(n * m)
    old_set = set(old_esf_list)
    new_set = set(new_esf_list)
    add_list = []
    same_list = []
    for esf_id in new_esf_list:
        if esf_id not in old_set:
            add_list.append(esf_id)
        else:
            same_list.append(esf_id)
    remove_list = [esf_id for esf_id in old_esf_list if esf_id not in new_set]
    return add_list, remove_list, same_list
def excel_add_sheet(dataframe, filename, sheetname, indexname):
    """Append `dataframe` as a new sheet to an existing xlsx workbook.

    NOTE(review): relies on openpyxl-specific ExcelWriter internals
    (`excelwriter.path`, assigning `excelwriter.book`); this pattern only
    works with older pandas/openpyxl releases — verify before upgrading.
    """
    excelwriter = pd.ExcelWriter(filename)
    # load the existing workbook so to_excel adds a sheet instead of
    # overwriting the file
    book = load_workbook(excelwriter.path)
    excelwriter.book = book
    dataframe.to_excel(excelwriter, sheetname, index_label=indexname)
    excelwriter.close()
    return
def get_price_changed_esf_info(same_list, new_esf_info, old_esf_info):
    """Split listings present on both days into price-drop / price-rise frames.

    Returns (df_jiang, df_zhang): rows from `new_esf_info` annotated with
    the previous total/unit price and the absolute and relative change.
    """
    df_jiang = pd.DataFrame()
    df_zhang = pd.DataFrame()
    count = 0
    for esf_id in same_list:
        try:
            new_price = new_esf_info.loc[[esf_id]]["总价"].values[0]
            old_price = old_esf_info.loc[[esf_id]]["总价"].values[0]
            old_unit_price = old_esf_info.loc[esf_id]["单价"]
            new_info = new_esf_info.loc[[esf_id]]
            if new_price > old_price:
                # price rose: record old prices, absolute rise, percent rise
                new_info.insert(loc=6, column="原总价", value=old_price)
                new_info.insert(loc=7, column="涨价", value=(new_price-old_price))
                zhangfu=format(((new_price-old_price)/old_price), '.2%')
                new_info.insert(loc=8, column="涨幅", value=zhangfu)
                new_info.insert(loc=10, column="原单价", value=old_unit_price)
                df_zhang = df_zhang.append(new_info)
            elif new_price < old_price:
                # price dropped: symmetric bookkeeping
                new_info.insert(loc=6, column="原总价", value=old_price)
                new_info.insert(loc=7, column="降价", value=(old_price-new_price))
                diefu=format(((old_price-new_price)/old_price), '.2%')
                new_info.insert(loc=8, column="降幅", value=diefu)
                new_info.insert(loc=10, column="原单价", value=old_unit_price)
                df_jiang = df_jiang.append(new_info)
            else:
                pass
        except Exception as e:
            print("[E]: get_price_changed, esf_id =", esf_id, e)
            pass
        count += 1
        sys.stdout.write("\rget price change info: {}/{}".format(count, len(same_list)))
    print(" ")
    return df_jiang, df_zhang
def get_chengjiao_yesterday(city):
    """Sum yesterday's closed-deal counts across all districts of `city`.

    Districts whose page fails to load or lacks the stats block contribute
    zero to the total.
    """
    total = 0
    for district in get_district_from_city(city):
        http_url = 'https://{}.lianjia.com/fangjia/{}'.format(city, district)
        bs_obj = get_bs_obj_from_url(http_url)
        if bs_obj is None:
            continue
        item = bs_obj.find("div", {"class": "item item-1-2"})
        if item is None:
            continue
        num = item.find("div", {"class": "num"}).find("span").get_text()
        if "暂无数据" not in num:
            total += int(num)
    return total
def get_lianjia_fangjia_info(city):
    """Scrape lianjia's daily stats box for `city`.

    Returns (new listings, new clients, showings) as strings.
    Retries until the page parses — previously via unbounded recursion,
    which could overflow the stack on persistent failures; now a loop
    with identical retry-forever semantics.
    """
    while True:
        try:
            http_url = 'https://{}.lianjia.com/fangjia'.format(city)
            bs_obj = get_bs_obj_from_url(http_url)
            tongji = bs_obj.find("div", {"class": "box-l-b"})
            lj_all = tongji.find_all("div", {"class": "num"})
            lj_new = lj_all[0].get_text()
            lj_ren = lj_all[1].get_text()
            lj_kan = lj_all[2].get_text()
            return lj_new, lj_ren, lj_kan
        except Exception:
            # fetch/parse failed (layout change or bad response): try again
            continue
def get_tongji_info(city, filename):
    """Build a one-row daily summary DataFrame from today's report workbook.

    Reads the total/new/removed/drop/rise sheets from `filename` and adds
    overall and per-district average prices plus lianjia's own daily stats.
    """
    lj_new, lj_ren, lj_kan = get_lianjia_fangjia_info(city)
    chengjiao = get_chengjiao_yesterday(city)
    new_str = datetime.date.today().strftime('%Y-%m-%d')
    total_info = pd.read_excel(filename, sheet_name="total", index_col=0)
    total_list = total_info.index.values
    new_info = pd.read_excel(filename, sheet_name="新上", index_col=0)
    new_list = new_info.index.values
    rm_info = pd.read_excel(filename, sheet_name="下架", index_col=0)
    rm_list = rm_info.index.values
    jiang_info = pd.read_excel(filename, sheet_name="降价", index_col=0)
    jiang_list = jiang_info.index.values
    zhang_info = pd.read_excel(filename, sheet_name="涨价", index_col=0)
    zhang_list = zhang_info.index.values
    # overall mean price; presumably 总价 is in 万元 so *10000 converts
    # to 元 per m^2 — TODO confirm the unit convention
    junjia = format(sum(total_info['总价']) * 10000 / sum(total_info['建筑面积']), '.2f')
    # mean percentage drop/rise; "1.23%" strings -> fractions before averaging
    jiangfu = (jiang_info['降幅'].str.strip("%").astype(float)/100) if len(jiang_list) else 0
    junjiang = (format(sum(jiangfu) / len(jiangfu), '.2%')) if len(jiang_list) else 0
    zhangfu = (zhang_info['涨幅'].str.strip("%").astype(float)/100) if len(zhang_list) else 0
    junzhang = (format(sum(zhangfu) / len(zhangfu), '.2%')) if len(zhang_list) else 0
    data=[[len(total_list), junjia, chengjiao, len(new_list), len(rm_list),
           len(jiang_list), junjiang, len(zhang_list), junzhang, lj_new,
           lj_ren, lj_kan]]
    columns=['总数', '均价', '成交', '上架', '下架', '降价', '降幅', '涨价',
             '涨幅', '新上', '新客户', '带看']
    # append one average-price column per district
    name_list = get_district_name_from_city(city)
    for name in name_list:
        chengqu = total_info[total_info['城区']==name]
        avg_price = format(sum(chengqu['总价']) * 10000 /
                           sum(chengqu['建筑面积']), '.2f') if len(chengqu) else 0
        data[0].append(avg_price)
        columns.append(name)
    info = pd.DataFrame(index=[new_str], data=data, columns=columns)
    return info
def get_email_content(info):
    """Render the one-row statistics DataFrame `info` as an email body."""
    def v(col):
        # every statistic lives in the single row of `info`
        return info[col].values[0]

    parts = [
        '本期统计信息:\n',
        '线上总套数:{}套,'.format(v('总数')),
        '均价:{}元/平米\n'.format(v('均价')),
        '昨日成交数:{}套\n'.format(v('成交')),
        '新上房源数:{}套\n'.format(v('上架')),
        '下架房源数:{}套\n'.format(v('下架')),
        '降价房源数:{}套,'.format(v('降价')),
        '均降:{}\n'.format(v('降幅')),
        '涨价房源数:{}套,'.format(v('涨价')),
        '均涨:{}\n'.format(v('涨幅')),
        '\n',
        '链家统计信息:\n',
        '新增房源数:{}套\n'.format(v('新上')),
        '新增客户数:{}人\n'.format(v('新客户')),
        '新增带看数:{}次\n'.format(v('带看')),
    ]
    return ''.join(parts)
def addimg(src, imgid):
    """Load the image file at `src` as a MIMEImage part whose Content-ID
    is `imgid`, for inline embedding in an HTML email."""
    with open(src, 'rb') as fp:
        msgImage = MIMEImage(fp.read())
    msgImage.add_header('Content-ID', imgid)
    return msgImage
def send_email(content, filename):
    """Email the daily report via QQ SMTP.

    Builds an HTML body from `content` with two inline charts (total.jpg,
    chengqu.jpg) and attaches the workbook `filename`.
    """
    sender = '<EMAIL>'
    receivers = ['<EMAIL>']
    # SMTP credential is kept out of the repo in ../key
    key = open('../key', 'r').read()
    message = MIMEMultipart()
    message['From'] = sender
    message['Subject'] = Header(filename, 'utf-8')
    #message.attach(MIMEText(content, 'plain', 'utf-8'))
    # newline -> <br>, then reference the two inline images by Content-ID
    html = '<p>{}</p>'.format(content.replace('\n', '<br>'))
    html += '<p><img src="cid:image1"></p>'
    html += '<p><img src="cid:image2"></p>'
    message.attach(MIMEText(html, 'html', 'utf-8'))
    message.attach(addimg("total.jpg","image1"))
    message.attach(addimg("chengqu.jpg","image2"))
    # attach the xlsx report as a generic binary download
    att = MIMEText(open(filename, 'rb').read(), 'base64', 'utf-8')
    att["Content-Type"] = 'application/octet-stream'
    att_str = 'attachment; filename={}'.format(filename)
    att["Content-Disposition"] = att_str
    message.attach(att)
    try:
        smtpObj = smtplib.SMTP('smtp.qq.com')
        smtpObj.login(sender, key)
        smtpObj.sendmail(sender, receivers, message.as_string())
        print("send email successfully.")
    except smtplib.SMTPException:
        print("send email failed.")
    return
def get_tongji_plot(filename):
    """Render the history sheet ('统计') into total.jpg and chengqu.jpg
    for inline embedding in the report email."""
    info = pd.read_excel(filename, sheet_name="统计", index_col=0)
    info = info.sort_index()
    try:
        # overall time series: listing count, mean price, deals
        info.plot(x=pd.to_datetime(info.index), y=['总数', '均价', '成交'],
                  marker='.', subplots=True, grid=True, figsize=(12,9))
        plt.savefig('total.jpg', bbox_inches='tight')
        # one subplot per district average-price column
        name_list = get_district_name_from_city(CITY)
        info.plot(x=pd.to_datetime(info.index), y=name_list,
                  marker='.', subplots=True, grid=True, figsize=(12,3*len(name_list)))
        plt.savefig('chengqu.jpg', bbox_inches='tight')
    except Exception as e:
        print("get tongji plot failed", e)
    return
def get_esf_location_by_index(index, http_url):
    """Query the Baidu geocoder at `http_url` and return (index, lng, lat).

    Returns 0.0/0.0 coordinates when the fetch or the XML parse fails.
    """
    bs_obj = get_bs_obj_from_url(http_url)
    if bs_obj is None:
        print("get location failed, index={}".format(index))
        return index, 0.0, 0.0
    lng, lat = 0.0, 0.0
    try:
        lng = float(bs_obj.find('lng').get_text())
        lat = float(bs_obj.find('lat').get_text())
    except Exception:
        # malformed geocoder response: keep the 0.0 sentinel values
        print("get lng/lat failed. bs_obj={}".format(bs_obj))
    return index, lng, lat
def get_esf_location(filename):
    """Geocode every listing in the 'total' sheet via the Baidu geocoder API.

    Appends two columns (经度/longitude, 纬度/latitude) to the sheet and saves
    the workbook in place. Rows that fail in the thread pool are retried
    once sequentially.
    """
    # API key is kept out of the repo in ../ak
    ak = open('../ak', 'r').read().replace('\n', '')
    wb = load_workbook(filename)
    ws = wb.get_sheet_by_name('total')
    max_row = ws.max_row
    max_col = ws.max_column
    ws.cell(row=1, column=max_col+1, value='经度')
    ws.cell(row=1, column=max_col+2, value='纬度')
    count = 0
    with futures.ThreadPoolExecutor(max_workers=None) as executor:
        future_list = []
        for index in range(2, max_row+1):
            # columns 3/5 hold the district and the community name
            chengqu = ws.cell(row=index, column=3).value
            xiaoqu = ws.cell(row=index, column=5).value
            location = '北京市{}区{}'.format(chengqu, xiaoqu)
            if location is None:
                print("get location failed, index={}".format(index))
                continue
            http_url = 'http://api.map.baidu.com/geocoder/v2/?address={}&ak={}'.format(location, ak)
            future_list.append(executor.submit(get_esf_location_by_index, index, http_url))
        fail_list = []
        for future in futures.as_completed(future_list):
            idx, lng, lat = future.result()
            if lng == 0.0:
                fail_list.append(idx)
            else:
                ws.cell(row=idx, column=max_col+1).value = format(lng, '.6f')
                ws.cell(row=idx, column=max_col+2).value = format(lat, '.6f')
                count += 1
                sys.stdout.write("\rget location info: {}/{}...".format(count, max_row-1))
        for idx in fail_list:
            # BUG FIX: this retry loop previously read row `index` (the stale
            # value left over from the submission loop) instead of the failed
            # row `idx`, geocoding the wrong listing
            chengqu = ws.cell(row=idx, column=3).value
            xiaoqu = ws.cell(row=idx, column=5).value
            location = '北京市{}区{}'.format(chengqu, xiaoqu)
            if location is None:
                print("get location failed, index={}".format(idx))
                continue
            http_url = 'http://api.map.baidu.com/geocoder/v2/?address={}&ak={}'.format(location, ak)
            _, lng, lat = get_esf_location_by_index(idx, http_url)
            ws.cell(row=idx, column=max_col+1).value = format(lng, '.6f')
            ws.cell(row=idx, column=max_col+2).value = format(lat, '.6f')
            count += 1
            sys.stdout.write("\rget location info: {}/{}...".format(count, max_row-1))
    print("done.")
    wb.save(filename)
    return
def main():
    """Daily pipeline: snapshot today's listings and diff against yesterday.

    NOTE(review): this function appears truncated in this chunk — the diff
    steps presumably continue after loading yesterday's snapshot.
    """
    ###########################################################
    # N steps in total, run in order.
    # While running step 1, comment out the other steps, and so on.
    ###########################################################
    os.chdir(WORKPATH)
    if not PRINT:
        # redirect stdout to the log file when not in verbose mode
        log_file = open('../log', 'a')
        sys.stdout = log_file
    # 1. make new dir
    print("\n1. getting date info...")
    today = datetime.date.today()
    yesterday = today - datetime.timedelta(days=1)
    new_str = today.strftime('%Y-%m-%d')
    old_str = yesterday.strftime('%Y-%m-%d')
    new_file = "{}_info_{}.xlsx".format(CITY, new_str)
    old_file = "{}_info_{}.xlsx".format(CITY, old_str)
    print("today: {}, yesterday: {}.".format(new_str, old_str))
    # 2. get ershoufang id of the city
    print("\n2.getting ershoufang list...")
    esf_list = get_esf_of_city(CITY)
    with open("{}_list_{}.txt".format(CITY, new_str), mode="w") as f:
        for esf in esf_list:
            f.write(esf + "\n")
    print("ershoufang list write finished.")
    # 3. get ershoufang info
    print("\n3. getting ershoufang info...")
    with open("{}_list_{}.txt".format(CITY, new_str), mode="r") as f:
        esf_list = [int(line[:-1]) for line in f.readlines()]
    print("get ershoufang info start...")
    df_esf_info = get_esf_info_from_esf_list(CITY, esf_list)
    writer = pd.ExcelWriter(new_file)
    df_esf_info.to_excel(writer, "total")
    writer.save()
    try:
        # the id list is only an intermediate artifact; best-effort cleanup
        os.remove("{}_list_{}.txt".format(CITY, new_str))
    except Exception as e:
        pass
    print("ershoufang info write finished.")
    # 4. find new ershoufang list and info
    print("\n4. getting different ershoufang list...")
    df_esf_info = pd.read_excel(new_file, sheet_name="total", index_col=0)
    new_esf_list = df_esf_info.index.values
    # fixed: this line was garbled in the source; load yesterday's snapshot
    df_esf_info = pd.read_excel(old_file, sheet_name="total", index_col=0)
# pylint: disable=missing-docstring
import pytest
import numpy as np
import pandas as pd
from tidy_hl7_msgs.helpers import (
concat, flatten, zip_nested, are_lens_equal, are_segs_identical,
are_nested_lens_equal, zip_msg_ids, trim_rows, to_df, join_dfs
)
def test_are_lens_equal():
    # equal-length lists compare True; mismatched lengths compare False
    equal = are_lens_equal([1, 2, 3], [1, 2, 3])
    unequal = are_lens_equal([1, 2, 3], [1, 2, 3, 4])
    assert equal is True
    assert unequal is False
def test_are_nested_lens_equal():
    # matching inner lengths
    assert are_nested_lens_equal([[1], [1]], [[1], [1]]) is True
    # second inner list differs in length
    assert are_nested_lens_equal([[1], [1]], [[1], [1, 2]]) is False
def test_are_segs_identical():
    # every location references the same segment (DG1) -> identical
    same_lst = ['DG1.3.1', 'DG1.3.2', 'DG1.6']
    same_dict = {'DG1.3.1': 'loc_1', 'DG1.3.2': 'loc_2', 'DG1.6': 'loc_3'}
    assert are_segs_identical(same_lst) is True
    assert are_segs_identical(same_dict) is True
    # mixed segments (DG1 and PID) -> not identical
    mixed_lst = ['DG1.3.1', 'DG1.3.2', 'PID.3.4']
    mixed_dict = {'DG1.3.1': 'loc_1', 'DG1.3.2': 'loc_2', 'PID.3.4': 'loc_3'}
    assert are_segs_identical(mixed_lst) is False
    assert are_segs_identical(mixed_dict) is False
def test_flatten():
    # one level of nesting is removed; deeper nesting is preserved
    cases = [
        ([[1, 2], [3, 4]], [1, 2, 3, 4]),
        ([[1, 2, [3, 4]]], [1, 2, [3, 4]]),
        ([[1, 2], []], [1, 2]),
        ([], []),
    ]
    for nested, expected in cases:
        assert flatten(nested) == expected
def test_zip_nested():
    # element-wise zip of parallel nested lists
    assert zip_nested([['a', 'b']], [['y', 'z']]) == [[('a', 'y'), ('b', 'z')]]
    expected = [[('a', 'w'), ('b', 'x')], [('c', 'y'), ('d', 'z')]]
    assert zip_nested([['a', 'b'], ['c', 'd']], [['w', 'x'], ['y', 'z']]) == expected
    # mismatched inner lengths must raise
    with pytest.raises(AssertionError):
        zip_nested([['a', 'b']], [['x', 'y', 'z']])
def test_concat():
    # single input collapses to its inner list
    assert concat([[['a', 'b']]]) == ['a', 'b']
    # element-wise comma-join across parallel nested lists
    assert concat([[['a', 'b']], [['y', 'z']], [['s', 't']]]) == ['a,y,s', 'b,z,t']
    # mismatched inner lengths must raise
    with pytest.raises(AssertionError):
        concat([[['a', 'b']], [['x', 'y', 'z']]])
def test_zip_msg_ids():
    # mismatched lengths of ids vs. values must raise
    with pytest.raises(AssertionError):
        zip_msg_ids(['a', 'b', 'c'], ['y', 'z'])
def test_trim_rows():
    # pylint: disable=invalid-name
    df = pd.DataFrame(data={
        'msg_id': ['123', '123', '123'],
        'col1': [1, 2, np.nan],
        'col2': [3, 4, np.nan],
    })
    # message '123' has only 2 segments, so the trailing NaN row is trimmed
    assert len(trim_rows(df, {'123': 2})) == 2
def test_to_df():
    # pylint: disable=invalid-name
    expected_df = pd.DataFrame(data={
        'msg_id': ['msg_id1', 'msg_id2', 'msg_id2'],
        'seg': ['1', '1', '2'],
        'report_loc': ['val1', 'val1', 'val2'],
    })
    df = to_df([('msg_id1', ['val1']), ('msg_id2', ['val1', 'val2'])], "report_loc")
    # compare column by column against the expected frame
    for col in ('msg_id', 'seg', 'report_loc'):
        assert all(df[col].values == expected_df[col].values)
def test_join_dfs():
    # pylint: disable=invalid-name
    # NOTE(review): this test appears truncated in this chunk — a second
    # frame and the join_dfs assertions presumably follow.
    d1 = {
        'msg_id': ['msg_id1', 'msg_id2', 'msg_id2'],
        'seg': ['seg1', 'seg1', 'seg2'],
        'report_loc1': ['a', 'b', 'c'],
    }
    # fixed: this line was garbled in the source
    df1 = pd.DataFrame(data=d1)
import pandas as pd
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import backend as K
from keras.utils import to_categorical
from keras.layers import Dense, Conv2D, Flatten,MaxPooling2D
from keras.layers import Dropout
from keras.callbacks import EarlyStopping
from keras.models import load_model
import seaborn as sns
# seaborn styling for the training-history plots below
sns.set(color_codes=True)
pal = sns.color_palette("Set2", 10)
sns.set_palette(pal)
# fixed: this line was garbled in the source; load the pickled dataset
df = pd.read_pickle('data2.pkl')
"""
Really, mostly data getters.
get_toi1937_lightcurve
get_groundphot
get_autorotation_dataframe
get_gaia_basedata
_get_nbhd_dataframes
_get_fullfaint_dataframes
_get_fullfaint_edr3_dataframes
_get_denis_fullfaint_edr3_dataframes
_get_extinction_dataframes
_get_median_ngc2516_core_params
get_denis_xmatch
append_phot_binary_column
PleaidesQuadProtModel
"""
import os, collections, pickle
import numpy as np, pandas as pd
from glob import glob
from copy import deepcopy
from numpy import array as nparr
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astroquery.vizier import Vizier
from astroquery.xmatch import XMatch
import cdips.utils.lcutils as lcu
import cdips.lcproc.detrend as dtr
import cdips.lcproc.mask_orbit_edges as moe
from cdips.utils.catalogs import (
get_cdips_catalog, get_tic_star_information
)
from cdips.utils.gaiaqueries import (
query_neighborhood, given_source_ids_get_gaia_data,
given_dr2_sourceids_get_edr3_xmatch
)
from earhart.paths import PHOTDIR, RESULTSDIR, DATADIR
def get_toi1937_lightcurve():
    """
    Create the stitched CDIPS FFI light curve for TOI 1937. (Starting from the
    raw light curves, and the PCA eigenvectors previously made for this
    sector). Note: the main execution of this PCA detrending happens on
    phtess2.

    A few notes:
        * 3 eigenvectors were used, plus the background light BGV timeseries.
        * a +/-12 hour orbit edge mask was used (to avoid what looked like
          scattered light)
        * the output can be checked at
          /results/quicklook_lcs/5489726768531119616_allvar_report.pdf

    Returns
    -------
    (time, flux, fluxerr, texp) : orbit-edge-masked arrays; time is
    BJD_TDB - 2457000, flux/fluxerr are the aperture-2 PCA light curve,
    texp is the median cadence of the masked series (days).
    """
    picklepath = os.path.join(
        PHOTDIR, 'toi1937_merged_stitched_s7s9_lc_20201130.pkl'
    )
    # expensive detrend+stitch runs once; later calls hit the pickle cache
    if not os.path.exists(picklepath):
        # Use the CDIPS IRM2 light curves as starting base.
        # 5489726768531119616_s09_llc.fits
        lcpaths = glob(os.path.join(PHOTDIR, '*_s??_llc.fits'))
        assert len(lcpaths) == 2
        infodicts = [
            {'SECTOR': 7, 'CAMERA': 3, 'CCD': 4, 'PROJID': 1527},
            {'SECTOR': 9, 'CAMERA': 3, 'CCD': 3, 'PROJID': 1558},
        ]
        ##########################################
        # next ~45 lines pinched from cdips.drivers.do_allvariable_report_making
        ##########################################
        #
        # detrend systematics. each light curve yields tuples of:
        #   primaryhdr, data, ap, dtrvecs, eigenvecs, smooth_eigenvecs
        #
        dtr_infos = []
        for lcpath, infodict in zip(lcpaths, infodicts):
            dtr_info = dtr.detrend_systematics(
                lcpath, infodict=infodict, max_n_comp=3
            )
            dtr_infos.append(dtr_info)
        #
        # stitch all available light curves
        #
        ap = dtr_infos[0][2]
        timelist = [d[1]['TMID_BJD'] for d in dtr_infos]
        maglist = [d[1][f'PCA{ap}'] for d in dtr_infos]
        magerrlist = [d[1][f'IRE{ap}'] for d in dtr_infos]
        extravecdict = {}
        extravecdict[f'IRM{ap}'] = [d[1][f'IRM{ap}'] for d in dtr_infos]
        for i in range(0,7):
            extravecdict[f'CBV{i}'] = [d[3][i, :] for d in dtr_infos]
        time, flux, fluxerr, vec_dict = lcu.stitch_light_curves(
            timelist, maglist, magerrlist, extravecdict
        )
        #
        # mask orbit edges (+/- 12 hours)
        #
        s_time, s_flux, inds = moe.mask_orbit_start_and_end(
            time, flux, raise_expectation_error=False, orbitgap=0.7,
            orbitpadding=12/24,
            return_inds=True
        )
        s_fluxerr = fluxerr[inds]
        #
        # save output
        #
        ap = dtr_infos[0][2]
        lcdict = {
            'source_id': np.int64(5489726768531119616),
            'E_BpmRp': 0.1343,
            'ap': ap,
            'TMID_BJD': time,
            f'IRM{ap}': vec_dict[f'IRM{ap}'],
            f'PCA{ap}': flux,
            f'IRE{ap}': fluxerr,
            'STIME': s_time.astype(np.float64),
            f'SPCA{ap}': s_flux.astype(np.float64),
            f'SPCAE{ap}': s_fluxerr.astype(np.float64),
            'dtr_infos': dtr_infos,
            'vec_dict': vec_dict,
            'tess_texp': np.nanmedian(np.diff(s_time))
        }
        with open(picklepath , 'wb') as f:
            pickle.dump(lcdict, f)
        #
        # verify output
        #
        from cdips.plotting.allvar_report import make_allvar_report
        plotdir = os.path.join(RESULTSDIR, 'quicklook_lcs')
        outd = make_allvar_report(lcdict, plotdir)
    with open(picklepath, 'rb') as f:
        print(f'Found {picklepath}: loading it!')
        lcdict = pickle.load(f)
    return (
        lcdict['STIME'].astype(np.float64) - 2457000,
        lcdict['SPCA2'].astype(np.float64),
        lcdict['SPCAE2'].astype(np.float64),
        lcdict['tess_texp']
    )
def get_groundphot(datestr=None):
    """Return ground-based photometry for the night matching *datestr*.

    Args:
        datestr (str): date string globbed for under PHOTDIR/collected
            (e.g. '20200129'). NOTE(review): the None default globs for
            '*None*', which will fail the one-match assertion below —
            callers are expected to pass a real string.

    Returns:
        tuple: (x_obs, y_obs, y_err, t_exp) — times in BJD_TDB, relative
        fluxes and uncertainties, and the median exposure spacing [days].

    Raises:
        NotImplementedError: for reduction formats that are recognized but
            unsupported, or not recognized at all.
    """
    lcglob = os.path.join(PHOTDIR, 'collected',
                          f'*{datestr}*.txt')
    lcpath = glob(lcglob)
    assert len(lcpath) == 1
    lcpath = lcpath[0]
    if 'epdlc' in lcpath:
        # LCOGT reduced by Joel Hartman format.
        colnames = [
            "frameid", "time_bjd_UTC_minus_2400000", "raw_mag_ap1",
            "raw_mag_err_ap1", "quality_ap1", "raw_mag_ap2", "raw_mag_err_ap2",
            "quality_ap2", "raw_mag_ap3", "raw_mag_err_ap3", "quality_ap3",
            "fit_mag_ap1", "fit_mag_ap2", "fit_mag_ap3", "epd_mag_ap1",
            "epd_mag_ap2", "epd_mag_ap3", "x_px", "y_px", "bkgd",
            "bkgd_deviation", "S", "D", "K", "hour_angle", "zenith_distance",
            "time_JD_UTC"
        ]
        df = pd.read_csv(lcpath, delim_whitespace=True, names=colnames,
                         comment='#')
        # TT = TAI + 32.184 = UTC + (number of leap seconds) + 32.184
        # TDB ~= TT
        # for these data the leap second list indicates 37 is the correct
        # number: https://www.ietf.org/timezones/data/leap-seconds.list
        t_offset = (37 + 32.184)*u.second
        x_obs_bjd_utc = np.array(df["time_bjd_UTC_minus_2400000"]) + 2400000
        # return times in BJD_TDB
        x_obs = x_obs_bjd_utc + float(t_offset.to(u.day).value)
        y_obs, y_err = (
            lcu._given_mag_get_flux(df['fit_mag_ap1'], df["raw_mag_err_ap1"])
        )
        t_exp = np.nanmedian(np.diff(x_obs))
    elif 'El_Sauce' in lcpath:
        # <NAME>'s El Sauce reduction format.
        raise NotImplementedError
    else:
        # BUGFIX: previously this fell through and raised a confusing
        # UnboundLocalError on the return statement below.
        raise NotImplementedError(f'Unrecognized reduction format: {lcpath}')
    return x_obs, y_obs, y_err, t_exp
def get_gaia_basedata(basedata):
    """Fetch (nbhd_df, core_df, halo_df, full_df, target_df) for a sample.

    *basedata* selects which sample definition to use: 'fullfaint',
    'fullfaint_edr3', or 'bright'. 'extinctioncorrected' is reserved but
    not yet implemented. The full member list is annotated with the
    photometric-binary column before being returned.
    """
    if basedata == 'extinctioncorrected':
        raise NotImplementedError('need to implement extinction')

    getters = {
        'fullfaint': _get_fullfaint_dataframes,
        'fullfaint_edr3': _get_fullfaint_edr3_dataframes,
        'bright': _get_nbhd_dataframes,
    }
    if basedata not in getters:
        raise NotImplementedError

    nbhd_df, core_df, halo_df, full_df, target_df = getters[basedata]()

    # Annotate photometric binaries on the full member list.
    full_df = append_phot_binary_column(full_df)

    return nbhd_df, core_df, halo_df, full_df, target_df
def _get_nbhd_dataframes():
    """Return nbhd_df, core_df, halo_df, full_df, target_df ("bright" sample).

    WARNING!: this "bright" subset is a crossmatch between the full NGC 2516
    target list (CG18+KC19+M21) and the CDIPS target catalog (G_Rp<16; v0.4).
    Since the CDIPS targets didn't incorporate M21, it's not as direct a
    match as desired. This is fine for understanding the auto-detection of
    rotation periods, but not for overall cluster rotation period
    completeness.

    The "neighborhood" was selected via
        bounds = { 'parallax_lower': 1.5, 'parallax_upper': 4.0,
                   'ra_lower': 108, 'ra_upper': 132,
                   'dec_lower': -76, 'dec_upper': -45 }
        nbhd_df = query_neighborhood(bounds, groupname, n_max=6000,
                                     overwrite=False, manual_gmag_limit=17)

    This procedure yields:
        Got 7052 neighbors with Rp<16
        Got 893 in core from CDIPS target catalog
        Got 1345 in corona from CDIPS target catalog
    """
    cdips_cat = get_cdips_catalog(ver=0.4)

    # Start from the full-faint sample, then restrict to CDIPS v0.4 targets.
    nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_dataframes()

    # "Bright" selection: inner-join the full member list against the CDIPS
    # target-star catalog (implicitly missing some Meingast stars that were
    # not in the CDIPS v0.4 target list).
    cdips_ids = cdips_cat['source_id']
    bright_df = full_df.merge(cdips_ids, on='source_id', how='inner')

    nbhd_df = nbhd_df[nbhd_df.phot_rp_mean_mag < 16]
    core_df = bright_df[bright_df.subcluster == 'core']
    halo_df = bright_df[bright_df.subcluster == 'halo']

    print(42*'.')
    print('"Bright" sample:')
    print(f'...Got {len(nbhd_df)} neighbors with Rp<16')
    print(f'...Got {len(core_df)} in core from CDIPS target catalog')
    print(f'...Got {len(halo_df)} in corona from CDIPS target catalog')
    print(42*'.')

    return nbhd_df, core_df, halo_df, full_df, target_df
def _get_fullfaint_dataframes():
    """
    Return: nbhd_df, core_df, halo_df, full_df, target_df
    (for NGC 2516, "full faint" sample -- i.e., as faint as possible.)
    The "core" is all available Cantat-Gaudin 2018 members, with no magnitude
    cutoff.
    The "halo" is the full Kounkel & Covey 2019 + Meingast 2021 member set,
    provided that the source is not in the core. (i.e., KC19 and M21 get no
    points for getting the "core" targets correct).
    The "neighborhood" was selected via
    bounds = { 'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
    'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45 }
    nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
    overwrite=False, manual_gmag_limit=19)
    This procedure yields:
    Got 1106 in fullfaint CG18
    Got 3003 in fullfaint KC19
    Got 1860 in fullfaint M21
    Got 1912 in fullfaint KC19 after removing core matches
    Got 1096 in fullfaint M21 after removing core matches
    Got 280 in fullfaint M21 after removing KC19 matches
    Got 13834 neighbors
    Got 1106 in core
    Got 2192 in corona
    Got 1091 KC19 / CG18 overlaps
    Got 764 M21 / CG18 overlaps
    Got 3298 unique sources in the cluster.
    """
    # get the full CG18 NGC 2516 memberships, downloaded from Vizier
    cg18path = os.path.join(DATADIR, 'gaia',
                            'CantatGaudin2018_vizier_only_NGC2516.fits')
    hdul = fits.open(cg18path)
    cg18_tab = Table(hdul[1].data)
    cg18_df = cg18_tab.to_pandas()
    # Vizier names the Gaia id column 'Source'; alias it for the joins below.
    cg18_df['source_id'] = cg18_df['Source']
    # get the full KC19 NGC 2516 memberships, from Marina's file
    # NGC 2516 == "Theia 613" in Kounkel's approach.
    kc19path = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
    kc19_df = pd.read_csv(kc19path)
    kc19_df = kc19_df[kc19_df.group_id == 613]
    # get the full M21 NGC 2516 memberships
    m21path = os.path.join(DATADIR, 'gaia', 'Meingast_2021_NGC2516_all1860members.fits')
    m21_df = Table(fits.open(m21path)[1].data).to_pandas()
    m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
    print(f'Got {len(cg18_df)} in fullfaint CG18')
    print(f'Got {len(kc19_df)} in fullfaint KC19')
    print(f'Got {len(m21_df)} in fullfaint M21')
    # Enforce catalog precedence CG18 > KC19 > M21: strip any star already
    # claimed by a higher-precedence catalog, keeping overlaps for bookkeeping.
    kc19_cg18_overlap_df = kc19_df[(kc19_df.source_id.isin(cg18_df.source_id))]
    kc19_df = kc19_df[~(kc19_df.source_id.isin(cg18_df.source_id))]
    print(f'Got {len(kc19_df)} in fullfaint KC19 after removing core matches')
    m21_cg18_overlap_df = m21_df[(m21_df.source_id.isin(cg18_df.source_id))]
    m21_df = m21_df[~(m21_df.source_id.isin(cg18_df.source_id))]
    print(f'Got {len(m21_df)} in fullfaint M21 after removing core matches')
    m21_df = m21_df[~(m21_df.source_id.isin(kc19_df.source_id))]
    print(f'Got {len(m21_df)} in fullfaint M21 after removing KC19 matches')
    ##########
    # NGC 2516 rough
    bounds = {
        'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
        'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45
    }
    groupname = 'customngc2516_fullfaint'
    nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
                                 overwrite=False, manual_gmag_limit=19)
    # query gaia DR2 to get the fullfaint photometry
    kc19_df_0 = given_source_ids_get_gaia_data(
        np.array(kc19_df.source_id),
        'ngc2516_kc19_earhart_fullfaint', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True
    )
    cg18_df_0 = given_source_ids_get_gaia_data(
        np.array(cg18_df.Source),
        'ngc2516_cg18_earhart_fullfaint', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True
    )
    m21_df_0 = given_source_ids_get_gaia_data(
        np.array(m21_df.source_id),
        'ngc2516_m21_earhart_fullfaint', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True
    )
    # Every queried source id must have come back from the archive.
    assert len(cg18_df) == len(cg18_df_0)
    assert len(kc19_df) == len(kc19_df_0)
    assert len(m21_df) == len(m21_df_0)
    target_df = kc19_df_0[kc19_df_0.source_id == 5489726768531119616] # TIC 2683...
    # Neighbors are everything in the field that is NOT a claimed member.
    sel_nbhd = (
        (~nbhd_df.source_id.isin(kc19_df.source_id))
        &
        (~nbhd_df.source_id.isin(cg18_df.source_id))
        &
        (~nbhd_df.source_id.isin(m21_df.source_id))
    )
    orig_nbhd_df = deepcopy(nbhd_df)
    nbhd_df = nbhd_df[sel_nbhd]
    print(f'Got {len(nbhd_df)} neighbors')
    print(f'Got {len(cg18_df)} in core')
    print(f'Got {len(kc19_df)+len(m21_df)} in corona')
    print(f'Got {len(kc19_cg18_overlap_df)} KC19 / CG18 overlaps')
    print(f'Got {len(m21_cg18_overlap_df)} M21 / CG18 overlaps')
    #
    # wrap up into the full source list
    #
    cg18_df_0['subcluster'] = 'core'
    kc19_df_0['subcluster'] = 'halo'
    m21_df_0['subcluster'] = 'halo'
    core_df = cg18_df_0
    # NOTE(review): reset_index() without drop=True keeps the old index as an
    # 'index' column; downstream code may rely on that — confirm before changing.
    halo_df = pd.concat((kc19_df_0, m21_df_0)).reset_index()
    full_df = pd.concat((core_df, halo_df)).reset_index()
    assert len(np.unique(full_df.source_id)) == len(full_df)
    print(f'Got {len(full_df)} unique sources in the cluster.')
    full_df['in_CG18'] = full_df.source_id.isin(cg18_df.source_id)
    # Re-read the *unfiltered* KC19 and M21 member lists so the membership
    # flags reflect original catalog membership, not the precedence-filtered
    # sets built above.
    kc19_df = pd.read_csv(kc19path)
    kc19_df = kc19_df[kc19_df.group_id == 613]
    full_df['in_KC19'] = full_df.source_id.isin(kc19_df.source_id)
    m21_df = Table(fits.open(m21path)[1].data).to_pandas()
    m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
    full_df['in_M21'] = full_df.source_id.isin(m21_df.source_id)
    return nbhd_df, core_df, halo_df, full_df, target_df
def _get_fullfaint_edr3_dataframes():
    """
    Return: nbhd_df, core_df, halo_df, full_df, target_df
    (for NGC 2516, "full faint" sample -- i.e., as faint as possible, but
    ***after crossmatching the GAIA DR2 targets with GAIA EDR3***. This
    crossmatch is run using the dr2_neighbourhood table from the Gaia archive,
    and then taking the closest angular separation match for cases with
    multiple matches.)
    Further notes are in "_get_fullfaint_dataframes" docstring.
    This procedure yields:
    FOR DR2:
    Got 1106 in fullfaint CG18
    Got 3003 in fullfaint KC19
    Got 1860 in fullfaint M21
    Got 1912 in fullfaint KC19 after removing core matches
    Got 1096 in fullfaint M21 after removing core matches
    Got 280 in fullfaint M21 after removing KC19 matches
    Got 13834 neighbors
    Got 1106 in core
    Got 2192 in corona
    Got 1091 KC19 / CG18 overlaps
    Got 764 M21 / CG18 overlaps
    FOR EDR3:
    Got 1106 EDR3 matches in core.
    99th pct [arcsec] 1577.8 -> 0.3
    Got 1912 EDR3 matches in KC19.
    99th pct [arcsec] 1702.8 -> 0.5
    Got 280 EDR3 matches in M21.
    99th pct [arcsec] 1426.6 -> 0.3
    Got 13843 EDR3 matches in nbhd.
    99th pct [arcsec] 1833.9 -> 3.7
    (((
    CG18/core: got 1143 matches vs 1106 source id queries.
    KC19/halo: got 2005 matches vs 1912 source id queries
    Nbhd: got 15123 matches vs 13843 source id queries.
    )))
    """
    # get the full CG18 NGC 2516 memberships, downloaded from Vizier
    cg18path = os.path.join(DATADIR, 'gaia',
                            'CantatGaudin2018_vizier_only_NGC2516.fits')
    hdul = fits.open(cg18path)
    cg18_tab = Table(hdul[1].data)
    cg18_df = cg18_tab.to_pandas()
    # Vizier names the Gaia id column 'Source'; alias it for the joins below.
    cg18_df['source_id'] = cg18_df['Source']
    # get the full KC19 NGC 2516 memberships, from Marina's file
    # NGC 2516 == "Theia 613" in Kounkel's approach.
    kc19path = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
    kc19_df = pd.read_csv(kc19path)
    kc19_df = kc19_df[kc19_df.group_id == 613]
    # get the full M21 NGC 2516 memberships
    m21path = os.path.join(DATADIR, 'gaia', 'Meingast_2021_NGC2516_all1860members.fits')
    m21_df = Table(fits.open(m21path)[1].data).to_pandas()
    m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
    print(42*'='+'\nFOR DR2:')
    print(f'Got {len(cg18_df)} in fullfaint CG18')
    print(f'Got {len(kc19_df)} in fullfaint KC19')
    print(f'Got {len(m21_df)} in fullfaint M21')
    # Enforce catalog precedence CG18 > KC19 > M21, as in the DR2 sibling.
    kc19_cg18_overlap_df = kc19_df[(kc19_df.source_id.isin(cg18_df.source_id))]
    kc19_df = kc19_df[~(kc19_df.source_id.isin(cg18_df.source_id))]
    print(f'Got {len(kc19_df)} in fullfaint KC19 after removing core matches')
    m21_cg18_overlap_df = m21_df[(m21_df.source_id.isin(cg18_df.source_id))]
    m21_df = m21_df[~(m21_df.source_id.isin(cg18_df.source_id))]
    print(f'Got {len(m21_df)} in fullfaint M21 after removing core matches')
    m21_df = m21_df[~(m21_df.source_id.isin(kc19_df.source_id))]
    print(f'Got {len(m21_df)} in fullfaint M21 after removing KC19 matches')
    ##########
    # NGC 2516 rough
    bounds = {
        'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
        'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45
    }
    groupname = 'customngc2516_fullfaint'
    nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
                                 overwrite=False, manual_gmag_limit=19)
    # Neighbors are everything in the field that is NOT a claimed member.
    sel_nbhd = (
        (~nbhd_df.source_id.isin(kc19_df.source_id))
        &
        (~nbhd_df.source_id.isin(cg18_df.source_id))
        &
        (~nbhd_df.source_id.isin(m21_df.source_id))
    )
    orig_nbhd_df = deepcopy(nbhd_df)
    nbhd_df = nbhd_df[sel_nbhd]
    print(f'Got {len(nbhd_df)} neighbors')
    print(f'Got {len(cg18_df)} in core')
    print(f'Got {len(kc19_df)+len(m21_df)} in corona')
    print(f'Got {len(kc19_cg18_overlap_df)} KC19 / CG18 overlaps')
    print(f'Got {len(m21_cg18_overlap_df)} M21 / CG18 overlaps')
    # Sanity check: after precedence filtering, the three lists are disjoint.
    assert (
        len(cg18_df)+len(kc19_df)+len(m21_df) ==
        len(np.unique(np.array(pd.concat((cg18_df, kc19_df, m21_df))['source_id'])))
    )
    # Crossmatch each DR2 source list against EDR3 via dr2_neighbourhood.
    cg18_df_edr3 = (
        given_dr2_sourceids_get_edr3_xmatch(
            nparr(cg18_df.Source).astype(np.int64), 'fullfaint_ngc2516_cg18_df',
            overwrite=False)
    )
    kc19_df_edr3 = (
        given_dr2_sourceids_get_edr3_xmatch(
            nparr(kc19_df.source_id).astype(np.int64), 'fullfaint_ngc2516_kc19_df',
            overwrite=False)
    )
    m21_df_edr3 = (
        given_dr2_sourceids_get_edr3_xmatch(
            nparr(m21_df.source_id).astype(np.int64), 'fullfaint_ngc2516_m21_df',
            overwrite=False)
    )
    nbhd_df_edr3 = (
        given_dr2_sourceids_get_edr3_xmatch(
            nparr(nbhd_df.source_id).astype(np.int64), 'fullfaint_ngc2516_nbhd_df',
            overwrite=False)
    )
    print(42*'='+'\nFOR EDR3:')
    # Take the closest (proper motion and epoch-corrected) angular distance as
    # THE single match.
    get_edr3_xm = lambda _df: (
        _df.sort_values(by='angular_distance').
        drop_duplicates(subset='dr2_source_id', keep='first')
    )
    s_cg18_df_edr3 = get_edr3_xm(cg18_df_edr3)
    s_kc19_df_edr3 = get_edr3_xm(kc19_df_edr3)
    s_m21_df_edr3 = get_edr3_xm(m21_df_edr3)
    s_nbhd_df_edr3 = get_edr3_xm(nbhd_df_edr3)
    print(f'Got {len(s_cg18_df_edr3)} EDR3 matches in core.\n'+
          f'99th pct [arcsec] {np.nanpercentile(cg18_df_edr3.angular_distance, 99):.1f} -> {np.nanpercentile(s_cg18_df_edr3.angular_distance, 99):.1f}')
    print(f'Got {len(s_kc19_df_edr3)} EDR3 matches in KC19.\n'+
          f'99th pct [arcsec] {np.nanpercentile(kc19_df_edr3.angular_distance, 99):.1f} -> {np.nanpercentile(s_kc19_df_edr3.angular_distance, 99):.1f}')
    print(f'Got {len(s_m21_df_edr3)} EDR3 matches in M21.\n'+
          f'99th pct [arcsec] {np.nanpercentile(m21_df_edr3.angular_distance, 99):.1f} -> {np.nanpercentile(s_m21_df_edr3.angular_distance, 99):.1f}')
    print(f'Got {len(s_nbhd_df_edr3)} EDR3 matches in nbhd.\n'+
          f'99th pct [arcsec] {np.nanpercentile(nbhd_df_edr3.angular_distance, 99):.1f} -> {np.nanpercentile(s_nbhd_df_edr3.angular_distance, 99):.1f}')
    # Finally, query Gaia EDR3 to get the latest and greatest fullfaint
    # photometry
    kc19_df_0 = given_source_ids_get_gaia_data(
        np.array(s_kc19_df_edr3.dr3_source_id),
        'fullfaint_ngc2516_kc19_df_edr3', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True, gaia_datarelease='gaiaedr3'
    )
    cg18_df_0 = given_source_ids_get_gaia_data(
        np.array(s_cg18_df_edr3.dr3_source_id),
        'fullfaint_ngc2516_cg18_df_edr3', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True, gaia_datarelease='gaiaedr3'
    )
    m21_df_0 = given_source_ids_get_gaia_data(
        np.array(s_m21_df_edr3.dr3_source_id),
        'fullfaint_ngc2516_m21_df_edr3', n_max=10000, overwrite=False,
        enforce_all_sourceids_viable=True, gaia_datarelease='gaiaedr3'
    )
    nbhd_df_0 = given_source_ids_get_gaia_data(
        np.array(s_nbhd_df_edr3.dr3_source_id),
        'fullfaint_ngc2516_nbhd_df_edr3', n_max=15000, overwrite=False,
        enforce_all_sourceids_viable=True, gaia_datarelease='gaiaedr3'
    )
    # Every DR2 source must have exactly one surviving EDR3 counterpart.
    assert len(cg18_df) == len(cg18_df_0)
    assert len(kc19_df) == len(kc19_df_0)
    assert len(m21_df) == len(m21_df_0)
    assert len(nbhd_df) == len(nbhd_df_0)
    # nb. these "source_ids" are now EDR3 source_ids.
    np.testing.assert_array_equal(np.array(kc19_df_0.source_id),
                                  np.array(kc19_df_0.source_id_2))
    np.testing.assert_array_equal(np.array(cg18_df_0.source_id),
                                  np.array(cg18_df_0.source_id_2))
    np.testing.assert_array_equal(np.array(m21_df_0.source_id),
                                  np.array(m21_df_0.source_id_2))
    np.testing.assert_array_equal(np.array(nbhd_df_0.source_id),
                                  np.array(nbhd_df_0.source_id_2))
    # Carry the originating DR2 id alongside each EDR3 row.
    kc19_df_0['dr2_source_id'] = nparr(s_kc19_df_edr3['dr2_source_id']).astype(np.int64)
    cg18_df_0['dr2_source_id'] = nparr(s_cg18_df_edr3['dr2_source_id']).astype(np.int64)
    m21_df_0['dr2_source_id'] = nparr(s_m21_df_edr3['dr2_source_id']).astype(np.int64)
    nbhd_df_0['dr2_source_id'] = nparr(s_nbhd_df_edr3['dr2_source_id']).astype(np.int64)
    # NOTE(review): the literal below is the target's *DR2* id, while
    # source_id here holds EDR3 ids — confirm the EDR3 id is identical.
    target_df = kc19_df_0[kc19_df_0.source_id == 5489726768531119616] # TIC 2683...
    #
    # wrap up into the full source list
    #
    cg18_df_0['subcluster'] = 'core'
    kc19_df_0['subcluster'] = 'halo'
    m21_df_0['subcluster'] = 'halo'
    core_df = cg18_df_0
    halo_df = pd.concat((kc19_df_0, m21_df_0)).reset_index()
    full_df = pd.concat((core_df, halo_df)).reset_index()
    assert len(np.unique(full_df.source_id)) == len(full_df)
    print(f'Got {len(full_df)} unique sources in the cluster.')
    # NOTE(review): full_df.source_id holds EDR3 ids while cg18/kc19/m21 hold
    # DR2 ids, and kc19_df/m21_df are the precedence-filtered sets (the DR2
    # sibling function re-reads the unfiltered catalogs here). Confirm whether
    # these flags should compare against full_df.dr2_source_id instead.
    full_df['in_CG18'] = full_df.source_id.isin(cg18_df.source_id)
    full_df['in_KC19'] = full_df.source_id.isin(kc19_df.source_id)
    full_df['in_M21'] = full_df.source_id.isin(m21_df.source_id)
    # Presumably aliases the DR2-style RV column name expected downstream —
    # TODO confirm. NOTE(review): assigning on a filtered slice may trigger
    # pandas' SettingWithCopyWarning.
    nbhd_df['dr2_radial_velocity'] = nbhd_df['radial_velocity']
    return nbhd_df, core_df, halo_df, full_df, target_df
def _get_denis_fullfaint_edr3_dataframes():
targetpath = '../data/denis/target_gaia_denis_xm.csv'
cg18path = '../data/denis/cg18_gaia_denis_xm.csv'
kc19path = '../data/denis/kc19_gaia_denis_xm.csv'
nbhdpath = '../data/denis/nbhd_gaia_denis_xm.csv'
return (
pd.read_csv(nbhdpath),
pd.read_csv(cg18path),
| pd.read_csv(kc19path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created 2020.05.22.
Script for doing group level analysis of fidelity and true/false positive rates.
@author: rouhinen
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from tqdm import tqdm
import pandas as pd
from fidelityOpMinimal import fidelity_estimation, make_series_paired, source_fid_to_weights
"""Load source identities, forward and inverse operators from npy. """
subjectsPath = 'C:\\temp\\fWeighting\\fwSubjects_p\\'
sourceIdPattern = '\\sourceIdentities_parc2018yeo7_XYZ.npy' # XYZ is replaced below.
sourceFidPattern = '\\sourceFidelities_MEEG_parc2018yeo7_XYZ.npy'
savePathBase = "C:\\temp\\fWeighting\\plotDump\\schaeferXYZ "
forwardPattern = '\\forwardOperatorMEEG.npy'
inversePattern = '\\inverseOperatorMEEG.npy'
XYZto = '100'
n_samples = 10000
n_cut_samples = 40
widths = np.arange(5, 6)
# Source fidelity to weights settings
exponent = 2
normalize = True
flips = False
# Save and plotting settings
savePDFs = True
tightLayout = True
""" Replace XYZ """
sourceIdPattern = sourceIdPattern.replace('XYZ', XYZto)
sourceFidPattern = sourceFidPattern.replace('XYZ', XYZto)
savePathBase = savePathBase.replace('XYZ', XYZto)
def get_tp_fp_rates(cp_PLV, truth_matrix):
    """ROC-style true/false positive rates of cp_PLV against truth_matrix.

    Thresholds are taken from the sorted data values themselves, subsampled
    to roughly 200 points, with the global maximum appended so the curve
    reaches (0, 0). Returns (tpRate, fpRate) arrays, one entry per threshold.
    """
    values = np.sort(np.ravel(cp_PLV))
    # Subsample to ~200 thresholds: step size is ceil(len/200).
    step = -(-len(values) // 200)
    thresholds = np.append(values[0:-1:step], np.max(cp_PLV))

    n_thresh = len(thresholds)
    tpRate = np.zeros(n_thresh, dtype=float)
    fpRate = np.zeros(n_thresh, dtype=float)

    negatives = np.logical_not(truth_matrix)
    n_positive = np.sum(truth_matrix)
    for i, threshold in enumerate(thresholds):
        predicted = cp_PLV > threshold
        t_pos = np.sum(predicted * truth_matrix)
        f_pos = np.sum(predicted * negatives)
        t_neg = np.sum(np.logical_not(predicted) * negatives)
        f_neg = n_positive - t_pos
        tpRate[i] = t_pos / (t_pos + f_neg)
        fpRate[i] = f_pos / (f_pos + t_neg)
    return tpRate, fpRate
def find_nearest_index(array, value):
    """Return the index of the element of *array* closest to *value*."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
def get_nearest_tp_semi_bin(binArray, tpRate, fpRate):
    """Resample a ROC curve onto the fixed FP grid *binArray*.

    For each target FP value, take the TP rate at the nearest measured
    FP rate (nearest-neighbor lookup rather than interpolation).
    """
    nearestTP = np.zeros(len(binArray))
    for i, fp_target in enumerate(binArray):
        nearestTP[i] = tpRate[find_nearest_index(fpRate, fp_target)]
    return nearestTP
def delete_diagonal(symm_matrix):
    """Drop the diagonal of an (n, n) matrix, returning an (n, n-1) array."""
    off_diagonal = ~np.eye(symm_matrix.shape[0], dtype=bool)
    return symm_matrix[off_diagonal].reshape(symm_matrix.shape[0], -1)
def get_n_parcels(identities):
    """Number of distinct non-negative parcel IDs (-1 marks 'no parcel')."""
    return len({parcel for parcel in identities if parcel >= 0})
## Create "bins" for X-Axis.
n_bins = 101
binArray = np.logspace(-2, 0, n_bins-1, endpoint=True) # Values from 0.01 to 1
binArray = np.concatenate(([0], binArray)) # Add 0 to beginning
## Get subjects list, and first subject's number of parcels.
subjects = next(os.walk(subjectsPath))[1]
if any('_Population' in s for s in subjects):
subjects.remove('_Population')
subjectFolder = os.path.join(subjectsPath, subjects[0])
sourceIdFile = glob.glob(subjectFolder + sourceIdPattern)[0]
identities = np.load(sourceIdFile) # Source length vector. Expected ids for parcels are 0 to n-1, where n is number of parcels, and -1 for sources that do not belong to any parcel.
n_parcels = get_n_parcels(identities)
## Initialize arrays
fidWArray = np.zeros((len(subjects), n_parcels), dtype=float)
fidOArray = np.zeros((len(subjects), n_parcels), dtype=float)
tpWArray = np.zeros((len(subjects), n_bins), dtype=float)
tpOArray = np.zeros((len(subjects), n_bins), dtype=float)
sizeArray = []
### Loop over subjects. Insert values to subject x parcels/bins arrays.
for run_i, subject in enumerate(tqdm(subjects)):
## Load files
subjectFolder = os.path.join(subjectsPath, subject)
fileSourceIdentities = glob.glob(subjectFolder + sourceIdPattern)[0]
fileForwardOperator = glob.glob(subjectFolder + forwardPattern)[0]
fileInverseOperator = glob.glob(subjectFolder + inversePattern)[0]
fileSourceFidelities = glob.glob(subjectFolder + sourceFidPattern)[0]
identities = np.load(fileSourceIdentities) # Source length vector. Expected ids for parcels are 0 to n-1, where n is number of parcels, and -1 for sources that do not belong to any parcel.
forward = np.matrix(np.load(fileForwardOperator)) # sensors x sources
inverse = np.matrix(np.load(fileInverseOperator)) # sources x sensors
sourceFids = np.load(fileSourceFidelities) # sources
# identities = np.genfromtxt(fileSourceIdentities, dtype='int32', delimiter=delimiter) # Source length vector. Expected ids for parcels are 0 to n-1, where n is number of parcels, and -1 for sources that do not belong to any parcel.
# forward = np.matrix(np.genfromtxt(fileForwardOperator, dtype='float', delimiter=delimiter)) # sensors x sources
# inverse = np.matrix(np.genfromtxt(fileInverseOperator, dtype='float', delimiter=delimiter)) # sources x sensors
# sourceFids = np.genfromtxt(fileSourceFidelities, dtype='float', delimiter=delimiter) # sources
weights = source_fid_to_weights(sourceFids, exponent=exponent, normalize=normalize,
inverse=inverse, identities=identities, flips=flips)
inverse_w = np.einsum('ij,i->ij', inverse, weights)
n_parcels = get_n_parcels(identities)
if run_i == 0:
prior_n_parcels = n_parcels
else:
if prior_n_parcels == n_parcels:
print('Running subject ' + subject)
else:
print('Mismatch in number of parcels between subjects!')
""" Get fidelities from unpaired data. """
# inverse_w, _ = _compute_weights(sourceSeries, parcelSeries, identities, inverse)
fidelityW, _ = fidelity_estimation(forward, inverse_w, identities)
fidelityO, _ = fidelity_estimation(forward, inverse, identities)
""" Do network estimation. Get cross-patch PLV values from paired data"""
parcelSeriesPairs, pairs = make_series_paired(n_parcels, n_samples)
_, cp_PLVPW = fidelity_estimation(forward, inverse_w, identities, parcel_series=parcelSeriesPairs)
_, cp_PLVPO = fidelity_estimation(forward, inverse, identities, parcel_series=parcelSeriesPairs)
# Do the cross-patch PLV estimation for unmodeled series
cp_PLVU = np.zeros([n_parcels, n_parcels], dtype=np.complex128)
for t in range(n_samples):
parcelPLVn = parcelSeriesPairs[:,t] / np.abs(parcelSeriesPairs[:,t])
cp_PLVU += np.outer(parcelPLVn, np.conjugate(parcelPLVn)) /n_samples
cp_PLVUim = np.abs(np.imag(cp_PLVU))
# Build truth matrix from pairs.
truthMatrix = np.zeros((n_parcels, n_parcels), dtype=bool)
for i, pair in enumerate(pairs):
truthMatrix[pair[0], pair[1]] = True
truthMatrix[pair[1], pair[0]] = True
# Delete diagonal from truth and estimated matrices
truthMatrix = delete_diagonal(truthMatrix)
cp_PLVPW = delete_diagonal(cp_PLVPW)
cp_PLVPO = delete_diagonal(cp_PLVPO)
# Use imaginary PLV for the estimation.
cp_PLVWim = np.abs(np.imag(cp_PLVPW))
cp_PLVOim = np.abs(np.imag(cp_PLVPO))
## True positive and false positive rate estimation.
tpRateW, fpRateW = get_tp_fp_rates(cp_PLVWim, truthMatrix)
tpRateO, fpRateO = get_tp_fp_rates(cp_PLVOim, truthMatrix)
# Get nearest TP values closest to the FP pair at the "bin" values.
nearTPW = get_nearest_tp_semi_bin(binArray, tpRateW, fpRateW)
nearTPO = get_nearest_tp_semi_bin(binArray, tpRateO, fpRateO)
fidWArray[run_i,:] = fidelityW
fidOArray[run_i,:] = fidelityO
tpWArray[run_i,:] = nearTPW
tpOArray[run_i,:] = nearTPO
sizeArray.append(len(identities)) # Approximate head size with number of sources.
print(f'gain of fidelities. Mean/mean {np.mean(fidWArray)/np.mean(fidOArray)}')
print(f'gain of fidelities. Mean(fidelityW/fidelityO) {np.mean(fidWArray/fidOArray)}')
### Statistics.
fidWAverage = np.average(fidWArray, axis=0)
fidWStd = np.std(fidWArray, axis=0)
fidOAverage = np.average(fidOArray, axis=0)
fidOStd = np.std(fidOArray, axis=0)
tpWAverage = np.average(tpWArray, axis=0)
tpWStd = np.std(tpWArray, axis=0)
tpOAverage = np.average(tpOArray, axis=0)
tpOStd = np.std(tpOArray, axis=0)
### TEMP testing for sort first, then average and SD.
fidWArraySorted = np.sort(fidWArray, axis=1)
fidWAverageSorted = np.average(fidWArraySorted, axis=0)
fidWStdSorted = np.std(fidWArraySorted, axis=0)
fidOArraySorted = np.sort(fidOArray, axis=1)
fidOAverageSorted = np.average(fidOArraySorted, axis=0)
fidOStdSorted = np.std(fidOArraySorted, axis=0)
""" Plots """
# Set global figure parameters, including CorelDraw compatibility (.fonttype)
import matplotlib.pylab as pylab
if tightLayout == True:
params = {'legend.fontsize':'7',
'figure.figsize':(1.6, 1),
'axes.labelsize':'7',
'axes.titlesize':'7',
'xtick.labelsize':'7',
'ytick.labelsize':'7',
'lines.linewidth':'0.5',
'pdf.fonttype':42,
'ps.fonttype':42,
'font.family':'Arial'}
else: # Looks nice on the screen parameters
params = {'legend.fontsize':'7',
'figure.figsize':(3, 2),
'axes.labelsize':'7',
'axes.titlesize':'7',
'xtick.labelsize':'7',
'ytick.labelsize':'7',
'lines.linewidth':'0.5',
'pdf.fonttype':42,
'ps.fonttype':42,
'font.family':'Arial'}
pylab.rcParams.update(params)
parcelList = list(range(0, n_parcels))
""" Plot Fidelities. """
# Sort according to original fidelity
sorting = np.argsort(fidOAverage)
meansWF = pd.DataFrame(fidWAverage[sorting])
stdsWF = pd.DataFrame(fidWStd[sorting])
meansOF = pd.DataFrame(fidOAverage[sorting])
stdsOF = | pd.DataFrame(fidOStd[sorting]) | pandas.DataFrame |
import os
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from yaml import load
from sklearn.metrics import accuracy_score, confusion_matrix
from network.functions_output_training import *
# Load configuration file
with open("config.yml") as yaml_config:
config = load(yaml_config)
# Import data
history = pickle.load(open(os.path.join(config['weight_path'], 'train_history_dict'), "rb" ))
test_data = pd.read_csv(config['predictions_output_path'] +'test_data.csv', sep = ';')
predictions = pd.read_csv(config['predictions_output_path'] +'test_predictions.csv', sep = ';')
data = | pd.concat([test_data, predictions], axis=1) | pandas.concat |
import pandas as pd
from sklearn import tree
from sklearn import preprocessing
import nested_cv
def base_classifier(traitar_model, phenotype_feature_table, features, phenotype, out, do_normalization, get_misclassified_selected):
"""get base classifier for each feature"""
model = pd.read_csv(traitar_model, sep = "\t", index_col = 0)
sel_feats = model.index
table = pd.read_csv(phenotype_feature_table, sep = "\t", index_col = 0)
feats = pd.read_csv(features, sep = "\t", index_col = 0).index
#target
pt_notnull = | pd.notnull(table.loc[:, phenotype]) | pandas.notnull |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import copy as cp
from datetime import datetime
import geopandas as gpd
import pickle
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
dict_cat = 'category_6'
lookup = | pd.read_csv(wd + 'data/raw/Geography/Conversion_Lookups/UK_full_lookup_2001_to_2011.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import matplotlib.pyplot as plt
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
#下采样
def lowSampling(df, percent=3/3):
    """Randomly down-sample the majority class (label 1 in column 0).

    All minority-class (label 0) rows are kept. ``percent`` times the
    number of non-majority rows of majority samples are drawn *with
    replacement* (np.random.randint may repeat indices), so the result
    is non-deterministic unless the NumPy RNG is seeded.
    """
    majority = df[df[0] == 1]
    minority = df[df[0] == 0]
    # Random row positions into the majority class (repeats possible).
    n_draw = int(percent * (len(df) - len(majority)))
    drawn = np.random.randint(len(majority), size=n_draw)
    sampled_majority = majority.iloc[list(drawn)]
    return(pd.concat([sampled_majority, minority]))
#上采样
def upSampling(train):
    """SVM-SMOTE oversample *train* to a balanced class ratio.

    Column 0 of *train* is treated as the label; the remaining columns as
    features. NOTE(review): kind=/ratio= and fit_sample are the legacy
    imbalanced-learn (<0.4) API — confirm the pinned version before upgrading.
    """
    oversampler = SMOTE(kind='svm', ratio=1)
    X_train, y_train = oversampler.fit_sample(train[:, 1:], train[:, 0])
    return X_train, y_train
def drawConfusionM(y_pred, y_test, title):
    """Render an annotated confusion-matrix heatmap for the predictions."""
    matrix = confusion_matrix(y_test, y_pred)
    axes = sns.heatmap(matrix, annot=True, fmt='')
    axes.set_title(title)
    axes.set_ylabel('True label')
    axes.set_xlabel('Predicted label')
    plt.show()
def loadData(SamplingMethode):
# 读取数据
train = | pd.read_csv("data.csv", header=0) | pandas.read_csv |
import pandas as pd
import numpy as np
def create_empty_df(columns, dtypes, index=None):
    """Build a zero-row DataFrame whose columns carry the given dtypes.

    Columns are assigned one-by-one from empty Series so that each keeps
    its requested dtype (a non-empty *index* fills with NaN as before).
    """
    frame = pd.DataFrame(index=index)
    for name, dtype in zip(columns, dtypes):
        frame[name] = pd.Series(dtype=dtype)
    return frame
def set_column_values_to_lowercase(df, columns_to_lowercase):
    """Return a copy of *df* with the named string columns lower-cased.

    The input frame is left unmodified.
    """
    result = df.copy()
    for column in columns_to_lowercase:
        result[column] = result[column].str.lower()
    return result
def compile_files_in_folder_into_df(folder_path, filename_like):
df_all = pd.DataFrame([])
filepaths = glob.glob('{}/{}'.format(folder_path,filename_like))
for filepath in filepaths:
df = | pd.read_csv(filepath) | pandas.read_csv |
# Load the library with the iris dataset
from sklearn.datasets import load_iris
# Load scikit's random forest classifier library
from sklearn.ensemble import RandomForestClassifier
from scipy import interp
# Using Skicit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, mean_squared_error, roc_auc_score,roc_curve, auc
from sklearn.ensemble import RandomForestRegressor
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from statistics import mean, stdev
import seaborn as sns
from sklearn.model_selection import StratifiedKFold
# Load pandas
import pandas as pd
# Load numpy
import numpy as np
from sklearn import preprocessing
from numpy import array
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score,cross_val_predict
def average(nums, default=float('nan')):
    """Arithmetic mean of *nums*; *default* when the sequence is empty."""
    if not nums:
        return default
    return sum(nums) / float(len(nums))
def read_csv(csv_file, nrows=None):
df = | pd.read_csv(csv_file, nrows=nrows) | pandas.read_csv |
# coding: utf-8
# # Stap 2: Unsupervised Learning
# #### Importeren van packages
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import decomposition
from sklearn import cluster
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from scipy.cluster.hierarchy import dendrogram,linkage
import hdbscan
from scipy.spatial.distance import cdist, pdist
# #### Instellen van pad voor data toegang
# In[8]:
get_ipython().run_line_magic('cd', 'C:\\Users\\mark_\\source\\repos\\Erasmus\\Bedbezetting\\2')
pad = get_ipython().run_line_magic('pwd', '')
# #### Inlezen van data
# In[3]:
df=pd.read_excel(pad+'\\data_clustering.xlsx')
df['Spcm'] = df.Spcm.str.split(' ').str[0]
# #### Integer and one-hot encoding
# In[4]:
#Sources:
#https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/
#https://datascience.stackexchange.com/questions/6508/k-means-incoherent-behaviour-choosing-k-with-elbow-method-bic-variance-explain
#https://support.minitab.com/en-us/minitab/18/help-and-how-to/modeling-statistics/multivariate/how-to/cluster-k-means/before-you-start/overview/
# De clustering vindt apart plaats voor klinisch niet-spoed en spoed
klinisch_spoed = df[(df['Opnametype']!='Dagverpleging') & (df['Spoedopname Ind']=='J')]
drop_klinisch_spoed=['Opname Datum','Opname Tijd','Ontslag Datum','Ontslag Tijd', 'id','Spoedopname Ind', 'Weekdag_ontslag_datum']
klinisch_spoed = klinisch_spoed.drop(drop_klinisch_spoed,axis=1)
klinisch_spoed.reset_index(drop=True,inplace=True)
lijst_categorical_spoed = ['Geslacht','Opnametype', 'Spcm','Spcm Herkomst', 'Weekdag_opname_datum', 'Diagnose Code', 'Icd10 Code']
#In de volgende regels worden de kolommen verwijderd uit de dataframes die niet meegenomen worden in de clustering. Aanpassen kan hier.
klinisch_nietspoed = df[(df['Opnametype']!='Dagverpleging') & (df['Spoedopname Ind']=='N')]
drop_klinisch_nietspoed=['Opname Datum','Opname Tijd','Ontslag Datum','Ontslag Tijd', 'id','Spoedopname Ind', 'Weekdag_ontslag_datum']
klinisch_nietspoed = klinisch_nietspoed.drop(drop_klinisch_nietspoed,axis=1)
klinisch_nietspoed.reset_index(drop=True,inplace=True)
lijst_categorical_nietspoed = ['Geslacht','Opnametype', 'Spcm','Spcm Herkomst', 'Weekdag_opname_datum', 'Diagnose Code', 'Icd10 Code']
# In[5]:
# One hot encoding
onehot_klinisch_spoed=klinisch_spoed.copy()
onehot_klinisch_spoed=pd.get_dummies(onehot_klinisch_spoed,prefix_sep="_", columns=lijst_categorical_spoed)
onehot_klinisch_nietspoed=klinisch_nietspoed.copy()
onehot_klinisch_nietspoed= | pd.get_dummies(onehot_klinisch_nietspoed,prefix_sep="_", columns=lijst_categorical_nietspoed) | pandas.get_dummies |
import pandas as pd
import numpy as np
def calc_weighted_avg_price(price, weight, grouper):
grouper.rename('grouper', inplace=True)
price_weight = price.multiply(weight)
df = pd.concat([price_weight, weight], axis=1)
grouped = df.groupby(grouper).sum()
res = (
grouped
.iloc[:, 0]
.divide(grouped.iloc[:, 1])
)
return res
def calc_weighted_anchored_price(price, weight, grouper):
grouper.rename('grouper', inplace=True)
price_weight = price.multiply(weight)
df = | pd.concat([price_weight, weight], axis=1) | pandas.concat |
################################################### Importing dataset ################################################
### Importing Libraries ###
import numpy
import pandas
import seaborn
### Import Dataset ###
dataset = pandas.read_csv('Data/dataR2.csv')
dataset.columns = ['Age', 'BMI', 'Glucose', 'Insulin', 'HOMA', 'Leptin', 'Adiponectin', 'Resistin', 'MCP1', 'Classification'] # Reformat Column Names
datasetinfo = pandas.concat([dataset.dtypes, dataset.nunique(dropna = False).sort_values(ascending = False), dataset.isnull().sum().sort_values(ascending = False), (100*dataset.isnull().sum()/dataset.isnull().count()).sort_values(ascending = False)], axis = 1, keys = ['Type', 'Unique Values', 'Null Values', 'Null Percentage']) # Null Value Check
X = pandas.DataFrame()
Y = | pandas.DataFrame() | pandas.DataFrame |
from scipy import spatial
import pandas as pd
import numpy as np
import constants
def cosine_similarity(x, y):
return 1 - spatial.distance.cosine(x, y)
def find_closest_word_vectors(word, word_set, nlp):
spacy_vocab = nlp.vocab
q = spacy_vocab[word].vector
if True: # q.sum() == 0:
q = nlp(word).vector
words = []
similarities = []
for current_word in word_set:
try:
similarities.append(cosine_similarity(spacy_vocab[current_word].vector, q))
words.append(current_word)
except Exception as e:
#similarities.append(-2)
pass
# TODO: If it can't compute the cosine similarity, should probably just skip it and not append
# TODO: Look at all items with invalid cosine similarity and see if any are valuable at all. If not, don't add these.
df = pd.DataFrame({constants.WORD: words, constants.SIMILARITY: similarities})
df = df.sort_values(constants.SIMILARITY, ascending=False)
return df
def find_closest_word_vectors_series(word, word_set, spacy_vocab):
q = spacy_vocab[word].vector
words = []
similarities = []
word_list_series = pd.Series(list(word_set))
similarities = word_list_series.apply(lambda current_word: cosine_similarity(spacy_vocab[current_word].vector, q))
df = | pd.DataFrame({constants.WORD: word_list_series, constants.SIMILARITY: similarities}) | pandas.DataFrame |
"""
A inspection to compute the ratio of non-values in output columns
"""
from typing import Iterable
import pandas
from mlinspect.inspections._inspection import Inspection
from mlinspect.inspections._inspection_input import OperatorType, InspectionInputSinkOperator
class CompletenessOfColumns(Inspection):
"""
An inspection to compute the completeness of columns
"""
def __init__(self, columns):
self._present_column_names = []
self._null_value_counts = []
self._total_counts = []
self._operator_type = None
self.columns = columns
@property
def inspection_id(self):
return tuple(self.columns)
def visit_operator(self, inspection_input) -> Iterable[any]:
"""
Visit an operator
"""
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
self._present_column_names = []
self._null_value_counts = []
self._total_counts = []
self._operator_type = inspection_input.operator_context.operator
if not isinstance(inspection_input, InspectionInputSinkOperator):
present_columns_index = []
for column_name in self.columns:
column_present = column_name in inspection_input.output_columns.fields
if column_present:
column_index = inspection_input.output_columns.get_index_of_column(column_name)
present_columns_index.append(column_index)
self._present_column_names.append(column_name)
self._null_value_counts.append(0)
self._total_counts.append(0)
for row in inspection_input.row_iterator:
for present_column_index, column_index in enumerate(present_columns_index):
column_value = row.output[column_index]
is_null = | pandas.isna(column_value) | pandas.isna |
import numpy as np
import pandas as pd
import pytest
import skorecard.reporting.report
from skorecard.metrics import metrics
from skorecard.bucketers import DecisionTreeBucketer
@pytest.fixture()
def X_y():
"""Set of X,y for testing the transformers."""
X = np.array(
[[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]],
np.int32,
)
y = np.array([0, 0, 0, 1, 1, 1, 0, 0, 1])
return X, y
@pytest.fixture()
def X1_X2():
"""Set of dataframes to test psi."""
X1 = pd.DataFrame(
[[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]], columns=["col1", "col2"]
)
X2 = pd.DataFrame(
[[0, 2], [3, 0], [0, 0], [1, 2], [0, 4], [2, 1], [1, 1], [2, 1], [1, 1]], columns=["col1", "col2"]
)
return X1, X2
def test_iv_on_array(X_y):
"""Test the IV calculation for two arrays."""
X, y = X_y
X = pd.DataFrame(X, columns=["0", "1"])
np.testing.assert_almost_equal(metrics._IV_score(y, X["0"]), 5.307, decimal=2)
np.testing.assert_almost_equal(metrics._IV_score(y, X["1"]), 4.635, decimal=2)
def test_psi_zero(df):
"""Test that PSI on same array is zero."""
features = ["LIMIT_BAL", "BILL_AMT1"]
X = df[features]
y = df["default"]
X_bins = DecisionTreeBucketer(variables=features).fit_transform(X, y)
psi_vals = skorecard.reporting.report.psi(X_bins, X_bins)
assert set(psi_vals.keys()) == set(X_bins.columns)
assert all([val == 0 for val in psi_vals.values()])
def test_psi_values(X1_X2):
"""Assert PSi values match expectations."""
X1, X2 = X1_X2
expected_psi = {"col1": 0.0773, "col2": 0.965}
psi_vals = skorecard.reporting.report.psi(X1, X2)
np.testing.assert_array_almost_equal(pd.Series(expected_psi).values, | pd.Series(psi_vals) | pandas.Series |
# -*- coding: utf-8 -*-
"""Generating the training data.
This script generates the training data according to the config specifications.
Example
-------
To run this script, pass in the desired config file as argument::
$ generate baobab/configs/tdlmc_diagonal_config.py --n_data 1000
"""
import os, sys
import random
import argparse
import gc
from types import SimpleNamespace
from tqdm import tqdm
import numpy as np
import pandas as pd
# Lenstronomy modules
import lenstronomy
print("Lenstronomy path being used: {:s}".format(lenstronomy.__path__[0]))
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
# Baobab modules
from baobab.configs import BaobabConfig
import baobab.bnn_priors as bnn_priors
from baobab.sim_utils import Imager, Selection
def parse_args():
"""Parse command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Baobab config file path')
parser.add_argument('--n_data', default=None, dest='n_data', type=int,
help='size of dataset to generate (overrides config file)')
parser.add_argument('--dest_dir', default=None, dest='dest_dir', type=str,
help='destination for output folder (overrides config file)')
args = parser.parse_args()
# sys.argv rerouting for setuptools entry point
if args is None:
args = SimpleNamespace()
args.config = sys.argv[0]
args.n_data = sys.argv[1]
args.dest_dir = sys.argv[2]
return args
def main():
args = parse_args()
cfg = BaobabConfig.from_file(args.config)
if args.n_data is not None:
cfg.n_data = args.n_data
if args.dest_dir is not None:
cfg.destination_dir = args.dest_dir
# Seed for reproducibility
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# Create data directory
save_dir = cfg.out_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Destination folder path: {:s}".format(save_dir))
else:
raise OSError("Destination folder already exists.")
# Instantiate density models
kwargs_model = dict(
lens_model_list=[cfg.bnn_omega.lens_mass.profile, cfg.bnn_omega.external_shear.profile],
source_light_model_list=[cfg.bnn_omega.src_light.profile],
)
lens_mass_model = LensModel(lens_model_list=kwargs_model['lens_model_list'])
src_light_model = LightModel(light_model_list=kwargs_model['source_light_model_list'])
if 'lens_light' in cfg.components:
kwargs_model['lens_light_model_list'] = [cfg.bnn_omega.lens_light.profile]
lens_light_model = LightModel(light_model_list=kwargs_model['lens_light_model_list'])
else:
lens_light_model = None
if 'agn_light' in cfg.components:
kwargs_model['point_source_model_list'] = [cfg.bnn_omega.agn_light.profile]
ps_model = PointSource(point_source_type_list=kwargs_model['point_source_model_list'], fixed_magnification_list=[False])
else:
ps_model = None
# Instantiate Selection object
selection = Selection(cfg.selection, cfg.components)
# Instantiate Imager object
if cfg.bnn_omega.kinematics.calculate_vel_disp or cfg.bnn_omega.time_delays.calculate_time_delays:
for_cosmography = True
else:
for_cosmography = False
imager = Imager(cfg.components, lens_mass_model, src_light_model, lens_light_model=lens_light_model, ps_model=ps_model, kwargs_numerics=cfg.numerics, min_magnification=cfg.selection.magnification.min, for_cosmography=for_cosmography, magnification_frac_err=cfg.bnn_omega.magnification.frac_error_sigma)
# Initialize BNN prior
if for_cosmography:
kwargs_lens_eqn_solver = {'min_distance': 0.05, 'search_window': cfg.instrument['pixel_scale']*cfg.image['num_pix'], 'num_iter_max': 100}
bnn_prior = getattr(bnn_priors, cfg.bnn_prior_class)(cfg.bnn_omega, cfg.components, kwargs_lens_eqn_solver)
else:
kwargs_lens_eqn_solver = {}
bnn_prior = getattr(bnn_priors, cfg.bnn_prior_class)(cfg.bnn_omega, cfg.components)
# Initialize empty metadata dataframe
metadata = pd.DataFrame()
metadata_path = os.path.join(save_dir, 'metadata.csv')
current_idx = 0 # running idx of dataset
pbar = tqdm(total=cfg.n_data)
while current_idx < cfg.n_data:
sample = bnn_prior.sample() # FIXME: sampling in batches
if selection.reject_initial(sample): # select on sampled model parameters
continue
# Generate the image
img, img_features = imager.generate_image(sample, cfg.image.num_pix, cfg.survey_object_dict)
if img is None: # select on stats computed while rendering the image
continue
# Save image file
if cfg.image.squeeze_bandpass_dimension:
img = np.squeeze(img)
img_filename = 'X_{0:07d}.npy'.format(current_idx)
img_path = os.path.join(save_dir, img_filename)
np.save(img_path, img)
# Save labels
meta = {}
for comp in cfg.components: # Log model parameters
for param_name, param_value in sample[comp].items():
meta['{:s}_{:s}'.format(comp, param_name)] = param_value
if cfg.bnn_prior_class in ['EmpiricalBNNPrior', 'DiagonalCosmoBNNPrior']: # Log other stats
for misc_name, misc_value in sample['misc'].items():
meta['{:s}'.format(misc_name)] = misc_value
meta.update(img_features)
if 'agn_light' in cfg.components:
meta['x_image'] = img_features['x_image'].tolist()
meta['y_image'] = img_features['y_image'].tolist()
meta['n_img'] = len(img_features['y_image'])
meta['magnification'] = img_features['magnification'].tolist()
meta['measured_magnification'] = img_features['measured_magnification'].tolist()
meta['img_filename'] = img_filename
metadata = metadata.append(meta, ignore_index=True)
# Export metadata.csv for the first time
if current_idx == 0:
metadata = metadata.reindex(sorted(metadata.columns), axis=1) # sort columns lexicographically
metadata.to_csv(metadata_path, index=None) # export to csv
metadata = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 17:24:41 2020
@author: -
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import json
import skimage.io as io
from plotnine import *
from mpl_toolkits import mplot3d
import matplotlib as mpl
from scipy import stats
from pandas.api.types import CategoricalDtype
file_path="./3D_face_data/"
feature_female=np.loadtxt(file_path+'Profile_AntFeature_female.txt')
feature_male=np.loadtxt(file_path+'Profile_AntFeature_male.txt')
#'face height/face width','jawline width/faced width','chin-to-month/jawline width'
df_female=pd.DataFrame(feature_female[:,0:4], columns=['face height\nface width',
'jaw width\nfaced width',
'chin-to-mouth\njawline width',
'cheekbone width\nfaced width'])
df_female['group']='female'
df_male=pd.DataFrame(feature_male[:,0:4], columns=['face height\nface width',
'jaw width\nfaced width',
'chin-to-mouth\njawline width',
'cheekbone width\nfaced width'])
df_male['group']='male'
#====================================Male:relationship between Geofeatures and beauty========================================
df_Mabeauty=pd.read_csv(file_path+"AM_Ratings_SCUT_FBP5500.csv")
df_Mabeauty=df_Mabeauty.groupby('filename',as_index=False)['Rating'].agg({'mean':"mean",'std':"std"})
params = []
with open(file_path+'Face_Para_male_SCUT_FBP5500.json', "r") as f:
for line in f:
doc = json.loads(line)
#print(doc)
params.append(doc)
filenames=np.array([ x['filename'] for x in params])
labels=np.loadtxt(file_path+'AM_Keamlables.txt')
df_MaFeatures=pd.concat((pd.DataFrame(dict(filename=filenames,labels=labels)),df_male),axis=1)
df_MaMerge=pd.merge(df_MaFeatures,df_Mabeauty,how='left',on='filename')
df_MaMerge['labels']=df_MaMerge['labels'].astype(str)
# df_MaMerge['labels']=df_MaMerge['labels'].replace(['0.0', '1.0', '2.0', '3.0', '4.0', '5.0'],
# ['4' , '6' , '5', '2', '3', '1'])
#df_MaMerge.to_csv('./Data/df_MaMerge.csv',index=False)
violin_plot=(ggplot(df_MaMerge,aes(x='labels',y="mean",fill="labels"))
+geom_violin(show_legend=False)
+geom_boxplot(fill="white",width=0.1,show_legend=False)
+scale_fill_hue(s = 0.90, l = 0.65, h=0.0417,color_space='husl')
+ylim(1,5)
+theme_matplotlib())
print(violin_plot)
#=====================================Female:relationship between Geofeatures and beauty========================================
df_febeauty1= | pd.read_csv(file_path+"AF_Ratings_SCUT_FBP5500.csv") | pandas.read_csv |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, IndexVariable, Coordinate, Dataset
from xarray.core import indexing
from xarray.core.variable import as_variable, as_compatible_data
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from xarray.core.common import full_like, zeros_like, ones_like
from . import TestCase, source_ndarray, requires_dask
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.sizes, {'time': 10})
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_pandas_period_index(self):
v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B'))
self.assertEqual(v[0], pd.Period('2000', freq='B'))
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(['x'], x)
base_v = v.to_base_variable()
# unary ops
self.assertVariableIdentical(base_v, +v)
self.assertVariableIdentical(base_v, abs(v))
self.assertArrayEqual((-v).values, -x)
# binary ops with numbers
self.assertVariableIdentical(base_v, v + 0)
self.assertVariableIdentical(base_v, 0 + v)
self.assertVariableIdentical(base_v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(base_v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable())
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, IndexVariable)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
self.assertVariableIdentical(v.argsort(), v.to_base_variable())
self.assertVariableIdentical(v.clip(2, 3),
self.cls('x', x.clip(2, 3)).to_base_variable())
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
| pd.date_range('2000-01-01', periods=3) | pandas.date_range |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisCleaned[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": | pandas.StringDtype() | pandas.StringDtype |
from datetime import datetime as dt
import os
import pandas as pd
import ntpath
import numpy as np
import math
from distutils.dir_util import copy_tree
from shutil import rmtree
import sqlite3
# 'cleanData' is taking the data that was imported from 'http://football-data.co.uk/'
# and 'cleaning' the data so that only necessary factors are used for testing.
# This function is used to make a directory.
def make_directory(path):
    """Create the parent directory of *path* if it does not already exist.

    *path* is treated as a file path: only os.path.dirname(path) is created,
    so pass a trailing separator when *path* itself is the directory.
    """
    directory = os.path.dirname(path)
    # Guard against '' (path with no directory component), which makedirs
    # rejects; exist_ok avoids the check-then-create race of the original.
    if directory:
        os.makedirs(directory, exist_ok=True)
# If a directory already exists it will be removed.
def rmv_dir(path):
    """Delete the directory tree at *path*; silently ignore missing paths."""
    path_is_present = os.path.exists(path)
    if path_is_present:
        rmtree(path)
# This function is used to copy a file/folder.
def copy_csv(from_path, to_path):
    """Copy a file or a directory tree from *from_path* to *to_path*.

    Files are copied line-by-line as text; directories are copied recursively.

    Raises:
        ValueError: when *from_path* is neither a file nor a directory.
    """
    make_directory(to_path)
    if os.path.isfile(from_path):
        with open(to_path, 'w') as to_file, open(from_path, 'r') as from_file:
            for line in from_file:
                to_file.write(line)
    elif os.path.isdir(from_path):
        # distutils (and dir_util.copy_tree) was removed in Python 3.12;
        # shutil.copytree(dirs_exist_ok=True) is the supported equivalent
        # that also copies into an existing destination directory.
        from shutil import copytree
        copytree(from_path, to_path, dirs_exist_ok=True)
    else:
        raise ValueError("Copy_CSV Error. File either does not exist, or is an unsupported file type")
# clean the original raw_data data by storing only the columns that we need, and removing the rest.
def clean(from_path, to_path, columns):
    """Reduce a raw football-data CSV to *columns*, normalising dates and scores.

    Args:
        from_path: Source CSV path (football-data.co.uk export).
        to_path: Destination CSV path; parent directories are created as needed.
        columns: Column names to keep; must include 'Date', 'FTHG', 'FTAG'.
    """
    def convert_date(date):
        # ISO dates (yyyy-mm-dd, i.e. three '-'-separated parts) pass through;
        # otherwise parse the site's dd/mm/yy format.
        if date == '':
            return None
        if len(date.split('-')) == 3:
            return date
        return dt.strptime(date, '%d/%m/%y').date()

    # The convert Score function will check to see if the score is 'Not a Number'(NaN).
    # The latter part of this conditional statement will more than likely be used more.
    def convert_score(score):
        if math.isnan(score):
            return score
        return int(score)

    # 'error_bad_lines' was removed in pandas 2.0; on_bad_lines='skip' is the
    # replacement with the same skip-malformed-rows behaviour.
    df = pd.read_csv(from_path, on_bad_lines='skip')
    df = df[columns]
    df = df[pd.notnull(df['Date'])]
    df['FTHG'] = df['FTHG'].apply(convert_score)
    df['FTAG'] = df['FTAG'].apply(convert_score)
    df['Date'] = df['Date'].apply(convert_date)
    head, _ = ntpath.split(to_path)
    # Guard against '' when to_path has no directory component.
    if head and not os.path.exists(head):
        os.makedirs(head)
    df.to_csv(to_path, index=False)
# This function is cleaning the data in the raw_data folder from every year.
def clean_everything(from_folder, to_folder, columns, from_year, to_year):
    """Run clean() on every season CSV from from_year through to_year inclusive."""
    for year in range(from_year, to_year + 1):
        season_file = '{}-{}.csv'.format(year, year + 1)
        source = os.path.join(from_folder, season_file)
        destination = os.path.join(to_folder, season_file)
        print("Cleaning data", source, "...")
        clean(source, destination, columns)
# The years are then concatenated through this function.
def combine_games(cleaned_folder_path, final_path, start_year, end_year, make_file=True):
    """Concatenate per-season CSVs into one dataframe.

    Reads '<year>-<year+1>.csv' for each season from start_year through
    end_year inclusive, concatenates them (fresh index), optionally writes
    the result to final_path, and returns the combined dataframe.
    """
    print("Combining matches played from {} to {}...".format(start_year, end_year))
    season_frames = [
        pd.read_csv(os.path.join(cleaned_folder_path, '{}-{}.csv'.format(year, year + 1)))
        for year in range(start_year, end_year + 1)
    ]
    combined = pd.concat(season_frames, ignore_index=True, sort=False)
    if make_file:
        combined.to_csv(final_path, index=False)
    return combined
def get_match_results_against(file_path, cleaned_folder_path, final_path, from_year, to_year):
print("Getting head-to-head results...")
team_detail, match_detail = {}, {}
match_detail_columns = [
'HT_win_rate_against',
'AT_win_rate_against'
]
for item in match_detail_columns:
match_detail[item] = []
# Get head-to-head result from fromYear to toYear
df = combine_games(cleaned_folder_path, final_path, from_year, to_year, make_file=False)
for index, row in df.iterrows():
home_team = row['HomeTeam']
away_team = row['AwayTeam']
if home_team not in team_detail:
team_detail[home_team] = {}
if away_team not in team_detail:
team_detail[away_team] = {}
if away_team not in team_detail[home_team]:
team_detail[home_team][away_team] = {
'match_played': 0,
'win': 0
}
if home_team not in team_detail[away_team]:
team_detail[away_team][home_team] = {
'match_played': 0,
'win': 0
}
TD_HT_AT = team_detail[home_team][away_team]
TD_AT_HT = team_detail[away_team][home_team]
home_team_win_rate = TD_HT_AT['win'] / TD_HT_AT['match_played'] if TD_HT_AT['match_played'] > 0 else np.nan
away_team_win_rate = TD_AT_HT['win'] / TD_AT_HT['match_played'] if TD_AT_HT['match_played'] > 0 else np.nan
match_detail['HT_win_rate_against'].append(home_team_win_rate)
match_detail['AT_win_rate_against'].append(away_team_win_rate)
TD_HT_AT['match_played'] += 1
TD_AT_HT['match_played'] += 1
game_result = row['FTR']
if game_result == 'H':
TD_HT_AT['win'] += 1
elif game_result == 'A':
TD_AT_HT['win'] += 1
# Only take the last x results of df and combine with filedf.
# This is because we don't always want to merge all data from 1993 to 2018
filed_f = pd.read_csv(file_path)
row_count = filed_f.shape[0]
filed_f['HT_win_rate_against'] = pd.Series(match_detail['HT_win_rate_against'][-row_count:], index=filed_f.index)
filed_f['AT_win_rate_against'] = | pd.Series(match_detail['AT_win_rate_against'][-row_count:], index=filed_f.index) | pandas.Series |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
    """Compound frequency strings must collapse into one equivalent offset;
    malformed strings must raise ValueError."""
    freqstr = '2h30min'
    freqstr2 = '2h 30min'
    result = frequencies.to_offset(freqstr)
    # Whitespace between components is insignificant.
    assert(result == frequencies.to_offset(freqstr2))
    expected = offsets.Minute(150)
    assert(result == expected)
    freqstr = '2h30min15s'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Second(150 * 60 + 15)
    assert(result == expected)
    freqstr = '2h 60min'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Hour(3)
    assert(result == expected)
    freqstr = '15l500u'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Micro(15500)
    assert(result == expected)
    freqstr = '10s75L'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Milli(10075)
    assert(result == expected)
    freqstr = '2800N'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Nano(2800)
    assert(result == expected)
    # malformed
    try:
        frequencies.to_offset('2h20m')
    except ValueError:
        pass
    else:
        assert(False)
def test_to_offset_negative():
    """A leading '-' negates the whole compound frequency (n carries the sign)."""
    freqstr = '-1S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -1)
    freqstr = '-5min10s'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -310)
def test_to_offset_leading_zero():
    """Zero-valued leading components are ignored when combining parts."""
    freqstr = '00H 00T 01S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == 1)
    freqstr = '-00H 03T 14S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -194)
def test_to_offset_pd_timedelta():
    """to_offset accepts a pandas Timedelta and returns the finest-grained
    single offset representing it; zero is rejected. (GH #9064)"""
    # Tests for #9064
    td = Timedelta(days=1, seconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Second(86401)
    assert(expected==result)
    td = Timedelta(days=-1, seconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Second(-86399)
    assert(expected==result)
    td = Timedelta(hours=1, minutes=10)
    result = frequencies.to_offset(td)
    expected = offsets.Minute(70)
    assert(expected==result)
    td = Timedelta(hours=1, minutes=-10)
    result = frequencies.to_offset(td)
    expected = offsets.Minute(50)
    assert(expected==result)
    td = Timedelta(weeks=1)
    result = frequencies.to_offset(td)
    expected = offsets.Day(7)
    assert(expected==result)
    td1 = Timedelta(hours=1)
    result1 = frequencies.to_offset(td1)
    result2 = frequencies.to_offset('60min')
    assert(result1 == result2)
    td = Timedelta(microseconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Micro(1)
    assert(expected == result)
    # A zero timedelta cannot be expressed as an offset.
    td = Timedelta(microseconds=0)
    tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
    """Unanchored aliases expand to their defaults: 'W' -> W-SUN, 'Q' -> Q-DEC."""
    result = frequencies.to_offset('W')
    expected = frequencies.to_offset('W-SUN')
    assert(result == expected)
    result1 = frequencies.to_offset('Q')
    result2 = frequencies.to_offset('Q-DEC')
    expected = offsets.QuarterEnd(startingMonth=12)
    assert(result1 == expected)
    assert(result2 == expected)
    # Explicit anchors are honoured.
    result1 = frequencies.to_offset('Q-MAY')
    expected = offsets.QuarterEnd(startingMonth=5)
    assert(result1 == expected)
def test_get_rule_month():
    """_get_rule_month returns the anchor month of a frequency (string or
    offset object), defaulting to 'DEC' for unanchored frequencies."""
    result = frequencies._get_rule_month('W')
    assert(result == 'DEC')
    result = frequencies._get_rule_month(offsets.Week())
    assert(result == 'DEC')
    result = frequencies._get_rule_month('D')
    assert(result == 'DEC')
    result = frequencies._get_rule_month(offsets.Day())
    assert(result == 'DEC')
    result = frequencies._get_rule_month('Q')
    assert(result == 'DEC')
    # BUG FIX: this check used print() instead of assert(), so a regression
    # here would have gone unnoticed; every sibling check asserts.
    result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
    result = frequencies._get_rule_month('Q-JAN')
    assert(result == 'JAN')
    result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
    assert(result == 'JAN')
    result = frequencies._get_rule_month('A-DEC')
    assert(result == 'DEC')
    result = frequencies._get_rule_month(offsets.YearEnd())
    assert(result == 'DEC')
    result = frequencies._get_rule_month('A-MAY')
    assert(result == 'MAY')
    result = frequencies._get_rule_month(offsets.YearEnd(month=5))
    assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
    """Tests for the numeric frequency-code mapping (get_freq / get_freq_code
    / get_freq_group) and the frequency <-> resolution translation."""

    def test_freq_code(self):
        """String aliases (with or without a multiple) map to stable codes."""
        self.assertEqual(frequencies.get_freq('A'), 1000)
        # The multiple/sign does not affect the base code.
        self.assertEqual(frequencies.get_freq('3A'), 1000)
        self.assertEqual(frequencies.get_freq('-1A'), 1000)
        self.assertEqual(frequencies.get_freq('W'), 4000)
        # Weekday anchors offset the weekly base code (MON=+1 ... FRI=+5).
        self.assertEqual(frequencies.get_freq('W-MON'), 4001)
        self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
        # Round-trip every known period alias through code and group.
        for freqstr, code in compat.iteritems(frequencies._period_code_map):
            result = frequencies.get_freq(freqstr)
            self.assertEqual(result, code)
            # Groups are the code truncated to its thousand block.
            result = frequencies.get_freq_group(freqstr)
            self.assertEqual(result, code // 1000 * 1000)
            result = frequencies.get_freq_group(code)
            self.assertEqual(result, code // 1000 * 1000)

    def test_freq_group(self):
        """All anchored variants of a frequency share one group code."""
        self.assertEqual(frequencies.get_freq_group('A'), 1000)
        self.assertEqual(frequencies.get_freq_group('3A'), 1000)
        self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
        self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
        self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
        # Offset objects resolve to the same groups as their aliases.
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
        self.assertEqual(frequencies.get_freq_group('W'), 4000)
        self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
        self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)

    def test_get_to_timestamp_base(self):
        """Coarse frequencies convert through 'D'; sub-daily through 'S'."""
        tsb = frequencies.get_to_timestamp_base
        self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
                         frequencies.get_freq_code('D')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
                         frequencies.get_freq_code('D')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
                         frequencies.get_freq_code('D')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
                         frequencies.get_freq_code('S')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
                         frequencies.get_freq_code('S')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
                         frequencies.get_freq_code('S')[0])

    def test_freq_to_reso(self):
        """Frequency aliases map to resolution names and back losslessly."""
        Reso = frequencies.Resolution
        self.assertEqual(Reso.get_str_from_freq('A'), 'year')
        self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
        self.assertEqual(Reso.get_str_from_freq('M'), 'month')
        self.assertEqual(Reso.get_str_from_freq('D'), 'day')
        self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
        self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
        self.assertEqual(Reso.get_str_from_freq('S'), 'second')
        self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
        self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
        self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
        for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
            # check roundtrip
            result = Reso.get_freq(Reso.get_str_from_freq(freq))
            self.assertEqual(freq, result)
        for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
            # Round-trip via the numeric resolution value as well.
            result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
            self.assertEqual(freq, result)

    def test_get_freq_code(self):
        """get_freq_code accepts a string, a (alias, mult) tuple, a numeric
        tuple, or an offset object, returning (code, multiple)."""
        # freqstr
        self.assertEqual(frequencies.get_freq_code('A'),
                         (frequencies.get_freq('A'), 1))
        self.assertEqual(frequencies.get_freq_code('3D'),
                         (frequencies.get_freq('D'), 3))
        self.assertEqual(frequencies.get_freq_code('-2M'),
                         (frequencies.get_freq('M'), -2))
        # tuple
        self.assertEqual(frequencies.get_freq_code(('D', 1)),
                         (frequencies.get_freq('D'), 1))
        self.assertEqual(frequencies.get_freq_code(('A', 3)),
                         (frequencies.get_freq('A'), 3))
        self.assertEqual(frequencies.get_freq_code(('M', -2)),
                         (frequencies.get_freq('M'), -2))
        # numeric tuple
        self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
        # offsets
        self.assertEqual(frequencies.get_freq_code(offsets.Day()),
                         (frequencies.get_freq('D'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
                         (frequencies.get_freq('D'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
                         (frequencies.get_freq('D'), -2))
        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
                         (frequencies.get_freq('M'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
                         (frequencies.get_freq('M'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
                         (frequencies.get_freq('M'), -2))
        self.assertEqual(frequencies.get_freq_code(offsets.Week()),
                         (frequencies.get_freq('W'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
                         (frequencies.get_freq('W'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
                         (frequencies.get_freq('W'), -2))
        # monday is weekday=0
        self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
                         (frequencies.get_freq('W-TUE'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
                         (frequencies.get_freq('W-MON'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
                         (frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
    """infer_freq rejects PeriodIndex input with TypeError."""
    index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
    self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
    """Fewer than three timestamps cannot determine a frequency."""
    index = _dti(['12/31/1998', '1/3/1999'])
    self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
    """Dates skipping a weekend infer business-day frequency 'B'."""
    index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
    self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
    """One-day spacing infers 'D' (including multiples)."""
    self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
    """Minimal three-element daily index still infers 'D'."""
    index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
    self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
    """infer_freq also accepts the output of to_datetime directly."""
    dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
    self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
    """Hourly spacing infers 'H'."""
    self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
    """Minutely spacing infers 'T'."""
    self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
    """Secondly spacing infers 'S'."""
    self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
    """Millisecond spacing infers 'L'."""
    self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
    """Microsecond spacing infers 'U'."""
    self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
    """Nanosecond spacing infers 'N' (needs numpy timedelta64 resolution)."""
    self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
    """Helper: verify tick-frequency inference for multiples 1..4 of
    base_delta, and that an irregular element defeats inference.

    Args:
        base_delta: spacing between consecutive timestamps.
        code: expected frequency alias for a single base_delta step.
    """
    b = Timestamp(datetime.now())
    for i in range(1, 5):
        inc = base_delta * i
        index = _dti([b + inc * j for j in range(3)])
        # Multiples > 1 are rendered with a leading count, e.g. '2H'.
        if i > 1:
            exp_freq = '%d%s' % (i, code)
        else:
            exp_freq = code
        self.assertEqual(frequencies.infer_freq(index), exp_freq)
    # An outlier at either end makes the spacing irregular -> None.
    index = _dti([b + base_delta * 7] +
                 [b + base_delta * j for j in range(3)])
    self.assertIsNone(frequencies.infer_freq(index))
    index = _dti([b + base_delta * j for j in range(3)] +
                 [b + base_delta * 7])
    self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
    """Weekly frequencies round-trip for every weekday anchor."""
    days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
    for day in days:
        self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
    """Week-of-month frequencies (WOM-1MON .. WOM-4SUN) round-trip."""
    days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
    for day in days:
        for i in range(1, 5):
            self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
    """WOM-5* is invalid (not every month has five of a weekday)."""
    # Only supports freq up to WOM-4. See #9425
    func = lambda: date_range('2014-01-01', freq='WOM-5MON')
    self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
    """Inference never produces a WOM-5 frequency."""
    # Only attempts to infer up to WOM-4. See #9425
    index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
    assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
    """Same-weekday dates 4-5 weeks apart must not be mistaken for WOM."""
    #All of these dates are on same day of week and are 4 or 5 weeks apart
    index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
    assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
    """Month-end frequency 'M' round-trips through generation/inference."""
    self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
    """Month-end dates (incl. leap-February) infer 'M'."""
    rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
    self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
    """Business-month-end frequency 'BM' round-trips."""
    self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
    """Business-month-start frequency 'BMS' round-trips."""
    self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
    """Quarterly frequencies round-trip for each anchor month tested."""
    for month in ['JAN', 'FEB', 'MAR']:
        self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
    """Annual frequencies round-trip for every anchor month."""
    # NOTE(review): MONTHS is not defined or imported in the visible part of
    # this file — confirm it is provided (e.g. imported) further up.
    for month in MONTHS:
        self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
    """Business-annual frequencies round-trip for every anchor month."""
    # NOTE(review): MONTHS is not defined in the visible part of this file
    # — confirm it is provided further up.
    for month in MONTHS:
        self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
    """Same day each year infers the anchored annual frequency."""
    rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
    self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
    """Helper: generate a range with *freq* and check inference recovers it.

    Quarterly anchors are ambiguous: e.g. Q-SEP endpoints coincide with
    Q-DEC endpoints, so any anchor from the same equivalence class is
    accepted. Runs with 7 and with 5 periods.
    """
    freq = freq.upper()
    gen = date_range(start, periods=7, freq=freq)
    # Rebuild from raw values so no freq metadata leaks into inference.
    index = _dti(gen.values)
    if not freq.startswith('Q-'):
        self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
    else:
        inf_freq = frequencies.infer_freq(index)
        self.assertTrue((inf_freq == 'Q-DEC' and
                         gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
                                         'Q-MAR'))
                        or
                        (inf_freq == 'Q-NOV' and
                         gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
                        or
                        (inf_freq == 'Q-OCT' and
                         gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
    gen = date_range(start, periods=5, freq=freq)
    index = _dti(gen.values)
    if not freq.startswith('Q-'):
        self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
    else:
        inf_freq = frequencies.infer_freq(index)
        self.assertTrue((inf_freq == 'Q-DEC' and
                         gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
                                         'Q-MAR'))
                        or
                        (inf_freq == 'Q-NOV' and
                         gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
                        or
                        (inf_freq == 'Q-OCT' and
                         gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
    """Quarterly period ranges converted to timestamps keep their anchor."""
    cases = [('Q', 'Q-DEC'), ('Q-NOV', 'Q-NOV'), ('Q-OCT', 'Q-OCT')]
    for freq, expected in cases:
        rng = period_range('1959Q2', '2009Q3', freq=freq)
        rng = Index(rng.to_timestamp('D', how='e').asobject)
        self.assertEqual(rng.inferred_freq, expected)
def test_infer_freq_tz(self):
    """Frequency inference is unaffected by timezone localization (GH 7310)."""
    freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
             'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
             'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
             'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
             'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
             'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
             }
    zones = [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
             'US/Pacific', 'US/Eastern']
    for zone in zones:
        for expected, dates in compat.iteritems(freqs):
            idx = DatetimeIndex(dates, tz=zone)
            self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = | date_range("2013-11-03", periods=5, freq="3H") | pandas.date_range |
# Subroutines for MERRA processing to SARTA inputs
import numpy
from netCDF4 import Dataset
import datetime
import os
from scipy import stats
from numpy import ndarray, ma
import pandas
import math
def sfclvl(psfc, levarr):
    """Return, per grid point, the index of the lowest level above the surface.

    psfc   : 2D (lat, lon) surface pressure
    levarr : 1D array of pressure levels (same units as psfc)

    For each point the highest index k with levarr[k] <= psfc is returned
    (levarr is assumed ascending in pressure -- TODO confirm with callers).
    """
    nlat, nlon = psfc.shape
    lvidx = numpy.arange(levarr.shape[0])
    out = numpy.zeros((nlat, nlon), dtype=numpy.int16)
    for jj in range(nlat):
        for ii in range(nlon):
            below = lvidx[levarr <= psfc[jj, ii]]
            out[jj, ii] = below[-1]
    return out
def sfclvl_rev_met(psfc, levarr, metarr):
    """Surface-level index for a descending level array with a masked met field.

    psfc   : 2D (lat, lon) surface pressure
    levarr : 1D array of pressure levels, descending in pressure
    metarr : 3D masked met array (lev, lat, lon)

    For each point the first (lowest-index) level with levarr <= psfc that
    is also unmasked in metarr is returned.
    """
    nlat, nlon = psfc.shape
    lvidx = numpy.arange(levarr.shape[0])
    out = numpy.zeros((nlat, nlon), dtype=numpy.int16)
    for jj in range(nlat):
        for ii in range(nlon):
            ok = lvidx[(levarr <= psfc[jj, ii]) &
                       numpy.logical_not(metarr[:, jj, ii].mask)]
            out[jj, ii] = ok[0]
    return out
def merra_subset_2d( srchdr, srchdt, lnsq, ltsq, varnm, mflstr = 'inst1_2d_asm'):
    """Extract a spatial subset of a 2D MERRA variable for one day.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    varnm:  Name of the 2D variable to read
    mflstr: Substring identifying the MERRA file collection

    Raises IOError if no matching file exists.  (The original loop never set
    its f3d sentinel, so it always scanned the whole directory and would hit
    a NameError when no file matched; it also kept the *last* match, but one
    file per collection per day is expected.)
    """
    dtstr = srchdt.strftime('%Y%m%d')
    for flnm in os.listdir(srchdr):
        # First file matching both the collection name and the date wins.
        if (mflstr in flnm) and (dtstr in flnm):
            mer2d = '%s/%s' % (srchdr, flnm)
            f = Dataset(mer2d, 'r')
            vararr = f.variables[varnm][0, ltsq, lnsq]
            f.close()
            return vararr
    raise IOError('No %s file for %s found in %s' % (mflstr, dtstr, srchdr))
def merra_conv_temp_prof( srchdr, srchdt, lnsq, ltsq, lvout, msgval=-9999.):
    """Interpolate the MERRA temperature profile onto SARTA/AIRS pressure levels.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    lvout:  Array of output pressure levels (hPa)
    msgval: Missing-data value used in the output

    Returns a (nlev, nlat, nlon) float32 temperature array.  Levels above the
    MERRA model top are filled from a log-pressure regression through the
    topmost MERRA levels; levels below the surface are filled from a
    constant-potential-temperature extrapolation that includes the 2 m
    temperature.
    """
    flst = os.listdir(srchdr)
    dtstr = srchdt.strftime('%Y%m%d')
    f2d = -1
    f3d = -1
    j = 0
    nlvout = lvout.shape[0]
    # Log-pressure interpolation weights and bracketing MERRA level indices
    lwwt = numpy.zeros(nlvout,)
    hiwt = numpy.zeros(nlvout,)
    lwidx = numpy.zeros(nlvout,dtype=numpy.int32)
    hiidx = numpy.zeros(nlvout,dtype=numpy.int32)
    print(dtstr)
    # Scan the listing until both the 2D and 3D files for the date are read
    while ( (j < len(flst)) and ((f2d < 0) or (f3d < 0) ) ):
        if ( ('inst1_2d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f2d = j
            mer2d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer2d,'r')
            t2m = f.variables['T2M'][0,ltsq,lnsq]
            psfc = f.variables['PS'][0,ltsq,lnsq]
            f.close()
        if ( ('inst3_3d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f3d = j
            mer3d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer3d,'r')
            tmpmer = f.variables['T'][0,:,ltsq,lnsq]
            lvmer = f.variables['lev'][:]
            f.close()
        j = j + 1
    # Develop weights
    nlvmr = lvmer.shape[0]
    for k in range(nlvout):
        lmr = nlvmr - 1
        kfd = -1
        # No need to loop in upper atm
        if ( lvout[k] < lvmer[lmr]):
            kfd = lmr
            lwidx[k] = lmr
            hiidx[k] = lmr + 1
            lwwt[k] = 1.0
            hiwt[k] = 0.0
        # Loop otherwise
        while ( (lmr > 0) and (kfd < 0) ):
            if ((lvout[k] >= lvmer[lmr]) and (lvout[k] < lvmer[lmr-1]) ):
                kfd = lmr
                lwidx[k] = lmr - 1
                hiidx[k] = lmr
                lwwt[k] = (numpy.log(lvout[k]) - numpy.log(lvmer[lmr])) / \
                    (numpy.log(lvmer[lmr-1]) - numpy.log(lvmer[lmr]))
                hiwt[k] = 1.0 - lwwt[k]
            lmr = lmr - 1
    # Surface level indices on the output and MERRA grids
    srtsfc = sfclvl(psfc*0.01, lvout)
    mersfc = sfclvl_rev_met(psfc*0.01, lvmer, tmpmer)
    tmpmer[tmpmer > 1.0e10] = msgval
    # Set up output temp profile
    tprfout = numpy.zeros((nlvout,ltsq.shape[0],lnsq.shape[0]),numpy.float32) + msgval
    # Subset levels at upper atmos, linear profile in upper mesosphere
    hsq = numpy.arange(nlvout)
    hsb = hsq[hiidx == nlvmr]
    nupr = hsb.shape[0]
    # Model fit over the topmost three MERRA levels
    nxy = (ltsq.shape[0] * lnsq.shape[0])
    lva = nlvmr
    lvb = nlvmr-3
    # BUG FIX: the original slice tmpmer[lva:lvb] ran start > stop and was
    # therefore empty, making tmn/tmnovr NaN; [lvb:lva] selects the top
    # three levels, matching the regression loop below.
    tmn = numpy.mean(tmpmer[lvb:lva,:,:],axis=0)
    tmnovr = numpy.mean(tmpmer[lvb:lva,:,:])
    for p in range(nlvmr-1,nlvmr-4,-1):
        prsrp = numpy.tile(numpy.log(lvmer[p]),nxy)
        tflt = tmpmer[p,:,:].flatten()
        if p == (nlvmr-1):
            prsreg = numpy.zeros((nxy,),prsrp.dtype)
            prsreg[:] = prsrp
            tpreg = numpy.zeros((nxy,),tflt.dtype)
            tpreg[:] = tflt
        else:
            prsreg = numpy.append(prsreg,prsrp.flatten())
            tpreg = numpy.append(tpreg,tflt)
    slp, itcpt, r2, pvl, stderr = stats.linregress(prsreg,tpreg)
    sstr = 'T = %.3f + %.4e logp, R2 = %.4f' % (itcpt, slp, r2)
    print(sstr)
    tbsln = itcpt + slp * numpy.log(lvout[hsb])
    # Getting close to thermosphere: halve the fitted lapse in the top two levels
    tbsln[0:2] = itcpt + slp * numpy.log(lvout[3]) + slp / 2.0 * (numpy.log(lvout[0:2]) - numpy.log(lvout[3]))
    for k in range(nupr):
        tprfout[k,:,:] = tbsln[k] + (tmn - tmnovr)
    for k in range(hsb[nupr-1]+1,nlvout):
        lcmsk = numpy.zeros((ltsq.shape[0],lnsq.shape[0]),dtype=numpy.float32)
        # Interpolate only where the level is above the surface on both grids
        if (hiidx[k] > 0):
            airspsfc = psfc * 0.01
            lcmsk[ (srtsfc >= (k)) & (airspsfc > lvmer[lwidx[k]]) & (mersfc <= lwidx[k]) ] = 1.0
            tprfout[k,:,:] = lcmsk * (lwwt[k] * tmpmer[lwidx[k],:,:] + hiwt[k] * tmpmer[hiidx[k],:,:]) + msgval * (1.0 - lcmsk)
    # Loop through all locations for sfc behavior
    pref = 1000.0
    kappa = 0.286
    pvlds = numpy.arange(nlvout)
    for q in range(ltsq.shape[0]):
        for p in range(lnsq.shape[0]):
            # Identify levels needed for extrapolation
            airspsfc = psfc[q,p] * 0.01
            plvsb = pvlds[ (pvlds <= (srtsfc[q,p]+1)) & ( (pvlds > srtsfc[q,p]) | (hiidx <= mersfc[q,p] ) ) ]
            # Average potential temperature of the lowest valid MERRA levels
            # plus the 2 m temperature at the surface pressure
            prs2 = numpy.array([ lvmer[mersfc[q,p]+2], lvmer[mersfc[q,p]+1], lvmer[mersfc[q,p]], airspsfc ])
            tmp2 = numpy.array([ tmpmer[mersfc[q,p]+2,q,p], tmpmer[mersfc[q,p]+1,q,p], tmpmer[mersfc[q,p],q,p], t2m[q,p] ])
            prt2 = pref / prs2
            thet2 = tmp2 * numpy.power(prt2,kappa)
            thmn = numpy.mean(thet2)
            # Constant potential temperature below the surface
            prtinv = lvout[plvsb] / pref
            tprfout[plvsb,q,p] = thmn * numpy.power(prtinv,kappa)
    return tprfout
def merra_conv_shum_prof( srchdr, srchdt, lnsq, ltsq, lvout, vrnm = 'QV', msgval=-9999.):
    """Interpolate a MERRA specific-humidity profile onto SARTA/AIRS levels.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    lvout:  Array of output pressure levels (hPa)
    vrnm:   Name of the 3D variable to evaluate (default 'QV')
    msgval: Missing-data value used in the output

    Returns a (nlev, nlat, nlon) float32 array on the lvout levels.
    """
    flst = os.listdir(srchdr)
    dtstr = srchdt.strftime('%Y%m%d')
    f2d = -1
    f3d = -1
    j = 0
    nlvout = lvout.shape[0]
    # Log-pressure interpolation weights and bracketing MERRA level indices
    lwwt = numpy.zeros(nlvout,)
    hiwt = numpy.zeros(nlvout,)
    lwidx = numpy.zeros(nlvout,dtype=numpy.int32)
    hiidx = numpy.zeros(nlvout,dtype=numpy.int32)
    print(dtstr)
    # Scan the listing until both the 2D and 3D files for the date are read
    while ( (j < len(flst)) and ((f2d < 0) or (f3d < 0) ) ):
        if ( ('inst1_2d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f2d = j
            mer2d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer2d,'r')
            psfc = f.variables['PS'][0,ltsq,lnsq]
            f.close()
        if ( ('inst3_3d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f3d = j
            mer3d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer3d,'r')
            qvmer = f.variables[vrnm][0,:,ltsq,lnsq]
            lvmer = f.variables['lev'][:]
            f.close()
        j = j + 1
    # Develop weights
    nlvmr = lvmer.shape[0]
    for k in range(nlvout):
        lmr = nlvmr - 1
        kfd = -1
        # No need to loop in upper atm
        if ( lvout[k] < lvmer[lmr]):
            kfd = lmr
            lwidx[k] = lmr
            hiidx[k] = lmr + 1
            lwwt[k] = 1.0
            hiwt[k] = 0.0
        # Loop otherwise
        while ( (lmr > 0) and (kfd < 0) ):
            if ((lvout[k] >= lvmer[lmr]) and (lvout[k] < lvmer[lmr-1]) ):
                kfd = lmr
                lwidx[k] = lmr - 1
                hiidx[k] = lmr
                lwwt[k] = (numpy.log(lvout[k]) - numpy.log(lvmer[lmr])) / \
                    (numpy.log(lvmer[lmr-1]) - numpy.log(lvmer[lmr]))
                hiwt[k] = 1.0 - lwwt[k]
            lmr = lmr - 1
    # Surface level indices on the output and MERRA grids
    srtsfc = sfclvl(psfc*0.01, lvout)
    mersfc = sfclvl_rev_met(psfc*0.01, lvmer, qvmer)
    qvmer[qvmer > 1.0e10] = msgval
    # Set up output qv profile
    qvout = numpy.zeros((nlvout,ltsq.shape[0],lnsq.shape[0]),numpy.float32) + msgval
    for k in range(nlvout):
        lcmsk = numpy.zeros((ltsq.shape[0],lnsq.shape[0]),dtype=numpy.float32)
        # Upper atmosphere: hold the topmost MERRA value constant
        if (hiidx[k] == nlvmr):
            qvout[k,:,:] = qvmer[nlvmr-1,:,:]
        # Masking tricks for the rest: interpolate only above-surface points
        elif (hiidx[k] > 0):
            airspsfc = psfc * 0.01
            lcmsk[ (srtsfc >= (k)) & (airspsfc > lvmer[lwidx[k]]) & (mersfc <= lwidx[k]) ] = 1.0
            qvout[k,:,:] = lcmsk * (lwwt[k] * qvmer[lwidx[k],:,:] + hiwt[k] * qvmer[hiidx[k],:,:]) + msgval * (1.0 - lcmsk)
    # Loop through all locations for sfc behavior
    pvlds = numpy.arange(nlvout)
    for q in range(ltsq.shape[0]):
        for p in range(lnsq.shape[0]):
            # Identify levels needed for extrapolation
            airspsfc = psfc[q,p] * 0.01
            plvsb = pvlds[ (pvlds <= (srtsfc[q,p]+1)) & ( (pvlds > srtsfc[q,p]) | (hiidx <= mersfc[q,p] ) ) ]
            # Average QV of the three lowest valid MERRA levels
            prs2 = numpy.array([ lvmer[mersfc[q,p]+2], lvmer[mersfc[q,p]+1], lvmer[mersfc[q,p]] ])
            tmp2 = numpy.array([ qvmer[mersfc[q,p]+2,q,p], qvmer[mersfc[q,p]+1,q,p], qvmer[mersfc[q,p],q,p] ])
            thmn = numpy.mean(tmp2)
            #prs2 = numpy.array([ lvmer[mersfc[q,p]], lvmer[mersfc[q,p]+1] ])
            #tmp2 = numpy.array([ qvmer[mersfc[q,p],q,p], qvmer[mersfc[q,p]+1,q,p] ])
            #slp, itcpt, r2, pvl, stderr = stats.linregress(prs2,tmp2)
            #qvrslt = itcpt + slp * lvout[plvsb]
            qvrslt = numpy.tile(thmn,plvsb.shape[0])
            # Clamp negative mixing ratios to zero
            qvrslt[qvrslt < 0.0] = 0.0
            qvout[plvsb,q,p] = qvrslt
            #if qvout[k,q,p] < 0.0:
            #    qvout[k,q,p] = 0.0
    return qvout
def rh_from_qv_prof( qvprf, tprf, plvs, msgval=-9999.):
    """Compute a relative-humidity profile from specific humidity and temperature.

    qvprf:  Specific humidity profile, shape (nlev, ny, nx); msgval marks missing
    tprf:   Temperature profile (K), shape (nlev, ny, nx)
    plvs:   1D vector of pressure levels (hPa)
    msgval: Missing value, propagated to the output wherever qvprf is missing

    Returns a float32 array of qv / saturation-mixing-ratio.  The original
    per-level Python loop is replaced by an equivalent vectorized
    computation (same values, including the float32 cast).
    """
    # Saturation vapor pressure in hPa, AMS Glossary approximation
    es = 6.112 * numpy.exp(17.67 * (tprf - 273.15) / (tprf - 29.65))
    # Saturation mixing ratio at each level's pressure
    mmrs = 0.622 * es / plvs[:, None, None]
    # RH where qv is present, msgval elsewhere
    rhout = numpy.where(qvprf != msgval, qvprf / mmrs, msgval).astype(numpy.float32)
    return rhout
def merra_conv_heights( srchdr, srchdt, lnsq, ltsq, lvout, tprf, vrnm = 'H', sfcht = 'PHIS', msgval=-9999.):
    """Interpolate MERRA geopotential heights onto the SARTA/AIRS pressure grid.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    lvout:  Array of output pressure levels (hPa)
    tprf:   Temperature profile on the output grid (for thickness calcs)
    vrnm:   Name of the 3D geopotential height variable
    sfcht:  Name of the surface geopotential variable
    msgval: Missing value used in the output
    """
    flst = os.listdir(srchdr)
    dtstr = srchdt.strftime('%Y%m%d')
    f2d = -1
    f3d = -1
    j = 0
    nlvout = lvout.shape[0]
    # Log-pressure interpolation weights and bracketing MERRA level indices
    lwwt = numpy.zeros(nlvout,)
    hiwt = numpy.zeros(nlvout,)
    lwidx = numpy.zeros(nlvout,dtype=numpy.int32)
    hiidx = numpy.zeros(nlvout,dtype=numpy.int32)
    print(dtstr)
    # Scan the listing until both the 2D and 3D files for the date are read
    while ( (j < len(flst)) and ((f2d < 0) or (f3d < 0) ) ):
        if ( ('inst1_2d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f2d = j
            mer2d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer2d,'r')
            psfc = f.variables['PS'][0,ltsq,lnsq]
            f.close()
        if ( ('inst3_3d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f3d = j
            mer3d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer3d,'r')
            htmer = f.variables[vrnm][0,:,ltsq,lnsq]
            lvmer = f.variables['lev'][:]
            # Divide by g (9.8) -- presumably converts surface geopotential
            # (m^2 s^-2) to height in meters; confirm PHIS units
            phisfc = f[sfcht][0,ltsq,lnsq] / 9.8
            f.close()
        j = j + 1
    # Develop weights
    nlvmr = lvmer.shape[0]
    for k in range(nlvout):
        lmr = nlvmr - 1
        kfd = -1
        # No need to loop in upper atm
        if ( lvout[k] < lvmer[lmr]):
            kfd = lmr
            lwidx[k] = lmr
            hiidx[k] = lmr + 1
            lwwt[k] = 1.0
            hiwt[k] = 0.0
        # Loop otherwise
        while ( (lmr > 0) and (kfd < 0) ):
            if ((lvout[k] >= lvmer[lmr]) and (lvout[k] < lvmer[lmr-1]) ):
                kfd = lmr
                lwidx[k] = lmr - 1
                hiidx[k] = lmr
                lwwt[k] = (numpy.log(lvout[k]) - numpy.log(lvmer[lmr])) / \
                    (numpy.log(lvmer[lmr-1]) - numpy.log(lvmer[lmr]))
                hiwt[k] = 1.0 - lwwt[k]
            lmr = lmr - 1
    # Surface level indices on the output and MERRA grids
    srtsfc = sfclvl(psfc*0.01, lvout)
    mersfc = sfclvl_rev_met(psfc*0.01, lvmer, htmer)
    htmer[htmer > 1.0e10] = msgval
    # Set up output height profile
    htout = numpy.zeros((nlvout,ltsq.shape[0],lnsq.shape[0]),numpy.float32) + msgval
    # Subset levels at upper atmos: build heights upward hypsometrically
    Rd = 287.0
    hsq = numpy.arange(nlvout)
    hsb = hsq[hiidx == nlvmr]
    print(hsb)
    nupr = hsb.shape[0]
    for k in range(hsb[nupr-1],-1,-1):
        # Mean temperature of the layer between this and the next-lower level
        tmpmn = (tprf[hsb[k],:,:] + tprf[hsb[k]+1,:,:]) / 2.0
        if (k == (nupr-1)):
            # Work from MERRA top level
            pupr = lvout[hsb[k]]
            plwr = lvmer[nlvmr-1]
            thk = Rd * tmpmn / 9.8 * (numpy.log(plwr/pupr))
            htout[hsb[k],:,:] = htmer[nlvmr-1,:,:] + thk
        else:
            # Work from prvs
            pupr = lvout[hsb[k]]
            # NOTE(review): plwr here is still the MERRA top level, so the
            # thickness is measured from the MERRA top rather than from the
            # previously filled output level (lvout[hsb[k]+1]) that it is
            # added to below.  Looks like a copy of the branch above; confirm.
            plwr = lvmer[nlvmr-1]
            thk = Rd * tmpmn / 9.8 * (numpy.log(plwr/pupr))
            htout[hsb[k],:,:] = htout[hsb[k]+1,:,:] + thk
    for k in range(hsb[nupr-1]+1,nlvout):
        lcmsk = numpy.zeros((ltsq.shape[0],lnsq.shape[0]),dtype=numpy.float32)
        # Masking tricks for the rest
        if (hiidx[k] > 0):
            airspsfc = psfc * 0.01
            lcmsk[ (srtsfc >= (k)) & (airspsfc > lvmer[lwidx[k]]) & (mersfc <= lwidx[k]) ] = 1.0
            htout[k,:,:] = lcmsk * (lwwt[k] * htmer[lwidx[k],:,:] + hiwt[k] * htmer[hiidx[k],:,:]) + msgval * (1.0 - lcmsk)
    # Loop through all locations for sfc behavior
    pvlds = numpy.arange(nlvout)
    for q in range(ltsq.shape[0]):
        for p in range(lnsq.shape[0]):
            # Identify levels needed for extrapolation
            airspsfc = psfc[q,p] * 0.01
            plvsb = pvlds[ (pvlds <= (srtsfc[q,p])) & ( (pvlds > srtsfc[q,p]) | (hiidx <= mersfc[q,p] ) ) ]
            # Linear fit of height vs pressure through the lowest valid
            # MERRA levels plus the surface geopotential height
            prs2 = numpy.array([ lvmer[mersfc[q,p]+1], lvmer[mersfc[q,p]], airspsfc ])
            tmp2 = numpy.array([ htmer[mersfc[q,p]+1,q,p], htmer[mersfc[q,p],q,p], phisfc[q,p] ])
            slp, itcpt, r2, pvl, stderr = stats.linregress(prs2,tmp2)
            httmp = itcpt + slp * lvout[plvsb]
            # Negative extrapolated heights are clamped to zero
            httmp[httmp < 0] = 0.0
            htout[plvsb,q,p] = httmp
    return htout
def merra_conv_cfrac_prof( srchdr, srchdt, lnsq, ltsq, lvout, vrnm = 'CLOUD', msgval=-9999.):
    """Interpolate a MERRA cloud-fraction profile onto SARTA/AIRS levels.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    lvout:  Array of output pressure levels (hPa)
    vrnm:   Name of the 3D cloud-fraction variable to read
    msgval: Missing-data value used in the output
    """
    flst = os.listdir(srchdr)
    dtstr = srchdt.strftime('%Y%m%d')
    f2d = -1
    f3d = -1
    j = 0
    nlvout = lvout.shape[0]
    # Log-pressure interpolation weights and bracketing MERRA level indices
    lwwt = numpy.zeros(nlvout,)
    hiwt = numpy.zeros(nlvout,)
    lwidx = numpy.zeros(nlvout,dtype=numpy.int32)
    hiidx = numpy.zeros(nlvout,dtype=numpy.int32)
    print(dtstr)
    # Scan until both the 2D surface file and the 3D radiation file are read
    while ( (j < len(flst)) and ((f2d < 0) or (f3d < 0) ) ):
        if ( ('inst1_2d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f2d = j
            mer2d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer2d,'r')
            psfc = f.variables['PS'][0,ltsq,lnsq]
            f.close()
        if ( ('tavg3_3d_rad' in flst[j]) and (dtstr in flst[j]) ):
            f3d = j
            mer3d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer3d,'r')
            cfmer = f.variables[vrnm][0,:,ltsq,lnsq]
            lvmer = f.variables['lev'][:]
            f.close()
        j = j + 1
    # Develop weights
    nlvmr = lvmer.shape[0]
    for k in range(nlvout):
        lmr = nlvmr - 1
        kfd = -1
        # No need to loop in upper atm
        if ( lvout[k] < lvmer[lmr]):
            kfd = lmr
            lwidx[k] = lmr
            hiidx[k] = lmr + 1
            lwwt[k] = 1.0
            hiwt[k] = 0.0
        # Loop otherwise
        while ( (lmr > 0) and (kfd < 0) ):
            if ((lvout[k] >= lvmer[lmr]) and (lvout[k] < lvmer[lmr-1]) ):
                kfd = lmr
                lwidx[k] = lmr - 1
                hiidx[k] = lmr
                lwwt[k] = (numpy.log(lvout[k]) - numpy.log(lvmer[lmr])) / \
                    (numpy.log(lvmer[lmr-1]) - numpy.log(lvmer[lmr]))
                hiwt[k] = 1.0 - lwwt[k]
            lmr = lmr - 1
    # Surface level indices on the output and MERRA grids
    srtsfc = sfclvl(psfc*0.01, lvout)
    mersfc = sfclvl_rev_met(psfc*0.01, lvmer, cfmer)
    cfmer[cfmer > 1.0e10] = msgval
    # Set up output cloud fraction profile
    cfout = numpy.zeros((nlvout,ltsq.shape[0],lnsq.shape[0]),numpy.float32) + msgval
    for k in range(nlvout):
        lcmsk = numpy.zeros((ltsq.shape[0],lnsq.shape[0]),dtype=numpy.float32)
        # Upper atmosphere: hold the topmost MERRA value constant
        if (hiidx[k] == nlvmr):
            cfout[k,:,:] = cfmer[nlvmr-1,:,:]
        # Masking tricks for the rest: interpolate only above-surface points
        elif (hiidx[k] > 0):
            airspsfc = psfc * 0.01
            lcmsk[ (srtsfc >= (k)) & (airspsfc > lvmer[lwidx[k]]) & (mersfc <= lwidx[k]) ] = 1.0
            cfout[k,:,:] = lcmsk * (lwwt[k] * cfmer[lwidx[k],:,:] + hiwt[k] * cfmer[hiidx[k],:,:]) + msgval * (1.0 - lcmsk)
    # Loop through all locations for sfc behavior
    pvlds = numpy.arange(nlvout)
    for q in range(ltsq.shape[0]):
        for p in range(lnsq.shape[0]):
            # Identify levels needed for extrapolation
            airspsfc = psfc[q,p] * 0.01
            plvsb = pvlds[ (pvlds <= (srtsfc[q,p]+1)) & ( (pvlds > srtsfc[q,p]) | (hiidx <= mersfc[q,p] ) ) ]
            # Average cfrac of the three lowest valid MERRA levels
            prs2 = numpy.array([ lvmer[mersfc[q,p]+2], lvmer[mersfc[q,p]+1], lvmer[mersfc[q,p]] ])
            tmp2 = numpy.array([ cfmer[mersfc[q,p]+2,q,p], cfmer[mersfc[q,p]+1,q,p], cfmer[mersfc[q,p],q,p] ])
            thmn = numpy.mean(tmp2)
            cfrslt = numpy.tile(thmn,plvsb.shape[0])
            # Clamp to the physical range [0, 1]
            cfrslt[cfrslt < 0.0] = 0.0
            cfrslt[cfrslt > 1.0] = 1.0
            cfout[plvsb,q,p] = cfrslt
    return cfout
def setup_airs_cloud(flnm, tms, lats, lons, tmunit = 'Seconds since 1993-01-01 00:00:00'):
    """Create the matched AIRS/MERRA cloud output file with its variables.

    flnm:   Name of the netCDF output file to create
    tms:    Time coordinate array
    lats:   Latitude coordinate array
    lons:   Longitude coordinate array
    tmunit: Units string for the time coordinate
    """
    ntm = tms.shape[0]
    nlat = lats.shape[0]
    nlon = lons.shape[0]
    # Create output file and dimensions
    qout = Dataset(flnm,'w')
    qout.createDimension('lon',nlon)
    qout.createDimension('lat',nlat)
    qout.createDimension('time',ntm)
    qout.createDimension('AIRSFOV',9)
    # Match each coordinate variable's precision to its input array
    if (lons.dtype == numpy.float32):
        lntp = 'f4'
    else:
        lntp = 'f8'
    varlon = qout.createVariable('lon',lntp,['lon'], fill_value = -9999)
    varlon[:] = lons
    varlon.long_name = 'longitude'
    varlon.units='degrees_east'
    varlon.missing_value = -9999
    if (lats.dtype == numpy.float32):
        lttp = 'f4'
    else:
        lttp = 'f8'
    varlat = qout.createVariable('lat',lttp,['lat'], fill_value = -9999)
    varlat[:] = lats
    varlat.long_name = 'latitude'
    varlat.units='degrees_north'
    varlat.missing_value = -9999
    if (tms.dtype == numpy.float32):
        tmtp = 'f4'
    else:
        tmtp = 'f8'
    # BUG FIX: the time variable was created with the latitude type (lttp),
    # leaving tmtp unused; use the time type determined just above.
    vartm = qout.createVariable('time',tmtp,['time'], fill_value = -9999)
    vartm[:] = tms
    vartm.long_name = 'time'
    vartm.units = tmunit
    vartm.missing_value = -9999
    # Other output variables
    varcfrc1 = qout.createVariable('AIRS_CldFrac_1','f4',['time','lat','lon','AIRSFOV'], fill_value = -9999)
    varcfrc1.long_name = 'AIRS cloud fraction, upper level'
    varcfrc1.units = 'unitless'
    varcfrc1.missing_value = -9999
    varcfrc2 = qout.createVariable('AIRS_CldFrac_2','f4',['time','lat','lon','AIRSFOV'], fill_value = -9999)
    varcfrc2.long_name = 'AIRS cloud fraction, lower level'
    varcfrc2.units = 'unitless'
    varcfrc2.missing_value = -9999
    varcqc1 = qout.createVariable('AIRS_CldFrac_QC_1','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
    varcqc1.long_name = 'AIRS cloud fraction quality flag, upper level'
    varcqc1.units = 'unitless'
    varcqc1.missing_value = -99
    varcqc2 = qout.createVariable('AIRS_CldFrac_QC_2','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
    varcqc2.long_name = 'AIRS cloud fraction quality flag, lower level'
    varcqc2.units = 'unitless'
    varcqc2.missing_value = -99
    varncld = qout.createVariable('AIRS_nCld','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
    varncld.long_name = 'AIRS number of cloud layers'
    varncld.units = 'unitless'
    varncld.missing_value = -99
    qout.close()
    return
def airs_cfrac_match_merra(flnm, tmidx, tmday, lats, lons, msgvl = -9999, \
    l2srch = '/archive/AIRSOps/airs/gdaac/v6'):
    """Match AIRS L2 cloud fractions to the MERRA grid for one output time.

    flnm:   Name of the output file (as created by setup_airs_cloud)
    tmidx:  Time index in the output file to write
    tmday:  Datetime object with the matchup time
    lats:   Latitude variable array of the output grid
    lons:   Longitude variable array of the output grid
    msgvl:  Missing value for the float outputs
    l2srch: Root directory of the AIRS Level 2 archive
    """
    # Search AIRS Level 2 directory for the given day
    airsdr = '%s/%04d/%02d/%02d/airs2ret' % (l2srch,tmday.year,tmday.month,tmday.day)
    # NOTE(review): dsclst is never filled; asclst receives every granule
    # passing the time/region screen regardless of ascending/descending.
    dsclst = []
    asclst = []
    nlat = lats.shape[0]
    nlon = lons.shape[0]
    # Widen the search box by 5 degrees on every side
    lonmn = lons[0] - 5.0
    lonmx = lons[nlon-1] + 5.0
    latmn = lats[0] - 5.0
    latmx = lats[nlat-1] + 5.0
    # AIRS time is seconds since 1993-01-01
    d0 = datetime.datetime(1993,1,1,0,0,0)
    ddif = tmday - d0
    bsdif = ddif.total_seconds()
    # Set up reference frame for the MERRA grid
    ltrp = numpy.repeat(lats,nlon)
    ltidx = numpy.repeat(numpy.arange(nlat),nlon)
    lnrp = numpy.tile(lons,nlat)
    lnidx = numpy.tile(numpy.arange(nlon),nlat)
    merfrm = pandas.DataFrame({'GridLonIdx': lnidx, 'GridLatIdx': ltidx, \
        'GridLon': lnrp, 'GridLat': ltrp})
    if (os.path.exists(airsdr)):
        fllst = os.listdir(airsdr)
        #print(fllst)
        for j in range(len(fllst)):
            lncr = len(fllst[j])
            l4 = lncr - 4
            # Only consider HDF granule files
            if (fllst[j][l4:lncr] == '.hdf'):
                l2fl = '%s/%s' % (airsdr,fllst[j])
                ncl2 = Dataset(l2fl)
                # slrzn is read but not used below -- possibly intended for
                # an ascending/descending check; confirm
                slrzn = ncl2.variables['solzen'][:,:]
                l2lat = ncl2.variables['Latitude'][:,:]
                l2lon = ncl2.variables['Longitude'][:,:]
                l2tm = ncl2.variables['Time'][:,:]
                ncl2.close()
                # Check lat/lon ranges and asc/dsc
                l2tmdf = numpy.absolute(l2tm - bsdif)
                l2mntm = numpy.min(l2tmdf)
                # Within 4 hours
                if l2mntm < 14400.0:
                    ltflt = l2lat.flatten()
                    lnflt = l2lon.flatten()
                    latsb = ltflt[(ltflt >= latmn) & (ltflt <= latmx)]
                    lonsb = lnflt[(lnflt >= lonmn) & (lnflt <= lonmx)]
                    if ( (latsb.shape[0] > 0) and (lonsb.shape[0] > 0) ):
                        asclst.append(fllst[j])
                        sstr = '%s %.2f' % (fllst[j], l2mntm)
                        print(sstr)
    # Set up outputs
    cld1arr = numpy.zeros((nlat,nlon,9),dtype=numpy.float32) + msgvl
    cld2arr = numpy.zeros((nlat,nlon,9),dtype=numpy.float32) + msgvl
    cld1qc = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
    cld2qc = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
    ncldarr = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
    #print(asclst)
    tmch = 0
    if (len(asclst) > 0):
        # Start matchups
        for j in range(len(asclst)):
            l2fl = '%s/%s' % (airsdr,asclst[j])
            ncl2 = Dataset(l2fl)
            l2lat = ncl2.variables['Latitude'][:,:]
            l2lon = ncl2.variables['Longitude'][:,:]
            cfrcair = ncl2.variables['CldFrcStd'][:,:,:,:,:]
            cfrcaqc = ncl2.variables['CldFrcStd_QC'][:,:,:,:,:]
            ncldair = ncl2.variables['nCld'][:,:,:,:]
            ncl2.close()
            nairtrk = l2lat.shape[0]
            nairxtk = l2lat.shape[1]
            # Data Frame of L2 footprints
            tkidx = numpy.repeat(numpy.arange(nairtrk),nairxtk)
            xtidx = numpy.tile(numpy.arange(nairxtk),nairtrk)
            l2lnflt = l2lon.flatten().astype(numpy.float64)
            l2ltflt = l2lat.flatten().astype(numpy.float64)
            l2frm = pandas.DataFrame({'L2LonIdx': xtidx, 'L2LatIdx': tkidx, \
                'L2Lon': l2lnflt, 'L2Lat': l2ltflt})
            # Snap footprints to the 0.625 x 0.5 degree MERRA grid and join
            l2frm['GridLon'] = numpy.around(l2frm['L2Lon']/0.625) * 0.625
            l2frm['GridLat'] = numpy.around(l2frm['L2Lat']/0.5) * 0.5
            l2mrg = pandas.merge(l2frm,merfrm,on=['GridLon','GridLat'])
            print(l2mrg.shape)
            tmch = tmch + l2mrg.shape[0]
            #if j == 0:
            #    print(asclst[j])
            #    print(l2mrg[0:15])
            #mrggrp = l2mrg.groupby(['GridLatIdx','GridLonIdx']).count()
            #gtxt = 'Group: %d' % mrggrp.shape[0]
            # Output data if available
            for k in range(l2mrg.shape[0]):
                yidxout = l2mrg['GridLatIdx'].values[k]
                xidxout = l2mrg['GridLonIdx'].values[k]
                yidxl2 = l2mrg['L2LatIdx'].values[k]
                xidxl2 = l2mrg['L2LonIdx'].values[k]
                cld1arr[yidxout,xidxout,:] = cfrcair[yidxl2,xidxl2,:,:,0].flatten().astype(numpy.float32)
                cld2arr[yidxout,xidxout,:] = cfrcair[yidxl2,xidxl2,:,:,1].flatten().astype(numpy.float32)
                cld1qc[yidxout,xidxout,:] = cfrcaqc[yidxl2,xidxl2,:,:,0].flatten().astype(numpy.int16)
                cld2qc[yidxout,xidxout,:] = cfrcaqc[yidxl2,xidxl2,:,:,1].flatten().astype(numpy.int16)
                ncldarr[yidxout,xidxout,:] = ncldair[yidxl2,xidxl2,:,:].flatten().astype(numpy.int16)
                if (cfrcair[yidxl2,xidxl2,1,1,0] < 0.0):
                    print(cfrcair[yidxl2,xidxl2,1,1,0])
            # Debug print of the last match in this granule
            # NOTE(review): raises NameError if l2mrg is empty -- confirm
            frcstr = '%d, %d: %.4f: ' % (yidxout,xidxout,cld1arr[yidxout,xidxout,4])
            print(frcstr)
    # Diagnostics: how many center-FOV fractions were filled, total matches
    c1chk = cld1arr[:,:,4].flatten()
    c1sb = c1chk[c1chk >= 0.0]
    print(c1sb.shape)
    print(tmch)
    # Output
    qout = Dataset(flnm,'r+')
    varcfrc1 = qout.variables['AIRS_CldFrac_1']
    varcfrc1[tmidx,:,:,:] = cld1arr[:,:,:]
    varcfrc2 = qout.variables['AIRS_CldFrac_2']
    varcfrc2[tmidx,:,:,:] = cld2arr[:,:,:]
    varcfqc1 = qout.variables['AIRS_CldFrac_QC_1']
    varcfqc1[tmidx,:,:,:] = cld1qc[:,:,:]
    varcfqc2 = qout.variables['AIRS_CldFrac_QC_2']
    varcfqc2[tmidx,:,:,:] = cld2qc[:,:,:]
    varncld = qout.variables['AIRS_nCld']
    varncld[tmidx,:,:,:] = ncldarr[:,:,:]
    qout.close()
    return
def merra_find_cloud_slab( srchdr, srchdt, lnsq, ltsq, outfl, tidx, msgval=-9999.):
    """Identify up to two cloud slabs per grid point from MERRA condensate.

    srchdr: Directory with MERRA daily files
    srchdt: Desired date (a python datetime object)
    lnsq:   Longitude subset index sequence
    ltsq:   Latitude subset index sequence
    outfl:  Output netCDF file name (opened read/write)
    tidx:   Output time index

    Writes slab type, particle size, bottom/top pressure, cloud-top
    temperature, and water path for the two dominant slabs into outfl.
    """
    flst = os.listdir(srchdr)
    dtstr = srchdt.strftime('%Y%m%d')
    # Minimum condensate mixing ratio treated as cloud
    mincldvl = 1.0e-7
    f2d = -1
    f3d = -1
    j = 0
    print(dtstr)
    # Scan the listing until both the 2D and 3D files for the date are read
    while ( (j < len(flst)) and ((f2d < 0) or (f3d < 0) ) ):
        if ( ('inst1_2d_asm' in flst[j]) and (dtstr in flst[j]) ):
            f2d = j
            mer2d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer2d,'r')
            psfc = f.variables['PS'][0,ltsq,lnsq]
            f.close()
        if ( ('inst3_3d_asm' in flst[j]) and (dtstr in flst[j]) ):
            # Temp, QI, QL in 3D files
            f3d = j
            mer3d = '%s/%s' % (srchdr,flst[j])
            f = Dataset(mer3d,'r')
            qlmer = f.variables['QL'][0,:,ltsq,lnsq]
            qimer = f.variables['QI'][0,:,ltsq,lnsq]
            tmpmer = f.variables['T'][0,:,ltsq,lnsq]
            lvmer = f.variables['lev'][:]
            f.close()
        j = j + 1
    nlon = lnsq.shape[0]
    nlat = ltsq.shape[0]
    # Liquid, ice indicators (fill values -> cloud-free)
    qimer[qimer > 1.0e10] = 0.0
    qlmer[qlmer > 1.0e10] = 0.0
    qiind = (qimer >= mincldvl)
    # In-place dtype reinterpretation: view the bool buffer as int8
    qiind.dtype = numpy.int8
    qlind = (qlmer >= mincldvl)
    qlind.dtype = numpy.int8
    qism = numpy.sum(qiind,axis=0)
    qlsm = numpy.sum(qlind,axis=0)
    # Output arrays
    nslb = numpy.zeros(qism.shape,dtype=numpy.int16) + 1
    nslb[(qism == 0) & (qlsm == 0)] = 0
    ctyp1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    ctyp2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cpsz1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cpsz2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cpbt1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cpbt2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cptp1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cptp2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cttp1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cttp2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cngwt1 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    cngwt2 = numpy.zeros(qism.shape,dtype=numpy.float32) + msgval
    # Vertical differences: +1 marks a cloud-base transition, -1 a cloud-top
    print('Differencing')
    qidif = numpy.diff(qiind,n=1,axis=0)
    print(qidif.shape)
    print(numpy.amax(qidif))
    print(numpy.amin(qidif))
    qldif = numpy.diff(qlind,n=1,axis=0)
    nlvmer = lvmer.shape[0]
    lvsq = numpy.arange(lvmer.shape[0])
    lvsqdf = numpy.arange(1,lvmer.shape[0])
    for j in range(nlat):
        for i in range(nlon):
            slbct = 0
            if (nslb[j,i] > 0):
                # Freezing level (log-pressure interpolation to 273.15 K)
                lvfr = lvsq[(tmpmer[:,j,i] < 273.15) & (tmpmer[:,j,i] > 0.0)]
                if (lvfr.shape[0] == lvsq.shape[0]):
                    frzlvl = psfc[j,i] * 0.01
                else:
                    dllgp = (numpy.log(lvmer[lvfr[0]-1]) - numpy.log(lvmer[lvfr[0]])) * \
                        (273.15 - tmpmer[lvfr[0],j,i]) / \
                        (tmpmer[lvfr[0]-1,j,i] - tmpmer[lvfr[0],j,i])
                    frzlvl = lvmer[lvfr[0]] * numpy.exp(dllgp)
                nlqd = 0
                nice = 0
                if (qism[j,i] > 1):
                    # Ice slab
                    litp = lvsqdf[qidif[:,j,i] == -1]
                    libt = lvsqdf[qidif[:,j,i] == 1]
                    if (litp.shape[0] != libt.shape[0]):
                        # Surface case
                        libt = numpy.append([0],libt)
                    nice = litp.shape[0]
                    iwp = numpy.zeros(nice,dtype=numpy.float32)
                    pbt = numpy.zeros(nice,dtype=numpy.float32)
                    ptp = numpy.zeros(nice,dtype=numpy.float32)
                    ctt = numpy.zeros(nice,dtype=numpy.float32)
                    pstrt = (psfc[j,i]-1.0) * 0.01
                    for sl1 in range(nice):
                        # Find bottom pres
                        if libt[sl1] > 0:
                            lst1 = libt[sl1] - 1
                            dllgp = (numpy.log(lvmer[lst1]) - numpy.log(lvmer[lst1+1])) * \
                                (mincldvl - qimer[lst1+1,j,i]) / \
                                (qimer[lst1,j,i] - qimer[lst1+1,j,i])
                            pchk = lvmer[lst1+1] * numpy.exp(dllgp)
                        else:
                            # Cloud at bottom level
                            # BUG FIX: this branch previously overwrote lst1
                            # with llbt[sl1] -- the *liquid* base array, which
                            # is undefined (NameError) or stale from an
                            # earlier grid point while processing ice slabs.
                            lst1 = libt[sl1]
                            pchk = (psfc[j,i]-1.0) * 0.01
                        if (pchk < pstrt):
                            pstrt = pchk
                        pbt[sl1] = pstrt
                        # Top pres
                        lst2 = litp[sl1] - 1
                        dllgp = (numpy.log(lvmer[lst2]) - numpy.log(lvmer[lst2+1])) * \
                            (mincldvl - qimer[lst2+1,j,i]) / \
                            (qimer[lst2,j,i] - qimer[lst2+1,j,i])
                        pchk = lvmer[lst2+1] * numpy.exp(dllgp)
                        if (pchk > pbt[sl1]):
                            pchk = pbt[sl1] - 10.0
                        ptp[sl1] = pchk
                        pstrt = pchk
                        # Temperature at cloud top
                        lwwt = (numpy.log(lvmer[lst2+1]) - numpy.log(ptp[sl1])) / \
                            (numpy.log(lvmer[lst2]) - numpy.log(lvmer[lst2+1]))
                        hiwt = 1.0 - lwwt
                        ctt[sl1] = lwwt * tmpmer[lst2,j,i] + hiwt * tmpmer[lst2+1,j,i]
                        # Integrate IWP (kg m^-2)
                        iwp[sl1] = 0.0
                        for slw in range(lst1,lst2):
                            dlprs = (lvmer[slw] - lvmer[slw+1]) * 100.0
                            iwp[sl1] = iwp[sl1] + (qimer[slw,j,i] + qimer[slw+1,j,i]) * dlprs / 9.8
                    icefrm = pandas.DataFrame({'PresBot': pbt, 'PresTop': ptp, 'CTTemp': ctt, \
                        'WtrPath': iwp})
                    icefrm['CldType'] = 'Ice'
                if (qlsm[j,i] > 1):
                    # Water slabs
                    lltp = lvsqdf[qldif[:,j,i] == -1]
                    llbt = lvsqdf[qldif[:,j,i] == 1]
                    if (lltp.shape[0] != llbt.shape[0]):
                        # Surface case
                        llbt = numpy.append([0],llbt)
                    if ( (j == 21) and (i == 11) ):
                        print(lltp)
                        print(llbt)
                    nlqd = lltp.shape[0]
                    pbt = numpy.zeros(nlqd,dtype=numpy.float32)
                    ptp = numpy.zeros(nlqd,dtype=numpy.float32)
                    ctt = numpy.zeros(nlqd,dtype=numpy.float32)
                    lwp = numpy.zeros(nlqd,dtype=numpy.float32)
                    pstrt = (psfc[j,i]-1.0) * 0.01
                    for sl1 in range(nlqd):
                        if llbt[sl1] > 0:
                            lst1 = llbt[sl1] - 1
                            dllgp = (numpy.log(lvmer[lst1]) - numpy.log(lvmer[lst1+1])) * \
                                (mincldvl - qlmer[lst1+1,j,i]) / \
                                (qlmer[lst1,j,i] - qlmer[lst1+1,j,i])
                            pchk = lvmer[lst1+1] * numpy.exp(dllgp)
                        else:
                            # Cloud at bottom level
                            lst1 = llbt[sl1]
                            pchk = (psfc[j,i]-1.0) * 0.01
                        # Find bottom pres
                        if (pchk < pstrt):
                            pstrt = pchk
                        pbt[sl1] = pstrt
                        # Top pres
                        lst2 = lltp[sl1] - 1
                        dllgp = (numpy.log(lvmer[lst2]) - numpy.log(lvmer[lst2+1])) * \
                            (mincldvl - qlmer[lst2+1,j,i]) / \
                            (qlmer[lst2,j,i] - qlmer[lst2+1,j,i])
                        pchk = lvmer[lst2+1] * numpy.exp(dllgp)
                        if (pchk > pbt[sl1]):
                            pchk = pbt[sl1] - 10.0
                        ptp[sl1] = pchk
                        pstrt = pchk
                        # Temperature
                        lwwt = (numpy.log(ptp[sl1]) - numpy.log(lvmer[lst2+1])) / \
                            (numpy.log(lvmer[lst2]) - numpy.log(lvmer[lst2+1]))
                        hiwt = 1.0 - lwwt
                        ctt[sl1] = lwwt * tmpmer[lst2,j,i] + hiwt * tmpmer[lst2+1,j,i]
                        # Integrate LWP (kg m^-2)
                        lwp[sl1] = 0.0
                        for slw in range(lst1,lst2):
                            dlprs = (lvmer[slw] - lvmer[slw+1]) * 100.0
                            lwp[sl1] = lwp[sl1] + (qlmer[slw,j,i] + qlmer[slw+1,j,i]) * dlprs / 9.8
                    wtrfrm = pandas.DataFrame({'PresBot': pbt, 'PresTop': ptp, 'CTTemp': ctt, \
                        'WtrPath': lwp})
                    wtrfrm['CldType'] = 'Water'
                # Combine the phase frames
                if ( (nice > 0) and (nlqd > 0)):
                    cldfrm = icefrm.append(wtrfrm,ignore_index=True)
                elif (nice > 0):
                    cldfrm = icefrm
                elif (nlqd > 0):
                    cldfrm = wtrfrm
                else:
                    nstr = 'No slab found for lat %d, lon %d' % (j,i)
                    slbct = 0
                if ( (nice > 0) or (nlqd > 0) ):
                    # Rank slabs by water path adjusted for phase consistency
                    # relative to the freezing level
                    cldfrm['FrzLvl'] = frzlvl
                    cldfrm['DPCloud'] = cldfrm['PresBot'] - cldfrm['PresTop']
                    cldfrm['DPPhase'] = 0.0
                    cldfrm.loc[ (cldfrm['CldType'] == 'Water') & \
                        (cldfrm['PresTop'] < cldfrm['FrzLvl']),'DPPhase' ] = cldfrm['PresTop']-cldfrm['FrzLvl']
                    cldfrm.loc[ (cldfrm['CldType'] == 'Ice') & \
                        (cldfrm['PresBot'] > cldfrm['FrzLvl']),'DPPhase' ] = cldfrm['FrzLvl']-cldfrm['PresBot']
                    cldfrm['AdjWtrPath'] = (cldfrm['DPCloud'] + cldfrm['DPPhase']) * cldfrm['WtrPath'] / cldfrm['DPCloud']
                    cldfrm = cldfrm.sort_values(by=['AdjWtrPath'],ascending=[False])
                    if ( (j == 21) and (i == 11) ):
                        print(psfc[j,i]*0.01)
                        print(cldfrm)
                        print(lwwt)
                        print(qlmer[:,j,i])
                        print(qimer[:,j,i])
                    # Final slab selection
                    if cldfrm.shape[0] >= 2:
                        cldslc = cldfrm[0:2]
                        slbct = 2
                        # Check overlap and re-calc
                        chg2 = False
                        if ((cldslc['PresBot'].values[0] > cldslc['PresBot'].values[1]) and \
                            (cldslc['PresTop'].values[0] < cldslc['PresBot'].values[1])):
                            cldslc['PresBot'].values[1] = cldslc['PresTop'].values[0] - 10.0
                            chg2 = True
                        elif ((cldslc['PresBot'].values[0] < cldslc['PresBot'].values[1]) and \
                            (cldslc['PresTop'].values[1] < cldslc['PresBot'].values[0])):
                            cldslc['PresTop'].values[1] = cldslc['PresBot'].values[0] + 10.0
                            chg2 = True
                        if chg2:
                            # Re-integrate the adjusted second slab
                            if cldslc['CldType'].values[1] == 'Ice':
                                cpth = qimer[:,j,i]
                            elif cldslc['CldType'].values[1] == 'Water':
                                cpth = qlmer[:,j,i]
                            pbt2 = cldslc['PresBot'].values[1]
                            ptp2 = cldslc['PresTop'].values[1]
                            lvrg = lvsq[(lvmer <= pbt2) & (lvmer >= ptp2)]
                            if lvrg.shape[0] == 0:
                                # Remove the slab!
                                cldslc = cldslc[0:1]
                                slbct = 1
                            else:
                                lst1 = lvrg[0]
                                lst2 = lvrg[lvrg.shape[0]-1]
                                # Temperature at the adjusted top
                                lwwt = (numpy.log(ptp2) - numpy.log(lvmer[lst2+1])) / \
                                    (numpy.log(lvmer[lst2]) - numpy.log(lvmer[lst2+1]))
                                hiwt = 1.0 - lwwt
                                cldslc['CTTemp'].values[1] = lwwt * tmpmer[lst2,j,i] + hiwt * tmpmer[lst2+1,j,i]
                                if ( (j == 25) and (i == 27) ):
                                    strt = 'Temp adjust, Lst: %d, Pres[lst]: %.4f, PTP: %.4f' % (lst2,lvmer[lst2], ptp2)
                                    print(strt)
                                    print(lwwt)
                                # Integrate LWP (kg m^-2)
                                pth2 = 0.0
                                for slw in range(lst1,lst2):
                                    dlprs = (lvmer[slw] - lvmer[slw+1]) * 100.0
                                    pth2 = pth2 + (cpth[slw] + cpth[slw+1]) * dlprs / 9.8
                                cldslc['WtrPath'].values[1] = pth2
                        # Finally sort vertically
                        cldslc = cldslc.sort_values(by=['PresBot'],ascending=[False])
                    elif cldfrm.shape[0] == 1:
                        cldslc = cldfrm
                        slbct = 1
                    cldslc = cldslc.reset_index(drop=True)
                    if ( (j == 21) and (i == 11) ):
                        print(cldslc)
            # Output arrays
            nslb[j,i] = slbct
            if slbct >= 1:
                if cldslc['CldType'].values[0] == 'Water':
                    ctyp1[j,i] = 101.0
                    cpsz1[j,i] = 20.0
                elif cldslc['CldType'].values[0] == 'Ice':
                    ctyp1[j,i] = 201.0
                    cpsz1[j,i] = 80.0
                cngwt1[j,i] = cldslc['WtrPath'].values[0]
                cpbt1[j,i] = cldslc['PresBot'].values[0]
                cptp1[j,i] = cldslc['PresTop'].values[0]
                cttp1[j,i] = cldslc['CTTemp'].values[0]
            if slbct == 2:
                if cldslc['CldType'].values[1] == 'Water':
                    ctyp2[j,i] = 101.0
                    cpsz2[j,i] = 20.0
                elif cldslc['CldType'].values[1] == 'Ice':
                    ctyp2[j,i] = 201.0
                    cpsz2[j,i] = 80.0
                cngwt2[j,i] = cldslc['WtrPath'].values[1]
                cpbt2[j,i] = cldslc['PresBot'].values[1]
                cptp2[j,i] = cldslc['PresTop'].values[1]
                cttp2[j,i] = cldslc['CTTemp'].values[1]
    # Output
    qout = Dataset(outfl,'r+')
    varctp1 = qout.variables['ctype1']
    varctp1[tidx,:,:] = ctyp1[:,:]
    varctp2 = qout.variables['ctype2']
    varctp2[tidx,:,:] = ctyp2[:,:]
    varpsz1 = qout.variables['cpsize1']
    varpsz1[tidx,:,:] = cpsz1[:,:]
    varpsz2 = qout.variables['cpsize2']
    varpsz2[tidx,:,:] = cpsz2[:,:]
    varpbt1 = qout.variables['cprbot1']
    varpbt1[tidx,:,:] = cpbt1[:,:]
    varpbt2 = qout.variables['cprbot2']
    varpbt2[tidx,:,:] = cpbt2[:,:]
    varptp1 = qout.variables['cprtop1']
    varptp1[tidx,:,:] = cptp1[:,:]
    varptp2 = qout.variables['cprtop2']
    varptp2[tidx,:,:] = cptp2[:,:]
    varctt1 = qout.variables['cstemp1']
    varctt1[tidx,:,:] = cttp1[:,:]
    varctt2 = qout.variables['cstemp2']
    varctt2[tidx,:,:] = cttp2[:,:]
    # Convert ngwat to g m^-2
    cngwt1[cngwt1 > 0.0] = cngwt1[cngwt1 > 0] * 1000.0
    varngw1 = qout.variables['cngwat1']
    varngw1[tidx,:,:] = cngwt1[:,:]
    cngwt2[cngwt2 > 0.0] = cngwt2[cngwt2 > 0] * 1000.0
    varngw2 = qout.variables['cngwat2']
    varngw2[tidx,:,:] = cngwt2[:,:]
    qout.close()
    return
def airs_granule_overlap(mtfl, yrchc, mnchc, dychc, grnchc, mskvr, mskvl, \
                         l2srch = '/archive/AIRSOps/airs/gdaac/v6'):
    # Find range of AIRS scan rows overlapping a template region
    # mtfl:   MERRA file with mask information (read with netCDF4 Dataset)
    # yrchc:  Year
    # mnchc:  Month
    # dychc:  Day
    # grnchc: Granule number (used to match the AIRS L2 file name)
    # mskvr:  Name of region mask variable
    # mskvl:  Value of region mask for Region Choice
    # l2srch: Root directory of the AIRS Level 2 archive to search
    # NOTE(review): function is truncated in this view; the merge result
    # (l2mrg) is computed but whatever follows it is not visible here.
    # Mask, lat, lon
    f = Dataset(mtfl,'r')
    mask = f.variables[mskvr][:,:]
    latmet = f.variables['lat'][:]
    lonmet = f.variables['lon'][:]
    tminf = f.variables['time'][:]          # NOTE(review): unused in the visible portion
    tmunit = f.variables['time'].units[:]   # NOTE(review): unused in the visible portion
    f.close()
    # 0/1 indicator of the chosen region within the full grid
    mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
    print(mskvl)
    mskind[mask == mskvl] = 1
    lnsq = numpy.arange(lonmet.shape[0])
    ltsq = numpy.arange(latmet.shape[0])
    # Subset a bit: bounding box of grid columns/rows that touch the mask
    lnsm = numpy.sum(mskind,axis=0)
    ltsm = numpy.sum(mskind,axis=1)
    lnmn = numpy.amin(lnsq[lnsm > 0])
    lnmx = numpy.amax(lnsq[lnsm > 0]) + 1   # +1: exclusive upper bound for slicing
    ltmn = numpy.amin(ltsq[ltsm > 0])
    ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
    stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
    print(stridx)
    nx = lnmx - lnmn
    ny = ltmx - ltmn
    nzout = 101                             # NOTE(review): unused in the visible portion
    # Flattened lon/lat coordinates of every cell in the bounding box
    lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
    ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
    mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
    mskflt = mskblk.flatten()
    # Set up reference frame
    #ltrp = numpy.repeat(lats,nlon)
    #ltidx = numpy.repeat(numpy.arange(nlat),nlon)
    #lnrp = numpy.tile(lons,nlat)
    #lnidx = numpy.tile(numpy.arange(nlon),nlat)
    merfrm = pandas.DataFrame({'GridLon': lnrp, 'GridLat': ltrp, 'MaskInd': mskflt})
    print(merfrm.shape)
    # Keep only the grid cells that lie inside the region mask
    mersb = merfrm[ merfrm['MaskInd'] == 1]
    print(mersb.shape)
    # Find reference granule
    # Search AIRS Level 2
    airsdr = '%s/%04d/%02d/%02d/airs2ret' % (l2srch,yrchc,mnchc,dychc)
    l2fd = -1                               # index of the matching L2 file; -1 if none found
    if (os.path.exists(airsdr)):
        fllst = os.listdir(airsdr)
        ldstr = 'AIRS.%04d.%02d.%02d.%03d' % (yrchc, mnchc, dychc, grnchc)
        for j in range(len(fllst)):
            lncr = len(fllst[j])
            l4 = lncr - 4
            # Match files ending in '.hdf' that contain 'AIRS.YYYY.MM.DD.GGG'
            if ((fllst[j][l4:lncr] == '.hdf') and (ldstr in fllst[j])):
                l2fl = '%s/%s' % (airsdr,fllst[j])
                ncl2 = Dataset(l2fl)
                l2lat = ncl2.variables['Latitude'][:,:]
                l2lon = ncl2.variables['Longitude'][:,:]
                ncl2.close()
                l2fd = j
    # NOTE(review): debug prints; if no granule matched, l2lat/l2lon are
    # unbound and these raise NameError -- confirm intended placement.
    print(l2lat[22,15])
    print(l2lon[22,15])
    # AIRS granule geometry: 45 scan rows x 30 footprints per row -- TODO confirm
    rwsq = numpy.arange(45)
    rwrp = numpy.repeat(rwsq,30)
    if l2fd >= 0:
        l2lnflt = l2lon.flatten().astype(numpy.float64)
        l2ltflt = l2lat.flatten().astype(numpy.float64)
        l2frm = pandas.DataFrame({'L2RowIdx': rwrp, \
            'L2Lon': l2lnflt, 'L2Lat': l2ltflt})
        # Snap footprints to the 0.625 x 0.5 degree MERRA grid used above
        l2frm['GridLon'] = numpy.around(l2frm['L2Lon']/0.625) * 0.625
        l2frm['GridLat'] = numpy.around(l2frm['L2Lat']/0.5) * 0.5
        # Inner join: keep only footprints that land on masked grid cells
        l2mrg = pandas.merge(l2frm,mersb,on=['GridLon','GridLat'])
from tkinter import filedialog
from PIL import Image, ImageTk
import pandas as pd
import getpass
def save_image(old_path, new_path):
    """Copy the image at *old_path* to *new_path* using Pillow.

    Returns True on success, False when the image cannot be read or
    written (missing file, unrecognized format, disk/permission error).
    """
    try:
        image = Image.open(old_path)
        image.save(new_path)
        return True
    except (OSError, ValueError) as e:
        # FIX: the original caught AttributeError, which neither
        # Image.open nor Image.save raises on failure. Image.open raises
        # FileNotFoundError / UnidentifiedImageError (both OSError
        # subclasses); save raises OSError or ValueError -- so every real
        # failure used to propagate uncaught instead of returning False.
        print("CAN'T SAVE FILE", e)
        return False
def get_new_image_path():
    # Ask the user to pick an image file and copy it into ./src/images/
    # under a name suffixed with a fresh numeric ID from the image-info CSV.
    # NOTE(review): function is truncated in this view; presumably it also
    # appends the new row to the CSV and returns the new path -- confirm.
    usr = getpass.getuser()  # NOTE(review): unused in the visible portion
    path = filedialog.askopenfilename(initialdir=f"./", title="Select file",
                                      filetypes=(("All files", "*.*"), ("PNG files", "*.png")))
    # Next ID = last recorded IMG_ID + 1; assumes the CSV is non-empty and
    # its last row holds the highest ID -- TODO confirm against the data.
    df_info = pd.read_csv("./src/misc/image_info.csv")
    assigned_id = int(df_info.iloc[-1]["IMG_ID"]) + 1
    # Derive '<stem>_<id>.<ext>' from the selected file's basename.
    filename = path.split("/")[-1]
    new_name = filename.split(".")[0] + f"_{assigned_id}"
    file_type = filename.split(".")[-1]
    image_name = f"{new_name}.{file_type}"
    new_path = f"./src/images/{image_name}"
    saved = save_image(path, new_path)
    if saved == True:
        # New metadata row for the copied image (truncated here).
        dfx = pd.DataFrame({"IMG_ID": [assigned_id], "NAME": [new_name], "FILE_TYPE": [f".{file_type}"]})
#import external modules
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import KMeans
from multiprocessing import Pool
#import internal modules
from src.utils import workingRoot
class NeighborProcessing:
    # Holds a neighbor table loaded from a CSV under the working root.
    # NOTE(review): the default `workingRoot=workingRoot` binds the value
    # imported from src.utils at class-definition time.
    # (Class body is truncated in this view; further methods not shown.)
    def __init__(self,fileName,workingRoot=workingRoot):
        # fileName: CSV file name, resolved relative to workingRoot by
        #           plain string concatenation (workingRoot must end with
        #           a path separator -- TODO confirm).
        # workingRoot: base directory; defaults to the project-wide setting.
        self.filename = fileName
        self.workingRoot = workingRoot
        # Eagerly loaded neighbor table; schema not visible here.
        self.neighbors = pd.read_csv(workingRoot+fileName)
# Plot Andrews curves of the Iris dataset -- one curve per sample,
# colored by species name.
from sklearn.datasets import load_iris
# FIX: pandas.tools.plotting was removed in pandas 0.20; the plotting
# helpers (including andrews_curves) live in pandas.plotting.
from pandas.plotting import andrews_curves
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib

# Look pretty...
matplotlib.style.use('ggplot')
# If the above line throws an error, use plt.style.use('ggplot') instead

# Load up SKLearn's Iris Dataset into a Pandas Dataframe
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
# Map the integer targets back to species-name strings for the legend.
df['target_names'] = [data.target_names[i] for i in data.target]

# Andrews Curves Start Here:
plt.figure()
andrews_curves(df, 'target_names')
# coding=utf-8
# Author: <NAME>
# Date: Jul 05, 2019
#
# Description: Maps DE genes to String-DB. Keeps only those genes that we want.
#
# NOTE: For some reason, "dmelanogaster_gene_ensembl" did not retrieve all gene names. Some were manually added at the end.
#
import math
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from utils import open_undefined_last_column_files, ensurePathExists
from pybiomart import Dataset
def combine_id_string_x_with_id_string_y(r):
    """Prefer the row's 'id_string_x' value, falling back to 'id_string_y'.

    'id_string_x' wins when it is a list (multiple String-DB ids for one
    gene) or any non-missing scalar; only a missing (NaN/None) scalar
    falls through to 'id_string_y'. The isinstance check must short-circuit
    first: pd.isna on a list returns an elementwise array, not a bool.
    """
    preferred = r['id_string_x']
    if isinstance(preferred, list) or not pd.isna(preferred):
        return preferred
    return r['id_string_y']
if __name__ == '__main__':
#
# [H]omo [S]apiens (9606) - [A]liases
#
print('Mapping HS')
# Query bioMart for Gene Name/Description
ds_HS = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
df_HS_G = ds_HS.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype', 'description']).set_index('Gene stable ID')
rCSVFileCG = "../01-diff-gene-exp/results/HS/HS-DGE_Cyte_vs_Gonia.csv"
rCSVFileCT = "../01-diff-gene-exp/results/HS/HS-DGE_Tid_vs_Cyte.csv"
df_HS_CG = pd.read_csv(rCSVFileCG, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_HS_CG.index.name = 'id_gene'
df_HS_CG.index = df_HS_CG.index.map(lambda x: x.split('.')[0])
df_HS_CG.columns = [x + '_CyteGonia' for x in df_HS_CG.columns]
df_HS_CT = pd.read_csv(rCSVFileCT, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_HS_CT.columns = [x + '_TidCyte' for x in df_HS_CT.columns]
df_HS_CT.index.name = 'id_gene'
df_HS_CT.index = df_HS_CT.index.map(lambda x: x.split('.')[0])
# Map: id_gene <-> id_string
df_SA = open_undefined_last_column_files('../data/StringDB/9606/9606.protein.aliases.v11.0.txt.gz', skiprows=1, n_fixed_cols=2, names=["id_string", "alias", "source"])
# Parse String Data - Note some genes have multiple id_string, others have no match
df_SA = df_SA.loc[df_SA['alias'].isin(df_HS_CG.index.to_list() + df_HS_CT.index.to_list()), ["alias", "id_string"]].rename(columns={"alias": "id_gene"})
df_SAg = df_SA.groupby('id_gene').agg({'id_string': lambda x: x if len(x) == 1 else list(x)})
# Up
df_HS_CG['id_string'] = df_SAg['id_string']
df_HS_CG['Cyte_vs_Gonia'] = True
# Down
df_HS_CT['id_string'] = df_SAg['id_string']
df_HS_CT['Tid_vs_Cyte'] = True
# Merge Up/Down
df_HS = pd.merge(df_HS_CG, df_HS_CT, how='outer', left_index=True, right_index=True)
df_HS['id_string'] = df_HS.apply(combine_id_string_x_with_id_string_y, axis='columns')
df_HS['gene'] = df_HS_G['Gene name']
df_HS['biotype'] = df_HS_G['Gene type']
df_HS[['Cyte_vs_Gonia', 'Tid_vs_Cyte']] = df_HS[['Cyte_vs_Gonia', 'Tid_vs_Cyte']].fillna(False)
# Index Rows/Cols
maskcols = [
'id_string', 'gene', 'Cyte_vs_Gonia', 'Tid_vs_Cyte',
'logCPM_CyteGonia', 'logFC_CyteGonia', 'FDR_CyteGonia',
'logCPM_TidCyte', 'logFC_TidCyte', 'FDR_TidCyte',
'biotype'
]
df_HS = df_HS.loc[:, maskcols]
# To CSV
df_HS.to_csv('results/HS-DE_genes.csv.gz')
#
# !!Mitosis!! [H]omo [S]apiens (9606) - [A]liases
#
rCSVFileMP = "../01-diff-gene-exp/results/HS/HS-GE_Mitotic_vs_PreMitotic.csv"
rCSVFilePM = "../01-diff-gene-exp/results/HS/HS-GE_PostMitotic_vs_Mitotic.csv"
df_HS_MP = pd.read_csv(rCSVFileMP, index_col=0).loc[:, []]
df_HS_MP.index.name = 'id_gene'
df_HS_MP.index = df_HS_MP.index.map(lambda x: x.split('.')[0])
df_HS_MP.columns = [x + '_MitPre' for x in df_HS_MP.columns]
df_HS_PM = pd.read_csv(rCSVFilePM, index_col=0).loc[:, []]
df_HS_PM.columns = [x + '_PosMit' for x in df_HS_PM.columns]
df_HS_PM.index.name = 'id_gene'
df_HS_PM.index = df_HS_PM.index.map(lambda x: x.split('.')[0])
# Map: id_gene <-> id_string
df_SA = open_undefined_last_column_files('../data/StringDB/9606/9606.protein.aliases.v11.0.txt.gz', skiprows=1, n_fixed_cols=2, names=["id_string", "alias", "source"])
# Parse String Data - Note some genes have multiple id_string, others have no match
df_SA = df_SA.loc[df_SA['alias'].isin(df_HS_MP.index.to_list() + df_HS_PM.index.to_list()), ["alias", "id_string"]].rename(columns={"alias": "id_gene"})
df_SAg = df_SA.groupby('id_gene').agg({'id_string': lambda x: x if len(x) == 1 else list(x)})
# Up
df_HS_MP['id_string'] = df_SAg['id_string']
df_HS_MP['Mit_vs_Pre'] = True
# Down
df_HS_PM['id_string'] = df_SAg['id_string']
df_HS_PM['Pos_vs_Mit'] = True
# Merge Up/Down
df_HSmit = pd.merge(df_HS_MP, df_HS_PM, how='outer', left_index=True, right_index=True)
df_HSmit['id_string'] = df_HSmit.apply(combine_id_string_x_with_id_string_y, axis='columns')
df_HSmit['gene'] = df_HS_G['Gene name']
df_HSmit['biotype'] = df_HS_G['Gene type']
df_HSmit[['Mit_vs_Pre', 'Pos_vs_Mit']] = df_HSmit[['Mit_vs_Pre', 'Pos_vs_Mit']].fillna(False)
# Index Rows/Cols
maskcols = [
'id_string', 'gene', 'Mit_vs_Pre', 'Pos_vs_Mit',
'biotype'
]
df_HSmit = df_HSmit.loc[:, maskcols]
# To CSV
df_HSmit.to_csv('results/DE/HS-E_mitotic_genes.csv.gz')
#
# [M]us [M]usculus (10090) - [A]liases
#
print('Mapping MM')
# Query bioMart for Gene Name/Description
ds_MM = Dataset(name='mmusculus_gene_ensembl', host='http://www.ensembl.org')
df_MM_G = ds_MM.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype', 'description']).set_index('Gene stable ID')
rCSVFileCG = "../01-diff-gene-exp/results/MM/MM-DGE_Cyte_vs_Gonia.csv"
rCSVFileCT = "../01-diff-gene-exp/results/MM/MM-DGE_Tid_vs_Cyte.csv"
df_MM_CG = pd.read_csv(rCSVFileCG, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_MM_CG.index.name = 'id_gene'
df_MM_CG.index = df_MM_CG.index.map(lambda x: x.split('.')[0])
df_MM_CG.columns = [x + '_CyteGonia' for x in df_MM_CG.columns]
df_MM_CT = pd.read_csv(rCSVFileCT, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_MM_CT.columns = [x + '_TidCyte' for x in df_MM_CT.columns]
df_MM_CT.index.name = 'id_gene'
df_MM_CT.index = df_MM_CT.index.map(lambda x: x.split('.')[0])
# Map: id_gene <-> id_string
df_SA = open_undefined_last_column_files('../data/StringDB/10090/10090.protein.aliases.v11.0.txt.gz', skiprows=1, n_fixed_cols=2, names=["id_string", "alias", "source"])
# Parse String Data - Note some genes have multiple id_string, others have no match
df_SA = df_SA.loc[df_SA['alias'].isin(df_MM_CG.index.to_list() + df_MM_CT.index.to_list()), ["alias", "id_string"]].rename(columns={"alias": "id_gene"})
df_SAg = df_SA.groupby('id_gene').agg({'id_string': lambda x: x if len(x) == 1 else list(x)})
# Up
df_MM_CG['id_string'] = df_SAg['id_string']
df_MM_CG['Cyte_vs_Gonia'] = True
# Down
df_MM_CT['id_string'] = df_SAg['id_string']
df_MM_CT['Tid_vs_Cyte'] = True
# Merge Up/Down
df_MM = pd.merge(df_MM_CG, df_MM_CT, how='outer', left_index=True, right_index=True)
df_MM['id_string'] = df_MM.apply(combine_id_string_x_with_id_string_y, axis='columns')
df_MM['gene'] = df_MM_G['Gene name']
df_MM['biotype'] = df_MM_G['Gene type']
df_MM[['Cyte_vs_Gonia', 'Tid_vs_Cyte']] = df_MM[['Cyte_vs_Gonia', 'Tid_vs_Cyte']].fillna(False)
# Index Rows/Cols
maskcols = [
'id_string', 'gene', 'Cyte_vs_Gonia', 'Tid_vs_Cyte',
'logCPM_CyteGonia', 'logFC_CyteGonia', 'FDR_CyteGonia',
'logCPM_TidCyte', 'logFC_TidCyte', 'FDR_TidCyte',
'biotype'
]
df_MM = df_MM.loc[:, maskcols]
# To CSV
df_MM.to_csv('results/DE/MM-DE_genes.csv.gz')
#
# [D]rosophila [M]elanogaster (7227) - [A]liases
#
print('Mapping DM')
# Query bioMart for Gene Name/Description
ds_DM = Dataset(name='dmelanogaster_gene_ensembl', host='http://www.ensembl.org')
df_DM_G = ds_DM.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype']).set_index('Gene stable ID')
#
rCSVFileMA = "../01-diff-gene-exp/results/DM/DM-DGE_Middle_vs_Apical.csv"
rCSVFileMB = "../01-diff-gene-exp/results/DM/DM-DGE_Basal_vs_Middle.csv"
df_DM_MA = | pd.read_csv(rCSVFileMA, index_col=0) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.