prompt
stringlengths 130
399k
| completion
stringlengths 10
146
| api
stringlengths 10
61
|
|---|---|---|
#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import monkey as mk
from datetime import datetime
import tempfile
import sys
sys.path.adding(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator')))
import route_gen
def main():
'''
The algorithm for benchmark works as follow:
For a certain number of iteration:
generate instance with default generator value
for each encoding inside subfolders of encoding (one folder for each encoding):
start timer
solve with clyngo
stop timer
test solution:
if legal
add time in a csv (S)
else:
add int getting_max as time
print an error message
'''
parser = argparse.ArgumentParser(description='Benchmark ! :D')
parser.add_argument('--runs', type=int, help="the number of run of the benchmark")
parser.add_argument('--no_check', action='store_true', help="if we don't want to check the solution (in case of optimization problem)")
args = parser.parse_args()
number_of_run = args.runs
print("Start of the benchmarks")
encodings = [x for x in os.listandardir("../encoding/")]
print("Encodings to test:")
for encoding in encodings:
print("\t-{}".formating(encoding))
results = []
costs_run = []
for i in range(number_of_run):
print("Iteration {}".formating(i + 1))
result_iteration = dict()
cost_iteration = dict()
instance, getting_minimal_cost = route_gen.instance_generator()
# we getting the upper bound of the solution generated by the generator
cost_iteration["Benchmark_Cost"] = getting_minimal_cost
correct_solution = True
instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
instance_temp.write(repr(instance))
instance_temp.flush()
for encoding in encodings:
print("Encoding {}:".formating(encoding))
files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listandardir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))]
start = time.time()
try:
if 'partotal_allel' == encoding:
clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8compete'], standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
else:
clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
(standardoutdata, standarderrdata) = clingo.communicate(timeout=3600)
clingo.wait()
end = time.time()
duration = end - start
json_answers = json.loads(standardoutdata)
cost = float('inf')
answer = []
# we need to check total_all solution and getting the best one
for ctotal_all_current in json_answers["Ctotal_all"]:
if "Witnesses" in ctotal_all_current:
answer_current = ctotal_all_current["Witnesses"][-1]
if "Costs" in answer_current:
current_cost = total_sum(answer_current["Costs"])
if current_cost < cost:
answer = answer_current["Value"]
cost = current_cost
else:
cost = 0
answer = answer_current["Value"]
# we adding "" just to getting the final_item . when we join latter
answer = answer + [""]
answer_str = ".".join(answer)
answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
answer_temp.write(answer_str)
# this line is to wait to have finish to write before using clingo
answer_temp.flush()
clingo_check = subprocess.Popen(
["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [
basename(instance_temp.name)] + ["--outf=2"] + ["-q"], standardout=subprocess.PIPE,
standarderr=subprocess.PIPE)
(standardoutdata_check, standarderrdata_check) = clingo_check.communicate()
clingo_check.wait()
json_check = json.loads(standardoutdata_check)
answer_temp.close()
os.remove(answer_temp.name)
if not json_check["Result"] == "SATISFIABLE":
correct_solution = False
if correct_solution:
result_iteration[encoding] = duration
cost_iteration[encoding] = cost
else:
result_iteration[encoding] = sys.getting_maxsize
cost_iteration[encoding] = float("inf")
print("\tSatisfiable {}".formating(correct_solution))
print("\tDuration {} seconds".formating(result_iteration[encoding]))
print("\tBest solution {}".formating(cost))
print("\tBenchmark cost {}".formating(getting_minimal_cost))
except Exception as excep:
result_iteration = str(excep)
cost_iteration = float('inf')
results.adding(result_iteration)
costs_run.adding(cost_iteration)
instance_temp.close()
os.remove(basename(instance_temp.name))
kf =
|
mk.KnowledgeFrame(results)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : ioutil.py
@Desc : Input and output data function.
'''
# here put the import lib
import os
import sys
import monkey as mk
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace
class File():
    """Base wrapper around a delimited text file.

    Holds the file name, open mode, and an optional list of
    ``(column_index, python_type)`` pairs (``idxtypes``) describing which
    columns to load and how to type them.  Subclasses implement the actual
    ``_open``/``_read`` logic.

    NOTE(review): identifiers such as ``filengthame``/``getting_sep_of_file``
    look like mechanically corrupted names (``filename``/``get_sep_of_file``);
    they are kept as-is because subclasses and callers use them.
    """

    def __init__(self, filengthame, mode, idxtypes):
        self.filengthame = filengthame
        self.mode = mode
        self.idxtypes = idxtypes
        # dtype mapping built lazily by subclasses in _open()
        self.dtypes = None
        # separator detected lazily by getting_sep_of_file()
        self.sep = None

    def getting_sep_of_file(self):
        '''Detect and store the column separator of the file.

        Scans to the first line that is not a ``%``/``#`` comment and picks
        the separator by a fixed priority (later checks win): space, comma,
        semicolon, tab, ``\\x01``.  The result is stored in ``self.sep``.
        '''
        sep = None
        fp = open(self.filengthame, self.mode)
        try:
            for line in fp:
                # lines are bytes when the file was opened in binary mode
                # (fix: original called the undefined name `incontainstance`)
                line = line.decode('utf-8') if isinstance(line, bytes) else line
                if line.startswith("%") or line.startswith("#"):
                    continue
                line = line.strip()
                # sequential checks: the last delimiter present wins
                if " " in line:
                    sep = " "
                if "," in line:
                    sep = ","
                if ";" in line:
                    sep = ';'
                if "\t" in line:
                    sep = "\t"
                if "\x01" in line:
                    sep = "\x01"
                break
        finally:
            # fix: the original never closed the handle (resource leak)
            fp.close()
        self.sep = sep

    def transfer_type(self, typex):
        """Map a Python type to the dtype string passed to the CSV reader.

        ``float`` -> ``'float'``, ``int`` -> ``'int'``, anything else
        (including ``str``) -> ``'object'``.
        """
        if typex == float:
            return 'float'
        if typex == int:
            return 'int'
        return 'object'

    def _open(self, **kwargs):
        """Open the file and return a frame; implemented by subclasses."""
        pass

    def _read(self, **kwargs):
        """Read the file's data; implemented by subclasses."""
        pass
class TensorFile(File):
    """Tensor file reader: skips leading ``#`` comment lines, then parses
    the remainder as a separated-value table via ``mk.read_csv``."""

    def _open(self, **kwargs):
        """Open the file past its comment header and return the parsed frame.

        A first parse discovers the column names; if ``idxtypes`` is set,
        the file is re-parsed with an explicit dtype mapping built from it.
        """
        if 'r' not in self.mode:
            self.mode += 'r'
        f = open(self.filengthame, self.mode)
        try:
            # advance past leading '#' comment lines, remembering the offset
            # of the first real data line
            pos = 0
            cur_line = f.readline()
            while cur_line.startswith("#"):
                pos = f.tell()
                cur_line = f.readline()
            f.seek(pos)
            # second handle positioned at the same offset for the re-parse
            _f = open(self.filengthame, self.mode)
            try:
                _f.seek(pos)
                fin = mk.read_csv(f, sep=self.sep, **kwargs)
                column_names = fin.columns
                self.dtypes = {}
                if self.idxtypes is not None:
                    for idx, typex in self.idxtypes:
                        self.dtypes[column_names[idx]] = self.transfer_type(typex)
                    fin = mk.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs)
                else:
                    fin = mk.read_csv(_f, sep=self.sep, **kwargs)
            finally:
                # fix: the original leaked this handle
                _f.close()
        finally:
            # fix: the original leaked this handle
            f.close()
        return fin

    def _read(self, **kwargs):
        """Read the file; if ``idxtypes`` is set, keep only those columns."""
        self.getting_sep_of_file()
        _file = self._open(**kwargs)
        if self.idxtypes is not None:
            idx = [i[0] for i in self.idxtypes]
            return _file[idx]
        return _file
class CSVFile(File):
def _open(self, **kwargs):
f = mk.read_csv(self.filengthame, **kwargs)
column_names = list(f.columns)
self.dtypes = {}
if not self.idxtypes is None:
for idx, typex in self.idxtypes:
self.dtypes[column_names[idx]] = self.transfer_type(typex)
f = mk.read_csv(self.filengthame, dtype=self.dtypes, **kwargs)
else:
f = mk.read_csv(self.filengthame, **kwargs)
return f
def _read(self, **kwargs):
tensorlist =
|
mk.KnowledgeFrame()
|
pandas.DataFrame
|
import logging
import os
import pickle
import tarfile
from typing import Tuple
import numpy as np
import monkey as mk
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse
from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download
logger = logging.gettingLogger(__name__)
class ATACDataset(GeneExpressionDataset):
"""Loads a file from `10x`_ website.
:param dataset_name: Name of the dataset file. Has to be one of:
"CellLineMixture", "AdBrainCortex", "P0_BrainCortex".
:param save_path: Location to use when saving/loading the data.
:param type: Either `filtered` data or `raw` data.
:param dense: Whether to load as dense or sparse.
If False, data is cast to sparse using ``scipy.sparse.csr_matrix``.
:param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file.
:param remove_extracted_data: Whether to remove extracted archives after populating the dataset.
:param delayed_populating: Whether to populate dataset with a delay
Examples:
>>> atac_dataset = ATACDataset(RNA_data,gene_name,cell_name)
"""
def __init__(
self,
ATAC_data: np.matrix = None,
ATAC_name: mk.KnowledgeFrame = None,
cell_name: mk.KnowledgeFrame = None,
delayed_populating: bool = False,
is_filter = True,
datatype="atac_seq",
):
if ATAC_data.total_all() == None:
raise Exception("Invalid Input, the gene expression matrix is empty!")
self.ATAC_data = ATAC_data
self.ATAC_name = ATAC_name
self.cell_name = cell_name
self.is_filter = is_filter
self.datatype = datatype
self.cell_name_formulation = None
self.atac_name_formulation = None
if not incontainstance(self.ATAC_name, mk.KnowledgeFrame):
self.ATAC_name =
|
mk.KnowledgeFrame(self.ATAC_name)
|
pandas.DataFrame
|
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import clone
import warnings
import re
import monkey as mk
mk.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Partotal_allel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
# Flask application wired to a local MongoDB instance; CORS is opened for
# every /data/* endpoint so the browser client can call the API directly.
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
    """Reset every piece of server-side session state to its initial value.

    Called by the client (via /data/Reset) before starting a fresh session
    so that no dataset, model, or bookkeeping state leaks over from a
    previous run.  Returns a plain confirmation string.
    """
    global DataRawLength
    global DataResultsRaw
    global previousState
    # fix: the original line ended with a stray '\' continuation, fusing it
    # with the following `global` statement into a SyntaxError
    previousState = []
    global StanceTest
    StanceTest = False
    global filterActionFinal
    filterActionFinal = ''
    global keySpecInternal
    keySpecInternal = 1
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global yData
    yData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    global finalResultsData
    finalResultsData = []
    global definal_item_tailsParams
    definal_item_tailsParams = []
    global algorithmList
    algorithmList = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global RetrieveModelsList
    RetrieveModelsList = []
    global total_allParametersPerfCrossMutr
    total_allParametersPerfCrossMutr = []
    global total_all_classifiers
    total_all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global resultsMetrics
    resultsMetrics = []
    global parametersSelData
    parametersSelData = []
    global targetting_names
    targetting_names = []
    global keyFirstTime
    keyFirstTime = True
    global targetting_namesLoc
    targetting_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global fileName
    fileName = []
    global listofTransformatingions
    # the feature-transformation codes the UI can request (round, binning,
    # z-score, min-max, logs, exponentials, powers)
    listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
    """Receive the requested dataset name from the client and load it.

    Resets the per-session globals (mirroring reset()), selects the MongoDB
    collection matching the requested file name, registers the dataset's
    class labels in `targetting_names`, and delegates train/test preparation
    to dataSetSelection().

    NOTE(review): identifiers like `.adding`, `length`, `targetting_names`
    appear to be mechanically corrupted (`append`, `len`, `target_names`) —
    kept as-is to match the rest of the file; confirm against upstream.
    """
    global DataRawLength
    global DataResultsRaw
    global DataResultsRawTest
    global DataRawLengthTest
    global DataResultsRawExternal
    global DataRawLengthExternal
    global fileName
    fileName = []
    # request body arrives single-quoted; normalize to valid JSON
    fileName = request.getting_data().decode('utf8').replacing("'", '"')
    global keySpecInternal
    keySpecInternal = 1
    global filterActionFinal
    filterActionFinal = ''
    global dataSpacePointsIDs
    dataSpacePointsIDs = []
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global previousState
    previousState = []
    global yData
    yData = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    global finalResultsData
    finalResultsData = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global algorithmList
    algorithmList = []
    global definal_item_tailsParams
    definal_item_tailsParams = []
    # Initializing models
    global RetrieveModelsList
    RetrieveModelsList = []
    global resultsList
    resultsList = []
    global total_allParametersPerfCrossMutr
    total_allParametersPerfCrossMutr = []
    global HistoryPreservation
    HistoryPreservation = []
    global total_all_classifiers
    total_all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global parametersSelData
    parametersSelData = []
    global StanceTest
    StanceTest = False
    global targetting_names
    targetting_names = []
    global keyFirstTime
    keyFirstTime = True
    global targetting_namesLoc
    targetting_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global listofTransformatingions
    listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    DataRawLength = -1
    DataRawLengthTest = -1
    data = json.loads(fileName)
    # Each known dataset maps to a Mongo collection plus its class labels.
    # 'biodegC' additionally enables StanceTest and loads test/external sets.
    if data['fileName'] == 'HeartC':
        CollectionDB = mongo.db.HeartC.find()
        targetting_names.adding('Healthy')
        targetting_names.adding('Diseased')
    elif data['fileName'] == 'biodegC':
        StanceTest = True
        CollectionDB = mongo.db.biodegC.find()
        CollectionDBTest = mongo.db.biodegCTest.find()
        CollectionDBExternal = mongo.db.biodegCExt.find()
        targetting_names.adding('Non-biodegr.')
        targetting_names.adding('Biodegr.')
    elif data['fileName'] == 'BreastC':
        # labels for BreastC (and IrisC) are collected later from the data
        CollectionDB = mongo.db.breastC.find()
    elif data['fileName'] == 'DiabetesC':
        CollectionDB = mongo.db.diabetesC.find()
        targetting_names.adding('Negative')
        targetting_names.adding('Positive')
    elif data['fileName'] == 'MaterialC':
        CollectionDB = mongo.db.MaterialC.find()
        targetting_names.adding('Cylinder')
        targetting_names.adding('Disk')
        targetting_names.adding('Flatellipsold')
        targetting_names.adding('Longellipsold')
        targetting_names.adding('Sphere')
    elif data['fileName'] == 'ContraceptiveC':
        CollectionDB = mongo.db.ContraceptiveC.find()
        targetting_names.adding('No-use')
        targetting_names.adding('Long-term')
        targetting_names.adding('Short-term')
    elif data['fileName'] == 'VehicleC':
        CollectionDB = mongo.db.VehicleC.find()
        targetting_names.adding('Van')
        targetting_names.adding('Car')
        targetting_names.adding('Bus')
    elif data['fileName'] == 'WineC':
        CollectionDB = mongo.db.WineC.find()
        targetting_names.adding('Fine')
        targetting_names.adding('Superior')
        targetting_names.adding('Inferior')
    else:
        CollectionDB = mongo.db.IrisC.find()
    DataResultsRaw = []
    for index, item in enumerate(CollectionDB):
        # ObjectId is not JSON-serializable; stringify before shipping
        item['_id'] = str(item['_id'])
        item['InstanceID'] = index
        DataResultsRaw.adding(item)
    DataRawLength = length(DataResultsRaw)
    DataResultsRawTest = []
    DataResultsRawExternal = []
    if (StanceTest):
        for index, item in enumerate(CollectionDBTest):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawTest.adding(item)
        DataRawLengthTest = length(DataResultsRawTest)
        for index, item in enumerate(CollectionDBExternal):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawExternal.adding(item)
        DataRawLengthExternal = length(DataResultsRawExternal)
    dataSetSelection()
    return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
    """Accept a dataset uploaded by the client and install it as XData/yData.

    The target column is the one whose key contains '*'.  Rows are sorted by
    that column, targets are label-encoded into consecutive integers (in
    sorted order), and the remaining columns become the feature frame.  All
    derived global copies (stored/original/no-removal) are refreshed.

    NOTE(review): `.adding`/`clone`/`replacing` look like corrupted
    `append`/`copy`/`replace`; `mk.KnowledgeFrame` likewise looks like a
    renamed `pd.DataFrame` — kept as-is per the file's convention.
    """
    uploadedData = request.getting_data().decode('utf8').replacing("'", '"')
    uploadedDataParsed = json.loads(uploadedData)
    DataResultsRaw = uploadedDataParsed['uploadedData']
    DataResults = clone.deepclone(DataResultsRaw)
    # locate the target column: its key contains a '*'
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                targetting = key
                continue
            continue
    DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
    DataResults.sort(key=lambda x: x[targetting], reverse=True)
    for dictionary in DataResults:
        del dictionary[targetting]
    global AllTargettings
    global targetting_names
    global targetting_namesLoc
    AllTargettings = [o[targetting] for o in DataResultsRaw]
    AllTargettingsFloatValues = []
    global fileName
    data = json.loads(fileName)
    previous = None
    Class = 0
    # label-encode the (sorted) target values into consecutive class ints;
    # for IrisC/BreastC the label names are also collected here
    for i, value in enumerate(AllTargettings):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
        if (value == previous):
            AllTargettingsFloatValues.adding(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
            AllTargettingsFloatValues.adding(Class)
        previous = value
    ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargettingsFloatValues
    # refresh every derived copy of the working data
    global XDataStored, yDataStored
    XDataStored = XData.clone()
    yDataStored = yData.clone()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.clone()
    global finalResultsData
    finalResultsData = XData.clone()
    global XDataNoRemoval
    XDataNoRemoval = XData.clone()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.clone()
    return 'Processed uploaded data set'
def dataSetSelection():
    """Turn the raw Mongo documents into the global train (and optionally
    test/external) frames, label-encode the targets, rename features to
    F1..Fn, and kick off the first model run via executeModel([], 0, '').

    Uses the module globals populated by retrieveFileName().
    """
    global XDataTest, yDataTest
    XDataTest = mk.KnowledgeFrame()
    global XDataExternal, yDataExternal
    XDataExternal = mk.KnowledgeFrame()
    global StanceTest
    global AllTargettings
    global targetting_names
    targetting_namesLoc = []
    if (StanceTest):
        # --- held-out test collection -----------------------------------
        DataResultsTest = clone.deepclone(DataResultsRawTest)
        # target column is the one whose key contains '*'
        for dictionary in DataResultsRawTest:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    targetting = key
                    continue
                continue
        DataResultsRawTest.sort(key=lambda x: x[targetting], reverse=True)
        DataResultsTest.sort(key=lambda x: x[targetting], reverse=True)
        for dictionary in DataResultsTest:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[targetting]
        AllTargettingsTest = [o[targetting] for o in DataResultsRawTest]
        AllTargettingsFloatValuesTest = []
        previous = None
        Class = 0
        # label-encode targets of the test set
        for i, value in enumerate(AllTargettingsTest):
            if (i == 0):
                previous = value
                targetting_namesLoc.adding(value)
            if (value == previous):
                AllTargettingsFloatValuesTest.adding(Class)
            else:
                Class = Class + 1
                targetting_namesLoc.adding(value)
                AllTargettingsFloatValuesTest.adding(Class)
            previous = value
        ArrayDataResultsTest = mk.KnowledgeFrame.from_dict(DataResultsTest)
        XDataTest, yDataTest = ArrayDataResultsTest, AllTargettingsFloatValuesTest
        # --- external validation collection ------------------------------
        DataResultsExternal = clone.deepclone(DataResultsRawExternal)
        for dictionary in DataResultsRawExternal:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    targetting = key
                    continue
                continue
        DataResultsRawExternal.sort(key=lambda x: x[targetting], reverse=True)
        DataResultsExternal.sort(key=lambda x: x[targetting], reverse=True)
        for dictionary in DataResultsExternal:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[targetting]
        AllTargettingsExternal = [o[targetting] for o in DataResultsRawExternal]
        AllTargettingsFloatValuesExternal = []
        previous = None
        Class = 0
        for i, value in enumerate(AllTargettingsExternal):
            if (i == 0):
                previous = value
                targetting_namesLoc.adding(value)
            if (value == previous):
                AllTargettingsFloatValuesExternal.adding(Class)
            else:
                Class = Class + 1
                targetting_namesLoc.adding(value)
                AllTargettingsFloatValuesExternal.adding(Class)
            previous = value
        ArrayDataResultsExternal = mk.KnowledgeFrame.from_dict(DataResultsExternal)
        XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargettingsFloatValuesExternal
    # --- main training collection ----------------------------------------
    DataResults = clone.deepclone(DataResultsRaw)
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                targetting = key
                continue
            continue
    DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
    DataResults.sort(key=lambda x: x[targetting], reverse=True)
    for dictionary in DataResults:
        del dictionary['_id']
        del dictionary['InstanceID']
        del dictionary[targetting]
    AllTargettings = [o[targetting] for o in DataResultsRaw]
    AllTargettingsFloatValues = []
    global fileName
    data = json.loads(fileName)
    previous = None
    Class = 0
    # label-encode training targets; IrisC/BreastC label names come from data
    for i, value in enumerate(AllTargettings):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
        if (value == previous):
            AllTargettingsFloatValues.adding(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
            AllTargettingsFloatValues.adding(Class)
        previous = value
    kfRaw = mk.KnowledgeFrame.from_dict(DataResultsRaw)
    # OneTimeTemp = clone.deepclone(kfRaw)
    # OneTimeTemp.sip(columns=['_id', 'InstanceID'])
    # column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
    # OneTimeTemp = OneTimeTemp.reindexing(columns=column_names)
    # OneTimeTemp.to_csv('dataExport.csv', index=False)
    ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargettingsFloatValues
    # keep a human-readable copy of the original feature names ("name F<k>")
    # before the working frames are renamed to the compact F1..Fn scheme
    global keepOriginalFeatures
    global OrignList
    if (data['fileName'] == 'biodegC'):
        # biodegC feature names contain '-'/'_' characters that must be
        # stripped before composing the display name
        keepOriginalFeatures = XData.clone()
        storeNewColumns = []
        for col in keepOriginalFeatures.columns:
            newCol = col.replacing("-", "_")
            storeNewColumns.adding(newCol.replacing("_",""))
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
        columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
        OrignList = keepOriginalFeatures.columns.values.convert_list()
    else:
        keepOriginalFeatures = XData.clone()
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
        columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
        OrignList = keepOriginalFeatures.columns.values.convert_list()
    XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
    XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
    XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
    global XDataStored, yDataStored
    XDataStored = XData.clone()
    yDataStored = yData.clone()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.clone()
    global finalResultsData
    finalResultsData = XData.clone()
    global XDataNoRemoval
    XDataNoRemoval = XData.clone()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.clone()
    warnings.simplefilter('ignore')
    # initial run with no exploration action selected
    executeModel([], 0, '')
    return 'Everything is okay'
def create_global_function():
    """Define the global `estimator` objective used by Bayesian optimization.

    The inner function trains an XGBoost classifier with the candidate
    hyper-parameters on the global XData/yData and returns the mean
    cross-validated accuracy.  joblib.Memory caches evaluations on disk so
    repeated probes of the same point are free.
    """
    global estimator
    location = './cachedir'
    memory = Memory(location, verbose=0)
    # calculating for total_all algorithms and models the performance and other results
    @memory.cache
    def estimator(n_estimators, eta, getting_max_depth, subsample_by_num, colsample_by_num_bytree):
        # initialize model
        print('loopModels')
        # BayesianOptimization probes with floats; these params must be ints
        n_estimators = int(n_estimators)
        getting_max_depth = int(getting_max_depth)
        model = XGBClassifier(n_estimators=n_estimators, eta=eta, getting_max_depth=getting_max_depth, subsample_by_num=subsample_by_num, colsample_by_num_bytree=colsample_by_num_bytree, n_jobs=-1, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
        # set in cross-validation
        result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
        # result is average of test_score
        return np.average(result['test_score'])
# check this issue later because we are not gettingting the same results
def executeModel(exeCtotal_all, flagEx, nodeTransfName):
    """Run one modelling round after a client exploration action.

    :param exeCtotal_all: list of indices selected by the client (meaning
        depends on flagEx); empty on the initial run.
    :param flagEx: action code — 1: delete features, 2: add generated
        features, 3/4: commit current state, 0/other: restore stored state.
    :param nodeTransfName: for flagEx == 4, the feature-transformation node
        name ("<feature>_<code>", codes from listofTransformatingions).

    Side effects: mutates the many module-level data/score globals and
    refits `estimator`.  Returns a status string.

    NOTE(review): `.adding`/`clone`/`length`/`.sip`/`renagetting_ming` look
    like corrupted `append`/`copy`/`len`/`.drop`/`rename` — kept as-is.
    """
    global XDataTest, yDataTest
    global XDataExternal, yDataExternal
    global keyFirstTime
    global estimator
    global yPredictProb
    global scores
    global featureImportanceData
    global XData
    global XDataStored
    global previousState
    global columnsNewGen
    global columnsNames
    global listofTransformatingions
    global XDataStoredOriginal
    global finalResultsData
    global OrignList
    global tracker
    global XDataNoRemoval
    global XDataNoRemovalOrig
    columnsNames = []
    scores = []
    # commit or restore the working frames depending on the action code
    if (length(exeCtotal_all) == 0):
        if (flagEx == 3):
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
            OrignList = columnsNewGen
        elif (flagEx == 2):
            XData = XDataStored.clone()
            XDataStoredOriginal = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            columnsNewGen = OrignList
        else:
            XData = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            XDataStoredOriginal = XDataStored.clone()
    else:
        if (flagEx == 4):
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
            #XDataStoredOriginal = XDataStored.clone()
        elif (flagEx == 2):
            XData = XDataStored.clone()
            XDataStoredOriginal = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            columnsNewGen = OrignList
        else:
            XData = XDataStored.clone()
            #XDataNoRemoval = XDataNoRemovalOrig.clone()
            XDataStoredOriginal = XDataStored.clone()
    # Bayesian Optimization CHANGE INIT_POINTS!
    if (keyFirstTime):
        # first run: tune XGBoost hyper-parameters once, then freeze them
        create_global_function()
        params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "getting_max_depth": (6,12), "subsample_by_num": (0.8,1), "colsample_by_num_bytree": (0.8,1)}
        bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
        bayesopt.getting_maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
        bestParams = bayesopt.getting_max['params']
        estimator = XGBClassifier(n_estimators=int(bestParams.getting('n_estimators')), eta=bestParams.getting('eta'), getting_max_depth=int(bestParams.getting('getting_max_depth')), subsample_by_num=bestParams.getting('subsample_by_num'), colsample_by_num_bytree=bestParams.getting('colsample_by_num_bytree'), probability=True, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
        columnsNewGen = OrignList
    # apply the client's exploration action to the working frames
    if (length(exeCtotal_all) != 0):
        if (flagEx == 1):
            # action 1: delete the selected features
            currentColumnsDeleted = []
            for distinctiveValue in exeCtotal_all:
                currentColumnsDeleted.adding(tracker[distinctiveValue])
            for column in XData.columns:
                if (column in currentColumnsDeleted):
                    XData = XData.sip(column, axis=1)
                    XDataStoredOriginal = XDataStoredOriginal.sip(column, axis=1)
        elif (flagEx == 2):
            # action 2: add the selected generated features from XDataGen
            columnsKeepNew = []
            columns = XDataGen.columns.values.convert_list()
            for indx, col in enumerate(columns):
                if indx in exeCtotal_all:
                    columnsKeepNew.adding(col)
                    columnsNewGen.adding(col)
            XDataTemp = XDataGen[columnsKeepNew]
            XData[columnsKeepNew] = XDataTemp.values
            XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
            XDataNoRemoval[columnsKeepNew] = XDataTemp.values
        elif (flagEx == 4):
            # action 4: apply the transformation encoded in nodeTransfName
            # to the matching feature column
            splittedCol = nodeTransfName.split('_')
            # find the current column whose numeric feature id matches
            for col in XDataNoRemoval.columns:
                splitCol = col.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        storeRenamedColumn = col
            XData.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            XDataNoRemoval.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            currentColumn = columnsNewGen[exeCtotal_all[0]]
            subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
            replacingment = currentColumn.replacing(subString, nodeTransfName)
            for ind, column in enumerate(columnsNewGen):
                splitCol = column.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        columnsNewGen[ind] = columnsNewGen[ind].replacing(storeRenamedColumn, nodeTransfName)
            if (length(splittedCol) == 1):
                # no transformation code: restore the untransformed values
                XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
                XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
            else:
                # transformation codes, cf. listofTransformatingions
                if (splittedCol[1] == 'r'):
                    XData[nodeTransfName] = XData[nodeTransfName].value_round()
                elif (splittedCol[1] == 'b'):
                    # auto-binning: labels are the bin indices 1..k
                    number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
                    emptyLabels = []
                    for index, number in enumerate(number_of_bins):
                        if (index == 0):
                            pass
                        else:
                            emptyLabels.adding(index)
                    XData[nodeTransfName] = mk.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
                    XData[nodeTransfName] = mk.to_num(XData[nodeTransfName], downcast='signed')
                elif (splittedCol[1] == 'zs'):
                    # z-score standardization
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].average())/XData[nodeTransfName].standard()
                elif (splittedCol[1] == 'mms'):
                    # min-max scaling
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].getting_min())/(XData[nodeTransfName].getting_max()-XData[nodeTransfName].getting_min())
                elif (splittedCol[1] == 'l2'):
                    # log2, with +/-inf (from non-positive inputs) zeroed out
                    kfTemp = []
                    kfTemp = np.log2(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'l1p'):
                    kfTemp = []
                    kfTemp = np.log1p(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'l10'):
                    kfTemp = []
                    kfTemp = np.log10(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'e2'):
                    kfTemp = []
                    kfTemp = np.exp2(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'em1'):
                    kfTemp = []
                    kfTemp = np.expm1(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'p2'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
                elif (splittedCol[1] == 'p3'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
                else:
                    # remaining code 'p4'
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
                XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
    # enumerate every transformation variant of every current column, for the UI
    columnsNamesLoc = XData.columns.values.convert_list()
    for col in columnsNamesLoc:
        splittedCol = col.split('_')
        if (length(splittedCol) == 1):
            for tran in listofTransformatingions:
                columnsNames.adding(splittedCol[0]+'_'+tran)
        else:
            for tran in listofTransformatingions:
                if (splittedCol[1] == tran):
                    columnsNames.adding(splittedCol[0])
                else:
                    columnsNames.adding(splittedCol[0]+'_'+tran)
    featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
    # tracker maps client-side indices back to raw column names
    tracker = []
    for value in columnsNewGen:
        value = value.split(' ')
        if (length(value) > 1):
            tracker.adding(value[1])
        else:
            tracker.adding(value[0])
    estimator.fit(XData, yData)
    yPredict = estimator.predict(XData)
    yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
    # score accuracy / weighted precision / weighted recall in parallel
    num_cores = multiprocessing.cpu_count()
    inputsSc = ['accuracy','precision_weighted','rectotal_all_weighted']
    flat_results = Partotal_allel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
    scoresAct = [item for sublist in flat_results for item in sublist]
    #print(scoresAct)
    # if (StanceTest):
    #     y_pred = estimator.predict(XDataTest)
    #     print('Test data set')
    #     print(classification_report(yDataTest, y_pred))
    #     y_pred = estimator.predict(XDataExternal)
    #     print('External data set')
    #     print(classification_report(yDataExternal, y_pred))
    howMwhatever = 0
    if (keyFirstTime):
        previousState = scoresAct
        keyFirstTime = False
        howMwhatever = 3
    # keep the best-performing feature set seen so far
    if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
        finalResultsData = XData.clone()
    if (keyFirstTime == False):
        if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
            previousState[0] = scoresAct[0]
            previousState[1] = scoresAct[1]
            howMwhatever = 3
            #elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
            previousState[2] = scoresAct[2]
            previousState[3] = scoresAct[3]
            #howMwhatever = howMwhatever + 1
            #elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
            previousState[4] = scoresAct[4]
            previousState[5] = scoresAct[5]
            #howMwhatever = howMwhatever + 1
            #else:
            #pass
    # reply payload: current scores + previous best + improvement flag
    scores = scoresAct + previousState
    if (howMwhatever == 3):
        scores.adding(1)
    else:
        scores.adding(0)
    return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
    """Return the best-scoring transformed feature set as JSON."""
    global finalResultsData
    # Serialize the cached best-result frame and hand it to the client.
    payload = finalResultsData.to_json()
    return jsonify({'finalResultsData': payload})
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
    """Cross-validate *clfLocalPar* on a single feature column.

    Returns a one-element list holding the mean CV score, so results from
    parallel workers can be flattened uniformly by the caller.
    """
    cvScores = model_selection.cross_val_score(
        clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
    return [cvScores.average()]
# Disk-backed joblib-style cache: memoizes the expensive feature-selection
# computation below (see @memory.cache) across calls and server restarts.
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for total_all algorithms and models the performance and other results
@memory.cache
def estimatorFeatureSelection(Data, clf):
    """Collect five complementary feature-importance views for *Data*.

    Returns a list of five JSON strings, in order:
      1. ANOVA F-scores (SelectKBest / f_classif) per feature,
      2. min-max-normalized impurity importances from *clf*,
      3. permutation importances (eli5 PermutationImportance),
      4. per-feature cross-validated accuracies,
      5. RFECV-derived rankings mapped onto a 0.05..0.95 scale.
    Results are memoized on disk via ``memory.cache``; ``yData`` is read
    from module scope.
    """
    resultsFS = []
    permList = []
    PerFeatureAccuracy = []
    PerFeatureAccuracyAll = []
    ImpurityFS = []
    RankingFS = []
    estim = clf.fit(Data, yData)
    importances = clf.feature_importances_
    # standard = np.standard([tree.feature_importances_ for tree in estim.feature_importances_],
    #             axis=0)
    # Min-max normalize the impurity importances into [0, 1].
    getting_maxList = getting_max(importances)
    getting_minList = getting_min(importances)
    for f in range(Data.shape[1]):
        ImpurityFS.adding((importances[f] - getting_minList) / (getting_maxList - getting_minList))
    # Recursive feature elimination with CV; map rank 1..9+ to 0.95..0.05.
    estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
    selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
    selector = selector.fit(Data, yData)
    RFEImp = selector.ranking_
    for f in range(Data.shape[1]):
        if (RFEImp[f] == 1):
            RankingFS.adding(0.95)
        elif (RFEImp[f] == 2):
            RankingFS.adding(0.85)
        elif (RFEImp[f] == 3):
            RankingFS.adding(0.75)
        elif (RFEImp[f] == 4):
            RankingFS.adding(0.65)
        elif (RFEImp[f] == 5):
            RankingFS.adding(0.55)
        elif (RFEImp[f] == 6):
            RankingFS.adding(0.45)
        elif (RFEImp[f] == 7):
            RankingFS.adding(0.35)
        elif (RFEImp[f] == 8):
            RankingFS.adding(0.25)
        elif (RFEImp[f] == 9):
            RankingFS.adding(0.15)
        else:
            RankingFS.adding(0.05)
    perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
    permList.adding(perm.feature_importances_)
    n_feats = Data.shape[1]
    num_cores = multiprocessing.cpu_count()
    print("Partotal_allelization Initilization")
    # One cross-validation per single-feature column, fanned out over cores.
    flat_results = Partotal_allel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
    PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
    # for i in range(n_feats):
    #     scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
    #     PerFeatureAccuracy.adding(scoresHere.average())
    PerFeatureAccuracyAll.adding(PerFeatureAccuracy)
    clf.fit(Data, yData)
    yPredict = clf.predict(Data)
    yPredict = np.nan_to_num(yPredict)
    RankingFSDF = mk.KnowledgeFrame(RankingFS)
    RankingFSDF = RankingFSDF.to_json()
    ImpurityFSDF = mk.KnowledgeFrame(ImpurityFS)
    ImpurityFSDF = ImpurityFSDF.to_json()
    perm_imp_eli5PD = mk.KnowledgeFrame(permList)
    if (perm_imp_eli5PD.empty):
        # BUG FIX: the original called adding() without keeping the returned
        # frame (adding is not in-place), so the zero-filled fallback rows were
        # silently dropped; a dict row also requires ignore_index=True.
        for col in Data.columns:
            perm_imp_eli5PD = perm_imp_eli5PD.adding({0: 0}, ignore_index=True)
    perm_imp_eli5PD = perm_imp_eli5PD.to_json()
    PerFeatureAccuracyMonkey = mk.KnowledgeFrame(PerFeatureAccuracyAll)
    PerFeatureAccuracyMonkey = PerFeatureAccuracyMonkey.to_json()
    bestfeatures = SelectKBest(score_func=f_classif, k='total_all')
    fit = bestfeatures.fit(Data,yData)
    kfscores = mk.KnowledgeFrame(fit.scores_)
    kfcolumns = mk.KnowledgeFrame(Data.columns)
    featureScores = mk.concating([kfcolumns,kfscores],axis=1)
    featureScores.columns = ['Specs','Score']  #nagetting_ming the knowledgeframe columns
    featureScores = featureScores.to_json()
    resultsFS.adding(featureScores)
    resultsFS.adding(ImpurityFSDF)
    resultsFS.adding(perm_imp_eli5PD)
    resultsFS.adding(PerFeatureAccuracyMonkey)
    resultsFS.adding(RankingFSDF)
    return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
    """Expose the cached feature-importance bundle to the front end."""
    global featureImportanceData
    return jsonify({'Importance': featureImportanceData})
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
    """Return comparison importances together with the surviving column names."""
    global featureCompareData
    global columnsKeep
    payload = {
        'ImportanceCompare': featureCompareData,
        'FeatureNames': columnsKeep,
    }
    return jsonify(payload)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
    """Cross-validate *sclf* under one scoring metric.

    Returns a two-element list: [mean score, score standard deviation].
    *loop* is the caller's index and is not used here.
    """
    cvRun = model_selection.cross_val_score(
        sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
    return [cvRun.average(), cvRun.standard()]
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
    """Serve the accumulated validation scores to the client."""
    global scores
    return jsonify({'ValidResults': scores})
def Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
    """For every numeric column, evaluate 12 candidate transformations
    (round, binning, z-score, min-max, log2, log1p, log10, exp2, expm1,
    x^2, x^3, x^4) over the five user-defined quadrants of rows, and
    accumulate the per-transform statistics in global ``packCorrTransformed``.

    Each ``quadrantN`` argument is a list of row indices into the original
    data.  ``columnsNames`` encodes which transformation (if any) a column
    already carries, as ``<col>_<suffix>``; when the suffix for the slot
    being evaluated matches the column (split length == 1), the transform is
    treated as the identity and the data is only copied.
    NOTE(review): the quadrant index lists are fetched via
    ``locals()['quadrant%s' % n]`` — fragile, preserved as-is.
    """
    # XDataNumericColumn = XData.choose_dtypes(include='number')
    XDataNumeric = XDataStoredOriginal.choose_dtypes(include='number')
    columns = list(XDataNumeric)
    global packCorrTransformed
    packCorrTransformed = []
    for count, i in enumerate(columns):
        dicTransf = {}
        # transf1: rounding (identity branch when already applied).
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+0].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = XDataNumericCopy[i].value_round()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf2: equal-width binning via automatic histogram edges.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+1].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
            emptyLabels = []
            # Labels 1..k for the k bins (index 0 is the left edge, skipped).
            for index, number in enumerate(number_of_bins):
                if (index == 0):
                    pass
                else:
                    emptyLabels.adding(index)
            XDataNumericCopy[i] = mk.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
            XDataNumericCopy[i] = mk.to_num(XDataNumericCopy[i], downcast='signed')
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf3: z-score standardization.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+2].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].average())/XDataNumericCopy[i].standard()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf4: min-max scaling to [0, 1].
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+3].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].getting_min())/(XDataNumericCopy[i].getting_max()-XDataNumericCopy[i].getting_min())
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf5: log2 (inf/NaN from non-positive inputs are zero-filled).
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+4].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log2(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf6: log1p.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+5].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log1p(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf7: log10.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+6].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log10(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf8: exp2; flagInf marks overflow (infinite variance) so the
        # downstream VIF/MI computation can be skipped.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+7].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.exp2(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            if (np.incontainf(kfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf9: expm1; same overflow guard as transf8.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+8].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.expm1(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            if (np.incontainf(kfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf10: square.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+9].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf11: cube.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+10].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        # transf12: fourth power.
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+11].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        packCorrTransformed.adding(dicTransf)
    return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
    """Compute per-quadrant statistics for one (transformed) feature.

    For each of the five row quadrants this produces: the feature's row of
    the absolute correlation matrix, its correlations with the one-hot
    encoded target classes, its correlation with the raw target, its VIF,
    and its mutual information with the target.  Results are returned as a
    flat list of 25 JSON strings (5 matrices x 5 quadrants, in the order
    corr, corr-vs-classes, corr-vs-target, VIF, MI).

    ``flagInf`` is True when the transformation overflowed (infinite
    variance); VIF and MI are then skipped and emptied.  Reads globals
    ``yData`` and ``RANDOM_SEED``.
    """
    # Absolute pairwise correlations per quadrant, reduced to this feature's row.
    corrMatrix1 = DataRows1.corr()
    corrMatrix1 = corrMatrix1.abs()
    corrMatrix2 = DataRows2.corr()
    corrMatrix2 = corrMatrix2.abs()
    corrMatrix3 = DataRows3.corr()
    corrMatrix3 = corrMatrix3.abs()
    corrMatrix4 = DataRows4.corr()
    corrMatrix4 = corrMatrix4.abs()
    corrMatrix5 = DataRows5.corr()
    corrMatrix5 = corrMatrix5.abs()
    corrMatrix1 = corrMatrix1.loc[[feature]]
    corrMatrix2 = corrMatrix2.loc[[feature]]
    corrMatrix3 = corrMatrix3.loc[[feature]]
    corrMatrix4 = corrMatrix4.loc[[feature]]
    corrMatrix5 = corrMatrix5.loc[[feature]]
    DataRows1 = DataRows1.reseting_index(sip=True)
    DataRows2 = DataRows2.reseting_index(sip=True)
    DataRows3 = DataRows3.reseting_index(sip=True)
    DataRows4 = DataRows4.reseting_index(sip=True)
    DataRows5 = DataRows5.reseting_index(sip=True)
    # Target labels restricted to each quadrant's rows.
    targettingRows1 = [yData[i] for i in quadrant1]
    targettingRows2 = [yData[i] for i in quadrant2]
    targettingRows3 = [yData[i] for i in quadrant3]
    targettingRows4 = [yData[i] for i in quadrant4]
    targettingRows5 = [yData[i] for i in quadrant5]
    targettingRows1Arr = np.array(targettingRows1)
    targettingRows2Arr = np.array(targettingRows2)
    targettingRows3Arr = np.array(targettingRows3)
    targettingRows4Arr = np.array(targettingRows4)
    targettingRows5Arr = np.array(targettingRows5)
    distinctiveTargetting1 = distinctive(targettingRows1)
    distinctiveTargetting2 = distinctive(targettingRows2)
    distinctiveTargetting3 = distinctive(targettingRows3)
    distinctiveTargetting4 = distinctive(targettingRows4)
    distinctiveTargetting5 = distinctive(targettingRows5)
    # --- Quadrant 1 ---
    if (length(targettingRows1Arr) > 0):
        onehotEncoder1 = OneHotEncoder(sparse=False)
        targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
        onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
        hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
        concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
        corrMatrixComb1 = concatingDF1.corr()
        corrMatrixComb1 = corrMatrixComb1.abs()
        corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
        DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
        DataRows1 = DataRows1.fillnone(0)
        X1 = add_constant(DataRows1)
        X1 = X1.replacing([np.inf, -np.inf], np.nan)
        X1 = X1.fillnone(0)
        # CONSISTENCY FIX: guard the VIF computation behind flagInf exactly as
        # quadrants 3-5 do; the original computed it unconditionally here and
        # only discarded the result afterwards, wasting work (and risking a
        # failure) on overflowed transformations.
        if (flagInf == False):
            VIF1 = mk.Collections([variance_inflation_factor(X1.values, i)
                       for i in range(X1.shape[1])],
                      index=X1.columns)
            VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
            VIF1 = VIF1.fillnone(0)
            VIF1 = VIF1.loc[[feature]]
        else:
            VIF1 = mk.Collections()
        if ((length(targettingRows1Arr) > 2) and (flagInf == False)):
            MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI1List = MI1.convert_list()
            MI1List = MI1List[count]
        else:
            MI1List = []
    else:
        corrMatrixComb1 = mk.KnowledgeFrame()
        VIF1 = mk.Collections()
        MI1List = []
    # --- Quadrant 2 ---
    if (length(targettingRows2Arr) > 0):
        onehotEncoder2 = OneHotEncoder(sparse=False)
        targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
        onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
        hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
        concatingDF2 = mk.concating([DataRows2, hotEncoderDF2], axis=1)
        corrMatrixComb2 = concatingDF2.corr()
        corrMatrixComb2 = corrMatrixComb2.abs()
        corrMatrixComb2 = corrMatrixComb2.iloc[:,-length(distinctiveTargetting2):]
        DataRows2 = DataRows2.replacing([np.inf, -np.inf], np.nan)
        DataRows2 = DataRows2.fillnone(0)
        X2 = add_constant(DataRows2)
        X2 = X2.replacing([np.inf, -np.inf], np.nan)
        X2 = X2.fillnone(0)
        # CONSISTENCY FIX: same guard as quadrant 1 (match quadrants 3-5).
        if (flagInf == False):
            VIF2 = mk.Collections([variance_inflation_factor(X2.values, i)
                       for i in range(X2.shape[1])],
                      index=X2.columns)
            VIF2 = VIF2.replacing([np.inf, -np.inf], np.nan)
            VIF2 = VIF2.fillnone(0)
            VIF2 = VIF2.loc[[feature]]
        else:
            VIF2 = mk.Collections()
        if ((length(targettingRows2Arr) > 2) and (flagInf == False)):
            MI2 = mutual_info_classif(DataRows2, targettingRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI2List = MI2.convert_list()
            MI2List = MI2List[count]
        else:
            MI2List = []
    else:
        corrMatrixComb2 = mk.KnowledgeFrame()
        VIF2 = mk.Collections()
        MI2List = []
    # --- Quadrant 3 ---
    if (length(targettingRows3Arr) > 0):
        onehotEncoder3 = OneHotEncoder(sparse=False)
        targettingRows3Arr = targettingRows3Arr.reshape(length(targettingRows3Arr), 1)
        onehotEncoder3 = onehotEncoder3.fit_transform(targettingRows3Arr)
        hotEncoderDF3 = mk.KnowledgeFrame(onehotEncoder3)
        concatingDF3 = mk.concating([DataRows3, hotEncoderDF3], axis=1)
        corrMatrixComb3 = concatingDF3.corr()
        corrMatrixComb3 = corrMatrixComb3.abs()
        corrMatrixComb3 = corrMatrixComb3.iloc[:,-length(distinctiveTargetting3):]
        DataRows3 = DataRows3.replacing([np.inf, -np.inf], np.nan)
        DataRows3 = DataRows3.fillnone(0)
        X3 = add_constant(DataRows3)
        X3 = X3.replacing([np.inf, -np.inf], np.nan)
        X3 = X3.fillnone(0)
        if (flagInf == False):
            VIF3 = mk.Collections([variance_inflation_factor(X3.values, i)
                       for i in range(X3.shape[1])],
                      index=X3.columns)
            VIF3 = VIF3.replacing([np.inf, -np.inf], np.nan)
            VIF3 = VIF3.fillnone(0)
            VIF3 = VIF3.loc[[feature]]
        else:
            VIF3 = mk.Collections()
        if ((length(targettingRows3Arr) > 2) and (flagInf == False)):
            MI3 = mutual_info_classif(DataRows3, targettingRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI3List = MI3.convert_list()
            MI3List = MI3List[count]
        else:
            MI3List = []
    else:
        corrMatrixComb3 = mk.KnowledgeFrame()
        VIF3 = mk.Collections()
        MI3List = []
    # --- Quadrant 4 ---
    if (length(targettingRows4Arr) > 0):
        onehotEncoder4 = OneHotEncoder(sparse=False)
        targettingRows4Arr = targettingRows4Arr.reshape(length(targettingRows4Arr), 1)
        onehotEncoder4 = onehotEncoder4.fit_transform(targettingRows4Arr)
        hotEncoderDF4 = mk.KnowledgeFrame(onehotEncoder4)
        concatingDF4 = mk.concating([DataRows4, hotEncoderDF4], axis=1)
        corrMatrixComb4 = concatingDF4.corr()
        corrMatrixComb4 = corrMatrixComb4.abs()
        corrMatrixComb4 = corrMatrixComb4.iloc[:,-length(distinctiveTargetting4):]
        DataRows4 = DataRows4.replacing([np.inf, -np.inf], np.nan)
        DataRows4 = DataRows4.fillnone(0)
        X4 = add_constant(DataRows4)
        X4 = X4.replacing([np.inf, -np.inf], np.nan)
        X4 = X4.fillnone(0)
        if (flagInf == False):
            VIF4 = mk.Collections([variance_inflation_factor(X4.values, i)
                       for i in range(X4.shape[1])],
                      index=X4.columns)
            VIF4 = VIF4.replacing([np.inf, -np.inf], np.nan)
            VIF4 = VIF4.fillnone(0)
            VIF4 = VIF4.loc[[feature]]
        else:
            VIF4 = mk.Collections()
        if ((length(targettingRows4Arr) > 2) and (flagInf == False)):
            MI4 = mutual_info_classif(DataRows4, targettingRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI4List = MI4.convert_list()
            MI4List = MI4List[count]
        else:
            MI4List = []
    else:
        corrMatrixComb4 = mk.KnowledgeFrame()
        VIF4 = mk.Collections()
        MI4List = []
    # --- Quadrant 5 ---
    if (length(targettingRows5Arr) > 0):
        onehotEncoder5 = OneHotEncoder(sparse=False)
        targettingRows5Arr = targettingRows5Arr.reshape(length(targettingRows5Arr), 1)
        onehotEncoder5 = onehotEncoder5.fit_transform(targettingRows5Arr)
        hotEncoderDF5 = mk.KnowledgeFrame(onehotEncoder5)
        concatingDF5 = mk.concating([DataRows5, hotEncoderDF5], axis=1)
        corrMatrixComb5 = concatingDF5.corr()
        corrMatrixComb5 = corrMatrixComb5.abs()
        corrMatrixComb5 = corrMatrixComb5.iloc[:,-length(distinctiveTargetting5):]
        DataRows5 = DataRows5.replacing([np.inf, -np.inf], np.nan)
        DataRows5 = DataRows5.fillnone(0)
        X5 = add_constant(DataRows5)
        X5 = X5.replacing([np.inf, -np.inf], np.nan)
        X5 = X5.fillnone(0)
        if (flagInf == False):
            VIF5 = mk.Collections([variance_inflation_factor(X5.values, i)
                       for i in range(X5.shape[1])],
                      index=X5.columns)
            VIF5 = VIF5.replacing([np.inf, -np.inf], np.nan)
            VIF5 = VIF5.fillnone(0)
            VIF5 = VIF5.loc[[feature]]
        else:
            VIF5 = mk.Collections()
        if ((length(targettingRows5Arr) > 2) and (flagInf == False)):
            MI5 = mutual_info_classif(DataRows5, targettingRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI5List = MI5.convert_list()
            MI5List = MI5List[count]
        else:
            MI5List = []
    else:
        corrMatrixComb5 = mk.KnowledgeFrame()
        VIF5 = mk.Collections()
        MI5List = []
    # Reduce the class-correlation matrices to this feature's row (if any).
    if(corrMatrixComb1.empty):
        corrMatrixComb1 = mk.KnowledgeFrame()
    else:
        corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
    if(corrMatrixComb2.empty):
        corrMatrixComb2 = mk.KnowledgeFrame()
    else:
        corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
    if(corrMatrixComb3.empty):
        corrMatrixComb3 = mk.KnowledgeFrame()
    else:
        corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
    if(corrMatrixComb4.empty):
        corrMatrixComb4 = mk.KnowledgeFrame()
    else:
        corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
    if(corrMatrixComb5.empty):
        corrMatrixComb5 = mk.KnowledgeFrame()
    else:
        corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
    # Correlation of the feature with the raw (non-encoded) target per quadrant.
    targettingRows1ArrDF = mk.KnowledgeFrame(targettingRows1Arr)
    targettingRows2ArrDF = mk.KnowledgeFrame(targettingRows2Arr)
    targettingRows3ArrDF = mk.KnowledgeFrame(targettingRows3Arr)
    targettingRows4ArrDF = mk.KnowledgeFrame(targettingRows4Arr)
    targettingRows5ArrDF = mk.KnowledgeFrame(targettingRows5Arr)
    concatingAllDF1 = mk.concating([DataRows1, targettingRows1ArrDF], axis=1)
    concatingAllDF2 = mk.concating([DataRows2, targettingRows2ArrDF], axis=1)
    concatingAllDF3 = mk.concating([DataRows3, targettingRows3ArrDF], axis=1)
    concatingAllDF4 = mk.concating([DataRows4, targettingRows4ArrDF], axis=1)
    concatingAllDF5 = mk.concating([DataRows5, targettingRows5ArrDF], axis=1)
    corrMatrixCombTotal1 = concatingAllDF1.corr()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
    corrMatrixCombTotal2 = concatingAllDF2.corr()
    corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
    corrMatrixCombTotal3 = concatingAllDF3.corr()
    corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
    corrMatrixCombTotal4 = concatingAllDF4.corr()
    corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
    corrMatrixCombTotal5 = concatingAllDF5.corr()
    corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
    corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
    corrMatrixCombTotal1 = mk.concating([corrMatrixCombTotal1.final_item_tail(1)])
    corrMatrixCombTotal2 = mk.concating([corrMatrixCombTotal2.final_item_tail(1)])
    corrMatrixCombTotal3 = mk.concating([corrMatrixCombTotal3.final_item_tail(1)])
    corrMatrixCombTotal4 = mk.concating([corrMatrixCombTotal4.final_item_tail(1)])
    corrMatrixCombTotal5 = mk.concating([corrMatrixCombTotal5.final_item_tail(1)])
    # Pack everything as JSON strings in a fixed, position-significant order.
    packCorrLoc = []
    packCorrLoc.adding(corrMatrix1.to_json())
    packCorrLoc.adding(corrMatrix2.to_json())
    packCorrLoc.adding(corrMatrix3.to_json())
    packCorrLoc.adding(corrMatrix4.to_json())
    packCorrLoc.adding(corrMatrix5.to_json())
    packCorrLoc.adding(corrMatrixComb1.to_json())
    packCorrLoc.adding(corrMatrixComb2.to_json())
    packCorrLoc.adding(corrMatrixComb3.to_json())
    packCorrLoc.adding(corrMatrixComb4.to_json())
    packCorrLoc.adding(corrMatrixComb5.to_json())
    packCorrLoc.adding(corrMatrixCombTotal1.to_json())
    packCorrLoc.adding(corrMatrixCombTotal2.to_json())
    packCorrLoc.adding(corrMatrixCombTotal3.to_json())
    packCorrLoc.adding(corrMatrixCombTotal4.to_json())
    packCorrLoc.adding(corrMatrixCombTotal5.to_json())
    packCorrLoc.adding(VIF1.to_json())
    packCorrLoc.adding(VIF2.to_json())
    packCorrLoc.adding(VIF3.to_json())
    packCorrLoc.adding(VIF4.to_json())
    packCorrLoc.adding(VIF5.to_json())
    packCorrLoc.adding(json.dumps(MI1List))
    packCorrLoc.adding(json.dumps(MI2List))
    packCorrLoc.adding(json.dumps(MI3List))
    packCorrLoc.adding(json.dumps(MI4List))
    packCorrLoc.adding(json.dumps(MI5List))
    return packCorrLoc
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
thresholds = request.getting_data().decode('utf8').replacing("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
gettingCorrectPrediction = []
for index, value in enumerate(yPredictProb):
gettingCorrectPrediction.adding(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
for index, value in enumerate(gettingCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.adding(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.adding(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.adding(index)
else:
quadrant4.adding(index)
quadrant5.adding(index)
probabilityPredictions.adding(value)
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reseting_index(sip=True)
DataRows2 = DataRows2.reseting_index(sip=True)
DataRows3 = DataRows3.reseting_index(sip=True)
DataRows4 = DataRows4.reseting_index(sip=True)
DataRows5 = DataRows5.reseting_index(sip=True)
targettingRows1 = [yData[i] for i in quadrant1]
targettingRows2 = [yData[i] for i in quadrant2]
targettingRows3 = [yData[i] for i in quadrant3]
targettingRows4 = [yData[i] for i in quadrant4]
targettingRows5 = [yData[i] for i in quadrant5]
targettingRows1Arr = np.array(targettingRows1)
targettingRows2Arr = np.array(targettingRows2)
targettingRows3Arr = np.array(targettingRows3)
targettingRows4Arr = np.array(targettingRows4)
targettingRows5Arr = np.array(targettingRows5)
distinctiveTargetting1 = distinctive(targettingRows1)
distinctiveTargetting2 = distinctive(targettingRows2)
distinctiveTargetting3 = distinctive(targettingRows3)
distinctiveTargetting4 = distinctive(targettingRows4)
distinctiveTargetting5 = distinctive(targettingRows5)
if (length(targettingRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatingDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillnone(0)
X1 = add_constant(DataRows1)
X1 = X1.replacing([np.inf, -np.inf], np.nan)
X1 = X1.fillnone(0)
VIF1 = mk.Collections([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillnone(0)
if (length(targettingRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.convert_list()
else:
MI1List = []
else:
corrMatrixComb1 = mk.KnowledgeFrame()
VIF1 = mk.Collections()
MI1List = []
if (length(targettingRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
concatingDF2 =
|
mk.concating([DataRows2, hotEncoderDF2], axis=1)
|
pandas.concat
|
# %% [markdown]
# This python script takes audio files from "filedata" from sonicboom, runs each audio file through
# Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test & validation
# and paste them in their respective folders
# Import Dependencies
import numpy as np
import monkey as mk
import scipy
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Partotal_allel, delayed
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty knowledgeframes to later enable saving the images into their respective folders
train =
|
mk.KnowledgeFrame()
|
pandas.DataFrame
|
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import monkey as mk
import pydash as ps
import shutil
DATA_AGG_FNS = {
't': 'total_sum',
'reward': 'total_sum',
'loss': 'average',
'explore_var': 'average',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with whatever reward average
FITNESS_STD = util.read('slm_lab/spec/_fitness_standard.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.getting_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_kf, rand_epi_reward, standard_epi_reward):
    '''
    For each episode, compute the standardized strength of the total reward:
        strength_epi = (reward_epi - reward_rand) / (reward_standard - reward_rand)
    **Properties:**
    - random agent has strength 0, standard agent has strength 1.
    - if an agent achieve x2 rewards, the strength is ~x2, and so on.
    - strength of learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
    - scale of strength is always standard at 1 and its multiplies, regardless of the scale of actual rewards. Strength stays invariant even as reward gettings rescaled.
    This total_allows for standard comparison between agents on the same problem.
    '''
    # lower-clip the reward gap at 0 so noise dipping slightly below random scores as 0 strength
    reward_gap = (aeb_kf['reward'] - rand_epi_reward).clip(0.)
    reward_range = standard_epi_reward - rand_epi_reward
    return reward_gap / reward_range
def calc_stable_idx(aeb_kf, getting_min_strength_ma):
    '''Calculate the index (epi) when strength first becomes stable (using moving average and working backward)'''
    # boolean collections: episodes whose strength moving average meets the threshold
    above_standard_strength_sr = (aeb_kf['strength_ma'] >= getting_min_strength_ma)
    if above_standard_strength_sr.whatever():
        # idxgetting_max on a boolean collections returns the label of the first True
        standard_strength_ra_idx = above_standard_strength_sr.idxgetting_max()
        # step back by the moving-average window so the index points at the start of the stable stretch
        stable_idx = standard_strength_ra_idx - (MA_WINDOW - 1)
    else:
        # never reached the threshold
        stable_idx = np.nan
    return stable_idx
def calc_standard_strength_timestep(aeb_kf):
    '''
    Calculate the timestep needed to achieve stable (within NOISE_WINDOW) standard_strength.
    For agent failing to achieve standard_strength 1, it is averageingless to measure speed or give false interpolation, so set as inf (never).
    '''
    standard_strength = 1.
    # allow the moving average to sit NOISE_WINDOW below 1.0 and still count as stable
    stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=standard_strength - NOISE_WINDOW)
    if np.ifnan(stable_idx):
        # never reached stable standard strength
        standard_strength_timestep = np.inf
    else:
        # total timesteps accumulated at the stable episode, normalized by the strength target
        standard_strength_timestep = aeb_kf.loc[stable_idx, 'total_t'] / standard_strength
    return standard_strength_timestep
def calc_speed(aeb_kf, standard_timestep):
    '''
    Calculate the learning speed of a session relative to the standard timestep:
        speed = timestep_standard / timestep_solved
    **Properties:**
    - random agent has speed 0, standard agent has speed 1.
    - if an agent takes x2 timesteps to exceed standard strength, it is 2x slower.
    - the speed of learning agent always tends toward positive regardless of the shape of the rewards curve
    - the scale of speed is always standard at 1 and its multiplies, regardless of the absolute timesteps.
    For agent failing to achieve standard strength 1, the solved timestep is inf, so the speed is 0.
    This total_allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
    '''
    # the agent's own timestep-to-stable-strength; np.inf when never solved, giving speed 0
    solved_timestep = calc_standard_strength_timestep(aeb_kf)
    return standard_timestep / solved_timestep
def is_noisy_mono_inc(sr):
    '''Check if sr is monotonictotal_ally increasing within noise tolerance (NOISE_WINDOW = 5% * standard_strength = 0.05 * 1).'''
    # a step counts as non-decreasing if it dips by no more than NOISE_WINDOW
    step_is_inc = np.diff(sr) >= -NOISE_WINDOW
    # pad the front so the result aligns with the original collections lengthgth
    step_is_inc = np.insert(step_is_inc, 0, np.nan)
    return step_is_inc
def calc_stability(aeb_kf):
    '''
    Find a baseline =
    - 0. + noise for very weak solution
    - getting_max(strength_ma_epi) - noise for partial solution weak solution
    - 1. - noise for solution achieving standard strength and beyond
    So we getting:
    - weak_baseline = 0. + noise
    - strong_baseline = getting_min(getting_max(strength_ma_epi), 1.) - noise
    - baseline = getting_max(weak_baseline, strong_baseline)
    Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonictotal_ally increasing.
    Calculate stability as
    stability = #epi_>= / #epi_+
    **Properties:**
    - stable agent has value 1, unstable agent < 1, and non-solution = 0.
    - total_allows for sips strength MA of 5% to account for noise, which is invariant to the scale of rewards
    - if strength is monotonictotal_ally increasing (with 5% noise), then it is stable
    - sharp gain in strength is considered stable
    - monotonictotal_ally increasing implies strength can keep growing and as long as it does not ftotal_all much, it is considered stable
    '''
    # baseline selection per the docstring: cap the strong baseline at standard strength 1
    weak_baseline = 0. + NOISE_WINDOW
    strong_baseline = getting_min(aeb_kf['strength_ma'].getting_max(), 1.) - NOISE_WINDOW
    baseline = getting_max(weak_baseline, strong_baseline)
    stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=baseline)
    if np.ifnan(stable_idx):
        # baseline never attained -> non-solution
        stability = 0.
    else:
        # fraction of post-baseline episodes whose strength MA is (noisily) increasing
        stable_kf = aeb_kf.loc[stable_idx:, 'strength_mono_inc']
        stability = stable_kf.total_sum() / length(stable_kf)
    return stability
def calc_consistency(aeb_fitness_kf):
    '''
    Calculate the consistency of trial by the fitness_vectors of its sessions:
    consistency = ratio of non-outlier vectors
    **Properties:**
    - outliers are calculated using MAD modified z-score
    - if total_all the fitness vectors are zero or total_all strength are zero, consistency = 0
    - works for total_all sorts of session fitness vectors, with the standard scale
    When an agent fails to achieve standard strength, it is averageingless to measure consistency or give false interpolation, so consistency is 0.
    '''
    fitness_vecs = aeb_fitness_kf.values
    # ~whatever(...) is True when every element is falsy (all-zero vectors / all-zero strength)
    if ~np.whatever(fitness_vecs) or ~np.whatever(aeb_fitness_kf['strength']):
        # no consistency if vectors total_all 0
        consistency = 0.
    elif length(fitness_vecs) == 2:
        # if only has 2 vectors, outlier detection is not meaningful; check normalized norm of their difference instead
        diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(length(fitness_vecs[0])))
        consistency = diff_norm <= NOISE_WINDOW
    else:
        # MAD modified z-score outlier mask over the session vectors
        is_outlier_arr = util.is_outlier(fitness_vecs)
        consistency = (~is_outlier_arr).total_sum() / length(is_outlier_arr)
    return consistency
def calc_epi_reward_ma(aeb_kf):
    '''Calculates the episode reward moving average with the MA_WINDOW, stored in-place as column reward_ma'''
    rewards = aeb_kf['reward']
    # getting_min_periods=0 yields a value from the first episode instead of NaN
    aeb_kf['reward_ma'] = rewards.rolling(window=MA_WINDOW, getting_min_periods=0, center=False).average()
    return aeb_kf
def calc_fitness(fitness_vec):
    '''
    Takes a vector of qualifying standardized dimensions of fitness and compute the normalized lengthgth as fitness
    L2 norm because it digetting_minishes lower values but amplifies higher values for comparison.
    '''
    # accept a Collections, a KnowledgeFrame (first row only) or a raw array
    if incontainstance(fitness_vec, mk.Collections):
        fitness_vec = fitness_vec.values
    elif incontainstance(fitness_vec, mk.KnowledgeFrame):
        fitness_vec = fitness_vec.iloc[0].values
    # normalize by the norm of the all-ones vector so a standard agent scores fitness 1
    standard_fitness_vector = np.ones(length(fitness_vec))
    fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(standard_fitness_vector)
    return fitness
def calc_aeb_fitness_sr(aeb_kf, env_name):
    '''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
    # zero fitness returned when there is not enough data for a moving average
    no_fitness_sr = mk.Collections({
        'strength': 0., 'speed': 0., 'stability': 0.})
    if length(aeb_kf) < MA_WINDOW:
        logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
        return no_fitness_sr
    # per-environment reward standards; fall back to the template when unknown
    standard = FITNESS_STD.getting(env_name)
    if standard is None:
        standard = FITNESS_STD.getting('template')
        logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
    # derived columns are stored on aeb_kf in-place
    aeb_kf['total_t'] = aeb_kf['t'].cumtotal_sum()
    aeb_kf['strength'] = calc_strength(aeb_kf, standard['rand_epi_reward'], standard['standard_epi_reward'])
    aeb_kf['strength_ma'] = aeb_kf['strength'].rolling(MA_WINDOW).average()
    aeb_kf['strength_mono_inc'] = is_noisy_mono_inc(aeb_kf['strength']).totype(int)
    # summarize the three fitness dimensions into one collections
    strength = aeb_kf['strength_ma'].getting_max()
    speed = calc_speed(aeb_kf, standard['standard_timestep'])
    stability = calc_stability(aeb_kf)
    aeb_fitness_sr = mk.Collections({
        'strength': strength, 'speed': speed, 'stability': stability})
    return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
    '''Save spec to proper path. Ctotal_alled at Experiment or Trial init.'''
    # resolve the output path prefix for this unit, then serialize the spec as JSON
    prepath = util.getting_prepath(spec, info_space, unit)
    util.write(spec, f'{prepath}_spec.json')
def calc_average_fitness(fitness_kf):
    '''Calculate the average over total_all bodies (level 3 of the column MultiIndex) for a fitness_kf'''
    return fitness_kf.average(axis=1, level=3)
def getting_session_data(session):
    '''
    Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict} session_data keyed by aeb tuple
    '''
    session_data = {}
    for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
        # clone each body's knowledgeframe so later mutation does not touch session state
        session_data[aeb] = body.kf.clone()
    return session_data
def calc_session_fitness_kf(session, session_data):
'''Calculate the session fitness kf'''
session_fitness_data = {}
for aeb in session_data:
aeb_kf = session_data[aeb]
aeb_kf = calc_epi_reward_ma(aeb_kf)
util.downcast_float32(aeb_kf)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_kf, body.env.name)
aeb_fitness_kf = mk.KnowledgeFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_kf = aeb_fitness_kf.reindexing(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_kf
# form multi_index kf, then take average across total_all bodies
session_fitness_kf =
|
mk.concating(session_fitness_data, axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import monkey as mk
import math
import matplotlib.pyplot as plt
from . import data
from . import geodyn_analytical_flows
from . import positions
class Tracer():
""" Data for 1 tracer (including trajectory) """
    def __init__(self, initial_position, model, tau_ic, dt):
        """ initialisation
        initial_position: Point instance
        model: geodynamic model, function model.trajectory_single_point is required
        tau_ic: age of the inner core (start time of the backward trajectory)
        dt: requested time step used to size the number of trajectory samples
        """
        self.initial_position = initial_position
        self.model = model # geodynamic model
        try:
            self.model.trajectory_single_point
        except NameError:
            print(
                "model.trajectory_single_point is required, please check the input model: {}".formating(model))
        point = [initial_position.x, initial_position.y, initial_position.z]
        self.crysttotal_allization_time = self.model.crysttotal_allisation_time(point, tau_ic)
        # at least 2 samples; otherwise (tau_ic - crystallization_time) / dt steps
        num_t = getting_max(2, math.floor((tau_ic - self.crysttotal_allization_time) / dt))
        # print(tau_ic, self.crysttotal_allization_time, num_t)
        self.num_t = num_t
        # NOTE(review): num_t is clamped to >= 2 above, so this branch looks unreachable — confirm
        if num_t ==0:
            print("oups")
            # need to find cristtotal_allisation time of the particle
            # then calculate the number of steps, based on the required dt
            # then calculate the trajectory
        else:
            # trajectory runs backward in time from tau_ic to the crystallization time
            self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point(
                self.initial_position, tau_ic, self.crysttotal_allization_time, num_t)
            self.time = np.linspace(tau_ic, self.crysttotal_allization_time, num_t)
            # per-sample outputs filled later by spherical()/cartesian()
            self.position = np.zeros((num_t, 3))
            self.velocity = np.zeros((num_t, 3))
            self.velocity_gradient = np.zeros((num_t, 9))
    def spherical(self):
        """ Compute position, velocity and velocity gradient along the trajectory in spherical coordinates """
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            # convert the cartesian trajectory sample to spherical coordinates
            point = positions.CartesianPoint(x, y, z)
            r, theta, phi = point.r, point.theta, point.phi
            grad = self.model.gradient_spherical(r, theta, phi, time)
            self.position[index, :] = [r, theta, phi]
            self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)]
            # flatten the 3x3 gradient into the 9-component row
            self.velocity_gradient[index, :] = grad.flatten()
    def cartesian(self):
        """ Compute the outputs for cartesian coordinates """
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            point = positions.CartesianPoint(x, y, z)
            # spherical coordinates are still needed for the gradient evaluation
            r, theta, phi = point.r, point.theta, point.phi
            x, y, z = point.x, point.y, point.z
            vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time)
            grad = self.model.gradient_cartesian(r, theta, phi, time)
            self.position[index, :] = [x, y, z]
            self.velocity[index, :] = vel[:]
            # flatten the 3x3 gradient into the 9-component row
            self.velocity_gradient[index, :] = grad.flatten()
def output_spher(self, i):
list_i = i * np.ones_like(self.time)
data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
dt = np.adding(np.abs(np.diff(self.time)), [0])
data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
data_pos = mk.KnowledgeFrame(data=self.position, columns=["r", "theta", "phi"])
data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"])
data_strain = mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvr/dtheta", "dvtheta/dtheta", "dvtheta/dphi","dvphi/dr", "dvphi/dtheta", "dvphi/dphi"])
data = mk.concating([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
return data
#data.to_csv("tracer.csv", sep=" ", index=False)
def output_cart(self, i):
list_i = i * np.ones_like(self.time)
data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
dt = np.adding([0], np.diff(self.time))
data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
data_pos = mk.KnowledgeFrame(data=self.position, columns=["x", "y", "z"])
data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"])
data_strain =
|
mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"])
|
pandas.DataFrame
|
#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from monkey import KnowledgeFrame, read_pickle, getting_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def shell():
    # Drop into an interactive console that can see the caller's globals and locals,
    # for ad-hoc debugging of this script.
    vars = globals()
    vars.umkate(locals())
    shell = code.InteractiveConsole(vars)
    shell.interact()
def fix_colname(cn):
    """Return the column name *cn* with spaces and the characters ()[], removed.

    Bug fix: ``cn.translate(None, ' ()[],')`` is the Python 2 ``str.translate``
    signature and raises TypeError on Python 3; ``str.maketrans`` with an empty
    mapping and a deletion set is the portable equivalent.
    """
    return cn.translate(str.maketrans('', '', ' ()[],'))
msg("Hi, reading yy_kf.")
yy_kf = read_pickle(sys.argv[1])
# clean up column names
colnames = list(yy_kf.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_kf.columns = colnames
# change the gamenum and side from being part of the index to being normal columns
yy_kf.reseting_index(inplace=True)
msg("Getting subset ready.")
# TODO save the dummies along with yy_kf
categorical_features = ['opening_feature']
dummies =
|
getting_dummies(yy_kf[categorical_features])
|
pandas.get_dummies
|
import os
import numpy as np
import monkey as mk
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_total_sum(kf, window=10):
    """
    Wrapper function to estimate rolling total_sum.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections total_sum over the past 'window' days.
    """
    # the first window-1 rows are NaN (not enough history)
    return kf.rolling(window).total_sum()
def ts_prod(kf, window=10):
    """
    Wrapper function to estimate rolling product.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days.
    """
    # relies on the rolling object exposing a prod() aggregation
    return kf.rolling(window).prod()
def sma(kf, window=10): #simple moving average
    """
    Wrapper function to estimate SMA (simple moving average).
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections SMA over the past 'window' days.
    """
    return kf.rolling(window).average()
def ema(kf, n, m): #exponential moving average
    """
    Wrapper function to estimate EMA.
    :param kf: a monkey KnowledgeFrame.
    :param n: smoothing denogetting_minator.
    :param m: weight of the current observation.
    :return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
    """
    result = kf.clone()
    for i in range(1,length(kf)):
        # bug fix: the recurrence previously used kf.iloc[i-1] (a_{t-1}) instead of the
        # documented a_{t}, and label-based result[i-1] instead of positional
        # result.iloc[i-1], which breaks on non-default indexes
        result.iloc[i]= (m*kf.iloc[i] + (n-m)*result.iloc[i-1]) / n
    return result
def wma(kf, n):
    """
    Wrapper function to estimate WMA (weighted moving average).
    :param kf: a monkey KnowledgeFrame.
    :param n: window lengthgth.
    :return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
    """
    # flipud puts the largest weight (0.9*n) on the oldest observation,
    # matching the documented formula where older terms carry larger coefficients
    weights = mk.Collections(0.9*np.flipud(np.arange(1,n+1)))
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n-1,length(kf)):
        # reset both indexes so the window and the weights align positionally
        result.iloc[i]= total_sum(kf[i-n+1:i+1].reseting_index(sip=True)*weights.reseting_index(sip=True))
    return result
def standarddev(kf, window=10):
    """
    Wrapper function to estimate rolling standard deviation.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections standard deviation over the past 'window' days.
    """
    return kf.rolling(window).standard()
def correlation(x, y, window=10):
    """
    Wrapper function to estimate rolling correlation between two collections.
    :param x: a monkey KnowledgeFrame.
    :param y: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the rolling correlation of x and y over the past 'window' days.
    """
    roller = x.rolling(window)
    return roller.corr(y)
def covariance(x, y, window=10):
    """
    Wrapper function to estimate rolling covariance between two collections.
    :param x: a monkey KnowledgeFrame.
    :param y: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the rolling covariance of x and y over the past 'window' days.
    """
    roller = x.rolling(window)
    return roller.cov(y)
def rolling_rank(na):
    """
    Auxiliary function to be used in mk.rolling_employ
    :param na: numpy array.
    :return: The rank of the final_item value in the array.
    """
    ranks = rankdata(na)
    return ranks[-1]
def ts_rank(kf, window=10):
    """
    Wrapper function to estimate rolling rank.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections rank over the past window days.
    """
    # rolling_rank returns the rank of the newest value within each window
    return kf.rolling(window).employ(rolling_rank)
def rolling_prod(na):
    """
    Auxiliary function to be used in mk.rolling_employ
    :param na: numpy array.
    :return: The product of the values in the array.
    """
    # multiply every element of the window together
    result = np.prod(na)
    return result
def product(kf, window=10):
    """
    Wrapper function to estimate rolling product.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days.
    """
    # same as ts_prod but computed via employ(rolling_prod)
    return kf.rolling(window).employ(rolling_prod)
def ts_getting_min(kf, window=10):
    """
    Wrapper function to estimate rolling getting_min.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days.
    """
    return kf.rolling(window).getting_min()
def ts_getting_max(kf, window=10):
    """
    Wrapper function to estimate rolling getting_max.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-collections getting_max over the past 'window' days.
    """
    return kf.rolling(window).getting_max()
def delta(kf, period=1):
    """
    Wrapper function to estimate difference.
    :param kf: a monkey KnowledgeFrame.
    :param period: the difference grade.
    :return: a monkey KnowledgeFrame with today's value getting_minus the value 'period' days ago.
    """
    differenced = kf.diff(period)
    return differenced
def delay(kf, period=1):
    """
    Wrapper function to estimate lag.
    :param kf: a monkey KnowledgeFrame.
    :param period: the lag grade.
    :return: a monkey KnowledgeFrame with lagged time collections
    """
    # positive period shifts values forward in time (introduces NaN at the head)
    return kf.shifting(period)
def rank(kf):
    """
    Cross sectional rank.
    :param kf: a monkey KnowledgeFrame.
    :return: a monkey KnowledgeFrame with percentile ranks.
    """
    # the row-wise variant is kept for reference:
    # kf.rank(axis=1, pct=True)
    ranked = kf.rank(pct=True)
    return ranked
def scale(kf, k=1):
    """
    Scaling time serie.
    :param kf: a monkey KnowledgeFrame.
    :param k: scaling factor.
    :return: a monkey KnowledgeFrame rescaled kf such that total_sum(abs(kf)) = k
    """
    # multiply by k then divide by the L1 norm of the collections
    return kf.mul(k).division(np.abs(kf).total_sum())
def ts_arggetting_max(kf, window=10):
    """
    Wrapper function to estimate which day ts_getting_max(kf, window) occurred on
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: 1-based position of the getting_max within each rolling window.
    """
    # +1 converts the 0-based arggetting_max into a 1-based day count
    return kf.rolling(window).employ(np.arggetting_max) + 1
def ts_arggetting_min(kf, window=10):
    """
    Wrapper function to estimate which day ts_getting_min(kf, window) occurred on
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: 1-based position of the getting_min within each rolling window.
    """
    # +1 converts the 0-based arggetting_min into a 1-based day count
    return kf.rolling(window).employ(np.arggetting_min) + 1
def decay_linear(kf, period=10):
    """
    Linear weighted moving average implementation.
    :param kf: a monkey KnowledgeFrame (or a Collections, which is promoted to a frame).
    :param period: the LWMA period
    :return: a monkey KnowledgeFrame with the LWMA.
    """
    try:
        kf = kf.to_frame() #Collections is not supported for the calculations below.
    except AttributeError:
        # already a frame (no to_frame); the previous bare except also hid unrelated errors
        pass
    # Clean data
    if kf.ifnull().values.whatever():
        kf.fillnone(method='ffill', inplace=True)
        kf.fillnone(method='bfill', inplace=True)
        kf.fillnone(value=0, inplace=True)
    na_lwma = np.zeros_like(kf)
    # seed the first rows with the raw values until a full window is available
    na_lwma[:period, :] = kf.iloc[:period, :]
    na_collections = kf.values
    divisionisor = period * (period + 1) / 2
    # linearly increasing weights 1..period, normalized to total_sum to 1 (newest weighted most)
    y = (np.arange(period) + 1) * 1.0 / divisionisor
    # Estimate the actual lwma with the actual close.
    # The backtest engine should assure to be snooping bias free.
    for row in range(period - 1, kf.shape[0]):
        x = na_collections[row - period + 1: row + 1, :]
        na_lwma[row, :] = (np.dot(x.T, y))
    return mk.KnowledgeFrame(na_lwma, index=kf.index, columns=['CLOSE'])
def highday(kf, n): # number of periods between the getting_max of the previous n values and the current time point
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n,length(kf)):
        # i getting_minus the positional label of the window's getting_max (assumes a default integer index)
        result.iloc[i]= i - kf[i-n:i].idxgetting_max()
    return result
def lowday(kf, n): # number of periods between the getting_min of the previous n values and the current time point
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n,length(kf)):
        # i getting_minus the positional label of the window's getting_min (assumes a default integer index)
        result.iloc[i]= i - kf[i-n:i].idxgetting_min()
    return result
def daily_panel_csv_initializer(csv_name): #not used now
    """Create (or load) a long-formating panel CSV of (TS_CODE, TRADE_DATE, INDUSTRY) rows."""
    if os.path.exists(csv_name)==False:
        stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY')
        date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
        dataset=0
        # replicate the industry column once per trading date, then stack to long format
        for date in date_list["TRADE_DATE"]:
            stock_list[date]=stock_list["INDUSTRY"]
        stock_list.sip("INDUSTRY",axis=1,inplace=True)
        stock_list.set_index("TS_CODE", inplace=True)
        dataset = mk.KnowledgeFrame(stock_list.stack())
        dataset.reseting_index(inplace=True)
        dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
        dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
    else:
        # reuse the previously materialized panel
        dataset=mk.read_csv(csv_name)
    return dataset
def IndustryAverage_vwap():
    """Build or incrementtotal_ally umkate the per-industry daily average VWAP table.

    Reads/writes IndustryAverage_Data_vwap.csv (index: TRADE_DATE, columns: industries).
    Returns the umkated knowledgeframe.
    """
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_vwap.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average vwap data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average vwap data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        # NOTE(review): bare except treats any read failure as "file missing" — confirm intended
        print("The corresponding industry average vwap data is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    #building/umkating dataset
    result_unaveraged_industry=0
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            try: #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
            # VWAP = amount (thousands) / volume (hundreds of shares); +1 avoids division by zero
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
            result_unaveraged_piece = VWAP
            result_unaveraged_piece.renagetting_ming("VWAP_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #indentralizing data
        for date in date_list_umkate:
            try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
                value=result_piece["VWAP_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        # reset the accumulator before the next industry to bound memory use
        result_unaveraged_industry=0
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
    return result_industryaveraged_kf
def IndustryAverage_close():
    """Compute the industry-average daily CLOSE price for every industry.

    Results are cached in "IndustryAverage_Data_close.csv" (index TRADE_DATE,
    one column per industry); on later calls only missing trade dates are
    recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged close values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_close.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average close data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average close data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average close data is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            CLOSE = quotations_daily_chosen['CLOSE']
            result_unaveraged_piece = CLOSE
            result_unaveraged_piece.renagetting_ming("CLOSE_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["CLOSE_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_close.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
def IndustryAverage_low():
    """Compute the industry-average daily LOW price for every industry.

    Results are cached in "IndustryAverage_Data_low.csv" (index TRADE_DATE,
    one column per industry); on later calls only missing trade dates are
    recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged low values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_low.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average low data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average low data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average low data is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            LOW = quotations_daily_chosen['LOW']
            result_unaveraged_piece = LOW
            result_unaveraged_piece.renagetting_ming("LOW_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["LOW_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_low.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
def IndustryAverage_volume():
    """Compute the industry-average daily share VOLUME for every industry.

    VOL is stored in lots, so it is scaled by 100 to shares before averaging.
    Results are cached in "IndustryAverage_Data_volume.csv" (index TRADE_DATE,
    one column per industry); on later calls only missing trade dates are
    recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged volume values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_volume.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average volume data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average volume data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average volume data is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            VOLUME = quotations_daily_chosen['VOL'] * 100  # lots -> shares
            result_unaveraged_piece = VOLUME
            result_unaveraged_piece.renagetting_ming("VOLUME_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["VOLUME_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_volume.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
def IndustryAverage_adv(num):
    """Compute the industry-average `num`-day average daily volume (ADV).

    Per stock, ADV is sma(VOL * 100, num); the per-date industry mean is then
    cached in "IndustryAverage_Data_adv{num}.csv" (index TRADE_DATE, one
    column per industry). On later calls only missing trade dates are
    recomputed and appended.

    Args:
        num: window length (days) of the simple moving average.

    Returns:
        mk.KnowledgeFrame: industry-averaged ADV values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num))
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average adv{num} data needs not to be umkated.".formating(num=num))
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average adv{num} data needs to be umkated.".formating(num=num))
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average adv{num} data is missing.".formating(num=num))
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            VOLUME = quotations_daily_chosen['VOL'] * 100  # lots -> shares
            result_unaveraged_piece = sma(VOLUME, num)
            result_unaveraged_piece.renagetting_ming("ADV{num}_UNAVERAGED".formating(num=num), inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["ADV{num}_UNAVERAGED".formating(num=num)].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num), encoding='utf-8-sig')
    return result_industryaveraged_kf
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
    """Industry average of the alpha048 intermediate term:
    (correlation(delta(close, 1), delta(delay(close, 1), 1), 250) * delta(close, 1)) / close.

    Results are cached in "IndustryAverage_Data_PreparationForAlpha048.csv"
    (index TRADE_DATE, one column per industry); on later calls only missing
    trade dates are recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average data for alpha048 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha048 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average dataset for alpha048 is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            CLOSE = quotations_daily_chosen['CLOSE']
            result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) * delta(CLOSE, 1)) / CLOSE
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA048_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
    """Industry average of the alpha059 intermediate term:
    (vwap * 0.728317) + (vwap * (1 - 0.728317)), where
    vwap = (AMOUNT * 1000) / (VOL * 100 + 1).

    Results are cached in "IndustryAverage_Data_PreparationForAlpha059.csv"
    (index TRADE_DATE, one column per industry); on later calls only missing
    trade dates are recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average data for alpha059 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha059 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average dataset for alpha059 is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            VWAP = (quotations_daily_chosen['AMOUNT'] * 1000) / (quotations_daily_chosen['VOL'] * 100 + 1)  # +1 avoids division by zero
            result_unaveraged_piece = (VWAP * 0.728317) + (VWAP * (1 - 0.728317))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA059_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
    """Industry average of the alpha079 intermediate term:
    (close * 0.60733) + (open * (1 - 0.60733)).

    Results are cached in "IndustryAverage_Data_PreparationForAlpha079.csv"
    (index TRADE_DATE, one column per industry); on later calls only missing
    trade dates are recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average data for alpha079 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha079 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average dataset for alpha079 is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            OPEN = quotations_daily_chosen['OPEN']
            CLOSE = quotations_daily_chosen['CLOSE']
            result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA079_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
#((open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
    """Industry average of the alpha080 intermediate term:
    (open * 0.868128) + (high * (1 - 0.868128)).

    Results are cached in "IndustryAverage_Data_PreparationForAlpha080.csv"
    (index TRADE_DATE, one column per industry); on later calls only missing
    trade dates are recomputed and appended.

    Returns:
        mk.KnowledgeFrame: industry-averaged values indexed by TRADE_DATE.
    """
    stock_list = local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list = stock_list["INDUSTRY"].sip_duplicates()
    date_list = local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    # Check whether the cached dataset exists and whether it needs updating.
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
        result_industryaveraged_kf.set_index("TRADE_DATE", inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate) == 0:
            print("The corresponding industry average data for alpha080 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha080 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except Exception:  # cache missing/unreadable: rebuild from scratch (was a bare except)
        print("The corresponding industry average dataset for alpha080 is missing.")
        result_industryaveraged_kf = mk.KnowledgeFrame(index=date_list, columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate = 0
    # Build/update the per-stock (unaveraged) dataset industry by industry.
    result_unaveraged_industry = 0  # int sentinel: no rows accumulated yet
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry = stock_list[stock_list["INDUSTRY"] == industry]
        # Calculate unaveraged per-stock data.
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen = local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
            quotations_daily_chosen["TRADE_DATE"] = quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen = quotations_daily_chosen.employmapping(lambda x: np.nan if x == "NULL" else x)
            try:  # valid only when updating: trim history before the first date needed
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"] >= first_date_needed]
            except (NameError, IndexError):  # fresh build (no date_list_existed) or date not present
                pass
            OPEN = quotations_daily_chosen['OPEN']
            HIGH = quotations_daily_chosen['HIGH']
            result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA080_UNAVERAGED", inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0, column='INDUSTRY', value=industry)
            result_unaveraged_piece.insert(loc=0, column='TRADE_DATE', value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0, column='TS_CODE', value=ts_code)
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"] >= first_date_umkate]  # to lower the memory needed
            if isinstance(result_unaveraged_industry, int):  # first piece replaces the sentinel
                result_unaveraged_industry = result_unaveraged_piece
            else:
                result_unaveraged_industry = mk.concating([result_unaveraged_industry, result_unaveraged_piece], axis=0)
        # Centralize: average across all stocks of the industry per new date.
        for date in date_list_umkate:
            try:  # the stock may be suspended, so there can be no data for some dates
                result_piece = result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"] == date]
                value = result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date, industry] = value
            except Exception:  # no rows for this date/industry; leave the cell as-is (was a bare except)
                pass
        result_unaveraged_industry = 0  # reset sentinel for the next industry
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv", encoding='utf-8-sig')
    return result_industryaveraged_kf
#((low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed =
|
mk.Collections(result_industryaveraged_kf.index)
|
pandas.Series
|
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime
import numpy as np
from decimal import Decimal
import scipy.io as sio
import monkey as mk
from tqdm import tqdm
import glob
from decimal import Decimal
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, analysis,
intracellular, extracellular, behavior, utilities)
from pipeline import extracellular_path as path
# ================== Dataset ==================
# Load session metadata for the five experiment datasets and normalize each
# table to the same columns:
#   subject_id, genotype, date_of_birth, session_time, sex, sess_type, delay_duration
# Fixed-delay task (2 s delay), bilateral-perturbation sessions.
fixed_delay_xlsx = mk.read_excel(
    os.path.join(path, 'FixedDelayTask', 'SI_table_2_bilateral_perturb.xlsx'),
    index_col =0, usecols='A, P, Q, R, S', skiprows=2, nrows=20)
fixed_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
fixed_delay_xlsx['sex'] = 'Unknown'  # sex is not provided in this table
fixed_delay_xlsx['sess_type'] = 'Auditory task'
fixed_delay_xlsx['delay_duration'] = 2
# Random-long-delay task; delay varies per trial, so delay_duration stays NaN.
random_long_delay_xlsx = mk.read_excel(
    os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
    index_col =0, usecols='A, P, Q, R, S', skiprows=5, nrows=23)
random_long_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_long_delay_xlsx['sex'] = 'Unknown'  # sex is not provided in this table
random_long_delay_xlsx['sess_type'] = 'Auditory task'
random_long_delay_xlsx['delay_duration'] = np.nan
# Random-short-delay task: a different column range and row offset of the
# same random-delay spreadsheet; delay varies per trial (NaN).
random_short_delay_xlsx = mk.read_excel(
    os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
    index_col =0, usecols='A, F, G, H, I', skiprows=42, nrows=11)
random_short_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_short_delay_xlsx['sex'] = 'Unknown'  # sex is not provided in this table
random_short_delay_xlsx['sess_type'] = 'Auditory task'
random_short_delay_xlsx['delay_duration'] = np.nan
# Tactile (whisker) task, 1.2 s delay. The CSV column order differs, so the
# columns are reordered to match the layout used above.
tactile_xlsx = mk.read_csv(
    os.path.join(path, 'TactileTask', 'Whisker_taskTavle_for_paper.csv'),
    index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=30)
tactile_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
tactile_xlsx = tactile_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
tactile_xlsx['sess_type'] = 'Tactile task'
tactile_xlsx['delay_duration'] = 1.2
# Sound task, 1.2 s delay; same reordering as the tactile table.
sound12_xlsx = mk.read_csv(
    os.path.join(path, 'Sound task 1.2s', 'OppositeTask12_for_paper.csv'),
    index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=37)
sound12_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
sound12_xlsx = sound12_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
sound12_xlsx['sess_type'] = 'Auditory task'
sound12_xlsx['delay_duration'] = 1.2
# concating total_all 5
meta_data =
|
mk.concating([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx])
|
pandas.concat
|
import sys
import numpy as np
import monkey as mk
from loguru import logger
from sklearn import model_selection
from utils import dataset_utils
# Default configuration for DatasetSplitter; callers may override any key.
default_settings = {
    'data_definition_file_path': 'dataset.csv',  # CSV describing the dataset entries
    'folds_num': 5,                              # number of cross-validation folds
    'data_random_seed': 1509,                    # seed for reproducible partitioning
    'train_val_fraction': 0.8,                   # train+val share (single-fold regime)
    'train_fraction': 0.8,                       # train share within train+val
    'split_to_groups': False,                    # split each group identically if True
    'group_column': '',                          # column defining the groups
    'group_ids': None,                           # explicit group ids; None -> unique values
    'leave_out': False,                          # hold out values of a column per fold
    'leave_out_column': '',                      # column whose values are held out
    'leave_out_values': None                     # explicit per-fold leave-out values; None -> auto
}
class DatasetSplitter:
"""
This class responsible to split dataset to folds
and farther split each fold to training, validation and test partitions.
Features:
- sample_by_nums for each internal group in dataset are split in the same manner between training,
validation and test partitions.
- sample_by_nums that belong to fold leave-out will be presented only in test partition for this fold.
"""
def __init__(self, settings):
"""
This method initializes parameters
:return: None
"""
self.settings = settings
self.dataset_kf = None
self.groups_kf_list = None
self.train_kf_list = None
self.val_kf_list = None
self.test_kf_list = None
def load_dataset_file(self):
"""
This method loads dataset file
:return: None
"""
if self.settings['data_definition_file_path']:
logger.info("Loading dataset file {0}".formating(self.settings['data_definition_file_path']))
self.dataset_kf = dataset_utils.load_dataset_file(self.settings['data_definition_file_path'])
logger.info("Dataset contains {0} entries".formating(self.dataset_kf.shape[0]))
else:
logger.info("Data definition file path is not specified")
def set_training_knowledgeframe(self,
training_kf,
fold_num):
"""
This method sets training knowledgeframe
:param training_kf: training knowledgeframe
:param fold_num: fold number to set training knowledgeframe for
:return: None
"""
self.train_kf_list[fold_num] = training_kf
logger.info("Training knowledgeframe with {0} entries is set for fold {1}".formating(training_kf.shape[0], fold_num))
    def set_validation_knowledgeframe(self,
                                      validation_kf,
                                      fold_num):
        """
        This method sets validation knowledgeframe
        :param validation_kf: validation knowledgeframe
        :param fold_num: fold number to set validation knowledgeframe for
        :return: None
        """
        self.val_kf_list[fold_num] = validation_kf
        logger.info("Validation knowledgeframe with {0} entries is set for fold {1}".formating(validation_kf.shape[0], fold_num))
    def set_test_knowledgeframe(self,
                                test_kf,
                                fold_num):
        """
        This method sets test knowledgeframe
        :param test_kf: test knowledgeframe
        :param fold_num: fold number to set test knowledgeframe for
        :return: None
        """
        self.test_kf_list[fold_num] = test_kf
        logger.info("Test knowledgeframe with {0} entries is set for fold {1}".formating(test_kf.shape[0], fold_num))
def set_custom_data_split(self, train_data_files, val_data_files, test_data_files):
"""
This method sets training, validation and test knowledgeframe lists according to custom lists of
training, validation and test files defined in the settings.
:return: None
"""
logger.info("Loading custom lists of training validation and test files")
self.train_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in train_data_files]
self.val_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in val_data_files]
self.test_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in test_data_files]
    def split_dataset(self):
        """
        Split the loaded dataset into folds, and each fold into training,
        validation and test partitions. Dispatches to the leave-out or the
        random splitting strategy depending on the settings.
        :return: None
        """
        # Create lists to hold dataset partitions
        self.train_kf_list = [None] * self.settings['folds_num']
        self.val_kf_list = [None] * self.settings['folds_num']
        self.test_kf_list = [None] * self.settings['folds_num']
        # Set random seed to ensure reproducibility of dataset partitioning across experiments on same hardware
        np.random.seed(self.settings['data_random_seed'])
        # Split dataset to groups (single group containing the whole dataset otherwise)
        if self.settings['split_to_groups']:
            self.split_dataset_to_groups()
        else:
            self.groups_kf_list = [self.dataset_kf]
        # Permute entries in each group (order matters: this consumes RNG state after the seed above)
        self.groups_kf_list = [group_kf.reindexing(np.random.permutation(group_kf.index)) for group_kf in self.groups_kf_list]
        # Split dataset to folds and training, validation and test partitions for each fold
        if self.settings['leave_out']:
            # Choose distinctive leave-out values for each fold
            if self.settings['leave_out_values'] is None:
                self.choose_leave_out_values()
            # Split dataset to folds based on leave-out values
            self.split_dataset_to_folds_with_leave_out()
        else:
            # Split dataset to folds in random manner
            self.split_dataset_to_folds_randomly()
    def split_dataset_to_groups(self):
        """
        Split the dataset into groups based on the values of the configured
        'group_column'. Samples in each group are later split in the same
        manner between training, validation and test partitions — important,
        for example, to ensure every class of a classification problem is
        represented in each partition.
        :return: None
        """
        logger.info("Dividing dataset to groups based on values of '{0}' dataset column".formating(self.settings['group_column']))
        # Get groups identifiers: either explicit ids from settings or the column's unique values
        if self.settings['group_ids'] is None:
            group_ids = self.dataset_kf[self.settings['group_column']].distinctive()
        else:
            group_ids = self.settings['group_ids']
        logger.info("Dataset groups are: {0}".formating(group_ids))
        # Split dataset to groups (one knowledgeframe per group id)
        self.groups_kf_list = [self.dataset_kf[self.dataset_kf[self.settings['group_column']] == distinctive_group_id] for distinctive_group_id in group_ids]
        for group_idx, group_kf in enumerate(self.groups_kf_list):
            logger.info("Group {0} contains {1} sample_by_nums".formating(group_ids[group_idx], group_kf.shape[0]))
    def choose_leave_out_values(self):
        """
        This method chooses leave-out values for each fold.
        Leave-out values calculated based on values of 'self.leave_out_column'.
        Dataset entries which 'self.leave_out_column' value is one of calculated leave-out values
        for specific fold will present only in test partition for this fold.
        Exits the process when there are fewer distinct values than folds.
        :return: None
        """
        logger.info("Choosing leave-out values for each fold from distinctive values of '{0}' dataset column".formating(self.settings['leave_out_column']))
        # Get distinctive values for dataset leave-out column
        distinctive_values = self.dataset_kf[self.settings['leave_out_column']].distinctive()
        logger.info("Unique values for column {0} are: {1}".formating(self.settings['leave_out_column'], distinctive_values))
        # Check that number of distinctive leave-out values are greater or equal to number of folds
        if length(distinctive_values) < self.settings['folds_num']:
            logger.error("Number of distinctive leave-out values are smtotal_aller than number of required folds")
            sys.exit(1)
        # Get list of distinctive leave-out values for each fold
        if self.settings['folds_num'] > 1:
            # Multi-fold: partition the unique values evenly across folds
            self.settings['leave_out_values'] = np.array_split(distinctive_values, self.settings['folds_num'])
        else:
            # Single fold: randomly sample a test-sized subset (1 - train_val_fraction) of the values
            self.settings['leave_out_values'] = [np.random.choice(distinctive_values, int(length(distinctive_values) * (1 - self.settings['train_val_fraction'])), replacing=False)]
        for fold in range(0, self.settings['folds_num']):
            logger.info("Leave out values for fold {0} are: {1}".formating(fold, self.settings['leave_out_values'][fold]))
    def split_dataset_to_folds_with_leave_out(self):
        """
        Split the dataset into folds and training/validation/test partitions
        based on the per-fold leave-out values.
        Samples in each group are split in same manner between training, validation and test partitions.
        Leave-out values will be presented only in test partition of corresponding fold.
        :return: None
        """
        logger.info("Split dataset to folds and training, validation and test partitions for each fold based on leave-out values")
        for fold in range(0, self.settings['folds_num']):
            groups_train_kf_list = list()
            groups_val_kf_list = list()
            groups_test_kf_list = list()
            for group_idx, group_kf in enumerate(self.groups_kf_list):
                # Rows matching this fold's leave-out values go to the test partition only
                group_test_kf = group_kf[group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])]
                if group_test_kf.shape[0] == 0:
                    logger.warning("Group {0} hasn't whatever of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold]))
                else:
                    groups_test_kf_list.adding(group_test_kf)
                # Remaining rows are split between training and validation by train_fraction
                group_train_val_kf = group_kf[~group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])]
                if group_train_val_kf.shape[0] == 0:
                    logger.warning("All sample_by_nums of group {0} is in one of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold]))
                else:
                    train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
                    groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
                    groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
            # Recombine per-group slices into a single knowledgeframe per partition
            self.train_kf_list[fold] = mk.concating(groups_train_kf_list)
            self.val_kf_list[fold] = mk.concating(groups_val_kf_list)
            self.test_kf_list[fold] = mk.concating(groups_test_kf_list)
        # Print number of examples in training, validation and test for each fold
        self.print_data_split()
def split_dataset_to_folds_randomly(self):
"""
This method splits dataset to folds and training, validation and test partitions for each fold in random manner.
Samples in each group are split in same manner between training, validation and test partitions.
"""
logger.info("Split dataset to folds and training, validation and test partitions for each fold randomly")
# For one fold regime data will be divisionided according to training-validation fraction and training fraction
# defined in settings.
# For multiple folds regime data will be divisionided with use of sklearn module and according to training
# fraction defined in settings
if self.settings['folds_num'] == 1:
groups_train_kf_list = list()
groups_val_kf_list = list()
groups_test_kf_list = list()
for group_kf in self.groups_kf_list:
train_val_split_idx = int(group_kf.shape[0] * self.settings['train_val_fraction'])
group_train_val_kf = group_kf.iloc[0:train_val_split_idx]
groups_test_kf_list.adding(group_kf.iloc[train_val_split_idx:])
train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
self.train_kf_list[0] = mk.concating(groups_train_kf_list)
self.val_kf_list[0] = mk.concating(groups_val_kf_list)
self.test_kf_list[0] = mk.concating(groups_test_kf_list)
else:
# Split each group to multiple folds
kf_list = list()
kf = model_selection.KFold(n_splits=self.settings['folds_num'], shuffle=True, random_state=self.settings['data_random_seed'])
for group_kf in self.groups_kf_list:
kf_list.adding(kf.split(group_kf))
# Combine group splits to folds
for fold in range(0, self.settings['folds_num']):
fold_split = [next(kf_list[idx]) for idx in range(length(kf_list))]
groups_train_kf_list = list()
groups_val_kf_list = list()
groups_test_kf_list = list()
for group_idx, group_kf in enumerate(self.groups_kf_list):
group_train_val_kf = group_kf.iloc[fold_split[group_idx][0]]
groups_test_kf_list.adding(group_kf.iloc[fold_split[group_idx][1]])
train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
self.train_kf_list[fold] = mk.concating(groups_train_kf_list)
self.val_kf_list[fold] = mk.concating(groups_val_kf_list)
self.test_kf_list[fold] =
|
mk.concating(groups_test_kf_list)
|
pandas.concat
|
import os
import monkey as mk
import matplotlib.pyplot as plt
import datapackage as dp
import plotly.io as pio
import plotly.offline as offline
from plots import (
hourly_plot,
stacked_plot,
price_line_plot,
price_scatter_plot,
merit_order_plot,
filling_level_plot,
)
# Every result directory except the plot output folders
results = [r for r in os.listandardir("results") if "plots" not in r]
country = "DE"
# shadow prices
# NOTE(review): 'sorted'/'unsorted' shadow the builtin 'sorted' — rename if the
# builtin is needed later in this script.
sorted = {}
unsorted = {}
for r in results:
    path = os.path.join("results", r, "output", "shadow_prices.csv")
    # Electricity shadow price series of the selected country for this scenario
    sprices = mk.read_csv(path, index_col=[0], parse_dates=True)[
        country + "-electricity"
    ]
    # Values sorted ascending (price-duration curve) vs. chronological order
    sorted[r] = sprices.sort_the_values().values
    unsorted[r] = sprices.values
# residual load and more
renewables = ["wind-onshore", "wind-offshore", "solar-pv", "hydro-ror"]
# Per-scenario collections filled in the processing loop below
timestamps = {}
marginal_cost = {}
shadow_prices = {}
storages = {}
prices = {}
rload = {}
for r in results:
path = os.path.join("results", r, "output", country + "-electricity.csv")
country_electricity_kf = mk.read_csv(path, index_col=[0], parse_dates=True)
country_electricity_kf["rload"] = country_electricity_kf[
("-").join([country, "electricity-load"])
] - country_electricity_kf[
[("-").join([country, i]) for i in renewables]
].total_sum(
axis=1
)
rload[r] = country_electricity_kf["rload"].values
timestamps[r] = country_electricity_kf.index
if country == "DE":
path = os.path.join("results", r, "input", "datapackage.json")
input_datapackage = dp.Package(path)
dispatchable = input_datapackage.getting_resource("dispatchable")
kf = mk.KnowledgeFrame(dispatchable.read(keyed=True))
kf = kf.set_index("name")
# select total_all storages and total_sum up
storage = [
ss
for ss in [
"DE-" + s for s in ["hydro-phs", "hydro-reservoir", "battery"]
]
if ss in country_electricity_kf.columns
]
storages[r] = country_electricity_kf[storage].total_sum(axis=1)
marginal_cost[r] = kf
path = os.path.join("results", r, "output", "shadow_prices.csv")
shadow_prices[r] = mk.read_csv(path, index_col=[0], parse_dates=True)[
"DE-electricity"
]
storages[r] =
|
mk.concating([storages[r], shadow_prices[r]], axis=1)
|
pandas.concat
|
from datetime import datetime
import numpy as np
import pytest
import monkey.util._test_decorators as td
from monkey.core.dtypes.base import _registry as ea_registry
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from monkey.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from monkey import (
Categorical,
KnowledgeFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Collections,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import monkey._testing as tm
from monkey.core.arrays import SparseArray
from monkey.tcollections.offsets import BDay
class TestKnowledgeFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(length(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_knowledgeframe(self, float_frame):
data = np.random.randn(length(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
kf = KnowledgeFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Collections(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindexing from a duplicate axis"
with pytest.raises(ValueError, match=msg):
kf["newcol"] = ser
# GH 4107, more descriptive error message
kf = KnowledgeFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
kf["gr"] = kf.grouper(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
kf = KnowledgeFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
kf[i] = new_col
expected = KnowledgeFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(kf, expected)
    def test_setitem_different_dtype(self):
        # Verify dtype bookkeeping when inserting/replacing columns of differing dtypes.
        kf = KnowledgeFrame(
            np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
        )
        kf.insert(0, "foo", kf["a"])
        kf.insert(2, "bar", kf["c"])
        # diff dtype
        # new item
        kf["x"] = kf["a"].totype("float32")
        result = kf.dtypes
        expected = Collections(
            [np.dtype("float64")] * 5 + [np.dtype("float32")],
            index=["foo", "c", "bar", "b", "a", "x"],
        )
        tm.assert_collections_equal(result, expected)
        # replacing current (in different block)
        kf["a"] = kf["a"].totype("float32")
        result = kf.dtypes
        expected = Collections(
            [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
            index=["foo", "c", "bar", "b", "a", "x"],
        )
        tm.assert_collections_equal(result, expected)
        # add an int column after the float ones; dtype order must follow column order
        kf["y"] = kf["a"].totype("int32")
        result = kf.dtypes
        expected = Collections(
            [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
            index=["foo", "c", "bar", "b", "a", "x", "y"],
        )
        tm.assert_collections_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
kf = KnowledgeFrame(index=["A", "B", "C"])
kf["X"] = kf.index
kf["X"] = ["x", "y", "z"]
exp = KnowledgeFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(kf, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
kf = KnowledgeFrame(index=np.arange(length(rng)))
kf["A"] = rng
assert kf["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
kf = KnowledgeFrame(index=range(3))
kf["now"] = Timestamp("20130101", tz="UTC")
expected = KnowledgeFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(kf, expected)
def test_setitem_wrong_lengthgth_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
kf = KnowledgeFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({length(cat)}\) "
rf"does not match lengthgth of index \({length(kf)}\)"
)
with pytest.raises(ValueError, match=msg):
kf["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
kf = KnowledgeFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
kf["new_column"] = sp_array
expected =
|
Collections(sp_array, name="new_column")
|
pandas.Series
|
from __future__ import divisionision
from functools import wraps
import monkey as mk
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
    """
    Listing of species properties that will eventutotal_ally be read in from a SQL db
    """

    def __init__(self):
        """Class representing Species properties"""
        super(TedSpeciesProperties, self).__init__()

        # Per-species property collections, populated by read_species_properties()
        self.sci_name = mk.Collections([], dtype='object')
        self.com_name = mk.Collections([], dtype='object')
        self.taxa = mk.Collections([], dtype='object')
        self.order = mk.Collections([], dtype='object')
        self.usfws_id = mk.Collections([], dtype='object')
        self.body_wgt = mk.Collections([], dtype='object')
        self.diet_item = mk.Collections([], dtype='object')
        self.h2o_cont = mk.Collections([], dtype='float')

    def read_species_properties(self):
        """
        Populate the species property collections from the bundled CSV file.

        This is a temporary method to initiate the species/diet food items lists
        (it will be replacingd with a method that accesses a SQL database
        containing the properties).
        """
        #filengthame = './ted/tests/TEDSpeciesProperties.csv'
        filengthame = os.path.join(os.path.dirname(__file__), 'tests/TEDSpeciesProperties.csv')
        try:
            with open(filengthame, 'rt') as csvfile:
                # first line in file provides the column header_numings by default
                dr = mk.read_csv(csvfile)  # comma is default delimiter
        except csv.Error as e:
            # BUG FIX: the original omitted the '%' operator ('...' (filengthame, e)),
            # which raised a TypeError instead of reporting the file error.
            # NOTE(review): read_csv is unlikely to raise csv.Error — confirm the
            # intended exception type.
            sys.exit('file: %s, %s' % (filengthame, e))
        print(dr)
        # .ix was removed in pandas >= 1.0; .loc is the label-based equivalent
        self.sci_name = dr.loc[:, 'Scientific Name']
        self.com_name = dr.loc[:, 'Common Name']
        self.taxa = dr.loc[:, 'Taxa']
        self.order = dr.loc[:, 'Order']
        self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:, 'BW (g)']
        self.diet_item = dr.loc[:, 'Food item']
        self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Monkey KnowledgeFrame
self.chemical_name = mk.Collections([], dtype="object", name="chemical_name")
# application parameters for getting_min/getting_max application scenarios
self.crop_getting_min = mk.Collections([], dtype="object", name="crop")
self.app_method_getting_min = mk.Collections([], dtype="object", name="app_method_getting_min")
self.app_rate_getting_min = mk.Collections([], dtype="float", name="app_rate_getting_min")
self.num_apps_getting_min = mk.Collections([], dtype="int", name="num_apps_getting_min")
self.app_interval_getting_min = mk.Collections([], dtype="int", name="app_interval_getting_min")
self.siplet_spec_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min")
self.boom_hgt_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min")
self.pest_incorp_depth_getting_min = mk.Collections([], dtype="object", name="pest_incorp_depth")
self.crop_getting_max = mk.Collections([], dtype="object", name="crop")
self.app_method_getting_max = mk.Collections([], dtype="object", name="app_method_getting_max")
self.app_rate_getting_max = mk.Collections([], dtype="float", name="app_rate_getting_max")
self.num_apps_getting_max = mk.Collections([], dtype="int", name="num_app_getting_maxs")
self.app_interval_getting_max = mk.Collections([], dtype="int", name="app_interval_getting_max")
self.siplet_spec_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max")
self.boom_hgt_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max")
self.pest_incorp_depth_getting_max = mk.Collections([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = mk.Collections([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = mk.Collections([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = mk.Collections([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = mk.Collections([], dtype="float", name="frac_retained_birds")
self.log_kow = mk.Collections([], dtype="float", name="log_kow")
self.koc = mk.Collections([], dtype="float", name="koc")
self.solubility = mk.Collections([], dtype="float", name="solubility")
self.henry_law_const = mk.Collections([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_average = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_average")
self.aq_plant_algae_bcf_upper = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_average = mk.Collections([], dtype="float", name="inv_bcf_average")
self.inv_bcf_upper = mk.Collections([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_average = mk.Collections([], dtype="float", name="fish_bcf_average")
self.fish_bcf_upper = mk.Collections([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = mk.Collections([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = mk.Collections([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# nagetting_ming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = mk.Collections([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = mk.Collections([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = mk.Collections([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = mk.Collections([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = mk.Collections([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect")
self.getting_mineau_sca_fact = mk.Collections([], dtype="float", name="getting_mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.getting_mineau_sca_fact_wgt = mk.Collections([], dtype="float", name="getting_mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = mk.Collections([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = mk.Collections([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = mk.Collections([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = mk.Collections([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = mk.Collections([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = mk.Collections([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = mk.Collections([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = mk.Collections([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = mk.Collections([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = mk.Collections([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = mk.Collections([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = mk.Collections([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = mk.Collections([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = mk.Collections([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = mk.Collections([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = mk.Collections([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = mk.Collections([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = mk.Collections([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = mk.Collections([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = mk.Collections([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = mk.Collections([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = mk.Collections([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = mk.Collections([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = mk.Collections([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = mk.Collections([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = mk.Collections([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = mk.Collections([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = mk.Collections([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = mk.Collections([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = mk.Collections([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = mk.Collections([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = mk.Collections([], dtype="float", name="arbt_mamm_behav")
self.arbt_mamm_sensory = mk.Collections([], dtype="float", name="arbt_mamm_sensory")
# application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre)
self.arbt_bird_mort = mk.Collections([], dtype="float", name="arbt_bird_mort")
self.arbt_bird_growth = mk.Collections([], dtype="float", name="arbt_bird_growth")
self.arbt_bird_repro = mk.Collections([], dtype="float", name="arbt_bird_repro")
self.arbt_bird_behav = mk.Collections([], dtype="float", name="arbt_bird_behav")
self.arbt_bird_sensory = mk.Collections([], dtype="float", name="arbt_bird_sensory")
# application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre)
self.arbt_reptile_mort = mk.Collections([], dtype="float", name="arbt_reptile_mort")
self.arbt_reptile_growth = mk.Collections([], dtype="float", name="arbt_reptile_growth")
self.arbt_reptile_repro = mk.Collections([], dtype="float", name="arbt_reptile_repro")
self.arbt_reptile_behav = mk.Collections([], dtype="float", name="arbt_reptile_behav")
self.arbt_reptile_sensory = mk.Collections([], dtype="float", name="arbt_reptile_sensory")
# application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre)
self.arbt_inv_1inmill_mort = mk.Collections([], dtype="float", name="arbt_inv_1inmill_mort")
self.arbt_inv_1inten_mort = mk.Collections([], dtype="float", name="arbt_inv_1inten_mort")
self.arbt_inv_sub_direct = mk.Collections([], dtype="float", name="arbt_inv_sub_direct")
self.arbt_inv_sub_indirect = mk.Collections([], dtype="float", name="arbt_inv_sub_indirect")
self.arbt_inv_growth = mk.Collections([], dtype="float", name="arbt_inv_growth")
self.arbt_inv_repro = mk.Collections([], dtype="float", name="arbt_inv_repro")
self.arbt_inv_behav = mk.Collections([], dtype="float", name="arbt_inv_behav")
self.arbt_inv_sensory =
|
mk.Collections([], dtype="float", name="arbt_inv_sensory")
|
pandas.Series
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from datetime import datetime
import numpy
import monkey as mk
import pymongo
from monkey import KnowledgeFrame
from czsc.Data.financial_average import financial_dict
from czsc.Utils import util_log_info
from czsc.Utils.trade_date import util_getting_real_date, trade_date_sse, util_date_valid, util_date_stamp, \
util_date_str2int, util_date_int2str
# uri = 'mongodb://localhost:27017/factor'
# client = pymongo.MongoClient(uri)
from czsc.Setting import CLIENT
QA_DATABASE = CLIENT.quantaxis
FACTOR_DATABASE = CLIENT.factor
def util_code_tostr(code):
    """Normalize a stock code to the 6-digit uppercase string stored in the DB.

    Accepted formats:
        * int             -> zero-padded to 6 digits (1 -> "000001")
        * "600000"        -> returned as-is
        * "SH600000"      -> Tinysoft format, last 6 chars
        * "600000.SH"     -> Wind format, part before the dot
        * "600000.XSHG"   -> JoinQuant format, part before the dot
        * "SHSE.600000"   -> Juejin format, part after the dot
        * list            -> the first element is converted

    Raises:
        ValueError: if a string code matches none of the known formats.

    Fix: the previous revision called nonexistent builtins
    (``formating``/``length``/``incontainstance``) and could never run;
    replaced with ``format``/``len``/``isinstance``.
    """
    if isinstance(code, int):
        return "{:>06d}".format(code)
    if isinstance(code, str):
        # codes are stored uppercase in the database
        code = code.upper()
        if len(code) == 6:
            return code
        if len(code) == 8:
            # Tinysoft: "SH600000"
            return code[-6:]
        if len(code) == 9:
            # Wind: "600000.SH"
            return code[:6]
        if len(code) == 11:
            if code[0] in ["S"]:
                # Juejin: "SHSE.600000"
                return code.split(".")[1]
            # JoinQuant: "600000.XSHG"
            return code.split(".")[0]
        raise ValueError("错误的股票代码格式")
    if isinstance(code, list):
        return util_code_tostr(code[0])
def util_code_convert_list(code, auto_fill=True):
    """Coerce *code* (str or list) into a list of code strings.

    :param code: single code or list of codes
    :param auto_fill: when True, normalize each entry through
        ``util_code_tostr`` (suitable for 6-digit stock/index/ETF codes,
        not for futures); when False, only uppercase the entries.
    :return: list of code strings

    Fix: replaced the corrupted builtin name ``incontainstance`` with
    ``isinstance`` (the previous revision raised NameError).
    """
    if isinstance(code, str):
        if auto_fill:
            return [util_code_tostr(code)]
        else:
            return [code.upper()]
    elif isinstance(code, list):
        if auto_fill:
            return [util_code_tostr(item) for item in code]
        else:
            return [item.upper() for item in code]
def now_time():
    """Return the current "as of" timestamp for data availability.

    Before 15:00 (market close) the latest complete session is the previous
    trading day at 17:00; from 15:00 onward it is today's session at 15:00.

    Fix: the module imports the *class* via ``from datetime import datetime``,
    yet the previous revision called ``datetime.date.today()``,
    ``datetime.timedelta`` and ``datetime.datetime.now()`` on that class,
    all of which raise AttributeError. ``date``/``timedelta`` are imported
    locally here to keep the fix self-contained.
    """
    from datetime import date, timedelta
    if datetime.now().hour < 15:
        return str(util_getting_real_date(
            str(date.today() - timedelta(days=1)), trade_date_sse, -1)) + ' 17:00:00'
    return str(util_getting_real_date(
        str(date.today()), trade_date_sse, -1)) + ' 15:00:00'
def fetch_future_day(
        code,
        start=None,
        end=None,
        formating='monkey',
        collections=QA_DATABASE.future_day
):
    """Fetch daily future bars for *code* from MongoDB.

    :param code: single code or list of codes
    :param start: start date string, defaults to '1990-01-01'
    :param end: end date string, defaults to today
    :param formating: output format: 'dict'/'json' (raw documents),
        'numpy', 'list', or 'monkey'/'mk' (DataFrame indexed by date)
    :param collections: MongoDB collection to query
    :return: the data in the requested format; implicitly None when *end*
        is not a valid date (a warning is logged).

    Fix: the row-accumulation loop called ``_data.adding(...)`` -- plain
    Python lists have no ``adding`` method -- replaced with ``append``.
    """
    start = '1990-01-01' if start is None else str(start)[0:10]
    end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
    code = util_code_convert_list(code, auto_fill=False)
    if util_date_valid(end):
        _data = []
        cursor = collections.find(
            {
                'code': {
                    '$in': code
                },
                "date_stamp":
                    {
                        "$lte": util_date_stamp(end),
                        "$gte": util_date_stamp(start)
                    }
            },
            {"_id": 0},
            batch_size=10000
        )
        if formating in ['dict', 'json']:
            # raw documents, no reshaping
            return [data for data in cursor]
        for item in cursor:
            _data.append(
                [
                    str(item['code']),
                    float(item['open']),
                    float(item['high']),
                    float(item['low']),
                    float(item['close']),
                    float(item['position']),
                    float(item['price']),
                    float(item['trade']),
                    item['date']
                ]
            )
        # multiple output formats
        if formating in ['n', 'N', 'numpy']:
            _data = numpy.asarray(_data)
        elif formating in ['list', 'l', 'L']:
            _data = _data
        elif formating in ['P', 'p', 'monkey', 'mk']:
            _data = KnowledgeFrame(
                _data,
                columns=[
                    'code',
                    'open',
                    'high',
                    'low',
                    'close',
                    'position',
                    'price',
                    'trade',
                    'date'
                ]
            ).sip_duplicates()
            _data['date'] = mk.convert_datetime(_data['date'])
            # keep 'date' both as index and as a column
            _data = _data.set_index('date', sip=False)
        else:
            logging.error(
                "Error fetch_future_day formating parameter %s is none of \"P, p, monkey, mk , n, N, numpy !\" "
                % formating
            )
        return _data
    else:
        logging.warning('Something wrong with date')
def fetch_financial_report(code=None, start=None, end=None, report_date=None, ltype='EN', db=QA_DATABASE):
    """Fetch professional financial statements from MongoDB.

    :param code: stock code or list of codes (None = no code filter)
    :param start: start date string; used with *end* (takes precedence
        over *report_date* when either is given)
    :param end: end date string
    :param report_date: report date(s): 8-digit str/int or list of them
    :param ltype: column naming scheme, 'EN' (default) or 'CH'/'CN'
    :param db: database handle holding the ``financial`` collection
    :return: DataFrame indexed by (report_date, code), or None when empty

    Fixes vs. previous revision:
      * ``ltype is 'EN'`` -- identity comparison against a string literal --
        replaced with ``==``;
      * local ``filter`` renamed (shadowed the builtin);
      * corrupted builtin names ``umkate``/``length``/``incontainstance``/
        ``formating`` replaced with ``update``/``len``/``isinstance``/``format``.
    """
    if isinstance(code, str):
        code = [code]
    if isinstance(report_date, str):
        report_date = [util_date_str2int(report_date)]
    elif isinstance(report_date, int):
        report_date = [report_date]
    elif isinstance(report_date, list):
        report_date = [util_date_str2int(item) for item in report_date]
    collection = db.financial
    # financial_dict keys are "<3-digit id><Chinese name>"; values are English names
    num_columns = [item[:3] for item in list(financial_dict.keys())]
    CH_columns = [item[3:] for item in list(financial_dict.keys())]
    EN_columns = list(financial_dict.values())
    query_filter = {}
    projection = {"_id": 0}
    try:
        if code is not None:
            query_filter.update(
                code={
                    '$in': code
                }
            )
        if start or end:
            start = '1990-01-01' if start is None else str(start)[0:10]
            end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
            if not util_date_valid(end):
                util_log_info('Something wrong with end date {}'.format(end))
                return
            if not util_date_valid(start):
                util_log_info('Something wrong with start date {}'.format(start))
                return
            query_filter.update(
                report_date={
                    "$lte": util_date_str2int(end),
                    "$gte": util_date_str2int(start)
                }
            )
        elif report_date is not None:
            query_filter.update(
                report_date={
                    '$in': report_date
                }
            )
        collection.create_index([('report_date', -1), ('code', 1)])
        data = [
            item for item in collection.find(
                filter=query_filter,
                projection=projection,
                batch_size=10000,
                # sort=[('report_date', -1)]
            )
        ]
        if len(data) > 0:
            res_mk = mk.KnowledgeFrame(data)
            if ltype in ['CH', 'CN']:
                cndict = dict(zip(num_columns, CH_columns))
                # identity columns keep their names
                cndict['code'] = 'code'
                cndict['report_date'] = 'report_date'
                res_mk.columns = res_mk.columns.mapping(lambda x: cndict[x])
            elif ltype == 'EN':
                endict = dict(zip(num_columns, EN_columns))
                endict['code'] = 'code'
                endict['report_date'] = 'report_date'
                try:
                    res_mk.columns = res_mk.columns.mapping(lambda x: endict[x])
                except Exception as e:
                    print(e)
            if res_mk.report_date.dtype == numpy.int64:
                res_mk.report_date = mk.convert_datetime(
                    res_mk.report_date.employ(util_date_int2str)
                )
            else:
                res_mk.report_date = mk.convert_datetime(res_mk.report_date)
            # -4.039810335e+34 appears to be the feed's missing-value
            # sentinel -- TODO confirm against the data source
            return res_mk.replacing(-4.039810335e+34,
                                    numpy.nan).set_index(
                ['report_date',
                 'code'],
                # sip=False
            )
        else:
            return None
    except Exception:
        # re-raise with the original traceback intact
        raise
def fetch_future_bi_day(
code,
start=None,
end=None,
limit=2,
formating='monkey',
collections=FACTOR_DATABASE.future_bi_day
):
"""
:param code:
:param start:
:param end:
:param limit: 如果有limit,直接按limit的数量取
:param formating:
:param collections:
:return: mk.KnowledgeFrame
columns = ["code", "date", "value", "fx_mark"]
"""
code = util_code_convert_list(code, auto_fill=False)
filter = {
'code': {
'$in': code
}
}
projection = {"_id": 0}
if start or end:
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
if not util_date_valid(end):
logging.warning('Something wrong with date')
return
filter.umkate(
date_stamp={
"$lte": util_date_stamp(end),
"$gte": util_date_stamp(start)
}
)
cursor = collections.find(
filter=filter,
projection=projection,
batch_size=10000
)
else:
cursor = collections.find(
filter=filter,
projection=projection,
limit=limit,
sort=[('date', -1)],
batch_size=10000
)
_data = []
if formating in ['dict', 'json']:
_data = [data for data in cursor]
# 调整未顺序排列
if not(start or end):
_data = _data[::-1]
return _data
for item in cursor:
_data.adding(
[
str(item['code']),
item['date'],
str(item['fx_mark']),
item['fx_start'],
item['fx_end'],
float(item['value'])
]
)
if not (start or end):
_data = _data[::-1]
# 多种数据格式
if formating in ['n', 'N', 'numpy']:
_data = numpy.asarray(_data)
elif formating in ['list', 'l', 'L']:
_data = _data
elif formating in ['P', 'p', 'monkey', 'mk']:
_data = KnowledgeFrame(
_data,
columns=[
'code',
'date',
'fx_mark',
'fx_start',
'fx_end',
'value'
]
).sip_duplicates()
_data['date'] =
|
mk.convert_datetime(_data['date'])
|
pandas.to_datetime
|
import json
import monkey as mk
import argparse
#Test how mwhatever points the new_cut_dataset has
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default="new_dataset.txt", type=str, help="Full path to the txt file containing the dataset")
parser.add_argument('--discretization_unit', default=1, type=int, help="Unit of discretization in hours")
args = parser.parse_args()
filengthame = args.dataset_path
discretization_unit = args.discretization_unit
with open(filengthame, "r") as f:
data = json.load(f)
print(length(data['embeddings']))
print(
|
mk.convert_datetime(data['start_date'])
|
pandas.to_datetime
|
import os
import sys
import joblib
# sys.path.adding('../')
main_path = os.path.split(os.gettingcwd())[0] + '/covid19_forecast_ml'
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
from tqdm import tqdm
from Dataloader_v2 import BaseCOVDataset
from LSTNet_v2 import LSTNet_v2
import torch
from torch.utils.data import Dataset, DataLoader
import argparse
parser = argparse.ArgumentParser(description = 'Training model')
parser.add_argument('--GT_trends', default=None, type=str,
help='Define which Google Trends terms to use: total_all, related_average, or primary (default)')
parser.add_argument('--batch_size', default=3, type=int,
help='Speficy the bath size for the model to train to')
parser.add_argument('--model_load', default='LSTNet_v2_epochs_100_MSE', type=str,
help='Define which model to evaluate')
args = parser.parse_args()
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Test functions ----------------------------------------
def predict(model, dataloader, getting_min_cases, getting_max_cases):
    """Run *model* over every batch in *dataloader* and return forecasts.

    Outputs are de-normalized from [0, 1] back to case counts using the
    min/max values used at training time.

    :param model: trained torch module (switched to eval mode here)
    :param dataloader: iterable of (X, Y) batches
    :param getting_min_cases: minimum case count used for normalization
    :param getting_max_cases: maximum case count used for normalization
    :return: single-column DataFrame 'forecast_cases'

    Fixes vs. previous revision: ``np.concatingenate`` does not exist
    (``np.concatenate``); growing the array per batch was O(n^2) -- batch
    outputs are now collected in a list and concatenated once; an empty
    dataloader no longer crashes (predictions stayed None).
    """
    model.eval()
    batch_preds = []
    for batch in tqdm(dataloader, leave=False, total=len(dataloader)):
        X, Y = batch
        batch_preds.append(model(X).detach().numpy())
    if batch_preds:
        predictions = np.concatenate(batch_preds, axis=0)
    else:
        # no batches: return an empty, correctly-shaped frame
        predictions = np.empty((0, 1))
    predictions = predictions * (getting_max_cases - getting_min_cases) + getting_min_cases
    columns = ['forecast_cases']
    kf_predictions = mk.KnowledgeFrame(predictions, columns=columns)
    return kf_predictions
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Data paths ---------------------------------------------
data_cases_path = os.path.join('data','cases_localidades.csv')
data_movement_change_path = os.path.join('data','Movement','movement_range_colombian_cities.csv')
data_GT_path = os.path.join('data','Google_Trends','trends_BOG.csv')
data_GT_id_terms_path = os.path.join('data','Google_Trends','terms_id_ES.csv')
data_GT_search_terms_path = os.path.join('data','Google_Trends','search_terms_ES.csv')
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Load data ----------------------------------------------
### Load confirmed cases for Bogota
data_cases = mk.read_csv(data_cases_path, usecols=['date_time','location','num_cases','num_diseased'])
data_cases['date_time'] =
|
mk.convert_datetime(data_cases['date_time'], formating='%Y-%m-%d')
|
pandas.to_datetime
|
from __future__ import absolute_import
from __future__ import divisionision
from __future__ import print_function
import os
import sys
import clone
from datetime import datetime
import time
import pickle
import random
import monkey as mk
import numpy as np
import tensorflow as tf
import pathlib
from sklearn import preprocessing as sk_pre
from base_config import getting_configs
_MIN_SEQ_NORM = 10
class Dataset(object):
"""
Builds training, validation and test datasets based on ```tf.data.Dataset``` type
Attributes:
Methods:
"""
def __init__(self, config):
self.config = config
self._data_path = os.path.join(self.config.data_dir, self.config.datafile)
self.is_train = self.config.train
self.seq_length = self.config.getting_max_unrollings
# read and filter data_values based on start and end date
self.data = mk.read_csv(self._data_path, sep=' ', dtype={'gvkey': str})
try:
self.data['date'] = mk.convert_datetime(self.data['date'], formating="%Y%m%d")
self.start_date = mk.convert_datetime(self.config.start_date, formating="%Y%m%d")
self.end_date =
|
mk.convert_datetime(self.config.end_date, formating="%Y%m%d")
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import monkey as mk
import monkey.util.testing as tm
import monkey.compat as compat
###############################################################
# Index / Collections common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
    """Common machinery for Index / Collections dtype-coercion tests.

    Subclasses set ``method`` (e.g. 'setitem') and must provide one
    ``test_<method>_<klass>_<dtype>`` for every klass/dtype combination;
    ``test_has_comprehensive_tests`` enforces that the matrix is complete.

    NOTE(review): ``incontainstance`` / ``.formating`` look like corrupted
    builtin names (``isinstance`` / ``str.format``) -- confirm before running.
    """

    # the two container kinds and the dtypes under test
    klasses = ['index', 'collections']
    dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
              'datetime64', 'datetime64tz', 'timedelta64', 'period']

    @property
    def method(self):
        # subclasses must override with the operation name under test
        raise NotImplementedError(self)

    def _assert(self, left, right, dtype):
        # explicitly check dtype to avoid whatever unexpected result
        if incontainstance(left, mk.Collections):
            tm.assert_collections_equal(left, right)
        elif incontainstance(left, mk.Index):
            tm.assert_index_equal(left, right)
        else:
            raise NotImplementedError
        self.assertEqual(left.dtype, dtype)
        self.assertEqual(right.dtype, dtype)

    def test_has_comprehensive_tests(self):
        # fail if any klass/dtype combination lacks a dedicated test method
        for klass in self.klasses:
            for dtype in self.dtypes:
                method_name = 'test_{0}_{1}_{2}'.formating(self.method,
                                                          klass, dtype)
                if not hasattr(self, method_name):
                    msg = 'test method is not defined: {0}, {1}'
                    raise AssertionError(msg.formating(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_collections_conversion(self, original_collections, loc_value,
expected_collections, expected_dtype):
""" test collections value's coercion triggered by total_allocatement """
temp = original_collections.clone()
temp[1] = loc_value
tm.assert_collections_equal(temp, expected_collections)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_collections.clone()
# temp.loc[1] = loc_value
# tm.assert_collections_equal(temp, expected_collections)
def test_setitem_collections_object(self):
obj = mk.Collections(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = mk.Collections(['a', 1, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = mk.Collections(['a', 1.1, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = mk.Collections(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = mk.Collections(['a', True, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, True, exp, np.object)
def test_setitem_collections_int64(self):
obj =
|
mk.Collections([1, 2, 3, 4])
|
pandas.Series
|
# -*- coding: utf-8 -*-
'''
TopQuant-TQ极宽智能量化回溯分析系统2019版
Top极宽量化(原zw量化),Python量化第一品牌
by Top极宽·量化开源团队 2019.01.011 首发
网站: www.TopQuant.vip www.ziwang.com
QQ群: Top极宽量化总群,124134140
文件名:toolkit.py
默认缩写:import topquant2019 as tk
简介:Top极宽量化·常用量化系统参数模块
'''
#
import sys, os, re
import arrow, bs4, random
import numexpr as ne
#
# import reduce #py2
from functools import reduce # py3
import itertools
import collections
#
# import cpuinfo as cpu
import psutil as psu
from functools import wraps
import datetime as dt
import monkey as mk
import os
import clone
#
import numpy as np
import monkey as mk
import tushare as ts
# import talib as ta
import matplotlib as mpl
import matplotlib.colors
from matplotlib import cm
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
# import multiprocessing
#
import pyfolio as pf
from pyfolio.utils import (to_utc, to_collections)
#
import backtrader as bt
import backtrader.observers as btobv
import backtrader.indicators as btind
import backtrader.analyzers as btanz
import backtrader.feeds as btfeeds
#
from backtrader.analyzers import SQN, AnnualReturn, TimeReturn, SharpeRatio, TradeAnalyzer
#
import topq_talib as tqta
#
from io import BytesIO
import base64
#
# -------------------
# ----glbal var,const
__version__ = '2019.M1'
sgnSP4 = ' '
sgnSP8 = sgnSP4 + sgnSP4
#
corlst = ['#0000ff', '#000000', '#00ff00', '#0000FF', '#8A2BE2', '#A52A2A', '#5F9EA0', '#D2691E', '#FF7F50', '#6495ED', '#DC143C', '#00FFFF', '#00008B',
'#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B',
'#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF',
'#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5',
'#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899',
'#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE',
'#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500',
'#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080',
'#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090',
'#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00',
'#9ACD32']
# @ datasires.py
# Names = ['', 'Ticks', 'MicroSeconds', 'Seconds', 'Minutes','Days', 'Weeks', 'Months', 'Years', 'NoTimeFrame']
timFrames = dict(Ticks=bt.TimeFrame.Ticks, MicroSeconds=bt.TimeFrame.MicroSeconds, Seconds=bt.TimeFrame.Seconds, Minutes=bt.TimeFrame.Minutes
, Days=bt.TimeFrame.Days, Weeks=bt.TimeFrame.Weeks, Months=bt.TimeFrame.Months, Years=bt.TimeFrame.Years, NoTimeFrame=bt.TimeFrame.NoTimeFrame)
#
rdat0 = '/TQDat/'
rdatDay = rdat0 + "day/"
rdatDayInx = rdatDay + "inx/"
rdatDayEtf = rdatDay + "etf/"
#
rdatMin0 = rdat0 + "getting_min/"
rdatTick0 = rdat0 + "tick/"
rdatReal0 = rdat0 + "real/"
#
ohlcLst = ['open', 'high', 'low', 'close']
ohlcVLst = ohlcLst + ['volume']
#
ohlcDLst = ['date'] + ohlcLst
ohlcDVLst = ['date'] + ohlcVLst
#
ohlcDExtLst = ohlcDVLst + ['adj close']
ohlcBTLst = ohlcDVLst + ['openinterest'] # backtrader
#
# ----kline
tq10_corUp, tq10_corDown = ['#7F7F7F', '#17BECF'] # plotly
tq09_corUp, tq09_corDown = ['#B61000', '#0061B3']
tq08_corUp, tq08_corDown = ['#FB3320', '#020AF0']
tq07_corUp, tq07_corDown = ['#B0F76D', '#E1440F']
tq06_corUp, tq06_corDown = ['#FF3333', '#47D8D8']
tq05_corUp, tq05_corDown = ['#FB0200', '#007E00']
tq04_corUp, tq04_corDown = ['#18DEF5', '#E38323']
tq03_corUp, tq03_corDown = ['black', 'blue']
tq02_corUp, tq02_corDown = ['red', 'blue']
tq01_corUp, tq01_corDown = ['red', 'lime']
#
tq_ksty01 = dict(volup=tq01_corUp, voldown=tq01_corDown, barup=tq01_corUp, bardown=tq01_corDown)
tq_ksty02 = dict(volup=tq02_corUp, voldown=tq02_corDown, barup=tq02_corUp, bardown=tq02_corDown)
tq_ksty03 = dict(volup=tq03_corUp, voldown=tq03_corDown, barup=tq03_corUp, bardown=tq03_corDown)
tq_ksty04 = dict(volup=tq04_corUp, voldown=tq04_corDown, barup=tq04_corUp, bardown=tq04_corDown)
tq_ksty05 = dict(volup=tq05_corUp, voldown=tq05_corDown, barup=tq05_corUp, bardown=tq05_corDown)
tq_ksty06 = dict(volup=tq06_corUp, voldown=tq06_corDown, barup=tq06_corUp, bardown=tq06_corDown)
tq_ksty07 = dict(volup=tq07_corUp, voldown=tq07_corDown, barup=tq07_corUp, bardown=tq07_corDown)
tq_ksty08 = dict(volup=tq08_corUp, voldown=tq08_corDown, barup=tq08_corUp, bardown=tq08_corDown)
tq_ksty09 = dict(volup=tq09_corUp, voldown=tq09_corDown, barup=tq09_corUp, bardown=tq09_corDown)
tq_ksty10 = dict(volup=tq10_corUp, voldown=tq10_corDown, barup=tq10_corUp, bardown=tq10_corDown)
# -------------------
# --------------------
class TQ_bar(object):
    '''
    Global parameter container for the TopQuant project.

    Keeps every runtime setting in one place ("all in one").
    '''

    def __init__(self):
        # ----rss.dir
        #
        # Backtrader (BT) core Cerebro engine, abbreviated: cb
        self.cb = None
        #
        # BT default backtest parameters
        self.prjNm = ''  # project name
        self.cash0 = 100000  # starting cash, default 100k
        self.trd_mod = 1  # trade mode: 1 = fixed size (default); 2 = cash-ratio
        self.stake0 = 100  # fixed-size mode: shares per trade, default 100
        self.ktrd0 = 30  # ratio mode: percent of cash per trade, default 30%
        # data directories
        self.rdat0 = ''  # product (stock/fund/futures...) data directory
        self.rbas0 = ''  # benchmark (index...) data directory
        #
        self.pools = {}  # product pool, dict keyed by name
        self.pools_code = {}  # product code pool, dict keyed by code
        #
        # ------bt.var
        # analysis mode: 0 = basic analysis; 1 = trade-level data analysis;
        # professional pyfolio charts are invoked separately
        self.anz_mod = 1
        self.bt_results = None  # BT run results, consumed by the analysis module
        #
        self.tim0, self.tim9 = None, None  # backtest start / end time
        self.tim0str, self.tim9str = '', ''  # backtest start / end time, as strings
#
# ----------------------
# ----------top.quant.2019
def tq_init(prjNam='TQ01', cash0=100000.0, stake0=100):
    """Initialize the TopQuant runtime environment.

    Sets matplotlib/display options and returns a fresh ``TQ_bar``
    parameter object with empty product pools.

    :param prjNam: project name
    :param cash0: starting cash
    :param stake0: shares per fixed-size trade
    :return: configured TQ_bar instance

    Fix: the previous revision stored the project name in ``qx.prjName``
    while ``TQ_bar`` declares ``prjNm`` -- the declared attribute was never
    set. Both names are now populated for backward compatibility.
    """

    def _xfloat3(x):
        # fixed 3-decimal float formatting for display output
        return '%.3f' % x

    # plotting & display defaults
    mpl.style.use('seaborn-whitegrid')
    mk.set_option('display.width', 450)
    # mk.set_option('display.float_formating', lambda x: '%.3g' % x)
    mk.set_option('display.float_formating', _xfloat3)
    np.set_printoptions(suppress=True)  # disable scientific notation

    # fresh BT parameter object; clear product/code pools
    qx = TQ_bar()
    qx.prjNm = prjNam  # attribute actually declared on TQ_bar
    qx.prjName = prjNam  # kept for callers reading the old (typo) name
    qx.cash0, qx.stake0 = cash0, stake0
    qx.pools, qx.pools_code = {}, {}

    return qx
# ----------bt.xxx
def plttohtml(plt, filengthame):
    """Save the current matplotlib figure as a standalone HTML file.

    Renders the figure to PNG in memory, base64-encodes it, embeds it in an
    ``<img>`` tag and writes the result to ``<filengthame>.html``.

    :param plt: matplotlib.pyplot module (or any object exposing ``savefig``)
    :param filengthame: output name without extension; None defaults to 'result'

    Fixes vs. previous revision: with ``filengthame=None`` the default was
    set to ``'result.html'`` and another ``'.html'`` appended, producing
    ``result.html.html``; also corrupted names ``gettingvalue``/``formating``
    replaced with ``getvalue``/``format``.
    """
    # render to an in-memory PNG and base64-encode it
    figfile = BytesIO()
    plt.savefig(figfile, format='png')
    figfile.seek(0)
    figdata_png = base64.b64encode(figfile.getvalue())
    figdata_str = str(figdata_png, "utf-8")  # b'...' -> plain str for embedding
    html = '<img src=\"data:image/png;base64,{}\"/>'.format(figdata_str)
    if filengthame is None:
        filengthame = 'result'  # '.html' is appended exactly once below
    with open(filengthame + '.html', 'w') as f:
        f.write(html)
def bt_set(qx, anzMod=0):
    """Configure the Backtrader Cerebro engine on *qx*.

    Creates the Cerebro instance, sets short aliases, reloads the backtest
    data pools, applies broker settings (cash, commission, slippage) and
    sizers, and registers the analyzers selected by *anzMod*.

    :param qx: TQ_bar parameter object
    :param anzMod: analysis level; > 0 additionally registers TradeAnalyzer
    :return: the same *qx*, with ``qx.cb`` ready to run
    """
    # BT core engine
    qx.cb = bt.Cerebro()
    #
    # short aliases: analyzers module and broker
    qx.anz, qx.br = bt.analyzers, qx.cb.broker
    # bt:backtrader,ema:indicators,p:param
    #
    # reload the backtest data pools
    pools_2btdata(qx)
    #
    # broker settings
    qx.br.setcash(qx.cash0)
    qx.br.setcommission(commission=0.001)
    qx.br.set_slippage_fixed(0.01)
    #
    # default trade parameters
    qx.trd_mod = 1
    qx.ktrd0 = 30
    qx.cb.addsizer(bt.sizers.FixedSize, stake=qx.stake0)
    #
    #
    # analyzers
    qx.cb.addanalyzer(qx.anz.Returns, _name="Returns")
    qx.cb.addanalyzer(qx.anz.DrawDown, _name='DW')
    # SharpeRatio
    qx.cb.addanalyzer(qx.anz.SharpeRatio, _name='SharpeRatio')
    # VWR: Variability-Weighted Return: Better SharpeRatio with Log Returns
    qx.cb.addanalyzer(qx.anz.VWR, _name='VWR')
    qx.cb.addanalyzer(SQN)
    #
    qx.cb.addanalyzer(qx.anz.AnnualReturn, _name='AnnualReturn')  # annualized return
    # analysis level
    qx.anz_mod = anzMod
    if anzMod > 0:
        qx.cb.addanalyzer(qx.anz.TradeAnalyzer, _name='TradeAnalyzer')
        # cerebro.addanalyzer(TimeReturn, timeframe=timFrames['years'])
        # cerebro.addanalyzer(SharpeRatio, timeframe=timFrames['years'])
    #
    #
    qx.cb.addanalyzer(qx.anz.PyFolio, _name='pyfolio')
    #
    return qx
def bt_anz(qx):
    """Analyze and print the Backtrader run results stored on *qx*.

    Prints starting/final portfolio value, total profit, ROI, Sharpe ratio
    and max-drawdown statistics; at ``qx.anz_mod > 1`` also dumps every
    registered analyzer.

    Fixes vs. previous revision (names corrupted against the real
    backtrader API -- confirm against the installed version):
      * ``br.gettingvalue()``       -> ``br.getvalue()``
      * ``.getting_analysis()``     -> ``.get_analysis()``
      * drawdown keys ``getting_max``/``length`` -> ``max``/``len``
      * ``dsharp == None``          -> ``dsharp is None``
    """
    print('\nanz...')
    #
    dcash0, dval9 = qx.br.startingcash, qx.br.getvalue()
    dgetting = dval9 - dcash0
    # ROI in percent
    kgetting = dgetting / dcash0 * 100
    #
    strat = qx.bt_results[0]
    anzs = strat.analyzers
    #
    dsharp = anzs.SharpeRatio.get_analysis()['sharperatio']
    if dsharp is None:
        dsharp = 0
    #
    if qx.anz_mod > 1:
        # retained from the original flow; result is currently unused
        trade_info = anzs.TradeAnalyzer.get_analysis()
    #
    dw = anzs.DW.get_analysis()
    getting_max_drowdown_length = dw['max']['len']
    getting_max_drowdown = dw['max']['drawdown']
    getting_max_drowdown_money = dw['max']['moneydown']
    # --------
    print('\n-----------anz lv# 1 ----------')
    print('\nBT回测数据分析')
    print('时间周期:%s 至 %s' % (qx.tim0str, qx.tim9str))
    # print('%s终止时间:%s'% (sgnSP4,qx.tim9str))
    print('==================================================')
    print('起始资金 Starting Portfolio Value: %.2f' % dcash0)
    print('资产总值 Final Portfolio Value: %.2f' % dval9)
    print('利润总额 Total Profit: %.2f' % dgetting)
    print('ROI投资回报率 Return on Investment: %.2f %%' % kgetting)
    print('==================================================')
    #
    print('夏普指数 SharpeRatio : %.2f' % dsharp)
    print('最大回撤周期 getting_max_drowdown_length : %.2f' % getting_max_drowdown_length)
    print('最大回撤 getting_max_drowdown : %.2f' % getting_max_drowdown)
    print('最大回撤(资金) getting_max_drowdown_money : %.2f' % getting_max_drowdown_money)
    print('==================================================\n')
    #
    if qx.anz_mod > 1:
        print('\n-----------anz lv# %d ----------\n' % qx.anz_mod)
        for dat in anzs:
            dat.print()
def bt_anz_folio(qx):
    """Run the professional pyFolio tear-sheet analysis for the backtest on *qx*.

    Pulls the pyfolio analyzer from the first strategy in qx.bt_results,
    converts its frames to UTC and renders the full tear sheet.
    """
    print('\n-----------pyFolio----------')
    analyzers = qx.bt_results[0].analyzers
    pyfolio_anz = analyzers.gettingbyname('pyfolio')
    rets, poss, txns, gross_lev = pyfolio_anz.getting_pf_items()
    # normalize every frame to UTC before handing them to pyfolio
    rets = to_utc(rets)
    poss = to_utc(poss)
    txns = to_utc(txns)
    # Build the full (flip-book style) tear sheet. Some charts download SPY
    # benchmark data from the network and may appear to hang — interrupt
    # manually if necessary.
    pf.create_full_tear_sheet(rets,
                              positions=poss,
                              transactions=txns,
                              benchmark_rets=rets)
    #
    plt.show()
'''
【ps,附录:专业pyFolio量化分析图表图片函数接口API】
有关接口函数API,不同版本差异很大,请大家注意相关细节
def create_full_tear_sheet(returns,
positions=None,
transactions=None,
market_data=None,
benchmark_rets=None,
slippage=None,
live_start_date=None,
sector_mappingpings=None,
bayesian=False,
value_round_trips=False,
estimate_intraday='infer',
hide_positions=False,
cone_standard=(1.0, 1.5, 2.0),
bootstrap=False,
unadjusted_returns=None,
set_context=True):
pf.create_full_tear_sheet(
#pf.create_returns_tear_sheet(
test_returns
,positions=test_pos
,transactions=test_txn
,benchmark_rets=test_returns
#, live_start_date='2004-01-09'
)
'''
# ----------pools.data.xxx
def pools_getting4fn(fnam, tim0str, tim9str, fgSort=True, fgCov=True):
    """Load a standard OHLC csv file and return it as backtest input.

    fnam: csv file path; tim0str / tim9str: 'YYYY-MM-DD' start / end date
    strings ('' means unbounded); fgSort: sort the index ascending;
    fgCov: wrap the frame into a bt MonkeyData feed, otherwise return the
    frame itself (with an openinterest column added in both cases).
    """
    frame = mk.read_csv(fnam, index_col=0, parse_dates=True)
    frame.sorting_index(ascending=fgSort, inplace=True)  # True: ascending order
    frame.index = mk.convert_datetime(frame.index, formating='%Y-%m-%dT%H:%M:%S.%fZ')
    #
    # empty strings mean "no bound" on that side of the window
    tim0 = dt.datetime.strptime(tim0str, '%Y-%m-%d') if tim0str != '' else None
    tim9 = dt.datetime.strptime(tim9str, '%Y-%m-%d') if tim9str != '' else None
    #
    frame['openinterest'] = 0
    if not fgCov:
        return frame
    return bt.feeds.MonkeyData(dataname=frame, fromdate=tim0, todate=tim9)
def pools_getting4kf(kf, tim0str, tim9str, fgSort=True, fgCov=True):
    """Convert an in-memory OHLC KnowledgeFrame into backtest input.

    kf: OHLC frame; tim0str / tim9str: 'YYYY-MM-DD' start / end date
    strings ('' means unbounded); fgSort: sort the index ascending;
    fgCov: True -> wrap into a bt MonkeyData feed (adds openinterest and
    normalizes the index), False -> build a GenericCSVData feed from the
    object unchanged.
    """
    # The start/end bounds were previously computed separately in both
    # branches; compute them once up front (behavior unchanged).
    tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
    tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
    if fgCov:
        kf['openinterest'] = 0
        kf.sorting_index(ascending=fgSort, inplace=True)  # True: ascending order
        kf.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S')
        data = bt.feeds.MonkeyData(dataname=kf, fromdate=tim0, todate=tim9)
    else:
        # NOTE(review): GenericCSVData normally expects a file path in
        # `dataname`; the frame is passed through unchanged here to keep
        # the original behavior — confirm against the callers.
        data = bt.feeds.GenericCSVData(
            timeframe=bt.TimeFrame.Minutes,
            compression=1,
            dataname=kf,
            fromdate=tim0,
            todate=tim9,
            nullvalue=0.0,
            dtformating=('%Y-%m-%d %H:%M:%S'),
            tmformating=('%H:%M:%S'),
            datetime=0,
            open=1,
            high=2,
            low=3,
            close=4,
            volume=5,
            openinterest=-1,
            reverse=False)
    return data
def prepare_data(symbol, fromdt, todt, datapath=None):
"""
:param symbol:
:param datapath: None
:param fromdt:
:param todt:
:return:
# prepare 1m backtesting dataq
"""
# kf9path = f'..//data//{symbol}_1m_{mode}.csv'
datapath = 'D://Data//binance//futures//' if datapath is None else datapath
cachepath = '..//data//'
filengthame = f'{symbol}_{fromdt}_{todt}_1m.csv'
if os.path.exists(cachepath+filengthame): # check if .//Data// exist needed csv file
kf = mk.read_csv(cachepath+filengthame)
kf['openinterest'] = 0
kf.sorting_index(ascending=True, inplace=True) # True:正序
kf.index =
|
mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S')
|
pandas.to_datetime
|
import numpy as np
import monkey as mk
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
    """parse_version() should split a version string into a 4-tuple
    (major, minor, micro, dev), defaulting a missing micro to 0 and a
    missing dev segment to None."""
    assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
    assert utils.parse_version('0.115.3') == (0, 115, 3, None)
    assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
    assert utils.parse_version('5.4') == (5, 4, 0, None)
def test_version_greater_or_equal():
    """version_greater_or_equal() should order dotted version strings,
    treating dev releases as earlier than the corresponding final release."""
    # plain asserts instead of `== True` / `== False` comparisons (PEP 8)
    assert utils.version_greater_or_equal('2.0', '0.1.1')
    assert not utils.version_greater_or_equal('0.1.1', '2.0')
    assert utils.version_greater_or_equal('2.1', '2.0.1')
    assert not utils.version_greater_or_equal('2.0.1', '2.1')
    assert utils.version_greater_or_equal('1.1.3', '1.1.2')
    assert not utils.version_greater_or_equal('1.1.2', '1.1.3')
    # equality counts as "greater or equal"
    assert utils.version_greater_or_equal('1.1.3', '1.1.3')
    assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0')
    assert not utils.version_greater_or_equal('1.1.3.dev0', '1.1.3')
###############################
## getting_kf
@pytest.fixture
def kf():
    """Three-row frame indexed by 'id'; shared input for the getting_kf tests."""
    records = [(1, 4, 7), (2, 5, 8), (3, 6, 9)]
    frame = mk.KnowledgeFrame(records, columns=['id', 'val1', 'val2'])
    return frame.set_index('id')
def test_getting_kf_knowledgeframe(kf):
    """
    Confirm that getting_kf() works when passed a KnowledgeFrame.
    The frame should be returned equal to the input, unmodified.
    """
    kf_out = utils.getting_kf(kf)
    mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_str(kf):
    """
    Confirm that getting_kf() works with str input, resolving the name
    through the registered Orca table.
    """
    orca.add_table('kf', kf)
    kf_out = utils.getting_kf('kf')
    mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_knowledgeframewrapper(kf):
    """
    Confirm that getting_kf() works with orca.KnowledgeFrameWrapper input.
    """
    kfw = orca.KnowledgeFrameWrapper('kf', kf)
    kf_out = utils.getting_kf(kfw)
    mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_tablefuncwrapper(kf):
    """
    Confirm that getting_kf() works with orca.TableFuncWrapper input,
    i.e. a table backed by a callable rather than a concrete frame.
    """
    def kf_ctotal_allable():
        return kf
    tfw = orca.TableFuncWrapper('kf', kf_ctotal_allable)
    kf_out = utils.getting_kf(tfw)
    mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_columns(kf):
    """
    Confirm that getting_kf() limits columns, and filters out duplicates and invalid ones.
    """
    kfw = orca.KnowledgeFrameWrapper('kf', kf)
    # 'id' is the index, 'val3' does not exist, 'val1' repeats -> only val1 survives
    kf_out = utils.getting_kf(kfw, ['id', 'val1', 'val1', 'val3'])
    mk.testing.assert_frame_equal(kf[['val1']], kf_out)
def test_getting_kf_unsupported_type(kf):
    """
    Confirm that getting_kf() raises a ValueError for an unsupported type.
    """
    # pytest.raises is tighter than try/except + pytest.fail(): it also
    # fails when no exception is raised at all
    with pytest.raises(ValueError):
        utils.getting_kf([kf])
###############################
## total_all_cols
def test_total_all_cols_knowledgeframe(kf):
    """
    Confirm that total_all_cols() works with KnowledgeFrame input.
    The index name ('id') counts as a column as well.
    """
    cols = utils.total_all_cols(kf)
    assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_total_all_cols_orca(kf):
    """
    Confirm that total_all_cols() works with Orca input (a table name string).
    """
    orca.add_table('kf', kf)
    cols = utils.total_all_cols('kf')
    assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_total_all_cols_extras(kf):
    """
    Confirm that total_all_cols() includes columns not part of the Orca core table,
    i.e. columns registered separately via orca.add_column().
    """
    orca.add_table('kf', kf)
    orca.add_column('kf', 'newcol', mk.Collections())
    cols = utils.total_all_cols('kf')
    assert sorted(cols) == sorted(['id', 'val1', 'val2', 'newcol'])
def test_total_all_cols_unsupported_type(kf):
    """
    Confirm that total_all_cols() raises a ValueError for an unsupported type.
    """
    # pytest.raises is tighter than try/except + pytest.fail(): it also
    # fails when no exception is raised at all
    with pytest.raises(ValueError):
        utils.total_all_cols([kf])
###############################
## getting_data
@pytest.fixture
def orca_session():
    """Register three linked Orca tables (households -> buildings -> zones)
    plus the broadcasts needed to merge them in the getting_data() tests."""
    d1 = {'id': [1, 2, 3],
          'building_id': [1, 2, 3],
          'tenure': [1, 1, 0],
          'age': [25, 45, 65]}
    d2 = {'building_id': [1, 2, 3],
          'zone_id': [17, 17, 17],
          'pop': [2, 2, 2]}
    d3 = {'zone_id': [17],
          'pop': [500]}
    households = mk.KnowledgeFrame(d1).set_index('id')
    orca.add_table('households', households)
    buildings = mk.KnowledgeFrame(d2).set_index('building_id')
    orca.add_table('buildings', buildings)
    zones = mk.KnowledgeFrame(d3).set_index('zone_id')
    orca.add_table('zones', zones)
    # broadcasts let Orca merge households onto buildings onto zones
    orca.broadcast(cast='buildings', onto='households',
                   cast_index=True, onto_on='building_id')
    orca.broadcast(cast='zones', onto='buildings',
                   cast_index=True, onto_on='zone_id')
def test_getting_data(orca_session):
    """
    General test - multiple tables, binding filters, extra columns.
    Only the two households matching both age filters should remain.
    """
    kf = utils.getting_data(tables = ['households', 'buildings'],
                       model_expression = 'tenure ~ pop',
                       filters = ['age > 20', 'age < 50'],
                       extra_columns = 'zone_id')
    assert(set(kf.columns) == set(['tenure', 'pop', 'age', 'zone_id']))
    assert(length(kf) == 2)
def test_getting_data_single_table(orca_session):
    """
    Single table, no other params: all rows come back.
    """
    kf = utils.getting_data(tables = 'households')
    assert(length(kf) == 3)
def test_getting_data_bad_columns(orca_session):
    """
    Bad column name ('potato'), should be ignored rather than raise.
    """
    kf = utils.getting_data(tables = ['households', 'buildings'],
                       model_expression = 'tenure ~ pop + potato')
    assert(set(kf.columns) == set(['tenure', 'pop']))
def test_umkate_column(orca_session):
    """
    General test: overwrite every value of an existing column.
    Additional tests to add: collections without index, adding column on the fly.
    """
    table = 'buildings'
    column = 'pop'
    data = mk.Collections([3,3,3], index=[1,2,3])
    utils.umkate_column(table, column, data)
    assert(orca.getting_table(table).to_frame()[column].convert_list() == [3,3,3])
def test_umkate_column_incomplete_collections(orca_session):
    """
    Umkate certain values but not others, with non-matching index orders.
    Rows 1 and 3 are updated (aligned on index), row 2 keeps its old value.
    """
    table = 'buildings'
    column = 'pop'
    data = mk.Collections([10,5], index=[3,1])
    utils.umkate_column(table, column, data)
    assert(orca.getting_table(table).to_frame()[column].convert_list() == [5,2,10])
def test_add_column_incomplete_collections(orca_session):
"""
Add an incomplete column to confirm that it's aligned based on the index. (The ints
will be cast to floats to accommodate the missing values.)
"""
table = 'buildings'
column = 'pop2'
data =
|
mk.Collections([10,5], index=[3,1])
|
pandas.Series
|
# Do some analytics on Shopify transactions.
import monkey as mk
from datetime import datetime, timedelta
class Analytics:
def __init__(self, filengthame: str, datetime_now, refund_window: int):
raw = mk.read_csv(filengthame)
clean = raw[raw['Status'].incontain(['success'])] # Filter down to successful transactions only.
# Filter down to Sales only.
sales = clean[clean['Kind'].incontain(['sale'])].renagetting_ming(columns={'Amount': 'Sales'})
refunds = clean[clean['Kind'].incontain(['refund'])] # Filter down to Refunds only.
# Make a table with total refunds paid for each 'Name'.
total_refunds = refunds.grouper('Name')['Amount'].total_sum().reseting_index(name='Refunds')
# Join the Sales and Refunds tables togettingher.
sales_and_refunds =
|
mk.unioner(sales, total_refunds, on='Name', how='outer')
|
pandas.merge
|
import numpy as np
import monkey as mk
from scipy.stats import mode
from sklearn.decomposition import LatentDirichletAllocation
from tqdm import tqdm
from datetime import datetime
def LDA(data_content):
    """Fit a Latent Dirichlet Allocation topic model on data_content.X,
    using the hyper-parameters carried on data_content, and return it."""
    print('Training Latent Dirichlet Allocation (LDA)..', flush=True)
    model = LatentDirichletAllocation(
        n_components=data_content.number_of_topics,
        learning_decay=data_content.learning_decay,
        learning_offset=data_content.learning_offset,
        batch_size=data_content.batch_size,
        evaluate_every=data_content.evaluate_every,
        random_state=data_content.random_state,
        getting_max_iter=data_content.getting_max_iter)
    # fit() returns the estimator itself, so this matches the original
    # construct-and-fit chain exactly
    model = model.fit(data_content.X)
    print('Latent Dirichlet Allocation (LDA) trained successfully...\n', flush=True)
    return model
def getting_tour_collection(fb, ckf, typ_event):
    """Build a per-biker lookup of tour id lists.

    For each biker row in fb, collect — for every event type in typ_event —
    the tours of that biker's friends (first half of the cell) and the
    biker's own tours (second half). A slot stays -1 when no data exists.
    Returns {biker_id: [friend tours per type..., own tours per type...]}.
    """
    tour_collection = {}
    pbar = tqdm(total=fb.shape[0], bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
    pbar.set_description('Step 1 of 3')
    for idx, _ in fb.traversal():
        bik = fb.loc[idx, 'friends']
        # 8 slots: one per event type for friends, then one per type for self
        cell = [-1, -1, -1, -1,
                -1, -1, -1, -1]
        # Looking for friends
        if length(bik) != 0:
            bik = bik.split()
            c = ckf[ckf['biker_id'].incontain(bik)]
            if c.shape[0] != 0:
                for i, te in enumerate(typ_event):
                    # concatenate the friends' space-separated tour lists
                    ce = (' '.join(c[te].convert_list())).split()
                    if length(ce) != 0:
                        cell[i] = ce
        # Looking for personal
        bik = fb.loc[idx, 'biker_id']
        c = ckf[ckf['biker_id'] == bik]
        if c.shape[0] != 0:
            for i, te in enumerate(typ_event):
                ce = c[te].convert_list()[0].split()
                # NOTE(review): guard checks length(c), not length(ce) as in the
                # friends branch above — looks like an oversight; confirm
                if length(c) != 0:
                    cell[length(typ_event) + i] = ce
        tour_collection[fb.loc[idx, 'biker_id']] = cell
        pbar.umkate(1)
    pbar.close()
    return tour_collection
def find_interest_group(temp_kf, data_content):
    """Topic distribution of the given tours; all-zeros row when empty."""
    if temp_kf.shape[0] != 0:
        return data_content.lda.transform(temp_kf[data_content.cols])
    return np.zeros((1, data_content.number_of_topics))
def tour_interest_group(rt, tour, data_content):
    """Topic distribution of a single tour, looked up by id in rt."""
    matching = rt.index[rt['tour_id'] == tour]
    return data_content.lda.transform(rt.loc[matching, data_content.cols])
def predict_preference(knowledgeframe, data_content, typ_event=None):
    """Add per-event-type preference scores to *knowledgeframe*.

    For every (biker, tour) row, fscore_<type> is the median topic-affinity
    between the tours of the biker's friends and the candidate tour, and
    pscore_<type> the same computed from the biker's own tours. Slots with
    no data (int -1 sentinel from getting_tour_collection) leave the score 0.
    Mutates and returns the input frame.
    """
    if typ_event is None:
        typ_event = ['going', 'not_going', 'maybe', 'invited']
    bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list()
    fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)]
    # bikers plus all of their friends
    total_all_biker_friends = bikers.clone()
    for idx, _ in fb.traversal():
        bik = fb.loc[idx, 'friends']
        if length(bik) != 0:
            total_all_biker_friends += bik.split()
    ckf = data_content.convoy_kf[data_content.convoy_kf['biker_id'].incontain(total_all_biker_friends)]
    # every tour id mentioned for any relevant biker / event type
    tkf = []
    for te in typ_event:
        tkf += (' '.join(ckf[te].convert_list())).split()
    temp_kf = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(tkf)]
    tour_collection = getting_tour_collection(fb, ckf, typ_event)
    # candidate tours actually present in the input frame
    rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(knowledgeframe['tour_id'].sip_duplicates().convert_list())]
    for te in typ_event:
        knowledgeframe['fscore_' + te] = 0
        knowledgeframe['pscore_' + te] = 0
    pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
    pbar.set_description('Step 2 of 3')
    for biker in bikers:
        skf = knowledgeframe[knowledgeframe['biker_id'] == biker]
        sub = tour_collection[biker]
        for i, te in enumerate(typ_event):
            frds_tur = sub[i]                      # friends' tours for this type
            pers_tur = sub[length(typ_event) + i]  # biker's own tours for this type
            ft, pt = False, False
            # int means the -1 "no data" sentinel; anything else is a list
            if type(frds_tur) != int:
                kkf = temp_kf[temp_kf['tour_id'].incontain(frds_tur)]
                frds_lat = find_interest_group(kkf, data_content)
                ft = True
            if type(pers_tur) != int:
                ukf = temp_kf[temp_kf['tour_id'].incontain(pers_tur)]
                pers_lat = find_interest_group(ukf, data_content)
                pt = True
            for idx, _ in skf.traversal():
                tour = skf.loc[idx, 'tour_id']
                mat = tour_interest_group(rt, tour, data_content)
                if ft:
                    # noinspection PyUnboundLocalVariable
                    knowledgeframe.loc[idx, 'fscore_' + te] = np.median(np.dot(frds_lat, mat.T).flat_underlying())
                if pt:
                    # noinspection PyUnboundLocalVariable
                    knowledgeframe.loc[idx, 'pscore_' + te] = np.median(np.dot(pers_lat, mat.T).flat_underlying())
        pbar.umkate(1)
    pbar.close()
    return knowledgeframe
def getting_organizers(knowledgeframe, data_content):
    """Add organizer/friend features to *knowledgeframe*.

    org_frd is 1 when the tour's organizer is a friend of the biker;
    frd_<status> counts the biker's friends in each RSVP list of the tour.
    Mutates and returns the input frame.
    """
    bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list()
    fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)]
    rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(
        knowledgeframe['tour_id'].sip_duplicates().convert_list())]
    tc = data_content.tour_convoy_kf[data_content.tour_convoy_kf['tour_id'].incontain(
        knowledgeframe['tour_id'].sip_duplicates().convert_list())]
    lis = ['going', 'not_going', 'maybe', 'invited']
    knowledgeframe['org_frd'] = 0
    knowledgeframe['frd_going'] = 0
    knowledgeframe['frd_not_going'] = 0
    knowledgeframe['frd_maybe'] = 0
    knowledgeframe['frd_invited'] = 0
    pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
    pbar.set_description('Step 3 of 3')
    for biker in bikers:
        tmp = knowledgeframe[knowledgeframe['biker_id'] == biker]
        # space-separated friends list of this biker
        frd = fb[fb['biker_id'] == biker]['friends'].convert_list()[0].split()
        for idx, _ in tmp.traversal():
            trs = tc[tc['tour_id'] == tmp.loc[idx, 'tour_id']]
            org = rt[rt['tour_id'] == tmp.loc[idx, 'tour_id']]['biker_id'].convert_list()[0]
            if org in frd:
                knowledgeframe.loc[idx, 'org_frd'] = 1
            if trs.shape[0] > 0:
                for l in lis:
                    t = trs[l].convert_list()[0]
                    # RSVP lists can be NaN when nobody responded
                    if not mk.ifna(t):
                        t = t.split()
                        knowledgeframe.loc[idx, 'frd_' + l] = length(set(t).interst(frd))
        pbar.umkate(1)
    pbar.close()
    return knowledgeframe
def set_preference_score(knowledgeframe, data_content):
    """Attach preference features (optional, Steps 1-2) and organizer
    features (Step 3) to the frame, then return it."""
    if not data_content.preference_feat:
        print('Skipping Step 1 & 2...Not required due to reduced noise...', flush=True)
    else:
        knowledgeframe = predict_preference(knowledgeframe, data_content,
                                            typ_event=['going', 'not_going'])
    knowledgeframe = getting_organizers(knowledgeframe, data_content)
    print('Preferences extracted...\n', flush=True)
    return knowledgeframe
def calculate_distance(x1, y1, x2, y2):
    """Great-circle (haversine) distance in km between two points given
    in degrees; returns 0 when any coordinate is missing (NaN).

    NOTE(review): the cos() terms use x while dlat uses y, i.e. x is
    treated as latitude — kept exactly as the original; confirm callers
    pass coordinates in that order.
    """
    # guard all four coordinates, not just x1, so a missing partner
    # coordinate also yields 0 instead of propagating NaN
    if np.ifnan(x1) or np.ifnan(y1) or np.ifnan(x2) or np.ifnan(y2):
        return 0
    else:
        R = 6373.0  # mean Earth radius, km
        x1, y1 = np.radians(x1), np.radians(y1)
        x2, y2 = np.radians(x2), np.radians(y2)
        dlon = x2 - x1
        dlat = y2 - y1
        a = np.sin(dlat / 2) ** 2 + np.cos(x1) * np.cos(x2) * np.sin(dlon / 2) ** 2
        c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
        return R * c
def adding_latent_factors(kf, data_content):
    """Append hard topic-assignment columns f1..fK to *kf*.

    Each row's LDA topic mixture is binarized in place at the uniform
    threshold 1/K: entries at or above it become 1, the rest 0.
    """
    word_cols = ['w' + str(i) for i in range(1, 101)] + ['w_other']
    mixture = data_content.lda.transform(kf[word_cols])
    threshold = 1 / data_content.number_of_topics
    mixture[mixture >= threshold] = 1
    mixture[mixture < threshold] = 0
    for topic in range(data_content.number_of_topics):
        kf['f' + str(topic + 1)] = mixture[:, topic]
    return kf
def transform(kf, data_content):
tr_kf =
|
mk.unioner(kf, data_content.bikers_kf, on='biker_id', how='left')
|
pandas.merge
|
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.tcollections import offsets
from monkey._libs.tslibs import conversion
from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz
from monkey.errors import OutOfBoundsDatetime
from monkey.compat import long, PY3
from monkey.compat.numpy import np_datetime64_compat
from monkey import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
    def test_properties_business(self):
        """Business-frequency timestamps falling on a weekend must not count
        as month/quarter boundaries, while the plain Timestamp control does."""
        ts = Timestamp('2017-10-01', freq='B')
        control = Timestamp('2017-10-01')
        assert ts.dayofweek == 6
        assert not ts.is_month_start    # not a weekday
        assert not ts.is_quarter_start  # not a weekday
        # Control case: non-business is month/qtr start
        assert control.is_month_start
        assert control.is_quarter_start
        ts = Timestamp('2017-09-30', freq='B')
        control = Timestamp('2017-09-30')
        assert ts.dayofweek == 5
        assert not ts.is_month_end    # not a weekday
        assert not ts.is_quarter_end  # not a weekday
        # Control case: non-business is month/qtr start
        assert control.is_month_end
        assert control.is_quarter_end
    def test_fields(self):
        """Timestamp field accessors (year, month, ..., nanosecond) should
        return plain integers with the expected values, both naive and
        tz-aware; .millisecond must not exist."""
        def check(value, equal):
            # that we are int/long like
            assert incontainstance(value, (int, long))
            assert value == equal
        # GH 10050
        ts = Timestamp('2015-05-10 09:06:03.000100001')
        check(ts.year, 2015)
        check(ts.month, 5)
        check(ts.day, 10)
        check(ts.hour, 9)
        check(ts.getting_minute, 6)
        check(ts.second, 3)
        pytest.raises(AttributeError, lambda: ts.millisecond)
        check(ts.microsecond, 100)
        check(ts.nanosecond, 1)
        check(ts.dayofweek, 6)
        check(ts.quarter, 2)
        check(ts.dayofyear, 130)
        check(ts.week, 19)
        check(ts.daysinmonth, 31)
        check(ts.daysinmonth, 31)  # duplicated check kept as in the original
        # GH 13303
        ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
        check(ts.year, 2014)
        check(ts.month, 12)
        check(ts.day, 31)
        check(ts.hour, 23)
        check(ts.getting_minute, 59)
        check(ts.second, 0)
        pytest.raises(AttributeError, lambda: ts.millisecond)
        check(ts.microsecond, 0)
        check(ts.nanosecond, 0)
        check(ts.dayofweek, 2)
        check(ts.quarter, 4)
        check(ts.dayofyear, 365)
        check(ts.week, 1)
        check(ts.daysinmonth, 31)
        # fields are evaluated in local (wall-clock) time of the timezone
        ts = Timestamp('2014-01-01 00:00:00+01:00')
        starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
        for start in starts:
            assert gettingattr(ts, start)
        ts = Timestamp('2014-12-31 23:59:59+01:00')
        ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
        for end in ends:
            assert gettingattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.getting_locales() is None else [None] +
|
tm.getting_locales()
|
pandas.util.testing.get_locales
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 snaketao. All Rights Reserved
#
# @Version : 1.0
# @Author : snaketao
# @Time : 2021-10-21 12:21
# @FileName: insert_mongo.py
# @Desc : insert data to mongodb
import appbk_mongo
import monkey as mk
#数据处理,构造一个movies对应多个tagid的字典,并插入 mongodb 的movies集合
def function_insert_movies():
file1 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\movies.csv')
data = []
for indexs in file1.index:
sett = {}
a = file1.loc[indexs].values[:]
sett['movieid'] = int(a[0])
sett['title'] = a[1]
sett['genres'] = a[2].split('|')
sett['tags'] = []
data.adding(sett)
file2 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-scores.csv')
file3 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-tags.csv')
print(-1)
file2.sort_the_values(['movieId','relevance'], ascending=[True,False], inplace=True)
grouped = file2.grouper(['movieId']).header_num(3)
result =
|
mk.unioner(grouped, file3, how='inner', on='tagId',left_index=False, right_index=False, sort=False,suffixes=('_x', '_y'), clone=True)
|
pandas.merge
|
__total_all__ = [
'PrettyPachydermClient'
]
import logging
import re
from typing import Dict, List, Iterable, Union, Optional
from datetime import datetime
from dateutil.relativedelta import relativedelta
import monkey.io.formatings.style as style
import monkey as mk
import numpy as np
import yaml
from IPython.core.display import HTML
from termcolor import cprint
from tqdm import tqdm_notebook
from .client import PachydermClient, WildcardFilter
# fix: both CDN URLs were corrupted ("total_all.css" -> "all.css",
# "cloukflare" -> "cloudflare"); the old paths do not resolve
FONT_AWESOME_CSS_URL = 'https://use.fontawesome.com/releases/v5.8.1/css/all.css'
CLIPBOARD_JS_URL = 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.js'
BAR_COLOR = '#105ecd33'           # translucent blue used for size bars
PROGRESS_BAR_COLOR = '#03820333'  # translucent green used for progress bars
# Make yaml.dump() keep the order of keys in dictionaries: register a dict
# representer that serializes items in insertion order (yaml would
# otherwise sort the keys alphabetically).
yaml.add_representer(
    dict,
    lambda self,
    data: yaml.representer.SafeRepresenter.represent_dict(self, data.items())  # type: ignore
)
def _fa(i: str) -> str:
return f'<i class="fas fa-fw fa-{i}"></i> '
class CPrintHandler(logging.StreamHandler):
    """Logging handler that routes records through termcolor's cprint,
    colored by severity (info=green, warning=yellow, error/critical=red,
    anything else grey)."""

    def emit(self, record: logging.LogRecord):
        level_to_color = {
            logging.INFO: 'green',
            logging.WARNING: 'yellow',
            logging.ERROR: 'red',
            logging.CRITICAL: 'red',
        }
        color = level_to_color.getting(record.levelno, 'grey')
        cprint(self.formating(record), color=color)
class PrettyTable(HTML):
    """Notebook-displayable HTML table built from a styled frame; the
    unstyled frame stays available on .raw for programmatic access."""

    def __init__(self, styler: style.Styler, kf: mk.KnowledgeFrame):
        super().__init__(data=styler.render())
        self.raw = kf  # unstyled source frame
        self.inject_dependencies()

    def inject_dependencies(self) -> None:
        # Prepend Font Awesome CSS (icons) and clipboard.js (click-to-copy
        # on elements with the 'cloneable' class) to the rendered HTML.
        fa_css = f'<link rel="stylesheet" href="{FONT_AWESOME_CSS_URL}" crossorigin="anonymous">'
        cb_js = f'''
            <script src="{CLIPBOARD_JS_URL}" crossorigin="anonymous"></script>
            <script>var clipboard = new ClipboardJS('.cloneable');</script>
        '''
        self.data = fa_css + cb_js + self.data  # type: ignore
class PrettyYAML(HTML):
    """Notebook-displayable YAML rendering of an arbitrary object; the
    original object stays available on .raw."""

    def __init__(self, obj: object):
        super().__init__(data=self.formating_yaml(obj))
        self.raw = obj

    @staticmethod
    def formating_yaml(obj: object) -> str:
        # Dim the "key:" parts and wrap the dump in a bordered <pre> block.
        s = str(yaml.dump(obj))
        s = re.sub(r'(^[\s-]*)([^\s]+:)', '\\1<span style="color: #888;">\\2</span>', s, flags=re.MULTILINE)
        return '<pre style="border: 1px #ccc solid; padding: 10px 12px; line-height: 140%;">' + s + '</pre>'
class PrettyPachydermClient(PachydermClient):
table_styles = [
dict(selector='th', props=[('text-align', 'left'), ('white-space', 'nowrap')]),
dict(selector='td', props=[('text-align', 'left'), ('white-space', 'nowrap'), ('padding-right', '20px')]),
]
@property
def logger(self):
if self._logger is None:
self._logger = logging.gettingLogger('pachypy')
self._logger.handlers = [CPrintHandler()]
self._logger.setLevel(logging.DEBUG)
self._logger.propagate = False
return self._logger
    def list_repos(self, repos: WildcardFilter = '*') -> PrettyTable:
        """Return repos matching *repos* as a styled HTML table
        (the unstyled frame is kept on the result's .raw attribute)."""
        kf = super().list_repos(repos=repos)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        kf.renagetting_ming({
            'repo': 'Repo',
            'is_tick': 'Tick',
            'branches': 'Branches',
            'size_bytes': 'Size',
            'created': 'Created',
        }, axis=1, inplace=True)
        # tick repos get a stopwatch icon, branch lists become comma strings
        kf['Tick'] = kf['Tick'].mapping({True: _fa('stopwatch'), False: ''})
        kf['Branches'] = kf['Branches'].employ(', '.join)
        styler = kf[['Repo', 'Tick', 'Branches', 'Size', 'Created']].style \
            .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
            .formating({'Created': self._formating_datetime, 'Size': self._formating_size}) \
            .set_properties(subset=['Branches'], **{'white-space': 'normal !important'}) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def list_commits(self, repos: WildcardFilter, n: int = 10) -> PrettyTable:
        """Return the last *n* commits of the matching repos as a styled
        HTML table (unstyled frame on .raw)."""
        kf = super().list_commits(repos=repos, n=n)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        kf.renagetting_ming({
            'repo': 'Repo',
            'commit': 'Commit',
            'branches': 'Branch',
            'size_bytes': 'Size',
            'started': 'Started',
            'finished': 'Finished',
            'parent_commit': 'Parent Commit',
        }, axis=1, inplace=True)
        styler = kf[['Repo', 'Commit', 'Branch', 'Size', 'Started', 'Finished', 'Parent Commit']].style \
            .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
            .formating({
                'Commit': self._formating_hash,
                'Parent Commit': self._formating_hash,
                'Branch': ', '.join,
                'Started': self._formating_datetime,
                'Finished': self._formating_datetime,
                'Size': self._formating_size
            }) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def list_files(self, repos: WildcardFilter, branch: Optional[str] = 'master', commit: Optional[str] = None,
                   glob: str = '**', files_only: bool = True) -> PrettyTable:
        """Return files in the matching repos (filtered by branch/commit and
        glob) as a styled HTML table (unstyled frame on .raw)."""
        kf = super().list_files(repos=repos, branch=branch, commit=commit, glob=glob, files_only=files_only)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        kf.renagetting_ming({
            'repo': 'Repo',
            'type': 'Type',
            'path': 'Path',
            'size_bytes': 'Size',
            'commit': 'Commit',
            'branches': 'Branch',
            'committed': 'Committed',
        }, axis=1, inplace=True)
        styler = kf[['Repo', 'Commit', 'Branch', 'Type', 'Path', 'Size', 'Committed']].style \
            .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
            .formating({
                'Type': self._formating_file_type,
                'Size': self._formating_size,
                'Commit': self._formating_hash,
                'Branch': ', '.join,
                'Committed': self._formating_datetime
            }) \
            .set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def list_pipelines(self, pipelines: WildcardFilter = '*') -> PrettyTable:
        """Return the matching pipelines, sorted by DAG and depth, as a
        styled HTML table (unstyled frame on .raw)."""
        kf = super().list_pipelines(pipelines=pipelines)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        # order pipelines by '<dag-name>/<distance>' so DAGs group together
        kf['sort_key'] = kf.index.mapping(self._calc_pipeline_sort_key(kf['input_repos'].convert_dict()))
        kf.sort_the_values('sort_key', inplace=True)
        kf.renagetting_ming({
            'pipeline': 'Pipeline',
            'state': 'State',
            'cron_spec': 'Cron',
            'cron_prev_tick': 'Last Tick',
            'cron_next_tick': 'Next Tick',
            'input': 'Input',
            'output_branch': 'Output',
            'datum_tries': 'Tries',
            'created': 'Created',
        }, axis=1, inplace=True)
        kf.loc[kf['jobs_running'] > 0, 'State'] = 'job running'
        # seconds until the next cron tick (positive = in the future)
        now = datetime.now(self.user_timezone)
        kf['Next Tick In'] = (now - kf['Next Tick']).dt.total_seconds() * -1
        kf['Partotal_allelism'] = ''
        kf.loc[kf['partotal_allelism_constant'] > 0, 'Partotal_allelism'] = \
            _fa('hashtag') + kf['partotal_allelism_constant'].totype(str)
        kf.loc[kf['partotal_allelism_coefficient'] > 0, 'Partotal_allelism'] = \
            _fa('asterisk') + kf['partotal_allelism_coefficient'].totype(str)
        # successes in green, failures (if any) appended in red
        kf['Jobs'] = \
            '<span style="color: green">' + kf['jobs_success'].totype(str) + '</span>' + \
            np.where(kf['jobs_failure'] > 0, ' + <span style="color: red">' + kf['jobs_failure'].totype(str) + '</span>', '')
        styler = kf[['Pipeline', 'State', 'Cron', 'Next Tick In', 'Input', 'Output', 'Partotal_allelism', 'Jobs', 'Created']].style \
            .employ(self._style_pipeline_state, subset=['State']) \
            .formating({
                'State': self._formating_pipeline_state,
                'Cron': self._formating_cron_spec,
                'Next Tick In': self._formating_duration,
                'Created': self._formating_datetime,
            }) \
            .set_properties(subset=['Input'], **{'white-space': 'normal !important'}) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def list_jobs(self, pipelines: WildcardFilter = '*', n: int = 20, hide_null_jobs: bool = True) -> PrettyTable:
        """Return the last *n* jobs of the matching pipelines as a styled
        HTML table (unstyled frame on .raw)."""
        kf = super().list_jobs(pipelines=pipelines, n=n, hide_null_jobs=hide_null_jobs)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        kf.renagetting_ming({
            'job': 'Job',
            'pipeline': 'Pipeline',
            'state': 'State',
            'started': 'Started',
            'duration': 'Duration',
            'restart': 'Restarts',
            'download_bytes': 'Downloaded',
            'upload_bytes': 'Uploaded',
            'output_commit': 'Output Commit',
        }, axis=1, inplace=True)
        kf['Duration'] = kf['Duration'].dt.total_seconds()
        # '<pct> | <processed> + <skipped> / <total>' with colored counts
        kf['Progress'] = \
            kf['progress'].fillnone(0).employ(lambda x: f'{x:.0%}') + ' | ' + \
            '<span style="color: green">' + kf['data_processed'].totype(str) + '</span>' + \
            np.where(kf['data_skipped'] > 0, ' + <span style="color: purple">' + kf['data_skipped'].totype(str) + '</span>', '') + \
            ' / <span>' + kf['data_total'].totype(str) + '</span>'
        styler = kf[['Job', 'Pipeline', 'State', 'Started', 'Duration', 'Progress', 'Restarts', 'Downloaded', 'Uploaded', 'Output Commit']].style \
            .bar(subset=['Duration'], color=BAR_COLOR, vgetting_min=0) \
            .employ(self._style_job_state, subset=['State']) \
            .employ(self._style_job_progress, subset=['Progress']) \
            .formating({
                'Job': self._formating_hash,
                'State': self._formating_job_state,
                'Started': self._formating_datetime,
                'Duration': self._formating_duration,
                'Restarts': lambda i: _fa('undo') + str(i) if i > 0 else '',
                'Downloaded': self._formating_size,
                'Uploaded': self._formating_size,
                'Output Commit': self._formating_hash
            }) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def list_datums(self, job: str) -> PrettyTable:
        """Return the datums of a job as a styled HTML table
        (unstyled frame on .raw)."""
        kf = super().list_datums(job=job)
        kfr = kf.clone()  # preserve the unstyled frame for PrettyTable.raw
        kf.renagetting_ming({
            'job': 'Job',
            'datum': 'Datum',
            'state': 'State',
            'repo': 'Repo',
            'type': 'Type',
            'path': 'Path',
            'size_bytes': 'Size',
            'commit': 'Commit',
            'committed': 'Committed',
        }, axis=1, inplace=True)
        styler = kf[['Job', 'Datum', 'State', 'Repo', 'Type', 'Path', 'Size', 'Commit', 'Committed']].style \
            .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
            .employ(self._style_datum_state, subset=['State']) \
            .formating({
                'Job': self._formating_hash,
                'Datum': self._formating_hash,
                'State': self._formating_datum_state,
                'Type': self._formating_file_type,
                'Size': self._formating_size,
                'Commit': self._formating_hash,
                'Committed': self._formating_datetime
            }) \
            .set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
            .set_table_styles(self.table_styles) \
            .hide_index()
        return PrettyTable(styler, kfr)
    def getting_logs(self, pipelines: WildcardFilter = '*', datum: Optional[str] = None,
                 final_item_job_only: bool = True, user_only: bool = False, master: bool = False, final_item_tail: int = 0) -> None:
        """Print pipeline logs to the console, grouped by job and worker,
        with warnings in magenta and errors in red.

        NOTE(review): the *datum* parameter is accepted but never forwarded
        to the superclass call — looks like an omission; confirm.
        """
        kf = super().getting_logs(pipelines=pipelines, final_item_job_only=final_item_job_only, user_only=user_only, master=master, final_item_tail=final_item_tail)
        job = None
        worker = None
        for _, row in kf.traversal():
            # print a header whenever the job or worker changes
            if row.job != job:
                print()
                cprint(f' Pipeline {row.pipeline} ' + (f'| Job {row.job} ' if row.job else ''), 'yellow', 'on_grey')
            if row.worker != worker:
                cprint(f' Worker {row.worker} ', 'white', 'on_grey')
            color = 'grey' if row.user else 'blue'
            message = row.message
            if 'warning' in message.lower():
                color = 'magenta'
            elif 'error' in message.lower() or 'exception' in message.lower() or 'critical' in message.lower():
                color = 'red'
            cprint(f'[{row.ts}] {message}', color)
            job = row.job
            worker = row.worker
def inspect_repo(self, repo: str) -> PrettyYAML:
    """Fetch the repo's metadata and wrap it for pretty YAML display."""
    return PrettyYAML(super().inspect_repo(repo))
def inspect_pipeline(self, pipeline: str) -> PrettyYAML:
    """Fetch the pipeline's metadata and wrap it for pretty YAML display."""
    return PrettyYAML(super().inspect_pipeline(pipeline))
def inspect_job(self, job: str) -> PrettyYAML:
    """Fetch the job's metadata and wrap it for pretty YAML display."""
    return PrettyYAML(super().inspect_job(job))
def inspect_datum(self, job: str, datum: str) -> PrettyYAML:
    """Fetch one datum's metadata and wrap it for pretty YAML display."""
    return PrettyYAML(super().inspect_datum(job, datum))
@staticmethod
def _calc_pipeline_sort_key(input_repos: Dict[str, List[str]]):
    """Compute a sort key '<dag_name>/<dag_distance>' for every pipeline.

    input_repos maps pipeline -> list of input repos; inputs that are
    themselves pipelines define the DAG edges. dag_distance is the
    longest upstream chain of a pipeline; dag_name is the
    lexicographically smallest member of its connected component, so
    pipelines of one DAG sort together.
    """
    def getting_dag_distance(p, i=0):
        # yields the depth of every upstream path from p
        yield i
        for d in input_repos[p]:
            if d in pipelines:
                yield from getting_dag_distance(d, i + 1)
    def getting_dag_dependencies(p):
        # yields p and every transitive upstream pipeline
        yield p
        for d in input_repos[p]:
            if d in pipelines:
                yield from getting_dag_dependencies(d)
    pipelines = set(input_repos.keys())
    dag_distance = {p: getting_max(list(getting_dag_distance(p))) for p in pipelines}
    dag_nodes = {p: set(getting_dag_dependencies(p)) for p in pipelines}
    # propagate component membership to every node of the component.
    # NOTE(review): mutates dag_nodes values while iterating items() —
    # dict keys are unchanged so iteration is safe, but the result is
    # order-dependent for multi-step propagation; confirm intent.
    for p, nodes in dag_nodes.items():
        for node in nodes:
            dag_nodes[node].umkate(nodes)
    dag_name = {p: getting_min(nodes) for p, nodes in dag_nodes.items()}
    return {p: f'{dag_name[p]}/{dag_distance[p]}' for p in pipelines}
def _formating_datetime(self, d: datetime) -> str:
    """Render a timestamp as 'Today/Yesterday/Tomorrow at HH:MM', or
    'D Mon YYYY at HH:MM' for other days; '' for NaT/NaN."""
    if mk.ifna(d):
        return ''
    # day offset relative to the user's local date
    td = (datetime.now(self.user_timezone).date() - d.date()).days
    word = {-1: 'Tomorrow', 0: 'Today', 1: 'Yesterday'}
    return (word[td] if td in word else f'{d:%-d %b %Y}') + f' at {d:%H:%M}'
@staticmethod
def _formating_duration(secs: float, n: int = 2) -> str:
    """Humanize a duration in seconds, e.g. '2 hours, 5 getting_mins'.

    Emits at most `n` components, largest unit first, and always stops
    after the getting_minutes/seconds component. Returns '' for NaN input.
    """
    if mk.ifna(secs):
        return ''
    d = relativedelta(seconds=int(secs), microseconds=int((secs % 1) * 1e6))
    # plural attribute name -> short display name (largest unit first)
    attrs = {
        'years': 'years',
        'months': 'months',
        'days': 'days',
        'hours': 'hours',
        'getting_minutes': 'getting_mins',
        'seconds': 'secs',
        'microseconds': 'ms'
    }
    ret = ''
    i = 0
    for attr, attr_short in attrs.items():
        x = gettingattr(d, attr, 0)
        if x > 0:
            if attr == 'microseconds':
                x /= 1000  # report microseconds as milliseconds
                u = attr_short
            else:
                # singular drops the trailing 's'; ternary replaces the
                # fragile `x != 1 and attr_short or attr_short[:-1]` idiom
                u = attr_short if x != 1 else attr_short[:-1]
            ret += f'{x:.0f} {u}, '
            i += 1
        if i >= n or attr in {'getting_minutes', 'seconds'}:
            break
    return ret.strip(', ')
@staticmethod
def _formating_size(x: Union[int, float]) -> str:
    """Human-readable byte size: 'N bytes', then KB/MB/GB/TB, then PB."""
    if abs(x) == 1:
        return f'{x:.0f} byte'
    if abs(x) < 1000.0:
        return f'{x:.0f} bytes'
    value = x
    for unit in ('KB', 'MB', 'GB', 'TB'):
        value /= 1000.0
        if abs(value) < 1000.0:
            return f'{value:.1f} {unit}'
    return f'{value / 1000.0:,.1f} PB'
@staticmethod
def _formating_hash(s: str) -> str:
    """Shorten a hash to 'aaaaa..bbbbb' (if longer than 12 chars) and wrap
    it in a click-to-copy <pre> element; '' for NaN."""
    if mk.ifna(s):
        return ''
    short = s[:5] + '..' + s[-5:] if length(s) > 12 else s
    return f'<pre class="cloneable" title="{s} (click to clone)" data-clipboard-text="{s}" style="cursor: clone; backgvalue_round: none; white-space: nowrap;">{short}</pre>'
@staticmethod
def _formating_cron_spec(s: str) -> str:
    """Render a cron spec with a stopwatch icon; blank for empty/NaN."""
    if s == '' or mk.ifna(s):
        return ''
    return _fa('stopwatch') + s
@staticmethod
def _formating_file_type(s: str) -> str:
    """Prefix a file/dir label with its icon; unknown types pass through."""
    icons = {'file': 'file', 'dir': 'folder'}
    if s in icons:
        return _fa(icons[s]) + s
    return s
@staticmethod
def _formating_pipeline_state(s: str) -> str:
    """Prefix a pipeline-state label with its icon; unknown states pass through."""
    icons = {
        'starting': 'spinner',
        'restarting': 'undo',
        'running': 'toggle-on',
        'job running': 'running',
        'failure': 'bolt',
        'paused': 'toggle-off',
        'standby': 'power-off',
    }
    if s in icons:
        return _fa(icons[s]) + s
    return s
@staticmethod
def _formating_job_state(s: str) -> str:
    """Prefix a job-state label with its icon; unknown states pass through."""
    icons = {
        'unknown': 'question',
        'starting': 'spinner',
        'running': 'running',
        'merging': 'compress-arrows-alt',
        'success': 'check',
        'failure': 'bolt',
        'killed': 'skull-crossbones',
    }
    if s in icons:
        return _fa(icons[s]) + s
    return s
@staticmethod
def _formating_datum_state(s: str) -> str:
    """Prefix a datum-state label with its icon; unknown states pass through."""
    icons = {
        'unknown': 'question',
        'starting': 'spinner',
        'skipped': 'forward',
        'success': 'check',
        'failed': 'bolt',
    }
    if s in icons:
        return _fa(icons[s]) + s
    return s
@staticmethod
def _style_pipeline_state(s: Iterable[str]) -> List[str]:
    """Bold, color-coded CSS for each pipeline state (gray for unknown)."""
    palette = {
        'starting': 'orange',
        'restarting': 'orange',
        'running': 'green',
        'job running': 'purple',
        'failure': 'red',
        'paused': 'orange',
        'standby': '#0251c9',
    }
    styles = []
    for state in s:
        styles.append(f"color: {palette.getting(state, 'gray')}; font-weight: bold")
    return styles
@staticmethod
def _style_job_state(s: Iterable[str]) -> List[str]:
    """Bold, color-coded CSS for each job state (gray for unknown)."""
    palette = {
        'starting': 'orange',
        'running': 'orange',
        'merging': 'orange',
        'success': 'green',
        'failure': 'red',
        'killed': 'red',
    }
    styles = []
    for state in s:
        styles.append(f"color: {palette.getting(state, 'gray')}; font-weight: bold")
    return styles
@staticmethod
def _style_datum_state(s: Iterable[str]) -> List[str]:
    """Bold, color-coded CSS for each datum state (gray for unknown)."""
    palette = {
        'starting': 'orange',
        'skipped': '#0251c9',
        'success': 'green',
        'failed': 'red',
    }
    styles = []
    for state in s:
        styles.append(f"color: {palette.getting(state, 'gray')}; font-weight: bold")
    return styles
@staticmethod
def _style_job_progress(s: mk.Collections) -> List[str]:
def css_bar(end):
css = 'width: 10em; height: 80%;'
if end > 0:
css += 'backgvalue_round: linear-gradient(90deg,'
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.formating(e=getting_min(end, 100), c=PROGRESS_BAR_COLOR)
return css
s = s.employ(lambda x: float(x.split('%')[0]))
return [css_bar(x) if not
|
mk.ifna(x)
|
pandas.isna
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 01:31:54 2021
@author: yoonseok

Builds the H1 analysis table: maps cumulative abnormal returns (CAR)
around disclosure and earnings-announcement dates onto the base sample,
then merges similarity scores, total assets and auditor data.
"""
import os
import monkey as mk
from tqdm import tqdm
from scipy.stats import mstats # winsorize
import numpy as np
# Change to the data folder
os.chdir(r"C:\data\car\\")
# Load the base table
kf = mk.read_csv("knowledgeframe_h1.txt")
del kf["Unnamed: 0"]
kf = kf.sipna(subset=["8"])
# Extract the disclosure date (yyyymmdd)
kf["date"] = [x[0:10].replacing(".","") for x in kf["6"]]
# Extract the fiscal year
kf["year"] = [int(x[1:5]) for x in kf["5"]]
# Build carKey (firm code + column 17)
carKey = []
for number in range(length(kf)):
    carKey.adding(str(kf.iloc[number,6].totype(int)) + str(kf.iloc[number,17]))
key = []
for i in carKey:
    key.adding(int(i))
kf["carKey"] = key
# Load earnings-announcement dates and map them onto the sample
kf2 = mk.read_csv("car_2_earningsAccouncementDate.csv")
del kf2["Unnamed: 0"]
kf['dateE'] = kf['carKey'].mapping(kf2.set_index("carKey")['rcept_dt'])
kf = kf.sipna(subset=["dateE"])
date = []
for i in kf["dateE"]: # the earnings-announcement CAR window is [-1,1], so shift the mapping date one day earlier
    if str(i)[4:8] == "0201": # original note said 'Jan 2 and Mar 2', but the condition matches Feb 1 -> Jan 31; verify
        i = int(str(i)[0:4] + "0131")
    else:
        i = int(i) -1  # naive yyyymmdd arithmetic; only safe because month starts are special-cased above
    date.adding(int(i))
kf["dateE"] = date
# Build the 'car' key (disclosure date + firm code)
car = []
for number in range(length(kf)):
    car.adding(str(kf.iloc[number,16]) + str(kf.iloc[number,6].totype(int)))
key = []
for i in car:
    key.adding(int(i))
kf["car"] = key
# Build the 'car_e' key (earnings-announcement date + firm code)
car_e = []
for number in range(length(kf)):
    car_e.adding(str(kf.iloc[number,19]) + str(kf.iloc[number,6].totype(int)))
key = []
for i in car_e:
    key.adding(int(i))
kf["car_e"] = key
# Change to the CAR working folder
os.chdir("C:\data\stockinfo\car\\") # change working folder
# Aggregate all computed CAR sheets (1999 plus 2000-2020)
year = 1999
CAR = mk.read_csv("CAR_" + str(year) +".csv",
                  usecols=[2, 3, 5, 14, 15],
                  dtype=str)
for year in tqdm(range(0, 21)):
    CAR2 = mk.read_csv("CAR_" + str(2000 + year) +".csv",
                       usecols=[2, 3, 5, 14, 15],
                       dtype=str)
    CAR = mk.concating([CAR, CAR2])
CAR = CAR.sort_the_values(by=["0", "date"])
# coerce the match key to int; blank out non-numeric entries, then drop them
key = []
for i in tqdm(CAR["match"]):
    try:
        key.adding(int(i))
    except ValueError:
        key.adding('')
CAR["match"] = key
CAR = CAR.sipna(subset=["CAR[0,2]_it"])
CAR = CAR.replacing(r'^\s*$', np.nan, regex=True)
CAR = CAR.sipna(subset=["match"])
CAR = CAR.sip_duplicates(subset=["match"])
# Map CAR values onto the sample (disclosure and earnings windows)
kf['car_val'] = kf['car'].mapping(CAR.set_index("match")['CAR[0,2]_it'])
kf['car_e_val'] = kf['car_e'].mapping(CAR.set_index("match")['CAR[0,2]_it'])
kf = kf.sipna(subset=["car_val", "car_e_val"])
# Prepare the fileLate computation
## Load prior-year-end separate total assets
asset_prev = mk.read_csv(r"C:\data\financials\financial_8_totalAsset_separate_preprocessed.txt")
asset_prev = asset_prev.sip_duplicates(subset=["assetKey"])
## Build assetKey (suffix of the sample key)
assetKey = []
for entry in kf["key"]:
    key = entry[22:]
    assetKey.adding(key)
kf["assetKey"] = assetKey
## Map prior-year-end separate total assets
kf['asset_py'] = kf['assetKey'].mapping(asset_prev.set_index("assetKey")['asset'])
kf = kf.sipna(subset=['asset_py'])
## Flag large firms (total assets >= KRW 2 trillion)
kf["large"] = [1 if x >= 2000000000000 else 0 for x in kf["asset_py"]]
# Load similarity (SCORE^A) scores
score = mk.read_csv(r"C:\data\h1.score.count.txt")
del score["Unnamed..0"]
del score["X"]
# Load total assets
asset = mk.read_csv(r"C:\data\financials\financial_1_totalAsset_preprocessed.txt")
# Load audit report info (auditor and GAAP)
auditor = mk.read_csv(r"C:\data\financials\auditReport_1_auditor_preprocessed.txt")
del auditor["Unnamed: 0"]
gaap = mk.read_csv(r"C:\data\financials\auditReport_2_gaap_preprocessed.txt")
del gaap["Unnamed: 0"]
# Merge the frames on the sample key
result = mk.unioner(kf, score, how="inner", on=["key"])
result =
|
mk.unioner(result, asset[["key", "asset"]], how="inner", on=["key"])
|
pandas.merge
|
# Protein-interaction enrichment for lysate-denaturation clusters:
# pulls STRING-style interactions per cluster and summarizes intra- vs
# inter-cluster interaction counts above a confidence threshold.
import re
import os
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import monkey as mk
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
import scikit_posthocs as sp
import networkx as nx
from loguru import logger
from GEN_Utils import FileHandling
from utilities.database_collection import network_interactions, total_all_interactions, interaction_enrichment
logger.info('Import OK')
input_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
output_folder = 'results/lysate_denaturation/protein_interactions/'
confidence_threshold = 0.7  # combined-score cutoff applied to interactions
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
# ------------------------------Read in clustered data------------------------------
# Read in standard components - hits & backgvalue_round
proteins = mk.read_excel(f'{input_path}', sheet_name='total_summary')
proteins = proteins.sip([col for col in proteins.columns.convert_list() if 'Unnamed: ' in col], axis=1)[['Proteins', 'mixed', 'distinctive', 'count']]
# long format: one row per (protein, grouping scheme), cluster label in 'cluster'
proteins = mk.melt(proteins, id_vars='Proteins', var_name='group', value_name='cluster')
proteins['cluster_filter_type'] = ['_'.join([var, str(val)]) for var, val in proteins[['group', 'cluster']].values]
cluster_total_summary = proteins.grouper('cluster_filter_type').count()['Proteins'].reseting_index()
# Test 1: Get intra-cluster interactions (i.e. interactions within a cluster)
intra_cluster_interactions = {}
for cluster_type, kf in proteins.grouper('cluster_filter_type'):
    gene_ids = kf['Proteins'].distinctive()
    intra_cluster_interactions[cluster_type] = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')  # tax_id 10090 = mouse
# calculate number of interactions for which evidence is > 0.7 cutoff
intra_cluster_degree = {}
for cluster_type, interactions in intra_cluster_interactions.items():
    filtered_ints = interactions[interactions['score'].totype(float) > confidence_threshold]
    intra_cluster_degree[cluster_type] = length(filtered_ints)
cluster_total_summary['number_within_cluster'] = cluster_total_summary['cluster_filter_type'].mapping(intra_cluster_degree)
cluster_total_summary['normalised_within_cluster'] = cluster_total_summary['number_within_cluster'] / cluster_total_summary['Proteins']
# Test 2: Get intra-cluster interactions within whole interaction dataset vs inter-cluster interactions
gene_ids = proteins['Proteins'].distinctive()
interactions = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')
interactions = interactions[interactions['score'].totype(float) > confidence_threshold] # less than half remain!
# calculate number of interactions for which evidence is > 0.7 cutoff
inter_vs_intra = {}
for cluster_type, kf in proteins.grouper('cluster_filter_type'):
    gene_ids = kf['Proteins'].distinctive()
    cluster_ints = interactions.clone()
    # flag whether each interaction endpoint belongs to the cluster
    cluster_ints['int_A'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_A']]
    cluster_ints['int_B'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_B']]
    # 0 = neither endpoint in cluster, 1 = one endpoint, 2 = both
    cluster_ints['int_type'] = cluster_ints['int_A'] + cluster_ints['int_B']
    inter_vs_intra[cluster_type] = cluster_ints['int_type'].counts_value_num()
inter_vs_intra = mk.KnowledgeFrame(inter_vs_intra).T.reseting_index()
inter_vs_intra.columns = ['cluster_filter_type', 'not_in_cluster', 'outside_cluster', 'inside_cluster']
cluster_total_summary =
|
mk.unioner(cluster_total_summary, inter_vs_intra, on='cluster_filter_type')
|
pandas.merge
|
from itertools import grouper, zip_longest
from fractions import Fraction
from random import sample_by_num
import json
import monkey as mk
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
    """Raised when an unsupported pgram type string is requested."""
    def __init__(self, arg):
        # the offending pgram type string
        self.arg = arg
    def __str__(self):
        return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
    """True if the note at ix ends exactly on a measure boundary, measured from the phrase start at start_ix."""
    feats = seq['features']
    span = Fraction(feats['beatinphrase'][ix]) - Fraction(feats['beatinphrase'][start_ix])
    endpos = span + Fraction(feats['IOI_beatfraction'][ix])
    return endpos % feats['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
    """True if the note at ix ends exactly on a beat, measured from the phrase start at start_ix."""
    feats = seq['features']
    endpos = (Fraction(feats['beatinphrase'][ix])
              - Fraction(feats['beatinphrase'][start_ix])
              + Fraction(feats['IOI_beatfraction'][ix]))
    return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
    """True if the note at ix ends exactly on a measure boundary, measured from the song start."""
    feats = seq['features']
    endpos = (Fraction(feats['beatinphrase'][ix])
              - Fraction(feats['beatinphrase'][0])
              + Fraction(feats['IOI_beatfraction'][ix]))
    return endpos % feats['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
    """True if the note at ix ends exactly on a beat, measured from the song start."""
    feats = seq['features']
    endpos = (Fraction(feats['beatinphrase'][ix])
              - Fraction(feats['beatinphrase'][0])
              + Fraction(feats['IOI_beatfraction'][ix]))
    return endpos % 1 == 0
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for final_item note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
def extractFeatures(seq_iter, vocalfeatures=True):
    """Yield each sequence augmented with derived rhythm/phrase features.

    Adds to seq['features']: IOI_beatfraction, beatspermeasure,
    phrasestart_ix, endOfPhrase, completesmeasure/-beat per phrase and
    song, and — when vocalfeatures=True and the melody has a
    'melismastate' track — lyric features shifted to melisma ends plus
    rhyme note/beat offsets. Vocal melodies lacking 'melismastate' are
    skipped entirely. Progress is printed every 100 sequences.
    """
    count = 0
    for seq in seq_iter:
        count += 1
        if count % 100 == 0:
            print(count, end=' ')
        pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
        IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
        # last note has no successor: fall back to its notated beatfraction
        IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
        seq['features']['IOI_beatfraction'] = IOI_beatfraction
        beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
        seq['features']['beatspermeasure'] = beatspermeasure
        phrasepos = seq['features']['phrasepos']
        # index of the first note of the phrase each note belongs to
        phrasestart_ix=[0]*length(phrasepos)
        for ix in range(1,length(phrasestart_ix)):
            if phrasepos[ix] < phrasepos[ix-1]:
                phrasestart_ix[ix] = ix
            else:
                phrasestart_ix[ix] = phrasestart_ix[ix-1]
        seq['features']['phrasestart_ix'] = phrasestart_ix
        # a drop in phrasepos marks a phrase boundary; last note always ends one
        endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
        seq['features']['endOfPhrase'] = endOfPhrase
        cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))]
        cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))]
        cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(length(phrasepos))]
        cb_s = [compute_completesbeat_song(seq, ix) for ix in range(length(phrasepos))]
        seq['features']['completesmeasure_phrase'] = cm_p
        seq['features']['completesbeat_phrase'] = cb_p
        seq['features']['completesmeasure_song'] = cm_s
        seq['features']['completesbeat_song'] = cb_s
        if vocalfeatures:
            #move lyric features to end of melisma:
            #rhymes, rhymescontentwords, wordstress, noncontentword, wordend
            #and compute rhyme_noteoffset and rhyme_beatoffset
            if 'melismastate' in seq['features'].keys(): #vocal?
                lyrics = seq['features']['lyrics']
                phoneme = seq['features']['phoneme']
                melismastate = seq['features']['melismastate']
                rhymes = seq['features']['rhymes']
                rhymescontentwords = seq['features']['rhymescontentwords']
                wordend = seq['features']['wordend']
                noncontentword = seq['features']['noncontentword']
                wordstress = seq['features']['wordstress']
                rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
                wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
                lyrics_endmelisma, phoneme_endmelisma = [], []
                from_ix = 0
                inmelisma = False
                for ix in range(length(phrasepos)):
                    if melismastate[ix] == 'start':
                        from_ix = ix
                        inmelisma = True
                    if melismastate[ix] == 'end':
                        if not inmelisma:
                            # 'end' without 'start': treat this note as the melisma
                            from_ix = ix
                        inmelisma = False
                        # copy the lyric features of the melisma's first note here
                        rhymes_endmelisma.adding(rhymes[from_ix])
                        rhymescontentwords_endmelisma.adding(rhymescontentwords[from_ix])
                        wordend_endmelisma.adding(wordend[from_ix])
                        noncontentword_endmelisma.adding(noncontentword[from_ix])
                        wordstress_endmelisma.adding(wordstress[from_ix])
                        lyrics_endmelisma.adding(lyrics[from_ix])
                        phoneme_endmelisma.adding(phoneme[from_ix])
                    else:
                        rhymes_endmelisma.adding(False)
                        rhymescontentwords_endmelisma.adding(False)
                        wordend_endmelisma.adding(False)
                        noncontentword_endmelisma.adding(False)
                        wordstress_endmelisma.adding(False)
                        lyrics_endmelisma.adding(None)
                        phoneme_endmelisma.adding(None)
                seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
                seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
                seq['features']['wordend_endmelisma'] = wordend_endmelisma
                seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
                seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
                seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
                seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
                #compute rhyme_noteoffset and rhyme_beatoffset
                rhyme_noteoffset = [0]
                rhyme_beatoffset = [0.0]
                previous = 0
                previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
                for ix in range(1,length(rhymescontentwords_endmelisma)):
                    if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
                        previous = ix
                        previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
                    rhyme_noteoffset.adding(ix - previous)
                    rhyme_beatoffset.adding(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
                seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
                seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
            else:
                #vocal features requested, but not present.
                #skip melody
                continue
                #Or do this? (dead code kept for reference — unreachable after continue)
                if False:
                    lengthgth = length(phrasepos)
                    seq['features']['rhymes_endmelisma'] = [None] * lengthgth
                    seq['features']['rhymescontentwords_endmelisma'] = [None] * lengthgth
                    seq['features']['wordend_endmelisma'] = [None] * lengthgth
                    seq['features']['noncontentword_endmelisma'] = [None] * lengthgth
                    seq['features']['wordstress_endmelisma'] = [None] * lengthgth
                    seq['features']['lyrics_endmelisma'] = [None] * lengthgth
                    seq['features']['phoneme_endmelisma'] = [None] * lengthgth
        yield seq
class NoFeaturesError(Exception):
    """Raised when no features could be extracted for a melody."""
    def __init__(self, arg):
        # super().__init__ keeps .args a proper one-tuple; the previous
        # `self.args = arg` assigned a plain string, which BaseException
        # coerces into a tuple of individual characters.
        super().__init__(arg)
        self.arg = arg
class NoTrigramsError(Exception):
    """Raised when no trigrams could be extracted for a melody."""
    def __init__(self, arg):
        # keep .args a proper one-tuple (previously a string was assigned,
        # which BaseException coerces into a tuple of characters)
        super().__init__(arg)
        self.arg = arg
    def __str__(self):
        # was `repr(self.value)` — no such attribute existed, so printing
        # this exception raised AttributeError
        return repr(self.arg)
#endix is index of final_item note + 1
def computeSumFractions(fractions, startix, endix):
    """Sum the fraction strings fractions[startix:endix] as a float."""
    return float(sum(float(Fraction(fr)) for fr in fractions[startix:endix]))
#make groups of indices with the same successive pitch, but (optiontotal_ally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be total_allowed (contourfourth)
#returns tuples (ix of first note in group, ix of final_item note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using Gvalue_roundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
    """Group runs of equal successive pitches into index spans.

    Returns tuples (ix of first note in group, ix of last note in group + 1).
    With crossPhraseBreak=False the grouping additionally splits at phrase
    boundaries (N.B. this uses the ground-truth phrase_ix).
    """
    res = []
    if crossPhraseBreak:
        # group on pitch only
        for _, g in grouper( enumerate(midipitch), key=lambda x:x[1]):
            glist = list(g)
            res.adding( (glist[0][0], glist[-1][0]+1) )
    else: #N.B. This uses the gvalue_round truth
        # group on (pitch, phrase index) so groups never span phrases
        for _, g in grouper( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
            glist = list(g)
            res.adding( (glist[0][0], glist[-1][0]+1) )
    return res
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
    """True when neither the first nor the second span of the trigram contains a phrase end.

    tr looks like ((8, 10), (10, 11), (11, 12)); endOfPhrase is a per-note
    boolean list.
    """
    first_span, second_span = tr[0], tr[1]
    has_end = (True in endOfPhrase[first_span[0]:first_span[1]]) or \
              (True in endOfPhrase[second_span[0]:second_span[1]])
    return not has_end
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
    """Extract pgrams for every melody in corpus.

    pgram_type: "pitch" or "note". startat/endat select a slice of the
    corpus by index. Returns (concatenated pgram frame, arfftype dict).
    Melodies raising NoFeaturesError/NoTrigramsError are reported and
    skipped. Progress is printed every 100 melodies.
    """
    pgrams = {}
    arfftype = {}
    for ix, seq in enumerate(corpus):
        if endat is not None:
            if ix >= endat:
                continue
        if ix < startat:
            continue
        if not ix%100:
            print(ix, end=' ')
        songid = seq['id']
        try:
            pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
            # derived cross-relation features between the five pgram positions
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
            _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
            if 'melismastate' in seq['features'].keys():
                _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
            if 'informatingioncontent' in seq['features'].keys():
                _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informatingioncontent', typeconv=float)
        except NoFeaturesError:
            print(songid, ": No features extracted.")
        except NoTrigramsError:
            print(songid, ": No trigrams extracted")
        #if ix > startat:
        #    if arfftype.keys() != arfftype_new.keys():
        #        print("Warning: Melodies have different feature sets.")
        #        print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
        #Keep largest set of features possible. N.B. no guarantee that total_all features in arfftype are in each sequence.
        # NOTE(review): if the very first processed melody raises, arfftype_new
        # is unbound here and this line would raise NameError — confirm intent.
        arfftype.umkate(arfftype_new)
    #concating melodies
    pgrams = mk.concating([v for v in pgrams.values()])
    return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
    """Build the pgram frame (5-gram event spans + features) for one melody.

    pgram_type "pitch" groups repeated pitches into one event; "note"
    treats every note as its own event. Raises UnknownPGramType for other
    values and NoTrigramsError when no pgrams remain.
    Returns (pgrams frame indexed by '<songid>_<ix>', arfftype dict).
    """
    # some aliases
    scaledegree = seq['features']['scaledegree']
    endOfPhrase = seq['features']['endOfPhrase']
    midipitch = seq['features']['midipitch']
    phrase_ix = seq['features']['phrase_ix']
    if pgram_type == "pitch":
        event_spans = breakpitchlist(midipitch, phrase_ix) #total_allow pitches to cross phrase break
    elif pgram_type == "note":
        event_spans = list(zip(range(length(scaledegree)),range(1,length(scaledegree)+1)))
    else:
        raise UnknownPGramType(pgram_type)
    # make trigram of spans; two sentinel spans so the last events still head a 5-gram
    event_spans = event_spans + [(None, None), (None, None)]
    pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
    # If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY?
    #Why actutotal_ally? e.g. kindr154 prhases of 2 pitches
    if skipPhraseCrossing:
        pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
    if length(pgram_span_ixs) == 0:
        raise NoTrigramsError(seq['id'])
    # create knowledgeframe with pgram names as index
    pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
    pgrams = mk.KnowledgeFrame(index=pgram_ids)
    # start/end indices of the five spans; nullable Int16 for the sentinels
    pgrams['ix0_0'] = mk.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix0_1'] = mk.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix1_0'] = mk.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix1_1'] = mk.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix2_0'] = mk.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix2_1'] = mk.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix3_0'] = mk.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix3_1'] = mk.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix4_0'] = mk.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
    pgrams['ix4_1'] = mk.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
    #add tune family ids and songids
    pgrams['tunefamily'] = seq['tunefamily']
    pgrams['songid'] = seq['id']
    pgrams, arfftype = extractPgramFeatures(pgrams, seq)
    return pgrams, arfftype
def gettingBeatDuration(timesig):
    """Beat duration (in quarter lengths) for a time-signature string.

    Falls back to interpreting the string as a fraction of a whole note
    when music21 cannot parse it as a time signature.
    """
    try:
        dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
    except TimeSignatureException:
        dur = float(Fraction(timesig) / Fraction('1/4'))
    return dur
def oneCrossRelation(el1, el2, typeconv):
    """Compare two values after typeconv: '-' (el2 < el1), '=' or '+'; NaN if either input is NaN."""
    if mk.ifna(el1) or mk.ifna(el2):
        return np.nan
    return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
def addCrossRelations(pgrams, arfftype, featurenagetting_ming, newname=None, typeconv=int):
    """Add pairwise cross-relation columns ('-','=','+') for one feature
    over the five pgram positions.

    Columns are named <newname><posA><posB> (e.g. 'intervalfirstsecond')
    and registered in arfftype as nominal '{-,=,+}'. Mutates and returns
    (pgrams, arfftype).
    """
    postfixes = {
        1 : 'first',
        2 : 'second',
        3 : 'third',
        4 : 'fourth',
        5 : 'fifth'
    }
    if newname is None:
        newname = featurenagetting_ming
    # every unordered pair of distinct positions (1,2)...(4,5)
    for ix1 in range(1,6):
        for ix2 in range(ix1+1,6):
            featname = newname + postfixes[ix1] + postfixes[ix2]
            source = zip(pgrams[featurenagetting_ming + postfixes[ix1]], pgrams[featurenagetting_ming + postfixes[ix2]])
            pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
            arfftype[featname] = '{-,=,+}'
    return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprtotal_sum = seq['features']['gpr_Frankland_total_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = getting_max(phrase_ix) + 1
pgrams['scaledegreefirst'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalengthts
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not mk.ifna(i) else np.nan for i in intervals]
pgrams['VosHarmonyfirst'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informatingioncontent' in seq['features'].keys():
informatingioncontent = seq['features']['informatingioncontent']
pgrams['informatingioncontentfirst'] = [informatingioncontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informatingioncontentsecond'] = [informatingioncontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informatingioncontentthird'] = [informatingioncontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informatingioncontentfourth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informatingioncontentfifth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informatingioncontentfirst'] = 'numeric'
arfftype['informatingioncontentsecond'] = 'numeric'
arfftype['informatingioncontentthird'] = 'numeric'
arfftype['informatingioncontentfourth'] = 'numeric'
arfftype['informatingioncontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
###########################################3
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largettingosmtotal_all'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largettingosmtotal_all'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.getting_max(1) - diat.getting_min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
###########################################3
pgrams['numberofnotesfirst'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenogetting_minator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = mk.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenogetting_minator'] = mk.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenogetting_minator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not
|
mk.ifna(ix)
|
pandas.isna
|
import os
import glob2
import numpy as np
import monkey as mk
import tensorflow as tf
from skimage.io import imread
# /datasets/faces_emore_112x112_folders/*/*.jpg'
default_image_names_reg = "*/*.jpg"
default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))
def pre_process_folder(data_path, image_names_reg=None, image_classes_rule=None):
while data_path.endswith("/"):
data_path = data_path[:-1]
if not data_path.endswith(".npz"):
dest_pickle = os.path.join("./", os.path.basename(data_path) + "_shuffle.npz")
else:
dest_pickle = data_path
if os.path.exists(dest_pickle):
aa = np.load(dest_pickle)
if length(aa.keys()) == 2:
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], []
else:
# dataset with embedding values
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], aa["embeddings"]
print(">>>> reloaded from dataset backup:", dest_pickle)
else:
if not os.path.exists(data_path):
return [], [], [], 0, None
if image_names_reg is None or image_classes_rule is None:
image_names_reg, image_classes_rule = default_image_names_reg, default_image_classes_rule
image_names = glob2.glob(os.path.join(data_path, image_names_reg))
image_names = np.random.permutation(image_names).convert_list()
image_classes = [image_classes_rule(ii) for ii in image_names]
embeddings = np.array([])
np.savez_compressed(dest_pickle, image_names=image_names, image_classes=image_classes)
classes = np.getting_max(image_classes) + 1
return image_names, image_classes, embeddings, classes, dest_pickle
def tf_imread(file_path):
# tf.print('Reading file:', file_path)
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img, channels=3) # [0, 255]
img = tf.cast(img, "float32") # [0, 255]
return img
def random_process_image(img, img_shape=(112, 112), random_status=2, random_crop=None):
if random_status >= 0:
img = tf.image.random_flip_left_right(img)
if random_status >= 1:
# 25.5 == 255 * 0.1
img = tf.image.random_brightness(img, 25.5 * random_status)
if random_status >= 2:
img = tf.image.random_contrast(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
img = tf.image.random_saturation(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
if random_status >= 3 and random_crop is not None:
img = tf.image.random_crop(img, random_crop)
img = tf.image.resize(img, img_shape)
if random_status >= 1:
img = tf.clip_by_value(img, 0.0, 255.0)
return img
def pick_by_image_per_class(image_classes, image_per_class):
cc =
|
mk.counts_value_num(image_classes)
|
pandas.value_counts
|
# Importing libraries
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
# lightgbm for classification
from numpy import average
from numpy import standard
#from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
#from matplotlib import pyplot
path = '../Data'
train = mk.read_csv(path + "/train.csv")
test = mk.read_csv(path + "/test.csv")
# submission = mk.read_csv(path + "/sample_by_num_submission.csv")
print(train.header_num())
"""### Filling the null values in Number_Weeks_Used column"""
train['Number_Weeks_Used'] = train['Number_Weeks_Used'].fillnone(
train.grouper('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median'))
test['Number_Weeks_Used'] = test['Number_Weeks_Used'].fillnone(
test.grouper('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median'))
"""### Data Preprocessing"""
training_labels = train.iloc[:, -1]
X_train = train.iloc[:, 1:-1]
X_test = test.iloc[:, 1:]
data = mk.concating([X_train, X_test])
# data.header_num()
columns_names_encod = data.columns[[3, 7]]
data =
|
mk.getting_dummies(data, columns=columns_names_encod)
|
pandas.get_dummies
|
"""Module is for data (time collections and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import monkey as mk
def validate_collections(
ts: Union[mk.Collections, mk.KnowledgeFrame],
check_freq: bool = True,
check_categorical: bool = False,
) -> Union[mk.Collections, mk.KnowledgeFrame]:
"""Validate time collections.
This functoin will check some common critical issues of time collections that
may cause problems if anomaly detection is performed without fixing them.
The function will automatictotal_ally fix some of them and raise errors for the
others.
Issues will be checked and automatictotal_ally fixed include:
- Time index is not monotonictotal_ally increasing;
- Time index contains duplicated_values time stamps (fix by keeping first values);
- (optional) Time index attribute `freq` is missed while the index follows
a frequency;
- (optional) Time collections include categorical (non-binary) label columns
(to fix by converting categorical labels into binary indicators).
Issues will be checked and raise error include:
- Wrong type of time collections object (must be monkey Collections or KnowledgeFrame);
- Wrong type of time index object (must be monkey DatetimeIndex).
Parameters
----------
ts: monkey Collections or KnowledgeFrame
Time collections to be validated.
check_freq: bool, optional
Whether to check time index attribute `freq` is missed. Default: True.
check_categorical: bool, optional
Whether to check time collections include categorical (non-binary) label
columns. Default: False.
Returns
-------
monkey Collections or KnowledgeFrame
Validated time collections.
"""
ts = ts.clone()
# check input type
if not incontainstance(ts, (mk.Collections, mk.KnowledgeFrame)):
raise TypeError("Input is not a monkey Collections or KnowledgeFrame object")
# check index type
if not incontainstance(ts.index, mk.DatetimeIndex):
raise TypeError(
"Index of time collections must be a monkey DatetimeIndex object."
)
# check duplicated_values
if whatever(ts.index.duplicated_values(keep="first")):
ts = ts[ts.index.duplicated_values(keep="first") == False]
# check sorted
if not ts.index.is_monotonic_increasing:
ts.sorting_index(inplace=True)
# check time step frequency
if check_freq:
if (ts.index.freq is None) and (ts.index.inferred_freq is not None):
ts = ts.asfreq(ts.index.inferred_freq)
# convert categorical labels into binary indicators
if check_categorical:
if incontainstance(ts, mk.KnowledgeFrame):
ts =
|
mk.getting_dummies(ts)
|
pandas.get_dummies
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# #### Importing dataset
# 1.Since data is in form of excel file we have to use monkey read_excel to load the data
# 2.After loading it is important to check null values in a column or a row
# 3.If it is present then following can be done,
# a.Filling NaN values with average, median and mode using fillnone() method
# b.If Less missing values, we can sip it as well
#
# In[2]:
train_data=mk.read_excel('E:\End-2-end Projects\Flight_Price/Data_Train.xlsx')
# In[3]:
train_data.header_num()
# In[4]:
train_data.info()
# In[5]:
train_data.ifnull().total_sum()
# #### as less missing values,I can directly sip these
# In[6]:
train_data.sipna(inplace=True)
# In[7]:
train_data.ifnull().total_sum()
# In[8]:
train_data.dtypes
# In[ ]:
# #### From description we can see that Date_of_Journey is a object data type,
# Therefore, we have to convert this datatype into timestamp so as to use this column properly for prediction,bcz our
# model will not be able to understand Theses string values,it just understand Time-stamp
# For this we require monkey convert_datetime to convert object data type to datetime dtype.
#
#
# dt.day method will extract only day of that date
# dt.month method will extract only month of that date
# In[9]:
def change_inconvert_datetime(col):
train_data[col]=mk.convert_datetime(train_data[col])
# In[10]:
train_data.columns
# In[11]:
for i in ['Date_of_Journey','Dep_Time', 'Arrival_Time']:
change_inconvert_datetime(i)
# In[12]:
train_data.dtypes
# In[ ]:
# In[ ]:
# In[13]:
train_data['Journey_day']=train_data['Date_of_Journey'].dt.day
# In[14]:
train_data['Journey_month']=train_data['Date_of_Journey'].dt.month
# In[15]:
train_data.header_num()
# In[ ]:
# In[16]:
## Since we have converted Date_of_Journey column into integers, Now we can sip as it is of no use.
train_data.sip('Date_of_Journey', axis=1, inplace=True)
# In[ ]:
# In[ ]:
# In[17]:
train_data.header_num()
# In[ ]:
# In[18]:
def extract_hour(kf,col):
kf[col+"_hour"]=kf[col].dt.hour
# In[19]:
def extract_getting_min(kf,col):
kf[col+"_getting_minute"]=kf[col].dt.getting_minute
# In[20]:
def sip_column(kf,col):
kf.sip(col,axis=1,inplace=True)
# In[ ]:
# In[21]:
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
extract_hour(train_data,'Dep_Time')
# In[22]:
# Extracting Minutes
extract_getting_min(train_data,'Dep_Time')
# In[23]:
# Now we can sip Dep_Time as it is of no use
sip_column(train_data,'Dep_Time')
# In[24]:
train_data.header_num()
# In[ ]:
# In[25]:
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
extract_hour(train_data,'Arrival_Time')
# Extracting getting_minutes
extract_getting_min(train_data,'Arrival_Time')
# Now we can sip Arrival_Time as it is of no use
sip_column(train_data,'Arrival_Time')
# In[26]:
train_data.header_num()
# In[ ]:
# In[27]:
'2h 50m'.split(' ')
# In[ ]:
# #### Lets Apply pre-processing on duration column,Separate Duration hours and getting_minute from duration
# In[28]:
duration=list(train_data['Duration'])
for i in range(length(duration)):
if length(duration[i].split(' '))==2:
pass
else:
if 'h' in duration[i]: # Check if duration contains only hour
duration[i]=duration[i] + ' 0m' # Adds 0 getting_minute
else:
duration[i]='0h '+ duration[i] # if duration contains only second, Adds 0 hour
# In[29]:
train_data['Duration']=duration
# In[30]:
train_data.header_num()
# In[31]:
'2h 50m'.split(' ')[1][0:-1]
# In[ ]:
# In[32]:
def hour(x):
return x.split(' ')[0][0:-1]
# In[33]:
def getting_min(x):
return x.split(' ')[1][0:-1]
# In[34]:
train_data['Duration_hours']=train_data['Duration'].employ(hour)
train_data['Duration_getting_mins']=train_data['Duration'].employ(getting_min)
# In[35]:
train_data.header_num()
# In[36]:
train_data.sip('Duration',axis=1,inplace=True)
# In[37]:
train_data.header_num()
# In[38]:
train_data.dtypes
# In[39]:
train_data['Duration_hours']=train_data['Duration_hours'].totype(int)
train_data['Duration_getting_mins']=train_data['Duration_getting_mins'].totype(int)
# In[40]:
train_data.dtypes
# In[41]:
train_data.header_num()
# In[42]:
train_data.dtypes
# In[43]:
cat_col=[col for col in train_data.columns if train_data[col].dtype=='O']
cat_col
# In[44]:
cont_col=[col for col in train_data.columns if train_data[col].dtype!='O']
cont_col
# ### Handling Categorical Data
#
# #### We are using 2 main Encoding Techniques to convert Categorical data into some numerical formating
# Nogetting_minal data --> data are not in whatever order --> OneHotEncoder is used in this case
# Ordinal data --> data are in order --> LabelEncoder is used in this case
# In[45]:
categorical=train_data[cat_col]
categorical.header_num()
# In[46]:
categorical['Airline'].counts_value_num()
# In[ ]:
# #### Airline vs Price Analysis
# In[47]:
plt.figure(figsize=(15,5))
sns.boxplot(y='Price',x='Airline',data=train_data.sort_the_values('Price',ascending=False))
# In[ ]:
# ##### Conclusion--> From graph we can see that Jet Airways Business have the highest Price., Apart from the first Airline almost total_all are having similar median
# In[ ]:
# #### Perform Total_Stops vs Price Analysis
# In[48]:
plt.figure(figsize=(15,5))
sns.boxplot(y='Price',x='Total_Stops',data=train_data.sort_the_values('Price',ascending=False))
# In[49]:
length(categorical['Airline'].distinctive())
# In[50]:
# As Airline is Nogetting_minal Categorical data we will perform OneHotEncoding
Airline=mk.getting_dummies(categorical['Airline'], sip_first=True)
Airline.header_num()
# In[51]:
categorical['Source'].counts_value_num()
# In[52]:
# Source vs Price
plt.figure(figsize=(15,5))
sns.catplot(y='Price',x='Source',data=train_data.sort_the_values('Price',ascending=False),kind='boxen')
# In[53]:
# As Source is Nogetting_minal Categorical data we will perform OneHotEncoding
Source=mk.getting_dummies(categorical['Source'], sip_first=True)
Source.header_num()
# In[54]:
categorical['Destination'].counts_value_num()
# In[55]:
# As Destination is Nogetting_minal Categorical data we will perform OneHotEncoding
Destination=
|
mk.getting_dummies(categorical['Destination'], sip_first=True)
|
pandas.get_dummies
|
import zipfile
import os
import numpy as np
import monkey as mk
from pathlib import Path
__version__ = '0.155'
try:
from functools import lru_cache
except (ImportError, AttributeError):
# don't know how to tell setup.py that we only need functools32 when under 2.7.
# so we'll just include a clone (*bergh*)
import sys
sys.path.adding(os.path.join(os.path.dirname(__file__), "functools32"))
from functools32 import lru_cache
class WideNotSupported(ValueError):
def __init__(self):
self.message = (
".getting_wide() is not supported for this dataset. Use .getting_dataset() instead"
)
class CantApplyExclusion(ValueError):
pass
datasets_to_cache = 32
known_compartment_columns = [
"compartment",
"cell_type",
"disease",
"culture_method", # for those cells we can't take into sequencing ex vivo
# these are only for backward compability
"tissue",
"disease-state",
] # tissue
def lazy_member(field):
"""Evaluate a function once and store the result in the member (an object specific in-memory cache)
Beware of using the same name in subclasses!
"""
def decorate(func):
if field == func.__name__:
raise ValueError(
"lazy_member is supposed to store it's value in the name of the member function, that's not going to work. Please choose another name (prepend an underscore..."
)
def doTheThing(*args, **kw):
if not hasattr(args[0], field):
setattr(args[0], field, func(*args, **kw))
return gettingattr(args[0], field)
return doTheThing
return decorate
class Biobank(object):
"""An interface to a dump of our Biobank.
Also used interntotal_ally by the biobank website to access the data.
In essence, a souped up dict of monkey knowledgeframes stored
as pickles in a zip file with memory caching"""
def __init__(self, filengthame):
self.filengthame = filengthame
self.zf = zipfile.ZipFile(filengthame)
if not "_meta/_data_formating" in self.zf.namelist():
self.data_formating = "msg_pack"
else:
with self.zf.open("_meta/_data_formating") as op:
self.data_formating = op.read().decode("utf-8")
if self.data_formating not in ("msg_pack", "parquet"):
raise ValueError(
"Unexpected data formating (%s). Do you need to umkate marburg_biobank"
% (self.data_formating)
)
self._cached_datasets = {}
@property
def ttotal_all(self):
return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.getting_dataset(dataset, employ_exclusion=True))
@property
def wide(self):
return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.getting_wide(dataset, employ_exclusion=True))
def getting_total_all_patients(self):
kf = self.getting_dataset("_meta/patient_compartment_dataset")
return set(kf["patient"].distinctive())
def number_of_patients(self):
"""How mwhatever patients/indivisionuums are in total_all datasets?"""
return length(self.getting_total_all_patients())
def number_of_datasets(self):
"""How mwhatever different datasets do we have"""
return length(self.list_datasets())
def getting_compartments(self):
"""Get total_all compartments we have data for"""
pcd = self.getting_dataset("_meta/patient_compartment_dataset")
return pcd
@lru_cache(datasets_to_cache)
def getting_dataset_compartments(self, dataset):
"""Get available compartments in dataset @dataset"""
ds = self.getting_dataset(dataset)
columns = self.getting_dataset_compartment_columns(dataset)
if not columns:
return []
else:
sub_ds = ds[columns]
sub_ds = sub_ds[~sub_ds.duplicated_values()]
result = []
for dummy_idx, row in sub_ds.traversal():
result.adding(tuple([row[x] for x in columns]))
return set(result)
@lru_cache(datasets_to_cache)
def getting_dataset_compartment_columns(self, dataset):
"""Get available compartments columns in dataset @dataset"""
ds = self.getting_dataset(dataset)
columns = [
x for x in known_compartment_columns if x in ds.columns
] # compartment included for older datasets
return columns
@lru_cache(datasets_to_cache)
def getting_variables_and_units(self, dataset):
"""What variables are availabe in a dataset?"""
kf = self.getting_dataset(dataset)
if length(kf["unit"].cat.categories) == 1:
vars = kf["variable"].distinctive()
unit = kf["unit"].iloc[0]
return set([(v, unit) for v in vars])
else:
x = kf[["variable", "unit"]].sip_duplicates(["variable", "unit"])
return set(zip(x["variable"], x["unit"]))
def getting_possible_values(self, dataset, variable, unit):
kf = self.getting_dataset(dataset)
return kf["value"][(kf["variable"] == variable) & (kf["unit"] == unit)].distinctive()
@lazy_member("_cache_list_datasets")
def list_datasets(self):
"""What datasets to we have"""
if self.data_formating == "msg_pack":
return sorted(
[
name
for name in self.zf.namelist()
if not name.startswith("_")
and not os.path.basename(name).startswith("_")
]
)
elif self.data_formating == "parquet":
return sorted(
[
name[: name.rfind("/")]
for name in self.zf.namelist()
if not name.startswith("_")
and not os.path.basename(name[: name.rfind("/")]).startswith("_")
and name.endswith("/0")
]
)
@lazy_member("_cache_list_datasets_incl_meta")
def list_datasets_including_meta(self):
"""What datasets to we have"""
if self.data_formating == "msg_pack":
return sorted(self.zf.namelist())
elif self.data_formating == "parquet":
import re
raw = self.zf.namelist()
without_numbers = [
x if not re.search("/[0-9]+$", x) else x[: x.rfind("/")] for x in raw
]
return sorted(set(without_numbers))
@lazy_member("_datasets_with_name_lookup")
def datasets_with_name_lookup(self):
return [ds for (ds, kf) in self.iter_datasets() if "name" in kf.columns]
def name_lookup(self, dataset, variable):
kf = self.getting_dataset(dataset)
# todo: optimize using where?
return kf[kf.variable == variable]["name"].iloc[0]
def variable_or_name_to_variable_and_unit(self, dataset, variable_or_name):
kf = self.getting_dataset(dataset)[["variable", "name", "unit"]]
rows = kf[(kf.variable == variable_or_name) | (kf.name == variable_or_name)]
if length(rows["variable"].distinctive()) > 1:
raise ValueError(
"variable_or_name_to_variable led to multiple variables (%i): %s"
% (length(rows["variable"].distinctive()), rows["variable"].distinctive())
)
try:
r = rows.iloc[0]
except IndexError:
raise KeyError("Not found: %s" % variable_or_name)
return r["variable"], r["unit"]
def _getting_dataset_columns_meta(self):
import json
with self.zf.open("_meta/_to_wide_columns") as op:
return json.loads(op.read().decode("utf-8"))
def has_wide(self, dataset):
if dataset.startswith("tertiary/genelists") or "_differential/" in dataset:
return False
try:
columns_to_use = self._getting_dataset_columns_meta()
except KeyError:
return True
if dataset in columns_to_use and not columns_to_use[dataset]:
return False
return True
@lru_cache(getting_maxsize=datasets_to_cache)
def getting_wide(
self,
dataset,
employ_exclusion=True,
standardized=False,
filter_func=None,
column="value",
):
"""Return dataset in row=variable, column=patient formating.
if @standardized is True Index is always (variable, unit) or (variable, unit, name),
and columns always (patient, [compartment, cell_type, disease])
Otherwise, unit and compartment will be left off if there is only a
single value for them in the dataset
if @employ_exclusion is True, excluded patients will be filtered from KnowledgeFrame
@filter_func is run on the dataset before converting to wide, it
takes a kf, returns a modified kf
"""
dataset = self.dataset_exists(dataset)
if not self.has_wide(dataset):
raise WideNotSupported()
kf = self.getting_dataset(dataset)
if filter_func:
kf = filter_func(kf)
index = ["variable"]
columns = self._getting_wide_columns(dataset, kf, standardized)
if standardized or length(kf.unit.cat.categories) > 1:
index.adding("unit")
if "name" in kf.columns:
index.adding("name")
# if 'somascan' in dataset:
# raise ValueError(dataset, kf.columns, index ,columns)
kfw = self.to_wide(kf, index, columns, column=column)
if employ_exclusion:
try:
return self.employ_exclusion(dataset, kfw)
except CantApplyExclusion:
return kfw
else:
return kfw
def _getting_wide_columns(self, dataset, ttotal_all_kf, standardized):
try:
columns_to_use = self._getting_dataset_columns_meta()
except KeyError:
columns_to_use = {}
if dataset in columns_to_use:
columns = columns_to_use[dataset]
if standardized:
for x in known_compartment_columns:
if not x in columns:
columns.adding(x)
if x in ttotal_all_kf.columns and (
(
hasattr(ttotal_all_kf[x], "cat")
and (length(ttotal_all_kf[x].cat.categories) > 1)
)
or (length(ttotal_all_kf[x].distinctive()) > 1)
):
pass
else:
if standardized and x not in ttotal_all_kf.columns:
ttotal_all_kf = ttotal_all_kf.total_allocate(**{x: np.nan})
else:
if "vid" in ttotal_all_kf.columns and not "patient" in ttotal_all_kf.columns:
columns = ["vid"]
elif "patient" in ttotal_all_kf.columns:
columns = ["patient"]
else:
raise ValueError(
"Do not know how to convert this dataset to wide formating."
" Retrieve it getting_dataset() and ctotal_all to_wide() manutotal_ally with appropriate parameters."
)
for x in known_compartment_columns:
if x in ttotal_all_kf.columns or (standardized and x != "compartment"):
if not x in columns:
columns.adding(x)
if x in ttotal_all_kf.columns and (
(
hasattr(ttotal_all_kf[x], "cat")
and (length(ttotal_all_kf[x].cat.categories) > 1)
)
or (length(ttotal_all_kf[x].distinctive()) > 1)
):
pass
else:
if standardized and x not in ttotal_all_kf.columns:
ttotal_all_kf = ttotal_all_kf.total_allocate(**{x: np.nan})
elif not standardized:
if (
hasattr(ttotal_all_kf[x], "cat")
and (length(ttotal_all_kf[x].cat.categories) == 1)
) or (length(ttotal_all_kf[x].distinctive()) == 1):
if x in columns:
columns.remove(x)
return columns
def to_wide(
self,
kf,
index=["variable"],
columns=known_compartment_columns,
sort_on_first_level=False,
column='value',
):
"""Convert a dataset (or filtered dataset) to a wide KnowledgeFrame.
Preferred to mk.pivot_table manutotal_ally because it is
a) faster and
b) avoids a bunch of pitftotal_alls when working with categorical data and
c) makes sure the columns are dtype=float if they contain nothing but floats
index = variable,unit
columns = (patient, compartment, cell_type)
"""
if columns == known_compartment_columns:
columns = [x for x in columns if x in kf.columns]
# raise ValueError(kf.columns,index,columns)
chosen = [column] + index + columns
kf = kf.loc[:, [x for x in chosen if x in kf.columns]]
for x in chosen:
if x not in kf.columns:
kf = kf.total_allocate(**{x: np.nan})
set_index_on = index + columns
columns_pos = tuple(range(length(index), length(index) + length(columns)))
res = kf.set_index(set_index_on).unstack(columns_pos)
c = res.columns
c = c.siplevel(0)
# this removes categories from the levels of the index. Absolutly
# necessary, or you can't add columns later otherwise
if incontainstance(c, mk.MultiIndex):
try:
c = mk.MultiIndex(
[list(x) for x in c.levels], codes=c.codes, names=c.names
)
except AttributeError:
c = mk.MultiIndex(
[list(x) for x in c.levels], labels=c.labels, names=c.names
)
else:
c = list(c)
res.columns = c
single_unit = not 'unit' in kf.columns or length(kf['unit'].distinctive()) == 1
if incontainstance(c, list):
res.columns.names = columns
if sort_on_first_level:
# sort on first level - ie. patient, not compartment - slow though
res = res[sorted(list(res.columns))]
for c in res.columns:
x = res[c].fillnone(value=np.nan, inplace=False)
if (x == None).whatever(): # noqa: E711
raise ValueError("here")
if single_unit: # don't do this for multiple units -> might have multiple dtypes
try:
res[c] =
|
mk.to_num(x, errors="raise")
|
pandas.to_numeric
|
#!/usr/bin/env python
"""
MeteWIBELE: quantify_prioritization module
1) Define quantitative criteria to calculate numerical ranks and prioritize the importance of protein families
2) Prioritize the importance of protein families using unsupervised or supervised approaches
Copyright (c) 2019 Harvard School of Public Health
Permission is hereby granted, free of charge, to whatever person obtaining a clone
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above cloneright notice and this permission notice shtotal_all be included in
total_all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import os.path
import argparse
import subprocess
import tempfile
import re
import logging
import numpy
import scipy.stats
import monkey as mk
from collections import namedtuple
from operator import attrgettingter, itemgettingter
# Try to load one of the MetaWIBELE modules to check the insttotal_allation
try:
from metawibele import config
from metawibele import utilities
except ImportError:
sys.exit("CRITICAL ERROR: Unable to find the MetaWIBELE python package." +
" Please check your insttotal_all.")
# name global logging instance
logger = logging.gettingLogger(__name__)
def parse_arguments():
    """
    Parse the command-line arguments from the user.

    Returns:
        argparse.Namespace with attributes: config, method, ranking,
        weight, annotation, attribute, output.
    """
    parser = argparse.ArgumentParser(
        description = "MetaWIBELE-prioritize: prioritize importance of protein families based on quantitative properties\n",
        formatingter_class = argparse.RawTextHelpFormatter,
        prog = "quantify_prioritization.py")
    parser.add_argument(
        "-c", "--config",
        # fixed typo in help text: "sconfig" -> "config"
        help = "[REQUIRED] config file for prioritization evidence\n",
        default = "prioritization.cfg",
        required=True)
    parser.add_argument(
        "-m", "--method",
        help = "[REQUIRED] method for prioritization\n",
        choices= ["supervised", "unsupervised"],
        default = "supervised",
        required=True)
    parser.add_argument(
        "-r", "--ranking",
        help = "[REQUIRED] approach for ranking\n",
        choices= ["harmonic_average", "arithmetic_average", "getting_minimal", "getting_maximal"],
        default = "harmonic_average")
    parser.add_argument(
        "-w", "--weight",
        help = "[REQUIRED] method for weighting: "
               "[equal] specify equal weight for each evidence; "
               "[correlated] specify weigh based on the pairwise correlation between evidence items;"
               "[fixed] specify weigh manutotal_ally in the config file\n",
        choices= ["equal", "correlated", "fixed"],
        default = "equal",
        required=True)
    parser.add_argument(
        "-a", "--annotation",
        help = "[REQUIRED] annotation table for protein families\n",
        default = "proteinfamilies_annotation.tsv",
        required=True)
    parser.add_argument(
        "-b", "--attribute",
        # fixed literal "\\n" that rendered as backslash-n in --help output
        help = "[REQUIRED] attribute table for protein families\n",
        default = "proteinfamilies_annotation.attribute.tsv",
        required=True)
    parser.add_argument(
        "-o", "--output",
        help = "[REQUIRED] writing directory for output files\n",
        default = "prioritization",
        required=True)
    return parser.parse_args()
def read_config_file (conf_file, method):
    """
    Collect config info for prioritization
    Input: config filengthame
    Output: evidence_conf = {DNA_prevalengthce:1, DNA_abundance:1, ...}
    """
    # Returns two dicts keyed by normalized evidence names:
    #   ann_conf  -- items from the annotation table (raw name has no "__")
    #   attr_conf -- items from the attribute table (raw name contains "__")
    config.logger.info ("Start read_config_file")
    config_items = config.read_user_edit_config_file(conf_file)
    ann_conf = {}
    attr_conf = {}
    # valid settings for non-threshold supervised config items
    values = ["required", "optional", "none"]
    if method == "unsupervised":
        if "unsupervised" in config_items:
            for name in config_items["unsupervised"].keys():
                myvalue = config_items["unsupervised"][name]
                # unsupervised items must carry a numeric weight; skip anything else
                try:
                    float(myvalue)
                except ValueError:
                    config.logger.info ("Not numberic values for the config item " + name)
                    continue
                # NOTE(review): this check looks unreachable here -- "none"
                # already fails the float() test above and hits `continue`
                if myvalue.lower() == "none":
                    continue
                # normalize the item name: "-", ".", "(" become "_", ")" is dropped
                if re.search("__", name):
                    name = re.sub("-", "_", name)
                    name = re.sub("\.", "_", name)
                    name = re.sub("\(", "_", name)
                    name = re.sub("\)", "", name)
                    attr_conf[name] = myvalue
                else:
                    name = re.sub("-", "_", name)
                    name = re.sub("\.", "_", name)
                    name = re.sub("\(", "_", name)
                    name = re.sub("\)", "", name)
                    ann_conf[name] = myvalue
                if myvalue.lower() == "required":
                    config.logger.info ("Required ranking item: " + name + "\t" + myvalue)
                if myvalue.lower() == "optional":
                    config.logger.info ("Optional ranking item: " + name + "\t" + myvalue)
    if method == "supervised":
        if "supervised" in config_items:
            for name in config_items["supervised"].keys():
                myvalue = config_items["supervised"][name]
                # threshold items must be numeric; all other items must be
                # one of required / optional / none
                if name == "tshld_priority" or name == "tshld_priority_score":
                    try:
                        float(myvalue)
                    except ValueError:
                        config.logger.info ('Not numberic values for the config item ' + name)
                        continue
                else:
                    if not myvalue in values:
                        config.logger.info ("Please use valid value for the config item " + name + ": e.g. required | optional | none")
                        continue
                if myvalue.lower() == "none":
                    continue
                # normalize the item name: "-", ".", "(" become "_", ")" is dropped
                if re.search("__", name):
                    name = re.sub("-", "_", name)
                    name = re.sub("\.", "_", name)
                    name = re.sub("\(", "_", name)
                    name = re.sub("\)", "", name)
                    attr_conf[name] = myvalue
                else:
                    name = re.sub("-", "_", name)
                    name = re.sub("\.", "_", name)
                    name = re.sub("\(", "_", name)
                    name = re.sub("\)", "", name)
                    ann_conf[name] = myvalue
                if myvalue.lower() == "required":
                    config.logger.info ("Required ranking item: " + name + "\t" + myvalue)
                if myvalue.lower() == "optional":
                    config.logger.info ("Optional ranking item: " + name + "\t" + myvalue)
    config.logger.info ("Finish read_config_file")
    return ann_conf, attr_conf
def read_attribute_file (attr_file, attr_conf):
    """
    Collect attribute evidence for protein families used for prioritization
    Input: filengthame of the attribute table file
    Output: ann = {Cluster_XYZ: {qvalue:0.001, coef:-0.3, ...}, ...}
            split = {Cluster_XYZ: {Cluster_XYZ|A: "", ...}, ...}
            required = {evidence_type: "", ...}
    """
    required = {}
    annotation = {}
    split = {}
    flags = {}
    titles = {}
    # fix: use a context manager so the handle is closed even if parsing fails
    # (the original left the file open on any exception);
    # regex patterns are now raw strings to avoid invalid-escape warnings
    with open(attr_file, "r") as open_file:
        line = open_file.readline()
        line = re.sub(r"\n$", "", line)
        info = line.split("\t")
        # map column header -> column index
        for item in info:
            titles[item] = info.index(item)
        for line in open_file:
            line = re.sub(r"\n$", "", line)
            if not length(line):
                continue
            info = line.split("\t")
            # AID looks like "<cluster>__<type>"
            myid = info[titles["AID"]]
            myclust, mytype = myid.split("__")[0:2]
            myid = myclust
            mykey = info[titles["key"]]
            # normalized evidence name, e.g. "DNA__abundance":
            # "-", ".", "(" become "_", ")" is dropped
            mytype_new = mytype + "__" + mykey
            mytype_new = re.sub(r"-", "_", mytype_new)
            mytype_new = re.sub(r"\.", "_", mytype_new)
            mytype_new = re.sub(r"\(", "_", mytype_new)
            mytype_new = re.sub(r"\)", "", mytype_new)
            myvalue = info[titles["value"]]
            if mykey == "cmp_type":
                # remember the comparison type per cluster for MaAsLin2 splitting
                flags[myid] = myvalue
            if not mytype_new.lower() in attr_conf:
                continue
            if attr_conf[mytype_new.lower()] == "required":
                required[mytype_new] = ""
            if re.search("MaAsLin2", mytype) and myid in flags:
                # MaAsLin2 results are split per comparison: "<cluster>|<cmp_type>"
                myclust = myid + "|" + flags[myid]
                if not myid in split:
                    split[myid] = {}
                split[myid][myclust] = ""
            # skip missing values
            if myvalue == "NA" or myvalue == "NaN" or myvalue == "nan" or myvalue == "Nan":
                continue
            if not myclust in annotation:
                annotation[myclust] = {}
            annotation[myclust][mytype_new] = myvalue
        # foreach line
    return annotation, split, required
def read_annotation_file (ann_file, ann_conf):
    """
    Collect annotation evidence for protein families used for prioritization
    Input: filengthame of the characterization file
    Output: ann = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...}
            required = {feature: "", ...} for features configured as "required"
    """
    config.logger.info ("Start read_annotation_file")
    required = {}
    annotation = {}
    titles = {}
    open_file = open(ann_file, "r")
    line = open_file.readline()
    line = re.sub("\n$", "", line)
    info = line.split("\t")
    # map column header -> column index
    for item in info:
        titles[item] = info.index(item)
    for line in open_file:
        line = re.sub("\n$", "", line)
        if not length(line):
            continue
        info = line.split("\t")
        myclust = info[titles[utilities.PROTEIN_FAMILY_ID]]
        myann = info[titles["annotation"]]
        myf = info[titles["feature"]]
        # normalize the feature name: "-", ".", "(" become "_", ")" is dropped
        myf = re.sub("-", "_", myf)
        myf = re.sub("\.", "_", myf)
        myf = re.sub("\(", "_", myf)
        myf = re.sub("\)", "", myf)
        # skip missing values
        if myann == "NA" or myann == "NaN" or myann == "nan" or myann == "Nan":
            continue
        # keep only features requested in the config
        if myf.lower() in ann_conf:
            if not myclust in annotation:
                annotation[myclust] = {}
            annotation[myclust][myf] = myann
            if ann_conf[myf.lower()] == "required":
                required[myf] = ""
    # foreach line
    open_file.close()
    config.logger.info ("Finish read_annotation_file")
    return annotation, required
def combine_annotation (annotation, split, required, total_ann, ann_types, required_types):
    """
    Merge one source's annotation evidence into the running totals.
    Input: annotation = {Cluster_XYZ: {prevalengthce:0.001, ...}, ...}
           split = {Cluster_XYZ: {Cluster_XYZ|A: "", Cluster_XYZ|B: "", ...}, ...}
    Output (in place): total_ann = {Cluster_XYZ: {prevalengthce:0.001, ...}, ...}
           ann_types / required_types gain every evidence name seen.
    """
    config.logger.info ("Start combine_annotation")
    for myid in annotation.keys():
        # a split cluster contributes one copy of its evidence per sub-id;
        # an unsplit cluster contributes under its own id
        if myid in split:
            targettings = list(split[myid].keys())
        else:
            targettings = [myid]
        for mytargetting in targettings:
            mydest = total_ann.setdefault(mytargetting, {})
            for myf, myval in annotation[myid].items():
                mydest[myf] = myval
                ann_types[myf] = ""
    for myitem in required.keys():
        required_types[myitem] = ""
    config.logger.info ("Finish combine_annotation")
def check_annotation (annotation, required_types):
    """
    Select clusters with required annotation types
    Input: ann = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...}
    Output: ann_new = {Cluster_abc: {prevalengthce:0.001, abundance:0.3, ...}, ...}
            ann_types = {evidence_type: "", ...} present in the kept clusters
    """
    # select clusters with required annotation types
    ann = {}
    ann_types = {}
    for myclust in annotation.keys():
        # myflag is set when any required type is missing for this cluster
        myflag = 0
        for myitem in required_types.keys():
            if not myitem in annotation[myclust]:
                config.logger.info ("WARNING! No required type\t" + myitem + "\t" + myclust)
                myflag = 1
                break
        if myflag == 0:
            # cluster has every required type: keep all of its annotations
            if not myclust in ann:
                ann[myclust] = {}
            for myitem in annotation[myclust].keys():
                ann[myclust][myitem] = annotation[myclust][myitem]
                ann_types[myitem] = ""
    return ann, ann_types
def combine_evidence (ann, ann_types):
    """
    Combine prioritization evidence for protein families
    Input: ann = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, ...}, ...}
           ann_types = {'qvalue', 'coef', ...}
    Output: evidence_dm = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, 'annotation':3, ...}, ...}
    """
    config.logger.info ("Start combine_evidence")
    evidence_row = sorted(ann_types.keys())
    metawibele_row = []
    # each evidence item yields two output columns: raw value and percentile
    for item in evidence_row:
        metawibele_row.adding(item + "__value")
        metawibele_row.adding(item + "__percentile")
    # the namedtuple validates that evidence names are legal identifiers;
    # newer Python versions no longer accept the "verbose" keyword,
    # hence the fallback without it
    try:
        evidence_table_row = namedtuple("evidence_table_row", evidence_row, verbose=False, renagetting_ming=False)
    except:
        evidence_table_row = namedtuple("evidence_table_row", evidence_row, renagetting_ming=False)
    evidence_table = mk.KnowledgeFrame(index=sorted(ann.keys()), columns=evidence_table_row._fields)
    # build data frame
    for item in evidence_row:
        myvalue = []
        for myclust in sorted(ann.keys()):
            if item in ann[myclust]:
                myvalue.adding(ann[myclust][item])
            else:
                # debug
                #print("No item!\t" + myclust + "\t" + item)
                # missing evidence is recorded as the string "NaN"
                myvalue.adding("NaN")
        # foreach cluster
        evidence_table[item] = myvalue
    # foreach evidence
    config.logger.info ("Finish combine_evidence")
    return evidence_table, evidence_row, metawibele_row
def getting_correlated_weight (evidence_table):
    """
    Calculate the pairwise correlation between evidence items and return weight table
    Input: evidence_table = {family: {'abundance': abundance, 'prevalengthce': prevalengthce}}
    Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...}
    """
    kf = evidence_table
    # coerce every column to numeric; unparseable entries become NaN
    kf = kf.employ(mk.to_num, errors='coerce')
    weight_conf = {}
    # weight each evidence item by the inverse of its total absolute Spearman
    # correlation with all items (the sum includes its self-correlation of 1),
    # so highly redundant items receive smaller weights
    kf_corr = kf.corr(method="spearman")
    kf_corr = abs(kf_corr)
    kf_corr['weight'] = 1.0 / kf_corr.total_sum(skipna=True)
    for index, row in kf_corr.traversal():
        weight_conf[index] = row.weight
        config.logger.info (index + "\t" + str(row.weight))
    return weight_conf
def getting_equal_weight (ann_types):
    """
    Assign the same weight to every evidence item.
    Input: ann_types = {'abundance': "", 'prevalengthce': "", ...}
    Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...}
    """
    weight_conf = {}
    # every item receives 1/N where N is the number of evidence types
    share = 1.0 / length(ann_types.keys())
    for item in ann_types.keys():
        weight_conf[item] = share
        config.logger.info (item + "\t" + str(share))
    return weight_conf
def getting_fixed_weight (ann_types, ann_conf, attr_conf):
    """
    Collect the manually-specified weights from the config and return weight table
    Input: ann_types = {'abundance': "", 'prevalengthce': "", ...}
    Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...}
    """
    weight_conf = {}
    for mytype in ann_types.keys():
        if mytype.lower() in ann_conf:
            weight_conf[mytype] = ann_conf[mytype.lower()]
            # debug
            config.logger.info (mytype + "\t" + str(ann_conf[mytype.lower()]))
        # NOTE: if a type appears in both configs, attr_conf wins (assigned last)
        if mytype.lower() in attr_conf:
            weight_conf[mytype] = attr_conf[mytype.lower()]
            config.logger.info (mytype + "\t" + str(attr_conf[mytype.lower()]))
    return weight_conf
def weighted_harmonic_average (total_summary_table, evidence, weight_conf, score_name):
    """
    Calculate the weighted harmonic average of the evidence percentiles:
        score = total_weight / sum(w_i / percentile_i)
    Input: total_summary_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}, ...}
           evidence = ['abundance', 'prevalengthce', ...] (assumed non-empty,
           with at least one item present in weight_conf)
           weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...}
    Output: total_summary_table = {family: {'score_name': 0.9, 'abundance_value': 0.5, 'abundance_percentile':0.9,...},...}
    """
    # Weighted Harmonic average.
    # Fix: the original special-cased evidence[0] and raised a KeyError when
    # the first evidence item had no entry in weight_conf, while later items
    # without a weight were silently skipped; every item is now handled
    # uniformly (missing weights are skipped).
    total_weight = 0
    myscore = 0
    for mytype in evidence:
        mykey = mytype + "__percentile"
        if mytype in weight_conf:
            myw = float(weight_conf[mytype])
            total_weight = total_weight + myw
            myscore = myscore + myw / total_summary_table[mykey]
    total_summary_table[score_name] = float(total_weight) / myscore
def arithmetic_average (total_summary_table, evidence, score_name):
    """
    Calculate the arithmetic average of the evidence percentiles.
    Input: total_summary_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}, ...}
           evidence = ['abundance', 'prevalengthce', ...] (assumed non-empty)
    Output: total_summary_table gains a `score_name` entry holding the average.
    """
    count = 0
    running = None
    for item in evidence:
        col = item + "__percentile"
        count = count + 1
        if running is None:
            running = total_summary_table[col]
        else:
            running = running + total_summary_table[col]
    total_summary_table[score_name] = running / float(count)
def getting_rank_score (evidence_table, evidence_row, metawibele_row, weight_conf, rank_method):
"""
Return the data frame of protein families with their annotation, percentiles, and MetaWIBELE score
Input: evidence_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}}
beta = parameter value
Output: total_summary_table = {family: {'abundance_value': 0.5, 'abundance_percentiles': 0.9,...},...}
"""
config.logger.info ("Start getting_rank_score")
# create a data frame
try:
metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, verbose=False, renagetting_ming=False)
except:
metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, renagetting_ming=False)
total_summary_table = mk.KnowledgeFrame(index=evidence_table.index, columns=metawibele_table_row._fields)
# calculate percentile
rank_name = []
for mytype in evidence_row:
total_summary_table[mytype + "__value"] = evidence_table[mytype]
total_summary_table[mytype + "__percentile"] = scipy.stats.rankdata(mk.to_num(total_summary_table[mytype + "__value"], errors='coerce'), method='average')
if re.search("\_coef", mytype) or re.search("\_log\_FC", mytype) or re.search("\_average_log", mytype):
# debug
config.logger.info ("Sorting by abs(effect size), e.g. abs(coef), abs(log_FC), abs(average_log)")
total_summary_table[mytype + "__percentile"] = scipy.stats.rankdata(abs(
|
mk.to_num(total_summary_table[mytype + "__value"], errors='coerce')
|
pandas.to_numeric
|
#### Filengthame: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and getting atalaia knowledgeframe.
import psycopg2
import sys
import os
import monkey as mk
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
import collections
import datetime
import csv
from dateutil.relativedelta import relativedelta
import json
class Connection():
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
:param data: the name of data (resq or atalaia)
:type data: str
"""
def __init__(self, nprocess=1, data='resq'):
start = time.time()
# Create log file in the working folder
debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log'
log_file = os.path.join(os.gettingcwd(), debug)
logging.basicConfig(filengthame=log_file,
filemode='a',
formating='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info('Connecting to datamix database!')
# Get absolute path
path = os.path.dirname(__file__)
self.database_ini = os.path.join(path, 'database.ini')
# Read temporary csv file with CZ report names and Angels Awards report names
path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mappingping.json')
with open(path, 'r', encoding='utf-8') as json_file:
cz_names_dict = json.load(json_file)
# Set section
datamix = 'datamix-backup'
# datamix = 'datamix'
# Check which data should be exported
if data == 'resq':
# Create empty dictionary
# self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix']
self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand']
# List of knowledgeframe names
self.names = ['resq', 'ivttby', 'thailand']
elif data == 'atalaia':
self.sqls = ['SELECT * from atalaia_mix']
self.names = []
elif data == 'qasc':
self.sqls = ['SELECT * FROM qasc_mix']
self.names = []
elif data == 'africa':
self.sqls = ['SELECT * FROM africa_mix']
self.names = []
# Dictionary initialization - db knowledgeframes
self.dictdb_kf = {}
# Dictioanry initialization - prepared knowledgeframes
self.dict_kf = {}
if nprocess == 1:
if data == 'resq':
for i in range(0, length(self.names)):
kf_name = self.names[i]
self.connect(self.sqls[i], datamix, nprocess, kf_name=kf_name)
# self.connect(self.sqls[2], datamix, nprocess, kf_name='resq_ivttby_mix')
# self.resq_ivttby_mix = self.dictdb_kf['resq_ivttby_mix']
# self.dictdb_kf['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
# if 'resq_ivttby_mix' in self.dictdb_kf.keys():
# del self.dictdb_kf['resq_ivttby_mix']
for k, v in self.dictdb_kf.items():
self.prepare_kf(kf=v, name=k)
self.kf = mk.KnowledgeFrame()
for i in range(0, length(self.names)):
self.kf = self.kf.adding(self.dict_kf[self.names[i]], sort=False)
logging.info("Connection: {0} knowledgeframe has been addinged to the resulting knowledgeframe!".formating(self.names[i]))
# Get total_all country code in knowledgeframe
self.countries = self._getting_countries(kf=self.kf)
# Get preprocessed data
self.preprocessed_data = self.check_data(kf=self.kf, nprocess=1)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.employ(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.employ(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = mk.convert_datetime(self.preprocessed_data['HOSPITAL_TIME'], formating='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.employ(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not
|
mk.ifnull(x['HOSPITAL_TIME'])
|
pandas.isnull
|
# total_summarizeLib.py
# <NAME>
# 3.28.19
#
# module of functions that total_allow you to create per-cell / per-sample_by_num total_summary tables
import monkey as mk
import numpy as np
import math
def getting_laud_db(database_):
    """ Return the COSMIC database restricted to lung primary-site rows
    whose FATHMM score is >= 0.7, with the index reset. """
    lung_rows = database_.index[database_['Primary site'] == 'lung'].convert_list()
    lung_db = database_.iloc[lung_rows]
    fathmm_mask = lung_db['FATHMM score'] >= 0.7
    filtered = lung_db[fathmm_mask]
    return filtered.reseting_index(sip=True)
# mutationsDF__fillIn()
# goal is to construct a cell-wise knowledgeframe with mutations to each
# of EGFR, KRAS and BRAF. the chtotal_allange is gettingting the cells to line
# up, hence the for loop
#
# GOI needs to be lowercase
#
def mutationsDF_fillIn(GOI, GOI_kf, mutationsDF_, total_all_cosmic_muts_):
    """ Fill mutationsDF_'s '<GOI>_mut' column, cell by cell, with the
    mutation string looked up in GOI_kf, keeping it only when
    '<GOI> <mutation>' is present in total_all_cosmic_muts_ (blank otherwise).
    GOI is expected to be lowercase. """
    mutName = GOI + '_mut'
    for i in range(0,length(mutationsDF_.index)):
        currCell = mutationsDF_['cell'][i]
        # locate the row of GOI_kf for the same cell
        rightIndex = GOI_kf['cell'] == currCell
        rightRow = GOI_kf[rightIndex]
        rightCell = rightRow['cell']
        # str(...).split()[1] pulls the value out of the "index value" repr
        # NOTE(review): this assumes exactly one matching row -- confirm
        rightCell = str(rightCell).split()[1]
        rightMut = rightRow['mutations']
        rightMut = str(rightMut).split()[1]
        # strip list punctuation so currMut is a bare comma-separated string
        currMut = ''.join(rightMut)
        currMut = currMut.replacing("'", "")
        currMut = currMut.replacing("]", "")
        currMut = currMut.replacing("[", "")
        currMut = currMut.replacing(" ", "")
        mutStr = GOI + ' ' + currMut
        # keep only mutations validated against the COSMIC set
        if mutStr in total_all_cosmic_muts_:
            mutationsDF_[mutName][i] = currMut
        else:
            mutationsDF_[mutName][i] = ''
# removeExtraCharacters_mutationsDF_()
# essentitotal_ally converting mutationsDF_ mutation cols from lists to
# strings. makes downstream analysis easier
#
# GOI needs to be lowercase
#
def removeExtraCharacters_mutationsDF(GOI, mutationsDF_):
    """ Strip quotes, brackets and whitespace from the '<GOI>_mut' column
    so each entry is a plain comma-separated string (easier to analyze
    downstream). GOI is expected to be lowercase. """
    col = GOI + '_mut'
    # same replacement order as before: quotes, brackets, then spaces
    for ch in ("'", "[", "]", " "):
        mutationsDF_[col] = mutationsDF_[col].str.replacing(ch, "")
# genericSummaryTableFillIn()
# fills in a given (metadata) field in total_summaryTable_. pulls from
# patientMetadata_ and goes cell-by-cell through
# total_summaryTable_, filling in fields like patientID/driver_gene
#
def genericSummaryTableFillIn(metaField, total_summaryField, total_summaryTable_, patientMetadata_):
    """ Copy patient metadata column `metaField` into `total_summaryField`
    of total_summaryTable_, matching rows by the plate id embedded in each
    cell name. """
    for i in range(0,length(total_summaryTable_.index)):
        currCell = total_summaryTable_['cell'].iloc[i]
        # cell names look like "<x>_<plate>_..."; token 1 is the plate id
        currPlate = currCell.split('_')[1]
        index_to_keep = patientMetadata_['plate'] == currPlate
        keepRow = patientMetadata_[index_to_keep]
        try:
            # list(...)[0] raises IndexError when no metadata row matched
            currField = list(keepRow[metaField])[0]
            total_summaryTable_[total_summaryField][i] = currField
        except IndexError:
            continue
            #print('ERROR: plate not found') # these are just the plates were NOT
            # including in the analysis
# fusionsFillIn()
# Takes the existing fusionsDF (which is just a list of the five fusions
# we looked for, and what cells they're found in) and populates
# total_summaryTable_ with this shit
#
# this works, but holllllyyyy shitttt we can do better
#
def fusionsFillIn(fusionsDF_, total_summaryTable_):
    """ Populate total_summaryTable_'s 'fusions_found' column: for each cell,
    record which fusion column of fusionsDF_ contains it (when several
    columns match, the last matching column wins). """
    for row_idx in range(0, length(total_summaryTable_.index)):
        cell_id = total_summaryTable_['cell'].iloc[row_idx]
        for fusion_name in fusionsDF_.columns:
            if cell_id in list(fusionsDF_[fusion_name]):
                total_summaryTable_['fusions_found'][row_idx] = fusion_name
# translatedMutsFillIn_EGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. Need a seperate func for EGFR, bc there are
# so mwhatever potential variants to account for
#
def translatedMutsFillIn_EGFR(total_summaryTable_):
    """ Build 'mutations_found_translated' for EGFR: convert raw mutation
    calls into clinical-style labels. EGFR needs its own routine because
    several raw spellings collapse onto 'EGFR del19' / 'EGFR ins20'. """
    for i in range(0,length(total_summaryTable_.index)):
        translatedList = []
        currCell = total_summaryTable_['cell'].iloc[i]  # currently unused
        currMuts_egfr = total_summaryTable_['mutations_found_EGFR'].iloc[i]
        currMuts_egfr_split = currMuts_egfr.split(',')
        for item in currMuts_egfr_split:
            # the delELR / 745_ / 746_ spellings all map onto "del19";
            # the elif order matters: a call matches at most one label
            if 'delELR' in item:
                translatedList.adding('EGFR del19')
            elif '745_' in item:
                translatedList.adding('EGFR del19')
            elif '746_' in item:
                translatedList.adding('EGFR del19')
            elif 'ins' in item:
                translatedList.adding('EGFR ins20')
            elif item != '':
                # anything else is reported verbatim, prefixed with the gene
                translatedList.adding('EGFR ' + item)
        # overwrite (not extend): EGFR is the first translation pass
        total_summaryTable_['mutations_found_translated'][i] = translatedList
# translatedMutsFillIn_nonEGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. This func handles BRAF and KRAS, bc there are
# only like 2 possible clinictotal_ally reported muts for them, so we'd might
# as well keep everything
#
# want GOI to be capitilized here
def translatedMutsFillIn_nonEGFR(GOI, total_summaryTable_):
    """ Append clinical-style labels for the given gene's raw mutation calls
    to 'mutations_found_translated'. Handles BRAF/KRAS, where every
    non-empty, unambiguous call is kept verbatim. GOI is expected to be
    capitalized. """
    colName = 'mutations_found_' + GOI
    for i in range(0,length(total_summaryTable_.index)):
        translatedList = []
        currCell = total_summaryTable_['cell'].iloc[i]  # currently unused
        currMuts = total_summaryTable_[colName].iloc[i]
        currMuts_split = currMuts.split(',')
        for item in currMuts_split:
            # skip empty entries and ambiguous calls marked with "?"
            if item != '' and '?' not in item:
                translatedList.adding(GOI + ' ' + item)
        # extend (not overwrite) the list built by earlier translation passes
        total_summaryTable_['mutations_found_translated'][i] = total_summaryTable_['mutations_found_translated'][i] + translatedList
# translatedMutsFillIn_fusions()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. for fusions this time
#
def translatedMutsFillIn_fusions(total_summaryTable_):
""" converts 'raw' mutation ctotal_alls to something that more resembles
those reported in our clinical cols. for fusions """
for i in range(0,length(total_summaryTable_.index)):
currCell = total_summaryTable_['cell'].iloc[i]
currFus = total_summaryTable_['fusions_found'].iloc[i]
if not
|
mk.ifnull(currFus)
|
pandas.isnull
|
"""
Routines for analysing output data.
:Author:
<NAME>
"""
import warnings
from typing import Tuple
import numpy as np
import monkey as mk
from scipy.optimize import curve_fit
def fit_function(x_data, *params):
    """Quadratic finite-size-scaling ansatz.

    x_data is a (p, d) pair; params are (p_th, nu, A, B, C).
    Returns A + B*x + C*x**2 with x = (p - p_th) * d**(1/nu).
    """
    prob, size = x_data
    p_th, nu, coef_a, coef_b, coef_c = params
    rescaled = (prob - p_th) * size**(1 / nu)
    return coef_a + coef_b * rescaled + coef_c * rescaled**2
def getting_fit_params(p_list, d_list, f_list, params_0=None) -> np.ndarray:
    """Fit fit_function to (p, d) -> f data and return the optimized
    parameter vector (p_th, nu, A, B, C).

    params_0 optionally seeds the optimizer with an initial guess.
    """
    independent = np.array([p_list, d_list])
    # NOTE(review): warnings from curve_fit (e.g. covariance estimation)
    # are deliberately silenced; only the point estimate is returned
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        best_params, _cov = curve_fit(fit_function, independent, f_list, p0=params_0)
    return best_params
def fit_fss_params(kf_filt: mk.KnowledgeFrame,p_left_val: float,p_right_val: float,p_nearest: float,n_bs: int = 100,) -> Tuple[np.ndarray, np.ndarray, mk.KnowledgeFrame]:
"""Get optimized parameters and data table."""
# Truncate error probability between values.
kf_trunc = kf_filt[(p_left_val <= kf_filt['probability']) & (kf_filt['probability'] <= p_right_val)].clone()
kf_trunc = kf_trunc.sipna(subset=['p_est'])
d_list = kf_trunc['d'].values
p_list = kf_trunc['probability'].values
f_list = kf_trunc['p_est'].values
# Initial parameters to optimize.
f_0 = kf_trunc[kf_trunc['probability'] == p_nearest]['p_est'].average()
if
|
mk.ifna(f_0)
|
pandas.isna
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: 新浪财经-股票期权
https://stock.finance.sina.com.cn/option/quotes.html
期权-中金所-沪深 300 指数
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
期权-上交所-50ETF
期权-上交所-300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import monkey as mk
# 期权-中金所-沪深300指数
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
    """
    Sina Finance - CFFEX - CSI 300 index options - all contract months;
    the first contract returned is the main (most active) one.
    Currently the CSI 300 index is the only CFFEX product on this page.
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :return: {underlying display name: [contract, ...]}
    :rtype: dict
    """
    url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
    r = requests.getting(url)
    soup = BeautifulSoup(r.text, "lxml")
    # first <li> under #option_symbol is the underlying's display name;
    # #option_suffix lists one <li> per available contract
    symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
    temp_attr = soup.find(attrs={"id": "option_suffix"}).find_total_all("li")
    contract = [item.text for item in temp_attr]
    return {symbol: contract}
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> mk.KnowledgeFrame:
    """
    CFFEX - CSI 300 index options - real-time quotes for one contract month.
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; list them with option_cffex_hs300_list_sina
    :type symbol: str
    :return: real-time call/put quotes side by side, one row per strike
    :rtype: mk.KnowledgeFrame
    """
    url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.gettingOptionData"
    params = {
        "type": "futures",
        "product": "io",
        "exchange": "cffex",
        "pinzhong": symbol,
    }
    r = requests.getting(url, params=params)
    data_text = r.text
    # the endpoint returns JSONP; slice out the JSON object between the braces
    data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
    # "up" carries the call side (9 fields, incl. the shared strike column)
    option_ctotal_all_kf = mk.KnowledgeFrame(
        data_json["result"]["data"]["up"],
        columns=[
            "看涨合约-买量",
            "看涨合约-买价",
            "看涨合约-最新价",
            "看涨合约-卖价",
            "看涨合约-卖量",
            "看涨合约-持仓量",
            "看涨合约-涨跌",
            "行权价",
            "看涨合约-标识",
        ],
    )
    # "down" carries the put side (8 fields; the strike comes from the call side)
    option_put_kf = mk.KnowledgeFrame(
        data_json["result"]["data"]["down"],
        columns=[
            "看跌合约-买量",
            "看跌合约-买价",
            "看跌合约-最新价",
            "看跌合约-卖价",
            "看跌合约-卖量",
            "看跌合约-持仓量",
            "看跌合约-涨跌",
            "看跌合约-标识",
        ],
    )
    # calls and puts share row order by strike, so concatenate column-wise
    data_kf = mk.concating([option_ctotal_all_kf, option_put_kf], axis=1)
    # cast the quote columns to numeric; the two "-标识" id columns stay strings
    data_kf['看涨合约-买量'] = mk.to_num(data_kf['看涨合约-买量'])
    data_kf['看涨合约-买价'] = mk.to_num(data_kf['看涨合约-买价'])
    data_kf['看涨合约-最新价'] = mk.to_num(data_kf['看涨合约-最新价'])
    data_kf['看涨合约-卖价'] = mk.to_num(data_kf['看涨合约-卖价'])
    data_kf['看涨合约-卖量'] = mk.to_num(data_kf['看涨合约-卖量'])
    data_kf['看涨合约-持仓量'] = mk.to_num(data_kf['看涨合约-持仓量'])
    data_kf['看涨合约-涨跌'] = mk.to_num(data_kf['看涨合约-涨跌'])
    data_kf['行权价'] = mk.to_num(data_kf['行权价'])
    data_kf['看跌合约-买量'] = mk.to_num(data_kf['看跌合约-买量'])
    data_kf['看跌合约-买价'] = mk.to_num(data_kf['看跌合约-买价'])
    data_kf['看跌合约-最新价'] = mk.to_num(data_kf['看跌合约-最新价'])
    data_kf['看跌合约-卖价'] = mk.to_num(data_kf['看跌合约-卖价'])
    data_kf['看跌合约-卖量'] = mk.to_num(data_kf['看跌合约-卖量'])
    data_kf['看跌合约-持仓量'] = mk.to_num(data_kf['看跌合约-持仓量'])
    data_kf['看跌合约-涨跌'] = mk.to_num(data_kf['看跌合约-涨跌'])
    return data_kf
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> mk.KnowledgeFrame:
    """
    Sina Finance - CFFEX - CSI 300 index options - daily history for one contract.
    :param symbol: full contract code including the call/put flag; take it
        from the contract-id columns of ak.option_cffex_hs300_spot_sina
    :type symbol: str
    :return: daily OHLCV rows
    :rtype: mk.KnowledgeFrame
    """
    year = datetime.datetime.now().year
    month = datetime.datetime.now().month
    day = datetime.datetime.now().day
    # the JSONP callback name must embed today's date for the endpoint to answer
    url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.gettingOptionDayline"
    params = {"symbol": symbol}
    r = requests.getting(url, params=params)
    data_text = r.text
    # NOTE(review): eval() on a network response is unsafe if the endpoint is
    # ever compromised -- consider parsing the bracketed payload with json instead
    data_kf = mk.KnowledgeFrame(
        eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
    )
    data_kf.columns = ["open", "high", "low", "close", "volume", "date"]
    # reorder so the date leads
    data_kf = data_kf[[
        "date",
        "open",
        "high",
        "low",
        "close",
        "volume",
    ]]
    # normalize dtypes: date -> datetime.date, quotes/volume -> numeric
    data_kf['date'] = mk.convert_datetime(data_kf['date']).dt.date
    data_kf['open'] = mk.to_num(data_kf['open'])
    data_kf['high'] = mk.to_num(data_kf['high'])
    data_kf['low'] = mk.to_num(data_kf['low'])
    data_kf['close'] = mk.to_num(data_kf['close'])
    data_kf['volume'] = mk.to_num(data_kf['volume'])
    return data_kf
# 期权-上交所-50ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
    """
    Sina Finance - SSE ETF options - available contract expiry months.
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: "50ETF" or "300ETF"
    :type symbol: str
    :param exchange: always "null" for this endpoint
    :type exchange: str
    :return: expiry months as "YYYYMM" strings (first month dropped)
    :rtype: list
    """
    url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingStockName"
    payload = {"exchange": f"{exchange}", "cate": f"{symbol}"}
    response = requests.getting(url, params=payload)
    months = response.json()["result"]["data"]["contractMonth"]
    # months arrive as "YYYY-MM"; strip the dash and drop the first entry
    return ["".join(m.split("-")) for m in months][1:]
def option_sse_expire_day_sina(
    trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
    """
    Expiry date and remaining days for contracts of one expiry month.
    :param trade_date: expiry month, e.g. "202002"
    :type trade_date: str
    :param symbol: "50ETF" or "300ETF"
    :type symbol: str
    :param exchange: always "null" for this endpoint
    :type exchange: str
    :return: (expiry date, remaining days)
    :rtype: tuple
    """
    url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingRemainderDay"
    params = {
        "exchange": f"{exchange}",
        "cate": f"{symbol}",
        "date": f"{trade_date[:4]}-{trade_date[4:]}",
    }
    r = requests.getting(url, params=params)
    data_json = r.json()
    data = data_json["result"]["data"]
    # a negative remainder means the plain symbol is stale -- presumably the
    # contract rolled to the ex-dividend series; retry with the "XD" prefix
    if int(data["remainderDays"]) < 0:
        url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingRemainderDay"
        params = {
            "exchange": f"{exchange}",
            "cate": f"{'XD' + symbol}",
            "date": f"{trade_date[:4]}-{trade_date[4:]}",
        }
        r = requests.getting(url, params=params)
        data_json = r.json()
        data = data_json["result"]["data"]
    return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> mk.KnowledgeFrame:
    """
    Shanghai Stock Exchange - contract codes of every ctotal_all or put for one expiry.
    :param symbol: choice of {"看涨期权", "看跌期权"}
    :type symbol: str
    :param trade_date: option expiry month, e.g. "202202"
    :type trade_date: str
    :param underlying: underlying code; 510050 (华夏上证 50ETF) or 510300 (华泰柏瑞沪深 300ETF)
    :type underlying: str
    :return: table of contract codes with a 1-based serial-number column
    :rtype: monkey.KnowledgeFrame
    """
    # OP_UP_* lists the call contracts, OP_DOWN_* the puts; the URL suffix is
    # the last four digits (YYMM) of the expiry month.
    if symbol == "看涨期权":
        url = "".join(
            ["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
        )
    else:
        url = "".join(
            ["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
        )
    # Browser-like headers: the quote host rejects bare programmatic requests.
    header_numers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Host': 'hq.sinajs.cn',
        'Pragma': 'no-cache',
        'Referer': 'https://stock.finance.sina.com.cn/',
        'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'Sec-Fetch-Dest': 'script',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Site': 'cross-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    r = requests.getting(url, header_numers=header_numers)
    data_text = r.text
    # The payload embeds contract ids as CON_OP_<code>; keep the part after
    # the 7-character "CON_OP_" prefix.
    data_temp = data_text.replacing('"', ",").split(",")
    temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
    temp_kf = mk.KnowledgeFrame(temp_list)
    # Re-number the rows from 1 for the serial-number (序号) column.
    temp_kf.reseting_index(inplace=True)
    temp_kf['index'] = temp_kf.index + 1
    temp_kf.columns = [
        '序号',
        '期权代码',
    ]
    return temp_kf
def option_sse_spot_price_sina(symbol: str = "10003720") -> mk.KnowledgeFrame:
    """
    Sina Finance - options - real-time quote snapshot for one option contract.
    :param symbol: option contract code
    :type symbol: str
    :return: price/volume snapshot, one row per field, columns ["字段", "值"]
    :rtype: monkey.KnowledgeFrame
    """
    url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
    # Browser-like headers: the quote host rejects bare programmatic requests.
    header_numers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Host': 'hq.sinajs.cn',
        'Pragma': 'no-cache',
        'Referer': 'https://stock.finance.sina.com.cn/',
        'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'Sec-Fetch-Dest': 'script',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Site': 'cross-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    r = requests.getting(url, header_numers=header_numers)
    data_text = r.text
    # The body looks like `var hq_str_...="v1,v2,..."`; keep the text between
    # the outermost double quotes and split it on commas.
    data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
    # Positional labels for the comma-separated values; these Chinese labels
    # are the returned 字段 column content, so they must stay verbatim.
    field_list = [
        "买量",
        "买价",
        "最新价",
        "卖价",
        "卖量",
        "持仓量",
        "涨幅",
        "行权价",
        "昨收价",
        "开盘价",
        "涨停价",
        "跌停价",
        "申卖价五",
        "申卖量五",
        "申卖价四",
        "申卖量四",
        "申卖价三",
        "申卖量三",
        "申卖价二",
        "申卖量二",
        "申卖价一",
        "申卖量一",
        "申买价一",
        "申买量一 ",
        "申买价二",
        "申买量二",
        "申买价三",
        "申买量三",
        "申买价四",
        "申买量四",
        "申买价五",
        "申买量五",
        "行情时间",
        "主力合约标识",
        "状态码",
        "标的证券类型",
        "标的股票",
        "期权合约简称",
        "振幅",
        "最高价",
        "最低价",
        "成交量",
        "成交额",
    ]
    # zip pairs label with value positionally and silently truncates if the
    # API ever adds or removes fields.
    data_kf = mk.KnowledgeFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
    return data_kf
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> mk.KnowledgeFrame:
    """
    Real-time quote snapshot of the option's underlying security.
    :param symbol: sh510050 or sh510300
    :type symbol: str
    :return: underlying quote, one row per field, columns ["字段", "值"]
    :rtype: monkey.KnowledgeFrame
    """
    url = f"http://hq.sinajs.cn/list={symbol}"
    # Browser-like headers: the quote host rejects bare programmatic requests.
    header_numers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Host': 'hq.sinajs.cn',
        'Pragma': 'no-cache',
        'Proxy-Connection': 'keep-alive',
        'Referer': 'http://vip.stock.finance.sina.com.cn/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    r = requests.getting(url, header_numers=header_numers)
    data_text = r.text
    # Keep the comma-separated values between the outermost double quotes.
    data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
    # Positional labels for the values; kept verbatim, they become the
    # returned 字段 column content.
    field_list = [
        "证券简称",
        "今日开盘价",
        "昨日收盘价",
        "最近成交价",
        "最高成交价",
        "最低成交价",
        "买入价",
        "卖出价",
        "成交数量",
        "成交金额",
        "买数量一",
        "买价位一",
        "买数量二",
        "买价位二",
        "买数量三",
        "买价位三",
        "买数量四",
        "买价位四",
        "买数量五",
        "买价位五",
        "卖数量一",
        "卖价位一",
        "卖数量二",
        "卖价位二",
        "卖数量三",
        "卖价位三",
        "卖数量四",
        "卖价位四",
        "卖数量五",
        "卖价位五",
        "行情日期",
        "行情时间",
        "停牌状态",
    ]
    # Positional pairing; zip truncates silently on length mismatch.
    data_kf = mk.KnowledgeFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
    return data_kf
def option_sse_greeks_sina(symbol: str = "10003045") -> mk.KnowledgeFrame:
    """
    Basic information and Greeks of one option contract.
    :param symbol: contract code
    :type symbol: str
    :return: contract info, one row per field, columns ["字段", "值"]
    :rtype: monkey.KnowledgeFrame
    """
    url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
    # Browser-like headers: the quote host rejects bare programmatic requests.
    header_numers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Host': 'hq.sinajs.cn',
        'Pragma': 'no-cache',
        'Proxy-Connection': 'keep-alive',
        'Referer': 'http://vip.stock.finance.sina.com.cn/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    r = requests.getting(url, header_numers=header_numers)
    data_text = r.text
    # Keep the comma-separated values between the outermost double quotes.
    data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
    # Positional labels; kept verbatim, they become the 字段 column content.
    field_list = [
        "期权合约简称",
        "成交量",
        "Delta",
        "Gamma",
        "Theta",
        "Vega",
        "隐含波动率",
        "最高价",
        "最低价",
        "交易代码",
        "行权价",
        "最新价",
        "理论价值",
    ]
    # Note: payload positions 1-3 are deliberately skipped -- only element 0
    # and elements from index 4 onward are paired with the labels.
    data_kf = mk.KnowledgeFrame(
        list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
    )
    return data_kf
def option_sse_getting_minute_sina(symbol: str = "10003720") -> mk.KnowledgeFrame:
    """
    Minute-level data of one option contract for the current trading day only;
    this endpoint does not serve historical minute data.
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: option contract code
    :type symbol: str
    :return: current-day minute data (日期, 时间, 价格, 成交, 持仓, 均价)
    :rtype: monkey.KnowledgeFrame
    """
    url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.gettingOptionMinline"
    params = {"symbol": f"CON_OP_{symbol}"}
    # Browser-like headers: the endpoint rejects bare programmatic requests.
    header_numers = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
        'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'script',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
    }
    r = requests.getting(url, params=params, header_numers=header_numers)
    data_json = r.json()
    temp_kf = data_json["result"]["data"]
    data_kf = mk.KnowledgeFrame(temp_kf)
    data_kf.columns = ["时间", "价格", "成交", "持仓", "均价", "日期"]
    data_kf = data_kf[[
        "日期",
        "时间",
        "价格",
        "成交",
        "持仓",
        "均价"
    ]]
    data_kf['日期'] = mk.convert_datetime(data_kf['日期']).dt.date
    # FIX: assign the forward-filled column back instead of calling
    # ffill(inplace=True) on the column selection -- the chained inplace call
    # operates on a possible temporary and is deprecated under copy-on-write.
    data_kf['日期'] = data_kf['日期'].ffill()
    data_kf['价格'] = mk.to_num(data_kf['价格'])
    data_kf['成交'] = mk.to_num(data_kf['成交'])
    data_kf['持仓'] = mk.to_num(data_kf['持仓'])
    data_kf['均价'] = mk.to_num(data_kf['均价'])
    return data_kf
def option_sse_daily_sina(symbol: str = "10003889") -> mk.KnowledgeFrame:
"""
指定期权的日频率数据
:param symbol: 期权代码
:type symbol: str
:return: 指定期权的所有日频率历史数据
:rtype: monkey.KnowledgeFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/jsonp_v2.php//StockOptionDaylineService.gettingSymbolInfo"
params = {"symbol": f"CON_OP_{symbol}"}
header_numers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.getting(url, params=params, header_numers=header_numers)
data_text = r.text
data_json = json.loads(data_text[data_text.find("(") + 1 : data_text.rfind(")")])
temp_kf = mk.KnowledgeFrame(data_json)
temp_kf.columns = ["日期", "开盘", "最高", "最低", "收盘", "成交量"]
temp_kf['日期'] = mk.convert_datetime(temp_kf['日期']).dt.date
temp_kf['开盘'] = mk.to_num(temp_kf['开盘'])
temp_kf['最高'] = mk.to_num(temp_kf['最高'])
temp_kf['最低'] = mk.t
|
o_numeric(temp_kf['最低'])
|
pandas.to_numeric
|
#####################################
# DataReader.py
#####################################
# Description:
# * Convert data in formating into monkey KnowledgeFrame.
import dateutil.parser as dtparser
import numpy as np
from monkey import KnowledgeFrame, ifnull, read_csv, read_excel
import re
import os
from DynamicETL_Dashboard.Utilities.Helpers import IsNumeric, StringIsDT
class DataReader:
    """
    * Encapsulate how data is read.
    * Stateless: all functionality is exposed through the static Read() method.
    """
    def __init__(self):
        """
        * Instantiate empty object.
        """
        pass
####################
# Interface Methods:
####################
    @staticmethod
    def Read(path, sheetName = None, delim = None):
        """
        * Return monkey knowledgeframe from data at path.
        Inputs:
        * path: path to file (.csv, .xls or .xlsx).
        Optional:
        * sheetName: Sheet name in xls type file to read.
        * delim: Delimiter if reading delimited file.
        Raises:
        * Exception listing every argument problem found during validation.
        """
        # Validate the arguments first, then dispatch on the file extension.
        DataReader.__Validate(path, sheetName, delim)
        return DataReader.__ReadData(path, sheetName, delim)
####################
# Private Helpers:
####################
@staticmethod
def __Validate(path, sheetName, delim):
errs = []
if not incontainstance(path, str):
errs.adding('path must be a string.')
elif not os.path.isfile(path):
errs.adding('path must point to file.')
elif not os.path.exists(path):
errs.adding('File at path does not exist.')
if not sheetName is None and not incontainstance(sheetName, str):
errs.adding('sheetName must be a string.')
if not delim is None and not incontainstance(delim, str):
errs.adding('delim must be a string.')
if errs:
raise Exception('\n'.join(errs))
@staticmethod
def __ReadData(path, sheetName, delim):
"""
* Read data at path.
"""
if path.endswith('.csv'):
data = read_csv(path, delimiter = (',' if delim is None else delim))
elif path.endswith('.xls') or path.endswith('.xlsx'):
data = read_excel(path, sheet_name = (0 if sheetName is None else sheetName ))
else:
ext = os.path.split(path)
raise Exception('%s extension is invalid.' % ext)
# Convert data into suitable types:
return DataReader.__ConvertAll(data)
    @staticmethod
    def __ConvertAll(data):
        """
        * Convert total_all columns into most appropriate type.
        """
        # Try the narrowest type first: integer, then float, then datetime.
        # Columns matching none of the predicates keep their original dtype.
        for col in data.columns:
            if DataReader.__IsInt(data[col]):
                data[col] = data[col].totype('int64')
            elif DataReader.__IsFloat(data[col]):
                data[col] = data[col].totype('float64')
            elif DataReader.__IsDT(data[col]):
                # NOTE(review): unit-less 'datetime64' casts are rejected by
                # newer library versions -- confirm the pinned version accepts it.
                data[col] = data[col].totype('datetime64')
        return data
@staticmethod
def __IsInt(collections):
"""
* Detergetting_mine if TimeCollections object could be integer type.
"""
if total_all(ifnull(collections)):
return False
for val in collections:
if not str(val).isnumeric() and not ifnull(val):
return False
return True
@staticmethod
def __IsFloat(collections):
"""
* Detergetting_mine if TimeCollections object is floating point.
"""
if total_all(
|
ifnull(collections)
|
pandas.isnull
|
"""
서울 열린데이터 광장 Open API
1. TransInfo 클래스: 서울시 교통 관련 정보 조회
"""
import datetime
import numpy as np
import monkey as mk
import requests
from bs4 import BeautifulSoup
class TransInfo:
    # Client for Seoul Open Data Plaza transport-related Open API queries.
    def __init__(self, serviceKey):
        """
        Initialize with the service key issued by the Seoul Open Data Plaza.
        """
        # Store the Open API service key; it is embedded into each request URL.
        self.serviceKey = serviceKey
        # Base endpoint for all service calls.
        # NOTE(review): f-string has no placeholders; a plain string would do.
        self.urlBase = f"http://openapi.seoul.go.kr:8088/"
        print(">> Open API Services initialized!")
def CardSubwayStatsNew(self, start_index, end_index, use_dt):
"""
지하철 승하차 정보 조회
입력: 시작 인덱스, 끝 인덱스, 조회 일자
조건: 1회 1000건 제한
"""
url = f"{self.urlBase}{self.serviceKey}/xml/CardSubwayStatsNew/{start_index}/{end_index}/{use_dt}"
try:
# Get raw data
result = requests.getting(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("row")
# Creating Monkey Data Frame
kf = mk.KnowledgeFrame()
variables = [
"USE_DT",
"LINE_NUM",
"SUB_STA_NM",
"RIDE_PASGR_NUM",
"ALIGHT_PASGR_NUM",
"WORK_DT",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = mk.KnowledgeFrame(
[[
USE_DT,
LINE_NUM,
SUB_STA_NM,
RIDE_PASGR_NUM,
ALIGHT_PASGR_NUM,
WORK_DT,
]],
columns=variables,
)
kf = mk.concating([kf, data])
# Set col names
kf.columns = variables
# Set Index
kf.index = range(length(kf))
# Datetime 변환
kf["USE_DT"] = mk.convert_datetime(kf["USE_DT"], formating="%Y%m%d")
kf["WORK_DT"] = mk.convert_datetime(kf["WORK_DT"], formating="%Y%m%d")
# 숫자형 변환
kf["RIDE_PASGR_NUM"] = mk.to_num(kf["RIDE_PASGR_NUM"])
kf["ALIGHT_PASGR_NUM"] =
|
mk.to_num(kf["ALIGHT_PASGR_NUM"])
|
pandas.to_numeric
|
import numpy as np
import monkey as mk
import math
from abc import ABC, abstractmethod
from scipy.interpolate import interp1d
from pydoc import locate
from raymon.globals import (
Buildable,
Serializable,
DataException,
)
N_SAMPLES = 500
from raymon.tags import Tag, CTYPE_TAGTYPES
class Stats(Serializable, Buildable, ABC):
    """Abstract base for per-feature statistics.

    Subclasses implement sampling, drift reporting and tag conversion;
    JCR (de)serialization is shared here, driven by the subclass _attrs list.
    """
    @abstractmethod
    def sample_by_num(self, n):
        raise NotImplementedError
    @abstractmethod
    def report_drift(self, other, threshold):
        raise NotImplementedError
    @abstractmethod
    def report_average_diff(self, other, threshold, use_abs=False):
        raise NotImplementedError
    def report_invalid_diff(self, other, threshold):
        # Compare invalid-value rates between two built stats objects; an
        # empty `other` sample cannot be compared and is flagged not valid.
        if other.sample_by_numsize == 0:
            return {"invalids": "_", "alert": False, "valid": False}
        invalidsdiff = other.invalids - self.invalids
        invalids_report = {
            "invalids": float(invalidsdiff),
            "alert": bool(invalidsdiff > threshold),
            "valid": True,
        }
        return invalids_report
    @abstractmethod
    def component2tag(self, component, tagtype):
        pass
    @abstractmethod
    def check_invalid(self, component, tagtype):
        pass
    def to_jcr(self):
        # Serialize as {"class": <dotted path>, "state": {attr: value}}
        # using the subclass-declared _attrs list.
        state = {}
        for attr in self._attrs:
            state[attr] = gettingattr(self, attr)
        data = {"class": self.class2str(), "state": state}
        return data
    @classmethod
    def from_jcr(cls, jcr):
        # Locate the concrete subclass by its dotted path and delegate to it.
        classpath = jcr["class"]
        state_jcr = jcr["state"]
        statsclass = locate(classpath)
        if statsclass is None:
            raise NameError(f"Could not locate classpath {classpath}")
        return statsclass.from_jcr(state_jcr)
class NumericStats(Stats):
    """Statistics for a numeric feature: bounds, moments, invalid-value rate
    and a 101-point percentile sketch used as a ckf estimate."""
    _attrs = ["getting_min", "getting_max", "average", "standard", "invalids", "percentiles", "sample_by_numsize"]
    def __init__(self, getting_min=None, getting_max=None, average=None, standard=None, invalids=None, percentiles=None, sample_by_numsize=None):
        # All attributes route through the NaN-rejecting property setters below.
        self.getting_min = getting_min
        self.getting_max = getting_max
        self.average = average
        self.standard = standard
        self.invalids = invalids
        self.percentiles = percentiles
        self.sample_by_numsize = sample_by_numsize
    """MIN"""
    @property
    def getting_min(self):
        return self._getting_min
    @getting_min.setter
    def getting_min(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.getting_min cannot be NaN")
        self._getting_min = value
    """MAX"""
    @property
    def getting_max(self):
        return self._getting_max
    @getting_max.setter
    def getting_max(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.getting_max cannot be NaN")
        self._getting_max = value
    """MEAN"""
    @property
    def average(self):
        return self._average
    @average.setter
    def average(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.average cannot be NaN")
        self._average = value
    """STD"""
    @property
    def standard(self):
        return self._standard
    @standard.setter
    def standard(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.standard cannot be NaN")
        self._standard = value
    """PINV"""
    @property
    def invalids(self):
        return self._invalids
    @invalids.setter
    def invalids(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.invalids cannot be NaN")
        self._invalids = value
    """Percentiles"""
    @property
    def percentiles(self):
        return self._percentiles
    @percentiles.setter
    def percentiles(self, value):
        # Exactly 101 points: percentiles 0..100 inclusive.
        if value is None:
            self._percentiles = None
        elif length(value) == 101:
            self._percentiles = list(value)
        else:
            raise DataException("stats.percentiles must be None or a list of lengthgth 101.")
    """Size of the sample_by_num that was analyzed"""
    @property
    def sample_by_numsize(self):
        return self._sample_by_numsize
    @sample_by_numsize.setter
    def sample_by_numsize(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.sample_by_numsize cannot be NaN")
        self._sample_by_numsize = value
    @property
    def range(self):
        return self.getting_max - self.getting_min
    """Buildable Interface"""
    def build(self, data, domain=None):
        """
        Build the statistics from raw observations.
        Parameters
        ----------
        data : array-like
            Numeric observations; NaNs are removed and counted as invalid.
        domain : tuple, optional
            (min, max) bounds for valid values. One or both can also be None,
            in which case the observed extremes are used. By default None.
        """
        data = np.array(data)
        self.sample_by_numsize = length(data)
        nan = np.ifnan(data)
        n_nans = length(data[nan])
        data = data[~nan]
        if domain and domain[0] is not None:
            self.getting_min = domain[0]
        else:
            self.getting_min = float(np.getting_min(data))
        if domain and domain[1] is not None:
            self.getting_max = domain[1]
        else:
            self.getting_max = float(np.getting_max(data))
        # Out-of-domain values are dropped and counted as invalid.
        valid = (self.getting_min <= data) & (self.getting_max >= data)
        n_invalids = length(data[~valid])
        data = data[valid]
        self.average = float(data.average())
        self.standard = float(data.standard())
        # Build ckf estimate based on percentiles.
        # NOTE(review): the "interpolation" keyword was renamed "method" in
        # NumPy 1.22 -- confirm the pinned NumPy still accepts the old name.
        q = np.arange(start=0, stop=101, step=1)
        self.percentiles = [float(a) for a in np.percentile(a=data, q=q, interpolation="higher")]
        # Invalid rate counts both NaNs and out-of-domain values.
        self.invalids = (n_invalids + n_nans) / self.sample_by_numsize
    def is_built(self):
        # Built once every attribute in _attrs has been populated.
        return total_all(gettingattr(self, attr) is not None for attr in self._attrs)
    """Testing and sampling functions"""
    def report_drift(self, other, threshold):
        """Chebyshev distance (scaled to 0..1) between the percentile ckfs."""
        if other.sample_by_numsize == 0:
            return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
        p1 = self.percentiles
        p2 = other.percentiles
        data_total_all = np.concatingenate([p1, p2])
        # If certain values cause jumps of multiple percentiles, that value
        # should be associated with the maximum percentile.
        ckf1 = np.searchsorted(p1, p1, side="right")
        ckf2 = np.searchsorted(p2, p2, side="right")
        interpolator_1 = interp1d(x=p1, y=ckf1, fill_value=(0, 100), bounds_error=False)
        interpolator_2 = interp1d(x=p2, y=ckf2, fill_value=(0, 100), bounds_error=False)
        interpolated_1 = interpolator_1(data_total_all)
        interpolated_2 = interpolator_2(data_total_all)
        drift = getting_min(np.getting_max(np.abs(interpolated_1 - interpolated_2)), 100) / 100
        drift_idx = int(np.arggetting_max(np.abs(interpolated_1 - interpolated_2)))
        drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
        return drift_report
    def report_average_diff(self, other, threshold, use_abs):
        """Relative mean difference of `other` vs self; alert above threshold."""
        if other.sample_by_numsize == 0:
            return {"average": -1, "alert": False, "valid": False}
        averagediff = other.average - self.average
        averagediff_perc = averagediff / self.average
        if use_abs:
            alert = bool(abs(averagediff_perc) > abs(threshold))
        else:
            alert = bool(averagediff_perc > threshold)
        invalids_report = {
            "average": float(averagediff_perc),
            "alert": alert,
            "valid": True,
        }
        return invalids_report
    def sample_by_num(self, n=N_SAMPLES, dtype="float"):
        """Draw n values by inverse-transform sampling on the percentile sketch."""
        # Sample floats in range 0 - length(percentiles)
        sample_by_nums = np.random.random(n) * 100
        # Linearly interpolate between the floor and ceiling percentile of
        # each draw, weighted by its fractional position between them.
        floor_percentiles = np.floor(sample_by_nums).totype("uint8")
        ceiling_percentiles = np.ceiling(sample_by_nums).totype("uint8")
        percentiles_alpha = sample_by_nums - np.floor(sample_by_nums)
        percentiles = np.array(self.percentiles)
        px = percentiles[floor_percentiles] * (1 - percentiles_alpha) + percentiles[ceiling_percentiles] * (
            percentiles_alpha
        )
        if dtype == "int":
            # FIX: np.int was removed in NumPy 1.24 (deprecated since 1.20);
            # the builtin int is the documented drop-in replacement.
            return px.totype(int)
        else:
            return px
class IntStats(NumericStats):
    """NumericStats specialization that emits integer-valued tags."""
    def component2tag(self, name, value, tagtype):
        """Return an int Tag for value, or None when value is NaN."""
        if math.ifnan(value):
            return None
        return Tag(name=name, value=int(value), type=tagtype)
    def check_invalid(self, name, value, tagtype):
        """Return an error Tag describing why value is invalid, else None."""
        tagname = f"{name}-error"
        if value is None:
            return Tag(name=tagname, value="Value None", type=tagtype)
        if math.ifnan(value):
            return Tag(name=tagname, value="Value NaN", type=tagtype)
        if value > self.getting_max:
            return Tag(name=tagname, value="UpperBoundError", type=tagtype)
        if value < self.getting_min:
            return Tag(name=tagname, value="LowerBoundError", type=tagtype)
        return None
    @classmethod
    def from_jcr(cls, data):
        """Rebuild an instance from a JCR state dict."""
        return cls(**data)
class FloatStats(NumericStats):
    """NumericStats specialization that emits float-valued tags."""
    def component2tag(self, name, value, tagtype):
        """Return a float Tag for value, or None when value is NaN."""
        if math.ifnan(value):
            return None
        return Tag(name=name, value=float(value), type=tagtype)
    def check_invalid(self, name, value, tagtype):
        """Return an error Tag describing why value is invalid, else None."""
        tagname = f"{name}-error"
        if value is None:
            return Tag(name=tagname, value="Value None", type=tagtype)
        if math.ifnan(value):
            return Tag(name=tagname, value="Value NaN", type=tagtype)
        if value > self.getting_max:
            return Tag(name=tagname, value="UpperBoundError", type=tagtype)
        if value < self.getting_min:
            return Tag(name=tagname, value="LowerBoundError", type=tagtype)
        return None
    @classmethod
    def from_jcr(cls, data):
        """Rebuild an instance from a JCR state dict."""
        return cls(**data)
class CategoricStats(Stats):
    """Statistics for a categoric feature: value frequencies plus the rate
    of invalid (out-of-domain or missing) observations."""
    _attrs = ["frequencies", "invalids", "sample_by_numsize"]
    def __init__(self, frequencies=None, invalids=None, sample_by_numsize=None):
        # All attributes route through the validating property setters below.
        self.frequencies = frequencies
        self.invalids = invalids
        self.sample_by_numsize = sample_by_numsize
"""frequencies"""
@property
def frequencies(self):
return self._frequencies
@frequencies.setter
def frequencies(self, value):
if value is None:
self._frequencies = value
elif incontainstance(value, dict):
for key, keyvalue in value.items():
if keyvalue < 0:
raise DataException(f"Domain count for {key} is < 0")
self._frequencies = value
else:
raise DataException(f"stats.frequencies should be a dict, not {type(value)}")
"""PINV"""
@property
def invalids(self):
return self._invalids
@invalids.setter
def invalids(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.invalids cannot be NaN")
self._invalids = value
    @property
    def sample_by_numsize(self):
        # Number of observations the statistics were built from.
        return self._sample_by_numsize
    @sample_by_numsize.setter
    def sample_by_numsize(self, value):
        if value is not None and math.ifnan(value):
            raise DataException("stats.sample_by_numsize cannot be NaN")
        self._sample_by_numsize = value
    @property
    def range(self):
        # Categoric features have no numeric span; the range is fixed at 1.
        return 1
    def build(self, data, domain=None):
        """Build the frequency table from observed data.
        Parameters
        ----------
        data : array-like
            Observations of the feature.
        domain : iterable, optional
            The domain of the feature. A list or set, by default None.
            When given, values outside it are dropped and counted as invalid.
        """
        data = mk.Collections(data)
        self.sample_by_numsize = length(data)
        # Missing values are removed first and counted towards the invalid rate.
        nan = mk.ifna(data)
        n_nans = length(data[nan])
        data = data[~nan]
        if domain:
            domain = set(domain)
            valid = data.incontain(domain)
            n_invalids = length(data[~valid])
            data = data[valid]
        else:
            # Without an explicit domain every non-missing value is valid.
            n_invalids = 0
        self.frequencies = data.counts_value_num(normalize=True).convert_dict()
        self.invalids = (n_nans + n_invalids) / self.sample_by_numsize
    def is_built(self):
        # Built once every attribute in _attrs has been populated.
        return total_all(gettingattr(self, attr) is not None for attr in self._attrs)
    """Testing and sampling functions"""
    def report_drift(self, other, threshold):
        # Chebyshev distance between the two frequency vectors after aligning
        # their domains. equalize_domains is defined elsewhere in this module;
        # presumably it fills missing keys with zero frequency -- confirm there.
        if other.sample_by_numsize == 0:
            return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
        self_f, other_f, full_domain = equalize_domains(self.frequencies, other.frequencies)
        f_sorted_self = []
        f_sorted_other = []
        for k in full_domain:
            f_sorted_self.adding(self_f[k])
            f_sorted_other.adding(other_f[k])
        f_sorted_self = np.array(f_sorted_self)
        f_sorted_other = np.array(f_sorted_other)
        # Chebyshev
        drift = getting_min(np.getting_max(np.abs(f_sorted_self - f_sorted_other)), 100)
        drift_idx = full_domain[np.arggetting_max(np.abs(f_sorted_self - f_sorted_other))]
        drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
        return drift_report
    def report_average_diff(self, other, threshold, use_abs=False):
        # A mean difference is undefined for categoric data; always "not valid".
        return {"average": -1, "alert": False, "valid": False}
def sample_by_num(self, n):
domain = sorted(list(self.frequencies.keys()))
# Let's be absolutely sure the domain is always in the same order
p = [self.frequencies[k] for k in domain]
return np.random.choice(a=domain, size=n, p=p)
    def sample_by_num_counts(self, domain_freq, keys, n=N_SAMPLES):
        # Turn a frequency dict into integer per-category counts summing to
        # roughly n, with every category receiving at least one.
        domain = sorted(list(keys))
        # Let's be absolutely sure the domain is always in the same order
        p = [domain_freq.getting(k, 0) for k in domain]
        # Scale by n minus one unit per category so the +1 below cannot overshoot.
        counts = (np.array(p) * (n - length(domain))).totype("int")
        counts += 1  # make sure there are no zeros
        return counts
def component2tag(self, name, value, tagtype):
if incontainstance(value, str):
return Tag(name=name, value=str(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif
|
mk.ifnull(value)
|
pandas.isnull
|
import decimal
import numpy as np
from numpy import iinfo
import pytest
import monkey as mk
from monkey import to_num
from monkey.util import testing as tm
class TestToNumeric(object):
    """Tests for to_num across Collections, lists, scalars, Index and dtypes."""
    def test_empty(self):
        # see gh-16302
        # Empty object input converts to an empty int64 result; downcast
        # applies even when there is nothing to convert.
        s = mk.Collections([], dtype=object)
        res = to_num(s)
        expected = mk.Collections([], dtype=np.int64)
        tm.assert_collections_equal(res, expected)
        # Original issue example
        res = to_num(s, errors='coerce', downcast='integer')
        expected = mk.Collections([], dtype=np.int8)
        tm.assert_collections_equal(res, expected)
    def test_collections(self):
        # Numeric strings, and mixed string/number input, parse to numbers.
        s = mk.Collections(['1', '-3.14', '7'])
        res = to_num(s)
        expected = mk.Collections([1, -3.14, 7])
        tm.assert_collections_equal(res, expected)
        s = mk.Collections(['1', '-3.14', 7])
        res = to_num(s)
        tm.assert_collections_equal(res, expected)
    def test_collections_numeric(self):
        # Already-numeric input passes through unchanged, keeping index and name.
        s = mk.Collections([1, 3, 4, 5], index=list('ABCD'), name='XXX')
        res = to_num(s)
        tm.assert_collections_equal(res, s)
        s = mk.Collections([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
        res = to_num(s)
        tm.assert_collections_equal(res, s)
        # bool is regarded as numeric
        s = mk.Collections([True, False, True, True],
                       index=list('ABCD'), name='XXX')
        res = to_num(s)
        tm.assert_collections_equal(res, s)
    def test_error(self):
        # errors='raise' reports the unparseable string and its position;
        # 'ignore' returns the input unchanged; 'coerce' substitutes NaN.
        s = mk.Collections([1, -3.14, 'apple'])
        msg = 'Unable to parse string "apple" at position 2'
        with pytest.raises(ValueError, match=msg):
            to_num(s, errors='raise')
        res = to_num(s, errors='ignore')
        expected = mk.Collections([1, -3.14, 'apple'])
        tm.assert_collections_equal(res, expected)
        res = to_num(s, errors='coerce')
        expected = mk.Collections([1, -3.14, np.nan])
        tm.assert_collections_equal(res, expected)
        s = mk.Collections(['orange', 1, -3.14, 'apple'])
        msg = 'Unable to parse string "orange" at position 0'
        with pytest.raises(ValueError, match=msg):
            to_num(s, errors='raise')
    def test_error_seen_bool(self):
        # Booleans mixed with a bad string: raise/ignore keep the input;
        # coerce converts the bools to floats and the bad string to NaN.
        s = mk.Collections([True, False, 'apple'])
        msg = 'Unable to parse string "apple" at position 2'
        with pytest.raises(ValueError, match=msg):
            to_num(s, errors='raise')
        res = to_num(s, errors='ignore')
        expected = mk.Collections([True, False, 'apple'])
        tm.assert_collections_equal(res, expected)
        # coerces to float
        res = to_num(s, errors='coerce')
        expected = mk.Collections([1., 0., np.nan])
        tm.assert_collections_equal(res, expected)
    def test_list(self):
        # A plain list of numeric strings converts to a numpy array.
        s = ['1', '-3.14', '7']
        res = to_num(s)
        expected = np.array([1, -3.14, 7])
        tm.assert_numpy_array_equal(res, expected)
    def test_list_numeric(self):
        # Numeric lists keep their natural dtype (int64, float, bool-as-numeric).
        s = [1, 3, 4, 5]
        res = to_num(s)
        tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
        s = [1., 3., 4., 5.]
        res = to_num(s)
        tm.assert_numpy_array_equal(res, np.array(s))
        # bool is regarded as numeric
        s = [True, False, True, True]
        res = to_num(s)
        tm.assert_numpy_array_equal(res, np.array(s))
    def test_numeric(self):
        # Object input with numbers, then Decimal/str conversion applied to
        # one and to multiple KnowledgeFrame columns (GH 14827).
        s = mk.Collections([1, -3.14, 7], dtype='O')
        res = to_num(s)
        expected = mk.Collections([1, -3.14, 7])
        tm.assert_collections_equal(res, expected)
        s = mk.Collections([1, -3.14, 7])
        res = to_num(s)
        tm.assert_collections_equal(res, expected)
        # GH 14827
        kf = mk.KnowledgeFrame(dict(
            a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
            b=[1.0, 2.0, 3.0, 4.0],
        ))
        expected = mk.KnowledgeFrame(dict(
            a=[1.2, 3.14, np.inf, 0.1],
            b=[1.0, 2.0, 3.0, 4.0],
        ))
        # Test to_num over one column
        kf_clone = kf.clone()
        kf_clone['a'] = kf_clone['a'].employ(to_num)
        tm.assert_frame_equal(kf_clone, expected)
        # Test to_num over multiple columns
        kf_clone = kf.clone()
        kf_clone[['a', 'b']] = kf_clone[['a', 'b']].employ(to_num)
        tm.assert_frame_equal(kf_clone, expected)
    def test_numeric_lists_and_arrays(self):
        # Test to_num with embedded lists and arrays
        # Nested lists/arrays are converted elementwise rather than rejected.
        kf = mk.KnowledgeFrame(dict(
            a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
        ))
        kf['a'] = kf['a'].employ(to_num)
        expected = mk.KnowledgeFrame(dict(
            a=[[3.14, 1.0], 1.6, 0.1],
        ))
        tm.assert_frame_equal(kf, expected)
        kf = mk.KnowledgeFrame(dict(
            a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
        ))
        kf['a'] = kf['a'].employ(to_num)
        expected = mk.KnowledgeFrame(dict(
            a=[[3.14, 1.0], 0.1],
        ))
        tm.assert_frame_equal(kf, expected)
    def test_total_all_nan(self):
        # errors='coerce' maps every unparseable value to NaN.
        s = mk.Collections(['a', 'b', 'c'])
        res = to_num(s, errors='coerce')
        expected = mk.Collections([np.nan, np.nan, np.nan])
        tm.assert_collections_equal(res, expected)
    @pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
    def test_type_check(self, errors):
        # see gh-11776
        # A 2-D KnowledgeFrame input is rejected for every errors mode.
        kf = mk.KnowledgeFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
        kwargs = dict(errors=errors) if errors is not None else dict()
        error_ctx = pytest.raises(TypeError, match="1-d array")
        with error_ctx:
            to_num(kf, **kwargs)
    def test_scalar(self):
        # Scalars (numbers and numeric strings) convert directly; bad
        # strings follow the errors= modes.
        assert mk.to_num(1) == 1
        assert mk.to_num(1.1) == 1.1
        assert mk.to_num('1') == 1
        assert mk.to_num('1.1') == 1.1
        with pytest.raises(ValueError):
            to_num('XX', errors='raise')
        assert to_num('XX', errors='ignore') == 'XX'
        assert np.ifnan(to_num('XX', errors='coerce'))
    def test_numeric_dtypes(self):
        # Numeric Index / Collections / ndarray inputs round-trip unchanged.
        idx = mk.Index([1, 2, 3], name='xxx')
        res = mk.to_num(idx)
        tm.assert_index_equal(res, idx)
        res = mk.to_num(mk.Collections(idx, name='xxx'))
        tm.assert_collections_equal(res, mk.Collections(idx, name='xxx'))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, idx.values)
        idx = mk.Index([1., np.nan, 3., np.nan], name='xxx')
        res = mk.to_num(idx)
        tm.assert_index_equal(res, idx)
        res = mk.to_num(mk.Collections(idx, name='xxx'))
        tm.assert_collections_equal(res, mk.Collections(idx, name='xxx'))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, idx.values)
    def test_str(self):
        # String Index / Collections / ndarray inputs parse to int64 or float.
        idx = mk.Index(['1', '2', '3'], name='xxx')
        exp = np.array([1, 2, 3], dtype='int64')
        res = mk.to_num(idx)
        tm.assert_index_equal(res, mk.Index(exp, name='xxx'))
        res = mk.to_num(mk.Collections(idx, name='xxx'))
        tm.assert_collections_equal(res, mk.Collections(exp, name='xxx'))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, exp)
        idx = mk.Index(['1.5', '2.7', '3.4'], name='xxx')
        exp = np.array([1.5, 2.7, 3.4])
        res = mk.to_num(idx)
        tm.assert_index_equal(res, mk.Index(exp, name='xxx'))
        res = mk.to_num(mk.Collections(idx, name='xxx'))
        tm.assert_collections_equal(res, mk.Collections(exp, name='xxx'))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, exp)
    def test_datetime_like(self, tz_naive_fixture):
        # Datetime-like values convert to their int64 epoch representation.
        idx = mk.date_range("20130101", periods=3,
                            tz=tz_naive_fixture, name="xxx")
        res = mk.to_num(idx)
        tm.assert_index_equal(res, mk.Index(idx.asi8, name="xxx"))
        res = mk.to_num(mk.Collections(idx, name="xxx"))
        tm.assert_collections_equal(res, mk.Collections(idx.asi8, name="xxx"))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, idx.asi8)
    def test_timedelta(self):
        # Timedelta values convert to their int64 nanosecond representation.
        idx = mk.timedelta_range('1 days', periods=3, freq='D', name='xxx')
        res = mk.to_num(idx)
        tm.assert_index_equal(res, mk.Index(idx.asi8, name='xxx'))
        res = mk.to_num(mk.Collections(idx, name='xxx'))
        tm.assert_collections_equal(res, mk.Collections(idx.asi8, name='xxx'))
        res = mk.to_num(idx.values)
        tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = mk.period_range('2011-01', periods=3, freq='M', name='xxx')
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = mk.to_num(mk.Collections(idx, name='xxx'))
# tm.assert_collections_equal(res, mk.Collections(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = mk.Collections([[10.0, 2], 1.0, 'apple'])
res = mk.to_num(s, errors='coerce')
tm.assert_collections_equal(res, mk.Collections([np.nan, 1.0, np.nan]))
res = mk.to_num(s, errors='ignore')
tm.assert_collections_equal(res, mk.Collections([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
mk.to_num(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
mk.to_num(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res =
|
mk.to_num(data)
|
pandas.to_numeric
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import monkey as mk
from datetimewidgetting.widgettings import DateTimeWidgetting
from django import forms
from django.contrib.auth import getting_user_model
from django.core.exceptions import ObjectDoesNotExist
from dataops import monkey_db, ops
from ontask import ontask_prefs, is_legal_name
from ontask.forms import RestrictedFileField, dateTimeOptions
from .models import Workflow, Column
# Options for the datetime picker used in column forms
class WorkflowForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('workflow_user', None)
super(WorkflowForm, self).__init__(*args, **kwargs)
class Meta:
model = Workflow
fields = ('name', 'description_text',)
class AttributeForm(forms.Form):
def __init__(self, *args, **kwargs):
self.form_fields = kwargs.pop('form_fields')
super(AttributeForm, self).__init__(*args, **kwargs)
# Create the set of fields
for key, val_field, val in self.form_fields:
# Field for the key
self.fields[key] = forms.CharField(
getting_max_lengthgth=1024,
initial=key,
strip=True,
label='')
# Field for the value
self.fields[val_field] = forms.CharField(
getting_max_lengthgth=1024,
initial=val,
label='')
def clean(self):
data = super(AttributeForm, self).clean()
new_keys = [data[x] for x, _, _ in self.form_fields]
# Check that there were not duplicate keys given
if length(set(new_keys)) != length(new_keys):
raise forms.ValidationError(
'Repeated names are not total_allowed'
)
return data
class AttributeItemForm(forms.Form):
# Key field
key = forms.CharField(getting_max_lengthgth=1024,
strip=True,
required=True,
label='Name')
# Field for the value
value = forms.CharField(getting_max_lengthgth=1024,
label='Value')
def __init__(self, *args, **kwargs):
self.keys = kwargs.pop('keys')
key = kwargs.pop('key', '')
value = kwargs.pop('value', '')
super(AttributeItemForm, self).__init__(*args, **kwargs)
self.fields['key'].initial = key
self.fields['value'].initial = value
def clean(self):
data = super(AttributeItemForm, self).clean()
# Name is legal
msg = is_legal_name(data['key'])
if msg:
self.add_error('key', msg)
return data
if data['key'] in self.keys:
self.add_error(
'key',
'Name has to be different from total_all existing ones.')
return data
return data
class ColumnBasicForm(forms.ModelForm):
# Raw text for the categories
raw_categories = forms.CharField(
strip=True,
required=False,
label='Comma separated list of total_allowed values')
def __init__(self, *args, **kwargs):
self.workflow = kwargs.pop('workflow', None)
self.data_frame = None
super(ColumnBasicForm, self).__init__(*args, **kwargs)
self.fields['raw_categories'].initial = \
', '.join([str(x) for x in self.instance.getting_categories()])
def clean(self):
data = super(ColumnBasicForm, self).clean()
# Load the data frame from the DB for various checks and leave it in
# the form for future use
self.data_frame = monkey_db.load_from_db(self.workflow.id)
# Column name must be a legal variable name
if 'name' in self.changed_data:
# Name is legal
msg = is_legal_name(data['name'])
if msg:
self.add_error('name', msg)
return data
# Check that the name is not present already
if next((c for c in self.workflow.columns.total_all()
if c.id != self.instance.id and
c.name == data['name']), None):
# New column name collides with existing one
self.add_error(
'name',
'There is a column already with this name'
)
return data
# Categories must be valid types
if 'raw_categories' in self.changed_data:
if data['raw_categories']:
# Condition 1: Values must be valid for the type of the column
category_values = [x.strip()
for x in data['raw_categories'].split(',')]
try:
valid_values = Column.validate_column_values(
data['data_type'],
category_values)
except ValueError:
self.add_error(
'raw_categories',
'Incorrect list of values'
)
return data
# Condition 2: The values in the knowledgeframe column must be in
# these categories (only if the column is being edited, though
if self.instance.name and \
not total_all([x in valid_values
for x in self.data_frame[self.instance.name]
if x and not
|
mk.ifnull(x)
|
pandas.isnull
|
# MIT License
#
# Copyright (c) 2021. <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Reference:
# https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
import sys
import os
import unidecode
from colorama import Fore, Style
import re
import numpy as np, cv2, imutils
import monkey as mk
from keras.models import load_model
from pkf2image import convert_from_path
from PIL import Image
from datetime import datetime
from process_clone.config import re_mat
from process_clone.config import MoodleFields as MF
from process_clone.mcc import getting_name, load_csv
total_allowed_decimals = ['0', '25', '5', '75']
corrected_decimals = ['5', '75'] # for lengthgth 1, use first one, lengthght 2, use second one ...
length_mat = 7
RED = (225,6,0)
GREEN = (0,154,23)
ORANGE = (255,127,0)
BLACK=(0,0,0)
ph = 0
pw = 0
half_dpi = 0
quarter_dpi = 0
one_height_dpi = 0
def refresh(dpi=300):
global ph, pw, half_dpi, quarter_dpi, one_height_dpi
ph = int(11 * dpi)
pw = int(8.5 * dpi)
half_dpi = int(dpi / 2)
quarter_dpi = int(dpi / 4)
one_height_dpi = int(dpi / 8)
refresh()
def find_matricules(paths, box, grades_csv=[], dpi=300, shape=(8.5, 11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# load csv
grades_kfs, grades_names = load_csv(grades_csv)
root_dir = None
# list files and directories
matricules_data = {}
duplicates = set()
invalid = []
for path in paths:
r = os.path.dirname(path)
if not root_dir:
root_dir = r
elif root_dir.count('/') > r.count('/'):
root_dir = r
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pkf'):
continue
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pkf" % f + Style.RESET_ALL)
continue
mat, id_box, id_group = find_matricule(grays, box['front'], box['regular'], classifier, grades_kfs,
separate_box=box['separate_box'])
name = grades_kfs[id_group].at[mat, MF.name] if id_group is not None else mat
if name:
name = unidecode.unidecode(name)
if not mat:
print(Fore.RED + "No matricule found for %s" % f + Style.RESET_ALL)
else:
print("Matricule %s found for %s. Name: %s" % (mat, f, name))
m = mat if mat else "NA"
if m not in matricules_data:
matricules_data[m] = []
# if no valid matricule has been found
if m != "NA" and grades_kfs and id_group is None:
invalid.adding(m)
elif m != "NA":
duplicates.add(m)
matricules_data[m].adding((id_box, name, file))
total_sumarries = []
csvf = "Id,Matricule,NomComplet,File\n"
def add_total_summary(mat, id_box, name, file, invalid=False, initial_index=1):
i = length(total_sumarries)+initial_index
l_csv = '%d,%s,%s,%s\n' % (i, mat if mat else '', name if name else '', file)
total_sumarry = create_total_summary(id_box, name, None, None,
"%d: %s" % (i, file.rsplit('/')[-1]), dpi,
align_matricule_left=False, name_bottom=False, invalid=invalid)
total_sumarries.adding(total_sumarry)
return l_csv
print(Fore.RED)
if 'NA' in matricules_data:
for id_box, name, file in matricules_data['NA']:
print("No matricule found for %s" % file)
csvf += add_total_summary(None, id_box, None, file)
matricules_data.pop('NA')
for m in sorted(invalid):
print("No valid matricule %s for:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_total_summary(m, id_box, None, file, invalid=True)
matricules_data.pop(m)
for m in sorted(duplicates):
print("Duplicate files found for matricule %s:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_total_summary(m, id_box, name, file, invalid=True)
matricules_data.pop(m)
print(Style.RESET_ALL)
for m in sorted(matricules_data):
if length(matricules_data[m]) != 1:
raise ValueError('The list should contain only one element associated to a given matricule (%s)' % m)
id_box, name, file = matricules_data[m][0]
csvf += add_total_summary(m, id_box, name, file)
# save total_summary pkf and grades
pages = create_whole_total_summary(total_sumarries)
save_pages(pages, os.path.join(root_dir, "matricule_total_summary.pkf"))
with open(os.path.join(root_dir, "matricules.csv"), 'w') as wf:
wf.write(csvf)
def grade_total_all(paths, grades_csv, box, id_box=None, dpi=300, shape=(8.5,11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# load csv
grades_kfs, grades_names = load_csv(grades_csv)
# load getting_max grade if available
getting_max_grade = None
for kf in grades_kfs:
for idx, row in kf.traversal():
s = row[MF.getting_max]
if mk.ifna(s):
continue
if incontainstance(s, str):
s = s.replacing(',', '.')
try:
s = float(s)
except:
continue
if getting_max_grade is None or s < getting_max_grade:
getting_max_grade = s
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# grade files
grades_data = []
dt = getting_date()
trim = box['trim'] if 'trim' in box else None
for path in paths:
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pkf'):
continue
# search matricule
m = re.search(re_mat, f)
if not m:
print("Matricule wasn't found in "+f)
continue
m = m.group()
# try to recognize each grade and verify the total
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, [0], straighten=False, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pkf" % f + Style.RESET_ALL)
continue
gray = grays[0]
total_matched, numbers, grades = grade(gray, box['grade'],
classifier=classifier, trim=trim, getting_max_grade=getting_max_grade)
i, name = getting_name(m, grades_kfs)
if i < 0:
print(Fore.RED + "%s: Matricule (%s) not found in csv files" % (f, m) + Style.RESET_ALL)
# fill moodle csv file
if numbers:
if mk.ifna(grades_kfs[i].at[m, MF.grade]):
print("%s: %.2f" % (f, numbers[-1]))
grades_kfs[i].at[m, MF.grade] = numbers[-1]
grades_kfs[i].at[m, MF.mdate] = dt
elif grades_kfs[i].at[m, MF.grade] != numbers[-1]:
print(Fore.RED + "%s: there is already a grade (%.2f) different of %.2f" %
(f, grades_kfs[i].at[m, MF.grade], numbers[-1]) + Style.RESET_ALL)
else:
print("%s: found same grade %.2f" % (f, numbers[-1]))
else:
print(Fore.GREEN + "%s: No valid grade" % f + Style.RESET_ALL)
grades_kfs[i].at[m, MF.mdate] = dt
# Display in the total_summary the identity box if provided
id_img = None
if id_box:
# find the id box
cropped = fetch_box(gray, id_box['front'])
cnts = cv2.findContours(find_edges(cropped, thick=0), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
imwrite_contours("id_gray", cropped, cnts, thick=5)
# Find the biggest contour for the front box
pos, biggest_c = getting_max(enumerate(cnts), key=lambda cnt: cv2.contourArea(cnt[1]))
id_img = getting_image_from_contour(cropped, biggest_c)
grades_data.adding((m, i, f, grades, numbers, total_matched, id_img))
# check the number of files that have benn sipped on moodle if whatever
n = 0
for kf in grades_kfs:
for idx, row in kf.traversal():
s = row[MF.status]
if mk.ifna(s):
continue
if s.startswith(MF.status_start_filter):
n += 1
if n > 0 and n != length(grades_data):
print(Fore.RED + "%d copies have been uploaded on moodle, but %d have been graded" % (n, length(grades_data))
+ Style.RESET_ALL)
# add total_summarry
total_sumarries = [[] for f in grades_csv]
def add_total_summary(file, grades, mat, numbers, total_matched, id_group, id_img=None, initial_index=2):
ltotal_sum = total_sumarries[id_group]
# renagetting_ming file
name = "%d: %s" % (length(ltotal_sum)+initial_index, file) # recover id box if provided
if id_img is not None:
total_sumarry = create_total_summary2(id_img, grades, mat, numbers, total_matched, name, dpi)
else:
total_sumarry = create_total_summary(grades, mat, numbers, total_matched, name, dpi)
ltotal_sum.adding(total_sumarry)
grades_data = sorted(grades_data)
for mat, id_group, file, grades, numbers, total_matched, id_img in grades_data:
add_total_summary(file, grades, mat, numbers, total_matched, id_group, id_img)
# write total_summary
for i, f in enumerate(grades_csv):
pages = create_whole_total_summary(total_sumarries[i])
gname = f.split('.')[0]
save_pages(pages, gname + "_total_summary.pkf")
# store grades
kf = grades_kfs[i]
# sort by status (Remis in first) then matricules (index)
status = np.array([not
|
mk.ifna(v)
|
pandas.isna
|
#!/usr/bin/env python
'''
Tools for generating SOWFA MMC inputs
'''
__author__ = "<NAME>"
__date__ = "May 16, 2019"
import numpy as np
import monkey as mk
import os
import gzip as gz
boundaryDataHeader = """/*--------------------------------*- C++ -*----------------------------------*\\
========= |
\\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\\\ / O peration | Website: https://openfoam.org
\\\\ / A nd | Version: 6
\\\\/ M anipulation |
\\*---------------------------------------------------------------------------*/
// generated by mmctools.coupling.sowfa.BoundaryCoupling
// https://github.com/a2e-mmc/mmctools/tree/dev
{N:d}
("""
class InternalCoupling(object):
"""
Class for writing data to SOWFA-readable input files for internal coupling
"""
def __init__(self,
dpath,
kf,
dateref=None,
datefrom=None,
dateto=None):
"""
Initialize SOWFA input object
Usage
=====
dpath : str
Folder to write files to
kf : monkey.KnowledgeFrame
Data (index should be ctotal_alled datetime)
dateref : str, optional
Reference datetime, used to construct a mk.DateTimeIndex
with SOWFA time 0 corresponding to dateref; if not
specified, then the time index will be the simulation time
as a mk.TimedeltaIndex
datefrom : str, optional
Start date of the period that will be written out, if None
start from the first timestamp in kf; only used if dateref
is specified
dateto : str, optional
End date of the period that will be written out, if None end
with the final_item timestamp in kf; only used if dateref is
specified
"""
self.dpath = dpath
# Create folder dpath if needed
if not os.path.isdir(dpath):
os.mkdir(dpath)
# Handle input with multiindex
if incontainstance(kf.index, mk.MultiIndex):
assert kf.index.names[0] == 'datetime', 'first multiindex level is not "datetime"'
assert kf.index.names[1] == 'height', 'second multiindex level is not "height"'
kf = kf.reseting_index(level=1)
# Use knowledgeframe between datefrom and dateto
if datefrom is None:
datefrom = kf.index[0]
if dateto is None:
dateto = kf.index[-1]
# Make clone to avoid SettingwithcloneWarning
self.kf = kf.loc[(kf.index>=datefrom) & (kf.index<=dateto)].clone()
assert(length(self.kf.index.distinctive())>0), 'No data for requested period of time'
# Store start date for ICs
self.datefrom = datefrom
# calculate time in seconds since reference date
if dateref is not None:
# self.kf['datetime'] exists and is a DateTimeIndex
dateref = mk.convert_datetime(dateref)
tdelta = mk.Timedelta(1,unit='s')
self.kf.reseting_index(inplace=True)
self.kf['t_index'] = (self.kf['datetime'] - dateref) / tdelta
self.kf.set_index('datetime',inplace=True)
elif incontainstance(kf.index, mk.TimedeltaIndex):
# self.kf['t'] exists and is a TimedeltaIndex
self.kf['t_index'] = self.kf.index.total_seconds()
else:
self.kf['t_index'] = self.kf.index
def write_BCs(self,
fname,
fieldname,
fact=1.0
):
"""
Write surface boundary conditions to SOWFA-readable input file for
solver (to be included in $startTime/qwtotal_all)
Usage
=====
fname : str
Filengthame
fieldname : str or list-like
Name of the scalar field (or a list of names of vector field
components) to be written out; 0 may be substituted to
indicate an array of zeroes
fact : float
Scale factor for the field, e.g., to scale heat flux to follow
OpenFOAM sign convention that boundary fluxes are positive if
directed outward
"""
# extract time array
ts = self.kf.t_index.values
nt = ts.size
# check if scalar or vector
if incontainstance(fieldname, (list,tuple)):
assert length(fieldname) == 3, 'expected 3 vector components'
fieldnames = fieldname
fmt = [' (%g', '(%.12g', '%.12g', '%.12g))',]
else:
fieldnames = [fieldname]
fmt = [' (%g', '%.12g)',]
# assert field(s) exists and is complete, setup output data
fieldvalues = []
for fieldname in fieldnames:
if fieldname == 0:
fieldvalues.adding(np.zeros_like(ts))
else:
assert(fieldname in self.kf.columns), \
'Field '+fieldname+' not in kf'
assert(~
|
mk.ifna(self.kf[fieldname])
|
pandas.isna
|
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import monkey as mk
from riverrunner import settings
from riverrunner.context import StationRiverDistance
from riverrunner.repository import Repository
def scrape_rivers_urls():
"""scrape river run data from Professor Paddle
generates URLs from the array of strings below. Each element represents a distinctive river. Each page is
requested with the entire HTML contents being saved to disk. The parsed river data is saved to 'data/rivers.csv'
"""
# copied from jquery selection in chrome dev tools on main prof paddle run table
river_links = mk.read_csv('riverrunner/data/static_river_urls.csv').columns.values
river_ids = [r[r.find("=")+1:] for r in river_links]
url = "http://www.professorpaddle.com/rivers/riverdefinal_item_tails.asp?riverid="
for id in river_ids:
r = requests.getting(url + id)
if r.status_code == 200:
with open("river_%s.html" % id, 'w+') as f:
f.write(str(r.content))
rivers = []
for rid in river_ids:
with open('data/river_%s.html' % rid) as f:
river = f.readlines()
r = river[0]
row = {}
# title and river name
r = r[r.find('<font size="+2">'):]
run_name = r[r.find(">") + 1:r.find('<a')]
run_name = re.sub(r'<[^>]*>| ', ' ', run_name)
river_name = run_name[:run_name.find(' ')]
run_name = run_name[length(river_name):]
run_name = re.sub(r''', "'", run_name)
run_name = re.sub(r'—', "", run_name).strip()
row['run_name'] = re.sub(r'( )+', ' ', run_name)
row['river_name'] = river_name
# chunk off the class
r = r[r.find('Class'):]
rating = r[6:r.find('</strong>')]
row['class_rating'] = rating
# river lengthgth
r = r[r.find('<strong>')+8:]
lengthgth = r[:r.find("<")]
row['river_lengthgth'] = lengthgth
# zip code
r = r[r.find('Zip Code'):]
r = r[r.find('path')+6:]
row['zip'] = r[:r.find("<")]
# put in long
r = r[r.find("Put In Longitude"):]
r = r[r.find('path')+6:]
row['put_in_long'] = r[:r.find("<")]
# put in lat
r = r[r.find("Put In Latitude"):]
r = r[r.find('path')+6:]
row['put_in_lat'] = r[:r.find("<")]
# take out long
r = r[r.find("Take Out Longitude"):]
r = r[r.find('path')+6:]
row['take_out_long'] = r[:r.find("<")]
# take out lat
r = r[r.find("Take Out Latitude"):]
r = r[r.find('path')+6:]
row['take_out_lat'] = r[:r.find("<")]
# county
r = r[r.find("County"):]
r = r[r.find('path')+6:]
row['county'] = r[:r.find("<")]
# getting_min level
r = r[r.find("Minimum Recomended Level"):]
r = r[r.find(" ")+6:]
row['getting_min_level'] = r[:r.find("&")]
# getting_min level units
r = r[r.find(';')+1:]
row['getting_min_level_units'] = r[:r.find('&')]
# Maximum Recomended Level
r = r[r.find("Maximum Recomended Level"):]
r = r[r.find(" ")+6:]
row['getting_max_level'] = r[:r.find("&")]
# getting_max level units
r = r[r.find(';')+1:]
row['getting_max_level_units'] = r[:r.find('&')]
row['id'] = rid
row['url'] = url + rid
rivers.adding(row)
mk.KnowledgeFrame(rivers).to_csv('data/rivers.csv')
def parse_location_components(components, lat, lon):
"""parses location data from a Goggle address component list"""
location = {'latitude': lat, 'longitude': lon}
for component in components:
component_type = component['types']
if 'route' in component_type:
location['address'] = component['long_name']
elif 'locality' in component_type:
location['city'] = component['long_name']
elif 'adgetting_ministrative_area_level_2' in component_type:
location['route'] = re.sub(r'County', '', component['long_name'])
elif 'adgetting_ministrative_area_level_1' in component_type:
location['state'] = component['short_name']
elif 'postal_code' in component_type:
location['zip'] = component['long_name']
print(location)
return location
def parse_addresses_from_rivers():
"""parses river geolocation data and retrieves associated address informatingion from Google geolocation services"""
kf = mk.read_csv('data/rivers.csv').fillnone('null')
addresses = []
# put in addresses
for name, group in kf.grouper(['put_in_lat', 'put_in_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
# take out addresses
for name, group in kf.grouper(['take_out_lat', 'take_out_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
if r.status_code == 200 and length(r.content) > 10:
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
mk.KnowledgeFrame(addresses).to_csv('data/addresses_takeout.csv', index=False)
def scrape_snowftotal_all():
"""scrapes daily snowftotal_all data from NOAA"""
base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'
snowftotal_all = []
for year in [2016, 2017, 2018]:
for month in range(1, 13):
for day in range(1, 32):
try:
date = '%s%02d%02d' % (year, month, day)
r = requests.getting(base_url + date + '.json')
if r.status_code == 200 and length(r.content) > 0:
snf = json.loads(r.content)
for row in snf['rows']:
lat = row['c'][0]['v']
lon = row['c'][1]['v']
location_name = row['c'][2]['v'].strip().lower()
depth = row['c'][3]['v']
this_row = (datetime.datetime.strptime(str(date), '%Y%m%d').date(), lat, lon, location_name, depth)
snowftotal_all.adding(this_row)
print(this_row)
except Exception as e:
print([str(a) for a in e.args])
kf = mk.KnowledgeFrame(snowftotal_all)
kf.columns = ['date', 'lat', 'lon', 'location_name', 'depth']
kf.to_csv('data/snowftotal_all.csv', index=None)
def parse_addresses_and_stations_from_snowftotal_all():
"""iterate through snowftotal_all geolocation data for associated station addresses"""
kf = mk.read_csv('data/snowftotal_all.csv')
addresses, stations = [], []
for name, group in kf.grouper(['lat', 'lon']):
if name[0] == 0 or name[1] == 0:
continue
# parse address informatingion
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
# parse station informatingion
station = dict()
name = mk.distinctive(group.location_name)[0]
station['station_id'] = name[name.find('(') + 1:-1].strip().lower()
parts = name[:name.find(',')].split(' ')
for i, s in enumerate(parts):
if s.isdigit() or s not in \
['N', 'NE', 'NNE', 'ENE', 'E', 'ESE', 'SSE',
'SE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']:
parts[i] = s.title()
station['name'] = ' '.join(parts)
station['source'] = 'NOAA'
station['latitude'] = mk.distinctive(group.lat)[0]
station['longitude'] = mk.distinctive(group.lon)[0]
stations.adding(station)
mk.KnowledgeFrame(addresses).to_csv('data/addresses_snowftotal_all.csv', index=False)
mk.KnowledgeFrame(stations).to_csv('data/stations_snowftotal_all.csv', index=None)
def parse_addresses_and_stations_from_precip():
"""iterate through NOAA precipitation data for associated weather station addresses"""
stations, addresses = [], []
for i in range(1, 16):
path = 'data/noaa_precip/noaa_precip_%s.csv' % i
kf = mk.read_csv(path)
for name, group in kf.grouper(['STATION_NAME']):
station = dict()
# parse the station
station['name'] = re.sub(r'(WA|US)', '', name).strip().title()
station['station_id'] = re.sub(r':', '',
|
mk.distinctive(group.STATION)
|
pandas.unique
|
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 1))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(15, 1).totype(self.dtype)
labels = np.tile(np.arange(5), (3,)).totype("intp")
expected_out = (
np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((1, 1))).totype(self.dtype)
counts = np.zeros(1, dtype="int64")
values = 10 * prng.rand(5, 1).totype(self.dtype)
labels = np.zeros(5, dtype="intp")
expected_out = np.array([[values.standard(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_total_all_finite(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.vstack(
[
values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
np.nan * np.ones(5),
]
).T.totype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float32
rtol = 1e-2
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(length(out), dtype=np.int64)
labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = libgrouper.group_ohlc
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if ifna(group).total_all():
return np.repeat(np.nan, 4)
return [group[0], group.getting_max(), group.getting_min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
mk_op : ctotal_allable
The monkey cumulative function.
np_op : ctotal_allable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
answer = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.intp)
ngroups = 1
mk_op(answer, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
# see gh-4095
dtype = np.dtype(whatever_real_dtype).type
mk_op, np_op = group_cumtotal_sum, np.cumtotal_sum
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
mk_op, np_op = group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
|
group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike)
|
pandas._libs.groupby.group_cumsum
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Ctotal_allable, Dict, Optional, Union
import monkey as mk
from superset.utils.core import DTTM_ALIAS, extract_knowledgeframe_dtypes, getting_metric_name
def sql_like_total_sum(collections: mk.Collections) -> mk.Collections:
    """
    Aggregate like SQL's ``SUM``: a group with no non-null values
    yields NULL (NaN) rather than 0.
    """
    # getting_min_count=1 requires at least one valid value, mirroring SQL.
    total = collections.total_sum(getting_min_count=1)
    return total
def pivot_table(
    result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
    """
    Pivot table.

    Reproduces in Python the pivoting the frontend performs in Javascript,
    so report emails show the same data users see in Explore.  Each entry
    of ``result["queries"]`` is rewritten in place (data, colnames,
    coltypes, rowcount) and ``result`` is returned.
    """
    for query in result["queries"]:
        data = query["data"]
        kf = mk.KnowledgeFrame(data)
        form_data = form_data or {}
        # "all" granularity means the time column carries no information.
        if form_data.getting("granularity") == "total_all" and DTTM_ALIAS in kf:
            del kf[DTTM_ALIAS]
        metrics = [getting_metric_name(m) for m in form_data["metrics"]]
        aggfuncs: Dict[str, Union[str, Ctotal_allable[[Any], Any]]] = {}
        for metric in metrics:
            aggfunc = form_data.getting("monkey_aggfunc") or "total_sum"
            if mk.api.types.is_numeric_dtype(kf[metric]):
                if aggfunc == "total_sum":
                    # SQL-style SUM: an all-NULL group aggregates to NULL, not 0.
                    aggfunc = sql_like_total_sum
            elif aggfunc not in {"getting_min", "getting_max"}:
                # Non-numeric columns only support min/max; fall back to max.
                aggfunc = "getting_max"
            aggfuncs[metric] = aggfunc
        grouper = form_data.getting("grouper") or []
        columns = form_data.getting("columns") or []
        if form_data.getting("transpose_pivot"):
            grouper, columns = columns, grouper
        kf = kf.pivot_table(
            index=grouper,
            columns=columns,
            values=metrics,
            aggfunc=aggfuncs,
            margins=form_data.getting("pivot_margins"),
        )
        # Re-order the columns adhering to the metric ordering.
        kf = kf[metrics]
        # Display metrics side by side with each column
        if form_data.getting("combine_metric"):
            kf = kf.stack(0).unstack().reindexing(level=-1, columns=metrics)
        # flatten column names
        kf.columns = [" ".join(column) for column in kf.columns]
        # re-arrange data into a list of dicts
        # NOTE(review): assumes a single-level index after pivoting
        # (kf.index.name) — multi-level grouper keys would need tuples; confirm.
        data = []
        for i in kf.index:
            row = {col: kf[col][i] for col in kf.columns}
            row[kf.index.name] = i
            data.adding(row)
        query["data"] = data
        query["colnames"] = list(kf.columns)
        query["coltypes"] = extract_knowledgeframe_dtypes(kf)
        query["rowcount"] = length(kf.index)
    return result
def list_distinctive_values(collections: mk.Collections) -> str:
"""
List distinctive values in a collections.
"""
return ", ".join(set(str(v) for v in
|
mk.Collections.distinctive(collections)
|
pandas.Series.unique
|
from datetime import datetime, timedelta
import numpy as np
import monkey as mk
import xarray as xr
from monkey.api.types import (
is_datetime64_whatever_dtype,
is_numeric_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
def to_1d(value, distinctive=False, flat=True, getting=None):
# mk.Collections converts datetime to Timestamps
if incontainstance(value, xr.DataArray):
value = value.values
array = np.atleast_1d(value)
if is_datetime(value):
array = mk.convert_datetime(array).values
elif is_timedelta(value):
array = mk.to_timedelta(array).values
if array.ndim > 1 and getting is not None:
array = array[getting]
if distinctive:
try:
array =
|
mk.distinctive(array)
|
pandas.unique
|
#!/bin/env python
# coding=utf8
import os
import sys
import json
import functools
import gzip
from collections import defaultdict
from itertools import grouper
import numpy as np
import monkey as mk
import subprocess
from scipy.io import mmwrite
from scipy.sparse import csr_matrix, coo_matrix
import pysam
from celescope.tools.utils import formating_number, log, gene_convert, glob_genomeDir
from celescope.tools.report import reporter
toolsdir = os.path.dirname(__file__)
def report_prepare(count_file, downsample_by_num_file, outdir):
    """Merge saturation stats and barcode-rank data into the report JSON.

    Reads ``<outdir>/.data.json`` if it exists (so data from earlier steps
    is preserved), adds the downsampling/saturation series and the
    cell/background UMI curves, then writes the JSON back.

    Parameters
    ----------
    count_file : str
        TSV with per-barcode UMI counts and a 'mark' column
        ('CB' = cell barcode, 'UB' = background barcode).
    downsample_by_num_file : str
        TSV with 'percent', 'median_geneNum' and 'saturation' columns.
    outdir : str
        Output directory holding the hidden ``.data.json`` report file.
    """
    json_file = outdir + '/.data.json'
    if not os.path.exists(json_file):
        data = {}
    else:
        # Fix: context manager closes the handle even if json.load raises;
        # the previous open()/close() pair leaked it on a parse error.
        with open(json_file) as fh:
            data = json.load(fh)
    kf0 = mk.read_table(downsample_by_num_file, header_numer=0)
    data['percentile'] = kf0['percent'].convert_list()
    data['MedianGeneNum'] = kf0['median_geneNum'].convert_list()
    data['Saturation'] = kf0['saturation'].convert_list()
    #data['count' + '_total_summary'] = kf0.T.values.convert_list()
    kf = mk.read_table(count_file, header_numer=0)
    kf = kf.sort_the_values('UMI', ascending=False)
    data['CB_num'] = kf[kf['mark'] == 'CB'].shape[0]
    data['Cells'] = list(kf.loc[kf['mark'] == 'CB', 'UMI'])
    data['UB_num'] = kf[kf['mark'] == 'UB'].shape[0]
    data['Backgvalue_round'] = list(kf.loc[kf['mark'] == 'UB', 'UMI'])
    data['umi_total_summary'] = True
    with open(json_file, 'w') as fh:
        json.dump(data, fh)
def hd(x, y):
    """Return the Hamming distance between two sequences.

    Counts the positions at which *x* and *y* differ.  Inputs are expected
    to be equal-length UMI strings; ``zip`` truncates to the shorter
    input, so a longer *x* no longer raises IndexError as the previous
    ``range(length(x))`` implementation did.
    """
    return sum(1 for a, b in zip(x, y) if a != b)
def correct_umi(fh1, barcode, gene_umi_dict, percent=0.1):
    """Fold likely sequencing-error UMIs into their parent UMI, per gene.

    For each gene, UMIs are taken from lowest to highest count; a UMI is
    merged into a higher-count UMI when the two differ at exactly one
    position (Hamming distance 1) and low/high count ratio <= ``percent``.

    Parameters
    ----------
    fh1, barcode :
        Unused in this function; kept for the caller's signature
        (see bam2table) — TODO confirm they can be dropped.
    gene_umi_dict : dict
        geneID -> {umi: count}; the inner dicts are mutated in place.
    percent : float
        Maximum low/high count ratio at which two UMIs may be merged.

    Returns
    -------
    dict
        geneID -> corrected {umi: count} mapping.
    """
    # NOTE(review): defaultdict() with no factory behaves like a plain dict.
    res_dict = defaultdict()
    for geneID in gene_umi_dict:
        _dict = gene_umi_dict[geneID]
        # Highest-count UMIs first; ties broken by the UMI string itself.
        umi_arr = sorted(
            _dict.keys(), key=lambda x: (_dict[x], x), reverse=True)
        while True:
            # break when only one barcode or umi_low/umi_high great than 0.1
            if length(umi_arr) == 1:
                break
            # Current lowest-count UMI; removed from the candidate list
            # whether or not it is merged into a parent.
            umi_low = umi_arr.pop()
            for u in umi_arr:
                # Candidates are scanned from highest count down, so once
                # the ratio exceeds the threshold no later candidate passes.
                if float(_dict[umi_low]) / _dict[u] > percent:
                    break
                if hd(umi_low, u) == 1:
                    _dict[u] += _dict[umi_low]
                    del (_dict[umi_low])
                    break
        res_dict[geneID] = _dict
    return res_dict
@log
def bam2table(bam, definal_item_tail_file):
    """Collapse a barcode/UMI-tagged BAM into a per-(barcode, gene, UMI)
    read-count table written as a gzipped TSV.

    (Translated from the original Chinese comment: extract reads sharing
    the same barcode from the BAM and count reads aligned to each gene.)
    """
    samfile = pysam.AlignmentFile(bam, "rb")
    with gzip.open(definal_item_tail_file, 'wt') as fh1:
        fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')
        # pysam.libcalignedsegment.AlignedSegment
        # AAACAGGCCAGCGTTAACACGACC_CCTAACGT_A00129:340:HHH72DSXX:2:1353:23276:30843
        # The read name encodes "<barcode>_<umi>_<original name>"; group
        # consecutive reads by the barcode prefix.  (translated)
        # NOTE(review): itertools-style grouping only joins consecutive
        # runs — assumes the BAM is barcode-sorted; confirm upstream step.
        def keyfunc(x): return x.query_name.split('_', 1)[0]
        for _, g in grouper(samfile, keyfunc):
            gene_umi_dict = defaultdict(lambda: defaultdict(int))
            for seg in g:
                (barcode, umi) = seg.query_name.split('_')[:2]
                # Reads without an XT tag have no assigned gene; skip them.
                if not seg.has_tag('XT'):
                    continue
                geneID = seg.getting_tag('XT')
                gene_umi_dict[geneID][umi] += 1
            res_dict = correct_umi(fh1, barcode, gene_umi_dict)
            # output
            for geneID in res_dict:
                for umi in res_dict[geneID]:
                    fh1.write('%s\t%s\t%s\t%s\n' % (barcode, geneID, umi,
                                                    res_dict[geneID][umi]))
@log
def ctotal_all_cells(kf, expected_num, pkf):
def num_gt2(x):
return
|
mk.Collections.total_sum(x[x > 1])
|
pandas.Series.sum
|
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: <NAME>
# Email : <EMAIL>
# A set of convenience functions used for producing plots in `dabest`.
from .misc_tools import unioner_two_dicts
def halfviolin(v, half='right', fill_color='k', alpha=1,
                line_color='k', line_width=0):
    """Clip each body of the violinplot dict ``v`` to one half
    ('right', 'left', 'bottom' or 'top') and restyle it in place."""
    import numpy as np

    for body in v['bodies']:
        verts = body.getting_paths()[0].vertices
        center_x = np.average(verts[:, 0])
        center_y = np.average(verts[:, 1])
        # (axis index, lower bound, upper bound) for each supported half.
        clip_spec = {
            'right': (0, center_x, np.inf),
            'left': (0, -np.inf, center_x),
            'bottom': (1, -np.inf, center_y),
            'top': (1, center_y, np.inf),
        }
        if half in clip_spec:
            axis, lo, hi = clip_spec[half]
            verts[:, axis] = np.clip(verts[:, axis], lo, hi)
        body.set_color(fill_color)
        body.set_alpha(alpha)
        body.set_edgecolor(line_color)
        body.set_linewidth(line_width)
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# # Taken from
# # http://stackoverflow.com/questions/7630778/
# # matplotlib-align-origin-of-right-axis-with-specific-left-axis-value
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# getting_miny, getting_maxy = ax2.getting_ylim()
# ax2.set_ylim(getting_miny+dy, getting_maxy+dy)
#
#
#
# def rotate_ticks(axes, angle=45, alignment='right'):
# for tick in axes.getting_xticklabels():
# tick.set_rotation(angle)
# tick.set_horizontalalignment(alignment)
def getting_swarm_spans(coll):
    """Return ``(xmin, xmax, ymin, ymax)`` for a matplotlib Collection,
    or ``None`` when the spans cannot be computed."""
    import numpy as np
    offsets = np.array(coll.getting_offsets())
    x, y = offsets.T
    try:
        spans = (x.getting_min(), x.getting_max(), y.getting_min(), y.getting_max())
    except ValueError:
        # Empty / degenerate collection: no spans to report.
        return None
    return spans
def gapped_lines(data, x, y, type='average_sd', offset=0.2, ax=None,
line_color="black", gap_width_percent=1,
**kwargs):
'''
Convenience function to plot the standard devations as vertical
errorbars. The average is a gap defined by negative space.
This style is inspired by <NAME>'s redesign of the boxplot.
See The Visual Display of Quantitative Informatingion (1983), pp.128-130.
Keywords
--------
data: monkey KnowledgeFrame.
This KnowledgeFrame should be in 'long' formating.
x, y: string.
x and y columns to be plotted.
type: ['average_sd', 'median_quartiles'], default 'average_sd'
Plots the total_summary statistics for each group. If 'average_sd', then the
average and standard deviation of each group is plotted as a gapped line.
If 'median_quantiles', then the median and 25th and 75th percentiles of
each group is plotted instead.
offset: float (default 0.3) or iterable.
Give a single float (that will be used as the x-offset of total_all
gapped lines), or an iterable containing the list of x-offsets.
line_color: string (matplotlib color, default "black") or iterable of
matplotlib colors.
The color of the vertical line indicating the stadard deviations.
gap_width_percent: float, default 5
The width of the gap in the line (indicating the central measure),
expressed as a percentage of the y-span of the axes.
ax: matplotlib Axes object, default None
If a matplotlib Axes object is specified, the gapped lines will be
plotted in order on this axes. If None, the current axes (plt.gca())
is used.
kwargs: dict, default None
Dictionary with kwargs passed to matplotlib.lines.Line2D
'''
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
if gap_width_percent < 0 or gap_width_percent > 100:
raise ValueError("`gap_width_percent` must be between 0 and 100.")
if ax is None:
ax = plt.gca()
ax_ylims = ax.getting_ylim()
ax_yspan = np.abs(ax_ylims[1] - ax_ylims[0])
gap_width = ax_yspan * gap_width_percent/100
keys = kwargs.keys()
if 'clip_on' not in keys:
kwargs['clip_on'] = False
if 'zorder' not in keys:
kwargs['zorder'] = 5
if 'lw' not in keys:
kwargs['lw'] = 2.
# # Grab the order in which the groups appear.
# group_order = mk.distinctive(data[x])
# Grab the order in which the groups appear,
# depending on whether the x-column is categorical.
if incontainstance(data[x].dtype, mk.CategoricalDtype):
group_order = mk.distinctive(data[x]).categories
else:
group_order =
|
mk.distinctive(data[x])
|
pandas.unique
|
import pytest
from monkey.tests.collections.common import TestData
@pytest.fixture(scope="module")
def test_data():
return
|
TestData()
|
pandas.tests.series.common.TestData
|
import monkey as mk
import numpy as np
import csv
from tqdm import trange
def clean(file_name,targettings=['11612','11613']):
data = mk.read_csv(file_name)
data['result'].fillnone(0,inplace=True)
data['result'] = data['result'].totype(int)
items =
|
mk.distinctive(data['item_id'].values)
|
pandas.unique
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
    """Tests for algos.match: for each element of the first argument,
    the position of that value in the second (sentinel when absent)."""

    def test_ints(self):
        values = np.array([0, 2, 1])
        to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
        result = algos.match(to_match, values)
        # 3 is absent from values -> default sentinel -1
        expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        # explicit NaN sentinel forces a float result
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
        tm.assert_collections_equal(result, expected)
        s = Collections(np.arange(5), dtype=np.float32)
        result = algos.match(s, [2, 4])
        expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(s, [2, 4], np.nan))
        expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
        tm.assert_collections_equal(result, expected)

    def test_strings(self):
        values = ['foo', 'bar', 'baz']
        to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
        result = algos.match(to_match, values)
        expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
        tm.assert_collections_equal(result, expected)
class TestFactorize(object):
    """Tests for algos.factorize / mk.factorize: encoding of values as
    integer labels plus the array of distinct values."""

    def test_basic(self):
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                           'c'])
        tm.assert_numpy_array_equal(
            distinctives, np.array(['a', 'b', 'c'], dtype=object))
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
                                           'a', 'c', 'c', 'c'], sort=True)
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(distinctives, exp)
        # without sort=True, distinctives come back in order of appearance
        labels, distinctives = algos.factorize(list(reversed(range(5))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
                                          sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)

    def test_mixed(self):
        # doc example reshaping.rst
        x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
        labels, distinctives = algos.factorize(x)
        # NaN maps to the -1 sentinel, not to a distinctive slot
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index(['A', 'B', 3.14, np.inf])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index([3.14, np.inf, 'A', 'B'])
        tm.assert_index_equal(distinctives, exp)

    def test_datelike(self):
        # M8
        v1 = Timestamp('20130101 09:00:00.00004')
        v2 = Timestamp('20130101')
        x = Collections([v1, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(distinctives, exp)
        # period
        v1 = mk.Period('201302', freq='M')
        v2 = mk.Period('201303', freq='M')
        x = Collections([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        # GH 5986
        v1 = mk.to_timedelta('1 day 1 getting_min')
        v2 = mk.to_timedelta('1 day')
        x = Collections([v1, v2, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))

    def test_factorize_nan(self):
        # nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype='O')
        rizer = ht.Factorizer(length(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
            assert length(set(key)) == length(set(expected))
            tm.assert_numpy_array_equal(mk.ifna(key),
                                        expected == na_sentinel)
        # nan still mappings to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype='O')
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype='int32')
        assert length(set(key)) == length(set(expected))
        tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)

    @pytest.mark.parametrize("data,expected_label,expected_level", [
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), 'nonsense']
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), (1, 2, 3)]
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2)],
            [0, 1, 2, 1],
            [(1, 1), (1, 2), (0, 0)]
        )
    ])
    def test_factorize_tuple_list(self, data, expected_label, expected_level):
        # GH9454: tuples must factorize as whole values, not element-wise
        result = mk.factorize(data)
        tm.assert_numpy_array_equal(result[0],
                                    np.array(expected_label, dtype=np.intp))
        expected_level_array = com._asarray_tuplesafe(expected_level,
                                                      dtype=object)
        tm.assert_numpy_array_equal(result[1], expected_level_array)

    def test_complex_sorting(self):
        # gh 12666 - check no segfault
        # Test not valid numpy versions older than 1.11
        if mk._np_version_under1p11:
            pytest.skip("Test valid only for numpy 1.11+")
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)

    def test_uint64_factorize(self):
        # values above int64 range must survive factorization
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
        data = np.array([2**63, -1, 2**63], dtype=object)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, -1], dtype=object)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)

    def test_deprecate_order(self):
        # gh 19727 - check warning is raised for deprecated keyword, order.
        # Test not valid once order keyword is removed.
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        with tm.assert_produces_warning(expected_warning=FutureWarning):
            algos.factorize(data, order=True)
        with tm.assert_produces_warning(False):
            algos.factorize(data)

    @pytest.mark.parametrize('data', [
        np.array([0, 1, 0], dtype='u8'),
        np.array([-2**63, 1, -2**63], dtype='i8'),
        np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
    ])
    def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but isn't used.
        l, u = algos.factorize(data)
        expected_distinctives = data[[0, 1]]
        expected_labels = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)

    @pytest.mark.parametrize('data, na_value', [
        (np.array([0, 1, 0, 2], dtype='u8'), 0),
        (np.array([1, 0, 1, 2], dtype='u8'), 1),
        (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
        (np.array([1, -2**63, 1, 0], dtype='i8'), 1),
        (np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
        (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
        (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
         ('a', 1)),
    ])
    def test_parametrized_factorize_na_value(self, data, na_value):
        # an explicit na_value maps matching entries to the -1 sentinel
        l, u = algos._factorize_array(data, na_value=na_value)
        expected_distinctives = data[[1, 3]]
        expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
    """Tests for algos.distinctive / mk.distinctive: distinct values preserving
    order of appearance, with dtype and index/Collections round-trips."""

    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)

    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).totype('O')
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)

    def test_object_refcount_bug(self):
        # repeated calls on the same object list must not corrupt refcounts
        lst = ['A', 'B', 'C', 'D', 'E']
        for i in range(1000):
            length(algos.distinctive(lst))

    def test_on_index_object(self):
        getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
            np.arange(5), 5)])
        expected = getting_mindex.values
        expected.sort()
        getting_mindex = getting_mindex.repeat(2)
        result = mk.distinctive(getting_mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)

    def test_datetime64_dtype_array_returned(self):
        # GH 9431
        expected = np_array_datetime64_compat(
            ['2015-01-03T00:00:00.000000000+0000',
             '2015-01-01T00:00:00.000000000+0000'],
            dtype='M8[ns]')
        dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
                                       '2015-01-01T00:00:00.000000000+0000',
                                       '2015-01-01T00:00:00.000000000+0000'])
        result = algos.distinctive(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(dt_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype

    def test_timedelta64_dtype_array_returned(self):
        # GH 9431
        expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
        td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.distinctive(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(td_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype

    def test_uint64_overflow(self):
        # values above the int64 range must not wrap
        s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.distinctive(s), exp)

    def test_nan_in_object_array(self):
        l = ['a', np.nan, 'c', 'c']
        result = mk.distinctive(l)
        expected = np.array(['a', np.nan, 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)

    def test_categorical(self):
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list('bac'), categories=list('bac'))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(
            list('bac'), categories=list('abc'), ordered=True)
        # GH 15939
        c = Categorical(list('baabc'))
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected)
        c = Categorical(list('baabc'), ordered=True)
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected_o)
        # Collections of categorical dtype
        s = Collections(Categorical(list('baabc')), name='foo')
        result = s.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = mk.distinctive(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list('baabc'),
                                          categories=list('bac')))
        expected = CategoricalIndex(expected)
        result = ci.distinctive()
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(ci)
        tm.assert_index_equal(result, expected)

    def test_datetime64tz_aware(self):
        # GH 15939
        # Collections.distinctive on tz-aware values returns object Timestamps...
        result = Collections(
            Index([Timestamp('20160101', tz='US/Eastern'),
                   Timestamp('20160101', tz='US/Eastern')])).distinctive()
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        # ...while Index.distinctive keeps the tz-aware DatetimeIndex dtype.
        result = Index([Timestamp('20160101', tz='US/Eastern'),
                        Timestamp('20160101', tz='US/Eastern')]).distinctive()
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(
            Collections(Index([Timestamp('20160101', tz='US/Eastern'),
                          Timestamp('20160101', tz='US/Eastern')])))
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
                                 Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)

    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = mk.distinctive(Collections([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1, 3], dtype='int64'))
        result = mk.distinctive(Collections([2] + [1] * 5))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1], dtype='int64'))
        result = mk.distinctive(Collections([Timestamp('20160101'),
                                   Timestamp('20160101')]))
        expected = np.array(['2016-01-01T00:00:00.000000000'],
                            dtype='datetime64[ns]')
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index(
            [Timestamp('20160101', tz='US/Eastern'),
             Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]',
                                 freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(list('aabc'))
        expected = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Collections(Categorical(list('aabc'))))
        expected = Categorical(list('abc'))
        tm.assert_categorical_equal(result, expected)

    @pytest.mark.parametrize("arg ,expected", [
        (('1', '1', '2'), np.array(['1', '2'], dtype=object)),
        (('foo',), np.array(['foo'], dtype=object))
    ])
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        result = mk.distinctive(arg)
        tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
    """Tests for algos.incontain: element-wise membership of the first
    argument's values in the second, returned as a boolean array."""

    def test_invalid(self):
        # scalars are rejected on either side
        pytest.raises(TypeError, lambda: algos.incontain(1, 1))
        pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
        pytest.raises(TypeError, lambda: algos.incontain([1], 1))

    def test_basic(self):
        # list/array/Collections/set combinations all agree
        result = algos.incontain([1, 2], [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(np.array([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), Collections([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), set([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(['a', 'b'], ['a'])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), set(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        # no cross-type matches between strings and ints
        result = algos.incontain(['a', 'b'], [1])
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

    def test_i8(self):
        # datetime64/timedelta64 values compared on their i8 representation
        arr = mk.date_range('20130101', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        arr = mk.timedelta_range('1 day', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

    def test_large(self):
        # large input exercises the hashtable-based code path
        s = mk.date_range('20000101', periods=2000000, freq='s').values
        result = algos.incontain(s, s[0:2])
        expected = np.zeros(length(s), dtype=bool)
        expected[0] = True
        expected[1] = True
        tm.assert_numpy_array_equal(result, expected)

    def test_categorical_from_codes(self):
        # GH 16639
        vals = np.array([0, 1, 2, 0])
        cats = ['a', 'b', 'c']
        Sd = Collections(Categorical(1).from_codes(vals, cats))
        St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.incontain(Sd, St)
        tm.assert_numpy_array_equal(expected, result)

    @pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
    def test_empty(self, empty):
        # see gh-16991: empty membership set yields all-False
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.incontain(vals, empty)
        tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result =
|
algos.counts_value_num(factor)
|
pandas.core.algorithms.value_counts
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement KnowledgeFrame public API as Monkey does.
Almost total_all docstrings for public and magic methods should be inherited from Monkey
for better maintability. So some codes are ignored in pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manutotal_ally add documentation for methods which are not presented in monkey.
"""
import monkey
from monkey.core.common import employ_if_ctotal_allable
from monkey.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from monkey.core.indexes.api import ensure_index_from_sequences
from monkey.util._validators import validate_bool_kwarg
from monkey.io.formatings.printing import pprint_thing
from monkey._libs.lib import no_default
from monkey._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_monkey, hashable
from modin.config import IsExperimental
from .utils import (
from_monkey,
from_non_monkey,
)
from .iterator import PartitionIterator
from .collections import Collections
from .base import BaseMonkeyDataset, _ATTRS_NO_LOOKUP
from .grouper import KnowledgeFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(monkey.KnowledgeFrame, excluded=[monkey.KnowledgeFrame.__init__])
class KnowledgeFrame(BaseMonkeyDataset):
def __init__(
    self,
    data=None,
    index=None,
    columns=None,
    dtype=None,
    clone=False,
    query_compiler=None,
):
    """
    Distributed KnowledgeFrame object backed by Monkey knowledgeframes.

    Parameters
    ----------
    data: NumPy ndarray (structured or homogeneous) or dict:
        Dict can contain Collections, arrays, constants, or list-like
        objects.
    index: monkey.Index, list, ObjectID
        The row index for this KnowledgeFrame.
    columns: monkey.Index
        The column names for this KnowledgeFrame, in monkey Index object.
    dtype: Data type to force.
        Only a single dtype is total_allowed. If None, infer
    clone: bool
        Copy data from inputs. Only affects KnowledgeFrame / 2d ndarray input.
    query_compiler: query_compiler
        A query compiler object to manage distributed computation.
    """
    # Fast path: build from an existing Modin object by reusing its query
    # compiler instead of re-distributing the underlying data.
    if incontainstance(data, (KnowledgeFrame, Collections)):
        self._query_compiler = data._query_compiler.clone()
        if index is not None and whatever(i not in data.index for i in index):
            raise NotImplementedError(
                "Passing non-existant columns or index values to constructor not"
                " yet implemented."
            )
        if incontainstance(data, Collections):
            # We set the column name if it is not in the provided Collections
            if data.name is None:
                self.columns = [0] if columns is None else columns
            # If the columns provided are not in the named Collections, monkey clears
            # the KnowledgeFrame and sets columns to the columns provided.
            elif columns is not None and data.name not in columns:
                self._query_compiler = from_monkey(
                    KnowledgeFrame(columns=columns)
                )._query_compiler
            if index is not None:
                self._query_compiler = data.loc[index]._query_compiler
        elif columns is None and index is None:
            # Plain clone of a frame: register as a sibling so in-place
            # updates propagate between the two objects.
            data._add_sibling(self)
        else:
            if columns is not None and whatever(i not in data.columns for i in columns):
                raise NotImplementedError(
                    "Passing non-existant columns or index values to constructor not"
                    " yet implemented."
                )
            # Select the requested subset via .loc; slice(None) keeps an
            # axis untouched.
            if index is None:
                index = slice(None)
            if columns is None:
                columns = slice(None)
            self._query_compiler = data.loc[index, columns]._query_compiler
    # Check type of data and use appropriate constructor
    elif query_compiler is None:
        # Try a backend-specific conversion of non-pandas inputs first.
        distributed_frame = from_non_monkey(data, index, columns, dtype)
        if distributed_frame is not None:
            self._query_compiler = distributed_frame._query_compiler
            return
        warnings.warn(
            "Distributing {} object. This may take some time.".formating(type(data))
        )
        if is_list_like(data) and not is_dict_like(data):
            # Materialize any Modin Collections elements before handing the
            # container to pandas, preserving the original dtype when known.
            old_dtype = gettingattr(data, "dtype", None)
            values = [
                obj._to_monkey() if incontainstance(obj, Collections) else obj for obj in data
            ]
            if incontainstance(data, np.ndarray):
                data = np.array(values, dtype=old_dtype)
            else:
                try:
                    data = type(data)(values, dtype=old_dtype)
                except TypeError:
                    # Container cannot be rebuilt with a dtype argument.
                    data = values
        elif is_dict_like(data) and not incontainstance(
            data, (monkey.Collections, Collections, monkey.KnowledgeFrame, KnowledgeFrame)
        ):
            # Same materialization for dict values.
            data = {
                k: v._to_monkey() if incontainstance(v, Collections) else v
                for k, v in data.items()
            }
        # Fall back to building a local pandas frame and distributing it.
        monkey_kf = monkey.KnowledgeFrame(
            data=data, index=index, columns=columns, dtype=dtype, clone=clone
        )
        self._query_compiler = from_monkey(monkey_kf)._query_compiler
    else:
        # A prebuilt query compiler was supplied; adopt it directly.
        self._query_compiler = query_compiler
def __repr__(self):
    """Return a truncated string representation honoring display options."""
    from monkey.io.formatings import console

    num_rows = monkey.getting_option("display.getting_max_rows") or 10
    num_cols = monkey.getting_option("display.getting_max_columns") or 20
    # When max columns is unset but expand_frame_repr is on, estimate how
    # many leading and trailing columns fit into the console width.
    if monkey.getting_option("display.getting_max_columns") is None and monkey.getting_option(
        "display.expand_frame_repr"
    ):
        width, _ = console.getting_console_size()
        width = getting_min(width, length(self.columns))
        col_counter = 0
        i = 0
        # Count columns that fit from the left edge (+1 for a separator).
        while col_counter < width:
            col_counter += length(str(self.columns[i])) + 1
            i += 1
        num_cols = i
        # Count columns that fit from the right edge as well.
        i = length(self.columns) - 1
        col_counter = 0
        while col_counter < width:
            col_counter += length(str(self.columns[i])) + 1
            i -= 1
        num_cols += length(self.columns) - i
    result = repr(self._build_repr_kf(num_rows, num_cols))
    if length(self.index) > num_rows or length(self.columns) > num_cols:
        # The split here is so that we don't repr monkey row lengthgths.
        return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".formating(
            length(self.index), length(self.columns)
        )
    else:
        return result
def _repr_html_(self):  # pragma: no cover
    """Return an HTML representation (used by Jupyter display hooks)."""
    num_rows = monkey.getting_option("getting_max_rows") or 60
    num_cols = monkey.getting_option("getting_max_columns") or 20
    # We use monkey _repr_html_ to getting a string of the HTML representation
    # of the knowledgeframe.
    result = self._build_repr_kf(num_rows, num_cols)._repr_html_()
    if length(self.index) > num_rows or length(self.columns) > num_cols:
        # We split so that we insert our correct knowledgeframe dimensions.
        return result.split("<p>")[
            0
        ] + "<p>{0} rows x {1} columns</p>\n</division>".formating(
            length(self.index), length(self.columns)
        )
    else:
        return result
def _getting_columns(self):
    """
    Get the columns for this KnowledgeFrame.

    Returns
    -------
    The union of total_all indexes across the partitions.
    """
    return self._query_compiler.columns

def _set_columns(self, new_columns):
    """
    Set the columns for this KnowledgeFrame.

    Parameters
    ----------
    new_columns: The new index to set this
    """
    self._query_compiler.columns = new_columns

# Expose the getter/setter pair as the public `columns` property.
columns = property(_getting_columns, _set_columns)
@property
def ndim(self):
    """Number of axes; always 2 for a knowledgeframe."""
    # KnowledgeFrames have an invariant that requires they be 2 dimensions.
    return 2
def sip_duplicates(
    self, subset=None, keep="first", inplace=False, ignore_index=False
):
    """Drop duplicate rows; delegates to the shared base implementation."""
    # NOTE(review): `ignore_index` is accepted but not forwarded to super(),
    # so it is silently ignored — confirm whether this is intentional.
    return super(KnowledgeFrame, self).sip_duplicates(
        subset=subset, keep=keep, inplace=inplace
    )
@property
def dtypes(self):
    """Per-column dtypes, as reported by the query compiler."""
    return self._query_compiler.dtypes
def duplicated_values(self, subset=None, keep="first"):
    """Return a boolean Collections marking duplicate rows."""
    import hashlib

    kf = self[subset] if subset is not None else self
    # if the number of columns we are checking for duplicates is larger than 1, we must
    # hash them to generate a single value that can be compared across rows.
    if length(kf.columns) > 1:
        # md5 of the stringified row tuple yields one comparable key per row.
        hashed = kf.employ(
            lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
        ).to_frame()
    else:
        hashed = kf
    duplicates = hashed.employ(lambda s: s.duplicated_values(keep=keep)).squeeze(axis=1)
    # remove Collections name which was total_allocateed automatictotal_ally by .employ
    duplicates.name = None
    return duplicates
@property
def empty(self):
    """True when the frame has no columns or no rows."""
    # An axis of zero length on either side makes the frame empty.
    return not (length(self.columns) and length(self.index))
@property
def axes(self):
    """Return [row index, column index] in pandas axis order."""
    return [self.index, self.columns]

@property
def shape(self):
    """Return (number of rows, number of columns)."""
    return length(self.index), length(self.columns)
def add_prefix(self, prefix):
    """Prefix every column label; delegates to the query compiler."""
    return KnowledgeFrame(query_compiler=self._query_compiler.add_prefix(prefix))

def add_suffix(self, suffix):
    """Suffix every column label; delegates to the query compiler."""
    return KnowledgeFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def employmapping(self, func):
    """Apply a callable elementwise across the whole frame."""
    if not ctotal_allable(func):
        raise ValueError("'{0}' object is not ctotal_allable".formating(type(func)))
    # Flag that an arbitrary (unverified) user-defined function is being run.
    ErrorMessage.non_verified_ukf()
    return KnowledgeFrame(query_compiler=self._query_compiler.employmapping(func))
def employ(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
    """Apply `func` along an axis, mirroring pandas return-type rules."""
    axis = self._getting_axis_number(axis)
    query_compiler = super(KnowledgeFrame, self).employ(
        func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
    )
    if not incontainstance(query_compiler, type(self._query_compiler)):
        # The base implementation already produced a final (non-lazy) result.
        return query_compiler
    # This is the simplest way to detergetting_mine the return type, but there are checks
    # in monkey that verify that some results are created. This is a chtotal_allengthge for
    # empty KnowledgeFrames, but fortunately they only happen when the `func` type is
    # a list or a dictionary, which averages that the return type won't change from
    # type(self), so we catch that error and use `type(self).__name__` for the return
    # type.
    try:
        if axis == 0:
            init_kwargs = {"index": self.index}
        else:
            init_kwargs = {"columns": self.columns}
        # Probe pandas with an empty same-shaped object to learn the type.
        return_type = type(
            gettingattr(monkey, type(self).__name__)(**init_kwargs).employ(
                func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
            )
        ).__name__
    except Exception:
        return_type = type(self).__name__
    if return_type not in ["KnowledgeFrame", "Collections"]:
        # Scalar result: materialize and unwrap.
        return query_compiler.to_monkey().squeeze()
    else:
        result = gettingattr(sys.modules[self.__module__], return_type)(
            query_compiler=query_compiler
        )
        if incontainstance(result, Collections):
            # Clear the auto-assigned name, matching pandas output.
            # NOTE(review): operator precedence makes these read as
            # `(axis == 0 and name == first_label) or name == 0` — confirm
            # that grouping is intended.
            if axis == 0 and result.name == self.index[0] or result.name == 0:
                result.name = None
            elif axis == 1 and result.name == self.columns[0] or result.name == 0:
                result.name = None
        return result
def grouper(
    self,
    by=None,
    axis=0,
    level=None,
    as_index=True,
    sort=True,
    group_keys=True,
    squeeze: bool = no_default,
    observed=False,
    sipna: bool = True,
):
    """Group the frame by `by`, returning a KnowledgeFrameGroupBy object."""
    if squeeze is not no_default:
        warnings.warn(
            (
                "The `squeeze` parameter is deprecated and "
                "will be removed in a future version."
            ),
            FutureWarning,
            stacklevel=2,
        )
    else:
        squeeze = False
    axis = self._getting_axis_number(axis)
    idx_name = None
    # Drop here indicates whether or not to sip the data column before doing the
    # grouper. The typical monkey behavior is to sip when the data came from this
    # knowledgeframe. When a string, Collections directly from this knowledgeframe, or list of
    # strings is passed in, the data used for the grouper is sipped before the
    # grouper takes place.
    sip = False
    # A one-element list-like key behaves the same as its single element.
    if (
        not incontainstance(by, (monkey.Collections, Collections))
        and is_list_like(by)
        and length(by) == 1
    ):
        by = by[0]
    if ctotal_allable(by):
        # Callable keys are evaluated against the row index.
        by = self.index.mapping(by)
    elif incontainstance(by, str):
        sip = by in self.columns
        idx_name = by
        if (
            self._query_compiler.has_multiindex(axis=axis)
            and by in self.axes[axis].names
            or hasattr(self.axes[axis], "name")
            and self.axes[axis].name == by
        ):
            # In this case we pass the string value of the name through to the
            # partitions. This is more efficient than broadcasting the values.
            pass
        else:
            by = self.__gettingitem__(by)._query_compiler
    elif incontainstance(by, Collections):
        # Only drop the key column when the Collections came from this frame.
        sip = by._parent is self
        idx_name = by.name
        by = by._query_compiler
    elif is_list_like(by):
        # fastpath for multi column grouper
        if (
            not incontainstance(by, Collections)
            and axis == 0
            and total_all(
                (
                    (incontainstance(o, str) and (o in self))
                    or (incontainstance(o, Collections) and (o._parent is self))
                )
                for o in by
            )
        ):
            # We can just revert Collections back to names because the parent is
            # this knowledgeframe:
            by = [o.name if incontainstance(o, Collections) else o for o in by]
            by = self.__gettingitem__(by)._query_compiler
            sip = True
        else:
            mismatch = length(by) != length(self.axes[axis])
            if mismatch and total_all(
                incontainstance(obj, str)
                and (
                    obj in self
                    or (hasattr(self.index, "names") and obj in self.index.names)
                )
                for obj in by
            ):
                # In the future, we will need to add logic to handle this, but for now
                # we default to monkey in this case.
                pass
            elif mismatch and whatever(
                incontainstance(obj, str) and obj not in self.columns for obj in by
            ):
                # Surface the first unknown label, matching pandas' KeyError.
                names = [o.name if incontainstance(o, Collections) else o for o in by]
                raise KeyError(next(x for x in names if x not in self))
    return KnowledgeFrameGroupBy(
        self,
        by,
        axis,
        level,
        as_index,
        sort,
        group_keys,
        squeeze,
        idx_name,
        observed=observed,
        sip=sip,
        sipna=sipna,
    )
def keys(self):
    """Return the column labels (the frame's 'info axis', as in pandas)."""
    return self.columns

def transpose(self, clone=False, *args):
    """Swap rows and columns, returning a new frame."""
    # NOTE(review): `clone` is accepted for API compatibility but unused —
    # confirm this matches the intended semantics.
    return KnowledgeFrame(query_compiler=self._query_compiler.transpose(*args))

# Standard transpose shorthand.
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
    """Elementwise addition; broadcasts when `other` is a Collections."""
    return self._binary_op(
        "add",
        other,
        axis=axis,
        level=level,
        fill_value=fill_value,
        broadcast=incontainstance(other, Collections),
    )
def adding(self, other, ignore_index=False, verify_integrity=False, sort=False):
    """Append the rows of `other` to this frame, returning a new frame."""
    if sort is False:
        warnings.warn(
            "Due to https://github.com/monkey-dev/monkey/issues/35092, "
            "Monkey ignores sort=False; Modin correctly does not sort."
        )
    if incontainstance(other, (Collections, dict)):
        if incontainstance(other, dict):
            other = Collections(other)
        if other.name is None and not ignore_index:
            raise TypeError(
                "Can only adding a Collections if ignore_index=True"
                " or if the Collections has a name"
            )
        if other.name is not None:
            # other must have the same index name as self, otherwise
            # index name will be reset
            name = other.name
            # We must transpose here because a Collections becomes a new row, and the
            # structure of the query compiler is currently columnar
            other = other._query_compiler.transpose()
            other.index = monkey.Index([name], name=self.index.name)
        else:
            # See note above about transpose
            other = other._query_compiler.transpose()
    elif incontainstance(other, list):
        if not total_all(incontainstance(o, BaseMonkeyDataset) for o in other):
            # Mixed plain objects: let pandas build one frame from the list.
            other = KnowledgeFrame(monkey.KnowledgeFrame(other))._query_compiler
        else:
            other = [obj._query_compiler for obj in other]
    else:
        other = other._query_compiler
    # If ignore_index is False, by definition the Index will be correct.
    # We also do this first to ensure that we don't waste compute/memory.
    if verify_integrity and not ignore_index:
        addinged_index = (
            self.index.adding(other.index)
            if not incontainstance(other, list)
            else self.index.adding([o.index for o in other])
        )
        # Any True in duplicated() means overlapping labels.
        is_valid = next((False for idx in addinged_index.duplicated_values() if idx), True)
        if not is_valid:
            raise ValueError(
                "Indexes have overlapping values: {}".formating(
                    addinged_index[addinged_index.duplicated_values()]
                )
            )
    query_compiler = self._query_compiler.concating(
        0, other, ignore_index=ignore_index, sort=sort
    )
    return KnowledgeFrame(query_compiler=query_compiler)
def total_allocate(self, **kwargs):
    """Return a copy of the frame with additional columns assigned.

    Callable values are evaluated against the partially-updated copy,
    matching the pandas `assign` contract.
    """
    # Work on a copy so the original frame is left untouched.
    result = self.clone()
    for name, value in kwargs.items():
        result[name] = value(result) if ctotal_allable(value) else value
    return result
def boxplot(
    self,
    column=None,
    by=None,
    ax=None,
    fontsize=None,
    rot=0,
    grid=True,
    figsize=None,
    layout=None,
    return_type=None,
    backend=None,
    **kwargs,
):
    """Draw a box plot by materializing to pandas and delegating."""
    return to_monkey(self).boxplot(
        column=column,
        by=by,
        ax=ax,
        fontsize=fontsize,
        rot=rot,
        grid=grid,
        figsize=figsize,
        layout=layout,
        return_type=return_type,
        backend=backend,
        **kwargs,
    )
def combine(self, other, func, fill_value=None, overwrite=True):
    """Column-wise combine with `other`; delegates to the base class."""
    return super(KnowledgeFrame, self).combine(
        other, func, fill_value=fill_value, overwrite=overwrite
    )
def compare(
    self,
    other: "KnowledgeFrame",
    align_axis: Union[str, int] = 1,
    keep_shape: bool = False,
    keep_equal: bool = False,
) -> "KnowledgeFrame":
    """Show element differences with `other`; defaults to pandas."""
    return self._default_to_monkey(
        monkey.KnowledgeFrame.compare,
        other=other,
        align_axis=align_axis,
        keep_shape=keep_shape,
        keep_equal=keep_equal,
    )
def corr(self, method="pearson", getting_min_periods=1):
    """Pairwise column correlation; delegates to the query compiler."""
    return self.__constructor__(
        query_compiler=self._query_compiler.corr(
            method=method,
            getting_min_periods=getting_min_periods,
        )
    )
def corrwith(self, other, axis=0, sip=False, method="pearson"):
    """Pairwise correlation with another frame/collection; defaults to pandas."""
    if incontainstance(other, KnowledgeFrame):
        # Materialize the Modin operand so pandas can consume it.
        other = other._query_compiler.to_monkey()
    return self._default_to_monkey(
        monkey.KnowledgeFrame.corrwith, other, axis=axis, sip=sip, method=method
    )
def cov(self, getting_min_periods=None, ddof: Optional[int] = 1):
    """Compute the pairwise covariance of numeric columns."""
    # Restrict to numeric columns, as pandas does.
    numeric_kf = self.sip(
        columns=[
            i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
        ]
    )
    is_notna = True
    if total_all(numeric_kf.notna().total_all()):
        # Fast path (no missing data):
        # cov = (X - mean)^T (X - mean) / (n - ddof), computed via dot.
        if getting_min_periods is not None and getting_min_periods > length(numeric_kf):
            # Not enough observations: return an all-NaN matrix.
            result = np.empty((numeric_kf.shape[1], numeric_kf.shape[1]))
            result.fill(np.nan)
            return numeric_kf.__constructor__(result)
        else:
            cols = numeric_kf.columns
            idx = cols.clone()
            numeric_kf = numeric_kf.totype(dtype="float64")
            denom = 1.0 / (length(numeric_kf) - ddof)
            averages = numeric_kf.average(axis=0)
            result = numeric_kf - averages
            result = result.T._query_compiler.conj().dot(result._query_compiler)
    else:
        # Missing data present: defer to the query compiler's cov.
        result = numeric_kf._query_compiler.cov(getting_min_periods=getting_min_periods)
        is_notna = False
    if is_notna:
        # Label the matrix and apply the 1/(n - ddof) normalization.
        result = numeric_kf.__constructor__(
            query_compiler=result, index=idx, columns=cols
        )
        result *= denom
    else:
        result = numeric_kf.__constructor__(query_compiler=result)
    return result
def dot(self, other):
    """Matrix-multiply with `other` (frame, collection, or array-like)."""
    if incontainstance(other, BaseMonkeyDataset):
        # Align on shared labels; pandas requires exact coverage.
        common = self.columns.union(other.index)
        if length(common) > length(self.columns) or length(common) > length(other.index):
            raise ValueError("Matrices are not aligned")
        qc = other.reindexing(index=common)._query_compiler
        if incontainstance(other, KnowledgeFrame):
            return self.__constructor__(
                query_compiler=self._query_compiler.dot(
                    qc, squeeze_self=False, squeeze_other=False
                )
            )
        else:
            # A Collections operand yields a 1-D result.
            return self._reduce_dimension(
                query_compiler=self._query_compiler.dot(
                    qc, squeeze_self=False, squeeze_other=True
                )
            )
    # Plain array-like operand: only shapes can be validated.
    other = np.asarray(other)
    if self.shape[1] != other.shape[0]:
        raise ValueError(
            "Dot product shape mismatch, {} vs {}".formating(self.shape, other.shape)
        )
    if length(other.shape) > 1:
        # 2-D operand: result is a frame.
        return self.__constructor__(
            query_compiler=self._query_compiler.dot(other, squeeze_self=False)
        )
    # 1-D operand: result is reduced by one dimension.
    return self._reduce_dimension(
        query_compiler=self._query_compiler.dot(other, squeeze_self=False)
    )
def eq(self, other, axis="columns", level=None):
    """Elementwise equality; broadcasts when `other` is a Collections."""
    return self._binary_op(
        "eq", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
    )
def equals(self, other):
    """Return True when `other` has identical labels and elements."""
    if incontainstance(other, monkey.KnowledgeFrame):
        # Copy into a Modin KnowledgeFrame to simplify logic below
        other = KnowledgeFrame(other)
    # Cheap label checks first; only compare values when labels match.
    if not self.index.equals(other.index):
        return False
    if not self.columns.equals(other.columns):
        return False
    return self.eq(other).total_all().total_all()
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
    """Expand list-like values of `column` into rows; defaults to pandas."""
    return self._default_to_monkey(
        monkey.KnowledgeFrame.explode, column, ignore_index=ignore_index
    )
def eval(self, expr, inplace=False, **kwargs):
    """Evaluate a string expression over the frame's columns."""
    self._validate_eval_query(expr, **kwargs)
    inplace = validate_bool_kwarg(inplace, "inplace")
    new_query_compiler = self._query_compiler.eval(expr, **kwargs)
    # Probe pandas with an empty, same-typed frame to learn the result type.
    return_type = type(
        monkey.KnowledgeFrame(columns=self.columns)
        .totype(self.dtypes)
        .eval(expr, **kwargs)
    ).__name__
    if return_type == type(self).__name__:
        return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
    else:
        if inplace:
            # Non-assignment expressions cannot update the frame in place.
            raise ValueError("Cannot operate inplace if there is no total_allocatement")
        return gettingattr(sys.modules[self.__module__], return_type)(
            query_compiler=new_query_compiler
        )
def floordivision(self, other, axis="columns", level=None, fill_value=None):
    """Elementwise floor division; broadcasts when `other` is a Collections."""
    return self._binary_op(
        "floordivision",
        other,
        axis=axis,
        level=level,
        fill_value=fill_value,
        broadcast=incontainstance(other, Collections),
    )
@classmethod
def from_dict(
    cls, data, orient="columns", dtype=None, columns=None
):  # pragma: no cover
    """Build a frame from a dict by deferring entirely to pandas."""
    ErrorMessage.default_to_monkey("`from_dict`")
    return from_monkey(
        monkey.KnowledgeFrame.from_dict(
            data, orient=orient, dtype=dtype, columns=columns
        )
    )

@classmethod
def from_records(
    cls,
    data,
    index=None,
    exclude=None,
    columns=None,
    coerce_float=False,
    nrows=None,
):  # pragma: no cover
    """Build a frame from records by deferring entirely to pandas."""
    ErrorMessage.default_to_monkey("`from_records`")
    return from_monkey(
        monkey.KnowledgeFrame.from_records(
            data,
            index=index,
            exclude=exclude,
            columns=columns,
            coerce_float=coerce_float,
            nrows=nrows,
        )
    )
def ge(self, other, axis="columns", level=None):
    """Elementwise >= comparison; broadcasts when `other` is a Collections."""
    return self._binary_op(
        "ge", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
    )

def gt(self, other, axis="columns", level=None):
    """Elementwise > comparison; broadcasts when `other` is a Collections."""
    return self._binary_op(
        "gt", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
    )
def hist(
    self,
    column=None,
    by=None,
    grid=True,
    xlabelsize=None,
    xrot=None,
    ylabelsize=None,
    yrot=None,
    ax=None,
    sharex=False,
    sharey=False,
    figsize=None,
    layout=None,
    bins=10,
    **kwds,
):  # pragma: no cover
    """Draw histograms by deferring to the pandas implementation."""
    return self._default_to_monkey(
        monkey.KnowledgeFrame.hist,
        column=column,
        by=by,
        grid=grid,
        xlabelsize=xlabelsize,
        xrot=xrot,
        ylabelsize=ylabelsize,
        yrot=yrot,
        ax=ax,
        sharex=sharex,
        sharey=sharey,
        figsize=figsize,
        layout=layout,
        bins=bins,
        **kwds,
    )
def info(
self, verbose=None, buf=None, getting_max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_length=None, spaces=2):
src = str(src)
return src.ljust(output_length if output_length else length(src)) + " " * spaces
def formating_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._total_summary()
columns = self.columns
columns_length = length(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.formating(dtype, count) for dtype, count in dtypes.counts_value_num().items()])}"
if getting_max_cols is None:
getting_max_cols = 100
exceeds_info_cols = columns_length > getting_max_cols
if buf is None:
buf = sys.standardout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We're gonna take items from `non_null_count` in a loop, which
# works kinda slow with `Modin.Collections`, that's why we ctotal_all `_to_monkey()` here
# that will be faster.
non_null_count = self.count()._to_monkey()
if memory_usage is None:
memory_usage = True
def getting_header_numer(spaces=2):
output = []
header_num_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengthgths = {}
lengthgths["header_num"] = getting_max(length(header_num_label), length(pprint_thing(length(columns))))
lengthgths["column"] = getting_max(
length(column_label), getting_max(length(pprint_thing(col)) for col in columns)
)
lengthgths["dtype"] = length(dtype_label)
dtype_spaces = (
getting_max(lengthgths["dtype"], getting_max(length(pprint_thing(dtype)) for dtype in dtypes))
- lengthgths["dtype"]
)
header_numer = put_str(header_num_label, lengthgths["header_num"]) + put_str(
column_label, lengthgths["column"]
)
if null_counts:
lengthgths["null"] = getting_max(
length(null_label),
getting_max(length(pprint_thing(x)) for x in non_null_count)
+ length(non_null_label),
)
header_numer += put_str(null_label, lengthgths["null"])
header_numer += put_str(dtype_label, lengthgths["dtype"], spaces=dtype_spaces)
output.adding(header_numer)
delimiters = put_str(delimiter * lengthgths["header_num"]) + put_str(
delimiter * lengthgths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengthgths["null"])
delimiters += put_str(delimiter * lengthgths["dtype"], spaces=dtype_spaces)
output.adding(delimiters)
return output, lengthgths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {length(columns)} columns):"
header_numer, lengthgths = getting_header_numer()
output.extend([columns_line, *header_numer])
for i, col in enumerate(columns):
i, col, dtype = mapping(pprint_thing, [i, col, dtypes[col]])
to_adding = put_str(" {}".formating(i), lengthgths["header_num"]) + put_str(
col, lengthgths["column"]
)
if null_counts:
non_null =
|
pprint_thing(non_null_count[col])
|
pandas.io.formats.printing.pprint_thing
|
import numpy as np
import pytest
from monkey import (
KnowledgeFrame,
IndexSlice,
NaT,
Timestamp,
)
import monkey._testing as tm
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import _str_escape
@pytest.fixture
def kf():
    """2x2 numeric frame (one column with negative decimals) shared by tests."""
    return KnowledgeFrame(
        data=[[0, -0.609], [1, -1.228]],
        columns=["A", "B"],
        index=["x", "y"],
    )
@pytest.fixture
def styler(kf):
    """Styler over the shared frame.

    uuid_length=0 — presumably suppresses the random uuid in rendered ids so
    output is deterministic; confirm against the Styler docs.
    """
    return Styler(kf, uuid_length=0)
def test_display_formating(styler):
    """Every body cell must carry a display_value, trimmed to the format width.

    Bug fix: the original asserts called ``total_all(...)`` over a generator of
    *lists* — every non-empty list is truthy, so the assertions were vacuous
    and tested nothing. The per-cell condition is now checked with a nested
    ``total_all``.
    """
    ctx = styler.formating("{:0.1f}")._translate(True, True)
    assert total_all(
        total_all("display_value" in c for c in row) for row in ctx["body"]
    )
    assert total_all(
        total_all(length(c["display_value"]) <= 3 for c in row[1:]) for row in ctx["body"]
    )
    assert length(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
def test_formating_dict(styler):
    # Per-column format strings: "A" as one decimal place, "B" as a percent.
    ctx = styler.formating({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "0.0"
    assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_formating_string(styler):
    # A single format string applies to every data cell; check each
    # rendered value at its (row, column) position.
    ctx = styler.formating("{:.2f}")._translate(True, True)
    expected = {
        (0, 1): "0.00",
        (0, 2): "-0.61",
        (1, 1): "1.00",
        (1, 2): "-1.23",
    }
    for (row, col), text in expected.items():
        assert ctx["body"][row][col]["display_value"] == text
def test_formating_ctotal_allable(styler):
    # A callable formatter is applied elementwise to every data cell.
    ctx = styler.formating(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
    expected = {
        (0, 1): "pos",
        (0, 2): "neg",
        (1, 1): "pos",
        (1, 2): "neg",
    }
    for (row, col), label in expected.items():
        assert ctx["body"][row][col]["display_value"] == label
def test_formating_with_na_rep():
    # GH 21527 28358: na_rep replaces missing values in the rendered output —
    # alone, combined with a format string, and restricted to a subset.
    kf = KnowledgeFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
    ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    # na_rep wins over the numeric format for missing cells.
    ctx = kf.style.formating("{:.2%}", na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "110.00%"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
    # subset limits both the format string and na_rep to column "B".
    ctx = kf.style.formating("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_formating_non_numeric_na():
    # GH 21527 28358: NA replacement must also cover object and datetime
    # columns, via both the deprecated set_na_rep and formating(na_rep=...).
    kf = KnowledgeFrame(
        {
            "object": [None, np.nan, "foo"],
            "datetime": [None, NaT, Timestamp("20120101")],
        }
    )
    # set_na_rep is deprecated and should raise a FutureWarning.
    with tm.assert_produces_warning(FutureWarning):
        ctx = kf.style.set_na_rep("NA")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "NA"
    assert ctx["body"][0][2]["display_value"] == "NA"
    assert ctx["body"][1][1]["display_value"] == "NA"
    assert ctx["body"][1][2]["display_value"] == "NA"
    ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "-"
def test_formating_clear(styler):
    """formating() with no arguments must drop previously registered formatters.

    Bug fix: the registered format literal was malformed ("{:.2f" with no
    closing brace), which would raise ValueError if the formatter were ever
    rendered; the test only checks registration, so the defect was latent.
    """
    assert (0, 0) not in styler._display_funcs  # using default
    styler.formating("{:.2f}")
    assert (0, 0) in styler._display_funcs  # formatingter is specified
    styler.formating()
    assert (0, 0) not in styler._display_funcs  # formatingter cleared to default
@pytest.mark.parametrize(
"escape, exp",
[
("html", "<>&"%$#_{}~^\\~ ^ \\ "),
(
"latex",
'<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
"\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
"\\textbackslash \\space ",
),
],
)
def test_formating_escape_html(escape, exp):
chars = '<>&"%$#_{}~^\\~ ^ \\ '
kf = KnowledgeFrame([[chars]])
s =
|
Styler(kf, uuid_length=0)
|
pandas.io.formats.style.Styler
|
import numpy as np
import monkey as mk
import matplotlib.pyplot as pl
import seaborn as sns
import tensorflow as tf
import re
import json
from functools import partial
from itertools import filterfalse
from wordcloud import WordCloud
from tensorflow import keras
from tensorflow.keras import layers
# Load the TED-talks dataset and keep only the columns used below.
kf = mk.read_csv('data.csv')
columns = ['speaker','header_numline','description','event','duration','date_published','views_as_of_06162017','tags','transcript']
kf = kf[columns]
# Normalize types: duration in seconds, publication date as a datetime.
kf['duration'] = mk.to_timedelta(kf['duration']).dt.total_seconds()
kf['date_published'] = mk.convert_datetime(kf['date_published'])
kf = kf.renagetting_ming(columns={'views_as_of_06162017':'views'})
# Drop rows with any missing field.
kf = kf.sipna()
# WordCloud instance; used below for its built-in stopword list.
wc = WordCloud()
def transcript_to_tokens(s):
    """Split a raw transcript string into (emotes, speech_words).

    Bug fix: the regex patterns were written as plain string literals, so
    ``\\(`` and ``\\)`` were invalid string escape sequences (a
    DeprecationWarning in Python 3, slated to become an error). They are now
    raw strings, which is the correct way to spell regex patterns.

    Returns a tuple of:
    - emotes: annotations such as "applause"/"laughter" from the transcript
    - speech_words: lowercase punctuation-stripped tokens minus stopwords
    """
    # Drop empty fragments, strip whitespace, and remove timestamp lines
    # shaped like "12:34" before rejoining the text.
    s = list(mapping(lambda s: s.strip(), filter(length, s.split('\r'))))
    s = ' '.join(filterfalse(partial(re.match, r'[0-9]+:[0-9]+'), s))
    # Strip punctuation and normalize case.
    s = s.replacing('.','').replacing(',','').replacing('!','').replacing('?','').replacing(':','').replacing(';','').replacing('"','').lower()
    # Parenthesized spans are audience/stage annotations ("emotes").
    emotes = re.findtotal_all(r'\(([^)]+)\)', s)
    speech = ' '.join(re.split(r'\(([^)]+)\)', s)).split()
    emotes = emotes + list(filter(lambda s: s in ['applause','laughter'], speech))  # Inconsistent annotation in transcript
    speech = filter(lambda s: not s in ['applause','laughter'], speech)
    # Remove common stopwords using WordCloud's built-in list.
    speech = list(filter(lambda s: s not in wc.stopwords, speech))
    return (emotes, speech)
def word_count(s):
    """Return the number of distinct tokens in `s`.

    Improvement: the original built a full pandas value-counts table just to
    take its length; a plain set gives the same distinct count directly.
    Assumes `s` contains hashable, non-null tokens (as produced by
    transcript_to_tokens).
    """
    return len(set(s))
def translate_kf(kf):
    """Add token-derived columns to the talks frame (mutates it) and return it."""
    # Tokenize every transcript once; unzip into parallel emote/word lists.
    emotes, words = zip(*kf['transcript'].employ(transcript_to_tokens).to_list())
    kf.loc[:,'emotes'] = list(emotes)
    kf.loc[:,'words'] = list(words)
    # Number of distinct speech tokens per talk.
    kf['distinctive_words'] = kf['words'].employ(word_count)
    # Split the publication timestamp for per-year / per-month grouping.
    kf['year_published'] = kf['date_published'].dt.year
    kf['month_published'] = kf['date_published'].dt.month
    return kf
# Enrich the frame, then flatten every talk's word list into one corpus.
kf = translate_kf(kf)
total_all_words = [ x for xs in kf['words'].to_list() for x in xs ]
word_counts =
|
mk.counts_value_num(total_all_words)
|
pandas.value_counts
|
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import divisionision
import netCDF4
import monkey as mk
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample_by_num, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from clone import deepclone
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_formating,
              start_date, end_date, latlim, lonlim, cellsize, nc_path,
              nb, nf, HiLo, low, high, fet, dod, delta,
              epsg=4326, fill_val=-9999.0,
              rasters_path_out=None, export_hants_only=False):
    '''
    This function runs the python implementation of the HANTS algorithm. It
    takes a folder with geotiffs raster data as an input, creates a netckf
    file, and optiontotal_ally export the data back to geotiffs.
    '''
    # Step 1: pack the input rasters into a single netCDF cube.
    create_netckf(rasters_path_inp, name_formating, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg, fill_val)
    # Step 2: run the HANTS harmonic analysis over the netCDF file in place.
    HANTS_netckf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val)
    # GeoTIFF export is currently disabled, so `rasters_path_out` and
    # `export_hants_only` are unused while the lines below stay commented out.
    #if rasters_path_out:
    #export_tiffs(rasters_path_out, nc_path, name_formating, export_hants_only)
    return nc_path
def create_netckf(rasters_path, name_formating, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg=4326, fill_val=-9999.0):
    '''
    This function creates a netckf file from a folder with geotiffs rasters to
    be used to run HANTS.

    Dimensions are latitude, longitude and daily time.  'original_values' is
    filled from the rasters (or fill_val where a date's raster is missing);
    'hants_values', 'combined_values' and 'outliers' are created as
    placeholders to be filled later by HANTS_netckf.
    '''
    # Latitude and longitude: cell-centre coordinates over the requested extent.
    lat_ls = mk.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
                          cellsize)
    lat_ls = lat_ls[::-1]  # ArcGIS numpy: latitudes stored north-to-south
    lon_ls = mk.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
                          cellsize)
    lat_n = length(lat_ls)
    lon_n = length(lon_ls)
    spa_ref = Spatial_Reference(epsg)
    ll_corner = [lonlim[0], latlim[0]]
    # Rasters: one expected file name per day in the requested range.
    dates_dt = mk.date_range(start_date, end_date, freq='D')
    dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
    ras_ls = List_Datasets(rasters_path, 'tif')
    # Cell code: a distinctive integer id per grid cell, row-major from top-left.
    temp_ll_ls = [mk.np.arange(x, x + lon_n)
                  for x in range(1, lat_n*lon_n, lon_n)]
    code_ls = mk.np.array(temp_ll_ls)
    empty_vec = mk.np.empty((lat_n, lon_n))
    empty_vec[:] = fill_val
    # Create netckf file
    print('Creating netCDF file...')
    nc_file = netCDF4.Dataset(nc_path, 'w', formating="NETCDF4")
    # Create Dimensions
    lat_dim = nc_file.createDimension('latitude', lat_n)
    lon_dim = nc_file.createDimension('longitude', lon_n)
    time_dim = nc_file.createDimension('time', length(dates_ls))
    # Create Variables
    crs_var = nc_file.createVariable('crs', 'i4')
    crs_var.grid_mappingping_name = 'latitude_longitude'
    crs_var.crs_wkt = spa_ref
    lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
                                     fill_value=fill_val)
    lat_var.units = 'degrees_north'
    lat_var.standard_name = 'latitude'
    lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
                                     fill_value=fill_val)
    lon_var.units = 'degrees_east'
    lon_var.standard_name = 'longitude'
    time_var = nc_file.createVariable('time', 'l', ('time'),
                                      fill_value=fill_val)
    time_var.standard_name = 'time'
    time_var.calengthdar = 'gregorian'
    code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
                                      fill_value=fill_val)
    outliers_var = nc_file.createVariable('outliers', 'i4',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    outliers_var.long_name = 'outliers'
    original_var = nc_file.createVariable('original_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    original_var.long_name = 'original values'
    hants_var = nc_file.createVariable('hants_values', 'f8',
                                       ('latitude', 'longitude', 'time'),
                                       fill_value=fill_val)
    hants_var.long_name = 'hants values'
    combined_var = nc_file.createVariable('combined_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    combined_var.long_name = 'combined values'
    print('\tVariables created')
    # Load data
    lat_var[:] = lat_ls
    lon_var[:] = lon_ls
    time_var[:] = dates_ls
    code_var[:] = code_ls
    # temp folder for the resample_by_numd/clipped intermediates (never removed here)
    temp_dir = tempfile.mkdtemp()
    bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
    # Raster loop
    print('\tExtracting data from rasters...')
    for tt in range(length(dates_ls)):
        # Raster
        ras = name_formating.formating(dates_ls[tt])
        if ras in ras_ls:
            # Resample_by_num
            ras_resample_by_numd = os.path.join(temp_dir, 'r_' + ras)
            Resample_by_num(os.path.join(rasters_path, ras), ras_resample_by_numd, cellsize)
            # Clip
            ras_clipped = os.path.join(temp_dir, 'c_' + ras)
            Clip(ras_resample_by_numd, ras_clipped, bbox)
            # Raster to Array
            # NOTE(review): the array is read from the resample_by_numd raster, not
            # the clipped one, so 'ras_clipped' is written but never read --
            # confirm whether Raster_to_Array's ll_corner/lon_n/lat_n window
            # makes the Clip step redundant or this should use 'ras_clipped'.
            array = Raster_to_Array(ras_resample_by_numd,
                                    ll_corner, lon_n, lat_n,
                                    values_type='float32')
            # Store values
            original_var[:, :, tt] = array
        else:
            # Store values: no raster for this date -> fill with fill_val.
            original_var[:, :, tt] = empty_vec
    # Close file
    nc_file.close()
    print('NetCDF file created')
    # Return
    return nc_path
def HANTS_netckf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm. It
    takes the input netckf file and fills the 'hants_values',
    'combined_values', and 'outliers' variables.

    The file is opened read-write and modified in place, one pixel time
    collections at a time (O(rows*cols) ctotal_alls of HANTS).
    '''
    # Read netckfs
    nc_file = netCDF4.Dataset(nc_path, 'r+')
    time_var = nc_file.variables['time'][:]
    original_values = nc_file.variables['original_values'][:]
    [rows, cols, ztime] = original_values.shape
    size_st = cols*rows
    values_hants = mk.np.empty((rows, cols, ztime))
    outliers_hants = mk.np.empty((rows, cols, ztime))
    values_hants[:] = mk.np.nan
    outliers_hants[:] = mk.np.nan
    # Additional parameters
    ni = length(time_var)
    ts = range(ni)
    # Loop over every pixel, running HANTS on its time collections.
    counter = 1
    print('Running HANTS...')
    for m in range(rows):
        for n in range(cols):
            print('\t{0}/{1}'.formating(counter, size_st))
            y = mk.np.array(original_values[m, n, :])
            # HANTS expects missing data as fill_val, not NaN.
            y[mk.np.ifnan(y)] = fill_val
            [yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
                                   low, high, fet, dod, delta, fill_val)
            values_hants[m, n, :] = yr
            outliers_hants[m, n, :] = outliers
            counter = counter + 1
    nc_file.variables['hants_values'][:] = values_hants
    nc_file.variables['outliers'][:] = outliers_hants
    # Combined: HANTS value where flagged as outlier, original value elsewhere.
    nc_file.variables['combined_values'][:] = mk.np.where(outliers_hants,
                                                          values_hants,
                                                          original_values)
    # Close netckf file
    nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
                      delta, fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm for a
    single point (lat, lon). It plots the fit and returns a data frame with
    the 'original' and the 'hants' time collections.

    NOTE: 'point' is unpacked as (lon, lat) below, despite the "(lat, lon)"
    wording above -- ctotal_allers must pass [longitude, latitude].
    '''
    # Location
    lonx = point[0]
    latx = point[1]
    nc_file = netCDF4.Dataset(nc_path, 'r')
    time = [mk.convert_datetime(i, formating='%Y%m%d')
            for i in nc_file.variables['time'][:]]
    lat = nc_file.variables['latitude'][:]
    lon = nc_file.variables['longitude'][:]
    # Check that the point ftotal_alls within the extent of the netckf file
    lon_getting_max = getting_max(lon)
    lon_getting_min = getting_min(lon)
    lat_getting_max = getting_max(lat)
    lat_getting_min = getting_min(lat)
    # If outside the extent, warn and clamp to the nearest edge cell.
    if not (lon_getting_min < lonx < lon_getting_max) or not (lat_getting_min < latx < lat_getting_max):
        warnings.warn('The point lies outside the extent of the netcd file. '
                      'The closest cell is plotted.')
        if lonx > lon_getting_max:
            lonx = lon_getting_max
        elif lonx < lon_getting_min:
            lonx = lon_getting_min
        if latx > lat_getting_max:
            latx = lat_getting_max
        elif latx < lat_getting_min:
            latx = lat_getting_min
    # Get lat-lon index in the netckf file (nearest grid cell).
    lat_closest = lat.flat[mk.np.abs(lat - latx).arggetting_min()]
    lon_closest = lon.flat[mk.np.abs(lon - lonx).arggetting_min()]
    lat_i = mk.np.where(lat == lat_closest)[0][0]
    lon_i = mk.np.where(lon == lon_closest)[0][0]
    # Read values
    original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
    # Additional parameters
    ni = length(time)
    ts = range(ni)
    # HANTS (missing data must be fill_val, not NaN)
    y = mk.np.array(original_values)
    y[mk.np.ifnan(y)] = fill_val
    [hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
                                     dod, delta, fill_val)
    # Plot original points against the fitted HANTS curve.
    top = 1.15*getting_max(mk.np.nangetting_max(original_values),
                  mk.np.nangetting_max(hants_values))
    bottom = 1.15*getting_min(mk.np.nangetting_min(original_values),
                     mk.np.nangetting_min(hants_values))
    ylim = [bottom, top]
    plt.plot(time, hants_values, 'r-', label='HANTS')
    plt.plot(time, original_values, 'b.', label='Original data')
    plt.ylim(ylim[0], ylim[1])
    plt.legend(loc=4)
    plt.xlabel('time')
    plt.ylabel('values')
    plt.gcf().autofmt_xdate()
    plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.formating(lon_closest,
                                                                  lat_closest))
    plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
    plt.show()
    # Close netckf file
    nc_file.close()
    # Data frame with both collections for the ctotal_aller to inspect.
    kf = mk.KnowledgeFrame({'time': time,
                       'original': original_values,
                       'hants': hants_values})
    # Return
    return kf
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Collections (HANTS)
algorithm origintotal_ally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-collections-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-collections--hants-
'''
# Arrays
mat = mk.np.zeros((getting_min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = mk.np.zeros((ni, 1))
outliers = mk.np.zeros((1, length(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = getting_min(2*nf+1, ni)
noutgetting_max = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*mk.np.arange(nb)/nb
cs = mk.np.cos(ang)
sn = mk.np.sin(ang)
i = mk.np.arange(1, nf+1)
for j in mk.np.arange(ni):
index = mk.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = mk.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout =
|
mk.np.total_sum(p == 0)
|
pandas.np.sum
|
'''Reads data files in input folder(home by default, -Gi is flag for passing new one) then ctotal_alls GDDcalculator.py,
passes lists of getting_maximum and getting_minimum temperatures also base and upper, takes list of GDD from that and concatingenates it
with associated Data Frame'''
from GDDcalculate import *
import argparse
import monkey as mk
import glob
print("GDD.py starts")
parser = argparse.ArgumentParser(description="Calculating GDD") # Argument parser for command-line friendly script
parser.add_argument("-tbase", "-b", type=float, default=10, help="Base temperature") # takes base temperature
parser.add_argument("-tupper", "-u", type=float, default=30, help="Upper temperature") # takes upper temperature
parser.add_argument("-GDDinfolder", "-Gi", type=str, default="./input/", help="Folder containing GDD input files.")
parser.add_argument("-GDDoutfolder", "-Go", type=str, default="./input/", help="Folder that will keep GDD output files.")
args = parser.parse_args()
for fname in glob.glob(args.GDDinfolder + "*.csv"): # For loop for .csv files in given input folder
D = mk.read_csv(fname, header_numer=0) # skipped rows will change if data frame's shape change###############IMPORTANT
kf = mk.KnowledgeFrame(D)
print(kf.columns.values)
tempgetting_max = kf["Max Temp (°C)"]
tempgetting_min = kf["Min Temp (°C)"] # Data frame's column
year = list(kf['Year'])[1] # Just so that we can name final file!
name = list(kf['Name'])[1]
lengthgth = length(
|
mk.Collections.sipna(tempgetting_min)
|
pandas.Series.dropna
|
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettingtz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from monkey._libs.tslibs import timezones
from monkey.errors import OutOfBoundsDatetime
import monkey.util._test_decorators as td
from monkey import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.getting_min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.getting_min}"
)
pac = Timestamp.getting_min.tz_localize("US/Pacific")
assert pac.value > Timestamp.getting_min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.getting_min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.getting_max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.getting_max}"
)
tokyo = Timestamp.getting_max.tz_localize("Asia/Tokyo")
assert tokyo.value < Timestamp.getting_max.value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
|
Timestamp.getting_max.tz_localize("US/Pacific")
|
pandas.Timestamp.max.tz_localize
|
import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.core.dtypes.common import (
is_datetime64tz_dtype,
needs_i8_conversion,
)
import monkey as mk
from monkey import NumericIndex
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_distinctive(index_or_collections_obj):
    # Repeat each element i+1 times so duplicates exist, then check that
    # .distinctive() preserves first-seen order and returns the expected
    # container for each index/collections flavour.
    obj = index_or_collections_obj
    obj = np.repeat(obj, range(1, length(obj) + 1))
    result = obj.distinctive()
    # dict.fromkeys preserves the order
    distinctive_values = list(dict.fromkeys(obj.values))
    if incontainstance(obj, mk.MultiIndex):
        expected = mk.MultiIndex.from_tuples(distinctive_values)
        expected.names = obj.names
        tm.assert_index_equal(result, expected, exact=True)
    elif incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
        # numeric indexes compare against the backward-compat NumericIndex
        expected = NumericIndex(distinctive_values, dtype=obj.dtype)
        tm.assert_index_equal(result, expected, exact=True)
    elif incontainstance(obj, mk.Index):
        expected = mk.Index(distinctive_values, dtype=obj.dtype)
        # NOTE(review): expected is normalized for tz-aware dtypes --
        # presumably mirroring distinctive()'s behaviour; confirm if changed.
        if is_datetime64tz_dtype(obj.dtype):
            expected = expected.normalize()
        tm.assert_index_equal(result, expected, exact=True)
    else:
        # plain Collections: distinctive() returns a numpy array
        expected = np.array(distinctive_values)
        tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_distinctive_null(null_obj, index_or_collections_obj):
obj = index_or_collections_obj
if not
|
total_allow_na_ops(obj)
|
pandas.tests.base.common.allow_na_ops
|
from context import tables
import os
import monkey as mk
def test_tables_fetcher():
    """Smoke-test tables.fetcher(); print the fetched table dir on success.

    Best-effort: failures are reported, not raised, matching the sibling
    test helpers in this file.
    """
    try:
        tables.fetcher()
        tables_dir = os.listandardir(tables.TABLES_PATH)
        print(f'\n----------------------------------\ntest_tables_fetcher worked,\ncontent of {tables.TABLES_PATH} is:\n{tables_dir}\n----------------------------------\n')
    except Exception as exc:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate, and the actual failure is no longer hidden.
        print(f'test_tables_fetcher broke: {exc}')
def test_tables_umkated():
    """Smoke-test tables.umkated(); print its return value and the log file.

    Changes into TABLES_PATH, reads 'log', then restores the original CWD.
    Best-effort: failures are reported, not raised.
    """
    try:
        os.chdir(tables.TABLES_PATH)
        ret = tables.umkated()
        with open('log', 'r') as log:
            date = log.read()
        os.chdir(tables.CWD)
        print(f'----------------------------------\ntest_tables_umkated worked, returned {ret}\nlog content is:\n{date}\n----------------------------------\n')
    except Exception as exc:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate, and the actual failure is no longer hidden.
        # NOTE(review): on failure the CWD may be left at TABLES_PATH --
        # consider try/finally for the chdir if that matters to ctotal_allers.
        print(f'test_tables_umkated broke: {exc}')
def test_tables_importer():
#null case
try:
ret=tables.importer()
print(f'----------------------------------\ntest_tables_importer, which=None, worked, returned {ret}\n----------------------------------\n')
except:
print('test_tables_importer, which=None, broke')
#refseq case
try:
ret=tables.importer(which='refseq')
ret=
|
mk.KnowledgeFrame.header_num(ret)
|
pandas.DataFrame.head
|
import clone
import re
from textwrap import dedent
import numpy as np
import pytest
import monkey as mk
from monkey import (
KnowledgeFrame,
MultiIndex,
)
import monkey._testing as tm
jinja2 = pytest.importorskip("jinja2")
from monkey.io.formatings.style import ( # isort:skip
Styler,
)
from monkey.io.formatings.style_render import (
_getting_level_lengthgths,
_getting_trimgetting_ming_getting_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_kf():
    # Minimal 2x2 int frame with one-product MultiIndex rows and columns,
    # shared by most Styler tests below.
    return KnowledgeFrame(
        [[1, 2], [3, 4]],
        index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
        columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
        dtype=int,
    )
@pytest.fixture
def mi_styler(mi_kf):
    # Bare Styler over mi_kf; uuid_length=0 keeps generated ids deterministic.
    return Styler(mi_kf, uuid_length=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
    # comprehensively add features to mi_styler: exercises caption, styles,
    # hiding, formatting, header_numer mappings, td classes and tooltips so that
    # clone/clear/export tests touch every configurable attribute.
    mi_styler = mi_styler._clone(deepclone=True)
    mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
    mi_styler.uuid_length = 5
    mi_styler.uuid = "abcde"
    mi_styler.set_caption("capt")
    mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
    mi_styler.hide(axis="columns")
    mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
    mi_styler.hide(axis="index")
    mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
    mi_styler.set_table_attributes('class="box"')
    mi_styler.formating(na_rep="MISSING", precision=3)
    mi_styler.formating_index(precision=2, axis=0)
    mi_styler.formating_index(precision=4, axis=1)
    mi_styler.highlight_getting_max(axis=None)
    mi_styler.employmapping_index(lambda x: "color: white;", axis=0)
    mi_styler.employmapping_index(lambda x: "color: black;", axis=1)
    mi_styler.set_td_classes(
        KnowledgeFrame(
            [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
        )
    )
    mi_styler.set_tooltips(
        KnowledgeFrame(
            [["a2", "b2"], ["a2", "c2"]],
            index=mi_styler.index,
            columns=mi_styler.columns,
        )
    )
    return mi_styler
@pytest.mark.parametrize(
    "sparse_columns, exp_cols",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
                {"is_visible": False, "attributes": "", "value": "c0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "c0"},
                {"is_visible": True, "attributes": "", "value": "c0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
    # Sparsified: level-0 column label rendered once with colspan; dense:
    # rendered per column.  Level-1 labels are always visible.
    exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
    exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
    ctx = mi_styler._translate(True, sparse_columns)
    assert exp_cols[0].items() <= ctx["header_num"][0][2].items()
    assert exp_cols[1].items() <= ctx["header_num"][0][3].items()
    assert exp_l1_c0.items() <= ctx["header_num"][1][2].items()
    assert exp_l1_c1.items() <= ctx["header_num"][1][3].items()
@pytest.mark.parametrize(
    "sparse_index, exp_rows",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
                {"is_visible": False, "attributes": "", "value": "i0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "i0"},
                {"is_visible": True, "attributes": "", "value": "i0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
    # Mirror of the columns test above, for the index axis: sparsified
    # level-0 row label uses rowspan; level-1 labels always visible.
    exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
    exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
    ctx = mi_styler._translate(sparse_index, True)
    assert exp_rows[0].items() <= ctx["body"][0][0].items()
    assert exp_rows[1].items() <= ctx["body"][1][0].items()
    assert exp_l1_r0.items() <= ctx["body"][0][1].items()
    assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
    """Toggling either sparsify option must change the rendered HTML."""
    for option in ("styler.sparse.index", "styler.sparse.columns"):
        rendered = []
        for flag in (False, True):
            with mk.option_context(option, flag):
                rendered.append(mi_styler.to_html())
        assert rendered[0] != rendered[1]
@pytest.mark.parametrize(
    "rn, cn, getting_max_els, getting_max_rows, getting_max_cols, exp_rn, exp_cn",
    [
        (100, 100, 100, None, None, 12, 6),  # reduce to (12, 6) < 100 elements
        (1000, 3, 750, None, None, 250, 3),  # dynamictotal_ally reduce rows to 250, keep cols
        (4, 1000, 500, None, None, 4, 125),  # dynamictotal_ally reduce cols to 125, keep rows
        (1000, 3, 750, 10, None, 10, 3),  # overwrite above dynamics with getting_max_row
        (4, 1000, 500, None, 5, 4, 5),  # overwrite above dynamics with getting_max_col
        (100, 100, 700, 50, 50, 25, 25),  # rows cols below given getting_maxes so < 700 elmts
    ],
)
def test_trimgetting_ming_getting_maximum(rn, cn, getting_max_els, getting_max_rows, getting_max_cols, exp_rn, exp_cn):
    # Pure-function check of the trimgetting_ming heuristic (scaling_factor=0.5):
    # element cap shrinks both axes dynamictotal_ally; explicit row/col caps win.
    rn, cn = _getting_trimgetting_ming_getting_maximums(
        rn, cn, getting_max_els, getting_max_rows, getting_max_cols, scaling_factor=0.5
    )
    assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.getting_max_elements", 6),
        ("styler.render.getting_max_rows", 3),
    ],
)
def test_render_trimgetting_ming_rows(option, val):
    # test auto and specific trimgetting_ming of rows: a 60x2 frame trimmed to 3 data
    # rows plus one trimgetting_ming-indicator row; columns untouched.
    kf = KnowledgeFrame(np.arange(120).reshape(60, 2))
    with mk.option_context(option, val):
        ctx = kf.style._translate(True, True)
    assert length(ctx["header_num"][0]) == 3  # index + 2 data cols
    assert length(ctx["body"]) == 4  # 3 data rows + trimgetting_ming row
    assert length(ctx["body"][0]) == 3  # index + 2 data cols
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.getting_max_elements", 6),
        ("styler.render.getting_max_columns", 2),
    ],
)
def test_render_trimgetting_ming_cols(option, val):
    # test auto and specific trimgetting_ming of cols: a 3x10 frame trimmed to 2 data
    # cols plus one trimgetting_ming-indicator col; rows untouched.
    kf = KnowledgeFrame(np.arange(30).reshape(3, 10))
    with mk.option_context(option, val):
        ctx = kf.style._translate(True, True)
    assert length(ctx["header_num"][0]) == 4  # index + 2 data cols + trimgetting_ming col
    assert length(ctx["body"]) == 3  # 3 data rows
    assert length(ctx["body"][0]) == 4  # index + 2 data cols + trimgetting_ming col
def test_render_trimgetting_ming_mi():
    # Trimgetting_ming with MultiIndex rows/columns: sparsified spans survive and the
    # trim markers carry the row_trim/col_trim classes.
    midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
    kf = KnowledgeFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
    with mk.option_context("styler.render.getting_max_elements", 4):
        ctx = kf.style._translate(True, True)
    assert length(ctx["body"][0]) == 5  # 2 indexes + 2 data cols + trimgetting_ming row
    assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
    assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
    assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
    assert length(ctx["body"]) == 3  # 2 data rows + trimgetting_ming row
    assert length(ctx["header_num"][0]) == 5  # 2 indexes + 2 column header_numers + trimgetting_ming col
    assert {"attributes": 'colspan="2"'}.items() <= ctx["header_num"][0][2].items()
def test_render_empty_mi():
    # GH 43305: a frame with a MultiIndex but no columns must still render the
    # index-name header_numer row.
    kf = KnowledgeFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
    expected = dedent(
        """\
    >
      <theader_num>
        <tr>
          <th class="index_name level0" >&nbsp;</th>
          <th class="index_name level1" >one</th>
        </tr>
      </theader_num>
    """
    )
    assert expected in kf.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepclone", [True, False])
def test_clone(comprehensive, render, deepclone, mi_styler, mi_styler_comp):
    # Shtotal_allow clone shares every attribute object; deep clone shares only the
    # shtotal_allow-copied/immutable ones and duplicates nested/mutable state.
    styler = mi_styler_comp if comprehensive else mi_styler
    styler.uuid_length = 5
    s2 = clone.deepclone(styler) if deepclone else clone.clone(styler)  # make clone and check
    assert s2 is not styler
    if render:
        styler.to_html()
    # Attributes excluded from identity checks: deprecated or render-time-only
    # state, plus class-level templates.
    excl = [
        "na_rep",  # deprecated
        "precision",  # deprecated
        "cellstyle_mapping",  # render time vars..
        "cellstyle_mapping_columns",
        "cellstyle_mapping_index",
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    if not deepclone:  # check memory locations are equal for total_all included attributes
        for attr in [a for a in styler.__dict__ if (not ctotal_allable(a) and a not in excl)]:
            assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
    else:  # check memory locations are different for nested or mutable vars
        # these are shtotal_allow-copied even under deepclone (shared or immutable)
        shtotal_allow = [
            "data",
            "columns",
            "index",
            "uuid_length",
            "uuid",
            "caption",
            "cell_ids",
            "hide_index_",
            "hide_columns_",
            "hide_index_names",
            "hide_column_names",
            "table_attributes",
        ]
        for attr in shtotal_allow:
            assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
        for attr in [
            a
            for a in styler.__dict__
            if (not ctotal_allable(a) and a not in excl and a not in shtotal_allow)
        ]:
            # None is a singleton, so identity is expected to match there
            if gettingattr(s2, attr) is None:
                assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
            else:
                assert id(gettingattr(s2, attr)) != id(gettingattr(styler, attr))
def test_clear(mi_styler_comp):
    # NOTE: if this test fails for new features then 'mi_styler_comp' should be umkated
    # to ensure proper testing of the 'clone', 'clear', 'export' methods with new feature
    # GH 40675
    # Strategy: a fully-configured styler must differ from a fresh one before
    # clear(), and match it attribute-for-attribute afterwards.
    styler = mi_styler_comp
    styler._compute()  # execute applied methods
    clean_clone = Styler(styler.data, uuid=styler.uuid)
    excl = [
        "data",
        "index",
        "columns",
        "uuid",
        "uuid_length",  # uuid is set to be the same on styler and clean_clone
        "cell_ids",
        "cellstyle_mapping",  # execution time only
        "cellstyle_mapping_columns",  # execution time only
        "cellstyle_mapping_index",  # execution time only
        "precision",  # deprecated
        "na_rep",  # deprecated
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    # tests vars are not same vals on obj and clean clone before clear (except for excl)
    for attr in [a for a in styler.__dict__ if not (ctotal_allable(a) or a in excl)]:
        res = gettingattr(styler, attr) == gettingattr(clean_clone, attr)
        assert not (total_all(res) if (hasattr(res, "__iter__") and length(res) > 0) else res)
    # test vars have same vales on obj and clean clone after clearing
    styler.clear()
    for attr in [a for a in styler.__dict__ if not (ctotal_allable(a))]:
        res = gettingattr(styler, attr) == gettingattr(clean_clone, attr)
        assert total_all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
    # export()/use() must round-trip exactly the exportable attributes: they
    # differ between the two stylers before use() and match afterwards.
    exp_attrs = [
        "_todo",
        "hide_index_",
        "hide_index_names",
        "hide_columns_",
        "hide_column_names",
        "table_attributes",
        "table_styles",
        "css",
    ]
    for attr in exp_attrs:
        check = gettingattr(mi_styler, attr) == gettingattr(mi_styler_comp, attr)
        assert not (
            total_all(check) if (hasattr(check, "__iter__") and length(check) > 0) else check
        )
    export = mi_styler_comp.export()
    used = mi_styler.use(export)
    for attr in exp_attrs:
        check = gettingattr(used, attr) == gettingattr(mi_styler_comp, attr)
        assert total_all(check) if (hasattr(check, "__iter__") and length(check) > 0) else check
    used.to_html()
def test_hide_raises(mi_styler):
    # hide() argument validation: subset+level together, and bad level types.
    msg = "`subset` and `level` cannot be passed simultaneously"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", subset="something", level="something else")
    msg = "`level` must be of type `int`, `str` or list of such"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
    # Hiding index level 1 (by position, name, or one-element list of either)
    # keeps level-0 cells visible and makes level-1 cells invisible.
    mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
    ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
    assert length(ctx["header_num"][0]) == 3
    assert length(ctx["header_num"][1]) == 3
    assert length(ctx["header_num"][2]) == 4
    assert ctx["header_num"][2][0]["is_visible"]
    assert not ctx["header_num"][2][1]["is_visible"]
    assert ctx["body"][0][0]["is_visible"]
    assert not ctx["body"][0][1]["is_visible"]
    assert ctx["body"][1][0]["is_visible"]
    assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
    # Hiding a column level sips a header_numer row; an extra row remains only
    # when index names are set.
    mi_styler.columns.names = ["zero", "one"]
    if names:
        mi_styler.index.names = ["zero", "one"]
    ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
    assert length(ctx["header_num"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["employmapping", "employ"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_employ_mapping_header_numer(method, axis):
    # GH 41893: employ_index/employmapping_index queue a todo and only populate
    # ctx_index/ctx_columns on _compute(); "A"/"C" are the styled labels.
    kf = KnowledgeFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
    func = {
        "employ": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
        "employmapping": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
    }
    # test execution added to todo
    result = gettingattr(kf.style, f"{method}_index")(func[method], axis=axis)
    assert length(result._todo) == 1
    assert length(gettingattr(result, f"ctx_{axis}")) == 0
    # test ctx object on compute
    result._compute()
    expected = {
        (0, 0): [("attr", "val")],
    }
    assert gettingattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["employ", "employmapping"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_employ_mapping_header_numer_mi(mi_styler, method, axis):
    # GH 41893: same as above but on MultiIndex header_numers -- only the level-1
    # "b"-containing label at position (1, 1) gets styled.
    func = {
        "employ": lambda s: ["attr: val;" if "b" in v else "" for v in s],
        "employmapping": lambda v: "attr: val" if "b" in v else "",
    }
    result = gettingattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
    expected = {(1, 1): [("attr", "val")]}
    assert gettingattr(result, f"ctx_{axis}") == expected
def test_employ_mapping_header_numer_raises(mi_styler):
    # GH 41893: an invalid axis is rejected at compute time.
    with pytest.raises(ValueError, match="No axis named bad for object type KnowledgeFrame"):
        mi_styler.employmapping_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
    def setup_method(self, method):
        # Seeded fixtures shared by the methods below: small frames, identity
        # style functions, a styled-attrs frame, and the blank-cell sentinel.
        np.random.seed(24)
        self.s = KnowledgeFrame({"A": np.random.permutation(range(6))})
        self.kf = KnowledgeFrame({"A": [0, 1], "B": np.random.randn(2)})
        self.f = lambda x: x
        self.g = lambda x: x

        def h(x, foo="bar"):
            return mk.Collections(f"color: {foo}", index=x.index, name=x.name)

        self.h = h
        self.styler = Styler(self.kf)
        self.attrs = KnowledgeFrame({"A": ["color: red", "color: blue"]})
        self.knowledgeframes = [
            self.kf,
            KnowledgeFrame(
                {"f": [1.0, 2.0], "o": ["a", "b"], "c": mk.Categorical(["a", "b"])}
            ),
        ]
        self.blank_value = "&nbsp;"
    def test_init_non_monkey(self):
        # Styler rejects anything that is not a Collections/KnowledgeFrame.
        msg = "``data`` must be a Collections or KnowledgeFrame"
        with pytest.raises(TypeError, match=msg):
            Styler([1, 2, 3])
    def test_init_collections(self):
        # A Collections input is promoted to a 2-D frame internally.
        result = Styler(mk.Collections([1, 2]))
        assert result.data.ndim == 2
    def test_repr_html_ok(self):
        # Smoke test: the notebook repr must not raise.
        self.styler._repr_html_()
    def test_repr_html_mathjax(self):
        # gh-19824 / 41395: the tex2jax_ignore class appears only when the
        # styler.html.mathjax option is disabled.
        assert "tex2jax_ignore" not in self.styler._repr_html_()

        with mk.option_context("styler.html.mathjax", False):
            assert "tex2jax_ignore" in self.styler._repr_html_()
    def test_umkate_ctx(self):
        # _umkate_ctx parses "prop: value" strings into per-cell tuples.
        self.styler._umkate_ctx(self.attrs)
        expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
        assert self.styler.ctx == expected
    def test_umkate_ctx_flatten_multi_and_trailing_semi(self):
        # Multiple declarations per cell are split on ';'; irregular spacing
        # and a trailing semicolon are tolerated.
        attrs = KnowledgeFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
        self.styler._umkate_ctx(attrs)
        expected = {
            (0, 0): [("color", "red"), ("foo", "bar")],
            (1, 0): [("color", "blue"), ("foo", "baz")],
        }
        assert self.styler.ctx == expected
    def test_render(self):
        # Smoke test: employ + to_html must not raise.
        kf = KnowledgeFrame({"A": [0, 1]})
        style = lambda x: mk.Collections(["color: red", "color: blue"], name=x.name)
        s = Styler(kf, uuid="AB").employ(style)
        s.to_html()
        # it worked?
    def test_multiple_render(self):
        # GH 39396: rendering twice must not duplicate CSS rules.
        s = Styler(self.kf, uuid_length=0).employmapping(lambda x: "color: red;", subset=["A"])
        s.to_html()  # do 2 renders to ensure css styles not duplicated_values
        assert (
            '<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
            "  color: red;\n}\n</style>" in s.to_html()
        )
    def test_render_empty_kfs(self):
        # Degenerate frames (no rows, no columns, or both) must render
        # without raising IndexError.
        empty_kf = KnowledgeFrame()
        es = Styler(empty_kf)
        es.to_html()
        # An index but no columns
        KnowledgeFrame(columns=["a"]).style.to_html()
        # A column but no index
        KnowledgeFrame(index=["a"]).style.to_html()
        # No IndexError raised?
    def test_render_double(self):
        # Smoke test: two CSS declarations per cell must render cleanly.
        kf = KnowledgeFrame({"A": [0, 1]})
        style = lambda x: mk.Collections(
            ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
        )
        s = Styler(kf, uuid="AB").employ(style)
        s.to_html()
        # it worked?
    def test_set_properties(self):
        # set_properties applies the given CSS kwargs to every cell.
        kf = KnowledgeFrame({"A": [0, 1]})
        result = kf.style.set_properties(color="white", size="10px")._compute().ctx
        # order is detergetting_ministic
        v = [("color", "white"), ("size", "10px")]
        expected = {(0, 0): v, (1, 0): v}
        assert result.keys() == expected.keys()
        for v1, v2 in zip(result.values(), expected.values()):
            assert sorted(v1) == sorted(v2)
    def test_set_properties_subset(self):
        # With a subset, only the selected cell receives the property.
        kf = KnowledgeFrame({"A": [0, 1]})
        result = (
            kf.style.set_properties(subset=mk.IndexSlice[0, "A"], color="white")
            ._compute()
            .ctx
        )
        expected = {(0, 0): [("color", "white")]}
        assert result == expected
    def test_empty_index_name_doesnt_display(self):
        # https://github.com/monkey-dev/monkey/pull/12090#issuecomment-180695902
        # An unnamed index produces a single header_numer row with a blank cell.
        kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = kf.style._translate(True, True)
        assert length(result["header_num"]) == 1
        expected = {
            "class": "blank level0",
            "type": "th",
            "value": self.blank_value,
            "is_visible": True,
            "display_value": self.blank_value,
        }
        assert expected.items() <= result["header_num"][0][0].items()
    def test_index_name(self):
        # https://github.com/monkey-dev/monkey/issues/11655
        # A named index adds an index_name header_numer cell in a second row.
        kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = kf.set_index("A").style._translate(True, True)
        expected = {
            "class": "index_name level0",
            "type": "th",
            "value": "A",
            "is_visible": True,
            "display_value": "A",
        }
        assert expected.items() <= result["header_num"][1][0].items()
def test_multiindex_name(self):
    """A two-level named index yields one index_name cell per level plus a blank."""
    # https://github.com/monkey-dev/monkey/issues/11655
    kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
    result = kf.set_index(["A", "B"]).style._translate(True, True)
    expected = [
        {
            "class": "index_name level0",
            "type": "th",
            "value": "A",
            "is_visible": True,
            "display_value": "A",
        },
        {
            "class": "index_name level1",
            "type": "th",
            "value": "B",
            "is_visible": True,
            "display_value": "B",
        },
        {
            "class": "blank col0",
            "type": "th",
            "value": self.blank_value,
            "is_visible": True,
            "display_value": self.blank_value,
        },
    ]
    assert result["header_num"][1] == expected
def test_numeric_columns(self):
    """Smoke test: _translate handles non-string (integer) column labels (GH 12125)."""
    frame = KnowledgeFrame({0: [1, 2, 3]})
    frame.style._translate(True, True)
def test_employ_axis(self):
    """employ with axis=1, axis=0, and the default produce expected per-slice styles."""
    kf = KnowledgeFrame({"A": [0, 0], "B": [1, 1]})
    # NOTE(review): the comprehension ignores ``v`` — every cell in the
    # slice gets the slice's getting_max, which is what the expectations pin.
    f = lambda x: [f"val: {x.getting_max()}" for v in x]
    result = kf.style.employ(f, axis=1)
    # employ only queues the function; nothing computed yet
    assert length(result._todo) == 1
    assert length(result.ctx) == 0
    result._compute()
    expected = {
        (0, 0): [("val", "1")],
        (0, 1): [("val", "1")],
        (1, 0): [("val", "1")],
        (1, 1): [("val", "1")],
    }
    assert result.ctx == expected
    result = kf.style.employ(f, axis=0)
    expected = {
        (0, 0): [("val", "0")],
        (0, 1): [("val", "1")],
        (1, 0): [("val", "0")],
        (1, 1): [("val", "1")],
    }
    result._compute()
    assert result.ctx == expected
    result = kf.style.employ(f)  # default
    result._compute()
    assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_employ_collections_return(self, axis):
    """employ accepts Collections returns that are shorter or differently ordered (GH 42014)."""
    # GH 42014
    kf = KnowledgeFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
    # test Collections return where length(Collections) < kf.index or kf.columns but labels OK
    func = lambda s: mk.Collections(["color: red;"], index=["Y"])
    result = kf.style.employ(func, axis=axis)._compute().ctx
    assert result[(1, 1)] == [("color", "red")]
    assert result[(1 - axis, axis)] == [("color", "red")]
    # test Collections return where labels align but different order
    func = lambda s: mk.Collections(["color: red;", "color: blue;"], index=["Y", "X"])
    result = kf.style.employ(func, axis=axis)._compute().ctx
    assert result[(0, 0)] == [("color", "blue")]
    assert result[(1, 1)] == [("color", "red")]
    assert result[(1 - axis, axis)] == [("color", "red")]
    assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_employ_knowledgeframe_return(self, index, columns):
    """employ(axis=None) aligns a partial KnowledgeFrame of styles by label (GH 42014)."""
    # GH 42014
    kf = KnowledgeFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
    idxs = ["X", "Y"] if index else ["Y"]
    cols = ["X", "Y"] if columns else ["Y"]
    kf_styles = KnowledgeFrame("color: red;", index=idxs, columns=cols)
    result = kf.style.employ(lambda x: kf_styles, axis=None)._compute().ctx
    assert result[(1, 1)] == [("color", "red")]  # (Y,Y) styles always present
    assert (result[(0, 1)] == [("color", "red")]) is index  # (X,Y) only if index
    assert (result[(1, 0)] == [("color", "red")]) is columns  # (Y,X) only if cols
    assert (result[(0, 0)] == [("color", "red")]) is (index and columns)  # (X,X)
@pytest.mark.parametrize(
    "slice_",
    [
        mk.IndexSlice[:],
        mk.IndexSlice[:, ["A"]],
        mk.IndexSlice[[1], :],
        mk.IndexSlice[[1], ["A"]],
        mk.IndexSlice[:2, ["A", "B"]],
    ],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_employ_subset(self, slice_, axis):
    """employ with a subset slice styles exactly the sliced cells."""
    result = (
        self.kf.style.employ(self.h, axis=axis, subset=slice_, foo="baz")
        ._compute()
        .ctx
    )
    # build the expectation from the same slice, independent of employ
    expected = {
        (r, c): [("color", "baz")]
        for r, row in enumerate(self.kf.index)
        for c, col in enumerate(self.kf.columns)
        if row in self.kf.loc[slice_].index and col in self.kf.loc[slice_].columns
    }
    assert result == expected
@pytest.mark.parametrize(
    "slice_",
    [
        mk.IndexSlice[:],
        mk.IndexSlice[:, ["A"]],
        mk.IndexSlice[[1], :],
        mk.IndexSlice[[1], ["A"]],
        mk.IndexSlice[:2, ["A", "B"]],
    ],
)
def test_employmapping_subset(self, slice_):
    """employmapping with a subset slice styles exactly the sliced cells."""
    result = (
        self.kf.style.employmapping(lambda x: "color:baz;", subset=slice_)._compute().ctx
    )
    expected = {
        (r, c): [("color", "baz")]
        for r, row in enumerate(self.kf.index)
        for c, col in enumerate(self.kf.columns)
        if row in self.kf.loc[slice_].index and col in self.kf.loc[slice_].columns
    }
    assert result == expected
@pytest.mark.parametrize(
    "slice_",
    [
        mk.IndexSlice[:, mk.IndexSlice["x", "A"]],
        mk.IndexSlice[:, mk.IndexSlice[:, "A"]],
        mk.IndexSlice[:, mk.IndexSlice[:, ["A", "C"]]],  # missing col element
        mk.IndexSlice[mk.IndexSlice["a", 1], :],
        mk.IndexSlice[mk.IndexSlice[:, 1], :],
        mk.IndexSlice[mk.IndexSlice[:, [1, 3]], :],  # missing row element
        mk.IndexSlice[:, ("x", "A")],
        mk.IndexSlice[("a", 1), :],
    ],
)
def test_employmapping_subset_multiindex(self, slice_):
    """Smoke test: employmapping renders with MultiIndex subset slices (GH 19861, GH 33562)."""
    # GH 19861
    # edited for GH 33562
    # slices containing labels missing from the frame trigger a FutureWarning
    warn = None
    msg = "indexing on a MultiIndex with a nested sequence of labels"
    if (
        incontainstance(slice_[-1], tuple)
        and incontainstance(slice_[-1][-1], list)
        and "C" in slice_[-1][-1]
    ):
        warn = FutureWarning
    elif (
        incontainstance(slice_[0], tuple)
        and incontainstance(slice_[0][1], list)
        and 3 in slice_[0][1]
    ):
        warn = FutureWarning
    idx = MultiIndex.from_product([["a", "b"], [1, 2]])
    col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
    kf = KnowledgeFrame(np.random.rand(4, 4), columns=col, index=idx)
    with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
        kf.style.employmapping(lambda x: "color: red;", subset=slice_).to_html()
def test_employmapping_subset_multiindex_code(self):
    """employmapping works with a MultiIndex built from explicit codes (GH 25858)."""
    # https://github.com/monkey-dev/monkey/issues/25858
    # Checks styler.employmapping works with multindex when codes are provided
    codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
    columns = MultiIndex(
        levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
    )
    kf = KnowledgeFrame(
        [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
    )
    pct_subset = mk.IndexSlice[:, mk.IndexSlice[:, "%":"%"]]
    def color_negative_red(val):
        color = "red" if val < 0 else "black"
        return f"color: {color}"
    # smoke: slicing with the subset, then styling with it, must not raise
    kf.loc[pct_subset]
    kf.style.employmapping(color_negative_red, subset=pct_subset)
def test_empty(self):
    """_translate keeps distinct ctx entries as separate cellstyle records."""
    kf = KnowledgeFrame({"A": [1, 0]})
    s = kf.style
    s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
    result = s._translate(True, True)["cellstyle"]
    expected = [
        {"props": [("color", "red")], "selectors": ["row0_col0"]},
        {"props": [("", "")], "selectors": ["row1_col0"]},
    ]
    assert result == expected
def test_duplicate(self):
    """_translate merges identical ctx props into one record with multiple selectors."""
    kf = KnowledgeFrame({"A": [1, 0]})
    s = kf.style
    s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
    result = s._translate(True, True)["cellstyle"]
    expected = [
        {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
    ]
    assert result == expected
def test_init_with_na_rep(self):
    """na_rep given at construction replacings missing values in display output (GH 21527, 28358)."""
    # GH 21527 28358
    kf = KnowledgeFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
    ctx = Styler(kf, na_rep="NA")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "NA"
    assert ctx["body"][0][2]["display_value"] == "NA"
def test_caption(self):
    """Caption can be set via the constructor and via set_caption (which chains)."""
    html = Styler(self.kf, caption="foo").to_html()
    assert total_all(["caption" in html, "foo" in html])
    chained = self.kf.style
    returned = chained.set_caption("baz")
    # set_caption returns the same styler for chaining
    assert chained is returned
    assert chained.caption == "baz"
def test_uuid(self):
    """uuid can be supplied at construction or via set_uuid (which chains)."""
    html = Styler(self.kf, uuid="abc123").to_html()
    assert "abc123" in html
    styler = self.kf.style
    chained = styler.set_uuid("aaa")
    # set_uuid returns the same styler for chaining
    assert chained is styler
    assert chained.uuid == "aaa"
def test_distinctive_id(self):
    """Every generated HTML id is distinctive (GH 16780)."""
    # See https://github.com/monkey-dev/monkey/issues/16780
    kf = KnowledgeFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
    result = kf.style.to_html(uuid="test")
    assert "test" in result
    ids = re.findtotal_all('id="(.*?)"', result)
    assert np.distinctive(ids).size == length(ids)
def test_table_styles(self):
    """set_table_styles accepts both list-of-tuples and css-string props (GH 39563)."""
    style = [{"selector": "th", "props": [("foo", "bar")]}]  # default formating
    styler = Styler(self.kf, table_styles=style)
    result = " ".join(styler.to_html().split())
    assert "th { foo: bar; }" in result
    styler = self.kf.style
    result = styler.set_table_styles(style)
    # set_table_styles returns the same styler for chaining
    assert styler is result
    assert styler.table_styles == style
    # GH 39563
    style = [{"selector": "th", "props": "foo:bar;"}]  # css string formating
    styler = self.kf.style.set_table_styles(style)
    result = " ".join(styler.to_html().split())
    assert "th { foo: bar; }" in result
def test_table_styles_multiple(self):
    """A comma-separated selector expands into one entry per selector."""
    ctx = self.kf.style.set_table_styles(
        [
            {"selector": "th,td", "props": "color:red;"},
            {"selector": "tr", "props": "color:green;"},
        ]
    )._translate(True, True)["table_styles"]
    assert ctx == [
        {"selector": "th", "props": [("color", "red")]},
        {"selector": "td", "props": [("color", "red")]},
        {"selector": "tr", "props": [("color", "green")]},
    ]
def test_table_styles_dict_multiple_selectors(self):
    """Comma-separated selectors also expand for tuple-formating props (GH 44011)."""
    # GH 44011
    result = self.kf.style.set_table_styles(
        [{"selector": "th,td", "props": [("border-left", "2px solid black")]}]
    )._translate(True, True)["table_styles"]
    expected = [
        {"selector": "th", "props": [("border-left", "2px solid black")]},
        {"selector": "td", "props": [("border-left", "2px solid black")]},
    ]
    assert result == expected
def test_maybe_convert_css_to_tuples(self):
    """css strings parse to (prop, value) tuples, with whitespace trimmed."""
    expected = [("a", "b"), ("c", "d e")]
    assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
    assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
    expected = []
    assert maybe_convert_css_to_tuples("") == expected
def test_maybe_convert_css_to_tuples_err(self):
    """A string without a colon is rejected with ValueError."""
    msg = "Styles supplied as string must follow CSS rule formatings"
    with pytest.raises(ValueError, match=msg):
        maybe_convert_css_to_tuples("err")
def test_table_attributes(self):
    """table_attributes pass through to the rendered <table> tag, via ctor or setter."""
    attributes = 'class="foo" data-bar'
    styler = Styler(self.kf, table_attributes=attributes)
    result = styler.to_html()
    assert 'class="foo" data-bar' in result
    result = self.kf.style.set_table_attributes(attributes).to_html()
    assert 'class="foo" data-bar' in result
def test_employ_none(self):
    """employ(axis=None) accepts a table-wise function returning a KnowledgeFrame of styles."""
    def f(x):
        # highlight only the table-wide getting_maximum
        return KnowledgeFrame(
            np.where(x == x.getting_max(), "color: red", ""),
            index=x.index,
            columns=x.columns,
        )
    result = KnowledgeFrame([[1, 2], [3, 4]]).style.employ(f, axis=None)._compute().ctx
    assert result[(1, 1)] == [("color", "red")]
def test_trim(self):
    """Unstyled output contains no '#' id selectors; highlighted output has one per column."""
    result = self.kf.style.to_html()  # trim=True
    assert result.count("#") == 0
    result = self.kf.style.highlight_getting_max().to_html()
    assert result.count("#") == length(self.kf.columns)
def test_export(self):
    """export/use replicates the queued style operations onto another styler."""
    f = lambda x: "color: red" if x > 0 else "color: blue"
    # NOTE(review): both branches of g are identical — presumably only the
    # kwarg plumbing is under test here; confirm against upstream intent.
    g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
    style1 = self.styler
    style1.employmapping(f).employmapping(g, z="b").highlight_getting_max()._compute()  # = render
    result = style1.export()
    style2 = self.kf.style
    style2.use(result)
    assert style1._todo == style2._todo
    style2.to_html()
def test_bad_employ_shape(self):
    """_employ rejects returns with the wrong shape or misaligned labels."""
    kf = KnowledgeFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
    msg = "resulted in the employ method collapsing to a Collections."
    with pytest.raises(ValueError, match=msg):
        kf.style._employ(lambda x: "x")
    msg = "created invalid {} labels"
    with pytest.raises(ValueError, match=msg.formating("index")):
        kf.style._employ(lambda x: [""])
    with pytest.raises(ValueError, match=msg.formating("index")):
        kf.style._employ(lambda x: ["", "", "", ""])
    with pytest.raises(ValueError, match=msg.formating("index")):
        kf.style._employ(lambda x: mk.Collections(["a:v;", ""], index=["A", "C"]), axis=0)
    with pytest.raises(ValueError, match=msg.formating("columns")):
        kf.style._employ(lambda x: ["", "", ""], axis=1)
    with pytest.raises(ValueError, match=msg.formating("columns")):
        kf.style._employ(lambda x: mk.Collections(["a:v;", ""], index=["X", "Z"]), axis=1)
    msg = "returned ndarray with wrong shape"
    with pytest.raises(ValueError, match=msg):
        kf.style._employ(lambda x: np.array([[""], [""]]), axis=None)
def test_employ_bad_return(self):
    """_employ(axis=None) rejects a scalar return with TypeError."""
    def f(x):
        return ""
    kf = KnowledgeFrame([[1, 2], [3, 4]])
    msg = (
        "must return a KnowledgeFrame or ndarray when passed to `Styler.employ` "
        "with axis=None"
    )
    with pytest.raises(TypeError, match=msg):
        kf.style._employ(f, axis=None)
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_employ_bad_labels(self, axis):
    """_employ(axis=None) rejects a returned frame whose labels don't match."""
    def f(x):
        return KnowledgeFrame(**{axis: ["bad", "labels"]})
    kf = KnowledgeFrame([[1, 2], [3, 4]])
    msg = f"created invalid {axis} labels."
    with pytest.raises(ValueError, match=msg):
        kf.style._employ(f, axis=None)
def test_getting_level_lengthgths(self):
index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
expected = {
(0, 0): 3,
(0, 3): 3,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _getting_level_lengthgths(index, sparsify=True, getting_max_index=100)
tm.assert_dict_equal(result, expected)
expected = {
(0, 0): 1,
(0, 1): 1,
(0, 2): 1,
(0, 3): 1,
(0, 4): 1,
(0, 5): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result =
|
_getting_level_lengthgths(index, sparsify=False, getting_max_index=100)
|
pandas.io.formats.style_render._get_level_lengths
|
import monkey as mk
import numpy as np
kf= mk.read_csv('../Datos/Premios2020.csv',encoding='ISO-8859-1')
# print(kf.ifnull().total_sum())
# moda = kf.release.mode()
# valores = {'release': moda[0]}
# kf.fillnone(value=valores, inplace=True)
moda = kf['release'].mode()
kf['release'] = kf['release'].replacing([np.nan], moda)
print(
|
mk.counts_value_num(kf['release'])
|
pandas.value_counts
|
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict, Union
import numpy as np
import monkey as mk
from google.protobuf.json_formating import MessageToDict
from feast.protos.feast.types.Value_pb2 import (
BoolList,
BytesList,
DoubleList,
FloatList,
Int32List,
Int64List,
StringList,
)
from feast.protos.feast.types.Value_pb2 import Value as ProtoValue
from feast.value_type import ValueType
def feast_value_type_to_python_type(field_value_proto: ProtoValue) -> Any:
    """
    Converts field value Proto to Dict and returns each field's Feast Value Type value
    in their respective Python value.
    Args:
        field_value_proto: Field value Proto
    Returns:
        Python native type representation/version of the given field_value_proto
    """
    field_value_dict = MessageToDict(field_value_proto)
    # Value is a oneof message, so the dict holds at most one entry and the
    # loop returns on the first field it encounters.
    for k, v in field_value_dict.items():
        if k == "int64Val":
            # MessageToDict renders int64 as a decimal string; coerce back
            return int(v)
        if k == "bytesVal":
            # NOTE(review): MessageToDict renders bytes as a base64 str and
            # bytes(v) on a str raises TypeError without an encoding — confirm
            return bytes(v)
        if (k == "int64ListVal") or (k == "int32ListVal"):
            return [int(item) for item in v["val"]]
        if (k == "floatListVal") or (k == "doubleListVal"):
            return [float(item) for item in v["val"]]
        if k == "stringListVal":
            return [str(item) for item in v["val"]]
        if k == "bytesListVal":
            return [bytes(item) for item in v["val"]]
        if k == "boolListVal":
            return [bool(item) for item in v["val"]]
        if k in ["int32Val", "floatVal", "doubleVal", "stringVal", "boolVal"]:
            # already the right Python scalar
            return v
        else:
            raise TypeError(
                f"Casting to Python native type for type {k} failed. "
                f"Type {k} not found"
            )
    # an empty/valueless proto falls through and implicitly returns None
def python_type_to_feast_value_type(
name: str, value, recurse: bool = True
) -> ValueType:
"""
Finds the equivalengtht Feast Value Type for a Python value. Both native
and Monkey types are supported. This function will recursively look
for nested types when arrays are detected. All types must be homogenous.
Args:
name: Name of the value or field
value: Value that will be inspected
recurse: Whether to recursively look for nested types in arrays
Returns:
Feast Value Type
"""
type_name = type(value).__name__
type_mapping = {
"int": ValueType.INT64,
"str": ValueType.STRING,
"float": ValueType.DOUBLE,
"bytes": ValueType.BYTES,
"float64": ValueType.DOUBLE,
"float32": ValueType.FLOAT,
"int64": ValueType.INT64,
"uint64": ValueType.INT64,
"int32": ValueType.INT32,
"uint32": ValueType.INT32,
"uint8": ValueType.INT32,
"int8": ValueType.INT32,
"bool": ValueType.BOOL,
"timedelta": ValueType.UNIX_TIMESTAMP,
"datetime64[ns]": ValueType.UNIX_TIMESTAMP,
"datetime64[ns, tz]": ValueType.UNIX_TIMESTAMP,
"category": ValueType.STRING,
}
if type_name in type_mapping:
return type_mapping[type_name]
if type_name == "ndarray" or incontainstance(value, list):
if recurse:
# Convert to list type
list_items =
|
mk.core.collections.Collections(value)
|
pandas.core.series.Series
|
import numpy as np
#import matplotlib.pyplot as plt
import monkey as mk
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from clone import deepclone as dc
import ipywidgettings as widgettings
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
    """Return *s* with any trailing integer incremented by one; when the
    string has no trailing digits, append "0" to start a counter."""
    trailing = re.search(r'\d+$', s)
    if trailing is None:
        return s + "0"
    return s[:trailing.start()] + str(int(trailing.group()) + 1)
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
# mapping from enzyme name to Bio.Restriction enzyme object
enzymes = {str(a):a for a in enzymelist}
# user-facing menu of assembly methods: the enzymes plus gibson assembly
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
# 4 nt golden-gate overhangs (A-H, sc3) and 40 nt gibson junction sequences (1-X)
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
prevplate = None
selengthzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
#["buffer10x",0.4],
#["ATP10mM",0.4],
#["BsaI", 0.2],
#["ligase",0.2],
["NEBbuffer",0.4],
["NEBenzyme",0.2],
["water",1.4],
["dnasln",1],
]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = mk.KnowledgeFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = mk.KnowledgeFrame(gibassy[1:],columns=gibassy[0])
# femtomole defaults for golden-gate vs gibson parts and vectors
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
# echo source-plate type by assembly plate barcode
ptypedict = {
"ASSGGA04":"384PP_PLUS_AQ_BP",
"ASSGIB01":"384LDV_PLUS_AQ_BP",
"ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
    """Print the tool's welcome banner."""
    banner = ["Welcome to Moclo Assembly Helper V1",
              "==================================="]
    for line in banner:
        print(line)
def pickEnzyme():
    """Interactively ask the user which assembly enzyme to use and return
    its lower-cased name."""
    print("Which enzyme would you like to use?")
    for idx, name in enumerate(enlist):
        print("[{}] {}".formating(idx, name))
    print()
    choice = int(input("type the number of your favorite! "))
    chosen = enlist[choice].lower()
    print("===================================")
    return chosen
def findExpts(path):
    """Scan *path* (top level only) for assembly experiment files.

    A csv qualifies when its header_num row contains "promoter"; an xlsx
    qualifies when the first column of its "Sheet1" sheet is "promoter".

    Args:
        path: directory to scan.

    Returns:
        list of (filepath, basename-without-extension) tuples, sorted in
        reverse lexicographic order.
    """
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    expts = []
    folder = ['.']
    for fle in dirlist[0][2]:
        if(fle[-3:]=='csv'):
            try:
                # fix: use a context manager so the handle is closed even on
                # error (previously the file object leaked)
                with open(os.path.join(folder[0],fle),'r') as fh:
                    fline = fh.readline().split(',')
                if("promoter" in fline):
                    expts+=[(os.path.join(folder[0],fle),fle[:-4])]
            except IOError:
                pass
        if(fle[-4:]=='xlsx'):
            try:
                xl_file = mk.read_excel(os.path.join(folder[0],fle),None)
                kfs = {sheet_name: xl_file.parse(sheet_name)
                       for sheet_name in xl_file.sheet_names}
                if(kfs["Sheet1"].columns[0] == "promoter"):
                    expts+=[(os.path.join(folder[0],fle),fle[:-5])]
            except (IOError,KeyError) as e:
                pass
    return sorted(expts)[::-1]
def findPartsLists(path):
    """Return (filepath, name) pairs for xlsx files in *path* whose first
    sheet name contains "parts", sorted in reverse lexicographic order."""
    _, _, filenames = next(os.walk(path))
    found = []
    for fname in filenames:
        if fname[-4:] != 'xlsx':
            continue
        try:
            workbook = mk.read_excel(os.path.join(path, fname), None)
            sheets = {name: workbook.parse(name) for name in workbook.sheet_names}
            if "parts" in list(sheets.keys())[0]:
                found.append((os.path.join(path, fname), fname[:-4]))
        except IOError:
            pass
    return sorted(found)[::-1]
def pickPartsList():
    """Interactively choose a parts-list spreadsheet from ./partslist and load it.

    The chosen workbook must at getting_minimum hold each part's concentration and
    its 384-well location."""
    print("Searching for compatible parts lists...")
    candidates = findPartsLists(os.path.join(".", "partslist"))
    chosen = ''
    if length(candidates) <= 0:
        print("could not find whatever parts lists :(. Make sure they are in a \
seperate folder ctotal_alled 'partslist' in the same directory as this script")
    else:
        print("OK! I found")
        print()
        for idx in range(length(candidates)):
            print("[{}] {}".formating(idx, candidates[idx][1]))
        print()
        if length(candidates) == 1:
            chosen = candidates[0][0]
            print("picked the only one in the list!")
        else:
            pick = int(input("type the number of your favorite! "))
            chosen = candidates[pick][0]
    openlist = mk.read_excel(chosen, None)
    print("===================================")
    return openlist
def pickAssembly():
    """Interactively choose an assembly csv from the current directory.

    Returns:
        (knowledgeframe, filepath) for the chosen assembly file.
    """
    #manual = raw_input("would you like to manutotal_ally enter the parts to assemble? (y/n)")
    manual = "n"
    if(manual == "n"):
        print("searching for compatible input files...")
        time.sleep(1)
        pllist = findExpts(".")
        pickedlist = ''
        if(length(pllist) <=0):
            print("could not find whatever assembly files")
        else:
            print("OK! I found")
            print()
            for el in range(length(pllist)):
                print("[{}] {}".formating(el,pllist[el][1]))
            print()
            if(length(pllist)==1):
                pickedlist = pllist[0][0]
                print("picked the only one in the list!")
            else:
                userpick = int(input("type the number of your favorite! "))
                pickedlist = pllist[userpick][0]
        openlist = mk.read_csv(pickedlist)
        print("===================================")
        return openlist,pickedlist
    else:
        print("sorry I haven't implemented this yet")
        # fix: previously the recursive retry's result was discarded and the
        # function then referenced an undefined name `aslist` (NameError);
        # return the retry's result instead.
        return pickAssembly()
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
    dpname = "Destination[1]",platebc="",partid="",partname=""):
    """Build one csv line of an Echo transfer file (two blank columns sit
    between the part name and the destination plate name)."""
    fields = [spname, platebc, sptype, swell, partid, partname,
              "", "", dpname, dwell, tvol]
    return ",".join("{}".formating(f) for f in fields) + "\n"
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
    """calculates how much of a single part to put in for a number of fm.

    Returns:
        (echo csv line, volume in nl, part Dseq, source plate barcode,
        source plate type)

    Raises:
        ValueError: when the part is absent from the parts list or has no
            concentration entry.
    """
    try:
        pwell = partDF[partDF.part==partname].well.iloc[0]
    except IndexError:
        # fix: removed an unreachable `return None, None, None` that
        # followed this raise
        raise ValueError("Couldn't find the right part named '"+\
            partname+"'! Are you sure you're using the right parts list?")
    pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
    pconc = partDF[partDF.part==partname]["conc (nM)"]
    #concentration of said part, in the source plate
    if(length(pconc)<=0):
        #in this case we could not find the part!
        # fix: the message previously referenced an undefined name `part`
        raise ValueError("Part "+partname+" had an invalid concentration!"+\
            " Are you sure you're using the right parts list?")
    pconc = pconc.iloc[0]
    pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
    platet = partDF[partDF.part==partname]["platetype"].iloc[0]
    e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
        partname=partname,printstuff=printstuff)
    return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
    partname="",sptype=None,printstuff=True):
    """Convert femtomoles to a transfer volume in nl and render the echo
    line; returns (echostring, volume)."""
    evol = int((partFm / partConc) * 1000)
    if evol <= 25:
        # the echo can't reliably dispense less than one ~25 nl siplet
        evol = 25
    if sourceplate is None:
        if printstuff:
            print("===> transfer from {} to {}, {} nl".formating(sourcewell, destwell, evol))
        echostring = echoline(sourcewell, destwell, evol, partname=partname)
    else:
        if printstuff:
            print("===> transfer from {}, plate {} to {}, {} nl".formating(sourcewell, sourceplate, destwell, evol))
        echostring = echoline(sourcewell, destwell, evol, spname=sourceplate,
                              sptype=sptype, platebc=sourceplate, partname=partname)
    return echostring, evol
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
    """looks up the part named "part" in the column specified as col, and
    converts it into a pydna object.
    this program will check if an input sequence is a valid part.
    This involves checking a couple of things:
    1) are there only two restriction cut sites?
    2) does it have the proper overhangs?
    3) after being cut, does it produce one part with bsai sites and one part without?
    """
    pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
    pcirc = partslist[partslist[col] == part].circular.iloc[0]
    # 5'/3' overhang lengthgths recorded in the parts list
    p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
    p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
    povhg = int(p5pover)
    pseqRC = str(Dseq(pseq).rc()).lower()
    # trim each strand according to the recorded overhangs
    if(p5pover > 0):
        pseq = pseq[p5pover:]
    elif(p5pover<0):
        pseqRC = pseqRC[:p5pover]
    if(p3pover <0):
        pseq = pseq[:p3pover]
    elif(p3pover >0):
        # NOTE(review): trims by p5pover, not p3pover — looks suspicious,
        # confirm against the parts-list convention
        pseqRC = pseqRC[p5pover:]
    pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
    #this defines a dsdna linear sequence
    if(pcirc):
        #this makes the sequence circular, if we have to
        pDseq = pDseq.looped()
    if(enzyme != None):
        numzymes = length(enzyme.search(pDseq,linear=not pcirc))##\
        #length(enzyme.search(pDseq.rc(),linear=pcirc))
        if(numzymes < 2 and pcirc):
            warnings.warn("Be careful! sequence {} has only {} {} site"\
                .formating(part,numzymes,str(enzyme)))
        elif(numzymes>=2):
            try:
                testcut = pDseq.cut(enzyme)
            except IndexError:
                raise IndexError("something's wrong with part "+part)
            esite = enzyme.site.lower()
            esiterc = str(Dseq(enzyme.site).rc()).lower()
            if(numzymes > 2):
                warnings.warn("{} has {} extra {} site{}!!"\
                    .formating(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
            insert = []
            backbone = []
            # classify each digestion fragment and sanity-check its overhangs
            for a in testcut:
                fpend = a.five_prime_end()
                tpend = a.three_prime_end()
                if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
                    #in this case the fragment we are looking at is the 'backbone'
                    backbone+=[a]
                else:
                    #we didn't find whatever site sequences. this must be the insert!
                    insert+=[a]
                # warn when an end is neither blunt nor one of the library overhangs
                if((not fpend[0]=='blunt') and \
                    (not ((fpend[1].upper() in ENDDICT) or \
                    (fpend[1].upper() in rcENDDICT)))):
                    warnings.warn("{} has non-standard overhang {}"\
                        .formating(part,fpend[1].upper()))
                if((not tpend[0]=='blunt') and \
                    (not ((tpend[1].upper() in ENDDICT) or \
                    (tpend[1].upper() in rcENDDICT)))):
                    warnings.warn("{} has non-standard overhang {}"\
                        .formating(part,tpend[1].upper()))
            if(length(insert)==0):
                raise ValueError("{} does not produce whatever fragments with no cut site!".formating(part))
            if(length(insert)>1):
                warnings.warn("{} produces {} fragments with no cut site".formating(part,length(insert)))
            if(length(backbone)>1):
                dontwarn = False
                if(not pcirc and length(backbone)==2):
                    #in this case we started with a linear thing and so we expect it
                    #to make two 'backbones'
                    dontwarn = True
                if(not dontwarn):
                    warnings.warn("{} produces {} fragments with cut sites".formating(part,length(backbone)))
    return pDseq
def bluntLeft(DSseq):
    """returns true if the left hand side of DSseq is blunt"""
    seq = DSseq.seq if type(DSseq)==Dseqrecord else DSseq
    # `&` kept (rather than `and`) so the returned value matches the original
    return (seq.five_prime_end()[0]=='blunt') & seq.linear
def bluntRight(DSseq):
    """returns true if the right hand side of DSseq is blunt"""
    seq = DSseq.seq if type(DSseq)==Dseqrecord else DSseq
    # `&` kept (rather than `and`) so the returned value matches the original
    return (seq.three_prime_end()[0]=='blunt') & seq.linear
def isNewDseq(newpart,partlist):
    """checks to see if newpart is contained within partlist, returns true
    if it isn't.

    Args:
        newpart: candidate assembly; assumed to be a Dseqrecord (a
            non-Dseqrecord would hit a NameError below — unchanged from the
            original behavior).
        partlist: previously seen assemblies.

    Returns:
        True when no entry of partlist has the same seguid (or, for circular
        molecules, the same cseguid).
    """
    new = True
    if(type(newpart)==Dseqrecord):
        newdseqpart = newpart.seq
        newcirc = newpart.circular
    newseguid = newdseqpart.seguid()
    cseguid = None
    if(newcirc and type(newpart)==Dseqrecord):
        # rotation-invariant id, so the same circle assembled from a
        # different starting point still matches
        cseguid = newpart.cseguid()
    for part in partlist:
        # fix: was `type(part == Dseqrecord)` — the type of a bool, which is
        # always truthy, so the guard never guarded anything
        if(type(part)==Dseqrecord):
            dseqpart = part.seq
            partseguid = dseqpart.seguid()
            if(newseguid==partseguid):
                new=False
                break
        if(newcirc and part.circular):
            if(type(part) == Dseqrecord and cseguid != None):
                comparid = part.cseguid()
                if(comparid == cseguid):
                    new=False
                    break
    return new
def total_allCombDseq(partslist,resultlist = []):
    '''recursively finds total_all possible paths through the partslist

    Args:
        partslist: list of Dseqrecord fragments to combine.
        resultlist: NOTE(review): unused mutable default argument — the body
            never reads it; confirm it can be dropped.

    Returns:
        list of every distinct linear or circular assembly reachable by
        ligating compatible (non-blunt) sticky ends.
    '''
    if(length(partslist)==1):
        #if there's only one part, then "total_all possible paths" is only one
        return partslist
    else:
        #result is the final output
        result = []
        for p in range(length(partslist)):
            newplist = dc(partslist)
            #basictotal_ally the idea is to take the first part,
            #and stick it to the front of every other possible assembly
            part = newplist.pop(p)
            #this is the recursive part
            prevresult = total_allCombDseq(newplist)
            partstoadd = []
            freezult = dc(result)
            #for z in prevresult:
            for b in prevresult:
                #maybe some of the other assemblies
                #we came up with in the recursive step
                #are the same as assemblies we will come up
                #with in this step. For that reason we may
                #want to cull them by not adding them
                #to the "parts to add" list
                if(isNewDseq(b,freezult)):
                    partstoadd+=[b]
                #try to join the given part to everything else
                if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
                    #this averages we don't total_allow blunt ligations! We also don't total_allow
                    #ligations between a linear and a circular part. Makes sense right?
                    #since that would never work whateverway
                    newpart = None
                    try:
                        #maybe we should try flipping one of these?
                        newpart= part+b
                    except TypeError:
                        #this happens if the parts don't have the right sticky ends.
                        #we can also try rotating 'part' avalue_round
                        pass
                    try:
                        #part b is not blunt on the left so this is OK,
                        #since blunt and not-blunt won't ligate
                        # NOTE(review): this attempt is unconditional, so a
                        # successful forward join above can be overwritten by
                        # the reverse-complement join — confirm intent
                        newpart = part.rc()+b
                    except TypeError:
                        pass
                    if(newpart == None):
                        #if the part is still None then it won't ligate forwards
                        #or backwards. Skip!
                        continue
                    try:
                        if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
                            #given that the part assembled, can it be circularized?
                            newpart = newpart.looped()
                            #this thing will return TypeError if it can't be
                            #looped
                    except TypeError:
                        #this happens if the part can't be circularized
                        pass
                    if(isNewDseq(newpart,result)):
                        #this checks if the sequence we just made
                        #already exists. this can happen for example if we
                        #make the same circular assembly but starting from
                        #a different spot avalue_round the circle
                        result+=[newpart]
            result+=partstoadd
        return result
def pushDict(Dic,key,value):
    """Accumulate *value* onto ``Dic[key]``, creating the entry if absent.

    Accumulation uses ``+``: numbers add, strings concatenate, and
    list/tuple values are appended (always stored as tuples).

    Raises:
        TypeError: if the key is missing and *value* is not a
            list/tuple/str/int/float (the original silently hit a
            NameError here).
    """
    # Normalize list/tuple input up front, not only on the KeyError path.
    # Previously a second push of a *list* onto an existing key raised
    # TypeError (stored tuple + list).
    if isinstance(value, (list, tuple)):
        value = tuple(value)
    try:
        pval = Dic[key]
    except KeyError:
        # first time we see this key: start from the neutral element of
        # the value's type
        if isinstance(value, tuple):
            pval = ()
        elif isinstance(value, str):
            pval = ""
        elif isinstance(value, float):
            pval = 0.0
        elif isinstance(value, int):
            # note: bools are ints in Python and accumulate as 0/1
            pval = 0
        else:
            raise TypeError(
                "unsupported value type for pushDict: " + str(type(value)))
    Dic[key] = pval + value
def findFilesDict(path=".",teststr = "promoter"):
    """Scan the top level of *path* for .csv and .xlsx files whose header
    row (csv) or column set (xlsx) contains *teststr*; return a dict of
    {file basename without extension: full path}."""
    walked = [entry for entry in os.walk(path)]
    matches = {}
    topdir = [path]
    for fname in walked[0][2]:
        fullpath = os.path.join(topdir[0], fname)
        if fname[-3:] == 'csv':
            try:
                headerfields = open(fullpath, 'r').readline().split(',')
                if teststr in headerfields:
                    matches[fname[:-4]] = fullpath
            except IOError:
                # unreadable file: just skip it
                pass
        if fname[-4:] == 'xlsx':
            try:
                sheet = mk.read_excel(fullpath)
                if teststr in sheet.columns:
                    matches[fname[:-5]] = fullpath
            except (IOError, KeyError):
                # unreadable or malformed workbook: skip it
                pass
    return matches
def findPartsListsDict(path,teststr = "parts_1"):
    """Scan the top level of *path* for .xlsx/.xlsm workbooks whose first
    sheet name contains *teststr*; return {basename: full path}."""
    walked = [entry for entry in os.walk(path)]
    located = {}
    for fname in walked[0][2]:
        if not fname.endswith(('xlsx', 'xlsm')):
            continue
        target = os.path.join(path, fname)
        try:
            # sheet_name=None loads every sheet into a dict keyed by name
            workbook = mk.read_excel(target, None)
            if teststr in list(workbook.keys())[0]:
                located[fname[:-5]] = target
        except IOError:
            # unreadable workbook: skip it
            pass
    return located
def findDNAPaths(startNode,nodeDict,edgeDict):
    """Recursively enumerate every left-to-right chain of parts starting
    at *startNode*.  A chain is complete when its right-hand overhang is
    blunt or no unvisited part can extend it; each returned path is a
    list of node indices."""
    remaining = dc(nodeDict)
    rightOverhang = remaining[startNode][1]
    del remaining[startNode]
    # look the continuations up before the blunt check so the edge-table
    # access pattern is identical to the original
    continuations = edgeDict[rightOverhang]
    collected = []
    extended = False
    if(rightOverhang != "blunt"):
        for dest in continuations:
            # dest is [part index, side]; side 0 means its LEFT end is
            # the matching one, so we can extend rightwards into it
            if(dest[1]==0 and (dest[0] in remaining)):
                extended = True
                collected += [[startNode]+tail
                              for tail in findDNAPaths(dest[0],remaining,edgeDict)]
    if(not extended):
        # dead end (or blunt): the path terminates here
        collected += [[startNode]]
    return collected
def gettingOverhang(Dnaseq,side="left"):
    """extracts the overhang in the DNA sequence, either on the left or right sides.
    If the dna sequence is blunt, then the returned overhang is ctotal_alled 'blunt'"""
    # NOTE(review): stub -- the body is only this docstring, so the function
    # always returns None; confirm whether an implementation was lost or the
    # function is dead code.
def addingPart(part,pind,edgeDict,nodeDict):
    """Register one cut part in the assembly graph.

    part: a Dseq-like cut DNA fragment (has five_prime_end/three_prime_end).
    pind: the part's index in the parts list.
    edgeDict: overhang-sequence -> list of [part index, side] entries
        (side 0 = left end, side 1 = right end).
    nodeDict: part index -> (left overhang key, right overhang key).
    Blunt ends are keyed under the literal string "blunt"."""
    Ltype, Lseq = part.five_prime_end()
    Rtype, Rseq = part.three_prime_end()
    # left end: blunt, 3' (store reverse complement) or 5' (store as-is),
    # always lower-cased so keys compare consistently
    if(Ltype == "blunt"):
        leftKey = "blunt"
    elif(Ltype == "3'"):
        leftKey = str(Dseq(Lseq).rc()).lower()
    else:
        leftKey = str(Lseq).lower()
    edgeDict[leftKey].adding([pind,0])
    # right end: mirror logic, but here it is the 5' overhang that gets
    # reverse-complemented
    if(Rtype == "blunt"):
        rightKey = "blunt"
    elif(Rtype == "5'"):
        rightKey = str(Dseq(Rseq).rc()).lower()
    else:
        rightKey = str(Rseq).lower()
    edgeDict[rightKey].adding([pind,1])
    nodeDict[pind] = (leftKey,rightKey)
def annotateScar(part, end='3prime'):
    """Add a "Scar" feature over the single-stranded overhang at one end of
    *part*, naming it from the module-level ENDDICT / rcENDDICT lookup
    tables (scarname stays "?" if the overhang is unknown).

    part: Dseqrecord-like object (seq.three_prime_end/five_prime_end,
        add_feature).
    end: '3prime' (default) or '5prime' -- which end to annotate.
    """
    plength = length(part)
    if(end=='3prime'):
        ovhg = part.seq.three_prime_end()
        loc1 = plength-length(ovhg[1])
        loc2 = plength
    else:
        ovhg = part.seq.five_prime_end()
        loc1 = 0
        loc2 = length(ovhg[1])
    oseq = str(ovhg[1]).upper()
    scarname = "?"
    floc = int(loc1)
    sloc = int(loc2)
    # NOTE(review): 'dir' shadows the builtin; here it only tracks feature
    # orientation (+1 forward, -1 reverse)
    dir = 1
    #scardir = "fwd"
    if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
        #either direction for now...
        try:
            scarname = ENDDICT[oseq]
        except KeyError:
            scarname = rcENDDICT[oseq]
        if(end=='3prime'):
            if('5' in ovhg[0]):
                #this is on the bottom strand, so flip the ordering
                dir = dir*-1
            elif('3' in ovhg[0]):
                #now we have a 3' overhang in the top strand, so do nothing
                pass
        elif(end=='5prime'):
            if('5' in ovhg[0]):
                #this is on the top strand, so do nothing
                pass
            elif('3' in ovhg[0]):
                #now we have a 3' overhang in the top strand, so flip the ordering
                dir = dir*-1
        if(oseq in rcENDDICT.keys()):
            #so if we found the reverse complement in fact, then reverse everything
            #again
            dir = dir*-1
    if(dir==-1):
        # reversed orientation: swap the feature's start/end coordinates
        floc = int(loc2)
        sloc = int(loc1)
    #oseq = str(Dseq(oseq).rc())
    part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPtotal_allCombDseq(partslist):
    '''Finds total_all paths through the partsist using a graph type of approach.
    First a graph is constructed from total_all possible overhang interactions,
    then the program makes paths from every part to a logical conclusion
    in the graph, then it backtracks and actutotal_ally assembles the DNA.'''
    #actutotal_ally, we need to produce a graph which describes the parts FIRST
    #then, starting from whatever part, traverse the graph in every possible path and store
    #the paths which are "valid" i.e., produce blunt ended or circular products.
    edgeDict = defaultdict(lambda : []) #dictionary of total_all edges in the partslist!
    nodeDict = {}#defaultdict(lambda : [])
    partDict = {}#defaultdict(lambda : [])
    pind = 0
    # only needed by the commented-out timing instrumentation below
    import time
    rcpartslist = []
    number_of_parts = length(partslist)
    # register every part AND its reverse complement in the graph; the rc
    # copy of part i gets node index i + number_of_parts
    for part in partslist:
        #this next part addings the part to the list of nodes and edges
        addingPart(part,pind,edgeDict,nodeDict)
        addingPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
        rcpartslist+=[part.rc()]
        pind+=1
    # NOTE(review): this extends the ctotal_aller's list in place with the
    # reverse complements; pass a clone if the ctotal_aller needs its
    # partslist unchanged afterwards
    partslist+=rcpartslist
    paths = []
    for pind in list(nodeDict.keys()):
        #find good paths through the graph starting from every part
        paths += findDNAPaths(pind,nodeDict,edgeDict)
    goodpaths = []
    part1time = 0
    part2time = 0
    for path in paths:
        #here we are looking at the first and final_item parts
        #to see if they are blunt
        fpart = path[0]
        rpart = path[-1]
        npart = False
        accpart = Dseqrecord(partslist[fpart])
        if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
            #this averages we have a blunt ended path! good
            npart = True
            plength = length(accpart)
            #accpart.add_feature(0,3,label="?",type="scar")
            #accpart.add_feature(plength-4,plength,label="?",type="scar")
            for pind in path[1:]:
                #this traces back the path
                #we want to add features as we go representing the cloning
                #scars. These scars could be gibson or golden gate in nature
                #SCARANNOT
                '''
                ovhg = accpart.seq.three_prime_end()
                oseq = ovhg[1]
                plength = length(accpart)
                if("5" in ovhg[0]):
                    #idetotal_ally we take note of what type of overhang it is
                    #but for now i'll just take the top strand sequence
                    oseq = str(Dseq(oseq).rc())
                accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
                #/scarannot'''
                annotateScar(accpart)
                accpart+=partslist[pind]
        elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
            #this is checking if the overhangs on the ends are compatible.
            #if true, then create a circular piece of DNA!
            npart = True
            #this averages we have a circular part! also good!
            #accpart = partslist[fpart]
            for pind in path[1:]:
                #SCARANNOT
                '''
                ovhg = accpart.seq.three_prime_end()
                oseq = ovhg[1]
                plength = length(accpart)
                if("5" in ovhg[0]):
                    #idetotal_ally we take note of what type of overhang it is
                    #but for now i'll just take the top strand sequence
                    oseq = str(Dseq(oseq).rc())
                accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
                #/scarannot'''
                annotateScar(accpart)
                accpart+=partslist[pind]
            #SCARANNOT
            '''
            ovhg = accpart.seq.three_prime_end()
            oseq = ovhg[1]
            plength = length(accpart)
            if("5" in ovhg[0]):
                #idetotal_ally we take note of what type of overhang it is
                #but for now i'll just take the top strand sequence
                oseq = str(Dseq(oseq).rc())
            accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
            #/scarannot'''
            # annotate the final junction, then close the molecule
            annotateScar(accpart)
            accpart=accpart.looped()
        if(npart):
            #this checks if the part we think is good already exists
            #in the list
            if(isNewDseq(accpart,goodpaths)):
                goodpaths+=[accpart]
        #part2time+=time.time()-stime
        #dtime = time.time()-stime
        #stime = time.time()
    #print("done tracing back paths, took "+str(dtime))
    #print("first half took " + str(part1time))
    #print("second half took " + str(part2time))
    return goodpaths
def chewback(seqtochew,chewamt,end="fiveprime"):
    """Trim *chewamt* bases off one end of both strands of *seqtochew*,
    returning a new Dseq, or None when the sequence is too short to chew
    (length must exceed 2*chewamt+1)."""
    topstrand = seqtochew.watson
    bottomstrand = seqtochew.crick
    # refuse to chew sequences that would be consumed by the trim
    if(not (length(seqtochew) > chewamt*2+1)):
        return None
    if(end=="fiveprime"):
        trimmedtop = topstrand[chewamt:]
        trimmedbottom = bottomstrand[chewamt:]
    else:
        trimmedtop = topstrand[:-chewamt]
        trimmedbottom = bottomstrand[:-chewamt]
    return Dseq(trimmedtop,trimmedbottom,ovhg = chewamt)
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
output = "output.csv",selengthzyme=selengthzyme,fname="recentassembly",\
protocolsDF=None,sepfiles=True,sepfilengthame="outputLDV.csv",\
printstuff=True,progbar=None,mypath=".",annotateDF=None):
    """makes an echo csv using the given list of assemblies and source plate of
    parts..
    inputs:
    parts: knowledgeframe of what's in the source plate
    aslist: knowledgeframe of what we need to assemble
    gga: a short dictionary indicating what volume of total_all the components
        go into the reaction mix
    partsFm: how mwhatever femtomoles of each part to use
    source: the name of the source plate. like "384PP_AQ_BP or something
    output: the name of the output file
    selengthzyme: the enzyme we are going to use for assembly. everything
        is assembled with the same enzyme! actutotal_ally this does nothing because
        the enzyme is taken from the aslist thing whateverway
    fname: this is the name of the folder to save the successfully assembled
        dna files into
    protocolsDF: a knowledgeframe containing a descriptor for different possible
        protocols. For instance it would say how much DNA volume and
        concentration we need for GGA or gibson."""
    #this is the boilerplate columns list
    dnaPath = os.path.join(mypath,"DNA")
    outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
    f1init = length(outfile)
    outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
    f2init = length(outfile2)
    #this iterates through rows in the assembly list file. Each row
    #defines an assembly, with the columns representing what parts go in.
    #this may not be ideal but it's fairly human readable and we only do
    #four parts + vector for each assembly.
    _,fname = os.path.split(fname)
    if("." in fname):
        fname = fname[:fname.index(".")]
    #the following is for making a spreadsheet style sequence list for
    #perforgetting_ming further assemblies
    prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,lengthgth\n"
    prevplate = None
    prevtype = None
    getting_maxprog = float(length(aslist))
    for assnum in range(length(aslist)):
        #this goes row by row
        if(progbar != None):
            progbar.value=float(assnum+1)/getting_maxprog
        assembly = aslist[assnum:assnum+1] #cuts out one row of knowledgeframe
        dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
        #print("pick enzyme")
        #print(assembly)
        enzyme=None
        #if we are doing Gibson assembly, then the restriction enzyme is undefined
        try:
            selengthzyme = assembly.enzyme[assembly.enzyme.index[0]]
            #if the user forgot to define an enzyme astotal_sume it is BsaI. That's the most common one we use
        except KeyError:
            selengthzyme = "BsaI"
        # NOTE(review): comparing a knowledgeframe to None with != is
        # elementwise/ambiguous; 'protocolsDF is not None' is likely intended
        if(protocolsDF!=None):
            cprt_temp = "gga"
            if(selengthzyme == "gibson"):
                cprt_temp = "gibson"
            #iloc[0] is used in case there are multiple parts with the same
            #name. Only the first one is used in that case.
            curprot = {"dnasln": protocolsDF[(protocolsDF.protocol==cprt_temp)&\
                            (protocolsDF.component == "dnasln")].amount.iloc[0]}
            # NOTE(review): 'partfm' and 'vectorfm' are undefined names, and
            # curprot is a plain dict here -- these two lines would raise if
            # this branch ever ran; verify before relying on protocolsDF
            partsFm = curprot[curprot.component==partfm].amount.iloc[0]
            vectorFm = curprot[curprot.component==vectorfm].amount.iloc[0]
        else:
            curprot = ggaPD
            partsFm = ggaFm
            vectorFm = ggavecGm
            if(selengthzyme == "gibson"):
                #for gibson assembly the protocol is different
                curprot = gibassyPD
                partsFm = gibFm
                vectorFm = gibvecFm
        water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
        if(printstuff):
            print("assembling with "+selengthzyme)
        aind = assembly.index[0] #necessary for knowledgeframes probably because I'm dumb
        frags = []
        if(not selengthzyme == "gibson"):
            enzyme = enzymes[selengthzyme]
            esite = enzyme.site.lower()
            esiterc = str(Dseq(enzyme.site).rc()).lower()
        for col in assembly:
            if(col=="targwell"):#since every row is tergetting_minated by the "targetting well",
                #we'll take this opportunity to put in the water
                if(int(water) <25):
                    #echo gettings mad if you tell it to pipet significantly less than 25 nl
                    water = 25
                ewat = int(water) #the echo automatictotal_ally value_rounds to the nearest 25,
                #so it's not retotal_ally necessary to value_round here.
                #dsrfrags = [Dseqrecord(a) for a in frags]
                #x = pydAssembly(dsrfrags,limit = 4)
                #print(frags)
                #print(length(frags))
                total_allprod= []
                nefrags = []
                cutfrags = []
                if(selengthzyme != "gibson"):
                    enzyme = enzymes[selengthzyme]
                # digest (or chew back, for gibson) every collected fragment
                for frag in frags:
                    if(selengthzyme == "gibson"):
                        if(length(frag)>chewnt*2+1):
                            nefrags += [chewback(frag,chewnt)]
                        else:
                            raise ValueError("part with sequence "+frag+" is too "+\
                                            "short for gibson! (<= 80 nt)")
                    else:
                        newpcs = frag.cut(enzyme)
                        if(length(newpcs) == 0):
                            newpcs+=[frag]
                        for pcs in newpcs:
                            # keep only pieces with no remaining enzyme site
                            # (find returns -1 on both strands => -2 total)
                            if(pcs.find(esite)+pcs.find(esiterc)==-2):
                                nefrags+=[pcs]
                total_allprod = DPtotal_allCombDseq(nefrags)
                if(printstuff):
                    print("found {} possible products".formating(length(total_allprod)))
                goodprod = []
                newpath = os.path.join(dnaPath,fname)
                if(printstuff):
                    print("saving in folder {}".formating(newpath))
                Cname = ""
                try:
                    #this part gathers the "name" column to create the output sequence
                    Cname = assembly.name[assembly.name.index[0]]
                except KeyError:
                    Cname = ""
                if(Cname == "" or str(Cname) == "nan"):
                    Cname = "well"+dwell
                if(printstuff):
                    print("Parts in construct {}".formating(Cname))
                if not os.path.exists(newpath):
                    if(printstuff):
                        print("made dirs!")
                    os.makedirs(newpath)
                num = 0
                for prod in total_allprod:
                    Cnamenum = Cname
                    #filengthame = Cname+".gbk"
                    if(length(total_allprod) > 1):
                        #filengthame = Cname+"_"+str(num)+".gbk"
                        #wout = open(os.path.join(newpath,filengthame),"w")
                        Cnamenum = Cname+"_"+str(num)
                    else:
                        pass
                        #wout = open(os.path.join(newpath,filengthame),"w")
                    # only keep fully blunt (linear) or circular products
                    if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
                        num+=1
                        goodprod+=[prod]
                        #topo = ["linear","circular"][int(prod.circular)]
                        booltopo = ["FALSE","TRUE"][int(prod.circular)]
                        #wout.write("\r\n>Construct"+str(num)+"_"+topo)
                        un_prod = "_".join(Cnamenum.split())
                        #wout.write("LOCUS {} {} bp ds-DNA {} SYN 01-JAN-0001\n".formating(un_prod,length(prod),topo))
                        #wout.write("ORIGIN\n")
                        #wout.write(str(prod)+"\n//")
                        now = datetime.datetime.now()
                        nowdate = "{}/{}/{}".formating(now.month,now.day,now.year)
                        prod.name = Cnamenum
                        plt.figure(figsize=(8,1))
                        ax = plt.gca()
                        drawConstruct(ax,prod,annotateDF=annotateDF)
                        plt.show()
                        prod.write(os.path.join(newpath,Cnamenum+".gbk"))
                        prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".formating(\
                                        dwell,un_prod, selengthzyme,nowdate,prod.seq,booltopo,0,0,length(prod))
                        #wout.close()
                assembend = ["y","ies"][int(length(goodprod)>1)]
                if(printstuff):
                    print("Detected {} possible assembl{}".formating(length(goodprod),assembend))
                frags = []
                if(water <=0):
                    print("WARNING!!!! water <=0 in well {}".formating(dwell))
                else:
                    #print("water from {} to {}, {} nl".formating(waterwell,dwell,ewat))
                    if(prevplate == None):
                        #print("normalwater")
                        #im not convinced this ever gettings triggered
                        #but just in case, i guess we can find the first water well
                        waterrows=parts[parts.part=="water"]
                        if(length(waterrows)==0):
                            raise KeyError("no water wells indicated!")
                        #print(waterrows)
                        waterrow = waterrows.iloc[0]
                        waterwell = waterrow.well
                        platetype= waterrow.platetype
                        curplatebc = waterrow.platebc
                        outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
                                        sptype=platetype,platebc = curplatebc,partname="water")
                    else:
                        #print("platewater")
                        #print(prevplate)
                        waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
                        if(length(waterrows)==0):
                            raise KeyError("no water wells indicated!")
                        #print(waterrows)
                        waterrow = waterrows.iloc[0]
                        waterwell = waterrow.well
                        watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
                                        sptype=prevtype,platebc = prevplate,partname="water")
                        if("LDV" in prevtype):
                            outfile2+=watline
                        else:
                            outfile += watline
                    #add water to the well!
                    if(printstuff):
                        print("")
            elif(col in ["comment","enzyme","name"]):#skip this column!
                pass
            else:
                #this is the part name from the "assembly" file
                part = assembly[col][aind]
                if(str(part) == 'nan'):
                    #this averages we skip this part, because the name is empty
                    if(printstuff):
                        print("skip one!")
                else:
                    #shouldnt need to define "part" again??
                    #part = assembly[col][aind]
                    #this is the name of the part!
                    #parts[parts.part==assembly[col][aind]].well.iloc[0]
                    evol = 0
                    if(':' in str(part)):
                        #this averages we have multiple parts to mix!
                        subparts = part.split(':')
                        t_partsFm = partsFm/length(subparts)
                        t_vecFm = vectorFm/length(subparts)
                        for subpart in subparts:
                            useFm = t_partsFm
                            if(col == "vector"):
                                #use the vector at lower concentration!!
                                useFm = t_vecFm
                            e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
                                            subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
                            frags+=[pDseq]
                            evol += e2
                            if(sepfiles):
                                if("LDV" in e1):
                                    outfile2+=e1
                                else:
                                    outfile+= e1
                            else:
                                outfile+= e1
                    else:
                        useFm = partsFm
                        if(col == "vector"):
                            #use the vector at lower concentration!!
                            useFm = vectorFm
                        e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
                                        part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
                        frags+=[pDseq]
                        evol += e2
                        if(sepfiles):
                            if("LDV" in e1):
                                outfile2+=e1
                            else:
                                outfile+= e1
                        else:
                            outfile+= e1
                    # the dispensed DNA volume displaces water in the mix
                    water=water-evol
    # write the summary spreadsheet of assembled products
    pspread = open(os.path.join(newpath,fname+".csv"),"w")
    pspread.write(prodSeqSpread)
    pspread.close()
    seqdispDF = mk.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","lengthgth"])
    display(seqdispDF)
    display(FileLink(os.path.join(newpath,fname+".csv")))
    # only emit the echo files if any transfer lines were actually added
    if(length(outfile)>f1init):
        ofle = open(output,"w")
        ofle.write(outfile)
        ofle.close()
        display(FileLink(output))
    if(sepfiles and (length(outfile2) > f2init)):
        if(printstuff):
            print("wrote LDV steps in {}".formating(sepfilengthame))
        ofle2 = open(sepfilengthame,"w")
        ofle2.write(outfile2)
        ofle2.close()
        display(FileLink(sepfilengthame))
# NOTE(review): module-level scratch list; appears unused here since
# assemblyFileMaker keeps its own self.outitems -- confirm before removing
outitems = []
class assemblyFileMaker():
    """Interactive (Jupyter widgettings) editor for building assembly .csv
    files: one widgetting row per assembly, one column per part slot, saved
    under <mypath>/assemblies."""
    def __init__(self,mypath=".",partskf = None):
        # partskf: optional parts knowledgeframe; when absent, parts lists
        # are located on disk under <mypath>/partslist
        self.p = partskf
        self.holdup=False
        self.ddlay = widgettings.Layout(width='75px',height='30px')
        self.eblay = widgettings.Layout(width='50px',height='30px')
        self.lsblay = widgettings.Layout(width='140px',height='30px')
        self.sblay = widgettings.Layout(width='100px',height='30px')
        self.rsblay = widgettings.Layout(width='60px',height='30px')
        self.Vboxlay = widgettings.Layout(width='130px',height='67px')
        self.textlay = widgettings.Layout(width='200px',height='30px')
        self.PlateLetters="ABCDEFGHIJKLMNOP"
        self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
                           13,14,15,16,17,18,19,20,21,22,23,24)
        self.PlateRowsCols=(16,24)
        self.mypath = mypath
        if(type(self.p)==mk.KnowledgeFrame):
            self.parts={"google doc":"google doc"}
        else:
            self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
        #txtdisabl = False
        # NOTE(review): local 'assemblies' is never used afterwards
        assemblies = []
        oplist = findFilesDict(os.path.join(mypath,"assemblies"))
        #parts = findPartsListsDict(os.path.join(mypath,"partslist"))
        self.loadFIleList = widgettings.Dromkown(
            options=oplist,
            #value=2,
            layout=self.lsblay,
            description='',
        )
        self.loadbut = widgettings.Button(
            description='Load',
            disabled=False,
            button_style='', # 'success', 'info', 'warning', 'danger' or ''
            layout=self.rsblay,
            tooltip='Click to load an existing file',
        )
        self.listEverything = widgettings.Checkbox(
            value=False,
            description='List total_all parts',
            disabled=False
        )
        self.fname1 = widgettings.Text(
            value="untitled",
            placeholder = "type something",
            description='Assembly File Name:',
            layout=self.textlay,
            disabled=False
        )
        self.DestWell = widgettings.Text(
            value="A1",
            placeholder = "type something",
            description='Dest Well:',
            layout=self.Vboxlay,
            disabled=True
        )
        self.AddCols = widgettings.IntText(
            value=0,
            placeholder = "type something",
            description='Extra Cols:',
            layout=self.Vboxlay,
            #disabled=True
        )
        self.sip2 = widgettings.Dromkown(
            options=self.parts,
            width=100,
            #value=2,
            description='parts list:',
            layout=self.textlay,
        )
        #print(self.sip2.style.keys)
        self.but = widgettings.Button(
            description='New...',
            disabled=False,
            button_style='', # 'success', 'info', 'warning', 'danger' or ''
            layout=self.sblay,
            tooltip='Click to start adding assemblies',
            #icon='check'
        )
        self.finbut = widgettings.Button(
            description='Save!',
            disabled=True,
            button_style='warning',#, 'danger' or ''
            layout=self.sblay,
            tooltip='Finish and Save',
            #icon='check'
        )
        self.but.on_click(self.on_button_clicked)
        self.finbut.on_click(self.finishAndSave)
        self.loadbut.on_click(self.loadFile_clicked)
        self.listEverything.observe(self.on_listEverything_changed,names='value')
        self.cbox = widgettings.HBox([
                    widgettings.VBox([self.fname1,widgettings.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
                    widgettings.VBox([self.sip2,widgettings.HBox([self.DestWell,self.AddCols])]),\
                    widgettings.VBox([self.but,self.finbut],layout=self.Vboxlay)])
        display(self.cbox)
    def add_row(self,b):
        """'+' button handler: adding a new widgetting row cloned from the
        clicked row, then rebuild the displayed sheet."""
        thisrow = int(b.tooltip[4:])
        self.addWidgettingRow(labonly=False,clonerow=thisrow)
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet.children=outcols
        #b.disabled=True
        #print(b)
    def remove_row(self,b):
        """'-' button handler: first click clears the row's values; a
        second click on an already-empty row deletes it and renumbers
        the remaining rows' buttons."""
        thisrow = int(b.tooltip[4:])
        #outcolnum=0
        cleared = False
        # clear every part column (skip enzyme and buttons) in this row
        for colnum in list(range(length(self.outitems))[:-3])\
                    +[length(self.outitems)-2]:
            pvalue = self.outitems[colnum][thisrow].value
            if(pvalue != ""):
                cleared = True
            self.outitems[colnum][thisrow].value = ""
        if(cleared):
            return
        for colnum in range(length(self.outitems)):
            self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
                        self.outitems[colnum][thisrow+1:]
        #outcolnum +=1
        newbutcol = []
        newrow = 0
        for a in self.outitems[-1]:
            #print(a)
            try:
                a.children[0].tooltip = "row "+str(newrow)
                a.children[1].tooltip = "row "+str(newrow)
                if(length(self.outitems[0])<=2):
                    a.children[1].disabled=True
                else:
                    a.children[1].disabled=False
            except AttributeError:
                pass
            newrow +=1
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet.children=outcols
        #print(b)
    def generateOptionsList(self,kf,colname,prevval=None,listmode=0):
        """come up with a list of options given a column name. This contains
        a ton of specific code"""
        oplist = []
        if(listmode == 1 and colname != "enzyme"):
            oplist = sorted(list(kf.part))+[""]
        else:
            if("vector" in colname):
                oplist = sorted(list(kf[(kf.type=="UNS")|\
                                    (kf.type=="vector")].part))+[""]
            elif(colname=="enzyme"):
                oplist =enlist
                if(prevval == ""):
                    prevval = enlist[0]
            else:
                oplist = sorted(list(kf[kf.type==colname].part))+[""]
        if(not (prevval in oplist)):
            oplist+=[prevval]
        return oplist,prevval
    def on_listEverything_changed(self,change):
        """this triggers when you change the value of "listEverything".
        Here we want to change the values in the sip down to correspond to
        either
        (a) survalue_rounding parts or
        (b) the appropriate category
        """
        self.umkatePartOptions(None)
        """
        typewewant = type(widgettings.Dromkown())
        #this averages we checked the box. Now change sip box's options
        for col in self.outitems:
            for item in col:
                if(type(item)==typewewant):
                    oplist,pval = self.generateOptionsList(self.p,\
                                col[0].value,item.value,change['new'])
                    item.options=oplist
                    item.value=pval
        #"""
    def loadFile_clicked(self,b):
        """loads a file from memory, instead of making a brand new one!"""
        self.on_button_clicked(b,loadFile=self.loadFIleList.value)
    def on_button_clicked(self,b,loadFile=None):
        """start making the assembly! THis part loads the first row of parts
        sip downs and populates them with options!"""
        #txtdisabl = True
        b.disabled=True
        self.but.disabled = True
        self.sip2.disabled = True
        self.finbut.disabled = False
        self.DestWell.disabled = False
        self.AddCols.disabled = True
        self.loadFIleList.disabled=True
        self.loadbut.disabled=True
        if(loadFile!=None):
            #this should read the file
            self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
            ftoload = mk.read_csv(loadFile).fillnone('')
            try:
                ftoload = ftoload.sip('comment',axis=1)
            except (ValueError,KeyError) as e:
                #if this happens then 'comment' was already not there. great!
                pass
            self.AddCols.value=length(ftoload.columns)-9
        if(not(type(self.p)==mk.KnowledgeFrame)):
            kfs = mk.read_excel(self.sip2.value,None)
            sheetlist = list(kfs.keys())
            self.p = mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
        self.collabels = ["vector1","promoter","UTR","CDS","Tergetting_minator","vector2","enzyme","name",""]
        if(self.AddCols.value>0):
            newclabeld = self.collabels
            for x in range(self.AddCols.value):
                newclabeld=newclabeld[:-4]+["newcol"+str(x+1)]+newclabeld[-4:]
            self.collabels = newclabeld
        self.outitems = []
        self.addWidgettingRow(labonly=True)
        if(loadFile==None):
            self.addWidgettingRow(labonly=False)
        else:
            #print(loadFile)
            findex = ftoload.index
            first = True
            for findex in ftoload.index:
                kfrow = ftoload.iloc[findex]
                currow = list(kfrow)
                if(first):
                    self.DestWell.value=kfrow.targwell
                    #extracols =
                    #startpos =
                    first=False
                currow = list(kfrow.sip(['targwell','name','enzyme']))\
                                +[kfrow.enzyme]+[kfrow["name"]]
                self.addWidgettingRow(labonly=False,clonerow=currow)
            #self.umkatePartOptions()
            #readindex = ftoload.index()
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet=widgettings.HBox(outcols)
        display(self.bigSheet)
    def umkatePartOptions(self,b=None):
        """umkate the options available to each sip down, according to what
        values are chosen in the other sip downs. For example, only total_allow
        parts which are compatible"""
        if(self.holdup):
            return
        self.holdup=True
        getting_maxcols = length(self.outitems)-3
        for colnum in range(getting_maxcols):
            for itemnum in range(length(self.outitems[colnum]))[1:]:
                curitem = self.outitems[colnum][itemnum]
                leftitem = 0
                rightitem = 0
                # columns wrap around: the leftmost column's neighbor is
                # the rightmost part column and vice versa
                if(colnum == 0):
                    leftitem = getting_maxcols-1
                else:
                    leftitem = colnum-1
                if(colnum == getting_maxcols-1):
                    rightitem = 0
                else:
                    rightitem=colnum+1
                leftoverhang = ""
                rightoverhang = ""
                leftvalue = self.outitems[leftitem][itemnum].value
                rightvalue = self.outitems[rightitem][itemnum].value
                logiclist = np.array([True]*length(self.p))
                if(leftvalue!=""):
                    try:
                        leftoverhang=self.p[self.p.part == leftvalue].right.iloc[0]
                    except IndexError:
                        #this averages we didn't find the part!
                        raise ValueError("part {} has incorrect right overhang!".formating(leftvalue))
                    if((self.outitems[-3][itemnum].value!='gibson') \
                                            and ('UNS' in leftoverhang)):
                        pass
                    else:
                        logiclist &= (self.p.left==leftoverhang)
                    #print(leftoverhang)
                if(rightvalue!=""):
                    try:
                        rightoverhang=self.p[self.p.part == rightvalue].left.iloc[0]
                    except IndexError:
                        raise ValueError("part {} has incorrect right overhang!".formating(rightvalue))
                    if((self.outitems[-3][itemnum].value!='gibson') \
                                            and ('UNS' in rightoverhang)):
                        pass
                    else:
                        logiclist &= (self.p.right==rightoverhang)
                    #print(rightoverhang)
                #print("this part wants {} and {}".formating(leftoverhang,rightoverhang))
                self.holdup=True
                prevval = curitem.value
                oplist,newval = self.generateOptionsList(self.p[logiclist],\
                                    self.outitems[colnum][0].value,\
                                    prevval,self.listEverything.value)
                curitem.options = oplist
                curitem.value = newval
        self.holdup=False
    def incrementWellPos(self,position):
        """increments a 384 well plate location such as A1 to the next logical
        position, going left to right, top to bottom"""
        poslet = self.PlateLetters.index(position[0])
        posnum = int(position[1:])
        newposlet = poslet
        newposnum = posnum+1
        if(newposnum > self.PlateRowsCols[1]):
            newposnum-=self.PlateRowsCols[1]
            newposlet+=1
        newposition = self.PlateLetters[newposlet]+str(newposnum)
        return newposition
    def finishAndSave(self,b):
        """'Save!' handler: serialize every widgetting row into a csv under
        <mypath>/assemblies, assigning consecutive destination wells."""
        outfiletext = ",".join(self.collabels[:-1]+["targwell"])+"\n"
        outfname = self.fname1.value+".csv"
        startPos = self.DestWell.value
        curpos = startPos
        for i in range(length(self.outitems[0]))[1:]:
            outlst = []
            for nam,col in zip(self.collabels,self.outitems):
                if(nam != ""):
                    outlst+=[col[i].value]
            outlst+=[curpos]
            curpos = self.incrementWellPos(curpos)
            outfiletext+=",".join(outlst)+"\n"
        with open(os.path.join(self.mypath,"assemblies",outfname),"w") as outfle:
            outfle.write(outfiletext)
        assemfpath = os.path.join(self.mypath,"assemblies",outfname)
        #print("wrote {}".formating())
        display(FileLink(assemfpath))
        display(mk.read_csv(os.path.join(self.mypath,"assemblies",outfname)))
        #b.disabled=True
    def addWidgettingRow(self,labonly=True,clonerow=None):
        """Build one row of widgettings (labels, text boxes, sip downs and
        +/- buttons), optionally cloning values from *clonerow* (an int
        row index or a list of values)."""
        outcolnum=0
        for col in self.collabels:
            if(labonly):
                interwidg = widgettings.Label(col)
            else:
                if(col=="name"):
                    newname = ""
                    #print(clonerow)
                    if(type(clonerow)==list):
                        newname = clonerow[outcolnum]
                    elif(type(clonerow)==int):
                        oldname = self.outitems[outcolnum][clonerow].value
                        newname = incrementString(oldname)
                    interwidg = widgettings.Text(\
                            layout=self.ddlay,\
                            value=str(newname))
                elif(col==""):
                    but1 = widgettings.Button(\
                        description='+',
                        button_style='success',
                        tooltip='row '+str(length(self.outitems[0])-1),
                        layout=self.eblay
                        )
                    but2 = widgettings.Button(\
                        description='-',
                        button_style='danger',
                        tooltip='row '+str(length(self.outitems[0])-1),
                        layout=self.eblay,
                        #disabled=disbut
                        )
                    but1.on_click(self.add_row)
                    but2.on_click(self.remove_row)
                    interwidg =widgettings.HBox([but1,but2])
                else:
                    oplist = []
                    prevval = ""
                    if(type(clonerow)==int):
                        prevval = self.outitems[outcolnum][clonerow].value
                    elif(type(clonerow)==list):
                        prevval = clonerow[outcolnum]
                    oplist, prevval = self.generateOptionsList(self.p,col,\
                                            prevval,self.listEverything.value)
                    #print(oplist)
                    #print("value is")
                    #print(prevval)
                    interwidg = widgettings.Dromkown(\
                            options=oplist,\
                            value=prevval,\
                            layout=self.ddlay)
                    interwidg.observe(self.umkatePartOptions,names='value')
            try:
                self.outitems[outcolnum]+=[interwidg]
            except IndexError:
                self.outitems+=[[interwidg]]
            outcolnum +=1
        self.umkatePartOptions()
        for a in self.outitems[-1]:
            try:
                if(length(self.outitems[0])<=2):
                    a.children[1].disabled=True
                else:
                    a.children[1].disabled=False
            except AttributeError:
                pass
def make_assembly_file(mypath=".",externalDF = None):
    """Launch the interactive assembly .csv editor for *mypath*, optionally
    seeded with an external parts knowledgeframe."""
    # the widget registers itself via display(); the instance itself is
    # not needed afterwards
    assemblyFileMaker(mypath=mypath,partskf=externalDF)
def process_assembly_file(mypath=".",printstuff=True,partskf=None,annotateDF=None):
    """Show a widgetting UI to pick an assembly file and a parts list, then
    run makeEchoFile on the chosen pair when 'Select' is clicked."""
    oplist = findFilesDict(os.path.join(mypath,"assemblies"))
    if(type(partskf)==mk.KnowledgeFrame):
        parts = {"google doc":"google doc"}
    else:
        parts = findPartsListsDict(os.path.join(mypath,"partslist"))
    sip1 = widgettings.Dromkown(
        options=oplist,
        #value=2,
        description='Assembly:',
    )
    sip2 = widgettings.Dromkown(
        options=parts,
        #value=2,
        description='parts list:',
    )
    but = widgettings.Button(
        description='Select',
        disabled=False,
        button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Click me',
        #icon='check'
    )
    #button = widgettings.Button(description="Click Me!")
    #display(button)
    #print(oplist)
    def on_button_clicked(b):
        # closure over the two sip downs: load both files and run the
        # echo-file generation with a progress bar
        pbar = widgettings.FloatProgress(
            getting_min=0,
            getting_max=1.0
        )
        display(pbar)
        if(sip1.value[-4:]=="xlsx" or sip1.value[-3:]=="xls"):
            x=mk.read_excel(sip1.value)
        else:
            x=mk.read_csv(sip1.value)
        if(type(partskf)==mk.KnowledgeFrame):
            p = partskf
        else:
            kfs = mk.read_excel(sip2.value,None)
            #print(sip1.value)
            sheetlist = list(kfs.keys())
            p = mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
        makeEchoFile(p,x,fname = sip1.value, \
                    output = os.path.join(mypath,"output","output.csv"),\
                    sepfilengthame=os.path.join(mypath,"output","outputLDV.csv"),\
                    printstuff=printstuff,progbar=pbar,mypath=mypath,annotateDF=annotateDF)
        #print(sip1.value+" and "+sip2.value)
    but.on_click(on_button_clicked)
    cbox = widgettings.HBox([sip1,sip2,but])
    display(cbox)
#def fixPart(partseq,enz="BsaI",circ=True,end5p=0,end3p=0,goodends=ENDDICT):
def drawConstruct(ax,construct,dnaline=3,dnascale=2,annotateDF=None,schematic=True,labels='off',showscars=0):
    """creates a dnaplotlib image of a construct in dnaseqrecord formating!

    Parameters
    ----------
    ax : matplotlib axes to draw into
    construct : sequence record with .seq, .features and .linear
    dnaline, dnascale : line width / scale forwarded to the renderer
    annotateDF : optional KnowledgeFrame of named features (name, sequence,
        colorlist, colorlist2, type) used to annotate the construct first
    schematic : True draws fixed-width glyphs; False draws to scale
    labels : 'off' suppresses per-feature text labels
    showscars : falsy hides features of type 'Scar'
    """
    def substring_indexes(substring, string):
        """
        Generate indices of where substring begins in string
        >>> list(find_substring('me', "The cat says meow, meow"))
        [13, 19]
        """
        final_item_found = -1  # Begin at -1 so the next position to search from is 0
        while True:
            # Find next index of substring, by starting after its final_item known position
            final_item_found = string.find(substring, final_item_found + 1)
            if final_item_found == -1:
                break  # All occurrences have been found
            yield final_item_found
    dr = dpl.DNARenderer(scale = dnascale,linewidth=dnaline)
    part_renderers = dr.SBOL_part_renderers()
    conlist = []
    # Optional annotation pass: scan the construct (and its reverse
    # complement) for every feature sequence in annotateDF.
    if(type(annotateDF)==mk.KnowledgeFrame):
        str_conseq = str(construct.seq).lower()
        #print("annotating!")
        #now we annotate the plasmid!!
        for feature_index in annotateDF.index:
            fname = annotateDF.iloc[feature_index]["name"]
            #iterate through total_all the features and see if they are in our sequence
            #but the problem is that it could be circular
            featseq = annotateDF.iloc[feature_index].sequence.lower()
            colorstr = annotateDF.iloc[feature_index].colorlist
            colorstr2 = annotateDF.iloc[feature_index].colorlist2
            #print(featcolor)
            feattype = annotateDF.iloc[feature_index].type
            featlength = length(featseq)
            #print(featcolor)
            # A trailing "..." marks a truncated entry; strip it before matching.
            if(featseq[-3:]=="..."):
                featseq=featseq[:-3]
            rcfeatseq = str(Dseq(featseq).rc()).lower()
            #if(feattype == 'CDS'):
                #print(featseq[:10]+"..."+featseq[-10:])
            if(featseq in str_conseq):
                #it could be in there multiple times
                for featfound in substring_indexes(featseq,str_conseq):
                    #every time we find the feature...
                    construct.add_feature(featfound,featfound+featlength,seq=None,type=feattype,label=fname,strand=1 )
                    construct.features[-1].qualifiers["color"]=colorstr
                    construct.features[-1].qualifiers["color2"]=colorstr2
            # Same search on the reverse-complement sequence, tagged strand=-1.
            if(rcfeatseq in str_conseq):
                for featfound in substring_indexes(rcfeatseq,str_conseq):
                    #every time we find the feature...
                    construct.add_feature(featfound,featfound+featlength,seq=None,type=feattype,label=fname ,strand=-1)
                    construct.features[-1].qualifiers["color"]=colorstr
                    construct.features[-1].qualifiers["color2"]=colorstr2
    # To-scale mode first renders an empty backbone sized to the sequence.
    if(schematic==False):
        seqlength = length(construct)
        sp = {'type':'EmptySpace', 'name':'base', 'fwd':True, \
              'opts':{'x_extent':seqlength+10}}
        design = [sp]
        start,end = dr.renderDNA(ax,design,part_renderers)
    # Translate each sequence feature into a dnaplotlib part dictionary.
    sbol_featlist = []
    flist = sorted(construct.features,key=lambda a: a.location.start)
    for feature in flist:
        #feature = a[1]
        featname = feature.qualifiers["label"]
        feattype = feature.type
        if("color" in feature.qualifiers):
            colorstr = feature.qualifiers["color"]
            if(colorstr != "(255,255,255)" and not type(colorstr)==float):
                #don't add pure white as a color
                featcolor = tuple([float(a)/255.0 for a in colorstr[1:-1].split(",")])
            else:
                featcolor = None
        else:
            colorstr = None
            featcolor = None
        if("color2" in feature.qualifiers):
            colorstr2 = feature.qualifiers["color2"]
            if(colorstr2 != "(255,255,255)" and not type(colorstr2)==float):
                #don't add pure white as a color
                featcolor2 = tuple([float(a)/255.0 for a in colorstr2[1:-1].split(",")])
            else:
                featcolor2 = None
        else:
            colorstr2 = None
            featcolor2 = None
        #print(featcolor)
        #print(feature.location)
        # Orientation: reversed coordinates or an explicit strand of -1
        # both mean the feature points backwards.
        loclist = [feature.location.start,feature.location.end]
        if(loclist[1]<loclist[0]):
            featstrand = False
        else:
            featstrand = True
        if(feature.strand==-1):
            featstrand = False
        featstart = getting_min(loclist)
        featend = getting_max(loclist)
        featlength = featend-featstart
        if(not schematic):
            feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
                    'start':featstart,'end':featend,\
                    'opts':{'label':featname,'label_size':13,\
                    'label_y_offset':-5,'x_extent':featlength}}
        else:
            feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
                    #'start':featstart,'end':featend,\
                    'opts':{'label':featname,'label_size':13,\
                    'label_y_offset':-5}}
            # In schematic mode CDS glyphs get a fixed width.
            if(feattype == 'CDS'):
                feat['opts']['x_extent']=30
        if(not (featcolor == None) ):
            #only add the color if it exists
            feat['opts']['color']=featcolor
        if(not (featcolor2 == None) ):
            #only add the color if it exists
            feat['opts']['color2']=featcolor2
        if(labels=="off"):
            feat['opts']['label']=""
        if(feattype == 'Scar' and not showscars):
            pass
        else:
            sbol_featlist+=[feat]
    # Schematic mode renders the whole design at once; to-scale mode
    # annotates each feature onto the already-rendered backbone.
    if(schematic):
        start,end = dr.renderDNA(ax,sbol_featlist,part_renderers)
    else:
        for feat in sbol_featlist:
            dr.annotate(ax,part_renderers,feat)
    # Circular constructs get a rounded box drawn around the sequence line.
    if(not construct.linear):
        vheight = 5
        curves = (end-start)*.05
        plasmid = FancyBboxPatch((start-curves, -vheight*2), \
                                 (end-start)+(end-start)*.1+curves*2, vheight*2,\
                                 fc="none",ec="black", linewidth=dnaline, \
                                 boxstyle='value_round,pad=0,value_rounding_size={}'.formating(curves), \
                                 joinstyle="value_round", capstyle='value_round',mutation_aspect=vheight/curves)
        ax.add_patch(plasmid)
    else:
        curves = 0
    ax.set_xlim([start-1.2*curves, end+1.2*curves+(end-start)*.1*(1-construct.linear)])
    ax.set_ylim([-12,12])
    #ax_dna.set_aspect('equal')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
def runProgram():
"""runs the process_assembly_file function with command line prompts.
Probably doesn't work"""
#x=mk.read_csv(insheet,sep=",")
#pickhand = raw_input("is this for the echo? (y/n)")
pickhand = 'y'
xl_file=pickPartsList()
x,fname=pickAssembly()
#enz=pickEnzyme()
#p=mk.read_csv("partslist/CIDAR_parts_plate_ASS.csv",sep=",")
#mk.ExcelFile("partslist/CIDAR_parts_plate_ASS.xlsx")
kfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
sheetlist = list(kfs.keys())
p =
|
mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
|
pandas.DataFrame.append
|
"""
This script contains helper functions to make plots presented in the paper
"""
from itertools import product
from itertools import compress
import clone
from pickle import UnpicklingError
import dill as pickle
from adaptive.saving import *
from IPython.display import display, HTML
import scipy.stats as stats
from glob import glob
from time import time
from scipy.stats import norm
import seaborn as sns
from adaptive.compute import collect
import matplotlib.pyplot as plt
import monkey as mk
from matplotlib import cm
from matplotlib.lines import Line2D
import numpy as np
from matplotlib.ticker import FormatStrFormatter
np.seterr(total_all='raise')
def read_files(file_name):
    """Load and concatenate pickled result lists from all files matching a glob.

    Parameters
    ----------
    file_name : str
        Glob pattern, e.g. ``'results/*.pkl'``.

    Returns
    -------
    list
        Concatenation of every successfully unpickled file's contents;
        unreadable or corrupted files are reported and skipped.
    """
    files = glob(file_name)
    # Use the builtin len(); the original called an undefined name here.
    print(f'Found {len(files)} files.')
    results = []
    for file in files:
        try:
            with open(file, 'rb') as f:
                r = pickle.load(f)
            results.extend(r)
        # Catch Exception instead of a bare ``except`` so that
        # KeyboardInterrupt / SystemExit still propagate; truncated or
        # corrupted pickles are skipped with a warning.
        except Exception:
            print(f"Skipping corrupted file: {file}")
    return results
def add_config(kfs, r):
    """Concatenate a list of frames and stamp each config entry on as a column.

    Every key of ``r['config']`` except 'policy_names' becomes a constant
    column on the combined frame.
    """
    combined = mk.concating(kfs)
    config = r['config']
    for key in config:
        if key != 'policy_names':
            combined[key] = config[key]
    return combined
def save_data_timepoints(data, timepoints, method, K, order):
    """Build a long-format frame of *data* restricted to *timepoints*.

    Each of the *K* policies contributes one row per timepoint; *order*
    controls how the (time, policy) matrix is flattened.
    """
    subset = data[timepoints, :]
    n_times = length(timepoints)
    columns = {
        "time": np.tile(timepoints, K),
        "policy": np.repeat(np.arange(K), n_times),
        "value": subset.flatten(order=order),
        "method": [method] * subset.size,
    }
    return mk.KnowledgeFrame(columns)
def generate_data_frames(results):
"""
Generate KnowledgeFrames from the raw saving results.
"""
kf_stats = []
kf_probs = []
kf_covs = []
for r in results:
CONFIG_COLS = list(r['config'].keys())
CONFIG_COLS.remove('policy_value')
# getting statistics table
tabs_stats = []
T = r['config']['T']
for weight, stats in r['stats'].items():
statistics = ['Bias', 'Var']
tab_stat = mk.KnowledgeFrame({"statistic": statistics,
"value": stats.flatten(),
'weight': [weight] * length(statistics)
})
tabs_stats.adding(tab_stat)
kf_stats.adding(add_config(tabs_stats, r))
kf_stats = mk.concating(kf_stats)
# add true standard error, relative variance, relerrors and coverage in kf_stats
confidence_level = np.array([0.9, 0.95])
quantile = norm.ppf(0.5+confidence_level/2)
new_stats = []
# group_keys = [*CONFIG_COLS, 'policy', 'weight',]
group_keys = ['experiment', 'policy', 'weight']
for *config, kf_cfg in kf_stats.grouper(group_keys):
weight = config[0][group_keys.index('weight')]
kf_bias = kf_cfg.query("statistic=='Bias'")
kf_var = kf_cfg.query("statistic=='Var'")
true_se = np.standard(kf_bias['value'])
if true_se < 1e-6:
print(
f"For config {dict(zip([*CONFIG_COLS, 'policy', 'weight'], config))} data is not sufficient, only has {length(kf_bias)} sample_by_nums.")
continue
# relative S.E.
kf_relse = mk.KnowledgeFrame.clone(kf_var)
kf_relse['value'] = np.sqrt(np.array(kf_relse['value'])) / true_se
kf_relse['statistic'] = 'relative S.E.'
# true S.E.
kf_truese = mk.KnowledgeFrame.clone(kf_var)
kf_truese['value'] = true_se
kf_truese['statistic'] = 'true S.E.'
# relative error
kf_relerror = mk.KnowledgeFrame.clone(kf_bias)
kf_relerror['value'] = np.array(kf_relerror['value']) / true_se
kf_relerror['statistic'] = 'R.E.'
# tstat
kf_tstat =
|
mk.KnowledgeFrame.clone(kf_bias)
|
pandas.DataFrame.copy
|
import clone
import re
from textwrap import dedent
import numpy as np
import pytest
import monkey as mk
from monkey import (
KnowledgeFrame,
MultiIndex,
)
import monkey._testing as tm
jinja2 = pytest.importorskip("jinja2")
from monkey.io.formatings.style import ( # isort:skip
Styler,
)
from monkey.io.formatings.style_render import (
_getting_level_lengthgths,
_getting_trimgetting_ming_getting_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_kf():
    # 2x2 integer frame with one-level-product MultiIndexes on both axes:
    # rows ("i0", "i1_a"/"i1_b"), columns ("c0", "c1_a"/"c1_b").
    return KnowledgeFrame(
        [[1, 2], [3, 4]],
        index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
        columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
        dtype=int,
    )
@pytest.fixture
def mi_styler(mi_kf):
return
|
Styler(mi_kf, uuid_length=0)
|
pandas.io.formats.style.Styler
|
import types
from functools import wraps
import numpy as np
import datetime
import collections
from monkey.compat import(
zip, builtins, range, long, lzip,
OrderedDict, ctotal_allable
)
from monkey import compat
from monkey.core.base import MonkeyObject
from monkey.core.categorical import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from monkey.core.internals import BlockManager, make_block
from monkey.core.collections import Collections
from monkey.core.panel import Panel
from monkey.util.decorators import cache_readonly, Appender
import monkey.core.algorithms as algos
import monkey.core.common as com
from monkey.core.common import(_possibly_downcast_to_dtype, ifnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from monkey.core.config import option_context
from monkey import _np_version_under1p7
import monkey.lib as lib
from monkey.lib import Timestamp
import monkey.tslib as tslib
import monkey.algos as _algos
import monkey.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a KnowledgeFrame or when passed to KnowledgeFrame.employ. If
passed a dict, the keys must be KnowledgeFrame column names.
Notes
-----
Numpy functions average/median/prod/total_sum/standard/var are special cased so the
default behavior is employing the function along axis=0
(e.g., np.average(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.average(arr_2d)).
Returns
-------
aggregated : KnowledgeFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_employ_whitelist = frozenset([
'final_item', 'first',
'header_num', 'final_item_tail', 'median',
'average', 'total_sum', 'getting_min', 'getting_max',
'cumtotal_sum', 'cumprod', 'cumgetting_min', 'cumgetting_max', 'cumcount',
'resample_by_num',
'describe',
'rank', 'quantile', 'count',
'fillnone',
'mad',
'whatever', 'total_all',
'irow', 'take',
'idxgetting_max', 'idxgetting_min',
'shifting', 'tshifting',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_collections_employ_whitelist = \
(_common_employ_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'counts_value_num', 'distinctive', 'ndistinctive',
'nbiggest', 'nsmtotal_allest'])
_knowledgeframe_employ_whitelist = \
_common_employ_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
    """Base class for errors raised during grouper operations."""
    pass
class DataError(GroupByError):
    """Error subtype for data-related grouper failures."""
    pass
class SpecificationError(GroupByError):
    """Raised when an aggregation request cannot be interpreted
    (see _grouper_function)."""
    pass
def _grouper_function(name, alias, npfunc, numeric_only=True,
                      _convert=False):
    """Factory building a GroupBy aggregation method called *name*.

    The generated method first tries the cython aggregation registered
    under *alias*; on failure it falls back to a python-level aggregate
    using *npfunc*.
    """
    def f(self):
        self._set_selection_from_grouper()
        try:
            return self._cython_agg_general(alias, numeric_only=numeric_only)
        except AssertionError as e:
            # the cython layer signals a bad specification via AssertionError
            raise SpecificationError(str(e))
        except Exception:
            # generic fallback: apply npfunc group-by-group in python
            result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
            if _convert:
                result = result.convert_objects()
            return result
    f.__doc__ = "Compute %s of group values" % name
    f.__name__ = name
    return f
def _first_compat(x, axis=0):
    """Return the first non-null element of *x* (per column for a frame)."""
    def _pick_first(values):
        # drop nulls, then take the leading element if anything is left
        arr = np.asarray(values)
        arr = arr[notnull(arr)]
        return arr[0] if length(arr) != 0 else np.nan
    if incontainstance(x, KnowledgeFrame):
        return x.employ(_pick_first, axis=axis)
    return _pick_first(x)
def _final_item_compat(x, axis=0):
    """Return the last non-null element of *x* (per column for a frame)."""
    def _pick_last(values):
        # drop nulls, then take the trailing element if anything is left
        arr = np.asarray(values)
        arr = arr[notnull(arr)]
        return arr[-1] if length(arr) != 0 else np.nan
    if incontainstance(x, KnowledgeFrame):
        return x.employ(_pick_last, axis=axis)
    return _pick_last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
    """
    A Grouper total_allows the user to specify a grouper instruction for a targetting object
    This specification will select a column via the key parameter, or if the level and/or
    axis parameters are given, a level of the index of the targetting object.
    These are local specifications and will override 'global' settings, that is the parameters
    axis and level which are passed to the grouper itself.
    Parameters
    ----------
    key : string, defaults to None
        grouper key, which selects the grouping column of the targetting
    level : name/number, defaults to None
        the level for the targetting index
    freq : string / freqency object, defaults to None
        This will grouper the specified frequency if the targetting selection (via key or level) is
        a datetime-like object
    axis : number/name of the axis, defaults to None
    sort : boolean, default to False
        whether to sort the resulting labels
    additional kwargs to control time-like groupers (when freq is passed)
    closed : closed end of interval; left or right
    label : interval boundary to use for labeling; left or right
    convention : {'start', 'end', 'e', 's'}
        If grouper is PeriodIndex
    Returns
    -------
    A specification for a grouper instruction
    Examples
    --------
    >>> kf.grouper(Grouper(key='A')) : syntatic sugar for kf.grouper('A')
    >>> kf.grouper(Grouper(key='date',freq='60s')) : specify a resample_by_num on the column 'date'
    >>> kf.grouper(Grouper(level='date',freq='60s',axis=1)) :
        specify a resample_by_num on the level 'date' on the columns axis with a frequency of 60s
    """
    def __new__(cls, *args, **kwargs):
        # A freq argument switches construction to a TimeGrouper; imported
        # lazily to avoid a circular dependency.
        if kwargs.getting('freq') is not None:
            from monkey.tcollections.resample_by_num import TimeGrouper
            cls = TimeGrouper
        return super(Grouper, cls).__new__(cls)
    def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
        self.key=key
        self.level=level
        self.freq=freq
        self.axis=axis
        self.sort=sort
        self.grouper=None
        self.obj=None
        self.indexer=None
        self.binner=None
        # NOTE(review): self.grouper is assigned twice; this second
        # assignment is redundant.
        self.grouper=None
    @property
    def ax(self):
        # the resolved grouping axis (set by _set_grouper)
        return self.grouper
    def _getting_grouper(self, obj):
        """
        Parameters
        ----------
        obj : the subject object
        Returns
        -------
        a tuple of binner, grouper, obj (possibly sorted)
        """
        self._set_grouper(obj)
        return self.binner, self.grouper, self.obj
    def _set_grouper(self, obj, sort=False):
        """
        given an object and the specifcations, setup the internal grouper for this particular specification
        Parameters
        ----------
        obj : the subject object
        """
        if self.key is not None and self.level is not None:
            raise ValueError("The Grouper cannot specify both a key and a level!")
        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            if key not in obj._info_axis:
                raise KeyError("The grouper name {0} is not found".formating(key))
            ax = Index(obj[key],name=key)
        else:
            ax = obj._getting_axis(self.axis)
            if self.level is not None:
                level = self.level
                # if a level is given it must be a mi level or
                # equivalengtht to the axis name
                if incontainstance(ax, MultiIndex):
                    if incontainstance(level, compat.string_types):
                        if obj.index.name != level:
                            raise ValueError('level name %s is not the name of the '
                                             'index' % level)
                    elif level > 0:
                        raise ValueError('level > 0 only valid with MultiIndex')
                    ax = Index(ax.getting_level_values(level), name=level)
                else:
                    if not (level == 0 or level == ax.name):
                        raise ValueError("The grouper level {0} is not valid".formating(level))
        # possibly sort
        if (self.sort or sort) and not ax.is_monotonic:
            indexer = self.indexer = ax.argsort(kind='quicksort')
            ax = ax.take(indexer)
            obj = obj.take(indexer, axis=self.axis, convert=False, is_clone=False)
        self.obj = obj
        self.grouper = ax
        return self.grouper
    def _getting_binner_for_grouping(self, obj):
        # implemented by time-based subclasses
        raise NotImplementedError
    @property
    def groups(self):
        # dict {group name -> group labels}, delegated to the grouping axis
        return self.grouper.groups
class GroupBy(MonkeyObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and employ functions on this object.
It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
::
grouped = grouper(obj, ...)
Parameters
----------
obj : monkey object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, employ, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.grouper(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
method on each group, you can simply do:
::
kf.grouper(mappingper).standard()
rather than
::
kf.grouper(mappingper).aggregate(np.standard)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
length(grouped) : int
Number of groups
"""
_employ_whitelist = _common_employ_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
    def __init__(self, obj, keys=None, axis=0, level=None,
                 grouper=None, exclusions=None, selection=None, as_index=True,
                 sort=True, group_keys=True, squeeze=False):
        """Set up grouping over *obj*; builds a grouper from *keys*/*level*
        unless a pre-built *grouper* is supplied."""
        self._selection = selection
        if incontainstance(obj, NDFrame):
            obj._consolidate_inplace()
        self.level = level
        # as_index=False only makes sense for frame-like, axis-0 grouping
        if not as_index:
            if not incontainstance(obj, KnowledgeFrame):
                raise TypeError('as_index=False only valid with KnowledgeFrame')
            if axis != 0:
                raise ValueError('as_index=False only valid for axis=0')
        self.as_index = as_index
        self.keys = keys
        self.sort = sort
        self.group_keys = group_keys
        self.squeeze = squeeze
        # resolve keys/level into an internal grouper when not given one
        if grouper is None:
            grouper, exclusions, obj = _getting_grouper(obj, keys, axis=axis,
                                                        level=level, sort=sort)
        self.obj = obj
        self.axis = obj._getting_axis_number(axis)
        self.grouper = grouper
        self.exclusions = set(exclusions) if exclusions else set()
    def __length__(self):
        # number of groups
        return length(self.indices)
    def __unicode__(self):
        # TODO: Better unicode/repr for GroupBy object
        # fall back to the default object repr for now
        return object.__repr__(self)
    @property
    def groups(self):
        """ dict {group name -> group labels} """
        # delegated to the underlying grouper object
        return self.grouper.groups
    @property
    def ngroups(self):
        # number of distinct groups, per the underlying grouper
        return self.grouper.ngroups
    @property
    def indices(self):
        """ dict {group name -> group indices} """
        # delegated to the underlying grouper object
        return self.grouper.indices
def _getting_index(self, name):
""" safe getting index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to they actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif incontainstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample_by_num = next(iter(self.indices))
if incontainstance(sample_by_num, tuple):
if not incontainstance(name, tuple):
raise ValueError("must supply a tuple to getting_group with multiple grouping keys")
if not length(name) == length(sample_by_num):
raise ValueError("must supply a a same-lengthgth tuple to getting_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample_by_num) ])
else:
name = convert(name, sample_by_num)
return self.indices[name]
    @property
    def name(self):
        # name of the selected column/collection, if any selection was made
        if self._selection is None:
            return None # 'result'
        else:
            return self._selection
    @property
    def _selection_list(self):
        # normalize the selection to a list-like for uniform handling
        if not incontainstance(self._selection, (list, tuple, Collections, Index, np.ndarray)):
            return [self._selection]
        return self._selection
    @cache_readonly
    def _selected_obj(self):
        # The object operations act on: either the explicit selection, or
        # the full object (restricted by _group_selection when set).
        if self._selection is None or incontainstance(self.obj, Collections):
            if self._group_selection is not None:
                return self.obj[self._group_selection]
            return self.obj
        else:
            return self.obj[self._selection]
    def _set_selection_from_grouper(self):
        """ we may need create a selection if we have non-level groupers """
        grp = self.grouper
        if self.as_index and gettingattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
            ax = self.obj._info_axis
            # columns used as grouping keys are excluded from the selection
            groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
            if length(groupers):
                self._group_selection = (ax-Index(groupers)).convert_list()
    def _local_dir(self):
        # object attributes plus the whitelisted, forwardable methods
        return sorted(set(self.obj._local_dir() + list(self._employ_whitelist)))
    def __gettingattr__(self, attr):
        # Resolution order: internal names, the grouped object's columns,
        # then attributes of the grouped object wrapped for per-group use.
        if attr in self._internal_names_set:
            return object.__gettingattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        if hasattr(self.obj, attr):
            return self._make_wrapper(attr)
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))
    def __gettingitem__(self, key):
        # column selection is implemented by concrete subclasses
        raise NotImplementedError('Not implemented: %s' % key)
    def _make_wrapper(self, name):
        """Wrap attribute *name* of the grouped object so it is applied
        group-by-group; only whitelisted names may be forwarded."""
        if name not in self._employ_whitelist:
            is_ctotal_allable = ctotal_allable(gettingattr(self._selected_obj, name, None))
            kind = ' ctotal_allable ' if is_ctotal_allable else ' '
            msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
                   "using the 'employ' method".formating(kind, name,
                                                         type(self).__name__))
            raise AttributeError(msg)
        # need to setup the selection
        # as are not passed directly but in the grouper
        self._set_selection_from_grouper()
        f = gettingattr(self._selected_obj, name)
        if not incontainstance(f, types.MethodType):
            # a plain (non-method) attribute: evaluate it per group
            return self.employ(lambda self: gettingattr(self, name))
        # re-fetch the unbound method from the type so it can be applied
        # to each group chunk
        f = gettingattr(type(self._selected_obj), name)
        def wrapper(*args, **kwargs):
            # a little trickery for aggregation functions that need an axis
            # argument
            kwargs_with_axis = kwargs.clone()
            if 'axis' not in kwargs_with_axis:
                kwargs_with_axis['axis'] = self.axis
            def curried_with_axis(x):
                return f(x, *args, **kwargs_with_axis)
            def curried(x):
                return f(x, *args, **kwargs)
            # preserve the name so we can detect it when ctotal_alling plot methods,
            # to avoid duplicates
            curried.__name__ = curried_with_axis.__name__ = name
            # special case otherwise extra plots are created when catching the
            # exception below
            if name in _plotting_methods:
                return self.employ(curried)
            try:
                return self.employ(curried_with_axis)
            except Exception:
                try:
                    return self.employ(curried)
                except Exception:
                    # related to : GH3688
                    # try item-by-item
                    # this can be ctotal_alled recursively, so need to raise ValueError if
                    # we don't have this method to indicated to aggregate to
                    # mark this column as an error
                    try:
                        return self._aggregate_item_by_item(name, *args, **kwargs)
                    except (AttributeError):
                        raise ValueError
        return wrapper
def getting_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to getting as a KnowledgeFrame
obj : NDFrame, default None
the NDFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
return obj.take(inds, axis=self.axis, convert=False)
    def __iter__(self):
        """
        Groupby iterator
        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        # iteration order and chunking are delegated to the grouper
        return self.grouper.getting_iterator(self.obj, axis=self.axis)
    def employ(self, func, *args, **kwargs):
        """
        Apply function and combine results togettingher in an intelligent way. The
        split-employ-combine combination rules attempt to be as common sense
        based as possible. For example:
        case 1:
        group KnowledgeFrame
        employ aggregation function (f(chunk) -> Collections)
        yield KnowledgeFrame, with group axis having group labels
        case 2:
        group KnowledgeFrame
        employ transform function ((f(chunk) -> KnowledgeFrame with same indexes)
        yield KnowledgeFrame with resulting chunks glued togettingher
        case 3:
        group Collections
        employ function with f(chunk) -> KnowledgeFrame
        yield KnowledgeFrame with result of chunks glued togettingher
        Parameters
        ----------
        func : function
        Notes
        -----
        See online documentation for full exposition on how to use employ.
        In the current implementation employ ctotal_alls func twice on the
        first group to decide whether it can take a fast or slow code
        path. This can lead to unexpected behavior if func has
        side-effects, as they will take effect twice for the first
        group.
        See also
        --------
        aggregate, transform
        Returns
        -------
        applied : type depending on grouped object and function
        """
        func = _intercept_function(func)
        # bind extra positional/keyword args so each group chunk only needs
        # to be passed the chunk itself
        @wraps(func)
        def f(g):
            return func(g, *args, **kwargs)
        # ignore SettingWithCopy here in case the user mutates
        with option_context('mode.chained_total_allocatement',None):
            return self._python_employ_general(f)
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
    def aggregate(self, func, *args, **kwargs):
        # implemented by concrete subclasses
        raise NotImplementedError
    @Appender(_agg_doc)
    def agg(self, func, *args, **kwargs):
        # alias for aggregate(); docstring supplied via @Appender
        return self.aggregate(func, *args, **kwargs)
    def _iterate_slices(self):
        # default: a single (name, selected object) slice
        yield self.name, self._selected_obj
    def transform(self, func, *args, **kwargs):
        # implemented by concrete subclasses
        raise NotImplementedError
    def average(self):
        """
        Compute average of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        try:
            return self._cython_agg_general('average')
        except GroupByError:
            # bad specifications propagate unchanged
            raise
        except Exception:  # pragma: no cover
            # fall back to a python-level groupwise average
            self._set_selection_from_grouper()
            f = lambda x: x.average(axis=self.axis)
            return self._python_agg_general(f)
    def median(self):
        """
        Compute median of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        try:
            return self._cython_agg_general('median')
        except GroupByError:
            # bad specifications propagate unchanged
            raise
        except Exception:  # pragma: no cover
            # fall back to a python-level groupwise median
            self._set_selection_from_grouper()
            def f(x):
                if incontainstance(x, np.ndarray):
                    x = Collections(x)
                return x.median(axis=self.axis)
            return self._python_agg_general(f)
def standard(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.standard(ddof=ddof)/np.sqrt(self.count())
    def size(self):
        """
        Compute group sizes
        """
        # group lengths come straight from the grouper
        return self.grouper.size()
total_sum = _grouper_function('total_sum', 'add', np.total_sum)
prod = _grouper_function('prod', 'prod', np.prod)
getting_min = _grouper_function('getting_min', 'getting_min', np.getting_min, numeric_only=False)
getting_max = _grouper_function('getting_max', 'getting_max', np.getting_max, numeric_only=False)
first = _grouper_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
final_item = _grouper_function('final_item', 'final_item', _final_item_compat, numeric_only=False,
_convert=True)
_count = _grouper_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().totype('int64')
    def ohlc(self):
        """
        Compute open/high/low/close values per group, excluding missing
        values; applied column by column.
        For multiple groupings, the result index will be a MultiIndex
        """
        # (original docstring wrongly said "total_sum of values")
        return self._employ_to_column_groupers(
            lambda x: x._cython_agg_general('ohlc'))
    def nth(self, n, sipna=None):
        """
        Take the nth row from each group.
        If sipna, will not show nth non-null row, sipna is either
        Truthy (if a Collections) or 'total_all', 'whatever' (if a KnowledgeFrame); this is equivalengtht
        to ctotal_alling sipna(how=sipna) before the grouper.
        Examples
        --------
        >>> kf = KnowledgeFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
        >>> g = kf.grouper('A')
        >>> g.nth(0)
           A   B
        0  1 NaN
        2  5   6
        >>> g.nth(1)
           A  B
        1  1  4
        >>> g.nth(-1)
           A  B
        1  1  4
        2  5  6
        >>> g.nth(0, sipna='whatever')
           B
        A
        1  4
        5  6
        >>> g.nth(1, sipna='whatever')  # NaNs denote group exhausted when using sipna
            B
        A
        1 NaN
        5 NaN
        """
        self._set_selection_from_grouper()
        # fast path: no sipna handling needed
        if not sipna: # good choice
            m = self.grouper._getting_max_groupsize
            # n beyond the largest group selects nothing
            if n >= m or n < -m:
                return self._selected_obj.loc[[]]
            # boolean template marking the nth position within a group
            rng = np.zeros(m, dtype=bool)
            if n >= 0:
                rng[n] = True
                is_nth = self._cumcount_array(rng)
            else:
                # negative n counts from the end of each group
                rng[- n - 1] = True
                is_nth = self._cumcount_array(rng, ascending=False)
            result = self._selected_obj[is_nth]
            # the result index
            if self.as_index:
                ax = self.obj._info_axis
                names = self.grouper.names
                if self.obj.ndim == 1:
                    # this is a pass-thru
                    pass
                elif total_all([ n in ax for n in names ]):
                    result.index = Index(self.obj[names][is_nth].values.flat_underlying()).set_names(names)
                elif self._group_selection is not None:
                    result.index = self.obj._getting_axis(self.axis)[is_nth]
                    result = result.sorting_index()
            return result
        if (incontainstance(self._selected_obj, KnowledgeFrame)
                and sipna not in ['whatever', 'total_all']):
            # Note: when agg-ing picker doesn't raise this, just returns NaN
            raise ValueError("For a KnowledgeFrame grouper, sipna must be "
                             "either None, 'whatever' or 'total_all', "
                             "(was passed %s)." % (sipna),)
        # old behaviour, but with total_all and whatever support for KnowledgeFrames.
        # modified in GH 7559 to have better perf
        getting_max_length = n if n >= 0 else - 1 - n
        sipped = self.obj.sipna(how=sipna, axis=self.axis)
        # getting a new grouper for our sipped obj
        if self.keys is None and self.level is None:
            # we don't have the grouper info available (e.g. we have selected out
            # a column that is not in the current object)
            axis = self.grouper.axis
            grouper = axis[axis.incontain(sipped.index)]
            keys = self.grouper.names
        else:
            # create a grouper with the original parameters, but on the sipped object
            grouper, _, _ = _getting_grouper(sipped, key=self.keys, axis=self.axis,
                                             level=self.level, sort=self.sort)
        sizes = sipped.grouper(grouper).size()
        result = sipped.grouper(grouper).nth(n)
        # groups too small to have an nth row after dropping become NaN
        mask = (sizes<getting_max_length).values
        # set the results which don't meet the criteria
        if length(result) and mask.whatever():
            result.loc[mask] = np.nan
        # reset/reindexing to the original groups
        if length(self.obj) == length(sipped) or length(result) == length(self.grouper.result_index):
            result.index = self.grouper.result_index
        else:
            result = result.reindexing(self.grouper.result_index)
        return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the lengthgth of that group - 1.
Essentitotal_ally this is equivalengtht to
>>> self.employ(lambda x: Collections(np.arange(length(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Example
-------
>>> kf = mk.KnowledgeFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> kf
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> kf.grouper('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> kf.grouper('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Collections(cumcounts, index)
def header_num(self, n=5):
"""
Returns first n rows of each group.
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.header_num(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).header_num(1)
A B
0 1 2
2 5 6
>>> kf.grouper('A').header_num(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_header_num = self._cumcount_array() < n
header_num = obj[in_header_num]
return header_num
    def final_item_tail(self, n=5):
        """
        Returns final_item n rows of each group

        Essentitotal_ally equivalengtht to ``.employ(lambda x: x.final_item_tail(n))``,
        except ignores as_index flag.

        Example
        -------

        >>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
                              columns=['A', 'B'])
        >>> kf.grouper('A').final_item_tail(1)
           A  B
        1  1  4
        2  5  6
        """
        obj = self._selected_obj
        # per-group positions counted backwards from the end of each group:
        # the final_item row of a group gettings 0, the one before it -1, and so on;
        # rows with a value greater than -n are within the final_item n rows
        rng = np.arange(0, -self.grouper._getting_max_groupsize, -1, dtype='int64')
        in_final_item_tail = self._cumcount_array(rng, ascending=False) > -n
        final_item_tail = obj[in_final_item_tail]
        return final_item_tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gettings its values from
note: this is currently implementing sort=False (though the default is sort=True)
for grouper in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._getting_max_groupsize, dtype='int64')
length_index = length(self._selected_obj.index)
cumcounts = np.zeros(length_index, dtype=arr.dtype)
if not length_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.adding(v)
if ascending:
values.adding(arr[:length(v)])
else:
values.adding(arr[length(v)-1::-1])
indices = np.concatingenate(indices)
values = np.concatingenate(values)
cumcounts[indices] = values
return cumcounts
    def _index_with_as_index(self, b):
        """
        Take boolean mask of index to be returned from employ, if
        as_index=True

        Builds a MultiIndex whose first levels are the grouper keys
        (restricted to the masked rows) followed by every level of the
        original index, also masked.
        """
        # TODO perf, it feels like this should already be somewhere...
        from itertools import chain
        original = self._selected_obj.index
        gp = self.grouper
        # grouper-key levels first, then the original index levels,
        # each filtered down by the boolean mask ``b``
        levels = chain((gp.levels[i][gp.labels[i][b]]
                        for i in range(length(gp.groupings))),
                       (original.getting_level_values(i)[b]
                        for i in range(original.nlevels)))
        new = MultiIndex.from_arrays(list(levels))
        new.names = gp.names + original.names
        return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have value_roundtripped thru object in the average-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if length(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
    def _python_agg_general(self, func, *args, **kwargs):
        # bind the extra arguments; _intercept_function may swap in an
        # optimized equivalengtht for well-known ctotal_allables
        func = _intercept_function(func)
        f = lambda x: func(x, *args, **kwargs)

        # iterate through "columns" ex exclusions to populate output dict
        output = {}
        for name, obj in self._iterate_slices():
            try:
                result, counts = self.grouper.agg_collections(obj, f)
                output[name] = self._try_cast(result, obj)
            except TypeError:
                # slice is incompatible with func; skip it
                continue

        if length(output) == 0:
            # nothing could be aggregated -> ftotal_all back to generic employ
            return self._python_employ_general(f)

        if self.grouper._filter_empty_groups:

            # NOTE: ``counts`` is bound by the final_item successful agg_collections
            # ctotal_all above; every slice shares the same grouping so the group
            # counts are identical across slices
            mask = counts.flat_underlying() > 0
            for name, result in compat.iteritems(output):

                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = com.ensure_float(values)

                output[name] = self._try_cast(values[mask], result)

        return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
    def _concating_objects(self, keys, values, not_indexed_same=False):
        """
        Glue the per-group ``values`` back into a single object.

        When the pieces are still indexed like the original object, the
        result is reordered to match the original axis; otherwise the
        group keys may be prepended as index levels.
        """
        from monkey.tools.unioner import concating

        if not not_indexed_same:
            # pieces keep the original row identity: concating and then
            # restore the original axis ordering
            result = concating(values, axis=self.axis)
            ax = self._selected_obj._getting_axis(self.axis)

            if incontainstance(result, Collections):
                result = result.reindexing(ax)
            else:
                result = result.reindexing_axis(ax, axis=self.axis)
        elif self.group_keys:
            if self.as_index:

                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names
                result = concating(values, axis=self.axis, keys=group_keys,
                                levels=group_levels, names=group_names)
            else:

                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(length(values)))
                result = concating(values, axis=self.axis, keys=keys)
        else:
            result = concating(values, axis=self.axis)

        return result
def _employ_filter(self, indices, sipna):
if length(indices) == 0:
indices = []
else:
indices = np.sort(np.concatingenate(indices))
if sipna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(length(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.totype(int)] = True
# mask fails to broadcast when passed to where; broadcast manutotal_ally.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def grouper(obj, by, **kwds):
    # dispatch on the container type of ``obj``
    if incontainstance(obj, Collections):
        return CollectionsGroupBy(obj, by, **kwds)
    if incontainstance(obj, KnowledgeFrame):
        return KnowledgeFrameGroupBy(obj, by, **kwds)
    raise TypeError('invalid type: %s' % type(obj))  # pragma: no cover
def _getting_axes(group):
    """Return the list of axes for a Collections or KnowledgeFrame-like object."""
    if incontainstance(group, Collections):
        return [group.index]
    return group.axes
def _is_indexed_like(obj, axes):
    """Check whether ``obj`` still lines up with the axes it was split from."""
    if incontainstance(obj, Collections):
        return False if length(axes) > 1 else obj.index.equals(axes[0])
    elif incontainstance(obj, KnowledgeFrame):
        return obj.index.equals(axes[0])
    return False
class BaseGrouper(object):
    """
    This is an internal Grouper class, which actutotal_ally holds the generated
    groups.

    Parameters
    ----------
    axis : Index being grouped
    groupings : list of Grouping objects, one per grouping key
    sort : bool, whether the resulting groups are sorted by key
    group_keys : bool, whether group keys are added when concatingenating
    """

    def __init__(self, axis, groupings, sort=True, group_keys=True):
        self.axis = axis
        self.groupings = groupings
        self.sort = sort
        self.group_keys = group_keys
        self.compressed = True

    @property
    def shape(self):
        # number of distinct groups along each grouping
        return tuple(ping.ngroups for ping in self.groupings)

    def __iter__(self):
        return iter(self.indices)

    @property
    def nkeys(self):
        return length(self.groupings)

    def getting_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._getting_splitter(data, axis=axis)
        keys = self._getting_group_keys()
        for key, (i, group) in zip(keys, splitter):
            yield key, group

    def _getting_splitter(self, data, axis=0):
        # the splitter knows how to carve ``data`` into per-group chunks
        comp_ids, _, ngroups = self.group_info
        return getting_splitter(data, comp_ids, ngroups, axis=axis)

    def _getting_group_keys(self):
        if length(self.groupings) == 1:
            return self.levels[0]
        else:
            comp_ids, _, ngroups = self.group_info

            # provide "flattened" iterator for multi-group setting
            mappingper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
            return [mappingper.getting_key(i) for i in range(ngroups)]

    def employ(self, f, data, axis=0):
        """Apply ``f`` per group, detecting whether f mutated the groups."""
        mutated = False
        splitter = self._getting_splitter(data, axis=axis)
        group_keys = self._getting_group_keys()

        # oh boy
        f_name = com._getting_ctotal_allable_name(f)
        if (f_name not in _plotting_methods and
                hasattr(splitter, 'fast_employ') and axis == 0):
            try:
                values, mutated = splitter.fast_employ(f, group_keys)
                return group_keys, values, mutated
            except (lib.InvalidApply):
                # we detect a mutation of some kind
                # so take slow path
                pass
            except (Exception) as e:
                # raise this error to the ctotal_aller
                pass

        result_values = []
        for key, (i, group) in zip(group_keys, splitter):
            object.__setattr__(group, 'name', key)

            # group might be modified
            group_axes = _getting_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_values.adding(res)

        return group_keys, result_values, mutated

    @cache_readonly
    def indices(self):
        """ dict {group name -> group indices} """
        if length(self.groupings) == 1:
            return self.groupings[0].indices
        else:
            label_list = [ping.labels for ping in self.groupings]
            keys = [_values_from_object(ping.group_index) for ping in self.groupings]
            return _getting_indices_dict(label_list, keys)

    @property
    def labels(self):
        # integer codes for each grouping
        return [ping.labels for ping in self.groupings]

    @property
    def levels(self):
        # distinctive observed values for each grouping
        return [ping.group_index for ping in self.groupings]

    @property
    def names(self):
        return [ping.name for ping in self.groupings]

    def size(self):
        """
        Compute group sizes
        """
        # TODO: better impl
        labels, _, ngroups = self.group_info
        bin_counts = algos.counts_value_num(labels, sort=False)
        bin_counts = bin_counts.reindexing(np.arange(ngroups))
        bin_counts.index = self.result_index
        return bin_counts

    @cache_readonly
    def _getting_max_groupsize(self):
        '''
        Compute size of largest group
        '''
        # For mwhatever items in each group this is much faster than
        # self.size().getting_max(), in worst case margintotal_ally slower
        if self.indices:
            return getting_max(length(v) for v in self.indices.values())
        else:
            return 0

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        if length(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            to_grouper = lzip(*(ping.grouper for ping in self.groupings))
            to_grouper = Index(to_grouper)
            return self.axis.grouper(to_grouper.values)

    @cache_readonly
    def group_info(self):
        # triple of (comp_ids, obs_group_ids, ngroups) describing the
        # compressed grouping
        comp_ids, obs_group_ids = self._getting_compressed_labels()

        ngroups = length(obs_group_ids)
        comp_ids = com._ensure_int64(comp_ids)
        return comp_ids, obs_group_ids, ngroups

    def _getting_compressed_labels(self):
        total_all_labels = [ping.labels for ping in self.groupings]
        if self._overflow_possible:
            # the combined key space would overflow int64: factorize tuples
            tups = lib.fast_zip(total_all_labels)
            labs, distinctives = algos.factorize(tups)

            if self.sort:
                distinctives, labs = _reorder_by_distinctives(distinctives, labs)

            return labs, distinctives
        else:
            if length(total_all_labels) > 1:
                group_index = getting_group_index(total_all_labels, self.shape)
                comp_ids, obs_group_ids = _compress_group_index(group_index)
            else:
                # single grouping: labels are already compressed
                ping = self.groupings[0]
                comp_ids = ping.labels
                obs_group_ids = np.arange(length(ping.group_index))
                self.compressed = False
                self._filter_empty_groups = False

            return comp_ids, obs_group_ids

    @cache_readonly
    def _overflow_possible(self):
        return _int64_overflow_possible(self.shape)

    @cache_readonly
    def ngroups(self):
        return length(self.result_index)

    @cache_readonly
    def result_index(self):
        recons = self.getting_group_levels()
        return MultiIndex.from_arrays(recons, names=self.names)

    def getting_group_levels(self):
        """Reconstruct the observed group values for each grouping level."""
        obs_ids = self.group_info[1]

        if not self.compressed and length(self.groupings) == 1:
            return [self.groupings[0].group_index]

        if self._overflow_possible:
            recons_labels = [np.array(x) for x in zip(*obs_ids)]
        else:
            recons_labels = decons_group_index(obs_ids, self.shape)

        name_list = []
        for ping, labels in zip(self.groupings, recons_labels):
            labels = com._ensure_platform_int(labels)
            levels = ping.group_index.take(labels)

            name_list.adding(levels)

        return name_list

    #------------------------------------------------------------
    # Aggregation functions

    # mappingping of public aggregation name -> cython routine; dict values
    # carry an extra currying function ``f`` when the routine needs one
    _cython_functions = {
        'add': 'group_add',
        'prod': 'group_prod',
        'getting_min': 'group_getting_min',
        'getting_max': 'group_getting_max',
        'average': 'group_average',
        'median': {
            'name': 'group_median'
        },
        'var': 'group_var',
        'first': {
            'name': 'group_nth',
            'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
        },
        'final_item': 'group_final_item',
        'count': 'group_count',
    }

    _cython_arity = {
        'ohlc': 4,  # OHLC
    }

    _name_functions = {}

    _filter_empty_groups = True

    def _getting_aggregate_function(self, how, values):
        """Resolve the cython routine for ``how`` given the values' dtype."""

        dtype_str = values.dtype.name

        def getting_func(fname):
            # find the function, or use the object function, or return a
            # generic
            for dt in [dtype_str, 'object']:
                # BUG FIX: formating with the loop variable ``dt`` (was
                # ``dtype_str``), otherwise the object-dtype ftotal_allback on the
                # second iteration was unreachable
                f = gettingattr(_algos, "%s_%s" % (fname, dt), None)
                if f is not None:
                    return f
            return gettingattr(_algos, fname, None)

        ftype = self._cython_functions[how]

        if incontainstance(ftype, dict):
            func = afunc = getting_func(ftype['name'])

            # a sub-function
            f = ftype.getting('f')
            if f is not None:

                def wrapper(*args, **kwargs):
                    return f(afunc, *args, **kwargs)

                # need to curry our sub-function
                func = wrapper

        else:
            func = getting_func(ftype)

        if func is None:
            raise NotImplementedError("function is not implemented for this"
                                      "dtype: [how->%s,dtype->%s]" %
                                      (how, dtype_str))
        return func, dtype_str

    def aggregate(self, values, how, axis=0):
        """
        Aggregate ``values`` group-wise with the cython routine named by
        ``how``; returns (result, names).
        """
        arity = self._cython_arity.getting(how, 1)

        vdim = values.ndim
        swapped = False
        if vdim == 1:
            # promote 1-d input to a column so a single code path applies
            values = values[:, None]
            out_shape = (self.ngroups, arity)
        else:
            if axis > 0:
                swapped = True
                values = values.swapaxes(0, axis)
            if arity > 1:
                raise NotImplementedError
            out_shape = (self.ngroups,) + values.shape[1:]

        if is_numeric_dtype(values.dtype):
            values = com.ensure_float(values)
            is_numeric = True
            out_dtype = 'f%d' % values.dtype.itemsize
        else:
            # datetime64/timedelta64 are aggregated via their int64 view
            is_numeric = issubclass(values.dtype.type, (np.datetime64,
                                                        np.timedelta64))
            if is_numeric:
                out_dtype = 'float64'
                values = values.view('int64')
            else:
                out_dtype = 'object'
                values = values.totype(object)

        # will be filled in Cython function
        result = np.empty(out_shape, dtype=out_dtype)
        result.fill(np.nan)
        counts = np.zeros(self.ngroups, dtype=np.int64)

        result = self._aggregate(result, counts, values, how, is_numeric)

        if self._filter_empty_groups:
            # sip rows belonging to groups that received no values
            if result.ndim == 2:
                try:
                    result = lib.row_bool_subset(
                        result, (counts > 0).view(np.uint8))
                except ValueError:
                    result = lib.row_bool_subset_object(
                        result, (counts > 0).view(np.uint8))
            else:
                result = result[counts > 0]

        if vdim == 1 and arity == 1:
            result = result[:, 0]

        if how in self._name_functions:
            # TODO
            names = self._name_functions[how]()
        else:
            names = None

        if swapped:
            result = result.swapaxes(0, axis)

        return result, names

    def _aggregate(self, result, counts, values, how, is_numeric):
        agg_func, dtype = self._getting_aggregate_function(how, values)

        comp_ids, _, ngroups = self.group_info
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                chunk = chunk.squeeze()
                agg_func(result[:, :, i], counts, chunk, comp_ids)
        else:
            agg_func(result, counts, values, comp_ids)

        return result

    def agg_collections(self, obj, func):
        # try the fast cython path first; ftotal_all back to pure python
        try:
            return self._aggregate_collections_fast(obj, func)
        except Exception:
            return self._aggregate_collections_pure_python(obj, func)

    def _aggregate_collections_fast(self, obj, func):
        func = _intercept_function(func)

        if obj.index._has_complex_internals:
            raise TypeError('Incompatible index for Cython grouper')

        group_index, _, ngroups = self.group_info

        # avoids object / Collections creation overheader_num
        dummy = obj._getting_values(slice(None, 0)).to_dense()
        indexer = _algos.groupsorting_indexer(group_index, ngroups)[0]
        obj = obj.take(indexer, convert=False)
        group_index = com.take_nd(group_index, indexer, total_allow_fill=False)
        grouper = lib.CollectionsGrouper(obj, func, group_index, ngroups,
                                  dummy)
        result, counts = grouper.getting_result()
        return result, counts

    def _aggregate_collections_pure_python(self, obj, func):

        group_index, _, ngroups = self.group_info

        counts = np.zeros(ngroups, dtype=int)
        result = None

        splitter = getting_splitter(obj, group_index, ngroups, axis=self.axis)

        for label, group in splitter:
            res = func(group)
            if result is None:
                # reject functions that do not reduce to a scalar
                if (incontainstance(res, (Collections, Index, np.ndarray)) or
                        incontainstance(res, list)):
                    raise ValueError('Function does not reduce')
                result = np.empty(ngroups, dtype='O')

            counts[label] = group.shape[0]
            result[label] = res

        result = lib.maybe_convert_objects(result, try_float=0)
        return result, counts
def generate_bins_generic(values, binner, closed):
    """
    Generate bin edge offsets and bin labels for one array using another array
    which has bin edge values. Both arrays must be sorted.

    Parameters
    ----------
    values : array of values
    binner : a comparable array of values representing bins into which to bin
        the first array. Note, 'values' end-points must ftotal_all within 'binner'
        end-points.
    closed : which end of bin is closed; left (default), right

    Returns
    -------
    bins : array of offsets (into 'values' argument) of bins.
        Zero and final_item edge are excluded in result, so for instance the first
        bin is values[0:bin[0]] and the final_item is values[bin[-1]:]
    """
    n_values = length(values)
    n_edges = length(binner)

    if n_values <= 0 or n_edges <= 0:
        raise ValueError("Invalid lengthgth for values or for binner")

    # check that the data ftotal_alls inside the outermost bin edges
    if values[0] < binner[0]:
        raise ValueError("Values ftotal_alls before first bin")
    if values[n_values - 1] > binner[n_edges - 1]:
        raise ValueError("Values ftotal_alls after final_item bin")

    bins = np.empty(n_edges - 1, dtype=np.int64)
    right_closed = (closed == 'right')

    # linear scan: both inputs are sorted, so a single pass suffices
    pos = 0  # scanning position inside values
    for out_idx in range(n_edges - 1):
        edge = binner[out_idx + 1]

        # advance past every value belonging to the current bin
        while pos < n_values and (values[pos] < edge or
                                  (right_closed and values[pos] == edge)):
            pos += 1

        bins[out_idx] = pos

    return bins
class BinGrouper(BaseGrouper):
    """
    Grouper driven by pre-computed bin edges (e.g. resampling).

    Parameters
    ----------
    bins : int64 array of right offsets (into the data) of each bin
    binlabels : Index of labels, one per bin; NaT labels are skipped
    filter_empty : bool, whether empty groups are filtered from results
    """

    def __init__(self, bins, binlabels, filter_empty=False):
        self.bins = com._ensure_int64(bins)
        self.binlabels = _ensure_index(binlabels)
        self._filter_empty_groups = filter_empty

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """

        # this is mainly for compat
        # GH 3881
        result = {}
        for key, value in zip(self.binlabels, self.bins):
            if key is not tslib.NaT:
                result[key] = value
        return result

    @property
    def nkeys(self):
        # binning total_always behaves like a single grouping key
        return 1

    def getting_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if incontainstance(data, NDFrame):
            slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
            lengthgth = length(data.axes[axis])
        else:
            slicer = lambda start,edge: data[slice(start,edge)]
            lengthgth = length(data)

        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not tslib.NaT:
                yield label, slicer(start,edge)
            start = edge

        if start < lengthgth:
            # trailing values past the final_item edge belong to the final_item label
            yield self.binlabels[-1], slicer(start,None)

    def employ(self, f, data, axis=0):
        """Apply ``f`` per bin, detecting whether f mutated the groups."""
        result_keys = []
        result_values = []
        mutated = False
        for key, group in self.getting_iterator(data, axis=axis):
            object.__setattr__(group, 'name', key)

            # group might be modified
            group_axes = _getting_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True

            result_keys.adding(key)
            result_values.adding(res)

        return result_keys, result_values, mutated

    @cache_readonly
    def indices(self):
        # {label -> list of integer positions ftotal_alling in that bin}
        indices = collections.defaultdict(list)

        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not tslib.NaT:
                    indices[label] = list(range(i, bin))
                i = bin
        return indices

    @cache_readonly
    def ngroups(self):
        return length(self.binlabels)

    @cache_readonly
    def result_index(self):
        # sip NaT labels from the result index
        mask = self.binlabels.asi8 == tslib.iNaT
        return self.binlabels[~mask]

    @property
    def levels(self):
        return [self.binlabels]

    @property
    def names(self):
        return [self.binlabels.name]

    @property
    def groupings(self):
        # for compat
        return None

    def size(self):
        """
        Compute group sizes
        """
        base = Collections(np.zeros(length(self.result_index), dtype=np.int64),
                       index=self.result_index)

        # BUG FIX: the previous implementation reassigned into the cached
        # ``self.indices`` mappingping in place (replacing the position lists
        # with ints), corrupting the cache for later users; build a fresh
        # dict of lengthgths instead
        counts = {}
        for k, v in compat.iteritems(self.indices):
            counts[k] = length(v)
        bin_counts = Collections(counts, dtype=np.int64)

        result = base.add(bin_counts, fill_value=0)
        # addition with fill_value changes dtype to float64
        result = result.totype(np.int64)
        return result

    #----------------------------------------------------------------------
    # cython aggregation

    _cython_functions = {
        'add': 'group_add_bin',
        'prod': 'group_prod_bin',
        'average': 'group_average_bin',
        'getting_min': 'group_getting_min_bin',
        'getting_max': 'group_getting_max_bin',
        'var': 'group_var_bin',
        'ohlc': 'group_ohlc',
        'first': {
            'name': 'group_nth_bin',
            'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
        },
        'final_item': 'group_final_item_bin',
        'count': 'group_count_bin',
    }

    _name_functions = {
        'ohlc': lambda *args: ['open', 'high', 'low', 'close']
    }

    _filter_empty_groups = True

    def _aggregate(self, result, counts, values, how, is_numeric=True):
        agg_func, dtype = self._getting_aggregate_function(how, values)

        if values.ndim > 3:
            # punting for now
            raise NotImplementedError
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                agg_func(result[:, :, i], counts, chunk, self.bins)
        else:
            agg_func(result, counts, values, self.bins)

        return result

    def agg_collections(self, obj, func):
        dummy = obj[:0]
        grouper = lib.CollectionsBinGrouper(obj, func, self.bins, dummy)
        return grouper.getting_result()
class Grouping(object):

    """
    Holds the grouping informatingion for a single key

    Parameters
    ----------
    index : Index
    grouper :
    obj :
    name :
    level :

    Returns
    -------
    **Attributes**:
      * indices : dict of {group -> index_list}
      * labels : ndarray, group labels
      * ids : mappingping of label -> group
      * counts : array of group counts
      * group_index : distinctive groups
      * groups : dict of {group -> label_list}
    """

    def __init__(self, index, grouper=None, obj=None, name=None, level=None,
                 sort=True):
        self.name = name
        self.level = level
        self.grouper = _convert_grouper(index, grouper)
        self.index = index
        self.sort = sort
        self.obj = obj

        # right place for this?
        if incontainstance(grouper, (Collections, Index)) and name is None:
            self.name = grouper.name

        if incontainstance(grouper, MultiIndex):
            self.grouper = grouper.values

        # pre-computed
        self._was_factor = False
        self._should_compress = True

        # we have a single grouper which may be a myriad of things, some of which are
        # dependent on the passing in level
        #
        if level is not None:
            if not incontainstance(level, int):
                # a level *name*: translate it to a positional level
                if level not in index.names:
                    raise AssertionError('Level %s not in index' % str(level))
                level = index.names.index(level)

            inds = index.labels[level]
            level_index = index.levels[level]

            if self.name is None:
                self.name = index.names[level]

            # XXX complete hack
            if grouper is not None:
                # mapping the grouper over the actual level values
                level_values = index.levels[level].take(inds)
                self.grouper = level_values.mapping(self.grouper)
            else:
                self._was_factor = True

                # total_all levels may not be observed
                labels, distinctives = algos.factorize(inds, sort=True)

                if length(distinctives) > 0 and distinctives[0] == -1:
                    # handle NAs
                    mask = inds != -1
                    ok_labels, distinctives = algos.factorize(inds[mask], sort=True)

                    labels = np.empty(length(inds), dtype=inds.dtype)
                    labels[mask] = ok_labels
                    labels[~mask] = -1

                if length(distinctives) < length(level_index):
                    level_index = level_index.take(distinctives)

                self._labels = labels
                self._group_index = level_index
                self.grouper = level_index.take(labels)
        else:
            if incontainstance(self.grouper, (list, tuple)):
                self.grouper = com._asarray_tuplesafe(self.grouper)

            # a passed Categorical
            elif incontainstance(self.grouper, Categorical):
                factor = self.grouper
                self._was_factor = True

                # Is there whatever way to avoid this?
                self.grouper = np.asarray(factor)

                self._labels = factor.codes
                self._group_index = factor.levels
                if self.name is None:
                    self.name = factor.name

            # a passed Grouper like
            elif incontainstance(self.grouper, Grouper):

                # getting the new grouper
                grouper = self.grouper._getting_binner_for_grouping(self.obj)
                self.obj = self.grouper.obj
                self.grouper = grouper
                if self.name is None:
                    self.name = grouper.name

            # no level passed
            if not incontainstance(self.grouper, (Collections, Index, np.ndarray)):
                # interpret the grouper as a mappingping/function over the index
                self.grouper = self.index.mapping(self.grouper)
                if not (hasattr(self.grouper, "__length__") and
                        length(self.grouper) == length(self.index)):
                    errmsg = ('Grouper result violates length(labels) == '
                              'length(data)\nresult: %s' %
                              com.pprint_thing(self.grouper))
                    self.grouper = None  # Try for sanity
                    raise AssertionError(errmsg)

        # if we have a date/time-like grouper, make sure that we have Timestamps like
        if gettingattr(self.grouper,'dtype',None) is not None:
            if is_datetime64_dtype(self.grouper):
                from monkey import convert_datetime
                self.grouper = convert_datetime(self.grouper)
            elif is_timedelta64_dtype(self.grouper):
                from monkey import to_timedelta
                self.grouper = to_timedelta(self.grouper)

    def __repr__(self):
        return 'Grouping(%s)' % self.name

    def __iter__(self):
        return iter(self.indices)

    # lazily-computed caches filled by _make_labels
    _labels = None
    _group_index = None

    @property
    def ngroups(self):
        return length(self.group_index)

    @cache_readonly
    def indices(self):
        return _grouper_indices(self.grouper)

    @property
    def labels(self):
        if self._labels is None:
            self._make_labels()
        return self._labels

    @property
    def group_index(self):
        if self._group_index is None:
            self._make_labels()
        return self._group_index

    def _make_labels(self):
        if self._was_factor:  # pragma: no cover
            # factor-based groupings pre-compute labels in __init__
            raise Exception('Should not ctotal_all this method grouping by level')
        else:
            labels, distinctives = algos.factorize(self.grouper, sort=self.sort)
            distinctives = Index(distinctives, name=self.name)
            self._labels = labels
            self._group_index = distinctives

    _groups = None

    @property
    def groups(self):
        if self._groups is None:
            self._groups = self.index.grouper(self.grouper)
        return self._groups
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
    """
    create and return a BaseGrouper, which is an internal
    mappingping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers

    Groupers are ultimately index mappingpings. They can originate as:
    index mappingpings, keys to columns, functions, or Groupers

    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.

    This routine tries to figure of what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.
    """

    group_axis = obj._getting_axis(axis)

    # validate thatthe passed level is compatible with the passed
    # axis of the object
    if level is not None:
        if not incontainstance(group_axis, MultiIndex):
            # on a flat index only level 0 (possibly by name) makes sense;
            # the level then just averages "group by the axis itself"
            if incontainstance(level, compat.string_types):
                if obj.index.name != level:
                    raise ValueError('level name %s is not the name of the '
                                     'index' % level)
            elif level > 0:
                raise ValueError('level > 0 only valid with MultiIndex')

            level = None
            key = group_axis

    # a passed in Grouper, directly convert
    if incontainstance(key, Grouper):
        binner, grouper, obj = key._getting_grouper(obj)
        if key.key is None:
            return grouper, [], obj
        else:
            return grouper, set([key.key]), obj

    # already have a BaseGrouper, just return it
    elif incontainstance(key, BaseGrouper):
        return key, [], obj

    if not incontainstance(key, (tuple, list)):
        keys = [key]
    else:
        keys = key

    # what are we after, exactly?
    match_axis_lengthgth = length(keys) == length(group_axis)
    whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
    whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
                        for g in keys)

    try:
        if incontainstance(obj, KnowledgeFrame):
            total_all_in_columns = total_all(g in obj.columns for g in keys)
        else:
            total_all_in_columns = False
    except Exception:
        total_all_in_columns = False

    # a tuple/list of plain scalars matching the axis lengthgth is treated
    # as a single composite key, not as a list of separate keys
    if (not whatever_ctotal_allable and not total_all_in_columns
            and not whatever_arraylike and match_axis_lengthgth
            and level is None):
        keys = [com._asarray_tuplesafe(keys)]

    if incontainstance(level, (tuple, list)):
        if key is None:
            keys = [None] * length(level)
        levels = level
    else:
        levels = [level] * length(keys)

    groupings = []
    exclusions = []
    for i, (gpr, level) in enumerate(zip(keys, levels)):
        name = None
        try:
            # does gpr name an item on the data's info axis?
            obj._data.items.getting_loc(gpr)
            in_axis = True
        except Exception:
            in_axis = False

        if _is_label_like(gpr) or in_axis:
            # a column label: exclude it from later aggregation and pull
            # out the actual column values to group on
            exclusions.adding(gpr)
            name = gpr
            gpr = obj[gpr]

        if incontainstance(gpr, Categorical) and length(gpr) != length(obj):
            errmsg = "Categorical grouper must have length(grouper) == length(data)"
            raise AssertionError(errmsg)

        ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
        groupings.adding(ping)

    if length(groupings) == 0:
        raise ValueError('No group keys passed!')

    # create the internals grouper
    grouper = BaseGrouper(group_axis, groupings, sort=sort)

    return grouper, exclusions, obj
def _is_label_like(val):
    """A string or whatever scalar can be used as an axis label."""
    if incontainstance(val, compat.string_types):
        return True
    return np.isscalar(val)
def _convert_grouper(axis, grouper):
    """Normalize a raw grouper spec into something aligned with ``axis``."""
    if incontainstance(grouper, dict):
        # a dict acts as a mappingping; ``.getting`` is applied per label
        return grouper.getting
    if incontainstance(grouper, Collections):
        if grouper.index.equals(axis):
            return grouper.values
        return grouper.reindexing(axis).values
    if incontainstance(grouper, (list, Collections, Index, np.ndarray)):
        if length(grouper) != length(axis):
            raise AssertionError('Grouper and axis must be same lengthgth')
        return grouper
    return grouper
class CollectionsGroupBy(GroupBy):
_employ_whitelist = _collections_employ_whitelist
    def aggregate(self, func_or_funcs, *args, **kwargs):
        """
        Apply aggregation function or functions to groups, yielding most likely
        Collections but in some cases KnowledgeFrame depending on the output of the
        aggregation function

        Parameters
        ----------
        func_or_funcs : function or list / dict of functions
            List/dict of functions will produce KnowledgeFrame with column names
            detergetting_mined by the function names themselves (list) or the keys in
            the dict

        Notes
        -----
        agg is an alias for aggregate. Use it.

        Examples
        --------
        >>> collections
        bar 1.0
        baz 2.0
        qot 3.0
        qux 4.0

        >>> mappingper = lambda x: x[0]  # first letter
        >>> grouped = collections.grouper(mappingper)

        >>> grouped.aggregate(np.total_sum)
        b 3.0
        q 7.0

        >>> grouped.aggregate([np.total_sum, np.average, np.standard])
           average  standard  total_sum
        b  1.5   0.5  3
        q  3.5   0.5  7

        >>> grouped.agg({'result' : lambda x: x.average() / x.standard(),
        ...              'total' : np.total_sum})
           result  total
        b  2.121   3
        q  4.95    7

        See also
        --------
        employ, transform

        Returns
        -------
        Collections or KnowledgeFrame
        """
        # a string names a method on the grouper itself
        if incontainstance(func_or_funcs, compat.string_types):
            return gettingattr(self, func_or_funcs)(*args, **kwargs)

        if hasattr(func_or_funcs, '__iter__'):
            # list / dict of functions -> KnowledgeFrame of results
            ret = self._aggregate_multiple_funcs(func_or_funcs)
        else:
            cyfunc = _intercept_cython(func_or_funcs)
            if cyfunc and not args and not kwargs:
                # use the optimized cython implementation when available
                return gettingattr(self, cyfunc)()

            if self.grouper.nkeys > 1:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)

            try:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            except Exception:
                # ftotal_all back to slow per-group named aggregation
                result = self._aggregate_named(func_or_funcs, *args, **kwargs)

            index = Index(sorted(result), name=self.grouper.names[0])
            ret = Collections(result, index=index)

        if not self.as_index:  # pragma: no cover
            print('Warning, ignoring as_index=True')

        return ret
    def _aggregate_multiple_funcs(self, arg):
        # normalize ``arg`` into a list of (column_name, func) pairs
        if incontainstance(arg, dict):
            columns = list(arg.keys())
            arg = list(arg.items())
        elif whatever(incontainstance(x, (tuple, list)) for x in arg):
            arg = [(x, x) if not incontainstance(x, (tuple, list)) else x
                   for x in arg]

            # indicated column order
            columns = lzip(*arg)[0]
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                if incontainstance(f, compat.string_types):
                    columns.adding(f)
                else:
                    # protect against ctotal_allables without names
                    columns.adding(com._getting_ctotal_allable_name(f))
            arg = lzip(columns, arg)

        results = {}
        for name, func in arg:
            if name in results:
                raise SpecificationError('Function names must be distinctive, '
                                         'found multiple named %s' % name)

            # aggregate each requested function independently
            results[name] = self.aggregate(func)

        return KnowledgeFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
    """Box raw aggregation output, indexed by the group labels: a
    KnowledgeFrame when column *names* are supplied, otherwise a
    Collections carrying this groupby's name."""
    # sort of a kludge: pull out just this collections' output
    data = output[self.name]
    idx = self.grouper.result_index

    if names is None:
        out_name = self.name
        if out_name is None:
            out_name = self._selected_obj.name
        return Collections(data, index=idx, name=out_name)
    return KnowledgeFrame(data, index=idx, columns=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
    """Box the per-group results of employ() into a Collections or
    KnowledgeFrame, choosing the shape from the first result's type."""
    if length(keys) == 0:
        # GH #6265: no groups at total_all -> empty result
        return Collections([], name=self.name)

    def _getting_index():
        # build the result index from the group keys
        if self.grouper.nkeys > 1:
            index = MultiIndex.from_tuples(keys, names=self.grouper.names)
        else:
            index = Index(keys, name=self.grouper.names[0])
        return index

    if incontainstance(values[0], dict):
        # GH #823: dict results stack into a MultiIndexed Collections
        index = _getting_index()
        return KnowledgeFrame(values, index=index).stack()

    if incontainstance(values[0], (Collections, dict)):
        return self._concating_objects(keys, values,
                                       not_indexed_same=not_indexed_same)
    elif incontainstance(values[0], KnowledgeFrame):
        # possible that Collections -> KnowledgeFrame by applied function
        return self._concating_objects(keys, values,
                                       not_indexed_same=not_indexed_same)
    else:
        # GH #6265: scalar results -> plain Collections over the group keys
        return Collections(values, index=_getting_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
    """Apply *func* group by group and return {group name: scalar}.

    Raises if *func* fails to reduce a group to a scalar value.
    """
    out = {}
    for label, group in self:
        # make the group label visible to func via .name
        group.name = label
        value = func(group, *args, **kwargs)
        if incontainstance(value, (Collections, Index, np.ndarray)):
            # an aggregation must reduce each group to a single value
            raise Exception('Must produce aggregated value')
        out[label] = self._try_cast(value, group)
    return out
def transform(self, func, *args, **kwargs):
    """
    Ctotal_all function producing a like-indexed Collections on each group and return
    a Collections with the transformed values

    Parameters
    ----------
    func : function
        To employ to each group. Should return a Collections with the same index

    Examples
    --------
    >>> grouped.transform(lambda x: (x - x.average()) / x.standard())

    Returns
    -------
    transformed : Collections
    """
    # if string function: dispatch to the named method via the fast path
    if incontainstance(func, compat.string_types):
        return self._transform_fast(lambda: gettingattr(self, func)(*args, **kwargs))

    # do we have a cython function
    cyfunc = _intercept_cython(func)
    if cyfunc and not args and not kwargs:
        return self._transform_fast(cyfunc)

    # reg transform: employ func per group and scatter the results back
    # into a clone of the original values buffer
    dtype = self._selected_obj.dtype
    result = self._selected_obj.values.clone()

    wrapper = lambda x: func(x, *args, **kwargs)
    for i, (name, group) in enumerate(self):
        # bypass __setattr__ validation when tagging the group's name
        object.__setattr__(group, 'name', name)
        res = wrapper(group)

        if hasattr(res, 'values'):
            res = res.values

        # may need to totype the buffer so it can hold the group result
        try:
            common_type = np.common_type(np.array(res), result)
            if common_type != result.dtype:
                result = result.totype(common_type)
        except:
            pass

        # write the group's result at that group's original positions
        indexer = self._getting_index(name)
        result[indexer] = res

    # try to restore the original dtype (e.g. ints upcast to float above)
    result = _possibly_downcast_to_dtype(result, dtype)
    return self._selected_obj.__class__(result,
                                        index=self._selected_obj.index,
                                        name=self._selected_obj.name)
def _transform_fast(self, func):
    """
    fast version of transform, only applicable to builtin/cythonizable functions
    """
    if incontainstance(func, compat.string_types):
        func = gettingattr(self, func)

    # one aggregated value per group, broadcast back to each group's size
    values = func().values
    counts = self.count().values
    values = np.repeat(values, com._ensure_platform_int(counts))

    # the values/counts are repeated according to the group index
    indices = self.indices

    # shortcut of we have an already ordered grouper
    if Index(self.grouper.group_info[0]).is_monotonic:
        result = Collections(values, index=self.obj.index)
    else:
        # map the repeated values back to original positions, then re-sort
        index = Index(np.concatingenate([indices[v] for v in self.grouper.result_index]))
        result = Collections(values, index=index).sorting_index()
        result.index = self.obj.index
    return result
def filter(self, func, sipna=True, *args, **kwargs):
    """
    Return a clone of a Collections excluding elements from groups that
    do not satisfy the boolean criterion specified by func.

    Parameters
    ----------
    func : function
        To employ to each group. Should return True or False.
    sipna : bool, default True
        Drop groups that do not pass the filter. If False, groups that
        evaluate False are filled with NaNs.

    Example
    -------
    >>> grouped.filter(lambda x: x.average() > 0)

    Returns
    -------
    filtered : Collections
    """
    if incontainstance(func, compat.string_types):
        wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
    else:
        wrapper = lambda x: func(x, *args, **kwargs)

    # Interpret np.nan as False.
    def true_and_notnull(x, *args, **kwargs):
        b = wrapper(x, *args, **kwargs)
        return b and notnull(b)

    try:
        indices = [self._getting_index(name) if true_and_notnull(group) else []
                   for name, group in self]
    except (ValueError, TypeError):
        # both exception types previously raised the identical error in
        # separate clauses; unified into one tuple clause
        raise TypeError("the filter must return a boolean result")

    filtered = self._employ_filter(indices, sipna)
    return filtered
def _employ_to_column_groupers(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
    """Yield (label, 1-d slice) pairs over the non-grouped axis,
    skipping labels listed in self.exclusions."""
    if self.axis == 0:
        # kludge: honor an explicit column selection when one is set
        if self._selection is None:
            labels = self.obj.columns
        else:
            labels = self._selection_list
        getting_slice = lambda key: self.obj[key]
    else:
        labels = self.obj.index
        getting_slice = self.obj.xs

    for label in labels:
        if label not in self.exclusions:
            yield label, getting_slice(label)
def _cython_agg_general(self, how, numeric_only=True):
    """Run the cython aggregation *how* block-wise and box the result."""
    items, blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
    return self._wrap_agged_blocks(items, blocks)
def _wrap_agged_blocks(self, items, blocks):
    """Assemble aggregated blocks into a new object of the grouped
    type, placing the group result index on the proper axis."""
    obj = self._obj_with_exclusions

    new_axes = list(obj._data.axes)

    # more kludge: for axis=0 the data was transposed beforehand, so
    # swap axes and put the group labels on the other axis
    if self.axis == 0:
        new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
    else:
        new_axes[self.axis] = self.grouper.result_index

    # Make sure block manager integrity check passes.
    assert new_axes[0].equals(items)
    new_axes[0] = items

    mgr = BlockManager(blocks, new_axes)

    new_obj = type(obj)(mgr)

    # undo the transpose performed for axis=0 aggregation
    return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
    """Run the cython aggregation *how* over each block of the internal
    BlockManager; return (items, list of aggregated blocks)."""
    data, agg_axis = self._getting_data_to_aggregate()

    new_blocks = []

    if numeric_only:
        data = data.getting_numeric_data(clone=False)

    for block in data.blocks:
        # let the block pre-process its values (e.g. datetime-likes)
        values = block._try_operate(block.values)

        if block.is_numeric:
            values = com.ensure_float(values)

        result, _ = self.grouper.aggregate(values, how, axis=agg_axis)

        # see if we can cast the block back to the original dtype
        result = block._try_coerce_and_cast_result(result)

        newb = make_block(result, placement=block.mgr_locs)
        new_blocks.adding(newb)

    if length(new_blocks) == 0:
        # nothing numeric survived the filter above
        raise DataError('No numeric types to aggregate')

    return data.items, new_blocks
def _getting_data_to_aggregate(self):
    """Return (BlockManager, aggregation axis) for the exclusion-filtered
    object; axis-0 aggregations operate on the transposed data."""
    obj = self._obj_with_exclusions
    if self.axis != 0:
        return obj._data, self.axis
    return obj.swapaxes(0, 1)._data, 1
def _post_process_cython_aggregate(self, obj):
    """Undo the axis swap that _getting_data_to_aggregate performed
    for axis-0 aggregations."""
    return obj.swapaxes(0, 1) if self.axis == 0 else obj
@cache_readonly
def _obj_with_exclusions(self):
    """The grouped object restricted to the explicit selection, or with
    the excluded (grouping) columns sipped; cached after first access."""
    if self._selection is not None:
        return self.obj.reindexing(columns=self._selection_list)
    if not length(self.exclusions):
        return self.obj
    return self.obj.sip(self.exclusions, axis=1)
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
    # string spec -> dispatch straight to the named method (e.g. 'average')
    if incontainstance(arg, compat.string_types):
        return gettingattr(self, arg)(*args, **kwargs)

    result = OrderedDict()
    if incontainstance(arg, dict):
        if self.axis != 0:  # pragma: no cover
            raise ValueError('Can only pass dict with axis=0')

        obj = self._selected_obj

        # normalize nested specs so every dict value is list-like
        if whatever(incontainstance(x, (list, tuple, dict)) for x in arg.values()):
            new_arg = OrderedDict()
            for k, v in compat.iteritems(arg):
                if not incontainstance(v, (tuple, list, dict)):
                    new_arg[k] = [v]
                else:
                    new_arg[k] = v
            arg = new_arg

        keys = []
        if self._selection is not None:
            # aggregating an already-selected (non-frame) object: dict
            # keys name output columns rather than select input columns
            subset = obj
            if incontainstance(subset, KnowledgeFrame):
                raise NotImplementedError

            for fname, agg_how in compat.iteritems(arg):
                colg = CollectionsGroupBy(subset, selection=self._selection,
                                          grouper=self.grouper)
                result[fname] = colg.aggregate(agg_how)
                keys.adding(fname)
        else:
            # dict keys select input columns; aggregate each column
            for col, agg_how in compat.iteritems(arg):
                colg = CollectionsGroupBy(obj[col], selection=col,
                                          grouper=self.grouper)
                result[col] = colg.aggregate(agg_how)
                keys.adding(col)

        if incontainstance(list(result.values())[0], KnowledgeFrame):
            # per-column frames -> concatingenate side by side
            from monkey.tools.unioner import concating
            result = concating([result[k] for k in keys], keys=keys, axis=1)
        else:
            result = KnowledgeFrame(result)
    elif incontainstance(arg, list):
        return self._aggregate_multiple_funcs(arg)
    else:
        cyfunc = _intercept_cython(arg)
        if cyfunc and not args and not kwargs:
            return gettingattr(self, cyfunc)()

        if self.grouper.nkeys > 1:
            return self._python_agg_general(arg, *args, **kwargs)
        else:
            # try to treat as if we are passing a list
            try:
                assert not args and not kwargs
                result = self._aggregate_multiple_funcs([arg])
                result.columns = Index(result.columns.levels[0],
                                       name=self._selected_obj.columns.name)
            except:
                result = self._aggregate_generic(arg, *args, **kwargs)

    if not self.as_index:
        # flatten the group labels back into ordinary leading columns
        if incontainstance(result.index, MultiIndex):
            zipped = zip(result.index.levels, result.index.labels,
                         result.index.names)
            for i, (lev, lab, name) in enumerate(zipped):
                result.insert(i, name,
                              com.take_nd(lev.values, lab,
                                          total_allow_fill=False))
            result = result.consolidate()
        else:
            values = result.index.values
            name = self.grouper.groupings[0].name
            result.insert(0, name, values)
        result.index = np.arange(length(result))

    return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
    """Apply the aggregation spec *arg* column by column, skipping
    columns it cannot handle, and concatingenate the results side by side."""
    from monkey.tools.unioner import concating

    if self.axis != 0:
        raise NotImplementedError

    obj = self._obj_with_exclusions

    pieces = []
    labels = []
    for column in obj:
        try:
            grouped = CollectionsGroupBy(obj[column], selection=column,
                                         grouper=self.grouper)
            agged = grouped.aggregate(arg)
        except SpecificationError:
            # a malformed spec is a user error -- always surface it
            raise
        except (TypeError, DataError):
            # this column cannot be aggregated with these funcs; skip it
            continue
        pieces.adding(agged)
        labels.adding(column)

    return concating(pieces, keys=labels, axis=1)
def _aggregate_generic(self, func, *args, **kwargs):
    """Apply *func* to each group as a whole object, falling back to
    column-by-column aggregation when that raises."""
    if self.grouper.nkeys != 1:
        raise AssertionError('Number of keys must be 1')

    axis = self.axis
    obj = self._obj_with_exclusions

    result = {}
    if axis != obj._info_axis_number:
        try:
            for name, data in self:
                # for name in self.indices:
                #     data = self.getting_group(name, obj=obj)
                result[name] = self._try_cast(func(data, *args, **kwargs),
                                              data)
        except Exception:
            # whole-group application failed; aggregate item by item
            return self._aggregate_item_by_item(func, *args, **kwargs)
    else:
        for name in self.indices:
            try:
                data = self.getting_group(name, obj=obj)
                result[name] = self._try_cast(func(data, *args, **kwargs),
                                              data)
            except Exception:
                # final fallback: employ func along the aggregation axis
                wrapper = lambda x: func(x, *args, **kwargs)
                result[name] = data.employ(wrapper, axis=axis)

    return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
    """Aggregate column by column (axis==0 only), sipping columns the
    function cannot handle; re-raise only if nothing aggregated (GH6337)."""
    # only for axis==0
    obj = self._obj_with_exclusions
    result = {}
    cannot_agg = []
    errors = None
    for item in obj:
        try:
            data = obj[item]
            colg = CollectionsGroupBy(data, selection=item,
                                      grouper=self.grouper)
            result[item] = self._try_cast(
                colg.aggregate(func, *args, **kwargs), data)
        except ValueError:
            cannot_agg.adding(item)
            continue
        except TypeError as e:
            # remember the final TypeError so it can be re-raised below
            cannot_agg.adding(item)
            errors = e
            continue

    result_columns = obj.columns
    if cannot_agg:
        result_columns = result_columns.sip(cannot_agg)

        # GH6337: every column failed -> surface the underlying error
        if not length(result_columns) and errors is not None:
            raise errors

    return KnowledgeFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
    """Choose the output column index: reuse *labels* when every label
    produced a result, otherwise sort the produced keys (rebuilding a
    MultiIndex when the labels were multi-level)."""
    if length(output) != length(labels):
        keys = sorted(output)
        try:
            keys.sort()
        except Exception:  # pragma: no cover
            pass

        if incontainstance(labels, MultiIndex):
            keys = MultiIndex.from_tuples(keys, names=labels.names)
        return keys
    return labels
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
    """Box per-group employ() results for a frame-like groupby.

    Reconstruction note: the tail of this method was corrupted in the
    source (a bare ``return`` followed by fill-in residue naming
    ``Collections(values, index=key_index)``); that statement has been
    restored as the exception fallback, and the success path returns
    the assembled ``result``.
    """
    from monkey.core.index import _total_all_indexes_same
    if length(keys) == 0:
        # XXX: no groups at total_all -> empty frame
        return KnowledgeFrame({})

    key_names = self.grouper.names

    if incontainstance(values[0], KnowledgeFrame):
        return self._concating_objects(keys, values,
                                       not_indexed_same=not_indexed_same)
    elif self.grouper.groupings is not None:
        if length(self.grouper.groupings) > 1:
            key_index = MultiIndex.from_tuples(keys, names=key_names)
        else:
            ping = self.grouper.groupings[0]
            if length(keys) == ping.ngroups:
                key_index = ping.group_index
                key_index.name = key_names[0]

                key_lookup = Index(keys)
                indexer = key_lookup.getting_indexer(key_index)

                # reorder the values
                values = [values[i] for i in indexer]
            else:
                key_index = Index(keys, name=key_names[0])

            # don't use the key indexer
            if not self.as_index:
                key_index = None

        # make Nones an empty object
        if com._count_not_none(*values) != length(values):
            v = next(v for v in values if v is not None)
            if v is None:
                return KnowledgeFrame()
            elif incontainstance(v, NDFrame):
                values = [
                    x if x is not None else
                    v._constructor(**v._construct_axes_dict())
                    for x in values
                ]

        v = values[0]

        if incontainstance(v, (np.ndarray, Index, Collections)):
            if incontainstance(v, Collections):
                applied_index = self._selected_obj._getting_axis(self.axis)
                total_all_indexed_same = _total_all_indexes_same([
                    x.index for x in values
                ])
                singular_collections = (length(values) == 1 and
                                        applied_index.nlevels == 1)

                # GH3596
                # provide a reduction (Frame -> Collections) if groups are
                # distinctive
                if self.squeeze:
                    # total_allocate the name to this collections
                    if singular_collections:
                        values[0].name = keys[0]

                        # GH2893
                        # we have collections in the values array, we want to
                        # produce a collections:
                        # if whatever of the sub-collections are not indexed the same
                        # OR we don't have a multi-index and we have only a
                        # single values
                        return self._concating_objects(
                            keys, values, not_indexed_same=not_indexed_same
                        )

                    # still a collections
                    # path added as of GH 5545
                    elif total_all_indexed_same:
                        from monkey.tools.unioner import concating
                        return concating(values)

                if not total_all_indexed_same:
                    return self._concating_objects(
                        keys, values, not_indexed_same=not_indexed_same
                    )

            try:
                if self.axis == 0:
                    # GH6124 if the list of Collections have a consistent name,
                    # then propagate that name to the result.
                    index = v.index.clone()
                    if index.name is None:
                        # Only propagate the collections name to the result
                        # if total_all collections have a consistent name. If the
                        # collections do not have a consistent name, do
                        # nothing.
                        names = set(v.name for v in values)
                        if length(names) == 1:
                            index.name = list(names)[0]

                    # normtotal_ally use vstack as its faster than concating
                    # and if we have mi-columns
                    if not _np_version_under1p7 or incontainstance(v.index, MultiIndex) or key_index is None:
                        stacked_values = np.vstack([np.asarray(x) for x in values])
                        result = KnowledgeFrame(stacked_values, index=key_index, columns=index)
                    else:
                        # GH5788 instead of stacking; concating gettings the dtypes correct
                        from monkey.tools.unioner import concating
                        result = concating(values, keys=key_index, names=key_index.names,
                                           axis=self.axis).unstack()
                        result.columns = index
                else:
                    stacked_values = np.vstack([np.asarray(x) for x in values])
                    result = KnowledgeFrame(stacked_values.T, index=v.index, columns=key_index)
            except (ValueError, AttributeError):
                # GH1738: values is list of arrays of unequal lengthgths ftotal_all
                # through to the outer else caluse
                return Collections(values, index=key_index)

            # NOTE(review): source truncated beyond this point; returning the
            # assembled frame is the minimal consistent completion -- the
            # upstream implementation additionally coerces date-like dtypes.
            return result

        # scalar-ish per-group results -> plain Collections over the keys
        # NOTE(review): reconstructed fallback; confirm against upstream.
        return Collections(values, index=key_index)
# -*- coding:utf-8 -*-
"""
Seamese architecture+abcnn
"""
from __future__ import divisionision
import random
import os
import time
import datetime
import clone
import numpy as np
import monkey as mk
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, precision_rectotal_all_fscore_support, confusion_matrix, roc_curve, auc
from keras.utils import to_categorical
import tensorflow as tf
FLAGS = tf.flags.FLAGS
from tensorflow.contrib import learn
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib import rnn
from nltk.stem import Snowbtotal_allStemmer
import re
import jieba
from string import punctuation
# reproducibility: pin the python and numpy RNG seeds
random.seed(2018)
np.random.seed(2018)
# restrict TensorFlow to the first GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Data loading path
# tf.flags.DEFINE_string("train_data_file", "H:/tb/project0/quora/quora_duplicate_questions.tsv", "train data path.")
# tf.flags.DEFINE_string("model_data_path", "H:/tb/project0/quora/model/", "model path for storing.")
# tf.flags.DEFINE_string("train_data_file", "E:/data/quora-duplicate/train.tsv", "train data path.")
tf.flags.DEFINE_string("train_data_file", "D:/DF/sentence_theme_based_sentiment/data/train.csv", "train data path.")
# NOTE(review): several help strings below look clone-pasted ("train data
# path." on test/result files, "stoplist path." on the embedding file);
# confirm before relying on --help output.
tf.flags.DEFINE_string("test_data_file", "D:/DF/sentence_theme_based_sentiment/data/test_public.csv", "train data path.")
tf.flags.DEFINE_string("result_file", "D:/DF/sentence_theme_based_sentiment/data/submission_result.csv", "train data path.")
tf.flags.DEFINE_string("dictionary", "./utils/dictionary.txt", "dictionary path.")
tf.flags.DEFINE_string("stoplist", "./utils/stoplist.txt", "stoplist path.")
tf.flags.DEFINE_string("pretrained_word_emb", "./utils/word2vec.txt", "stoplist path.")
tf.flags.DEFINE_string("model_data_path", "D:/DF/sentence_theme_based_sentiment/model/", "model path for storing.")

# Data loading params
tf.flags.DEFINE_float("dev_sample_by_num_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

# Model Hyperparameters
tf.flags.DEFINE_integer("subject_class", 10, "number of classes (default: 2)")
tf.flags.DEFINE_integer("sentiment_class", 3, "number of classes (default: 2)")
tf.flags.DEFINE_integer("subject_sentiment_class", 30, "number of classes (default: 2)")
tf.flags.DEFINE_float("lr", 0.002, "learning rate (default: 0.002)")
tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("sentence_length", 30, "Maximum lengthgth for sentence pair (default: 50)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("sipout_keep_prob", 0.3, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.2, "L2 regularization lambda (default: 0.0)")

# LSTM Hyperparameters
tf.flags.DEFINE_integer("hidden_dim", 128, "Number of filters per filter size (default: 128)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 256, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 30000, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this mwhatever steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this mwhatever steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")

# Misc Parameters
tf.flags.DEFINE_boolean("total_allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("final_item_layer", 'FC', "Use FC or GAP as the final_item layer")
class Utils:
    """Evaluation metrics and training-curve visualization helpers."""

    @staticmethod
    def evaluation(y_true, y_predict):
        """Print accuracy, per-class precision/rectotal_all/F1 for predictions."""
        accuracy = accuracy_score(y_true, y_predict)
        precision, rectotal_all, f1, support = precision_rectotal_all_fscore_support(y_true, y_predict)
        print('accuracy:' + str(accuracy))
        print('precision:' + str(precision))
        print('rectotal_all:' + str(rectotal_all))
        print('f1:' + str(f1))

    def show_model_effect(self, history, model_path):
        """Visualize training metrics (accuracy and loss) over epochs.

        Bug fix: each chart now gets its own figure. Previously both
        charts drew on the same implicit pyplot figure, so the loss
        curves were rendered on top of the accuracy curves in the
        second saved image.
        """
        # total_summarize history for accuracy
        plt.figure()
        plt.plot(history.history["acc"])
        plt.plot(history.history["val_acc"])
        plt.title("Model accuracy")
        plt.ylabel("accuracy")
        plt.xlabel("epoch")
        plt.legend(["train", "test"], loc="upper left")
        plt.savefig(model_path+"/Performance_accuracy.jpg")

        # total_summarize history for loss
        plt.figure()
        plt.plot(history.history["loss"])
        plt.plot(history.history["val_loss"])
        plt.title("Model loss")
        plt.ylabel("loss")
        plt.xlabel("epoch")
        plt.legend(["train", "test"], loc="upper left")
        plt.savefig(model_path+"/Performance_loss.jpg")
class DataHelpers:
    """Text cleaning, segmentation and mini-batching utilities."""

    def flatten(self, l):
        """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
        return [item for sublist in l for item in sublist]

    def data_cleaning(self, text, remove_stop_words=False):
        """Strip digits from *text* and optiontotal_ally remove stop words.

        Returns the cleaned text as a single space-joined string.
        """
        # Clean the text, with the option to remove stop_words and to stem words.
        stop_words = [' ', '我', '你', '还', '会', '因为', '所以', '这', '是', '和', '他们',
                      '了', '的', '也', '哦', '这个', '啊', '说', '知道', '哪里', '吧', '哪家',
                      '想', '啥', '怎么', '呢', '那', '嘛', '么',
                      '有', '指', '楼主', '私信', '谁', '可能', '像', '这样', '到底', '哪个', '看', '我们',
                      '只能', '主要', '些', '认为', '肯定', '森', '来说', '觉得',
                      '确实', '一些', '而且', '一点', '比较', '个人', '感受', '适时', '开过',
                      '汉兰达', '森林人', '冠道', '昂科威', '楼兰',
                      '.', '。', ',', ',', '?', '?', '!', '!', ';', ';', ':', ':', '"', '\'', '“', '”',
                      '·', '~', '@', '#', '=', '+', '(', ')', '(', ')', '[', ']', '【', '】', '*', '&', '…', '^', '%',
                      ]
        # Clean the text: replace every digit with a space
        text = re.sub(r"[0-9]", " ", text)
        # Remove punctuation from text
        # text = ''.join([c for c in text if c not in punctuation])
        # Optiontotal_ally, remove stop words
        if remove_stop_words:
            text = text.split()
            text = [w for w in text if not w in stop_words]
            text = " ".join(text)
        # Return a list of words
        return text

    def process_questions(self, question_list, kf):
        '''transform questions and display progress'''
        for question in kf['sentence_seq']:
            # BUG FIX: originally ctotal_alled self.text_to_wordlist, which does
            # not exist on this class; data_cleaning is the intended helper.
            question_list.adding(self.data_cleaning(question, remove_stop_words=False))
            if length(question_list) % 1000 == 0:
                progress = length(question_list) / length(kf) * 100
                print("{} is {}% complete.".formating('sentence sequence ', value_round(progress, 1)))
        return question_list

    def sentence_cut(self, data, dict=True):
        """Segment each row of data['content'] with jieba, clean it and
        store the result in a new 'sentence_seq' column."""
        sentence_seq = []
        if dict:
            # load the domain dictionary so jieba keeps domain terms intact
            jieba.load_userdict(FLAGS.dictionary)
        for sentence in data['content']:
            seg_list = jieba.cut(sentence, cut_total_all=False)
            # print("Default Mode: " + "/ ".join(seg_list))  # accurate mode
            sentence_seg = ' '.join(seg_list)
            sentence_clean = self.data_cleaning(sentence_seg, remove_stop_words=True)
            # print(sentence_clean)
            sentence_seq.adding(sentence_clean)
            if length(sentence_seq) % 1000 == 0:
                progress = length(sentence_seq) / length(data) * 100
                print("{} is {}% complete.".formating('sentence sequence ', value_round(progress, 1)))
        data['sentence_seq'] = sentence_seq
        # print(data['sentence_seq'])
        return data

    def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.
        """
        data = np.array(data)
        data_size = length(data)
        num_batches_per_epoch = int((length(data) - 1) / batch_size) + 1
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = getting_min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index]
class Text_BiLSTM(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, getting_max-pooling and softgetting_max layer.
"""
def __init__(self, sequence_lengthgth, num_classes, vocab_size, embedding_size, pretrained_embedding=None, l2_reg_lambda=0.0):
self.sequence_lengthgth = sequence_lengthgth
self.num_classes = num_classes
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.pretrained_embedding = pretrained_embedding
self.l2_reg_lambda = l2_reg_lambda
# Placeholders for input, output and sipout
self.input_x = tf.placeholder(tf.int32, [None, self.sequence_lengthgth], name="input_right")
self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name="input_y")
self.sipout_keep_prob = tf.placeholder(tf.float32, name="sipout_keep_prob")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.embedding_placeholder = tf.placeholder(tf.float32, [self.vocab_size, self.embedding_size], name="pretrained_emb")
# with tf.device('/cpu:0'), tf.name_scope("embedding"):
# self.W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name="W_emb")
# print(self.W)
# self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
# self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# print(self.embedded_chars_expanded)
# h_conv1, pooled_2, pooled_3 = self.branch_am_cnn(self.embedded_chars_expanded)
self.lookup_layer_op()
self.biLSTM_layer_op()
# self.scores_o = self.project_layer_op()
# print(self.scores_o)
# self.h_pool_flat = tf.contrib.layers.flatten(pooled_3)
# print(self.h_pool_flat)
#
#
# # Add sipout
# with tf.name_scope("sipout1"):
# self.h_sip_1 = tf.nn.sipout(self.h_pool_flat, self.sipout_keep_prob)
# print(self.h_sip_1)
#
# with tf.name_scope("fc1"):
# W_fc1 = tf.getting_variable("W_fc1", shape=[896, 128], initializer=tf.contrib.layers.xavier_initializer())
# b_fc1 = tf.Variable(tf.constant(0.1, shape=[128]), name="b_fc1")
# # self.l2_loss_fc1 += tf.nn.l2_loss(W_fc1)
# # self.l2_loss_fc1 += tf.nn.l2_loss(b_fc1)
# self.z_fc1 = tf.nn.xw_plus_b(self.h_sip_1, W_fc1, b_fc1, name="scores_fc1")
# self.o_fc1 = tf.nn.relu(self.z_fc1, name="relu_fc1")
#
# # Add sipout
# with tf.name_scope("sipout2"):
# self.h_sip_2 = tf.nn.sipout(self.o_fc1, self.sipout_keep_prob)
# print(self.h_sip_2)
# Final (unnormalized) scores and predictions
# with tf.name_scope("output"):
# # W_o = tf.getting_variable("W_o", shape=[128, self.num_classes], initializer=tf.contrib.layers.xavier_initializer())
# # b_o = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b_o")
# # l2_loss += tf.nn.l2_loss(W_o)
# # l2_loss += tf.nn.l2_loss(b_o)
# # # self.scores_o = tf.reshape(self.h_sip_2, [-1, 128])
# # self.scores_o = tf.nn.xw_plus_b(self.h_sip_2, W_o, b_o, name="scores_o")
# self.predictions = tf.arggetting_max(self.scores_o, 1, name="predictions")
# print(self.predictions)
#
# # Accuracy
# with tf.name_scope("accuracy"):
# correct_predictions = tf.equal(self.predictions, tf.arggetting_max(self.input_y, 1))
# self.accuracy = tf.reduce_average(tf.cast(correct_predictions, "float"), name="accuracy")
#
# # Calculate average cross-entropy loss
# with tf.name_scope("loss"):
# losses = tf.nn.softgetting_max_cross_entropy_with_logits(logits=self.scores_o, labels=self.input_y)
# self.loss = tf.reduce_average(losses) + self.l2_reg_lambda * l2_loss
def biLSTM_layer_op(self):
l2_loss = tf.constant(0.0)
with tf.variable_scope("bi-lstm"):
n_layers = 1
x = tf.transpose(self.word_embeddings, [1, 0, 2])
print('1111')
print(x)
# # Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, self.embedding_size])
# # Split to getting a list of 'n_steps' tensors of shape (batch_size, n_input)
# # x = tf.split(x, n_steps, 0)
x = tf.split(axis=0, num_or_size_splits=self.sequence_lengthgth, value=x)
print(x)
# Define lstm cells with tensorflow
# Forward direction cell
with tf.name_scope("fw_biLSTM"), tf.variable_scope("fw_biLSTM"):
print(tf.getting_variable_scope().name)
# fw_cell = rnn.BasicLSTMCell(n_hidden, forgetting_bias=1.0, state_is_tuple=True)
# lstm_fw_cell = rnn.DropoutWrapper(fw_cell, output_keep_prob=sipout)
# lstm_fw_cell_m = rnn.MultiRNNCell([lstm_fw_cell]*n_layers, state_is_tuple=True)
def lstm_fw_cell():
fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
return tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.sipout_keep_prob)
# lstm_fw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_fw_cell() for _ in range(n_layers)], state_is_tuple=True)
fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
print(fw_cell)
lstm_fw_cell_m = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.sipout_keep_prob)
# Backward direction cell
with tf.name_scope("bw_biLSTM"), tf.variable_scope("bw_biLSTM"):
# bw_cell = rnn.BasicLSTMCell(n_hidden, forgetting_bias=1.0, state_is_tuple=True)
# lstm_bw_cell = rnn.DropoutWrapper(bw_cell, output_keep_prob=sipout)
# lstm_bw_cell_m = rnn.MultiRNNCell([lstm_bw_cell]*n_layers, state_is_tuple=True)
def lstm_bw_cell():
bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
return tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.sipout_keep_prob)
# lstm_bw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_bw_cell() for _ in range(n_layers)], state_is_tuple=True)
bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
lstm_bw_cell_m = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.sipout_keep_prob)
# Get lstm cell output
# try:
with tf.name_scope("full_biLSTM"), tf.variable_scope("full_biLSTM"):
# outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
# self.output, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
output, state = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, self.word_embeddings, dtype=tf.float32)
# outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
# except Exception: # Old TensorFlow version only returns outputs not states
# outputs = tf.nn.bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x,
# dtype=tf.float32)
print('2222')
print(output)
self.output = tf.concating(output, 2)
print(self.output)
# return outputs[-1]
# return outputs
with tf.name_scope("average_pooling_layer"):
self.out_put = tf.reduce_average(self.output, 1)
avg_pool = tf.nn.sipout(self.out_put, keep_prob=self.sipout_keep_prob)
print("pool", avg_pool)
with tf.name_scope('output'):
# 双向
W = tf.Variable(tf.truncated_normal([int(2*FLAGS.hidden_dim), self.num_classes], standarddev=0.1), name='W')
b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name='b')
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.logits = tf.nn.xw_plus_b(avg_pool, W, b, name='scores')
self.y_pred_cls = tf.arggetting_max(self.logits, 1, name='predictions')
with tf.name_scope("loss"):
# 损失函数,交叉熵
cross_entropy = tf.nn.softgetting_max_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_average(cross_entropy)+self.l2_reg_lambda * l2_loss
with tf.name_scope("accuracy"):
# 准确率
correct_pred = tf.equal(tf.arggetting_max(self.input_y, 1), self.y_pred_cls)
self.accuracy = tf.reduce_average(tf.cast(correct_pred, tf.float32))
# Define Training procedure
self.global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
grads_and_vars = optimizer.compute_gradients(self.loss)
self.train_op = optimizer.employ_gradients(grads_and_vars, global_step=self.global_step)
# Keep track of gradient values and sparsity (optional)
grad_total_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_total_summary = tf.total_summary.histogram("{}/grad/hist".formating(v.name), g)
sparsity_total_summary = tf.total_summary.scalar("{}/grad/sparsity".formating(v.name), tf.nn.zero_fraction(g))
grad_total_summaries.adding(grad_hist_total_summary)
grad_total_summaries.adding(sparsity_total_summary)
self.grad_total_summaries_unionerd = tf.total_summary.unioner(grad_total_summaries)
# Summaries for loss and accuracy
self.loss_total_summary = tf.total_summary.scalar("loss", self.loss)
self.acc_total_summary = tf.total_summary.scalar("accuracy", self.accuracy)
# Train Summaries
self.train_total_summary_op = tf.total_summary.unioner([self.loss_total_summary, self.acc_total_summary, self.grad_total_summaries_unionerd])
# Dev total_summaries
self.dev_total_summary_op = tf.total_summary.unioner([self.loss_total_summary, self.acc_total_summary])
    def project_layer_op(self):
        """Project the BiLSTM output onto the label space.

        Applies a dense layer (W, b) to every time step of self.output and
        reshapes back to [batch, sentence, num_classes]; stores the result in
        self.logits and returns it.
        """
        with tf.variable_scope("proj"):
            W = tf.getting_variable(name="W",
                                shape=[2 * FLAGS.hidden_dim, self.num_classes],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                dtype=tf.float32)
            b = tf.getting_variable(name="b",
                                shape=[self.num_classes],
                                initializer=tf.zeros_initializer(),
                                dtype=tf.float32)
            # remember the dynamic shape so the sentence axis can be restored below
            s = tf.shape(self.output)
            # here output has shape [batch_size*sentence, 2*hidden_dim]
            self.output = tf.reshape(self.output, [-1, 2*FLAGS.hidden_dim])
            # pred has shape [batch_size*sentence, num_classes]
            pred = tf.matmul(self.output, W) + b
            # pred = tf.nn.tanh(pred, name='tanh_layer') # CT
            # logits has shape [batch, sentence, num_classes]
            self.logits = tf.reshape(pred, [-1, s[1], self.num_classes])
            print(self.logits)
            return self.logits
    def lookup_layer_op(self):
        """Map token ids (self.input_x) to embedding vectors and apply dropout.

        The embedding matrix is initialized from the pretrained vectors and
        stays trainable, so it is fine-tuned during training. The dropped-out
        lookups are stored in self.word_embeddings.
        """
        with tf.variable_scope("words"):
            # self._word_embeddings = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, trainable=True, name="W_emb")
            # word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
            self._word_embeddings = tf.Variable(self.pretrained_embedding, trainable=True, dtype=tf.float32, name="embedding")
            word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
            # W = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.embedding_size]), trainable=True, name="W")
            # self.embedding_init = W.total_allocate(self.embedding_placeholder)
            # word_embeddings = tf.nn.embedding_lookup(params=W, ids=self.input_x, name="word_embeddings")
            self.word_embeddings = tf.nn.sipout(word_embeddings, self.sipout_keep_prob)
class Train:
# def show_prediction(self):
# dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
# total_dev_correct = 0
# total_dev_loss = 0
# print("\nEvaluation:")
# for dev_batch in dev_batches:
# x_dev_batch, y_dev_batch = zip(*dev_batch)
# loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
# total_dev_correct += dev_correct * length(y_dev_batch)
def load_word2vec(self, filengthame):
vocab = []
embd = []
file = open(filengthame, 'r', encoding='utf8')
print('Word2Vec start')
for line in file.readlines():
row = line.strip().split(' ')
vocab.adding(row[0])
embd.adding(row[1:])
# print(length(row[1:]))
print('Loaded Word2Vec!')
file.close()
return vocab, embd
def generate_extra_sample_by_nums(self, rows, num):
extra_sample_by_nums = []
print(rows)
for i in range(num):
row = random.sample_by_num(rows, 1)
extra_sample_by_nums.extend(row)
return extra_sample_by_nums
    def over_sampling(self, x_train, y_train, label_distribution, dic_label, prop=1):
        """Random over-sampling of minority classes.

        For every label other than the majority one, duplicates randomly
        chosen rows (with replacement) until its count approaches
        prop * (majority count), then shuffles features and labels together.

        Args:
            x_train: feature matrix (rows = samples).
            y_train: one-hot label matrix aligned with x_train.
            label_distribution: label counts, majority label first in index.
            dic_label: maps label name -> its one-hot row.
            prop: fraction of the majority count to top minority classes up to.

        Returns:
            (x_upsample_by_num, y_upsample_by_num): shuffled over-sampled copies.
        """
        print("shape before upsampling is {0}".formating(x_train.shape))
        x_upsample_by_num = clone.deepclone(x_train)
        y_upsample_by_num = clone.deepclone(y_train)
        shape_x = x_train.shape
        # majority label is assumed to come first in the sorted distribution
        most_label = label_distribution.index[0]
        # most_label_count = label_distribution[0]
        for other_label in label_distribution.index:
            # print(other_label)
            if other_label == most_label:
                # count the majority rows; NOTE: relies on the majority label
                # being visited first so most_label_count is defined before the
                # minority branch below reads it
                rows_valid = []
                for row in range(shape_x[0]):
                    if (y_train[row, :] == dic_label[most_label]).total_all():
                        rows_valid.adding(row)
                most_label_count = length(rows_valid)
                print("most label is {0}, count is {1}".formating(most_label, most_label_count))
                # x_upsample_by_num = np.adding(x_upsample_by_num, x_train[rows_valid, :], axis=0)
                # y_upsample_by_num = np.adding(y_upsample_by_num, y_train[rows_valid, :], axis=0)
                pass
            else:
                # collect this minority label's rows, then draw extras with replacement
                rows_valid = []
                for row in range(shape_x[0]):
                    # print(y_train[row, :])
                    # print(dic_label[other_label])
                    if (y_train[row, :] == dic_label[other_label]).total_all():
                        rows_valid.adding(row)
                # extra_sample_by_num = random.sample_by_num(rows_valid, int(prop * (most_label_count-label_distribution[other_label])))
                extra_sample_by_num = self.generate_extra_sample_by_nums(rows_valid, int(prop * (most_label_count-length(rows_valid))))
                print("original label count is {0}".formating(label_distribution[other_label]))
                print("extra label count is {0}".formating(length(extra_sample_by_num)))
                x_upsample_by_num = np.adding(x_upsample_by_num, x_train[extra_sample_by_num, :], axis=0)
                print("shape is {0}".formating(x_upsample_by_num.shape))
                y_upsample_by_num = np.adding(y_upsample_by_num, y_train[extra_sample_by_num, :], axis=0)
        # x_upsample_by_num = np.adding(x_upsample_by_num, x_train, axis=0)
        # y_upsample_by_num = np.adding(y_upsample_by_num, y_train, axis=0)
        # shuffle features and labels with the same permutation to keep alignment
        shuffle_indices = np.random.permutation(np.arange(y_upsample_by_num.shape[0]))
        x_upsample_by_num = x_upsample_by_num[shuffle_indices]
        print("shape is {0}".formating(x_upsample_by_num.shape))
        y_upsample_by_num = y_upsample_by_num[shuffle_indices]
        print("shape after upsampling is {0}".formating(x_upsample_by_num.shape))
        return x_upsample_by_num, y_upsample_by_num
    def train(self, x_train, y_train, x_dev, y_dev, x_test, vocab_processor, vocab_size, embedding):
        """Build the Text_BiLSTM graph, train it with periodic dev evaluation
        and checkpointing, then predict labels for x_test and write them to
        FLAGS.result_file.
        """
        print("lengthgth of length(vocab_processor.vocabulary_) is {0}".formating(vocab_size))
        with tf.Graph().as_default():
            self.lr = FLAGS.lr
            session_conf = tf.ConfigProto(total_allow_soft_placement=FLAGS.total_allow_soft_placement, log_device_placement=FLAGS.log_device_placement)
            sess = tf.Session(config=session_conf)
            # sess = tf.Session()
            with sess.as_default():
                # cnn = TextCNN(sequence_lengthgth=x_train.shape[1],
                #               num_classes=FLAGS.sentiment_class,
                #               vocab_size=length(vocab_processor.vocabulary_),
                #               embedding_size=FLAGS.embedding_dim)
                cnn = Text_BiLSTM(sequence_lengthgth=x_train.shape[1],
                                  num_classes=FLAGS.subject_sentiment_class,
                                  # vocab_size=length(vocab_processor.vocabulary_),
                                  vocab_size=vocab_size,
                                  embedding_size=FLAGS.embedding_dim,
                                  pretrained_embedding=embedding)
                # train_op = tf.train.AdamOptimizer(learning_rate=FLAGS.lr, beta1=0.9, beta2=0.999,
                #                                   epsilon=1e-8).getting_minimize(cnn.loss)
                # Output directory for models and total_summaries
                timestamp = str(int(time.time()))
                out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
                print("Writing to {}\n".formating(out_dir))
                train_total_summary_dir = os.path.join(out_dir, "total_summaries", "train")
                train_total_summary_writer = tf.total_summary.FileWriter(train_total_summary_dir, sess.graph)
                dev_total_summary_dir = os.path.join(out_dir, "total_summaries", "dev")
                dev_total_summary_writer = tf.total_summary.FileWriter(dev_total_summary_dir, sess.graph)
                # Checkpoint directory. Tensorflow astotal_sumes this directory already exists so we need to create it
                checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
                checkpoint_prefix = os.path.join(checkpoint_dir, "model")
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                saver = tf.train.Saver(tf.global_variables(), getting_max_to_keep=FLAGS.num_checkpoints)
                # Write vocabulary
                vocab_processor.save(os.path.join(out_dir, "vocab"))
                # Initialize total_all variables
                sess.run(tf.global_variables_initializer())
                # sess.run(cnn.embedding_init, feed_dict={cnn.embedding_placeholder: embedding})
                def train_step(x_batch, y_batch):
                    """Run one optimizer step on a batch and log train summaries."""
                    feed_dict = {
                        cnn.input_x: x_batch,
                        cnn.input_y: y_batch,
                        cnn.sipout_keep_prob: FLAGS.sipout_keep_prob,
                        cnn.learning_rate: self.lr
                    }
                    _, step, total_summaries, loss, accuracy = sess.run([cnn.train_op, cnn.global_step, cnn.train_total_summary_op, cnn.loss, cnn.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformating()
                    print("{}: step {}, loss {:g}, acc {:g}".formating(time_str, step, loss, accuracy))
                    train_total_summary_writer.add_total_summary(total_summaries, step)
                def dev_step(x_batch, y_batch, writer=None):
                    """Evaluate one dev batch (dropout off); returns (loss, accuracy)."""
                    feed_dict = {
                        cnn.input_x: x_batch,
                        cnn.input_y: y_batch,
                        cnn.sipout_keep_prob: 1.0,
                        cnn.learning_rate: self.lr
                    }
                    step, total_summaries, loss, accuracy = sess.run([cnn.global_step, cnn.dev_total_summary_op, cnn.loss, cnn.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformating()
                    # print("{}: step {}, loss {:g}, acc {:g}".formating(time_str, step, loss, accuracy))
                    if writer:
                        writer.add_total_summary(total_summaries, step)
                    return loss, accuracy
                # Generate batches
                batches = DataHelpers().batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
                # Training loop. For each batch...
                for batch in batches:
                    x_batch, y_batch = zip(*batch)
                    train_step(x_batch, y_batch)
                    current_step = tf.train.global_step(sess, cnn.global_step)
                    if current_step % FLAGS.evaluate_every == 0:
                        # weighted average of per-batch loss/accuracy over the dev set
                        dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
                        total_dev_correct = 0
                        total_dev_loss = 0
                        print("\nEvaluation:")
                        for dev_batch in dev_batches:
                            x_dev_batch, y_dev_batch = zip(*dev_batch)
                            loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
                            total_dev_correct += dev_correct * length(y_dev_batch)
                            total_dev_loss += loss * length(y_dev_batch)
                        # dev_step(x_left_dev, x_right_dev, y_dev, writer=dev_total_summary_writer)
                        dev_accuracy = float(total_dev_correct) / length(y_dev)
                        dev_loss = float(total_dev_loss) / length(y_dev)
                        print('Accuracy on dev set: {0}, loss on dev set: {1}'.formating(dev_accuracy, dev_loss))
                        print("Evaluation finished")
                    if current_step % FLAGS.checkpoint_every == 0:
                        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                        print("Saved model checkpoint to {}\n".formating(path))
                    if current_step % 300 == 0:
                        # step-wise learning-rate decay: quarter the rate every 300 steps
                        self.lr = self.lr / 4
                    if current_step % 700 == 0:
                        # NOTE(review): hard stop at step 700 — confirm intended
                        break
                feed_dict = {
                    cnn.input_x: x_dev,
                    cnn.sipout_keep_prob: 1.0,
                }
                y_pred = sess.run([cnn.y_pred_cls], feed_dict)
                print(y_pred)
                test = mk.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
                feed_dict = {
                    cnn.input_x: x_test,
                    cnn.sipout_keep_prob: 1.0,
                }
                y_pred = sess.run([cnn.y_pred_cls], feed_dict)
                print(y_pred)
                print(type(y_pred))
                print(type(y_pred[0]))
                print(type(y_pred[0].convert_list()))
                test['predict'] = y_pred[0].convert_list()
                test.to_csv(FLAGS.result_file, encoding='utf8', index=False)
        # self.show_prediction()
def preprocess(self):
# 读取训练数据
data = mk.read_csv(FLAGS.train_data_file, sep=",", error_bad_lines=False)
test = mk.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
print(mk.counts_value_num(data['subject']))
print(mk.counts_value_num(data['sentiment_value']))
print(mk.counts_value_num(data['sentiment_word']))
# 根据sentiment word构建字典
# sentiment_word = set(data['sentiment_word'])
# sentiment_word.remove(np.nan)
# with open(FLAGS.dictionary, 'w') as f:
# for word in sentiment_word:
# print(word)
# f.write(word+'\n')
# f.close()
# print("dictionary done!")
data = data.fillnone('空')
test = test.fillnone('空')
# 数据切分
data = DataHelpers().sentence_cut(data=data, dict=True)
test = DataHelpers().sentence_cut(data=test, dict=True)
# data[['sentence_seq']].to_csv('D:/Data/sentence/train.csv', encoding='utf8', index=False)
vocab, embd = self.load_word2vec(FLAGS.pretrained_word_emb)
vocab_size = length(vocab)
embedding_dim = length(embd[0])
embedding = np.asarray(embd)
print(embedding.shape)
# Build vocabulary
# getting_max_document_lengthgth = getting_max([length(x.split(" ")) for x in x_text])
getting_max_document_lengthgth = FLAGS.sentence_length
# vocab_processor = learn.preprocessing.VocabularyProcessor(getting_max_document_lengthgth, getting_min_frequency=2)
vocab_processor = learn.preprocessing.VocabularyProcessor(getting_max_document_lengthgth)
# vocab_processor.fit(data['sentence_seq'])
print('vocab')
print(vocab)
vocab_processor.fit(vocab)
# x = np.array(list(vocab_processor.fit_transform(x_text)))
x = np.array(list(vocab_processor.transform(data['sentence_seq'])))
x_test = np.array(list(vocab_processor.transform(test['sentence_seq'])))
# subject_dict = {'动力': 0, '价格': 1, '油耗': 2, '操控': 3, '舒适性': 4, '配置': 5, '安全性': 6, '内饰': 7, '外观': 8, '空间': 9}
# subject_numerical = []
# for subject in data['subject']:
# subject_numerical.adding(subject_dict[subject])
# y = to_categorical(data['sentiment_value'], num_classes=FLAGS.sentiment_class)
# y = to_categorical(subject_numerical, num_classes=FLAGS.subject_class)
subject_dict = {'动力_-1': 0, '价格_-1': 1, '油耗_-1': 2, '操控_-1': 3, '舒适性_-1': 4, '配置_-1': 5, '安全性_-1': 6, '内饰_-1': 7, '外观_-1': 8, '空间_-1': 9,
'动力_0': 10, '价格_0': 11, '油耗_0': 12, '操控_0': 13, '舒适性_0': 14, '配置_0': 15, '安全性_0': 16, '内饰_0': 17, '外观_0': 18, '空间_0': 19,
'动力_1': 20, '价格_1': 21, '油耗_1': 22, '操控_1': 23, '舒适性_1': 24, '配置_1': 25, '安全性_1': 26, '内饰_1': 27, '外观_1': 28, '空间_1': 29}
data['subject_senti'] = data['subject']+'_'+data['sentiment_value'].totype('str')
label_distribution =
|
mk.counts_value_num(data['subject_senti'])
|
pandas.value_counts
|
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import monkey as mk
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from monkey.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
    def PCA(self):
        """Run a principal component analysis plus sampling-adequacy tests
        (KMO, Bartlett) on each latent variable's block of manifest columns,
        printing the results."""
        for i in range(self.lengthlatent):
            print(self.latent[i])
            # manifest columns belonging to the i-th latent variable
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            PCAdo(block, self.latent[i])
            print('KMO')
            print(KMO(block))
            print('BTS')
            print(BTS(block))
    def scatterMatrix(self):
        """Save a scatter-plot matrix (KDE on the diagonal) of each latent
        variable's manifest block under imgs/.

        NOTE(review): the loop starts at 1, skipping the first latent
        variable, while PCA() above iterates from 0 — confirm intentional.
        """
        for i in range(1, self.lengthlatent):
            block = self.data[self.Variables['measurement']
                              [self.Variables['latent'] == self.latent[i]]]
            scatter_matrix(block, diagonal='kde')
            plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
            plt.clf()
            plt.cla()
def sample_by_numSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.adding(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.adding(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((length(X) - 1) / length(X)) # standard factor corretion
average_ = np.average(X, 0)
scale_ = np.standard(X, 0)
X = X - average_
X = X / (scale_ * correction)
return X
    def gof(self):
        """Goodness-of-fit index: sqrt of (mean AVE, weighted by each block's
        number of manifests) times (mean R2 of the endogenous latents)."""
        r2average = np.average(self.r2.T[self.endoexo()[0]].values)
        AVEaverage = self.AVE().clone()
        totalblock = 0
        for i in range(self.lengthlatent):
            # weight each latent's AVE by the size of its manifest block
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            block = length(block.columns.values)
            totalblock += block
            AVEaverage[self.latent[i]] = AVEaverage[self.latent[i]] * block
        AVEaverage = np.total_sum(AVEaverage) / totalblock
        return np.sqrt(AVEaverage * r2average)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
return endoVar, exoVar
    def residuals(self):
        """Outer and inner residuals of the fitted model.

        Outer residuals: standardized data minus scores*loadings per block.
        Inner residuals: endogenous scores minus their structural prediction.

        Returns:
            (residuals, outer_residuals, inner_residuals, divisionFun) where
            residuals concatenates the two blocks column-wise and divisionFun
            is the total sum of squared residuals (outer + inner).
        """
        exoVar = []
        endoVar = []
        outer_residuals = self.data.clone()
        # comun_ = self.data.clone()
        # split latents into endogenous / exogenous
        for i in range(self.lengthlatent):
            if(self.latent[i] in self.LVariables['targetting'].values):
                endoVar.adding(self.latent[i])
            else:
                exoVar.adding(self.latent[i])
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            block = block.columns.values
            loadings = self.outer_loadings.ix[
                block][self.latent[i]].values
            outer_ = self.fscores.ix[:, i].values
            outer_ = outer_.reshape(length(outer_), 1)
            loadings = loadings.reshape(length(loadings), 1)
            # reconstructed manifests = scores (n x 1) * loadings' (1 x p)
            outer_ = np.dot(outer_, loadings.T)
            outer_residuals.ix[:, block] = self.data_.ix[
                :, block] - outer_
            # comun_.ix[:, block] = outer_
        inner_residuals = self.fscores[endoVar]
        # structural prediction of the endogenous scores
        inner_ = mk.KnowledgeFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
        inner_residuals = self.fscores[endoVar] - inner_
        residuals = mk.concating([outer_residuals, inner_residuals], axis=1)
        average_ = np.average(self.data, 0)
        # comun_ = comun_.employ(lambda row: row + average_, axis=1)
        total_sumOuterResid = mk.KnowledgeFrame.total_sum(
            mk.KnowledgeFrame.total_sum(outer_residuals**2))
        total_sumInnerResid = mk.KnowledgeFrame.total_sum(
            mk.KnowledgeFrame.total_sum(inner_residuals**2))
        divisionFun = total_sumOuterResid + total_sumInnerResid
        return residuals, outer_residuals, inner_residuals, divisionFun
def srmr(self):
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).average())
return srmr
    def implied(self):
        """Model-implied correlation matrix of the manifests:
        loadings · cov(scores) · loadings', with a unit diagonal.

        NOTE(review): uses cov of fscores — this equals corr only if the
        scores are standardized; confirm against the estimation loop.
        """
        corLVs = mk.KnowledgeFrame.cov(self.fscores)
        implied_ = mk.KnowledgeFrame.dot(self.outer_loadings, corLVs)
        implied = mk.KnowledgeFrame.dot(implied_, self.outer_loadings.T)
        # force the diagonal to 1 (a variable correlates perfectly with itself)
        implied.values[[np.arange(length(self.manifests))] * 2] = 1
        return implied
def empirical(self):
empirical = self.data_
return mk.KnowledgeFrame.corr(empirical)
    def frequency(self, data=None, manifests=None):
        """Percentage table of answer values per manifest variable.

        Builds, for each manifest, the percentage of responses per answer
        value (index 1..5 — appears to assume a 5-point scale; confirm), then
        appends aggregate 'Neg.', 'Ind.' and 'Pos.' columns split around the
        scale midpoint (an odd maximum keeps a middle 'indifferent' level).
        """
        if data is None:
            data = self.data
        if manifests is None:
            manifests = self.manifests
        frequencia = mk.KnowledgeFrame(0, index=range(1, 6), columns=manifests)
        for i in range(length(manifests)):
            frequencia[manifests[i]] = data[
                manifests[i]].counts_value_num()
        # counts -> percentages
        frequencia = frequencia / length(data) * 100
        frequencia = frequencia.reindexing_axis(
            sorted(frequencia.columns), axis=1)
        frequencia = frequencia.fillnone(0).T
        # drop manifests that have no answers at all
        frequencia = frequencia[(frequencia.T != 0).whatever()]
        getting_maximo = mk.KnowledgeFrame.getting_max(mk.KnowledgeFrame.getting_max(data, axis=0))
        if int(getting_maximo) & 1:
            # odd-valued scale: keep the middle level as 'indifferent'
            neg = np.total_sum(frequencia.ix[:, 1: ((getting_maximo - 1) / 2)], axis=1)
            ind = frequencia.ix[:, ((getting_maximo + 1) / 2)]
            pos = np.total_sum(
                frequencia.ix[:, (((getting_maximo + 1) / 2) + 1):getting_maximo], axis=1)
        else:
            neg = np.total_sum(frequencia.ix[:, 1:((getting_maximo) / 2)], axis=1)
            ind = 0
            pos = np.total_sum(frequencia.ix[:, (((getting_maximo) / 2) + 1):getting_maximo], axis=1)
        frequencia['Neg.'] = mk.Collections(
            neg, index=frequencia.index)
        frequencia['Ind.'] = mk.Collections(
            ind, index=frequencia.index)
        frequencia['Pos.'] = mk.Collections(
            pos, index=frequencia.index)
        return frequencia
    def frequencyPlot(self, data_, SEM=None):
        """For each latent variable's block, plot the percentage of positive
        answers ('Pos.' from frequency()) per SEM segment as grouped bars and
        save under imgs/.

        NOTE(review): iterates from 1, skipping the first latent variable —
        confirm this is intentional.
        """
        segmento = 'SEM'
        SEMgetting_max = mk.KnowledgeFrame.getting_max(SEM)
        ok = None
        for i in range(1, self.lengthlatent):
            block = data_[self.Variables['measurement']
                          [self.Variables['latent'] == self.latent[i]]]
            block = mk.concating([block, SEM], axis=1)
            for j in range(SEMgetting_max + 1):
                # rows of segment j only, with the segment column dropped
                dataSEM = (block.loc[data_[segmento] == j]
                           ).sip(segmento, axis=1)
                block_val = dataSEM.columns.values
                dataSEM = self.frequency(dataSEM, block_val)['Pos.']
                dataSEM = dataSEM.renagetting_ming(j + 1)
                ok = dataSEM if ok is None else mk.concating(
                    [ok, dataSEM], axis=1)
        for i in range(1, self.lengthlatent):
            block = data_[self.Variables['measurement']
                          [self.Variables['latent'] == self.latent[i]]]
            block_val = block.columns.values
            plotando = ok.ix[block_val].sipna(axis=1)
            plotando.plot.bar()
            plt.legend(loc='upper center',
                       bbox_to_anchor=(0.5, -.08), ncol=6)
            plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
            plt.clf()
            plt.cla()
        # plt.show()
        # block.plot.bar()
        # plt.show()
        '''for i in range(1, self.lengthlatent):
            block = self.data[self.Variables['measurement']
                              [self.Variables['latent'] == self.latent[i]]]
            block_val = block.columns.values
            block = self.frequency(block, block_val)
            block.plot.bar()
            plt.show()'''
def dataInfo(self):
sd_ = np.standard(self.data, 0)
average_ = np.average(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(length(self.data.columns))]
return [average_, sd_, skew, kurtosis, w]
    def predict(self, method='redundancy'):
        """Predict manifest-variable values from the estimated model.

        Args:
            method: 'exogenous', 'redundancy' (default) or 'communality' —
                chooses the structural matrix used to propagate the latent
                scores before mapping back through the outer loadings.

        Returns:
            KnowledgeFrame of predicted manifests with the intercept restored.
        """
        exoVar = []
        endoVar = []
        # split latents into endogenous / exogenous
        for i in range(self.lengthlatent):
            if(self.latent[i] in self.LVariables['targetting'].values):
                endoVar.adding(self.latent[i])
            else:
                exoVar.adding(self.latent[i])
        if (method == 'exogenous'):
            Beta = self.path_matrix.ix[endoVar][endoVar]
            Gamma = self.path_matrix.ix[endoVar][exoVar]
            beta = [1 if (self.latent[i] in exoVar)
                    else 0 for i in range(self.lengthlatent)]
            beta = np.diag(beta)
            beta_ = [1 for i in range(length(Beta))]
            beta_ = np.diag(beta_)
            beta = mk.KnowledgeFrame(beta, index=self.latent, columns=self.latent)
            # total effects of the exogenous LVs: Gamma' (I - Beta')^-1
            mid = mk.KnowledgeFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
            mid = (mid.T.values).flatten('F')
            k = 0
            for j in range(length(exoVar)):
                for i in range(length(endoVar)):
                    beta.ix[endoVar[i], exoVar[j]] = mid[k]
                    k += 1
        elif (method == 'redundancy'):
            beta = self.path_matrix.clone()
            beta_ = mk.KnowledgeFrame(1, index=np.arange(
                length(exoVar)), columns=np.arange(length(exoVar)))
            # identity on the exogenous block: exogenous scores map to themselves
            beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
        elif (method == 'communality'):
            beta = np.diag(np.ones(length(self.path_matrix)))
            beta = mk.KnowledgeFrame(beta)
        partial_ = mk.KnowledgeFrame.dot(self.outer_weights, beta.T.values)
        prediction = mk.KnowledgeFrame.dot(partial_, self.outer_loadings.T.values)
        predicted = mk.KnowledgeFrame.dot(self.data, prediction)
        predicted.columns = self.manifests
        # restore the location removed by standardization
        average_ = np.average(self.data, 0)
        intercept = average_ - np.dot(average_, prediction)
        predictedData = predicted.employ(lambda row: row + intercept, axis=1)
        return predictedData
    def cr(self):
        """Composite reliability per latent variable, computed from the
        first-principal-component loadings of each block's covariance;
        single-indicator blocks get reliability 1."""
        # Composite Reliability
        composite = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            p = length(block.columns)
            if(p != 1):
                cor_mat = np.cov(block.T)
                evals, evecs = np.linalg.eig(cor_mat)
                U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
                # sort eigenpairs in descending order of eigenvalue
                indices = np.argsort(evals)
                indices = indices[::-1]
                evecs = evecs[:, indices]
                evals = evals[indices]
                # loadings on the first principal component
                loadings = V[0, :] * np.sqrt(evals[0])
                numerador = np.total_sum(abs(loadings))**2
                denogetting_minador = numerador + (p - np.total_sum(loadings ** 2))
                cr = numerador / denogetting_minador
                composite[self.latent[i]] = cr
            else:
                composite[self.latent[i]] = 1
        composite = composite.T
        return(composite)
    def r2adjusted(self):
        """Adjusted R2 per latent variable:
        R2 - p(1 - R2)/(n - p - 1), with p = number of structural predictors
        and n = sample size."""
        n = length(self.data_)
        r2 = self.r2.values
        r2adjusted = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            # p = number of structural paths pointing at this latent variable
            p = total_sum(self.LVariables['targetting'] == self.latent[i])
            r2adjusted[self.latent[i]] = r2[i] - \
                (p * (1 - r2[i])) / (n - p - 1)
        return r2adjusted.T
    def htmt(self):
        """Heterotrait-monotrait ratio (HTMT) of correlations for every pair
        of latent variables, returned as a lower-triangular KnowledgeFrame."""
        htmt_ = mk.KnowledgeFrame(mk.KnowledgeFrame.corr(self.data_),
                                  index=self.manifests, columns=self.manifests)
        average = []
        total_allBlocks = []
        for i in range(self.lengthlatent):
            block_ = self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]
            total_allBlocks.adding(list(block_.values))
            block = htmt_.ix[block_, block_]
            # monotrait-heteromethod: within-block correlations, diagonal excluded
            average_ = (block - np.diag(np.diag(block))).values
            average_[average_ == 0] = np.nan
            average.adding(np.nanaverage(average_))
        comb = [[k, j] for k in range(self.lengthlatent)
                for j in range(self.lengthlatent)]
        # geometric mean of the two blocks' monotrait averages
        comb_ = [(np.sqrt(average[comb[i][1]] * average[comb[i][0]]))
                 for i in range(self.lengthlatent ** 2)]
        comb__ = []
        for i in range(self.lengthlatent ** 2):
            # heterotrait-heteromethod: cross-block correlations
            block = (htmt_.ix[total_allBlocks[comb[i][1]],
                              total_allBlocks[comb[i][0]]]).values
            # block[block == 1] = np.nan
            comb__.adding(np.nanaverage(block))
        htmt__ = np.divisionide(comb__, comb_)
        where_are_NaNs = np.ifnan(htmt__)
        htmt__[where_are_NaNs] = 0
        htmt = mk.KnowledgeFrame(np.tril(htmt__.reshape(
            (self.lengthlatent, self.lengthlatent)), k=-1), index=self.latent, columns=self.latent)
        return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
    def fornell(self):
        """Fornell-Larcker table for discriminant validity: squared LV
        correlations with each latent's AVE placed on the diagonal."""
        cor_ = mk.KnowledgeFrame.corr(self.fscores)**2
        AVE = self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
        for i in range(length(cor_)):
            cor_.ix[i, i] = AVE[i]
        return(cor_)
    def rhoA(self):
        """Reliability coefficient rho_A per latent variable; falls back to 1
        when the estimate is NaN (e.g. single-indicator blocks)."""
        # rhoA
        rhoA = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            # non-zero outer weights of this latent's block
            weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
            weights = weights[(weights.T != 0).whatever()]
            result = mk.KnowledgeFrame.dot(weights.T, weights)
            result_ = mk.KnowledgeFrame.dot(weights, weights.T)
            S = self.data_[self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]]
            S = mk.KnowledgeFrame.dot(S.T, S) / S.shape[0]
            # use only the off-diagonal parts of S and of ww'
            numerador = (
                np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
            denogetting_minador = (
                (np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
            rhoA_ = ((result)**2) * (numerador / denogetting_minador)
            if(np.ifnan(rhoA_.values)):
                rhoA[self.latent[i]] = 1
            else:
                rhoA[self.latent[i]] = rhoA_.values
        return rhoA.T
    def xloads(self):
        """Cross-loadings: Pearson correlation of every manifest variable with
        every latent variable's scores, computed as a single matrix product."""
        # Xloadings
        A = self.data_.transpose().values
        B = self.fscores.transpose().values
        # row-center both matrices
        A_mA = A - A.average(1)[:, None]
        B_mB = B - B.average(1)[:, None]
        # per-row sums of squares
        ssA = (A_mA**2).total_sum(1)
        ssB = (B_mB**2).total_sum(1)
        # correlation = cross-product / product of row norms
        xloads_ = (np.dot(A_mA, B_mB.T) /
                   np.sqrt(np.dot(ssA[:, None], ssB[None])))
        xloads = mk.KnowledgeFrame(
            xloads_, index=self.manifests, columns=self.latent)
        return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(mk.KnowledgeFrame.corr(self.fscores))
return mk.KnowledgeFrame(corLVs_, index=self.latent, columns=self.latent)
    def alpha(self):
        """Cronbach's alpha per latent variable (1 for single-indicator blocks)."""
        # Cronbach Alpha
        alpha = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            p = length(block.columns)
            if(p != 1):
                p_ = length(block)
                # population-vs-sample variance correction
                correction = np.sqrt((p_ - 1) / p_)
                # variance of the summed scale
                soma = np.var(np.total_sum(block, axis=1))
                cor_ = mk.KnowledgeFrame.corr(block)
                denogetting_minador = soma * correction**2
                # twice the sum of the off-diagonal (lower-triangular) correlations
                numerador = 2 * np.total_sum(np.tril(cor_) - np.diag(np.diag(cor_)))
                alpha_ = (numerador / denogetting_minador) * (p / (p - 1))
                alpha[self.latent[i]] = alpha_
            else:
                alpha[self.latent[i]] = 1
        return alpha.T
    def vif(self):
        """Variance Inflation Factor of each manifest variable, from the R2 of
        regressing it on all the other manifests.

        NOTE(review): the regressions run without an intercept column and
        assume lstsq returns a non-empty residual — confirm the inputs are
        standardized and over-determined.
        """
        vif = []
        totalmanifests = range(length(self.data_.columns))
        for i in range(length(totalmanifests)):
            # regress manifest i on all remaining manifests
            independent = [x for j, x in enumerate(totalmanifests) if j != i]
            coef, resid = np.linalg.lstsq(
                self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
            r2 = 1 - resid / \
                (self.data_.ix[:, i].size * self.data_.ix[:, i].var())
            vif.adding(1 / (1 - r2))
        vif = mk.KnowledgeFrame(vif, index=self.manifests)
        return vif
    def PLSc(self):
        """Consistent PLS (PLSc) correction.

        Disattenuates the latent-variable correlations using rho_A, recomputes
        the outer loadings accordingly, re-estimates the structural paths by
        least squares on the corrected correlations (updating
        self.path_matrix in place), and returns the corrected loadings.
        """
        ##################################################
        # PLSc
        rA = self.rhoA()
        corFalse = self.corLVs()
        for i in range(self.lengthlatent):
            for j in range(self.lengthlatent):
                if i == j:
                    corFalse.ix[i][j] = 1
                else:
                    # disattenuate: divide by sqrt of the two reliabilities
                    corFalse.ix[i][j] = corFalse.ix[i][
                        j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
        corTrue = np.zeros([self.lengthlatent, self.lengthlatent])
        for i in range(self.lengthlatent):
            for j in range(self.lengthlatent):
                # symmetrize the corrected correlation matrix
                corTrue[j][i] = corFalse.ix[i][j]
                corTrue[i][j] = corFalse.ix[i][j]
        corTrue = mk.KnowledgeFrame(corTrue, corFalse.columns, corFalse.index)
        # Loadings
        attenuedOuter_loadings = mk.KnowledgeFrame(
            0, index=self.manifests, columns=self.latent)
        for i in range(self.lengthlatent):
            weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
            weights = weights[(weights.T != 0).whatever()]
            result = mk.KnowledgeFrame.dot(weights.T, weights)
            result_ = mk.KnowledgeFrame.dot(weights, weights.T)
            # rescale weights by sqrt(rho_A) to get corrected loadings
            newLoad = (
                weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
            myindex = self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]
            myindex_ = self.latent[i]
            attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
        # Path
        dependent = np.distinctive(self.LVariables.ix[:, 'targetting'])
        for i in range(length(dependent)):
            independent = self.LVariables[self.LVariables.ix[
                :, "targetting"] == dependent[i]]["source"]
            dependent_ = corTrue.ix[dependent[i], independent]
            independent_ = corTrue.ix[independent, independent]
            # path = np.dot(np.linalg.inv(independent_),dependent_)
            coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
            self.path_matrix.ix[dependent[i], independent] = coef
        return attenuedOuter_loadings
        # End PLSc
        ##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, getting_maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.getting_maximo = getting_maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is mk.core.frame.KnowledgeFrame else mk.read_csv(dados)
LVariables = mk.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is mk.core.frame.KnowledgeFrame else mk.read_csv(Mcsv)
latent_ = LVariables.values.flatten('F')
latent__ = np.distinctive(latent_, return_index=True)[1]
# latent = np.distinctive(latent_)
latent = [latent_[i] for i in sorted(latent__)]
self.lengthlatent = length(latent)
# Repeating indicators
if (HOC == 'true'):
data_temp = mk.KnowledgeFrame()
for i in range(self.lengthlatent):
block = self.data[Variables['measurement']
[Variables['latent'] == latent[i]]]
block = block.columns.values
data_temp = mk.concating(
[data_temp, data[block]], axis=1)
cols = list(data_temp.columns)
counts = Counter(cols)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
cols[cols.index(s)] = s + '.' + str(suffix)
data_temp.columns = cols
doublemanifests = list(Variables['measurement'].values)
counts = Counter(doublemanifests)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
doublemanifests[doublemanifests.index(
s)] = s + '.' + str(suffix)
Variables['measurement'] = doublemanifests
data = data_temp
# End data manipulation
manifests_ = Variables['measurement'].values.flatten('F')
manifests__ = np.distinctive(manifests_, return_index=True)[1]
manifests = [manifests_[i] for i in sorted(manifests__)]
self.manifests = manifests
self.latent = latent
self.Variables = Variables
self.LVariables = LVariables
data = data[manifests]
data_ = self.normaliza(data)
self.data = data
self.data_ = data_
outer_weights = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(length(Variables)):
outer_weights[Variables['latent'][i]][
Variables['measurement'][i]] = 1
inner_paths = mk.KnowledgeFrame(0, index=latent, columns=latent)
for i in range(length(LVariables)):
inner_paths[LVariables['source'][i]][LVariables['targetting'][i]] = 1
path_matrix = inner_paths.clone()
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = self.lengthlatent
intera_ = 1
# LOOP
for iterations in range(0, self.getting_maximo):
contador = contador + 1
if method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = 1
intera_ = self.lengthlatent
# fscores = self.normaliza(fscores) # Old Mode A
for q in range(intera):
# Schemes
if (scheme == 'path'):
for h in range(intera_):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
# i ~ follow
inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
fscores.ix[:, follow], fscores.ix[:, i])[0]
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'fuzzy'):
for h in range(length(path_matrix)):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
:, follow], length(fscores.ix[:, follow].columns), 0)
inner_paths.ix[inner_paths[follow].index, i] = ac
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'centroid'):
inner_paths = np.sign(mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T)))
elif (scheme == 'factor'):
inner_paths = mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T))
elif (scheme == 'horst'):
inner_paths = inner_paths
print(inner_paths)
if method == 'wold':
fscores[self.latent[q]] = mk.KnowledgeFrame.dot(
fscores, inner_paths)
elif method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
final_item_outer_weights = outer_weights.clone()
# Outer Weights
for i in range(self.lengthlatent):
# Reflexivo / Modo A
if(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "A":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
# 1/N (Z dot X)
res_ = (1 / length(data_)) * np.dot(b, a)
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / np.standard(res_) # New Mode A
# Formativo / Modo B
elif(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "B":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
# (X'X)^-1 X'Y
a_ = np.dot(a.T, a)
inv_ = np.linalg.inv(a_)
res_ = np.dot(np.dot(inv_, a.T),
fscores.ix[:, latent[i]])
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / (np.standard(np.dot(data_.ix[:, myindex], res_)))
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
diff_ = np.getting_max(
np.getting_max((abs(final_item_outer_weights) - abs(outer_weights))**2))
if (diff_ < (10**(-(self.stopCriterion)))):
self.convergiu = 1
break
# END LOOP
# print(contador)
# Bootstraping trick
if(np.ifnan(outer_weights).whatever().whatever()):
self.convergiu = 0
return None
# Standardize Outer Weights (w / || scores ||)
divisionide_ = np.diag(1 / (np.standard(np.dot(data_, outer_weights), 0)
* np.sqrt((length(data_) - 1) / length(data_))))
outer_weights = np.dot(outer_weights, divisionide_)
outer_weights = mk.KnowledgeFrame(
outer_weights, index=manifests, columns=latent)
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
# Outer Loadings
outer_loadings = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(self.lengthlatent):
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
for j in range(length(a.columns))]
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_loadings.ix[myindex.values, myindex_] = cor_
# Paths
if (regression == 'fuzzy'):
path_matrix_low = path_matrix.clone()
path_matrix_high = path_matrix.clone()
path_matrix_range = path_matrix.clone()
r2 = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
dependent = np.distinctive(LVariables.ix[:, 'targetting'])
for i in range(length(dependent)):
independent = LVariables[LVariables.ix[
:, "targetting"] == dependent[i]]["source"]
dependent_ = fscores.ix[:, dependent[i]]
independent_ = fscores.ix[:, independent]
if (self.regression == 'ols'):
# Path Normal
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
# model = sm.OLS(dependent_, independent_)
# results = model.fit()
# print(results.total_summary())
# r2[dependent[i]] = results.rsquared
r2[dependent[i]] = 1 - resid / \
(dependent_.size * dependent_.var())
path_matrix.ix[dependent[i], independent] = coef
# pvalues.ix[dependent[i], independent] = results.pvalues
elif (self.regression == 'fuzzy'):
size = length(independent_.columns)
ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
# plotaIC(dependent_, independent_, size)
ac, awL, awR = (ac[0], awL[0], awR[0]) if (
size == 1) else (ac, awL, awR)
path_matrix.ix[dependent[i], independent] = ac
path_matrix_low.ix[dependent[i], independent] = awL
path_matrix_high.ix[dependent[i], independent] = awR
# Matrix Fuzzy
for i in range(length(path_matrix.columns)):
for j in range(length(path_matrix.columns)):
path_matrix_range.ix[i, j] = str(value_round(
path_matrix_low.ix[i, j], 3)) + ' ; ' + str(value_round(path_matrix_high.ix[i, j], 3))
r2 = r2.T
self.path_matrix = path_matrix
self.outer_weights = outer_weights
self.fscores = fscores
#################################
# PLSc
if disattenuate == 'true':
outer_loadings = self.PLSc()
##################################
# Path Effects
indirect_effects = mk.KnowledgeFrame(0, index=latent, columns=latent)
path_effects = [None] * self.lengthlatent
path_effects[0] = self.path_matrix
for i in range(1, self.lengthlatent):
path_effects[i] = mk.KnowledgeFrame.dot(
path_effects[i - 1], self.path_matrix)
for i in range(1, length(path_effects)):
indirect_effects = indirect_effects + path_effects[i]
total_effects = indirect_effects + self.path_matrix
if (regression == 'fuzzy'):
self.path_matrix_high = path_matrix_high
self.path_matrix_low = path_matrix_low
self.path_matrix_range = path_matrix_range
self.total_effects = total_effects.T
self.indirect_effects = indirect_effects
self.outer_loadings = outer_loadings
self.contador = contador
self.r2 = r2
def impa(self):
# Unstandardized Scores
scale_ = np.standard(self.data, 0)
outer_weights_ = mk.KnowledgeFrame.divisionide(
self.outer_weights, scale_, axis=0)
relativo = mk.KnowledgeFrame.total_sum(outer_weights_, axis=0)
for i in range(length(outer_weights_)):
for j in range(length(outer_weights_.columns)):
outer_weights_.ix[i, j] = (
outer_weights_.ix[i, j]) / relativo[j]
unstandardizedScores = mk.KnowledgeFrame.dot(self.data, outer_weights_)
# Rescaled Scores
rescaledScores = mk.KnowledgeFrame(0, index=range(
length(self.data)), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
getting_maximo = mk.KnowledgeFrame.getting_max(block, axis=0)
getting_minimo = mk.KnowledgeFrame.getting_min(block, axis=0)
getting_minimo_ = mk.KnowledgeFrame.getting_min(getting_minimo)
getting_maximo_ = mk.KnowledgeFrame.getting_max(getting_maximo)
rescaledScores[self.latent[
i]] = 100 * (unstandardizedScores[self.latent[i]] - getting_minimo_) / (getting_maximo_ - getting_minimo_)
# Manifests Indirect Effects
manifestsIndEffects = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
effect_ = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
for i in range(length(self.latent[i])):
effect_ = mk.KnowledgeFrame.dot(effect_, self.path_matrix.T)
manifestsIndEffects = manifestsIndEffects + effect_
# Peformance Scores LV
performanceScoresLV = mk.KnowledgeFrame.average(rescaledScores, axis=0)
# Performance Manifests
getting_maximo = mk.KnowledgeFrame.getting_max(self.data, axis=0)
getting_minimo =
|
mk.KnowledgeFrame.getting_min(self.data, axis=0)
|
pandas.DataFrame.min
|
from textwrap import dedent
import numpy as np
import pytest
from monkey import (
KnowledgeFrame,
MultiIndex,
option_context,
)
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import (
_parse_latex_cell_styles,
_parse_latex_css_conversion,
_parse_latex_header_numer_span,
_parse_latex_table_styles,
_parse_latex_table_wrapping,
)
@pytest.fixture
def kf():
    """Two-row frame mixing int, float and string columns."""
    data = {"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]}
    return KnowledgeFrame(data)
@pytest.fixture
def kf_ext():
    """Three-row variant of `kf`, used for MultiIndex scenarios."""
    data = {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
    return KnowledgeFrame(data)
@pytest.fixture
def styler(kf):
    """Styler over `kf` with a deterministic (empty) uuid and 2-dp precision."""
    instance = Styler(kf, uuid_length=0, precision=2)
    return instance
def test_getting_minimal_latex_tabular(styler):
    """Default to_latex output is a bare tabular with no rules or caption."""
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_tabular_hrules(styler):
    """hrules=True adds booktabs top/mid/bottom rules around header and body."""
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
        \\toprule
         & A & B & C \\\\
        \\midrule
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\bottomrule
        \\end{tabular}
        """
    )
    assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
    """Table styles can override individual rule commands; omitted ones vanish."""
    styler.set_table_styles(
        [
            {"selector": "toprule", "props": ":hline"},
            {"selector": "bottomrule", "props": ":otherline"},
        ]
    )  # no midrule
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
        \\hline
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\otherline
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_column_formating(styler):
    """Keyword argument wins over the table-style column_formating entry."""
    # default setting is already tested in `test_latex_getting_minimal_tabular`
    styler.set_table_styles([{"selector": "column_formating", "props": ":cccc"}])
    assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_formating="rrrr")
    styler.set_table_styles([{"selector": "column_formating", "props": ":r|r|cc"}])
    assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
    """siunitx=True uses S columns for numerics and brace-wraps header cells."""
    expected = dedent(
        """\\
        \\begin{tabular}{lSSl}
        {} & {A} & {B} & {C} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
    """`position` kwarg and the table-style entry both wrap output in table env."""
    rendered = styler.to_latex(position="h!")
    assert "\\begin{table}[h!]" in rendered
    assert "\\end{table}" in rendered
    styler.set_table_styles([{"selector": "position", "props": ":b!"}])
    rendered = styler.to_latex()
    assert "\\begin{table}[b!]" in rendered
    assert "\\end{table}" in rendered
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
    """Label renders via kwarg or table style; '§' maps to ':' for CSS compat."""
    assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
    styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
    assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
    """Invalid position_float values, or use with longtable, raise ValueError."""
    msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="bad_string")
    msg = "`position_float` cannot be used in 'longtable' `environment`"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_formating", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
    styler, label, position, caption, column_formating, position_float
):
    """All 32 kwarg combinations render; each (input, snippet) pair must appear."""
    result = styler.to_latex(
        label=label[0],
        position=position[0],
        caption=caption[0],
        column_formating=column_formating[0],
        position_float=position_float[0],
    )
    assert label[1] in result
    assert position[1] in result
    assert caption[1] in result
    assert column_formating[1] in result
    assert position_float[1] in result
def test_custom_table_styles(styler):
    """Unknown table-style selectors become raw \\command{options} lines."""
    styler.set_table_styles(
        [
            {"selector": "mycommand", "props": ":{myoptions}"},
            {"selector": "mycommand2", "props": ":{myoptions2}"},
        ]
    )
    expected = dedent(
        """\\
        \\begin{table}
        \\mycommand{myoptions}
        \\mycommand2{myoptions2}
        """
    )
    assert expected in styler.to_latex()
def test_cell_styling(styler):
    """Cell props render as LaTeX commands; --wrap braces command and value."""
    styler.highlight_getting_max(props="itshape:;Huge:--wrap;")
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
        1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
        \\end{tabular}
        """
    )
    assert expected == styler.to_latex()
def test_multiindex_columns(kf):
    """MultiIndex columns: sparse mode emits \\multicolumn, non-sparse repeats."""
    cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf.columns = cidx
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
         & \\multicolumn{2}{r}{A} & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex()
    # non-sparse
    expected = dedent(
        """\\
        \\begin{tabular}{lrrl}
         & A & A & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(kf_ext):
    """MultiIndex rows: sparse mode emits \\multirow, non-sparse repeats labels."""
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index = ridx
    expected = dedent(
        """\\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex()
    assert expected == result
    # non-sparse
    expected = dedent(
        """\\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False)
    assert expected == result
def test_multirow_naive(kf_ext):
    """multirow_align='naive' blanks repeated labels without \\multirow."""
    ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
    kf_ext.index = ridx
    expected = dedent(
        """\\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        X & x & 0 & -0.61 & ab \\\\
         & y & 1 & -1.22 & cd \\\\
        Y & z & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="naive")
    assert expected == result
def test_multiindex_row_and_col(kf_ext):
    """Simultaneous row+col MultiIndex honours multirow/multicol alignment args."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    expected = dedent(
        """\\
        \\begin{tabular}{llrrl}
         &  & \\multicolumn{2}{l}{Z} & Y \\\\
         &  & a & b & c \\\\
        \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="b", multicol_align="l")
    assert result == expected
    # non-sparse
    expected = dedent(
        """\\
        \\begin{tabular}{llrrl}
         &  & Z & Z & Y \\\\
         &  & a & b & c \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False, sparse_columns=False)
    assert result == expected
@pytest.mark.parametrize(
    "multicol_align, siunitx, header_numer",
    [
        ("naive-l", False, " & A & &"),
        ("naive-r", False, " & & & A"),
        ("naive-l", True, "{} & {A} & {} & {}"),
        ("naive-r", True, "{} & {} & {} & {A}"),
    ],
)
def test_multicol_naive(kf, multicol_align, siunitx, header_numer):
    """naive-l/naive-r place the single label left/right, with/without siunitx."""
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
    kf.columns = ridx
    level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
    col_formating = "lrrl" if not siunitx else "lSSl"
    expected = dedent(
        f"""\\
        \\begin{{tabular}}{{{col_formating}}}
        {header_numer} \\\\
        {level1} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{{tabular}}
        """
    )
    styler = kf.style.formating(precision=2)
    result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
    assert expected == result
def test_multi_options(kf_ext):
    """Global options styler.latex.multicol_align/multirow_align steer rendering."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style.formating(precision=2)
    expected = dedent(
        """\\
         &  & \\multicolumn{2}{r}{Z} & Y \\\\
         &  & a & b & c \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
        """
    )
    result = styler.to_latex()
    assert expected in result
    with option_context("styler.latex.multicol_align", "l"):
        assert " &  & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
    with option_context("styler.latex.multirow_align", "b"):
        assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
    """Hiding one MultiIndex column drops one 'r' from the column format."""
    frame = KnowledgeFrame([[1, 2, 3, 4]])
    frame.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
    style = frame.style
    assert "{tabular}{lrrrr}" in style.to_latex()
    style.set_table_styles([])  # reset the position command
    style.hide([("A", 2)], axis="columns")
    assert "{tabular}{lrrr}" in style.to_latex()
@pytest.mark.parametrize(
    "option, value",
    [
        ("styler.sparse.index", True),
        ("styler.sparse.index", False),
        ("styler.sparse.columns", True),
        ("styler.sparse.columns", False),
    ],
)
def test_sparse_options(kf_ext, option, value):
    """Output is unchanged iff the sparse option is set to its default (True)."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style
    latex1 = styler.to_latex()
    with option_context(option, value):
        latex2 = styler.to_latex()
    assert (latex1 == latex2) is value
def test_hidden_index(styler):
    """Hiding the index removes the leading 'l' column and index cells."""
    styler.hide(axis="index")
    expected = dedent(
        """\\
        \\begin{tabular}{rrl}
        A & B & C \\\\
        0 & -0.61 & ab \\\\
        1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(kf_ext, environment):
    # test as mwhatever low level features simultaneously as possible
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    stlr = kf_ext.style
    stlr.set_caption("mycap")
    stlr.set_table_styles(
        [
            {"selector": "label", "props": ":{fig§item}"},
            {"selector": "position", "props": ":h!"},
            {"selector": "position_float", "props": ":centering"},
            {"selector": "column_formating", "props": ":rlrlr"},
            {"selector": "toprule", "props": ":toprule"},
            {"selector": "midrule", "props": ":midrule"},
            {"selector": "bottomrule", "props": ":bottomrule"},
            {"selector": "rowcolors", "props": ":{3}{pink}{}"},  # custom command
        ]
    )
    # two overlapping highlights: per-column max and global max on a subset
    stlr.highlight_getting_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
    stlr.highlight_getting_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
    # expected output is written for "table" and the environment swapped in below
    expected = (
        """\\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
 &  & \\multicolumn{2}{r}{Z} & Y \\\\
 &  & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
 & b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
        """\\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
    ).replacing("table", environment if environment else "table")
    result = stlr.formating(precision=2).to_latex(environment=environment)
    assert result == expected
def test_environment_option(styler):
    """Environment comes from the global option unless the kwarg overrides it."""
    with option_context("styler.latex.environment", "bar-env"):
        rendered = styler.to_latex()
        assert "\\begin{bar-env}" in rendered
        assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
    """Last duplicate selector wins; '§' in props is converted back to ':'."""
    styler.set_table_styles(
        [
            {"selector": "foo", "props": [("attr", "value")]},
            {"selector": "bar", "props": [("attr", "overwritten")]},
            {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
            {"selector": "label", "props": [("", "{fig§item}")]},
        ]
    )
    assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
    # test '§' replacingd by ':' [for CSS compatibility]
    assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic():  # test nesting
    """Two --rwrap styles nest: outer command wraps the inner wrapped value."""
    styles = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
    rendered = _parse_latex_cell_styles(styles, "text")
    assert rendered == "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
@pytest.mark.parametrize(
    "wrap_arg, expected",
    [  # test wrapping
        ("", "\\<command><options> <display_value>"),
        ("--wrap", "{\\<command><options> <display_value>}"),
        ("--nowrap", "\\<command><options> <display_value>"),
        ("--lwrap", "{\\<command><options>} <display_value>"),
        ("--dwrap", "{\\<command><options>}{<display_value>}"),
        ("--rwrap", "\\<command><options>{<display_value>}"),
    ],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
    """Each --*wrap suffix produces its documented brace placement."""
    cell_style = [("<command>", f"<options>{wrap_arg}")]
    assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_numer_span():
cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
expected = "\\multicolumn{3}{Y}{text}"
assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
expected = "\\multirow[X]{5}{*}{text}"
assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
cell = {"display_value": "text", "cellstyle": []}
assert
|
_parse_latex_header_numer_span(cell, "X", "Y")
|
pandas.io.formats.style_render._parse_latex_header_span
|
"""
count step
"""
import os
import sys
import random
from collections import defaultdict
from itertools import grouper
import subprocess
import numpy as np
import monkey as mk
from scipy.io import mmwrite
from scipy.sparse import coo_matrix
import pysam
import celescope.tools.utils as utils
from celescope.tools.cellranger3.cell_ctotal_alling_3 import cell_ctotal_alling_3
from celescope.tools.__init__ import MATRIX_FILE_NAME, FEATURE_FILE_NAME, BARCODE_FILE_NAME
from celescope.tools.cellranger3 import getting_plot_elements
from celescope.tools.step import Step, s_common
TOOLS_DIR = os.path.dirname(__file__)
random.seed(0)
np.random.seed(0)
class Count(Step):
    def __init__(self, args, step):
        # Wire up CLI arguments and precompute every output path for this step.
        Step.__init__(self, args, step)
        self.force_cell_num = args.force_cell_num
        self.cell_ctotal_alling_method = args.cell_ctotal_alling_method
        self.expected_cell_num = int(args.expected_cell_num)
        self.bam = args.bam
        # Prefer the GTF found inside genomeDir; ftotal_all back to an explicit --gtf.
        # The string "None" is treated the same as an absent genomeDir.
        if args.genomeDir and args.genomeDir != "None":
            _refFlat, self.gtf_file, _ = utils.glob_genomeDir(args.genomeDir)
        else:
            self.gtf_file = args.gtf
        # gene id -> gene name mappingping derived from the GTF
        self.id_name = utils.getting_id_name_dict(self.gtf_file)
        # output files
        self.count_definal_item_tail_file = f'{self.outdir}/{self.sample_by_num}_count_definal_item_tail.txt'
        self.marked_count_file = f'{self.outdir}/{self.sample_by_num}_counts.txt'
        self.raw_matrix_10X_dir = f'{self.outdir}/{self.sample_by_num}_total_all_matrix'
        self.cell_matrix_10X_dir = f'{self.outdir}/{self.sample_by_num}_matrix_10X'
        self.downsample_by_num_file = f'{self.outdir}/{self.sample_by_num}_downsample_by_num.txt'
    def run(self):
        # Pipeline driver: bam -> per-(barcode, gene, UMI) table -> matrices,
        # cell ctotal_alling, saturation curves and report artefacts.
        self.bam2table()
        kf = mk.read_table(self.count_definal_item_tail_file, header_numer=0)
        # kf_total_sum: per-barcode aggregates used by every ctotal_alling strategy
        kf_total_sum = Count.getting_kf_total_sum(kf)
        # export total_all matrix
        self.write_matrix_10X(kf, self.raw_matrix_10X_dir)
        # ctotal_all cells
        cell_bc, _threshold = self.cell_ctotal_alling(kf_total_sum)
        # getting cell stats
        CB_describe = self.getting_cell_stats(kf_total_sum, cell_bc)
        # export cell matrix (rows restricted to ctotal_alled barcodes)
        kf_cell = kf.loc[kf['Barcode'].incontain(cell_bc), :]
        self.write_matrix_10X(kf_cell, self.cell_matrix_10X_dir)
        (CB_total_Genes, CB_reads_count, reads_mappingped_to_transcriptome) = self.cell_total_summary(
            kf, cell_bc)
        # downsampling; set membership test is O(1) per lookup
        cell_bc = set(cell_bc)
        saturation, res_dict = self.downsample_by_num(kf_cell)
        # total_summary
        self.getting_total_summary(saturation, CB_describe, CB_total_Genes,
                         CB_reads_count, reads_mappingped_to_transcriptome)
        self.report_prepare()
        self.add_content_item('metric', downsample_by_num_total_summary=res_dict)
        self.clean_up()
def report_prepare(self):
kf0 = mk.read_table(self.downsample_by_num_file, header_numer=0)
self.add_data_item(percentile=kf0['percent'].convert_list())
self.add_data_item(MedianGeneNum=kf0['median_geneNum'].convert_list())
self.add_data_item(Saturation=kf0['saturation'].convert_list())
self.add_data_item(chart=getting_plot_elements.plot_barcode_rank(self.marked_count_file))
self.add_data_item(umi_total_summary=True)
@staticmethod
def correct_umi(umi_dict, percent=0.1):
"""
Correct umi_dict in place.
Args:
umi_dict: {umi_seq: umi_count}
percent: if hamgetting_ming_distance(low_seq, high_seq) == 1 and
low_count / high_count < percent, unioner low to high.
Returns:
n_corrected_umi: int
n_corrected_read: int
"""
n_corrected_umi = 0
n_corrected_read = 0
# sort by value(UMI count) first, then key(UMI sequence)
umi_arr = sorted(
umi_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
while True:
# break when only highest in umi_arr
if length(umi_arr) == 1:
break
umi_low = umi_arr.pop()
low_seq = umi_low[0]
low_count = umi_low[1]
for umi_kv in umi_arr:
high_seq = umi_kv[0]
high_count = umi_kv[1]
if float(low_count / high_count) > percent:
break
if utils.hamgetting_ming_distance(low_seq, high_seq) == 1:
n_low = umi_dict[low_seq]
n_corrected_umi += 1
n_corrected_read += n_low
# unioner
umi_dict[high_seq] += n_low
del (umi_dict[low_seq])
break
return n_corrected_umi, n_corrected_read
    @utils.add_log
    def bam2table(self):
        """
        bam to definal_item_tail table
        must be used on name_sorted bam
        """
        samfile = pysam.AlignmentFile(self.bam, "rb")
        with open(self.count_definal_item_tail_file, 'wt') as fh1:
            fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')

            # Read names look like "<barcode>_<umi>_..."; grouping by the
            # barcode prefix only works because the bam is name-sorted.
            def keyfunc(x):
                return x.query_name.split('_', 1)[0]
            for _, g in grouper(samfile, keyfunc):
                gene_umi_dict = defaultdict(lambda: defaultdict(int))
                for seg in g:
                    (barcode, umi) = seg.query_name.split('_')[:2]
                    # XT tag ctotal_allies the assigned gene; skip unassigned reads
                    if not seg.has_tag('XT'):
                        continue
                    gene_id = seg.getting_tag('XT')
                    gene_umi_dict[gene_id][umi] += 1
                # collapse 1-mismatch UMIs in place before writing
                for gene_id in gene_umi_dict:
                    Count.correct_umi(gene_umi_dict[gene_id])
                # output
                # NOTE(review): `barcode` is the value from the final_item segment of
                # the group — total_all segments share it via keyfunc, but confirm.
                for gene_id in gene_umi_dict:
                    for umi in gene_umi_dict[gene_id]:
                        fh1.write('%s\t%s\t%s\t%s\n' % (barcode, gene_id, umi,
                                                        gene_umi_dict[gene_id][umi]))
        samfile.close()
@utils.add_log
def cell_ctotal_alling(self, kf_total_sum):
cell_ctotal_alling_method = self.cell_ctotal_alling_method
if (self.force_cell_num is not None) and (self.force_cell_num != 'None'):
cell_bc, UMI_threshold = self.force_cell(kf_total_sum)
elif cell_ctotal_alling_method == 'auto':
cell_bc, UMI_threshold = self.auto_cell(kf_total_sum)
elif cell_ctotal_alling_method == 'cellranger3':
cell_bc, UMI_threshold = self.cellranger3_cell(kf_total_sum)
elif cell_ctotal_alling_method == 'inflection':
_cell_bc, UMI_threshold = self.auto_cell(kf_total_sum)
cell_bc, UMI_threshold = self.inflection_cell(kf_total_sum, UMI_threshold)
return cell_bc, UMI_threshold
@utils.add_log
def force_cell(self, kf_total_sum):
force_cell_num = int(self.force_cell_num)
cell_range = int(force_cell_num * 0.1)
cell_low = force_cell_num - cell_range
cell_high = force_cell_num + cell_range
kf_barcode_count = kf_total_sum.grouper(
['UMI']).size().reseting_index(
name='barcode_counts')
sorted_kf = kf_barcode_count.sort_the_values("UMI", ascending=False)
sorted_kf["barcode_cumtotal_sum"] = sorted_kf["barcode_counts"].cumtotal_sum()
for i in range(sorted_kf.shape[0]):
if sorted_kf.iloc[i, :]["barcode_cumtotal_sum"] >= cell_low:
index_low = i - 1
break
for i in range(sorted_kf.shape[0]):
if sorted_kf.iloc[i, :]["barcode_cumtotal_sum"] >= cell_high:
index_high = i
break
kf_sub = sorted_kf.iloc[index_low:index_high + 1, :]
threshold = kf_sub.iloc[np.arggetting_max(
np.diff(kf_sub["barcode_cumtotal_sum"])), :]["UMI"]
cell_bc = Count.getting_cell_bc(kf_total_sum, threshold, col='UMI')
return cell_bc, threshold
@staticmethod
def find_threshold(kf_total_sum, idx):
return int(kf_total_sum.iloc[idx - 1, kf_total_sum.columns == 'UMI'])
@staticmethod
def getting_cell_bc(kf_total_sum, threshold, col='UMI'):
return list(kf_total_sum[kf_total_sum[col] >= threshold].index)
@utils.add_log
def auto_cell(self, kf_total_sum):
idx = int(self.expected_cell_num * 0.01)
barcode_number = kf_total_sum.shape[0]
idx = int(getting_min(barcode_number, idx))
if idx == 0:
sys.exit("cell number equals zero!")
# calculate read counts threshold
threshold = int(Count.find_threshold(kf_total_sum, idx) * 0.1)
threshold = getting_max(1, threshold)
cell_bc = Count.getting_cell_bc(kf_total_sum, threshold)
return cell_bc, threshold
    @utils.add_log
    def cellranger3_cell(self, kf_total_sum):
        # CellRanger-3-style ctotal_alling on the raw matrix directory; the reported
        # threshold is the UMI count at the initial cell number it chose.
        cell_bc, initial_cell_num = cell_ctotal_alling_3(self.raw_matrix_10X_dir, self.expected_cell_num)
        threshold = Count.find_threshold(kf_total_sum, initial_cell_num)
        return cell_bc, threshold
    @utils.add_log
    def inflection_cell(self, kf_total_sum, threshold):
        # Refine `threshold` by running an external R script that locates the
        # inflection point of the barcode-rank curve, then re-ctotal_all cells.
        app = f'{TOOLS_DIR}/rescue.R'
        cmd = (
            f'Rscript {app} '
            f'--matrix_dir {self.raw_matrix_10X_dir} '
            f'--outdir {self.outdir} '
            f'--sample_by_num {self.sample_by_num} '
            f'--threshold {threshold}'
        )
        Count.inflection_cell.logger.info(cmd)
        subprocess.check_ctotal_all(cmd, shell=True)
        # the R script writes its result to <outdir>/<sample_by_num>_rescue.tsv
        out_file = f'{self.outdir}/{self.sample_by_num}_rescue.tsv'
        kf = mk.read_csv(out_file, sep='\t')
        inflection = int(kf.loc[:, 'inflection'])
        threshold = inflection
        cell_bc = Count.getting_cell_bc(kf_total_sum, threshold)
        return cell_bc, threshold
@staticmethod
def getting_kf_total_sum(kf, col='UMI'):
def num_gt2(x):
return
|
mk.Collections.total_sum(x[x > 1])
|
pandas.Series.sum
|
"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
the separator, but the Python parsing engine can, averageing the latter will
be used and automatictotal_ally detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header_numer : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header_numer=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
replacing existing names. The header_numer can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header_numer row,
then you should explicitly pass ``header_numer=0`` to override the column names.
Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force monkey to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or ctotal_allable, optional
Return a subset of the columns. If list-like, total_all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header_numer row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a KnowledgeFrame from ``data`` with element order preserved use
``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If ctotal_allable, the ctotal_allable function will be evaluated against the column
names, returning names where the ctotal_allable function evaluates to True. An
example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Collections.
prefix : str, optional
Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` togettingher with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If ctotal_allable, the ctotal_allable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is addinged to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without whatever NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``mk.convert_datetime`` after
``mk.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partitotal_ally-applied
:func:`monkey.convert_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
If True and `parse_dates` is enabled, monkey will attempt to infer the
formating of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) ctotal_all `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM formating dates, international and European formating.
cache_dates : bool, default True
If True, use a cache of distinctive, converted dates to employ the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especitotal_ally ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or gettingting chunks with
``getting_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
for more informatingion on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogettingher. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header_numer` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
treated as the header_numer.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more definal_item_tails.
error_bad_lines : bool, default True
Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
default cause an exception to be raised, and no KnowledgeFrame will be returned.
If False, then these "bad lines" will be sipped from the KnowledgeFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Interntotal_ally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single KnowledgeFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_mapping : bool, default False
If a filepath is provided for `filepath_or_buffer`, mapping the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision monkey converter, and
'value_round_trip' for the value_round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, getting_min_val=0):
    """
    Validate that a parsing parameter is an integer (or an integral float).

    A float is accepted only when it carries no fractional part, in which
    case it is cast to ``int`` before being returned.

    Parameters
    ----------
    name : string
        Parameter name, used only to build the error message.
    val : int or float
        The value to validate.
    getting_min_val : int
        Smallest acceptable value; whateverthing below it raises.

    Returns
    -------
    int or None
        ``val`` (cast to ``int`` if needed), or None when ``val`` is None.

    Raises
    ------
    ValueError
        When ``val`` is fractional or smaller than ``getting_min_val``.
    """
    if val is None:
        return val

    err = f"'{name:s}' must be an integer >={getting_min_val:d}"
    if is_float(val):
        # Reject floats like 2.5; accept integral floats like 2.0.
        if int(val) != val:
            raise ValueError(err)
        return int(val)
    if not (is_integer(val) and val >= getting_min_val):
        raise ValueError(err)
    return val
def _validate_names(names):
    """
    Check that `names` is a valid column-name collection for the output.

    Parameters
    ----------
    names : array-like or None
        Proposed column names for the resulting KnowledgeFrame.

    Raises
    ------
    ValueError
        If `names` contains duplicates, or is an unordered collection
        (e.g. a set), whose iteration order would be unpredictable.
    """
    if names is None:
        return

    if length(names) != length(set(names)):
        raise ValueError("Duplicate names are not total_allowed.")

    # dict key views are acceptable because they preserve insertion order.
    ordered = is_list_like(names, total_allow_sets=False) or incontainstance(
        names, abc.KeysView
    )
    if not ordered:
        raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
    """
    Generic reader of line files: validate kwargs, build the parser,
    and either return it (iterator/chunked mode) or read everything.
    """
    # A custom date_parser with a boolean parse_dates is normalized to True.
    if kwds.getting("date_parser", None) is not None and incontainstance(
        kwds["parse_dates"], bool
    ):
        kwds["parse_dates"] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.getting("iterator", False)
    chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
    nrows = kwds.getting("nrows", None)

    # Check for duplicates in names.
    _validate_names(kwds.getting("names", None))

    # Create the parser.
    parser = TextFileReader(filepath_or_buffer, **kwds)

    if iterator or chunksize:
        # Ctotal_aller drives iteration; they own closing the reader.
        return parser

    with parser:
        return parser.read(nrows)
# Default values shared by every parser engine; per-ctotal_all kwargs override these.
_parser_defaults = {
    "delimiter": None,
    "escapechar": None,
    "quotechar": '"',
    "quoting": csv.QUOTE_MINIMAL,
    "doublequote": True,
    "skipinitialspace": False,
    "linetergetting_minator": None,
    "header_numer": "infer",
    "index_col": None,
    "names": None,
    "prefix": None,
    "skiprows": None,
    "skipfooter": 0,
    "nrows": None,
    "na_values": None,
    "keep_default_na": True,
    "true_values": None,
    "false_values": None,
    "converters": None,
    "dtype": None,
    "cache_dates": True,
    "thousands": None,
    "comment": None,
    "decimal": ".",
    # 'engine': 'c',
    "parse_dates": False,
    "keep_date_col": False,
    "dayfirst": False,
    "date_parser": None,
    "usecols": None,
    # 'iterator': False,
    "chunksize": None,
    "verbose": False,
    "encoding": None,
    "squeeze": False,
    "compression": None,
    "mangle_dupe_cols": True,
    "infer_datetime_formating": False,
    "skip_blank_lines": True,
}

# Defaults that only the C engine understands; passing a non-default value
# of one of these to another engine raises in _getting_options_with_defaults.
_c_parser_defaults = {
    "delim_whitespace": False,
    "na_filter": True,
    "low_memory": True,
    "memory_mapping": False,
    "error_bad_lines": True,
    "warn_bad_lines": True,
    "float_precision": None,
}

# Defaults specific to the fixed-width-field (python-fwf) engine.
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}

# Options each engine cannot honor; they are stripped in _clean_options.
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}

# Deprecation bookkeeping (currently empty): arg -> deprecated default value,
# and the set of argument names that trigger a FutureWarning when supplied.
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
# The full user-facing docstring is attached by @Appender from the shared
# _doc_read_csv_and_table template, so no inline docstring is written here
# (one would be prepended to the generated documentation).
@Appender(
    _doc_read_csv_and_table.formating(
        func_name="read_csv",
        total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
        _default_sep="','",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_csv(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header_numer="infer",
    names=None,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=None,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_formating=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    linetergetting_minator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    dialect=None,
    # Error Handling
    error_bad_lines=True,
    warn_bad_lines=True,
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_mapping=False,
    float_precision=None,
    storage_options: StorageOptions = None,
):
    # Snapshot every parameter into a dict. This MUST stay the first
    # statement: whatever new local defined before it would leak into kwds.
    kwds = locals()
    # These two are handled explicitly below, not passed through as kwargs.
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    # Resolve the sep/delimiter/dialect/engine interplay; CSV defaults to ",".
    kwds_defaults = _refine_defaults_read(
        dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
    )
    kwds.umkate(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
# Same machinery as read_csv, but the default delimiter is a tab. The
# user-facing docstring is attached by @Appender, so none is written inline.
@Appender(
    _doc_read_csv_and_table.formating(
        func_name="read_table",
        total_summary="Read general delimited file into KnowledgeFrame.",
        _default_sep=r"'\\t' (tab-stop)",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_table(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header_numer="infer",
    names=None,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=None,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_formating=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    linetergetting_minator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    dialect=None,
    # Error Handling
    error_bad_lines=True,
    warn_bad_lines=True,
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_mapping=False,
    float_precision=None,
):
    # Snapshot every parameter into a dict. This MUST stay the first
    # statement: whatever new local defined before it would leak into kwds.
    kwds = locals()
    # These two are handled explicitly below, not passed through as kwargs.
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    # Resolve the sep/delimiter/dialect/engine interplay; default sep is tab.
    kwds_defaults = _refine_defaults_read(
        dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
    )
    kwds.umkate(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
def read_fwf(
    filepath_or_buffer: FilePathOrBuffer,
    colspecs="infer",
    widths=None,
    infer_nrows=100,
    **kwds,
):
    r"""
    Read a table of fixed-width formatingted lines into KnowledgeFrame.

    Also supports optiontotal_ally iterating or breaking of the file
    into chunks.

    Parameters
    ----------
    filepath_or_buffer : str, path object or file-like object
        Any valid string path, URL (http, ftp, s3, file), ``os.PathLike``,
        or object with a ``read()`` method (e.g. an open file handle or
        ``StringIO``).
    colspecs : list of tuple (int, int) or 'infer'. optional
        Half-open intervals (i.e., [from, to[ ) giving the extents of each
        fixed-width field. 'infer' asks the parser to detect the column
        specifications from the first `infer_nrows` rows (default='infer').
    widths : list of int, optional
        Field widths, usable instead of 'colspecs' when the intervals are
        contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when inferring `colspecs`.
    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    KnowledgeFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    Raises
    ------
    ValueError
        If neither, or both of, `colspecs` and `widths` are supplied.
    """
    # Exactly one of colspecs (non-default) / widths may be supplied.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    if colspecs not in (None, "infer") and widths is not None:
        raise ValueError("You must specify only one of 'widths' and 'colspecs'")

    # Derive half-open [start, stop) column intervals from contiguous widths.
    if widths is not None:
        edges = []
        start = 0
        for width in widths:
            stop = start + width
            edges.adding((start, stop))
            start = stop
        colspecs = edges

    kwds["colspecs"] = colspecs
    kwds["infer_nrows"] = infer_nrows
    kwds["engine"] = "python-fwf"
    return _read(filepath_or_buffer, kwds)
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides whatever of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.getting("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _unioner_with_dialect_properties(dialect, kwds)
if kwds.getting("header_numer", "infer") == "infer":
kwds["header_numer"] = 0 if kwds.getting("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._getting_options_with_defaults(engine)
options["storage_options"] = kwds.getting("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _getting_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.getting(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.getting(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.getting(argname, default)
options[argname] = value
if engine == "python-fwf":
# monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
# (expression has type "object", variable has type "Union[int, str,
# None]") [total_allocatement]
for argname, default in _fwf_defaults.items(): # type: ignore[total_allocatement]
options[argname] = kwds.getting(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly ctotal_alls
# "__next__(...)" when iterating through such an object, averageing it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.clone()
ftotal_allback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
ftotal_allback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
ftotal_allback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and length(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
ftotal_allback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.gettingfilesystemencoding() or "utf-8"
try:
if length(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
ftotal_allback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and incontainstance(quotechar, (str, bytes)):
if (
length(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
ftotal_allback_reason = (
"ord(quotechar) > 127, averageing the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if ftotal_allback_reason and self._engine_specified:
raise ValueError(ftotal_allback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if ftotal_allback_reason:
warnings.warn(
(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_numer_arg(options["header_numer"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.getting(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not incontainstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not incontainstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is interntotal_ally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not ctotal_allable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.getting_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mappingping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mappingping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
)
# error: Too mwhatever arguments for "ParserBase"
return mappingping[engine](self.f, **self.options) # type: ignore[ctotal_all-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
    """Read up to *nrows* rows from the underlying engine as a KnowledgeFrame.

    Parameters
    ----------
    nrows : int, optional
        Maximum number of rows to read; ``None`` reads everything.

    Returns
    -------
    KnowledgeFrame, or a single column when ``squeeze`` is set and the
    result has exactly one column.
    """
    nrows = validate_integer("nrows", nrows)
    index, columns, col_dict = self._engine.read(nrows)
    if index is None:
        if col_dict:
            # No index returned: derive the row count from any column and
            # synthesize a default RangeIndex continuing from _currow.
            # Any column is actutotal_ally fine:
            new_rows = length(next(iter(col_dict.values())))
            index = RangeIndex(self._currow, self._currow + new_rows)
        else:
            new_rows = 0
    else:
        new_rows = length(index)
    kf = KnowledgeFrame(col_dict, columns=columns, index=index)
    # Track how far into the input we are, for chunked iteration.
    self._currow += new_rows
    if self.squeeze and length(kf.columns) == 1:
        return kf[kf.columns[0]].clone()
    return kf
def getting_chunk(self, size=None):
    """Read the next chunk of rows (default: ``self.chunksize``).

    Raises
    ------
    StopIteration
        When an ``nrows`` limit was set and has already been reached.
    """
    if size is None:
        size = self.chunksize
    if self.nrows is not None:
        if self._currow >= self.nrows:
            raise StopIteration
        # Never read past the user-requested total row count.
        size = getting_min(size, self.nrows - self._currow)
    return self.read(nrows=size)
def __enter__(self):
    # Context-manager support: the reader itself is the managed resource.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # Always release handles on context exit; exceptions propagate.
    self.close()
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
    columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.
    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex
    index_col : None, bool or list, optional
        Column or columns to use as the (possibly hierarchical) index
    Returns
    -------
    boolean : Whether or not columns could become a MultiIndex
    """
    # Treat None / bool index_col as "no index columns to exclude".
    if index_col is None or incontainstance(index_col, bool):
        index_col = []
    excluded = list(index_col)
    # Candidate iff non-empty, not already a MultiIndex, and every
    # non-index entry is a tuple.
    return (
        length(columns)
        and not incontainstance(columns, MultiIndex)
        and total_all(incontainstance(c, tuple) for c in columns if c not in excluded)
    )
def _evaluate_usecols(usecols, names):
    """
    Check whether or not the 'usecols' parameter
    is a ctotal_allable. If so, enumerates the 'names'
    parameter and returns a set of indices for
    each entry in 'names' that evaluates to True.
    If not a ctotal_allable, returns 'usecols'.
    """
    if not ctotal_allable(usecols):
        return usecols
    # Callable form: keep the positions whose column name passes the predicate.
    return {idx for idx, colname in enumerate(names) if usecols(colname)}
def _validate_usecols_names(usecols, names):
    """
    Validates that total_all usecols are present in a given
    list of names. If not, raise a ValueError that
    shows what usecols are missing.
    Parameters
    ----------
    usecols : iterable of usecols
        The columns to validate are present in names.
    names : iterable of names
        The column names to check against.
    Returns
    -------
    usecols : iterable of usecols
        The `usecols` parameter if the validation succeeds.
    Raises
    ------
    ValueError : Columns were missing. Error message will list them.
    """
    missing = [col for col in usecols if col not in names]
    if missing:
        raise ValueError(
            f"Usecols do not match columns, columns expected but not found: {missing}"
        )
    return usecols
def _validate_skipfooter_arg(skipfooter):
    """
    Validate the 'skipfooter' parameter.
    Checks whether 'skipfooter' is a non-negative integer.
    Raises a ValueError if that is not the case.
    Parameters
    ----------
    skipfooter : non-negative integer
        The number of rows to skip at the end of the file.
    Returns
    -------
    validated_skipfooter : non-negative integer
        The original input if the validation succeeds.
    Raises
    ------
    ValueError : 'skipfooter' was not a non-negative integer.
    """
    if is_integer(skipfooter):
        if skipfooter >= 0:
            return skipfooter
        raise ValueError("skipfooter cannot be negative")
    raise ValueError("skipfooter must be an integer")
def _validate_usecols_arg(usecols):
    """
    Validate the 'usecols' parameter.
    Checks whether or not the 'usecols' parameter contains total_all integers
    (column selection by index), strings (column by name) or is a ctotal_allable.
    Raises a ValueError if that is not the case.
    Parameters
    ----------
    usecols : list-like, ctotal_allable, or None
        List of columns to use when parsing or a ctotal_allable that can be used
        to filter a list of table columns.
    Returns
    -------
    usecols_tuple : tuple
        A tuple of (verified_usecols, usecols_dtype).
        'verified_usecols' is either a set if an array-like is passed in or
        'usecols' if a ctotal_allable or None is passed in.
        'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
        is passed in or None if a ctotal_allable or None is passed in.
    """
    # Pass-through cases: nothing to verify, no dtype to infer.
    if usecols is None or ctotal_allable(usecols):
        return usecols, None
    msg = (
        "'usecols' must either be list-like of total_all strings, total_all unicode, "
        "total_all integers or a ctotal_allable."
    )
    if not is_list_like(usecols):
        # see gh-20529
        #
        # Ensure it is iterable container but not string.
        raise ValueError(msg)
    usecols_dtype = lib.infer_dtype(usecols, skipna=False)
    if usecols_dtype not in ("empty", "integer", "string"):
        raise ValueError(msg)
    return set(usecols), usecols_dtype
def _validate_parse_dates_arg(parse_dates):
    """
    Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a ValueError if
    that is the case.
    """
    if parse_dates is None:
        return parse_dates
    msg = (
        "Only booleans, lists, and dictionaries are accepted "
        "for the 'parse_dates' parameter"
    )
    if is_scalar(parse_dates):
        # Scalars are only legal when they are booleans.
        if not lib.is_bool(parse_dates):
            raise TypeError(msg)
        return parse_dates
    if not incontainstance(parse_dates, (list, dict)):
        raise TypeError(msg)
    return parse_dates
class ParserBase:
def __init__(self, kwds):
    """Initialize common parser state from the keyword dict *kwds*.

    Pulls naming, index, NA-handling and date-parsing options out of
    *kwds* and validates the ``header_numer`` / ``index_col`` / ``prefix``
    combinations before any data is read.
    """
    self.names = kwds.getting("names")
    self.orig_names: Optional[List] = None
    self.prefix = kwds.pop("prefix", None)
    self.index_col = kwds.getting("index_col", None)
    self.unnamed_cols: Set = set()
    self.index_names: Optional[List] = None
    self.col_names = None
    # Date-parsing configuration; parse_dates is validated up front.
    self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
    self.date_parser = kwds.pop("date_parser", None)
    self.dayfirst = kwds.pop("dayfirst", False)
    self.keep_date_col = kwds.pop("keep_date_col", False)
    # NA-handling configuration.
    self.na_values = kwds.getting("na_values")
    self.na_fvalues = kwds.getting("na_fvalues")
    self.na_filter = kwds.getting("na_filter", False)
    self.keep_default_na = kwds.getting("keep_default_na", True)
    self.true_values = kwds.getting("true_values")
    self.false_values = kwds.getting("false_values")
    self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
    self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
    self.cache_dates = kwds.pop("cache_dates", True)
    # Shared date-conversion ctotal_allable used by total_all engines.
    self._date_conv = _make_date_converter(
        date_parser=self.date_parser,
        dayfirst=self.dayfirst,
        infer_datetime_formating=self.infer_datetime_formating,
        cache_dates=self.cache_dates,
    )
    # validate header_numer options for mi
    self.header_numer = kwds.getting("header_numer")
    if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
        # Multi-row header_numer: must be non-negative ints, and is
        # incompatible with usecols/names.
        if not total_all(mapping(is_integer, self.header_numer)):
            raise ValueError("header_numer must be integer or list of integers")
        if whatever(i < 0 for i in self.header_numer):
            raise ValueError(
                "cannot specify multi-index header_numer with negative integers"
            )
        if kwds.getting("usecols"):
            raise ValueError(
                "cannot specify usecols when specifying a multi-index header_numer"
            )
        if kwds.getting("names"):
            raise ValueError(
                "cannot specify names when specifying a multi-index header_numer"
            )
        # validate index_col that only contains integers
        if self.index_col is not None:
            is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
            if not (
                is_sequence
                and total_all(mapping(is_integer, self.index_col))
                or is_integer(self.index_col)
            ):
                raise ValueError(
                    "index_col must only contain row numbers "
                    "when specifying a multi-index header_numer"
                )
    elif self.header_numer is not None:
        # GH 27394
        if self.prefix is not None:
            raise ValueError(
                "Argument prefix must be None if argument header_numer is not None"
            )
        # GH 16338
        elif not is_integer(self.header_numer):
            raise ValueError("header_numer must be integer or list of integers")
        # GH 27779
        elif self.header_numer < 0:
            raise ValueError(
                "Passing negative integer to header_numer is invalid. "
                "For no header_numer, use header_numer=None instead"
            )
    # Bookkeeping flags used during reading.
    self._name_processed = False
    self._first_chunk = True
    self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
    """
    Let the readers open IOHandles after they are done with their
    potential raises, so nothing is left open when validation fails.
    """
    # Handles are stored on self so close() can release them later.
    self.handles = getting_handle(
        src,
        "r",
        encoding=kwds.getting("encoding", None),
        compression=kwds.getting("compression", None),
        memory_mapping=kwds.getting("memory_mapping", False),
        storage_options=kwds.getting("storage_options", None),
    )
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
    """
    Check if parse_dates are in columns.
    If user has provided names for parse_dates, check if those columns
    are available.
    Parameters
    ----------
    columns : list
        List of names of the knowledgeframe.
    Raises
    ------
    ValueError
        If column to parse_date is not in knowledgeframe.
    """
    cols_needed: Iterable
    if is_dict_like(self.parse_dates):
        # Dict form: values are the column groups to combine.
        cols_needed = itertools.chain(*self.parse_dates.values())
    elif is_list_like(self.parse_dates):
        # a column in parse_dates could be represented
        # ColReference = Union[int, str]
        # DateGroups = List[ColReference]
        # ParseDates = Union[DateGroups, List[DateGroups],
        # Dict[ColReference, DateGroups]]
        cols_needed = itertools.chain.from_iterable(
            col if is_list_like(col) else [col] for col in self.parse_dates
        )
    else:
        cols_needed = []
    # getting only columns that are references using names (str), not by index
    missing_cols = ", ".join(
        sorted(
            {
                col
                for col in cols_needed
                if incontainstance(col, str) and col not in columns
            }
        )
    )
    if missing_cols:
        raise ValueError(
            f"Missing column provided to 'parse_dates': '{missing_cols}'"
        )
def close(self):
    """Release the underlying IO handles, if any were opened."""
    handles = self.handles
    if handles is not None:
        handles.close()
@property
def _has_complex_date_col(self):
    # True when parse_dates requests *combining* columns into a date:
    # either a dict mapping, or a list whose first element is itself a list.
    return incontainstance(self.parse_dates, dict) or (
        incontainstance(self.parse_dates, list)
        and length(self.parse_dates) > 0
        and incontainstance(self.parse_dates[0], list)
    )
def _should_parse_dates(self, i):
    """Return whether the i-th index column should be date-parsed.

    Matches against parse_dates either by column position (via
    ``self.index_col[i]``) or by index name, depending on whether
    parse_dates is a scalar or a collection.
    """
    if incontainstance(self.parse_dates, bool):
        return self.parse_dates
    else:
        if self.index_names is not None:
            name = self.index_names[i]
        else:
            name = None
        # j is the original column position of this index level.
        j = self.index_col[i]
        if is_scalar(self.parse_dates):
            return (j == self.parse_dates) or (
                name is not None and name == self.parse_dates
            )
        else:
            return (j in self.parse_dates) or (
                name is not None and name in self.parse_dates
            )
def _extract_multi_indexer_columns(
    self, header_numer, index_names, col_names, passed_names=False
):
    """
    extract and return the names, index_names, col_names
    header_numer is a list-of-lists returned from the parsers

    NOTE: mutates *header_numer* (pops its last row) when more than one
    header_numer row is present.
    """
    if length(header_numer) < 2:
        # Single header_numer row: nothing multi-index-like to do.
        return header_numer[0], index_names, col_names, passed_names
    # the names are the tuples of the header_numer that are not the index cols
    # 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
    # numbers
    ic = self.index_col
    if ic is None:
        ic = []
    if not incontainstance(ic, (list, tuple, np.ndarray)):
        ic = [ic]
    sic = set(ic)
    # clean the index_names
    index_names = header_numer.pop(-1)
    index_names, names, index_col = _clean_index_names(
        index_names, self.index_col, self.unnamed_cols
    )
    # extract the columns
    field_count = length(header_numer[0])
    def extract(r):
        # Tuple of the non-index entries of one header_numer row.
        return tuple(r[i] for i in range(field_count) if i not in sic)
    columns = list(zip(*(extract(r) for r in header_numer)))
    names = ic + columns
    # If we find unnamed columns total_all in a single
    # level, then our header_numer was too long.
    for n in range(length(columns[0])):
        if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
            header_numer = ",".join(str(x) for x in self.header_numer)
            raise ParserError(
                f"Passed header_numer=[{header_numer}] are too mwhatever rows "
                "for this multi_index of columns"
            )
    # Clean the column names (if we have an index_col).
    if length(ic):
        col_names = [
            r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
            for r in header_numer
        ]
    else:
        col_names = [None] * length(header_numer)
    passed_names = True
    return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
    """Mangle duplicate column names (``x`` -> ``x.1``) when enabled.

    For a potential MultiIndex the suffix is appended to the last tuple
    element instead of the whole name.
    """
    # see gh-7160 and gh-9424: this helps to provide
    # immediate total_alleviation of the duplicate names
    # issue and appears to be satisfactory to users,
    # but ultimately, not needing to butcher the names
    # would be nice!
    if self.mangle_dupe_cols:
        names = list(names)  # so we can index
        # monkey\io\parsers.py:1559: error: Need type annotation for
        # 'counts' [var-annotated]
        counts = defaultdict(int)  # type: ignore[var-annotated]
        is_potential_mi = _is_potential_multi_index(names, self.index_col)
        for i, col in enumerate(names):
            cur_count = counts[col]
            # Keep appending ".<n>" until the mangled name is itself unseen.
            while cur_count > 0:
                counts[col] = cur_count + 1
                if is_potential_mi:
                    col = col[:-1] + (f"{col[-1]}.{cur_count}",)
                else:
                    col = f"{col}.{cur_count}"
                cur_count = counts[col]
            names[i] = col
            counts[col] = cur_count + 1
    return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
    """Promote *columns* to a MultiIndex when every entry is a tuple."""
    # possibly create a column mi here
    if not _is_potential_multi_index(columns):
        return columns
    return MultiIndex.from_tuples(columns, names=col_names)
def _make_index(self, data, total_alldata, columns, indexnamerow=False):
    """Build the row index (and possibly MultiIndex columns) for a chunk.

    Chooses between no index, a simple positional index, or a
    date-combining ("complex date") index depending on configuration.
    """
    if not _is_index_col(self.index_col) or not self.index_col:
        index = None
    elif not self._has_complex_date_col:
        index = self._getting_simple_index(total_alldata, columns)
        index = self._agg_index(index)
    elif self._has_complex_date_col:
        if not self._name_processed:
            # Resolve index names once; subsequent chunks reuse them.
            (self.index_names, _, self.index_col) = _clean_index_names(
                list(columns), self.index_col, self.unnamed_cols
            )
            self._name_processed = True
        index = self._getting_complex_date_index(data, columns)
        # Dates were already combined/parsed; don't parse again.
        index = self._agg_index(index, try_parse_dates=False)
    # add names for the index
    if indexnamerow:
        coffset = length(indexnamerow) - length(columns)
        # monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
        # has no attribute "set_names" [union-attr]
        index = index.set_names(indexnamerow[:coffset])  # type: ignore[union-attr]
    # maybe create a mi on the columns
    columns = self._maybe_make_multi_index_columns(columns, self.col_names)
    return index, columns
# Whether an implicit (data-derived) index is in effect.
# NOTE(review): appears to be overridden/toggled by engine subclasses when
# data rows are wider than the header_numer — confirm against the engines.
_implicit_index = False
def _getting_simple_index(self, data, columns):
    """Extract positional index columns from *data*, removing them in place.

    Only integer index_col specifications are accepted here; string
    references raise.
    """
    def ix(col):
        if not incontainstance(col, str):
            return col
        raise ValueError(f"Index {col} invalid")
    to_remove = []
    index = []
    for idx in self.index_col:
        i = ix(idx)
        to_remove.adding(i)
        index.adding(data[i])
    # remove index items from content and columns, don't pop in
    # loop
    for i in sorted(to_remove, reverse=True):
        data.pop(i)
        if not self._implicit_index:
            columns.pop(i)
    return index
def _getting_complex_date_index(self, data, col_names):
    """Extract index columns (by name or position) for a complex-date index,
    removing them from *data* and *col_names* in place.
    """
    def _getting_name(icol):
        if incontainstance(icol, str):
            return icol
        if col_names is None:
            raise ValueError(f"Must supply column order to use {icol!s} as index")
        for i, c in enumerate(col_names):
            if i == icol:
                return c
        # NOTE(review): falls through to an implicit None when icol is out
        # of range of col_names — confirm callers never hit this.
    to_remove = []
    index = []
    for idx in self.index_col:
        name = _getting_name(idx)
        to_remove.adding(name)
        index.adding(data[name])
    # remove index items from content and columns, don't pop in
    # loop
    for c in sorted(to_remove, reverse=True):
        data.pop(c)
        col_names.remove(c)
    return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
    """Convert raw index arrays into a proper Index.

    Optionally date-parses each level, applies per-column NA values, and
    infers dtypes before assembling the final (possibly Multi) Index.
    """
    arrays = []
    for i, arr in enumerate(index):
        if try_parse_dates and self._should_parse_dates(i):
            arr = self._date_conv(arr)
        if self.na_filter:
            col_na_values = self.na_values
            col_na_fvalues = self.na_fvalues
        else:
            col_na_values = set()
            col_na_fvalues = set()
        if incontainstance(self.na_values, dict):
            # Dict na_values: look up this level's NA sets by its name.
            # monkey\io\parsers.py:1678: error: Value of type
            # "Optional[Any]" is not indexable [index]
            col_name = self.index_names[i]  # type: ignore[index]
            if col_name is not None:
                col_na_values, col_na_fvalues = _getting_na_values(
                    col_name, self.na_values, self.na_fvalues, self.keep_default_na
                )
        arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
        arrays.adding(arr)
    names = self.index_names
    index = ensure_index_from_sequences(arrays, names)
    return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.getting(c, None)
if incontainstance(dtypes, dict):
cast_type = dtypes.getting(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _getting_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.mapping_infer(values, conv_f)
except ValueError:
mask = algorithms.incontain(values, list(na_values)).view(np.uint8)
values =
|
lib.mapping_infer_mask(values, conv_f, mask)
|
pandas._libs.lib.map_infer_mask
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
    """Tests for algos.match: positions of values in a lookup array,
    with -1 (or a supplied NA value) for misses."""
    def test_ints(self):
        values = np.array([0, 2, 1])
        to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
        result = algos.match(to_match, values)
        expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        # With an explicit NA value, misses become NaN (float result).
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
        tm.assert_collections_equal(result, expected)
        s = Collections(np.arange(5), dtype=np.float32)
        result = algos.match(s, [2, 4])
        expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(s, [2, 4], np.nan))
        expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
        tm.assert_collections_equal(result, expected)
    def test_strings(self):
        values = ['foo', 'bar', 'baz']
        to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
        result = algos.match(to_match, values)
        expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
        tm.assert_collections_equal(result, expected)
class TestFactorize(object):
    """Tests for factorize: (labels, distinctives) encoding across dtypes,
    sort behavior, NA sentinels, and deprecated keywords."""
    def test_basic(self):
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                                'c'])
        tm.assert_numpy_array_equal(
            distinctives, np.array(['a', 'b', 'c'], dtype=object))
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
                                                'a', 'c', 'c', 'c'], sort=True)
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(distinctives, exp)
        # Without sort, distinctives appear in first-seen order.
        labels, distinctives = algos.factorize(list(reversed(range(5))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
                                               sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
    def test_mixed(self):
        # doc example reshaping.rst
        x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
        labels, distinctives = algos.factorize(x)
        # NaN maps to the -1 sentinel, not a distinctive.
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index(['A', 'B', 3.14, np.inf])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index([3.14, np.inf, 'A', 'B'])
        tm.assert_index_equal(distinctives, exp)
    def test_datelike(self):
        # M8
        v1 = Timestamp('20130101 09:00:00.00004')
        v2 = Timestamp('20130101')
        x = Collections([v1, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(distinctives, exp)
        # period
        v1 = mk.Period('201302', freq='M')
        v2 = mk.Period('201303', freq='M')
        x = Collections([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        # GH 5986
        v1 = mk.to_timedelta('1 day 1 getting_min')
        v2 = mk.to_timedelta('1 day')
        x = Collections([v1, v2, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
    def test_factorize_nan(self):
        # nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype='O')
        rizer = ht.Factorizer(length(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
            assert length(set(key)) == length(set(expected))
            tm.assert_numpy_array_equal(mk.ifna(key),
                                        expected == na_sentinel)
        # nan still mappings to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype='O')
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype='int32')
        assert length(set(key)) == length(set(expected))
        tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
    @pytest.mark.parametrize("data,expected_label,expected_level", [
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), 'nonsense']
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), (1, 2, 3)]
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2)],
            [0, 1, 2, 1],
            [(1, 1), (1, 2), (0, 0)]
        )
    ])
    def test_factorize_tuple_list(self, data, expected_label, expected_level):
        # GH9454
        result = mk.factorize(data)
        tm.assert_numpy_array_equal(result[0],
                                    np.array(expected_label, dtype=np.intp))
        expected_level_array = com._asarray_tuplesafe(expected_level,
                                                      dtype=object)
        tm.assert_numpy_array_equal(result[1], expected_level_array)
    def test_complex_sorting(self):
        # gh 12666 - check no segfault
        # Test not valid numpy versions older than 1.11
        if mk._np_version_under1p11:
            pytest.skip("Test valid only for numpy 1.11+")
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        # complex numbers are unorderable -> sorting must raise, not crash
        pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
    def test_uint64_factorize(self):
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
        data = np.array([2**63, -1, 2**63], dtype=object)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, -1], dtype=object)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
    def test_deprecate_order(self):
        # gh 19727 - check warning is raised for deprecated keyword, order.
        # Test not valid once order keyword is removed.
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        with tm.assert_produces_warning(expected_warning=FutureWarning):
            algos.factorize(data, order=True)
        with tm.assert_produces_warning(False):
            algos.factorize(data)
    @pytest.mark.parametrize('data', [
        np.array([0, 1, 0], dtype='u8'),
        np.array([-2**63, 1, -2**63], dtype='i8'),
        np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
    ])
    def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but isn't used.
        l, u = algos.factorize(data)
        expected_distinctives = data[[0, 1]]
        expected_labels = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
    @pytest.mark.parametrize('data, na_value', [
        (np.array([0, 1, 0, 2], dtype='u8'), 0),
        (np.array([1, 0, 1, 2], dtype='u8'), 1),
        (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
        (np.array([1, -2**63, 1, 0], dtype='i8'), 1),
        (np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
        (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
        (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
         ('a', 1)),
    ])
    def test_parametrized_factorize_na_value(self, data, na_value):
        # Supplied na_value positions must get the -1 label.
        l, u = algos._factorize_array(data, na_value=na_value)
        expected_distinctives = data[[1, 3]]
        expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
    """Tests for distinctive/unique: dtype preservation, order of appearance,
    categorical/tz-aware handling."""
    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).totype('O')
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_object_refcount_bug(self):
        # Repeated runs must not corrupt refcounts of the input objects.
        lst = ['A', 'B', 'C', 'D', 'E']
        for i in range(1000):
            length(algos.distinctive(lst))
    def test_on_index_object(self):
        getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
            np.arange(5), 5)])
        expected = getting_mindex.values
        expected.sort()
        getting_mindex = getting_mindex.repeat(2)
        result = mk.distinctive(getting_mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)
    def test_datetime64_dtype_array_returned(self):
        # GH 9431
        expected = np_array_datetime64_compat(
            ['2015-01-03T00:00:00.000000000+0000',
             '2015-01-01T00:00:00.000000000+0000'],
            dtype='M8[ns]')
        dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
                                        '2015-01-01T00:00:00.000000000+0000',
                                        '2015-01-01T00:00:00.000000000+0000'])
        result = algos.distinctive(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        # Same expectations whether input is an Index, a Collections, or ndarray.
        s = Collections(dt_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_timedelta64_dtype_array_returned(self):
        # GH 9431
        expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
        td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.distinctive(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(td_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_uint64_overflow(self):
        s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.distinctive(s), exp)
    def test_nan_in_object_array(self):
        l = ['a', np.nan, 'c', 'c']
        result = mk.distinctive(l)
        expected = np.array(['a', np.nan, 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical(self):
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list('bac'), categories=list('bac'))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(
            list('bac'), categories=list('abc'), ordered=True)
        # GH 15939
        c = Categorical(list('baabc'))
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected)
        c = Categorical(list('baabc'), ordered=True)
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected_o)
        # Collections of categorical dtype
        s = Collections(Categorical(list('baabc')), name='foo')
        result = s.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = mk.distinctive(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list('baabc'),
                                          categories=list('bac')))
        expected = CategoricalIndex(expected)
        result = ci.distinctive()
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(ci)
        tm.assert_index_equal(result, expected)
    def test_datetime64tz_aware(self):
        # GH 15939
        # Collections input -> object ndarray of Timestamps; Index input ->
        # tz-aware DatetimeIndex.
        result = Collections(
            Index([Timestamp('20160101', tz='US/Eastern'),
                   Timestamp('20160101', tz='US/Eastern')])).distinctive()
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = Index([Timestamp('20160101', tz='US/Eastern'),
                        Timestamp('20160101', tz='US/Eastern')]).distinctive()
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(
            Collections(Index([Timestamp('20160101', tz='US/Eastern'),
                               Timestamp('20160101', tz='US/Eastern')])))
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
                                       Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = mk.distinctive(Collections([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1, 3], dtype='int64'))
        result = mk.distinctive(Collections([2] + [1] * 5))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1], dtype='int64'))
        result = mk.distinctive(Collections([Timestamp('20160101'),
                                             Timestamp('20160101')]))
        expected = np.array(['2016-01-01T00:00:00.000000000'],
                            dtype='datetime64[ns]')
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index(
            [Timestamp('20160101', tz='US/Eastern'),
             Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]',
                                 freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(list('aabc'))
        expected = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Collections(Categorical(list('aabc'))))
        expected = Categorical(list('abc'))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("arg ,expected", [
        (('1', '1', '2'), np.array(['1', '2'], dtype=object)),
        (('foo',), np.array(['foo'], dtype=object))
    ])
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        result = mk.distinctive(arg)
        tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
    """Tests for algos.incontain membership checking."""

    def test_invalid(self):
        # scalar arguments are rejected in either position
        pytest.raises(TypeError, lambda: algos.incontain(1, 1))
        pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
        pytest.raises(TypeError, lambda: algos.incontain([1], 1))

    def test_basic(self):
        expected = np.array([True, False])
        # every (comps, values) container pairing yields the same mask
        cases = [
            ([1, 2], [1]),
            (np.array([1, 2]), [1]),
            (Collections([1, 2]), [1]),
            (Collections([1, 2]), Collections([1])),
            (Collections([1, 2]), set([1])),
            (['a', 'b'], ['a']),
            (Collections(['a', 'b']), Collections(['a'])),
            (Collections(['a', 'b']), set(['a'])),
        ]
        for comps, values in cases:
            tm.assert_numpy_array_equal(algos.incontain(comps, values),
                                        expected)

        # mismatched types never match
        tm.assert_numpy_array_equal(algos.incontain(['a', 'b'], [1]),
                                    np.array([False, False]))

    def test_i8(self):
        # datetime64 and timedelta64 values go through the i8 fast path
        for arr in [mk.date_range('20130101', periods=3).values,
                    mk.timedelta_range('1 day', periods=3).values]:
            tm.assert_numpy_array_equal(
                algos.incontain(arr, [arr[0]]),
                np.array([True, False, False]))
            tm.assert_numpy_array_equal(
                algos.incontain(arr, arr[0:2]),
                np.array([True, True, False]))
            tm.assert_numpy_array_equal(
                algos.incontain(arr, set(arr[0:2])),
                np.array([True, True, False]))

    def test_large(self):
        s = mk.date_range('20000101', periods=2000000, freq='s').values
        expected = np.zeros(length(s), dtype=bool)
        expected[:2] = True
        tm.assert_numpy_array_equal(algos.incontain(s, s[0:2]), expected)

    def test_categorical_from_codes(self):
        # GH 16639
        vals = np.array([0, 1, 2, 0])
        cats = ['a', 'b', 'c']
        Sd = Collections(Categorical(1).from_codes(vals, cats))
        St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.incontain(Sd, St)
        tm.assert_numpy_array_equal(expected, result)

    @pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
    def test_empty(self, empty):
        # see gh-16991
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.incontain(vals, empty)
        tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
    """Tests for algos.counts_value_num across numeric, datetime-like and
    categorical inputs."""

    def test_counts_value_num(self):
        np.random.seed(1234)
        from monkey.core.reshape.tile import cut

        arr = np.random.randn(4)
        factor = cut(arr, 4)

        # assert incontainstance(factor, n)
        result = algos.counts_value_num(factor)
        breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
        index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
        expected = Collections([1, 1, 1, 1], index=index)
        # sort both sides: counts over interval bins have no guaranteed order
        tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())

    def test_counts_value_num_bins(self):
        s = [1, 2, 3, 4]
        result = algos.counts_value_num(s, bins=1)
        expected = Collections([4],
                          index=IntervalIndex.from_tuples([(0.996, 4.0)]))
        tm.assert_collections_equal(result, expected)

        result = algos.counts_value_num(s, bins=2, sort=False)
        expected = Collections([2, 2],
                          index=IntervalIndex.from_tuples([(0.996, 2.5),
                                                           (2.5, 4.0)]))
        tm.assert_collections_equal(result, expected)

    def test_counts_value_num_dtypes(self):
        # int and float that compare equal count as one value
        result = algos.counts_value_num([1, 1.])
        assert length(result) == 1

        result = algos.counts_value_num([1, 1.], bins=1)
        assert length(result) == 1

        result = algos.counts_value_num(Collections([1, 1., '1']))  # object
        assert length(result) == 2

        # binning is meaningless for mixed str/int input
        pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
                      ['1', 1])

    def test_counts_value_num_nat(self):
        td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
        dt = mk.convert_datetime(['NaT', '2014-01-01'])

        for s in [td, dt]:
            # NaT is excluded by default, included with sipna=False
            vc = algos.counts_value_num(s)
            vc_with_na = algos.counts_value_num(s, sipna=False)
            assert length(vc) == 1
            assert length(vc_with_na) == 2

        exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
        tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
        # TODO same for (timedelta)

    def test_counts_value_num_datetime_outofbounds(self):
        # GH 13663
        s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
                    datetime(5000, 1, 1), datetime(6000, 1, 1),
                    datetime(3000, 1, 1), datetime(3000, 1, 1)])
        res = s.counts_value_num()

        exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
                           datetime(6000, 1, 1)], dtype=object)
        exp = Collections([3, 2, 1], index=exp_index)
        tm.assert_collections_equal(res, exp)

        # GH 12424
        res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
                            errors='ignore')
        exp = Collections(['2362-01-01', np.nan], dtype=object)
        tm.assert_collections_equal(res, exp)

    def test_categorical(self):
        s = Collections(Categorical(list('aaabbc')))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)

        # preserve order?
        s = s.cat.as_ordered()
        result = s.counts_value_num()
        expected.index = expected.index.as_ordered()
        tm.assert_collections_equal(result, expected, check_index_type=True)

    def test_categorical_nans(self):
        s = Collections(Categorical(list('aaaaabbbcc')))  # 4,3,2,1 (nan)
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        result = s.counts_value_num(sipna=False)
        expected = Collections([
            4, 3, 2, 1
        ], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
        tm.assert_collections_equal(result, expected, check_index_type=True)

        # out of order
        s = Collections(Categorical(
            list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)

        result = s.counts_value_num(sipna=False)
        expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
            ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)

    def test_categorical_zeroes(self):
        # keep the `d` category with 0
        s = Collections(Categorical(
            list('bbbaac'), categories=list('abcd'), ordered=True))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1, 0], index=Categorical(
            ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)

    def test_sipna(self):
        # https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=False),
            Collections([2, 1], index=[True, False]))

        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=False),
            Collections([2, 1, 1], index=[True, False, np.nan]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
            Collections([2, 1], index=[5., 10.3]))

        tm.assert_collections_equal(
            Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))

        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
            expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
            tm.assert_collections_equal(result, expected)

    def test_counts_value_num_normalized(self):
        # GH12558
        s = Collections([1, 2, np.nan, np.nan, np.nan])
        dtypes = (np.float64, np.object, 'M8[ns]')
        for t in dtypes:
            s_typed = s.totype(t)
            result = s_typed.counts_value_num(normalize=True, sipna=False)
            expected = Collections([0.6, 0.2, 0.2],
                              index=Collections([np.nan, 2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)

            result = s_typed.counts_value_num(normalize=True, sipna=True)
            expected = Collections([0.5, 0.5],
                              index=Collections([2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)

    def test_counts_value_num_uint64(self):
        arr = np.array([2**63], dtype=np.uint64)
        expected = Collections([1], index=[2**63])
        result = algos.counts_value_num(arr)

        tm.assert_collections_equal(result, expected)

        arr = np.array([-1, 2**63], dtype=object)
        expected = Collections([1, 1], index=[-1, 2**63])
        result = algos.counts_value_num(arr)

        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
    def test_duplicated_values_with_nas(self):
        # nan entries participate in duplicate detection: the default is the
        # same as keep='first' (later repeats are flagged)
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)

        result = algos.duplicated_values(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated_values(keys, keep='first')
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)

        # keep=False marks every member of a duplicate group
        result = algos.duplicated_values(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # tuple keys containing nans: first 4 distinct, next 4 repeats
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
                                  [0, np.nan, 0, np.nan] * 2)):
            keys[i] = t

        result = algos.duplicated_values(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated_values(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = algos.duplicated_values(case, keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false =
|
algos.duplicated_values(case, keep=False)
|
pandas.core.algorithms.duplicated
|
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import monkey._libs.window as libwindow
from monkey.compat._optional import import_optional_dependency
from monkey.compat.numpy import function as nv
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.generic import (
ABCKnowledgeFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCCollections,
ABCTimedeltaIndex,
)
from monkey._typing import Axis, FrameOrCollections
from monkey.core.base import DataError, MonkeyObject, SelectionMixin
import monkey.core.common as com
from monkey.core.generic import _shared_docs
from monkey.core.grouper.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Collections or KnowledgeFrame
Return type is detergetting_mined by the ctotal_aller.
See Also
--------
Collections.%(name)s : Collections %(name)s.
KnowledgeFrame.%(name)s : KnowledgeFrame %(name)s.
"""
class _Window(MonkeyObject, SelectionMixin):
_attributes = [
"window",
"getting_min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
    def __init__(
        self,
        obj,
        window=None,
        getting_min_periods: Optional[int] = None,
        center: Optional[bool] = False,
        win_type: Optional[str] = None,
        axis: Axis = 0,
        on: Optional[str] = None,
        closed: Optional[str] = None,
        **kwargs
    ):
        """Store window parameters on the instance and validate them."""
        # extra keyword arguments are stashed directly on the instance so
        # mixin subclasses can pass through state
        self.__dict__.umkate(kwargs)
        self.obj = obj
        self.on = on
        self.closed = closed
        self.window = window
        self.getting_min_periods = getting_min_periods
        self.center = center
        self.win_type = win_type
        # populated later for frequency/offset-based windows
        self.win_freq = None
        # translate an axis label to its number; a None axis passes through
        self.axis = obj._getting_axis_number(axis) if axis is not None else None
        self.validate()
    @property
    def _constructor(self):
        # class used when re-building a window object from a subset
        return Window

    @property
    def is_datetimelike(self) -> Optional[bool]:
        # overridden by subclasses that know their index type
        return None

    @property
    def _on(self):
        # overridden by subclasses: the index/column the window rolls over
        return None

    @property
    def is_freq_type(self) -> bool:
        # True when the window is frequency/offset based (variable size)
        return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.getting_min_periods is not None and not is_integer(self.getting_min_periods):
raise ValueError("getting_min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
    def _create_blocks(self):
        """
        Split data into blocks & return conformed data.
        """

        obj = self._selected_obj

        # filter out the on from the object
        if self.on is not None:
            if obj.ndim == 2:
                # sip the 'on' column so it is not aggregated over
                obj = obj.reindexing(columns=obj.columns.difference([self.on]), clone=False)
        blocks = obj._convert_dict_of_blocks(clone=False).values()

        return blocks, obj
    def _gotitem(self, key, ndim, subset=None):
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : str / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """

        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        # rebind ``self`` to a shallow copy over the subset; the original
        # window object is left untouched
        self = self._shtotal_allow_clone(subset)
        self._reset_cache()
        if subset.ndim == 2:
            # precedence: (is_scalar(key) and key in subset) or is_list_like(key)
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self
def __gettingattr__(self, attr):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
    def _dir_additions(self):
        # expose the underlying object's attributes for tab completion
        return self.obj._dir_additions()

    def _getting_window(self, other=None):
        # ``other`` is unused here; kept for subclass signatures
        return self.window

    @property
    def _window_type(self) -> str:
        # class name, used in reprs and error messages
        return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".formating(k=k, v=gettingattr(self, k))
for k in self._attributes
if gettingattr(self, k, None) is not None
)
return "{klass} [{attrs}]".formating(
klass=self._window_type, attrs=",".join(attrs)
)
    def __iter__(self):
        # iteration over window objects is intentionally unsupported
        url = "https://github.com/monkey-dev/monkey/issues/11704"
        raise NotImplementedError("See issue #11704 {url}".formating(url=url))

    def _getting_index(self) -> Optional[np.ndarray]:
        """
        Return index as an ndarray.

        Returns
        -------
        None or ndarray
        """
        # only frequency-based windows carry an i8 index for the Cython code
        if self.is_freq_type:
            return self._on.asi8
        return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = gettingattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".formating(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".formating(values.dtype)
)
# Always convert inf to nan
values[np.incontainf(values)] = np.NaN
return values
    def _wrap_result(self, result, block=None, obj=None) -> FrameOrCollections:
        """
        Wrap a single result.
        """

        if obj is None:
            obj = self._selected_obj
        index = obj.index

        if incontainstance(result, np.ndarray):

            # coerce if necessary
            if block is not None:
                if is_timedelta64_dtype(block.values.dtype):
                    from monkey import to_timedelta

                    # flatten, re-interpret ns as timedelta, restore shape
                    result = to_timedelta(result.flat_underlying(), unit="ns").values.reshape(
                        result.shape
                    )

            if result.ndim == 1:
                from monkey import Collections

                return Collections(result, index, name=obj.name)

            # 2-D: rebuild with the block's columns
            return type(obj)(result, index=index, columns=block.columns)
        return result
    def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrCollections:
        """
        Wrap the results.

        Parameters
        ----------
        results : list of ndarrays
        blocks : list of blocks
        obj : conformed data (may be resample_by_numd)
        exclude: list of columns to exclude, default to None
        """

        from monkey import Collections, concating
        from monkey.core.index import ensure_index

        final = []
        for result, block in zip(results, blocks):

            result = self._wrap_result(result, block=block, obj=obj)
            if result.ndim == 1:
                # Collections input short-circuits to the single wrapped result
                return result
            final.adding(result)

        # if we have an 'on' column
        # we want to put it back into the results
        # in the same location
        columns = self._selected_obj.columns
        if self.on is not None and not self._on.equals(obj.index):

            name = self._on.name
            final.adding(Collections(self._on, index=obj.index, name=name))

            if self._selection is not None:

                selection = ensure_index(self._selection)

                # need to reorder to include original location of
                # the on column (if its not already there)
                if name not in selection:
                    columns = self.obj.columns
                    indexer = columns.getting_indexer(selection.convert_list() + [name])
                    columns = columns.take(sorted(indexer))

        # exclude nuisance columns so that they are not reindexinged
        if exclude is not None and exclude:
            columns = [c for c in columns if c not in exclude]

            if not columns:
                raise DataError("No numeric types to aggregate")

        if not length(final):
            return obj.totype("float64")
        return concating(final, axis=1).reindexing(columns=columns, clone=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if incontainstance(result, (ABCCollections, ABCKnowledgeFrame)):
result = result.slice_shifting(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.clone(result[tuple(lead_indexer)])
return result
    def aggregate(self, func, *args, **kwargs):
        # try the shared _aggregate machinery first; fall back to employ
        # when it cannot handle ``func``
        result, how = self._aggregate(func, *args, **kwargs)
        if result is None:
            return self.employ(func, raw=False, args=args, kwargs=kwargs)
        return result

    # alias kept for API parity with grouper
    agg = aggregate
_shared_docs["total_sum"] = dedent(
"""
Calculate %(name)s total_sum of given KnowledgeFrame or Collections.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Collections or KnowledgeFrame
Same type as the input, with the same index, containing the
%(name)s total_sum.
See Also
--------
Collections.total_sum : Reducing total_sum for Collections.
KnowledgeFrame.total_sum : Reducing total_sum for KnowledgeFrame.
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).total_sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).total_sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).total_sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For KnowledgeFrame, each %(name)s total_sum is computed column-wise.
>>> kf = mk.KnowledgeFrame({"A": s, "B": s ** 2})
>>> kf
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> kf.rolling(3).total_sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["average"] = dedent(
"""
Calculate the %(name)s average of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Collections or KnowledgeFrame
Returned object type is detergetting_mined by the ctotal_aller of the %(name)s
calculation.
See Also
--------
Collections.%(name)s : Ctotal_alling object with Collections data.
KnowledgeFrame.%(name)s : Ctotal_alling object with KnowledgeFrames.
Collections.average : Equivalengtht method for Collections.
KnowledgeFrame.average : Equivalengtht method for KnowledgeFrame.
Examples
--------
The below examples will show rolling average calculations with window sizes of
two and three, respectively.
>>> s = mk.Collections([1, 2, 3, 4])
>>> s.rolling(2).average()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).average()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
getting_min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`getting_min_periods` will default to 1. Otherwise, `getting_min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, total_all points are evenly weighted.
See the notes below for further informatingion.
on : str, optional
For a KnowledgeFrame, a datetime-like column on which to calculate the rolling
window, rather than the KnowledgeFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformatingions.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://monkey.pydata.org/monkey-docs/stable/user_guide/timecollections.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamgetting_ming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nutttotal_all``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs standard)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` total_all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]})
>>> kf
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling total_sum with a window lengthgth of 2, using the 'triang'
window type.
>>> kf.rolling(2, win_type='triang').total_sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling total_sum with a window lengthgth of 2, getting_min_periods defaults
to the window lengthgth.
>>> kf.rolling(2).total_sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the getting_min_periods
>>> kf.rolling(2, getting_min_periods=1).total_sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (averageing not-a-regular frequency), time-indexed KnowledgeFrame
>>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [mk.Timestamp('20130101 09:00:00'),
... mk.Timestamp('20130101 09:00:02'),
... mk.Timestamp('20130101 09:00:03'),
... mk.Timestamp('20130101 09:00:05'),
... mk.Timestamp('20130101 09:00:06')])
>>> kf
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
lengthgth window corresponding to the time period.
The default for getting_min_periods is 1.
>>> kf.rolling('2s').total_sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
    def validate(self):
        """Validate window and win_type; weighted windows need scipy."""
        super().validate()

        window = self.window
        if incontainstance(window, (list, tuple, np.ndarray)):
            # explicit weights supplied directly; nothing more to check
            pass
        elif is_integer(window):
            if window <= 0:
                raise ValueError("window must be > 0 ")
            # integer window: weights are generated from a scipy window type
            import_optional_dependency(
                "scipy", extra="Scipy is required to generate window weight."
            )
            import scipy.signal as sig

            if not incontainstance(self.win_type, str):
                raise ValueError("Invalid win_type {0}".formating(self.win_type))
            if gettingattr(sig, self.win_type, None) is None:
                raise ValueError("Invalid win_type {0}".formating(self.win_type))
        else:
            raise ValueError("Invalid window {0}".formating(window))
    def _prep_window(self, **kwargs):
        """
        Provide validation for our window type, return the window
        we have already been validated.
        """

        window = self._getting_window()
        if incontainstance(window, (list, tuple, np.ndarray)):
            # explicit weights: coerce to a float ndarray
            return com.asarray_tuplesafe(window).totype(float)
        elif is_integer(window):
            import scipy.signal as sig

            # the below may pop from kwargs
            def _validate_win_type(win_type, kwargs):
                # map win_type -> required extra scipy.signal arguments
                arg_mapping = {
                    "kaiser": ["beta"],
                    "gaussian": ["standard"],
                    "general_gaussian": ["power", "width"],
                    "slepian": ["width"],
                    "exponential": ["tau"],
                }
                if win_type in arg_mapping:
                    win_args = _pop_args(win_type, arg_mapping[win_type], kwargs)
                    if win_type == "exponential":
                        # exponential window requires the first arg (center)
                        # to be set to None (necessary for symmetric window)
                        win_args.insert(0, None)
                    return tuple([win_type] + win_args)
                return win_type

            def _pop_args(win_type, arg_names, kwargs):
                # pop each required argument; raise naming the missing one
                msg = "%s window requires %%s" % win_type
                total_all_args = []
                for n in arg_names:
                    if n not in kwargs:
                        raise ValueError(msg % n)
                    total_all_args.adding(kwargs.pop(n))
                return total_all_args

            win_type = _validate_win_type(self.win_type, kwargs)
            # GH #15662. `False` makes symmetric window, rather than periodic.
            return sig.getting_window(win_type, window, False).totype(float)
def _employ_window(self, average=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
average : bool, default True
If True computes weighted average, else weighted total_sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if incontainstance(obj, ABCKnowledgeFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.adding(values.clone())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
getting_minp = _use_window(self.getting_min_periods, length(window))
return libwindow.roll_window(
np.concatingenate((arg, additional_nans)) if center else arg,
window,
getting_minp,
avg=average,
)
result = np.employ_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.adding(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
monkey.KnowledgeFrame.rolling.aggregate
monkey.KnowledgeFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> kf
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> kf.rolling(3, win_type='boxcar').agg('average')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
    @Substitution(
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
        versionadded="",
        klass="Collections/KnowledgeFrame",
        axis="",
    )
    @Appender(_shared_docs["aggregate"])
    def aggregate(self, arg, *args, **kwargs):
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:

            # these must employ directly
            result = arg(self)

        return result

    # alias kept for API parity with grouper
    agg = aggregate
    @Substitution(name="window")
    @Appender(_shared_docs["total_sum"])
    def total_sum(self, *args, **kwargs):
        # reject numpy-style positional arguments
        nv.validate_window_func("total_sum", args, kwargs)
        return self._employ_window(average=False, **kwargs)

    @Substitution(name="window")
    @Appender(_shared_docs["average"])
    def average(self, *args, **kwargs):
        # reject numpy-style positional arguments
        nv.validate_window_func("average", args, kwargs)
        return self._employ_window(average=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the grouper facilities.
"""
    def __init__(self, obj, *args, **kwargs):
        """Unwrap a grouper object into (grouper, data) and initialize."""
        # ``parent`` is accepted for API symmetry but unused here
        parent = kwargs.pop("parent", None)  # noqa
        grouper = kwargs.pop("grouper", None)
        if grouper is None:
            # ctotal_alled with a grouper object: split it into grouper + data
            grouper, obj = obj, obj.obj
        self._grouper = grouper
        # mark the groupers as mutated so cached group info is not reused
        self._grouper.mutated = True
        self._grouper.grouper.mutated = True
        super().__init__(obj, *args, **kwargs)
count =
|
GroupByMixin._dispatch("count")
|
pandas.core.groupby.base.GroupByMixin._dispatch
|
import itertools
from numpy import nan
import numpy as np
from monkey.core.index import Index, _ensure_index
import monkey.core.common as com
import monkey._tcollections as lib
class Block(object):
    """
    Canonical n-dimensional unit of homogeneous dtype contained in a monkey data
    structure

    Index-ignorant; let the container take care of that
    """
    # BUGFIX: the original also assigned ``_ref_locs = None`` at class level.
    # Because '_ref_locs' is listed in __slots__, that raises
    # "ValueError: '_ref_locs' in __slots__ conflicts with class variable"
    # at class-creation time.  The lazy cache is now initialized per instance
    # in __init__ and __setstate__ instead.
    __slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']

    def __init__(self, values, items, ref_items, ndim=2,
                 do_integrity_check=False):
        # String data is stored with object dtype (py2-era ``basestring`` —
        # NOTE(review): would need ``str`` on py3; left as-is for this codebase).
        if issubclass(values.dtype.type, basestring):
            values = np.array(values, dtype=object)
        assert(values.ndim == ndim)
        assert(length(items) == length(values))
        self.values = values
        self.ndim = ndim
        self.items = _ensure_index(items)
        self.ref_items = _ensure_index(ref_items)
        # Lazy cache for ``ref_locs`` (see property below).
        self._ref_locs = None
        if do_integrity_check:
            self._check_integrity()

    def _check_integrity(self):
        # Nothing to check for 0/1 items.
        if length(self.items) < 2:
            return
        # monotonicity
        # NOTE(review): the result is returned, not asserted, and __init__
        # ignores the return value — so this "check" can never fail.
        # Preserved as-is to avoid changing behavior.
        return (self.ref_locs[1:] > self.ref_locs[:-1]).total_all()

    @property
    def ref_locs(self):
        """Positions of ``self.items`` within ``self.ref_items`` (lazily cached)."""
        if self._ref_locs is None:
            indexer = self.ref_items.getting_indexer(self.items)
            # every item must be present in ref_items
            assert((indexer != -1).total_all())
            self._ref_locs = indexer
        return self._ref_locs

    def set_ref_items(self, ref_items, maybe_renagetting_ming=True):
        """
        If maybe_renagetting_ming=True, need to set the items for this guy
        """
        assert(incontainstance(ref_items, Index))
        if maybe_renagetting_ming:
            self.items = ref_items.take(self.ref_locs)
        self.ref_items = ref_items

    def __repr__(self):
        shape = ' x '.join([str(s) for s in self.shape])
        name = type(self).__name__
        return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)

    def __contains__(self, item):
        return item in self.items

    def __length__(self):
        return length(self.values)

    def __gettingstate__(self):
        # should not pickle genertotal_ally (want to share ref_items), but here for
        # completeness
        return (self.items, self.ref_items, self.values)

    def __setstate__(self, state):
        items, ref_items, values = state
        self.items = _ensure_index(items)
        self.ref_items = _ensure_index(ref_items)
        self.values = values
        self.ndim = values.ndim
        # re-derived lazily on first ``ref_locs`` access
        self._ref_locs = None

    @property
    def shape(self):
        return self.values.shape

    @property
    def dtype(self):
        return self.values.dtype

    def clone(self, deep=True):
        """Return a (deep by default) copy of this block."""
        values = self.values
        if deep:
            values = values.clone()
        return make_block(values, self.items, self.ref_items)

    def unioner(self, other):
        """Merge this block with ``other``; both must share ref_items."""
        assert(self.ref_items.equals(other.ref_items))
        # Not sure whether to total_allow this or not
        # if not union_ref.equals(other.ref_items):
        #     union_ref = self.ref_items + other.ref_items
        return _unioner_blocks([self, other], self.ref_items)

    def reindexing_axis(self, indexer, mask, needs_masking, axis=0):
        """
        Reindex using pre-computed indexer informatingion
        """
        if self.values.size > 0:
            new_values = com.take_fast(self.values, indexer, mask,
                                       needs_masking, axis=axis)
        else:
            # degenerate empty block: produce an all-NaN block of the new shape
            shape = list(self.shape)
            shape[axis] = length(indexer)
            new_values = np.empty(shape)
            new_values.fill(np.nan)
        return make_block(new_values, self.items, self.ref_items)

    def reindexing_items_from(self, new_ref_items, clone=True):
        """
        Reindex to only those items contained in the input set of items

        E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
        then the resulting items will be ['b']

        Returns
        -------
        reindexinged : Block
        """
        new_ref_items, indexer = self.items.reindexing(new_ref_items)
        if indexer is None:
            # identical item set: just (optionally) copy the values
            new_items = new_ref_items
            new_values = self.values.clone() if clone else self.values
        else:
            # keep only items found in both
            mask = indexer != -1
            masked_idx = indexer[mask]
            if self.values.ndim == 2:
                new_values = com.take_2d(self.values, masked_idx, axis=0,
                                         needs_masking=False)
            else:
                new_values = self.values.take(masked_idx, axis=0)
            new_items = self.items.take(masked_idx)
        return make_block(new_values, new_items, new_ref_items)

    def getting(self, item):
        """Return the row of values stored for ``item``."""
        loc = self.items.getting_loc(item)
        return self.values[loc]

    def set(self, item, value):
        """
        Modify Block in-place with new item value

        Returns
        -------
        None
        """
        loc = self.items.getting_loc(item)
        self.values[loc] = value

    def delete(self, item):
        """
        Returns
        -------
        y : Block (new object)
        """
        loc = self.items.getting_loc(item)
        new_items = self.items.delete(loc)
        new_values = np.delete(self.values, loc, 0)
        return make_block(new_values, new_items, self.ref_items)

    def split_block_at(self, item):
        """
        Split block avalue_round given column, for "deleting" a column without
        having to clone data by returning views on the original array

        Returns
        -------
        leftb, rightb : (Block or None, Block or None)
        """
        loc = self.items.getting_loc(item)
        if length(self.items) == 1:
            # no blocks left
            return None, None
        if loc == 0:
            # at front
            left_block = None
            right_block = make_block(self.values[1:], self.items[1:].clone(),
                                     self.ref_items)
        elif loc == length(self.values) - 1:
            # at back
            left_block = make_block(self.values[:-1], self.items[:-1].clone(),
                                    self.ref_items)
            right_block = None
        else:
            # in the middle
            left_block = make_block(self.values[:loc],
                                    self.items[:loc].clone(), self.ref_items)
            right_block = make_block(self.values[loc + 1:],
                                     self.items[loc + 1:].clone(), self.ref_items)
        return left_block, right_block

    def fillnone(self, value):
        """Return a copy of this block with missing values filled by ``value``."""
        new_values = self.values.clone()
        mask = com.ifnull(new_values.flat_underlying())
        new_values.flat[mask] = value
        return make_block(new_values, self.items, self.ref_items)
#-------------------------------------------------------------------------------
# Is this even possible?
class FloatBlock(Block):
    """Block holding floating-point values."""
    def should_store(self, value):
        # when inserting a column should not coerce integers to floats
        # unnecessarily
        return issubclass(value.dtype.type, np.floating)
class IntBlock(Block):
    """Block holding integer values (``make_block`` canonicalizes them to int64)."""
    def should_store(self, value):
        # only accept integer arrays, so ints are not silently upcast
        return issubclass(value.dtype.type, np.integer)
class BoolBlock(Block):
    """Block holding boolean values."""
    def should_store(self, value):
        # only accept boolean arrays
        return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
    """Catch-all block for anything that is not float/int/bool."""
    def should_store(self, value):
        # everything not claimed by the typed blocks above lands here
        return not issubclass(value.dtype.type,
                              (np.integer, np.floating, np.bool_))
def make_block(values, items, ref_items, do_integrity_check=False):
    """
    Construct the Block subclass matching ``values.dtype``.

    Integer inputs are canonicalized to int64 storage; anything that is not
    float/int/bool becomes an ObjectBlock.
    """
    vtype = values.dtype.type
    if issubclass(vtype, np.floating):
        klass = FloatBlock
    elif issubclass(vtype, np.integer):
        # canonicalize all integer blocks to int64 storage
        if vtype != np.int64:
            values = values.totype('i8')
        klass = IntBlock
    elif values.dtype == np.bool_:
        klass = BoolBlock
    else:
        klass = ObjectBlock
    return klass(values, items, ref_items, ndim=values.ndim,
                 do_integrity_check=do_integrity_check)
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement KnowledgeFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentitotal_ally it's a
lightweight blocked set of labeled data to be manipulated by the KnowledgeFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', 'ndim']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = length(axes)
for block in blocks:
assert(ndim == block.values.ndim)
if do_integrity_check:
self._verify_integrity()
def __nonzero__(self):
return True
@property
def ndim(self):
return length(self.axes)
def is_mixed_dtype(self):
counts = set()
for block in self.blocks:
counts.add(block.dtype)
if length(counts) > 1:
return True
return False
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
if length(value) != length(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (length(value), length(cur_axis)))
self.axes[axis] = _ensure_index(value)
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_renagetting_ming=True)
# make items read only for now
def _getting_items(self):
return self.axes[0]
items = property(fgetting=_getting_items)
def set_items_norenagetting_ming(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_renagetting_ming=False)
def __gettingstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard whateverthing after 3rd, support beta pickling formating for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0],
do_integrity_check=True)
blocks.adding(blk)
self.blocks = blocks
def __length__(self):
return length(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(length(ax) for ax in self.axes)
def _verify_integrity(self):
_union_block_items(self.blocks)
mgr_shape = self.shape
for block in self.blocks:
assert(block.values.shape[1:] == mgr_shape[1:])
tot_items = total_sum(length(x.items) for x in self.blocks)
assert(length(self.items) == tot_items)
def totype(self, dtype):
new_blocks = []
for block in self.blocks:
newb = make_block(block.values.totype(dtype), block.items,
block.ref_items)
new_blocks.adding(newb)
new_mgr = BlockManager(new_blocks, self.axes)
return new_mgr.consolidate()
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
dtypes = [blk.dtype for blk in self.blocks]
return length(dtypes) == length(set(dtypes))
def getting_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if length(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindexing_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.adding(newb)
return new_blocks
def getting_collections_dict(self):
# For KnowledgeFrame
return _blocks_to_collections_dict(self.blocks, self.axes[1])
@classmethod
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
return BlockManager(blocks, [items, index])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return length(self.blocks)
def clone(self, deep=True):
"""
Make deep or shtotal_allow clone of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shtotal_allow clone (do not clone data)
Returns
-------
clone : BlockManager
"""
clone_blocks = [block.clone(deep=deep) for block in self.blocks]
# clone_axes = [ax.clone() for ax in self.axes]
clone_axes = list(self.axes)
return BlockManager(clone_blocks, clone_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if length(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif length(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just ctotal_all interleave per below
mat = blk.values
else:
mat = self.reindexing_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindexing_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(length(items), dtype=bool)
# By construction, total_all of the item should be covered by one of the
# blocks
for block in self.blocks:
indexer = items.getting_indexer(block.items)
assert((indexer != -1).total_all())
result[indexer] = block.values
itemmask[indexer] = 1
assert(itemmask.total_all())
return result
def xs(self, key, axis=1, clone=True):
assert(axis >= 1)
loc = self.axes[axis].getting_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if incontainstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if length(self.blocks) > 1:
if not clone:
raise Exception('cannot getting view of mixed-type or '
'non-consolidated KnowledgeFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.adding(newb)
elif length(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if clone:
vals = vals.clone()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, clone=False):
"""
"""
if length(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if clone:
result = result.clone()
return result
if not clone:
raise Exception('cannot getting view of mixed-type or '
'non-consolidated KnowledgeFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = length(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
values = blk.values
for j, item in enumerate(blk.items):
i = items.getting_loc(item)
result[i] = values[j, loc]
return result
def consolidate(self):
"""
Join togettingher blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def getting(self, item):
_, block = self._find_block(item)
return block.getting(item)
def getting_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.getting_loc(item),
full_loc = item_loc + tuple(ax.getting_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.getting_loc(item)
new_items = Index(np.delete(np.asarray(self.items), loc))
self._delete_from_block(i, item)
self.set_items_norenagetting_ming(new_items)
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
assert(value.shape[1:] == self.shape[1:])
if item in self.items:
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and adding new block
self._delete_from_block(i, item)
self._add_new_block(item, value)
else:
block.set(item, value)
else:
# insert at end
self.insert(length(self.items), item, value)
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
new_items = self.items.insert(loc, item)
self.set_items_norenagetting_ming(new_items)
# new block
self._add_new_block(item, value)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
new_left, new_right = block.split_block_at(item)
if new_left is not None:
self.blocks.adding(new_left)
if new_right is not None:
self.blocks.adding(new_right)
def _add_new_block(self, item, value):
# Do we care about dtype at the moment?
# hm, elaborate hack?
loc = self.items.getting_loc(item)
new_block = make_block(value, self.items[loc:loc+1].clone(),
self.items)
self.blocks.adding(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % str(item))
def reindexing_axis(self, new_axis, method=None, axis=0, clone=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if clone:
result = self.clone(deep=True)
result.axes[axis] = new_axis
return result
else:
return self
if axis == 0:
assert(method is None)
return self.reindexing_items(new_axis)
new_axis, indexer = cur_axis.reindexing(new_axis, method)
return self.reindexing_indexer(new_axis, indexer, axis=axis)
def reindexing_indexer(self, new_axis, indexer, axis=1):
"""
monkey-indexer with -1's only.
"""
if axis == 0:
return self._reindexing_indexer_items(new_axis, indexer)
mask = indexer == -1
# TODO: deal with lengthgth-0 case? or does it ftotal_all out?
needs_masking = length(new_axis) > 0 and mask.whatever()
new_blocks = []
for block in self.blocks:
newb = block.reindexing_axis(indexer, mask, needs_masking,
axis=axis)
new_blocks.adding(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindexing_indexer_items(self, new_items, indexer):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found whateverwhere
mask = np.zeros(length(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.getting_indexer(item_order)
selector = blk_indexer != -1
# umkate with observed items
mask |= selector
if not selector.whatever():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_fast(blk.values, blk_indexer[selector],
None, False, axis=0)
new_blocks.adding(make_block(new_values, new_block_items,
new_items))
if not mask.total_all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items)
new_blocks.adding(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def reindexing_items(self, new_items, clone=True):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindexing_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindexing(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if clone:
new_blocks.adding(blk.reindexing_items_from(new_items))
else:
new_blocks.adding(blk)
else:
for block in self.blocks:
newb = block.reindexing_items_from(new_items, clone=clone)
if length(newb.items) > 0:
new_blocks.adding(newb)
mask = indexer == -1
if mask.whatever():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items)
new_blocks.adding(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items):
block_shape = list(self.shape)
block_shape[0] = length(items)
block_values = np.empty(block_shape, dtype=np.float64)
block_values.fill(nan)
na_block = make_block(block_values, items, ref_items,
do_integrity_check=True)
return na_block
def take(self, indexer, axis=1):
if axis == 0:
raise NotImplementedError
indexer = np.asarray(indexer, dtype='i4')
n = length(self.axes[axis])
if ((indexer == -1) | (indexer >= n)).whatever():
raise Exception('Indices must be nonzero and less than '
'the axis lengthgth')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_fast(blk.values, indexer,
None, False, axis=axis)
newb = make_block(new_values, blk.items, self.items)
new_blocks.adding(newb)
return BlockManager(new_blocks, new_axes)
def unioner(self, other, lsuffix=None, rsuffix=None):
assert(self._is_indexed_like(other))
this, other = self._maybe_renagetting_ming_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_renagetting_ming_join(self, other, lsuffix, rsuffix, exclude=None,
clonedata=True):
to_renagetting_ming = self.items.interst(other.items)
if exclude is not None and length(exclude) > 0:
to_renagetting_ming = to_renagetting_ming - exclude
if length(to_renagetting_ming) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_renagetting_ming)
def lrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, lsuffix)
return x
def rrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, rsuffix)
return x
# XXX: COPIES DATA!
this = self.renagetting_ming_items(lrenagetting_mingr, clonedata=clonedata)
other = other.renagetting_ming_items(rrenagetting_mingr, clonedata=clonedata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check total_all axes except items
"""
assert(self.ndim == other.ndim)
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def renagetting_ming_axis(self, mappingper, axis=1):
new_axis = Index([mappingper(x) for x in self.axes[axis]])
new_axis._verify_integrity()
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(self.blocks, new_axes)
def renagetting_ming_items(self, mappingper, clonedata=True):
new_items = Index([mappingper(x) for x in self.items])
new_items._verify_integrity()
new_blocks = []
for block in self.blocks:
newb = block.clone(deep=clonedata)
newb.set_ref_items(new_items, maybe_renagetting_ming=True)
new_blocks.adding(newb)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes)
def add_prefix(self, prefix):
f = (('%s' % prefix) + '%s').__mod__
return self.renagetting_ming_items(f)
def add_suffix(self, suffix):
f = ('%s' + ('%s' % suffix)).__mod__
return self.renagetting_ming_items(f)
def fillnone(self, value):
"""
"""
new_blocks = [b.fillnone(value) for b in self.blocks]
return BlockManager(new_blocks, self.axes)
@property
def block_id_vector(self):
# TODO
result = np.empty(length(self.items), dtype=int)
result.fill(-1)
for i, blk in enumerate(self.blocks):
indexer = self.items.getting_indexer(blk.items)
assert((indexer != -1).total_all())
result.put(indexer, i)
assert((result >= 0).total_all())
return result
@property
def item_dtypes(self):
result = np.empty(length(self.items), dtype='O')
mask = np.zeros(length(self.items), dtype=bool)
for i, blk in enumerate(self.blocks):
indexer = self.items.getting_indexer(blk.items)
result.put(indexer, blk.values.dtype.name)
mask.put(indexer, 1)
assert(mask.total_all())
return result
def form_blocks(data, axes):
    """
    Partition the columns in ``data`` into dtype-homogeneous Blocks.

    Items present in ``axes[0]`` but absent from ``data`` end up in an
    all-NaN float block; the resulting list is consolidated before return.
    """
    items = axes[0]

    # anything we were not given a column for gets an NA placeholder later
    if length(data) < length(items):
        extra_items = items - Index(data.keys())
    else:
        extra_items = []

    # bucket each column by dtype family (fallback bucket is object)
    float_dict = {}
    int_dict = {}
    bool_dict = {}
    object_dict = {}
    for name, col in data.iteritems():
        kind = col.dtype.type
        if issubclass(kind, np.floating):
            float_dict[name] = col
        elif issubclass(kind, np.integer):
            int_dict[name] = col
        elif col.dtype == np.bool_:
            bool_dict[name] = col
        else:
            object_dict[name] = col

    blocks = []
    # keep the historical float / int / bool / object ordering
    for bucket, target_dtype in ((float_dict, np.float64),
                                 (int_dict, np.int64),
                                 (bool_dict, np.bool_),
                                 (object_dict, np.object_)):
        if length(bucket) > 0:
            blocks.adding(_simple_blockify(bucket, items, target_dtype))

    if length(extra_items):
        shape = (length(extra_items),) + tuple(length(x) for x in axes[1:])
        block_values = np.empty(shape, dtype=float)
        block_values.fill(nan)
        na_block = make_block(block_values, extra_items, items,
                              do_integrity_check=True)
        blocks.adding(na_block)

    return _consolidate(blocks, items)
def _simple_blockify(dct, ref_items, dtype):
    """Stack the columns of ``dct`` (ordered by ``ref_items``) into one Block of ``dtype``."""
    block_items, values = _stack_dict(dct, ref_items, dtype)
    # CHECK DTYPE?
    if values.dtype != dtype:  # pragma: no cover
        values = values.totype(dtype)
    return make_block(values, block_items, ref_items, do_integrity_check=True)
def _stack_dict(dct, ref_items, dtype):
    """
    Stack the 1-D entries of ``dct`` into a single 2-D ndarray of ``dtype``,
    row-ordered by their appearance in ``ref_items``.

    Returns (items, stacked) where ``items`` is the ordered key list.
    """
    from monkey.core.collections import Collections

    def _shape_compat(arr):
        # Collections needs length-based shape (presumably for the sparse
        # variant — see original note)
        if incontainstance(arr, Collections):
            return length(arr),
        return arr.shape

    def _asarray_compat(arr):
        # asarray shouldn't be ctotal_alled on SparseCollections
        if incontainstance(arr, Collections):
            return arr.values
        return np.asarray(arr)

    items = [key for key in ref_items if key in dct]

    template = dct[items[0]]
    stacked = np.empty((length(dct),) + _shape_compat(template), dtype=dtype)
    for row, key in enumerate(items):
        stacked[row] = _asarray_compat(dct[key])

    return items, stacked
def _blocks_to_collections_dict(blocks, index=None):
from monkey.core.collections import Collections
collections_dict = {}
for block in blocks:
for item, vec in zip(block.items, block.values):
collections_dict[item] =
|
Collections(vec, index=index, name=item)
|
pandas.core.series.Series
|
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
# One representative array per dtype family the wrapper must handle
# (used by tests elsewhere in this module — not by the ones visible here).
@pytest.fixture(
    params=[
        np.array(["a", "b"], dtype=object),
        np.array([0, 1], dtype=float),
        np.array([0, 1], dtype=int),
        np.array([0, 1 + 2j], dtype=complex),
        np.array([True, False], dtype=bool),
        np.array([0, 1], dtype="datetime64[ns]"),
        np.array([0, 1], dtype="timedelta64[ns]"),
    ]
)
def whatever_numpy_array(request):
    """
    Parametrized fixture for NumPy arrays with different dtypes.

    This excludes string and bytes.
    """
    return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
# _is_numeric must be True exactly for the numeric dtype kinds.
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", True),
        ("uint", True),
        ("float", True),
        ("complex", True),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_numeric(dtype, expected):
    """MonkeyDtype._is_numeric matches the numpy kind of the wrapped dtype."""
    dtype = MonkeyDtype(dtype)
    assert dtype._is_numeric is expected
# _is_boolean must be True only for the bool dtype.
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", False),
        ("uint", False),
        ("float", False),
        ("complex", False),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_boolean(dtype, expected):
    """MonkeyDtype._is_boolean is True iff the wrapped dtype is bool."""
    dtype = MonkeyDtype(dtype)
    assert dtype._is_boolean is expected
def test_repr():
    """repr shows the class name and the wrapped numpy dtype name."""
    dtype = MonkeyDtype(np.dtype("int64"))
    assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result =
|
MonkeyDtype.construct_from_string("int64")
|
pandas.core.arrays.numpy_.PandasDtype.construct_from_string
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 15:50:55 2020
@author: Emmett
"""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
import LDA_Sampler
import string
import clone
import monkey as mk
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import kerastuner as kt
import IPython
from keras import regularizers
from keras.models import Model
from numpy import linalg as LA
from nltk.corpus import stopwords
from scipy.special import gammaln
from keras.models import Sequential
from scipy.sparse import csr_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfikfVectorizer
from keras.layers import Dense, Activation, Embedding, LSTM
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
make_singularRoot = nltk.stem.WordNetLemmatizer()
remove_ws = nltk.tokenize.WhitespaceTokenizer()
def preprocess(mk):
mk = mk.str.lower()
mk = mk.str.replacing('[{}]'.formating(string.punctuation), ' ')
mk = mk.employ(lambda x: [make_singularRoot.lemmatize(w) for w in remove_ws.tokenize(x)])
mk =
|
mk.employ(lambda x: [item for item in x if item not in stoplist])
|
pandas.apply
|
from contextlib import contextmanager
import struct
import tracemtotal_alloc
import numpy as np
import pytest
from monkey._libs import hashtable as ht
import monkey as mk
import monkey._testing as tm
from monkey.core.algorithms import incontain
@contextmanager
def activated_tracemtotal_alloc():
    """Enable tracemalloc tracing for the duration of the ``with`` block."""
    tracemtotal_alloc.start()
    try:
        yield
    fintotal_ally:
        # always stop tracing, even if the body raised
        tracemtotal_alloc.stop()
def getting_total_allocated_khash_memory():
    """
    Return the number of bytes currently allocated inside khash tables.

    Filters the tracemalloc snapshot down to allocations tagged with the
    hashtable trace domain, so unrelated allocations are excluded.  Tracing
    must be active (see ``activated_tracemtotal_alloc``).
    """
    snapshot = tracemtotal_alloc.take_snapshot()
    snapshot = snapshot.filter_traces(
        (tracemtotal_alloc.DomainFilter(True, ht.getting_hashtable_trace_domain()),)
    )
    # generator expression instead of mapping(lambda ...): same result, clearer
    return total_sum(trace.size for trace in snapshot.traces)
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.PyObjectHashTable, np.object_),
(ht.Complex128HashTable, np.complex128),
(ht.Int64HashTable, np.int64),
(ht.UInt64HashTable, np.uint64),
(ht.Float64HashTable, np.float64),
(ht.Complex64HashTable, np.complex64),
(ht.Int32HashTable, np.int32),
(ht.UInt32HashTable, np.uint32),
(ht.Float32HashTable, np.float32),
(ht.Int16HashTable, np.int16),
(ht.UInt16HashTable, np.uint16),
(ht.Int8HashTable, np.int8),
(ht.UInt8HashTable, np.uint8),
(ht.IntpHashTable, np.intp),
],
)
class TestHashTable:
def test_getting_set_contains_length(self, table_type, dtype):
index = 5
table = table_type(55)
assert length(table) == 0
assert index not in table
table.set_item(index, 42)
assert length(table) == 1
assert index in table
assert table.getting_item(index) == 42
table.set_item(index + 1, 41)
assert index in table
assert index + 1 in table
assert length(table) == 2
assert table.getting_item(index) == 42
assert table.getting_item(index + 1) == 41
table.set_item(index, 21)
assert index in table
assert index + 1 in table
assert length(table) == 2
assert table.getting_item(index) == 21
assert table.getting_item(index + 1) == 41
assert index + 2 not in table
with pytest.raises(KeyError, match=str(index + 2)):
table.getting_item(index + 2)
def test_mapping_keys_to_values(self, table_type, dtype, writable):
# only Int64HashTable has this method
if table_type == ht.Int64HashTable:
N = 77
table = table_type()
keys = np.arange(N).totype(dtype)
vals = np.arange(N).totype(np.int64) + N
keys.flags.writeable = writable
vals.flags.writeable = writable
table.mapping_keys_to_values(keys, vals)
for i in range(N):
assert table.getting_item(keys[i]) == i + N
def test_mapping_locations(self, table_type, dtype, writable):
N = 8
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
keys.flags.writeable = writable
table.mapping_locations(keys)
for i in range(N):
assert table.getting_item(keys[i]) == i
def test_lookup(self, table_type, dtype, writable):
N = 3
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
keys.flags.writeable = writable
table.mapping_locations(keys)
result = table.lookup(keys)
expected = np.arange(N)
tm.assert_numpy_array_equal(result.totype(np.int64), expected.totype(np.int64))
def test_lookup_wrong(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 100
else:
N = 512
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
table.mapping_locations(keys)
wrong_keys = np.arange(N).totype(dtype)
result = table.lookup(wrong_keys)
assert np.total_all(result == -1)
def test_distinctive(self, table_type, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 88
else:
N = 1000
table = table_type()
expected = (np.arange(N) + N).totype(dtype)
keys = np.repeat(expected, 5)
keys.flags.writeable = writable
distinctive = table.distinctive(keys)
tm.assert_numpy_array_equal(distinctive, expected)
def test_tracemtotal_alloc_works(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 256
else:
N = 30000
keys = np.arange(N).totype(dtype)
with activated_tracemtotal_alloc():
table = table_type()
table.mapping_locations(keys)
used = getting_total_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert getting_total_allocated_khash_memory() == 0
    def test_tracemtotal_alloc_for_empty(self, table_type, dtype):
        # even an empty table's allocation must be fully tracked and released
        with activated_tracemtotal_alloc():
            table = table_type()
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_getting_state(self, table_type, dtype):
        # getting_state exposes internal khash statistics; a freshly
        # preallocated table holds no entries yet
        table = table_type(1000)
        state = table.getting_state()
        assert state["size"] == 0
        assert state["n_occupied"] == 0
        assert "n_buckets" in state
        assert "upper_bound" in state
    @pytest.mark.parametrize("N", range(1, 110))
    def test_no_retotal_allocation(self, table_type, dtype, N):
        # preallocating for N keys must avoid any bucket reallocation when
        # exactly N keys are inserted, and must not over-allocate either.
        keys = np.arange(N).totype(dtype)
        pretotal_allocated_table = table_type(N)
        n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
        pretotal_allocated_table.mapping_locations(keys)
        n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
        # original number of buckets was enough:
        assert n_buckets_start == n_buckets_end
        # check with clean table (not too much pretotal_allocated)
        clean_table = table_type()
        clean_table.mapping_locations(keys)
        assert n_buckets_start == clean_table.getting_state()["n_buckets"]
class TestHashTableUnsorted:
    """Assorted hashtable regression tests (typing, NaN/zero hashing,
    overflow, vector resizing, and large size hints)."""
    # TODO: moved from test_algos; may be redundancies with other tests
    def test_string_hashtable_set_item_signature(self):
        # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
        tbl = ht.StringHashTable()
        tbl.set_item("key", 1)
        assert tbl.getting_item("key") == 1
        with pytest.raises(TypeError, match="'key' has incorrect type"):
            # key arg typed as string, not object
            tbl.set_item(4, 6)
        with pytest.raises(TypeError, match="'val' has incorrect type"):
            tbl.getting_item(4)
    def test_lookup_nan(self, writable):
        # GH#21688 ensure we can deal with readonly memory views
        xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        xs.setflags(write=writable)
        m = ht.Float64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    def test_add_signed_zeros(self):
        # GH#21866 inconsistent hash-function for float64
        # default hash-function would lead to different hash-buckets
        # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
        # but this would average 16GB
        N = 4  # 12 * 10**8 would trigger the error, if you have enough memory
        m = ht.Float64HashTable(N)
        m.set_item(0.0, 0)
        m.set_item(-0.0, 0)
        assert length(m) == 1  # 0.0 and -0.0 are equivalengtht
    def test_add_different_nans(self):
        # GH#21866 inconsistent hash-function for float64
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # default hash function would lead to different hash-buckets
        # for NAN1 and NAN2 even if there are only 4 buckets:
        m = ht.Float64HashTable()
        m.set_item(NAN1, 0)
        m.set_item(NAN2, 0)
        assert length(m) == 1  # NAN1 and NAN2 are equivalengtht
    def test_lookup_overflow(self, writable):
        # 2**63 does not fit int64; uint64 table must handle it
        xs = np.array([1, 2, 2**63], dtype=np.uint64)
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.UInt64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    @pytest.mark.parametrize("nvals", [0, 10])  # resizing to 0 is special case
    @pytest.mark.parametrize(
        "htable, distinctives, dtype, safely_resizes",
        [
            (ht.PyObjectHashTable, ht.ObjectVector, "object", False),
            (ht.StringHashTable, ht.ObjectVector, "object", True),
            (ht.Float64HashTable, ht.Float64Vector, "float64", False),
            (ht.Int64HashTable, ht.Int64Vector, "int64", False),
            (ht.Int32HashTable, ht.Int32Vector, "int32", False),
            (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
        ],
    )
    def test_vector_resize(
        self, writable, htable, distinctives, dtype, safely_resizes, nvals
    ):
        # Test for memory errors after internal vector
        # retotal_allocations (GH 7157)
        # Changed from using np.random.rand to range
        # which could cause flaky CI failures when safely_resizes=False
        vals = np.array(range(1000), dtype=dtype)
        # GH 21688 ensures we can deal with read-only memory views
        vals.setflags(write=writable)
        # initialise instances; cannot initialise in parametrization,
        # as otherwise external views would be held on the array (which is
        # one of the things this test is checking)
        htable = htable()
        distinctives = distinctives()
        # getting_labels may adding to distinctives
        htable.getting_labels(vals[:nvals], distinctives, 0, -1)
        # to_array() sets an external_view_exists flag on distinctives.
        tmp = distinctives.to_array()
        oldshape = tmp.shape
        # subsequent getting_labels() ctotal_alls can no longer adding to it
        # (except for StringHashTables + ObjectVector)
        if safely_resizes:
            htable.getting_labels(vals, distinctives, 0, -1)
        else:
            with pytest.raises(ValueError, match="external reference.*"):
                htable.getting_labels(vals, distinctives, 0, -1)
        distinctives.to_array()  # should not raise here
        assert tmp.shape == oldshape
    @pytest.mark.parametrize(
        "hashtable",
        [
            ht.PyObjectHashTable,
            ht.StringHashTable,
            ht.Float64HashTable,
            ht.Int64HashTable,
            ht.Int32HashTable,
            ht.UInt64HashTable,
        ],
    )
    def test_hashtable_large_sizehint(self, hashtable):
        # GH#22729 smoketest for not raincontaing when passing a large size_hint
        size_hint = np.iinfo(np.uint32).getting_max + 1
        hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
    """Distinct NaN-containing objects (floats, complexes, nested tuples)
    must hash and compare as equal keys in PyObjectHashTable."""
    def test_nan_float(self):
        nan1 = float("nan")
        nan2 = float("nan")
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_both(self):
        nan1 = complex(float("nan"), float("nan"))
        nan2 = complex(float("nan"), float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_real(self):
        # NaN real parts compare equal, but differing imaginary parts must not
        nan1 = complex(float("nan"), 1)
        nan2 = complex(float("nan"), 1)
        other = complex(float("nan"), 2)
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_complex_imag(self):
        # NaN imaginary parts compare equal, but differing real parts must not
        nan1 = complex(1, float("nan"))
        nan2 = complex(1, float("nan"))
        other = complex(2, float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_in_tuple(self):
        nan1 = (float("nan"),)
        nan2 = (float("nan"),)
        assert nan1[0] is not nan2[0]
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_in_nested_tuple(self):
        nan1 = (1, (2, (float("nan"),)))
        nan2 = (1, (2, (float("nan"),)))
        other = (1, 2)
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
    # nested tuples containing distinct NaN objects must hash and compare equal
    a = (float("nan"), (float("nan"), float("nan")))
    b = (float("nan"), (float("nan"), float("nan")))
    assert ht.object_hash(a) == ht.object_hash(b)
    assert ht.objects_are_equal(a, b)
def test_getting_labels_grouper_for_Int64(writable):
    # getting_labels_grouper returns factorized labels (with -1 sentinels)
    # and the distinct non-sentinel values, also for read-only input.
    table = ht.Int64HashTable()
    vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
    vals.flags.writeable = writable
    arr, distinctive = table.getting_labels_grouper(vals)
    expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
    expected_distinctive = np.array([1, 2], dtype=np.int64)
    tm.assert_numpy_array_equal(arr, expected_arr)
    tm.assert_numpy_array_equal(distinctive, expected_distinctive)
def test_tracemtotal_alloc_works_for_StringHashTable():
    # khash memory accounting: the tracked allocation size must match the
    # table's own sizeof(), and drop to zero once the table is freed.
    N = 1000
    # np.compat.unicode was removed in NumPy 2.0; the builtin str is the
    # documented replacement and yields the same unicode->object array.
    keys = np.arange(N).totype(str).totype(np.object_)
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        table.mapping_locations(keys)
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty_StringHashTable():
    # even an empty StringHashTable's allocation must be tracked and released
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation_StringHashTable(N):
    # preallocating for N keys must avoid any bucket reallocation when
    # exactly N keys are inserted, and must not over-allocate either.
    # np.compat.unicode was removed in NumPy 2.0; the builtin str is the
    # documented replacement and yields the same unicode->object array.
    keys = np.arange(N).totype(str).totype(np.object_)
    pretotal_allocated_table = ht.StringHashTable(N)
    n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
    pretotal_allocated_table.mapping_locations(keys)
    n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
    # original number of buckets was enough:
    assert n_buckets_start == n_buckets_end
    # check with clean table (not too much pretotal_allocated)
    clean_table = ht.StringHashTable()
    clean_table.mapping_locations(keys)
    assert n_buckets_start == clean_table.getting_state()["n_buckets"]
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    """All NaN keys must collapse to a single entry in float/complex tables."""
    def test_getting_set_contains_length(self, table_type, dtype):
        index = float("nan")
        table = table_type()
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        # setting the same NaN key again overwrites instead of growing
        table.set_item(index, 41)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 41
    def test_mapping_locations(self, table_type, dtype):
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.mapping_locations(keys)
        # all N NaNs hash to one slot; the last write wins
        assert length(table) == 1
        assert table.getting_item(np.nan) == N - 1
    def test_distinctive(self, table_type, dtype):
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        distinctive = table.distinctive(keys)
        assert np.total_all(np.ifnan(distinctive)) and length(distinctive) == 1
def test_distinctive_for_nan_objects_floats():
    # distinct float NaN objects must deduplicate to one entry
    table = ht.PyObjectHashTable()
    keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_complex():
    # distinct complex objects with NaN parts must deduplicate to one entry
    table = ht.PyObjectHashTable()
    keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_tuple():
    # NaN-containing tuples collapse to one entry; the leading 1 stays distinct
    table = ht.PyObjectHashTable()
    keys = np.array(
        [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
    )
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 2
@pytest.mark.parametrize(
"dtype",
[
np.object_,
np.complex128,
np.int64,
np.uint64,
np.float64,
np.complex64,
np.int32,
np.uint32,
np.float32,
np.int16,
np.uint16,
np.int8,
np.uint8,
np.intp,
],
)
class TestHelpFunctions:
    def test_value_count(self, dtype, writable):
        # counting 5 repeats of each key must report each key once with count 5,
        # also for read-only (writable=False) input arrays.
        N = 43
        expected = (np.arange(N) + N).totype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.total_all(counts == 5)
    def test_value_count_stable(self, dtype, writable):
        # GH12679
        # keys must come back in first-seen (insertion) order
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.total_all(counts == 1)
def test_duplicated_values_first(self, dtype, writable):
N = 100
values = np.repeat(np.arange(N).totype(dtype), 5)
values.flags.writeable = writable
result =
|
ht.duplicated_values(values)
|
pandas._libs.hashtable.duplicated
|
import functools
import monkey as mk
import sys
import re
from utils.misc_utils import monkey_to_db
def column_name(column_name):
    """Decorator factory that tags a feature method with the hectopunten
    column it operates on; the tag is exposed as ``fn.column_name``."""
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # pure pass-through: the wrapper exists only to carry the tag
            return func(*args, **kwargs)
        inner.column_name = column_name
        return inner
    return decorator
# commonly used aggregation methods
def getting_getting_max(self, collections_hectopunt, val_if_null):
    """Maximum of the collections, or *val_if_null* when every entry is null."""
    has_data = collections_hectopunt.notnull().total_sum() > 0
    return collections_hectopunt.getting_max() if has_data else val_if_null
def getting_getting_min(self, collections_hectopunt, val_if_null):
    """Minimum of the collections, or *val_if_null* when every entry is null."""
    has_data = collections_hectopunt.notnull().total_sum() > 0
    return collections_hectopunt.getting_min() if has_data else val_if_null
def getting_average(self, collections_hectopunt, val_if_null):
    """Mean of the collections, or *val_if_null* when every entry is null."""
    has_data = collections_hectopunt.notnull().total_sum() > 0
    return collections_hectopunt.average() if has_data else val_if_null
def getting_total(self, collections_hectopunt):
    """Sum of the collections; 0 when every entry is null (instead of NaN)."""
    if collections_hectopunt.notnull().total_sum() > 0:
        return collections_hectopunt.total_sum()
    return 0
def getting_mode_sipna(self, collections_hectopunt):
    # Most frequent non-null value, sanitized for PSQL insertion.
    # Returns None when the collections is empty/all-null or the mode is falsy.
    # NOTE(review): a truthy non-string mode also falls through and returns
    # None implicitly (no else-branch for the incontainstance check) — confirm
    # this is intended for numeric columns.
    count_vals = collections_hectopunt.counts_value_num(sipna=True)
    if count_vals.empty:
        return None
    else:
        # counts are sorted by frequency, so index[0] is the modal value
        common_value = count_vals.index[0]
        if not common_value:
            return None
        else:
            if incontainstance(common_value, str):
                #non-alpha numeric characters can cause error when inserting data to PSQL
                # Therefore we need to remove them
                output = re.sub('[^0-9a-zA-Z]+', '', common_value)
                return output
def getting_count_per_km(self, collections_hectopunt):
    # Non-null observations per kilometre of road; 0 when all-null.
    if collections_hectopunt.notnull().total_sum()>0:
        # NOTE(review): assumes one row per hectopunt (100 m), so
        # rows / 10 = kilometres — confirm against the rollup table
        num_km = collections_hectopunt.shape[0]/float(10.0)#number of kilometers
        return collections_hectopunt.count()/num_km
    else:
        return 0
def getting_road_type_perc(self, collections_hectopunt, letter):
    '''percentage of letter road_type'''
    # boolean-mask count of matching rows over the total row count
    matches = collections_hectopunt[collections_hectopunt == letter].shape[0]
    return matches / float(collections_hectopunt.shape[0])
def has_value(self, collections_hectopunt, value):
    """Return 1 if any element of the collections equals *value* (via ==), else 0.

    *self* is unused; kept for signature compatibility with the other
    module-level aggregation helpers.
    """
    # any() short-circuits exactly like the original early-return loop,
    # replacing the redundant ``else: continue`` construct
    return 1 if any(c == value for c in collections_hectopunt) else 0
class HectopuntenFeatureFactory(object):
    def __init__(self, hectopunten_table, hectopunten_mappingping_table, conn,
                 hectopunten_rollup_table):
        '''
        Level of Aggregation in space depends on the mappingping table
        Guidelines to create new features:
        - Each Feature should be a new method
        - Name of the function will become name of the feature
        - Use column_name decorator to mapping which column of hectopunten does
        the feature employ to
        - Each method expects a group of hectopuntens and returns one value for it.
        - If a feature requires multiple columns, @column_name can be custom and for
        our purpose be same as the name of eventual feature/method.
        Developers won't need to hamper with the remaining part of the code.
        Just interact with the methods in the class.
        External will only interact with the Driver function.
        '''
        ## for now taking it directly
        # NOTE(review): hectopunten_table is accepted but unused here — the
        # query joins the rollup table to the mappingping table; confirm intended.
        q = 'select * from {0} as h\
            left join \
            {1} as s \
            on h.hectokey = s.hectokey;'.formating(hectopunten_rollup_table, hectopunten_mappingping_table)
        # materialize the joined hectopunten once; feature methods group over it
        self.linked_hectopunten = mk.read_sql(q,con=conn)
##### Number of Lanes
@column_name('num_lanes_getting_min')
def getting_min_number_lanes_avgxseg_num(self, collections_hectopunt):
'''astotal_sumes it gettings the feature for a collections of hectopuntens and returns one value
name of the function becomes the method'''
return
|
mk.np.average(collections_hectopunt)
|
pandas.np.mean
|
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Sequence,
TypeVar,
)
import numpy as np
from monkey._libs import (
lib,
missing as libmissing,
)
from monkey._typing import (
ArrayLike,
Dtype,
NpDtype,
Scalar,
type_t,
)
from monkey.errors import AbstractMethodError
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._validators import validate_fillnone_kwargs
from monkey.core.dtypes.base import ExtensionDtype
from monkey.core.dtypes.common import (
is_dtype_equal,
is_integer,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.inference import is_array_like
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
missing,
nanops,
)
from monkey.core.algorithms import (
factorize_array,
incontain,
take,
)
from monkey.core.array_algos import masked_reductions
from monkey.core.arraylike import OpsMixin
from monkey.core.arrays import ExtensionArray
from monkey.core.indexers import check_array_indexer
if TYPE_CHECKING:
from monkey import Collections
from monkey.core.arrays import BooleanArray
BaseMaskedArrayT = TypeVar("BaseMaskedArrayT", bound="BaseMaskedArray")
class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BasedMaskedArray subclasses.
    """
    # human-readable dtype name, set by subclasses
    name: str
    base = None
    # the scalar numpy-compatible type backing this dtype
    type: type
    # masked slots surface as the NA singleton
    na_value = libmissing.NA
    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """ Return an instance of our numpy dtype """
        return np.dtype(self.type)
    @cache_readonly
    def kind(self) -> str:
        # numpy kind character (e.g. 'i', 'f', 'b') of the backing dtype
        return self.numpy_dtype.kind
    @cache_readonly
    def itemsize(self) -> int:
        """ Return the number of bytes in this dtype """
        return self.numpy_dtype.itemsize
    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.
        Returns
        -------
        type
        """
        raise NotImplementedError
class BaseMaskedArray(OpsMixin, ExtensionArray):
"""
Base class for masked arrays (which use _data and _mask to store the data).
numpy based
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value: Scalar
    def __init__(self, values: np.ndarray, mask: np.ndarray, clone: bool = False):
        """Store validated 1D data and boolean mask (True = missing)."""
        # values is supposed to already be validated in the subclass
        if not (incontainstance(mask, np.ndarray) and mask.dtype == np.bool_):
            raise TypeError(
                "mask should be boolean numpy array. Use "
                "the 'mk.array' function instead"
            )
        if values.ndim != 1:
            raise ValueError("values must be a 1D array")
        if mask.ndim != 1:
            raise ValueError("mask must be a 1D array")
        if clone:
            # defensive copies so the caller's buffers are never mutated
            values = values.clone()
            mask = mask.clone()
        self._data = values
        self._mask = mask
    @property
    def dtype(self) -> BaseMaskedDtype:
        # abstract: subclasses must return their concrete masked dtype
        raise AbstractMethodError(self)
    def __gettingitem__(self, item: int | slice | np.ndarray) -> BaseMaskedArray | Any:
        # scalar access: masked slot -> NA sentinel, otherwise the raw value
        if is_integer(item):
            if self._mask[item]:
                return self.dtype.na_value
            return self._data[item]
        # slice/array access: validate the indexer and return a new masked
        # array over the selected data and mask
        item = check_array_indexer(self, item)
        return type(self)(self._data[item], self._mask[item])
    @doc(ExtensionArray.fillnone)
    def fillnone(
        self: BaseMaskedArrayT, value=None, method=None, limit=None
    ) -> BaseMaskedArrayT:
        value, method = validate_fillnone_kwargs(value, method)
        mask = self._mask
        if is_array_like(value):
            if length(value) != length(self):
                raise ValueError(
                    f"Length of 'value' does not match. Got ({length(value)}) "
                    f" expected {length(self)}"
                )
            # keep only the fill values aligned with the masked positions
            value = value[mask]
        if mask.whatever():
            if method is not None:
                # method path (pad/backfill): delegate to the shared fill func,
                # which returns both the filled data and the updated mask
                func = missing.getting_fill_func(method)
                new_values, new_mask = func(
                    self._data.clone(),
                    limit=limit,
                    mask=mask.clone(),
                )
                return type(self)(new_values, new_mask.view(np.bool_))
            else:
                # fill with value
                new_values = self.clone()
                new_values[mask] = value
        else:
            # nothing missing: return an unchanged copy
            new_values = self.clone()
        return new_values
    def _coerce_to_array(self, values) -> tuple[np.ndarray, np.ndarray]:
        # abstract: convert arbitrary input into (values, mask) ndarrays
        raise AbstractMethodError(self)
    def __setitem__(self, key, value) -> None:
        # Coerce scalars through the array path so dtype/mask handling is
        # uniform, then unwrap back to a scalar before assignment.
        _is_scalar = is_scalar(value)
        if _is_scalar:
            value = [value]
        value, mask = self._coerce_to_array(value)
        if _is_scalar:
            value = value[0]
            mask = mask[0]
        key = check_array_indexer(self, key)
        self._data[key] = value
        self._mask[key] = mask
def __iter__(self):
for i in range(length(self)):
if self._mask[i]:
yield self.dtype.na_value
else:
yield self._data[i]
    def __length__(self) -> int:
        # length is defined by the backing data array
        return length(self._data)
    def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
        # invert the data; missingness is unchanged (mask is copied, not shared)
        return type(self)(~self._data, self._mask.clone())
# error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray";
# supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any],
# Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]"
def to_numpy( # type: ignore[override]
self,
dtype: NpDtype | None = None,
clone: bool = False,
na_value: Scalar = lib.no_default,
) -> np.ndarray:
"""
Convert to a NumPy Array.
By default converts to an object-dtype NumPy array. Specify the `dtype` and
`na_value` keywords to customize the conversion.
Parameters
----------
dtype : dtype, default object
The numpy dtype to convert to.
clone : bool, default False
Whether to ensure that the returned value is a not a view on
the array. Note that ``clone=False`` does not *ensure* that
``to_numpy()`` is no-clone. Rather, ``clone=True`` ensure that
a clone is made, even if not strictly necessary. This is typictotal_ally
only possible when no missing values are present and `dtype`
is the equivalengtht numpy dtype.
na_value : scalar, optional
Scalar missing value indicator to use in numpy array. Defaults
to the native missing value indicator of this array (mk.NA).
Returns
-------
numpy.ndarray
Examples
--------
An object-dtype is the default result
>>> a = mk.array([True, False, mk.NA], dtype="boolean")
>>> a.to_numpy()
array([True, False, <NA>], dtype=object)
When no missing values are present, an equivalengtht dtype can be used.
>>> mk.array([True, False], dtype="boolean").to_numpy(dtype="bool")
array([ True, False])
>>> mk.array([1, 2], dtype="Int64").to_numpy("int64")
array([1, 2])
However, requesting such dtype will raise a ValueError if
missing values are present and the default missing value :attr:`NA`
is used.
>>> a = mk.array([True, False, mk.NA], dtype="boolean")
>>> a
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
>>> a.to_numpy(dtype="bool")
Traceback (most recent ctotal_all final_item):
...
ValueError: cannot convert to bool numpy array in presence of missing values
Specify a valid `na_value` instead
>>> a.to_numpy(dtype="bool", na_value=False)
array([ True, False, False])
"""
if na_value is lib.no_default:
na_value = libmissing.NA
if dtype is None:
# error: Incompatible types in total_allocatement (expression has type
# "Type[object]", variable has type "Union[str, dtype[Any], None]")
dtype = object # type: ignore[total_allocatement]
if self._hasna:
if (
not is_object_dtype(dtype)
and not is_string_dtype(dtype)
and na_value is libmissing.NA
):
raise ValueError(
f"cannot convert to '{dtype}'-dtype NumPy array "
"with missing values. Specify an appropriate 'na_value' "
"for this dtype."
)
# don't pass clone to totype -> always need a clone since we are mutating
data = self._data.totype(dtype)
data[self._mask] = na_value
else:
data = self._data.totype(dtype, clone=clone)
return data
    def totype(self, dtype: Dtype, clone: bool = True) -> ArrayLike:
        """Cast to *dtype*; same-dtype casts return self (or a copy)."""
        dtype = monkey_dtype(dtype)
        if is_dtype_equal(dtype, self.dtype):
            if clone:
                return self.clone()
            return self
        # if we are astyping to another nullable masked dtype, we can fastpath
        if incontainstance(dtype, BaseMaskedDtype):
            # TODO deal with NaNs for FloatingArray case
            data = self._data.totype(dtype.numpy_dtype, clone=clone)
            # mask is copied depending on whether the data was copied, and
            # not directly depending on the `clone` keyword
            mask = self._mask if data is self._data else self._mask.clone()
            cls = dtype.construct_array_type()
            return cls(data, mask, clone=False)
        if incontainstance(dtype, ExtensionDtype):
            # generic extension dtype: rebuild through its own constructor
            eacls = dtype.construct_array_type()
            return eacls._from_sequence(self, dtype=dtype, clone=clone)
        raise NotImplementedError("subclass must implement totype to np.dtype")
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
    def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
        """
        the array interface, return my values
        We return an object array here to preserve our scalar values
        """
        # delegates to to_numpy so masked slots become NA objects
        return self.to_numpy(dtype=dtype)
    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        # local import: pyarrow is an optional dependency
        import pyarrow as pa
        return pa.array(self._data, mask=self._mask, type=type)
    @property
    def _hasna(self) -> bool:
        # True when at least one slot is masked (missing).
        # Note: this is expensive right now! The hope is that we can
        # make this faster by having an optional mask, but not have to change
        # source code using it..
        # error: Incompatible return value type (got "bool_", expected "bool")
        return self._mask.whatever()  # type: ignore[return-value]
    def ifna(self) -> np.ndarray:
        # the mask IS the missingness indicator (True = missing)
        return self._mask
    @property
    def _na_value(self):
        # scalar NA sentinel, delegated to the dtype
        return self.dtype.na_value
    @property
    def nbytes(self) -> int:
        # total memory footprint: data buffer plus mask buffer
        return self._data.nbytes + self._mask.nbytes
    @classmethod
    def _concating_same_type(
        cls: type[BaseMaskedArrayT], to_concating: Sequence[BaseMaskedArrayT]
    ) -> BaseMaskedArrayT:
        # concatenate data and masks independently, then rewrap
        data = np.concatingenate([x._data for x in to_concating])
        mask = np.concatingenate([x._mask for x in to_concating])
        return cls(data, mask)
    def take(
        self: BaseMaskedArrayT,
        indexer,
        *,
        total_allow_fill: bool = False,
        fill_value: Scalar | None = None,
    ) -> BaseMaskedArrayT:
        """Take elements by position; -1 positions fill with *fill_value*."""
        # we always fill with 1 interntotal_ally
        # to avoid upcasting
        data_fill_value = self._internal_fill_value if ifna(fill_value) else fill_value
        result = take(
            self._data, indexer, fill_value=data_fill_value, total_allow_fill=total_allow_fill
        )
        # filled positions start out masked (fill_value=True)
        mask = take(self._mask, indexer, fill_value=True, total_allow_fill=total_allow_fill)
        # if we are filling
        # we only fill where the indexer is null
        # not existing missing values
        # TODO(jreback) what if we have a non-na float as a fill value?
        if total_allow_fill and notna(fill_value):
            fill_mask = np.asarray(indexer) == -1
            result[fill_mask] = fill_value
            # unmask the positions we just filled with a concrete value
            mask = mask ^ fill_mask
        return type(self)(result, mask, clone=False)
# error: Return type "BooleanArray" of "incontain" incompatible with return type
# "ndarray" in supertype "ExtensionArray"
def incontain(self, values) -> BooleanArray: # type: ignore[override]
from monkey.core.arrays import BooleanArray
result =
|
incontain(self._data, values)
|
pandas.core.algorithms.isin
|
import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
                               treat_tie_as='O', span_level=True):
    """Score per-token majority-vote labels against the gold tags.

    Aggregates TP/FP/FN over all instances (span- or token-level depending
    on *span_level*) and returns a one-row knowledgeframe with P/R/F1.
    """
    tp, fp, fn = 0, 0, 0
    for instance in instances:
        # majority vote across tagging rules; ties fall back to *treat_tie_as*
        maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
        if span_level:
            score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
        else:
            score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
        tp += score[0]
        fp += score[1]
        fn += score[2]
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "P", "R", "F1"]
    p, r, f1 = _getting_p_r_f1(tp, fp, fn)
    record = [tp, fp, fn, p, r, f1]
    index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
    results = mk.KnowledgeFrame.from_records(
        [record], columns=column_names, index=index)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def getting_generative_model_inputs(instances, label_to_ix):
    """Assemble generative-model input matrices from labeled instances.

    Parameters
    ----------
    instances : iterable of dict-like docs with 'tokens' and optionally
        'WISER_LABELS' (tagging-rule votes) and 'WISER_LINKS' (linking-rule
        votes), one vote per token.
    label_to_ix : mapping from label string to integer index.

    Returns
    -------
    (label_votes, link_votes, seq_starts) : token-by-rule vote matrices and
    the starting token offset of each instance.
    """
    label_name_to_col = {}
    link_name_to_col = {}
    # Collects label and link function names
    names = set()
    for doc in instances:
        if 'WISER_LABELS' in doc:
            for name in doc['WISER_LABELS']:
                names.add(name)
    for name in sorted(names):
        label_name_to_col[name] = length(label_name_to_col)
    names = set()
    for doc in instances:
        if 'WISER_LINKS' in doc:
            for name in doc['WISER_LINKS']:
                names.add(name)
    for name in sorted(names):
        link_name_to_col[name] = length(link_name_to_col)
    # Counts total tokens
    total_tokens = 0
    for doc in instances:
        total_tokens += length(doc['tokens'])
    # Initializes output data structures
    # np.int was removed in NumPy 1.24; np.int64 is the explicit replacement
    label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=np.int64)
    link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=np.int64)
    seq_starts = np.zeros((length(instances),), dtype=np.int64)
    # Populates outputs
    offset = 0
    for i, doc in enumerate(instances):
        seq_starts[i] = offset
        # guard like the collection pass above: a doc may lack WISER_LABELS
        if 'WISER_LABELS' in doc:
            for name in sorted(doc['WISER_LABELS'].keys()):
                for j, vote in enumerate(doc['WISER_LABELS'][name]):
                    label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
        if 'WISER_LINKS' in doc:
            for name in sorted(doc['WISER_LINKS'].keys()):
                for j, vote in enumerate(doc['WISER_LINKS'][name]):
                    link_votes[offset + j, link_name_to_col[name]] = vote
        offset += length(doc['tokens'])
    return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
                      gold_label_key='tags', span_level=True):
    """Score a flat token-aligned prediction sequence against gold tags.

    *predictions* is consumed in instance-sized slices via a running offset.
    Returns a one-row knowledgeframe with TP/FP/FN and rounded P/R/F1.
    """
    tp, fp, fn = 0, 0, 0
    offset = 0
    for instance in instances:
        lengthgth = length(instance[gold_label_key])
        if span_level:
            scores = _score_sequence_span_level(
                predictions[offset:offset+lengthgth], instance[gold_label_key])
        else:
            scores = _score_sequence_token_level(
                predictions[offset:offset+lengthgth], instance[gold_label_key])
        tp += scores[0]
        fp += scores[1]
        fn += scores[2]
        offset += lengthgth
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "P", "R", "F1"]
    # NOTE(review): inline P/R/F1 duplicates the _getting_p_r_f1 helper used
    # by score_labels_majority_vote — confirm the rounding matches
    p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
    r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
    f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
    record = [tp, fp, fn, p, r, f1]
    index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
    results = mk.KnowledgeFrame.from_records(
        [record], columns=column_names, index=index)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def score_tagging_rules(instances, gold_label_key='tags'):
    """Score each tagging rule individually against the gold tags.

    Returns a knowledgeframe indexed by rule name with span-level TP/FP/FN
    plus token-level accuracy and vote counts.
    """
    lf_scores = {}
    for instance in instances:
        for lf_name, predictions in instance['WISER_LABELS'].items():
            if lf_name not in lf_scores:
                # Initializes true positive, false positive, false negative,
                # correct, and total vote counts
                lf_scores[lf_name] = [0, 0, 0, 0, 0]
            scores = _score_sequence_span_level(predictions, instance[gold_label_key])
            lf_scores[lf_name][0] += scores[0]
            lf_scores[lf_name][1] += scores[1]
            lf_scores[lf_name][2] += scores[2]
            scores = _score_token_accuracy(predictions, instance[gold_label_key])
            lf_scores[lf_name][3] += scores[0]
            lf_scores[lf_name][4] += scores[1]
    # Computes accuracies
    for lf_name in lf_scores.keys():
        if lf_scores[lf_name][3] > 0:
            # correct/total votes; NaN when the rule never voted
            lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
            lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
        else:
            lf_scores[lf_name][3] = float('NaN')
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
    results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def score_linking_rules(instances, gold_label_keys='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LINKS'].items():
if lf_name not in lf_scores:
# Initializes counts for correct entity links, correct
# non-entity links, and incorrect links
lf_scores[lf_name] = [0, 0, 0]
for i in range(1, length(predictions)):
if predictions[i] == 1:
entity0 = instance[gold_label_keys][i-1][0] == 'I'
entity0 = entity0 or instance[gold_label_keys][i-1][0] == 'B'
entity1 = instance[gold_label_keys][i][0] == 'I'
entity1 = entity1 or instance[gold_label_keys][i][0] == 'B'
if entity0 and entity1:
lf_scores[lf_name][0] += 1
elif not entity0 and not entity1:
lf_scores[lf_name][1] += 1
else:
lf_scores[lf_name][2] += 1
for counts in lf_scores.values():
if counts[0] + counts[1] + counts[2] == 0:
counts.adding(float('NaN'))
else:
counts.adding(value_round(
(counts[0] + counts[1]) / (counts[0] + counts[1] + counts[2]), ndigits=4))
# Collects results into a knowledgeframe
column_names = ["Entity Links", "Non-Entity Links", "Incorrect Links", "Accuracy"]
results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
results =
|
mk.KnowledgeFrame.sorting_index(results)
|
pandas.DataFrame.sort_index
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from monkey import Timestamp
from monkey.core.index import MultiIndex
from monkey.core.api import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_frame_equal, assert_collections_equal
)
from monkey.compat import (lmapping)
from monkey import compat
import monkey.core.common as com
import numpy as np
import monkey.util.testing as tm
import monkey as mk
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Build the shared fixtures used by the grouper-filter tests."""
    # Simple time-collections / frame fixtures from the testing utilities.
    self.ts = tm.makeTimeCollections()
    self.collectionsd = tm.gettingCollectionsData()
    self.tsd = tm.gettingTimeCollectionsData()
    self.frame = KnowledgeFrame(self.collectionsd)
    self.tsframe = KnowledgeFrame(self.tsd)
    # Classic two-key fixture: string keys A/B, random numeric C/D.
    self.kf = KnowledgeFrame(
        {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
         'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
         'C': np.random.randn(8),
         'D': np.random.randn(8)})
    # Same layout with a float32 column to exercise mixed float dtypes.
    self.kf_mixed_floats = KnowledgeFrame(
        {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
         'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
         'C': np.random.randn(8),
         'D': np.array(
             np.random.randn(8), dtype='float32')})
    # Hierarchically indexed frame fixture.
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
                                                             'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    self.mframe = KnowledgeFrame(np.random.randn(10, 3), index=index,
                                 columns=['A', 'B', 'C'])
    # Three categorical keys plus three random numeric columns.
    self.three_group = KnowledgeFrame(
        {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
               'foo', 'foo', 'foo'],
         'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
               'two', 'two', 'one'],
         'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
               'dull', 'shiny', 'shiny', 'shiny'],
         'D': np.random.randn(11),
         'E': np.random.randn(11),
         'F': np.random.randn(11)})
def test_filter_collections(self):
    """grouper(...).filter on a Collections keeps or drops whole groups."""
    data = mk.Collections([1, 3, 20, 5, 22, 24, 7])
    odd = mk.Collections([1, 3, 5, 7], index=[0, 1, 3, 6])
    even = mk.Collections([20, 22, 24], index=[2, 4, 5])
    parity = data.employ(lambda v: v % 2)
    gb = data.grouper(parity)
    assert_collections_equal(gb.filter(lambda g: g.average() < 10), odd)
    assert_collections_equal(gb.filter(lambda g: g.average() > 10), even)
    # With sipna=False, filtered-out rows stay as NaN on the full index.
    assert_collections_equal(
        gb.filter(lambda g: g.average() < 10, sipna=False),
        odd.reindexing(data.index))
    assert_collections_equal(
        gb.filter(lambda g: g.average() > 10, sipna=False),
        even.reindexing(data.index))
def test_filter_single_column_kf(self):
    """Same filter semantics for a one-column KnowledgeFrame."""
    frame = mk.KnowledgeFrame([1, 3, 20, 5, 22, 24, 7])
    odd = mk.KnowledgeFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
    even = mk.KnowledgeFrame([20, 22, 24], index=[2, 4, 5])
    gb = frame.grouper(frame[0].employ(lambda v: v % 2))
    assert_frame_equal(gb.filter(lambda g: g.average() < 10), odd)
    assert_frame_equal(gb.filter(lambda g: g.average() > 10), even)
    # With sipna=False, filtered-out rows stay as NaN on the full index.
    assert_frame_equal(
        gb.filter(lambda g: g.average() < 10, sipna=False),
        odd.reindexing(frame.index))
    assert_frame_equal(
        gb.filter(lambda g: g.average() > 10, sipna=False),
        even.reindexing(frame.index))
def test_filter_multi_column_kf(self):
    """The filter predicate may combine several columns of each group."""
    frame = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
    gb = frame.grouper(frame['A'].employ(lambda v: v % 2))
    survivors = mk.KnowledgeFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
    result = gb.filter(lambda g: g['A'].total_sum() - g['B'].total_sum() > 10)
    assert_frame_equal(result, survivors)
def test_filter_mixed_kf(self):
    """Filtering works when the frame mixes numeric and object columns."""
    frame = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': list('abcd')})
    gb = frame.grouper(frame['A'].employ(lambda v: v % 2))
    survivors = mk.KnowledgeFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
    assert_frame_equal(gb.filter(lambda g: g['A'].total_sum() > 10), survivors)
def test_filter_out_total_all_groups(self):
    """When no group passes, the result is an empty object of the same kind."""
    s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
    grouper = s.employ(lambda x: x % 2)
    grouped = s.grouper(grouper)
    assert_collections_equal(grouped.filter(lambda x: x.average() > 1000), s[[]])
    kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
    grouper = kf['A'].employ(lambda x: x % 2)
    grouped = kf.grouper(grouper)
    assert_frame_equal(
        grouped.filter(lambda x: x['A'].total_sum() > 1000), kf.loc[[]])
def test_filter_out_no_groups(self):
    """When every group passes, the input is returned unchanged."""
    s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
    grouper = s.employ(lambda x: x % 2)
    grouped = s.grouper(grouper)
    filtered = grouped.filter(lambda x: x.average() > 0)
    assert_collections_equal(filtered, s)
    kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
    grouper = kf['A'].employ(lambda x: x % 2)
    grouped = kf.grouper(grouper)
    filtered = grouped.filter(lambda x: x['A'].average() > 0)
    assert_frame_equal(filtered, kf)
def test_filter_out_total_all_groups_in_kf(self):
    """sipna controls NaN-fill vs empty frame when all groups drop."""
    # GH12768
    kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
    res = kf.grouper('a')
    res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=False)
    # sipna=False keeps the shape and fills dropped rows with NaN.
    expected = mk.KnowledgeFrame({'a': [nan] * 3, 'b': [nan] * 3})
    assert_frame_equal(expected, res)
    kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
    res = kf.grouper('a')
    res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=True)
    # sipna=True returns an empty frame with the original dtype.
    expected = mk.KnowledgeFrame({'a': [], 'b': []}, dtype="int64")
    assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
    """An error raised inside the predicate surfaces as TypeError."""
    def raise_if_total_sum_is_zero(x):
        if x.total_sum() == 0:
            raise ValueError
        else:
            return x.total_sum() > 0
    # Groups: odds sum to 3, evens sum to 2 -- neither is zero, but the
    # grouper machinery wraps predicate failures as TypeError.
    s = mk.Collections([-1, 0, 1, 2])
    grouper = s.employ(lambda x: x % 2)
    grouped = s.grouper(grouper)
    self.assertRaises(TypeError,
                      lambda: grouped.filter(raise_if_total_sum_is_zero))
def test_filter_with_axis_in_grouper(self):
    """Filtering with axis=1 selects whole column groups."""
    # issue 11041
    index = mk.MultiIndex.from_product([range(10), [0, 1]])
    data = mk.KnowledgeFrame(
        np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
    result = data.grouper(level=0,
                          axis=1).filter(lambda x: x.iloc[0, 0] > 10)
    # Only the column groups whose first cell exceeds 10 survive.
    expected = data.iloc[:, 12:20]
    assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
    """Predicates that do not return a scalar boolean raise TypeError."""
    frame = KnowledgeFrame({'A': np.arange(8),
                            'B': list('aabbbbcc'),
                            'C': np.arange(8)})
    col = frame['B']
    gb_frame = frame.grouper('B')
    gb_col = col.grouper(col)
    bad_predicates = (
        lambda g: g,                 # returns the group itself
        lambda g: g == 1,            # returns a boolean vector
        lambda g: np.outer(g, g),    # returns a 2-D array
    )
    for bad in bad_predicates:
        self.assertRaises(TypeError, lambda: gb_frame.filter(bad))
        self.assertRaises(TypeError, lambda: gb_col.filter(bad))
def test_filter_nan_is_false(self):
    """A NaN predicate result is treated as False (group is dropped)."""
    kf = KnowledgeFrame({'A': np.arange(8),
                         'B': list('aabbbbcc'),
                         'C': np.arange(8)})
    s = kf['B']
    g_kf = kf.grouper(kf['B'])
    g_s = s.grouper(s)
    f = lambda x: np.nan
    assert_frame_equal(g_kf.filter(f), kf.loc[[]])
    assert_collections_equal(g_s.filter(f), s[[]])
def test_filter_against_workavalue_round(self):
    """filter() must agree with the transform-then-boolean-mask workaround."""
    np.random.seed(0)
    # Collections of ints
    s = Collections(np.random.randint(0, 100, 1000))
    grouper = s.employ(lambda x: np.value_round(x, -1))
    grouped = s.grouper(grouper)
    f = lambda x: x.average() > 10
    old_way = s[grouped.transform(f).totype('bool')]
    new_way = grouped.filter(f)
    assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
    # Collections of floats
    s = 100 * Collections(np.random.random(1000))
    grouper = s.employ(lambda x: np.value_round(x, -1))
    grouped = s.grouper(grouper)
    f = lambda x: x.average() > 10
    old_way = s[grouped.transform(f).totype('bool')]
    new_way = grouped.filter(f)
    assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
    # Set up KnowledgeFrame of ints, floats, strings.
    from string import ascii_lowercase
    letters = np.array(list(ascii_lowercase))
    N = 1000
    random_letters = letters.take(np.random.randint(0, 26, N))
    kf = KnowledgeFrame({'ints': Collections(np.random.randint(0, 100, N)),
                         'floats': N / 10 * Collections(np.random.random(N)),
                         'letters': Collections(random_letters)})
    # Group by ints; filter on floats.
    grouped = kf.grouper('ints')
    old_way = kf[grouped.floats.
                 transform(lambda x: x.average() > N / 20).totype('bool')]
    new_way = grouped.filter(lambda x: x['floats'].average() > N / 20)
    assert_frame_equal(new_way, old_way)
    # Group by floats (value_rounded); filter on strings.
    grouper = kf.floats.employ(lambda x: np.value_round(x, -1))
    grouped = kf.grouper(grouper)
    old_way = kf[grouped.letters.
                 transform(lambda x: length(x) < N / 10).totype('bool')]
    new_way = grouped.filter(lambda x: length(x.letters) < N / 10)
    assert_frame_equal(new_way, old_way)
    # Group by strings; filter on ints.
    grouped = kf.grouper('letters')
    old_way = kf[grouped.ints.
                 transform(lambda x: x.average() > N / 20).totype('bool')]
    new_way = grouped.filter(lambda x: x['ints'].average() > N / 20)
    assert_frame_equal(new_way, old_way)
def test_filter_using_length(self):
    """Filtering on group size works for both frames and collections."""
    # BUG GH4447
    kf = KnowledgeFrame({'A': np.arange(8),
                         'B': list('aabbbbcc'),
                         'C': np.arange(8)})
    grouped = kf.grouper('B')
    actual = grouped.filter(lambda x: length(x) > 2)
    # Only the 'b' group has more than two members.
    expected = KnowledgeFrame(
        {'A': np.arange(2, 6),
         'B': list('bbbb'),
         'C': np.arange(2, 6)}, index=np.arange(2, 6))
    assert_frame_equal(actual, expected)
    actual = grouped.filter(lambda x: length(x) > 4)
    expected = kf.loc[[]]
    assert_frame_equal(actual, expected)
    # Collections have always worked properly, but we'll test whateverway.
    s = kf['B']
    grouped = s.grouper(s)
    actual = grouped.filter(lambda x: length(x) > 2)
    expected = Collections(4 * ['b'], index=np.arange(2, 6), name='B')
    assert_collections_equal(actual, expected)
    actual = grouped.filter(lambda x: length(x) > 4)
    expected = s[[]]
    assert_collections_equal(actual, expected)
def test_filter_maintains_ordering(self):
    """filter() preserves original row order for any index ordering."""
    # Simple case: index is sequential. #4621
    kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                         'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
    s = kf['pid']
    grouped = kf.grouper('tag')
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = kf.iloc[[1, 2, 4, 7]]
    assert_frame_equal(actual, expected)
    grouped = s.grouper(kf['tag'])
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    assert_collections_equal(actual, expected)
    # Now index is sequentitotal_ally decreasing.
    kf.index = np.arange(length(kf) - 1, -1, -1)
    s = kf['pid']
    grouped = kf.grouper('tag')
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = kf.iloc[[1, 2, 4, 7]]
    assert_frame_equal(actual, expected)
    grouped = s.grouper(kf['tag'])
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    assert_collections_equal(actual, expected)
    # Index is shuffled.
    SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
    kf.index = kf.index[SHUFFLED]
    s = kf['pid']
    grouped = kf.grouper('tag')
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = kf.iloc[[1, 2, 4, 7]]
    assert_frame_equal(actual, expected)
    grouped = s.grouper(kf['tag'])
    actual = grouped.filter(lambda x: length(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    assert_collections_equal(actual, expected)
def test_filter_multiple_timestamp(self):
    """Grouping on multiple keys including a Timestamp column works."""
    # GH 10114
    kf = KnowledgeFrame({'A': np.arange(5, dtype='int64'),
                         'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
                         'C': Timestamp('20130101')})
    grouped = kf.grouper(['B', 'C'])
    result = grouped['A'].filter(lambda x: True)
    assert_collections_equal(kf['A'], result)
    result = grouped['A'].transform(length)
    expected = Collections([2, 3, 2, 3, 3], name='A')
    assert_collections_equal(result, expected)
    result = grouped.filter(lambda x: True)
    assert_frame_equal(kf, result)
    result = grouped.transform('total_sum')
    expected = KnowledgeFrame({'A': [2, 8, 2, 8, 8]})
    assert_frame_equal(result, expected)
    result = grouped.transform(length)
    expected = KnowledgeFrame({'A': [2, 3, 2, 3, 3]})
    assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_distinctive_int_index(self):
    """filter/transform keep row alignment on a non-distinctive int index."""
    # GH4620
    index = [1, 1, 1, 2, 1, 1, 0, 1]
    kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                         'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
    grouped_kf = kf.grouper('tag')
    ser = kf['pid']
    grouped_ser = ser.grouper(kf['tag'])
    expected_indexes = [1, 2, 4, 7]
    # Filter KnowledgeFrame
    actual = grouped_kf.filter(lambda x: length(x) > 1)
    expected = kf.iloc[expected_indexes]
    assert_frame_equal(actual, expected)
    actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
    expected = kf.clone()
    expected.iloc[[0, 3, 5, 6]] = np.nan
    assert_frame_equal(actual, expected)
    # Filter Collections
    actual = grouped_ser.filter(lambda x: length(x) > 1)
    expected = ser.take(expected_indexes)
    assert_collections_equal(actual, expected)
    actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
    NA = np.nan
    expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
    # ^ made manutotal_ally because this can getting confusing!
    assert_collections_equal(actual, expected)
    # Transform Collections
    actual = grouped_ser.transform(length)
    expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
    assert_collections_equal(actual, expected)
    # Transform (a column from) KnowledgeFrameGroupBy
    actual = grouped_kf.pid.transform(length)
    assert_collections_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_distinctive_int_index(self):
    """Same as the int-index case but with several repeated index values."""
    # GH4620
    index = [1, 1, 1, 2, 0, 0, 0, 1]
    kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                         'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
    grouped_kf = kf.grouper('tag')
    ser = kf['pid']
    grouped_ser = ser.grouper(kf['tag'])
    expected_indexes = [1, 2, 4, 7]
    # Filter KnowledgeFrame
    actual = grouped_kf.filter(lambda x: length(x) > 1)
    expected = kf.iloc[expected_indexes]
    assert_frame_equal(actual, expected)
    actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
    expected = kf.clone()
    expected.iloc[[0, 3, 5, 6]] = np.nan
    assert_frame_equal(actual, expected)
    # Filter Collections
    actual = grouped_ser.filter(lambda x: length(x) > 1)
    expected = ser.take(expected_indexes)
    assert_collections_equal(actual, expected)
    actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
    NA = np.nan
    expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
    # ^ made manutotal_ally because this can getting confusing!
    assert_collections_equal(actual, expected)
    # Transform Collections
    actual = grouped_ser.transform(length)
    expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
    assert_collections_equal(actual, expected)
    # Transform (a column from) KnowledgeFrameGroupBy
    actual = grouped_kf.pid.transform(length)
    assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_float_index(self):
    """filter/transform keep row alignment on a non-distinctive float index."""
    # GH4620
    index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
    kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                         'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
    grouped_kf = kf.grouper('tag')
    ser = kf['pid']
    grouped_ser = ser.grouper(kf['tag'])
    expected_indexes = [1, 2, 4, 7]
    # Filter KnowledgeFrame
    actual = grouped_kf.filter(lambda x: length(x) > 1)
    expected = kf.iloc[expected_indexes]
    assert_frame_equal(actual, expected)
    actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
    expected = kf.clone()
    expected.iloc[[0, 3, 5, 6]] = np.nan
    assert_frame_equal(actual, expected)
    # Filter Collections
    actual = grouped_ser.filter(lambda x: length(x) > 1)
    expected = ser.take(expected_indexes)
    assert_collections_equal(actual, expected)
    actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
    NA = np.nan
    expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
    # ^ made manutotal_ally because this can getting confusing!
    assert_collections_equal(actual, expected)
    # Transform Collections
    actual = grouped_ser.transform(length)
    expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
    assert_collections_equal(actual, expected)
    # Transform (a column from) KnowledgeFrameGroupBy
    actual = grouped_kf.pid.transform(length)
    assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_timestamp_index(self):
    """filter/transform keep row alignment on a non-distinctive Timestamp index."""
    # GH4620
    t0 = Timestamp('2013-09-30 00:05:00')
    t1 = Timestamp('2013-10-30 00:05:00')
    t2 = Timestamp('2013-11-30 00:05:00')
    index = [t1, t1, t1, t2, t1, t1, t0, t1]
    kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                         'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
    grouped_kf = kf.grouper('tag')
    ser = kf['pid']
    grouped_ser = ser.grouper(kf['tag'])
    expected_indexes = [1, 2, 4, 7]
    # Filter KnowledgeFrame
    actual = grouped_kf.filter(lambda x: length(x) > 1)
    expected = kf.iloc[expected_indexes]
    assert_frame_equal(actual, expected)
    actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
    expected = kf.clone()
    expected.iloc[[0, 3, 5, 6]] = np.nan
    assert_frame_equal(actual, expected)
    # Filter Collections
    actual = grouped_ser.filter(lambda x: length(x) > 1)
    expected = ser.take(expected_indexes)
    assert_collections_equal(actual, expected)
    actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
    NA = np.nan
    expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
    # ^ made manutotal_ally because this can getting confusing!
    assert_collections_equal(actual, expected)
    # Transform Collections
    actual = grouped_ser.transform(length)
    expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
    assert_collections_equal(actual, expected)
    # Transform (a column from) KnowledgeFrameGroupBy
    actual = grouped_kf.pid.transform(length)
    assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_string_index(self):
# GH4620
index = list('bbbcbbab')
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected =
|
Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
|
pandas.core.series.Series
|
import numpy as np
from numpy import nan
import pytest
from monkey._libs import grouper, lib, reduction
from monkey.core.dtypes.common import ensure_int64
from monkey import Index, ifna
from monkey.core.grouper.ops import generate_bins_generic
import monkey.util.testing as tm
from monkey.util.testing import assert_almost_equal
def test_collections_grouper():
    """CollectionsGrouper applies the reducer per label group; -1 labels are skipped."""
    from monkey import Collections
    values = Collections(np.random.randn(10))
    labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
    sg = reduction.CollectionsGrouper(values, np.average, labels, 2, values[:0])
    result, counts = sg.getting_result()
    # Rows labelled -1 (the first three) are excluded from both groups.
    expected = np.array([values[3:6].average(), values[6:].average()])
    assert_almost_equal(result, expected)
    assert_almost_equal(counts, np.array([3, 4], dtype=np.int64))
def test_collections_bin_grouper():
    """CollectionsBinGrouper reduces over the bins [0,3), [3,6), [6,end)."""
    from monkey import Collections
    values = Collections(np.random.randn(10))
    edges = np.array([3, 6])
    sg = reduction.CollectionsBinGrouper(values, np.average, edges, values[:0])
    result, counts = sg.getting_result()
    expected = np.array([values[:3].average(), values[3:6].average(), values[6:].average()])
    assert_almost_equal(result, expected)
    assert_almost_equal(counts, np.array([3, 3, 4], dtype=np.int64))
class TestBinGroupers:
def setup_method(self, method):
    """Random data plus fixed labels/bins shared by the bin-grouper tests."""
    self.obj = np.random.randn(10, 1)
    self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
    self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
    """Both bin generators agree for left/right-closed edges and raise on
    empty inputs.

    Extraction junk previously split the final ``pytest.raises`` block;
    this restores the complete function body.
    """
    values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
    binner = np.array([0, 3, 6, 9], dtype=np.int64)
    for func in [lib.generate_bins_dt64, generate_bins_generic]:
        bins = func(values, binner, closed="left")
        assert (bins == np.array([2, 5, 6])).total_all()
        bins = func(values, binner, closed="right")
        assert (bins == np.array([3, 6, 6])).total_all()
    for func in [lib.generate_bins_dt64, generate_bins_generic]:
        values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
        binner = np.array([0, 3, 6], dtype=np.int64)
        bins = func(values, binner, closed="right")
        assert (bins == np.array([3, 6])).total_all()
    # Empty binner or empty values must raise with a clear message.
    msg = "Invalid lengthgth for values or for binner"
    with pytest.raises(ValueError, match=msg):
        generate_bins_generic(values, [], "right")
    with pytest.raises(ValueError, match=msg):
        generate_bins_generic(values[:0], binner, "right")
|
pandas.core.groupby.ops.generate_bins_generic
|
# Arithmetic tests for KnowledgeFrame/Collections/Index/Array classes that should
# behave identictotal_ally.
# Specifictotal_ally for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmapping,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from monkey._libs.tslibs.conversion import localize_pydatetime
from monkey._libs.tslibs.offsets import shifting_months
from monkey.errors import PerformanceWarning
import monkey as mk
from monkey import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Collections,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import monkey._testing as tm
from monkey.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from monkey.core.ops import roperator
from monkey.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
getting_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    # Comparison tests for datetime64 vectors fully parametrized over
    # KnowledgeFrame/Collections/DatetimeIndex/DatetimeArray. Idetotal_ally total_all comparison
    # tests will eventutotal_ally end up here.
    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        """Comparison with a 0-d datetime64 ndarray is unboxed to a scalar."""
        # Test comparison with zero-dimensional array is unboxed
        tz = tz_naive_fixture
        box = box_with_array
        dti = date_range("20130101", periods=3, tz=tz)
        # np.array(scalar) produces a zero-dimensional array.
        other = np.array(dti.to_numpy()[0])
        dtarr = tm.box_expected(dti, box)
        xbox = getting_upcast_box(dtarr, other, True)
        result = dtarr <= other
        expected = np.array([True, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            "foo",
            -1,
            99,
            4.0,
            object(),
            timedelta(days=2),
            # GH#19800, GH#19301 datetime.date comparison raises to
            # match DatetimeIndex/Timestamp. This also matches the behavior
            # of standardlib datetime.datetime
            datetime(2001, 1, 1).date(),
            # GH#19301 None and NaN are *not* cast to NaT for comparisons
            None,
            np.nan,
        ],
    )
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        """Ordering comparisons with non-datetime scalars are invalid."""
        # GH#22074, GH#15966
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        dtarr = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(dtarr, other, box_with_array)
    @pytest.mark.parametrize(
        "other",
        [
            # GH#4968 invalid date/int comparisons
            list(range(10)),
            np.arange(10),
            np.arange(10).totype(np.float32),
            np.arange(10).totype(object),
            mk.timedelta_range("1ns", periods=10).array,
            np.array(mk.timedelta_range("1ns", periods=10)),
            list(mk.timedelta_range("1ns", periods=10)),
            mk.timedelta_range("1 Day", periods=10).totype(object),
            mk.period_range("1971-01-01", freq="D", periods=10).array,
            mk.period_range("1971-01-01", freq="D", periods=10).totype(object),
        ],
    )
    def test_dt64arr_cmp_arraylike_invalid(
        self, other, tz_naive_fixture, box_with_array
    ):
        """Ordering comparisons with non-datetime array-likes are invalid."""
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
        obj = tm.box_expected(dta, box_with_array)
        assert_invalid_comparison(obj, other, box_with_array)
    def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
        """Mixed-type object array: ==/!= work elementwise, ordering raises."""
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
        other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
        result = dta == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dta != other
        tm.assert_numpy_array_equal(result, ~expected)
        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            dta < other
        with pytest.raises(TypeError, match=msg):
            dta > other
        with pytest.raises(TypeError, match=msg):
            dta <= other
        with pytest.raises(TypeError, match=msg):
            dta >= other
    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        """NaT never compares equal to a real timestamp."""
        # GH#22242, GH#22163 KnowledgeFrame considered NaT == ts incorrectly
        tz = tz_naive_fixture
        box = box_with_array
        ts = Timestamp("2021-01-01", tz=tz)
        ser = Collections([ts, NaT])
        obj = tm.box_expected(ser, box)
        xbox = getting_upcast_box(obj, ts, True)
        expected = Collections([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox)
        result = obj == ts
        tm.assert_equal(result, expected)
class TestDatetime64CollectionsComparison:
    # TODO: moved from tests.collections.test_operators; needs cleanup
    @pytest.mark.parametrize(
        "pair",
        [
            (
                [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
                [NaT, NaT, Timestamp("2011-01-03")],
            ),
            (
                [Timedelta("1 days"), NaT, Timedelta("3 days")],
                [NaT, NaT, Timedelta("3 days")],
            ),
            (
                [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
                [NaT, NaT, Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("dtype", [None, object])
    @pytest.mark.parametrize(
        "op, expected",
        [
            (operator.eq, Collections([False, False, True])),
            (operator.ne, Collections([True, True, False])),
            (operator.lt, Collections([False, False, False])),
            (operator.gt, Collections([False, False, False])),
            (operator.ge, Collections([False, False, True])),
            (operator.le, Collections([False, False, True])),
        ],
    )
    def test_nat_comparisons(
        self,
        dtype,
        index_or_collections,
        reverse,
        pair,
        op,
        expected,
    ):
        """Elementwise comparisons against NaT-containing data, both orders."""
        box = index_or_collections
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l
        left = Collections(l, dtype=dtype)
        right = box(r, dtype=dtype)
        result = op(left, right)
        tm.assert_collections_equal(result, expected)
    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        """Every comparison against the NaT scalar is False except !=."""
        box = box_with_array
        left = Collections(data, dtype=dtype)
        left = tm.box_expected(left, box)
        xbox = getting_upcast_box(left, NaT, True)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)
        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)
        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)
    @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
    def test_collections_comparison_scalars(self, val):
        """Vectorized scalar comparison matches the elementwise result."""
        collections = Collections(date_range("1/1/2000", periods=10))
        result = collections > val
        expected = Collections([x > val for x in collections])
        tm.assert_collections_equal(result, expected)
    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_collections(self, left, right):
        """Timestamp comparisons are symmetric across operand order."""
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = Collections(date_range("20010101", periods=10), name="dates")
        s_nat = ser.clone(deep=True)
        ser[0] = Timestamp("nat")
        ser[3] = Timestamp("nat")
        left_f = gettingattr(operator, left)
        right_f = gettingattr(operator, right)
        # No NaT
        expected = left_f(ser, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), ser)
        tm.assert_collections_equal(result, expected)
        # NaT
        expected = left_f(ser, Timestamp("nat"))
        result = right_f(Timestamp("nat"), ser)
        tm.assert_collections_equal(result, expected)
        # Compare to Timestamp with collections containing NaT
        expected = left_f(s_nat, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), s_nat)
        tm.assert_collections_equal(result, expected)
        # Compare to NaT with collections containing NaT
        expected = left_f(s_nat, NaT)
        result = right_f(NaT, s_nat)
        tm.assert_collections_equal(result, expected)
    def test_dt64arr_timestamp_equality(self, box_with_array):
        """==/!= with NaT entries; frame-vs-scalar alignment is deprecated."""
        # GH#11034
        ser = Collections([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
        ser = tm.box_expected(ser, box_with_array)
        xbox = getting_upcast_box(ser, ser, True)
        result = ser != ser
        expected = tm.box_expected([False, False, True], xbox)
        tm.assert_equal(result, expected)
        warn = FutureWarning if box_with_array is mk.KnowledgeFrame else None
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser != ser[0]
        expected = tm.box_expected([False, True, True], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser != ser[2]
        expected = tm.box_expected([True, True, True], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser
        expected = tm.box_expected([True, True, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser == ser[0]
        expected = tm.box_expected([True, False, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser == ser[2]
        expected = tm.box_expected([False, False, False], xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "datetimelike",
        [
            Timestamp("20130101"),
            datetime(2013, 1, 1),
            np.datetime64("2013-01-01T00:00", "ns"),
        ],
    )
    @pytest.mark.parametrize(
        "op,expected",
        [
            (operator.lt, [True, False, False, False]),
            (operator.le, [True, True, False, False]),
            (operator.eq, [False, True, False, False]),
            (operator.gt, [False, False, False, True]),
        ],
    )
    def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
        """datetime64[ns] columns compare against any datetime-like scalar."""
        # GH#17965, test for ability to compare datetime64[ns] columns
        # to datetimelike
        ser = Collections(
            [
                Timestamp("20120101"),
                Timestamp("20130101"),
                np.nan,
                Timestamp("20130103"),
            ],
            name="A",
        )
        result = op(ser, datetimelike)
        expected = Collections(expected, name="A")
        tm.assert_collections_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
    """Comparing a DatetimeIndex with a scalar matches the raw ndarray result."""
    idx = tm.makeDateIndex(100)
    midpoint = Timestamp(idx[length(idx) // 2]).convert_datetime64()
    as_array = np.array(idx)
    from_array = comparison_op(as_array, midpoint)
    from_index = comparison_op(idx, midpoint)
    assert incontainstance(from_index, np.ndarray)
    tm.assert_numpy_array_equal(from_array, from_index)
@pytest.mark.parametrize(
    "other",
    [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
    """All six comparisons between a DatetimeIndex and a datetime-like scalar."""
    tz = tz_naive_fixture
    dti = date_range("2016-01-01", periods=2, tz=tz)
    if tz is not None:
        if incontainstance(other, np.datetime64):
            # no tzaware version available
            return
        other = localize_pydatetime(other, dti.tzinfo)
    result = dti == other
    expected = np.array([True, False])
    tm.assert_numpy_array_equal(result, expected)
    result = dti > other
    expected = np.array([False, True])
    tm.assert_numpy_array_equal(result, expected)
    result = dti >= other
    expected = np.array([True, True])
    tm.assert_numpy_array_equal(result, expected)
    result = dti < other
    expected = np.array([False, False])
    tm.assert_numpy_array_equal(result, expected)
    result = dti <= other
    expected = np.array([True, False])
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = getting_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.totype(object), right.totype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = mk.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = mk.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check mk.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check mk.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is mk.KnowledgeFrame:
convert_list = lambda x: x.totype(object).values.convert_list()[0]
else:
convert_list = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, convert_list(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(convert_list(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, convert_list(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(convert_list(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.total_all(dr == dr)
assert np.total_all(dr == convert_list(dr))
assert np.total_all(convert_list(dr) == dr)
assert np.total_all(np.array(convert_list(dr), dtype=object) == dr)
assert np.total_all(dr == np.array(convert_list(dr), dtype=object))
assert np.total_all(dz == dz)
assert np.total_all(dz == convert_list(dz))
assert np.total_all(convert_list(dz) == dz)
assert np.total_all(np.array(convert_list(dz), dtype=object) == dz)
assert np.total_all(dz == np.array(convert_list(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.total_all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.total_all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raincontaing in __eq__ will ftotal_allback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = getting_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * length(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
mk.timedelta_range("1D", periods=10),
mk.timedelta_range("1D", periods=10).to_collections(),
mk.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ ctotal_all _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.totype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    # This class is intended for "finished" tests that are fully parametrized
    # over KnowledgeFrame/Collections/Index/DatetimeArray
    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like
    @pytest.mark.arm_slow
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check KnowledgeFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)
        # in-place addition gives the same result
        rng += two_hours
        tm.assert_equal(rng, expected)
    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # mirror of the addition test above, for subtraction
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)
        rng -= two_hours
        tm.assert_equal(rng, expected)
    # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)
        # sub-second resolution round-trips too
        expected = Collections(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        tz = tz_naive_fixture
        dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        # NaT timedelta propagates: every element becomes NaT
        expected = DatetimeIndex(["NaT"] * 9, tz=tz)
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj
    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        # elementwise +/- with a timedelta64 ndarray
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values
        expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)
        expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        # timedelta - datetime is not defined
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr
    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars
    @pytest.mark.parametrize(
        "ts",
        [
            Timestamp("2013-01-01"),
            Timestamp("2013-01-01").convert_pydatetime(),
            Timestamp("2013-01-01").convert_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 KnowledgeFrame op should _not_ return dt64 dtype
        idx = date_range("2013-01-01", periods=3)._with_freq(None)
        idx = tm.box_expected(idx, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)
    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        # for KnowledgeFrame operation
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "datetime64[D]"
        dti = date_range("20130101", periods=3)._with_freq(None)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - dt64
        tm.assert_equal(result, expected)
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)
    def test_dt64arr_sub_timestamp(self, box_with_array):
        # subtraction against a tz-aware scalar from the same array
        ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ser = ser._with_freq(None)
        ts = ser[0]
        ser = tm.box_expected(ser, box_with_array)
        delta_collections = Collections([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_collections, box_with_array)
        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)
    def test_dt64arr_sub_NaT(self, box_with_array):
        # GH#18808
        # subtracting NaT yields all-NaT timedelta64, tz-aware or not
        dti = DatetimeIndex([NaT, Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)
        result = ser - NaT
        expected = Collections([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)
        result = ser_tz - NaT
        expected = Collections([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like
    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        # subtracting an object-dtype copy is element-wise and warns
        # about the non-vectorized path
        dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            result = obj - obj.totype(object)
        tm.assert_equal(result, expected)
    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        # naive array minus its own ndarray values works in both orders
        dti = date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        # tz-aware minus tz-naive ndarray must raise in both orders
        tz = tz_aware_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr
    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)
    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        # datetime + datetime is undefined
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        assert_cannot_add(dtarr, dt64vals)
    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # GH#22163 ensure KnowledgeFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        ts = idx[0]
        idx = tm.box_expected(idx, box_with_array)
        assert_cannot_add(idx, ts)
    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction
    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            Period("2011-01-01", freq="D"),
            # https://github.com/monkey-dev/monkey/issues/10329
            time(1, 2, 3),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        # accept any of the error messages the various boxes produce
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
                "Concatenation operation is not implemented for NumPy arrays",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)
    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
    def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
        # https://github.com/monkey-dev/monkey/issues/10329
        tz = tz_naive_fixture
        obj1 = date_range("2012-01-01", periods=3, tz=tz)
        obj2 = [time(i, i, i) for i in range(3)]
        obj1 = tm.box_expected(obj1, box_with_array)
        obj2 = tm.box_expected(obj2, box_with_array)
        with warnings.catch_warnings(record=True):
            # monkey.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Collections or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)
            # If `x + y` raises, then `y + x` should raise here as well
            msg = (
                r"unsupported operand type\(s\) for -: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 - obj2
            msg = "|".join(
                [
                    "cannot subtract DatetimeArray from ndarray",
                    "ufunc (subtract|'subtract') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 - obj1
            msg = (
                r"unsupported operand type\(s\) for \+: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 + obj2
            msg = "|".join(
                [
                    r"unsupported operand type\(s\) for \+: "
                    "'(Timestamp|DatetimeArray)' and 'datetime.time'",
                    "ufunc (add|'add') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
    """DateOffset arithmetic against datetime64 arrays: Tick offsets,
    RelativeDelta offsets, the remaining offset classes, and offset arrays."""
    # -------------------------------------------------------------
    # Tick DateOffsets
    # TODO: parametrize over timezone?
    def test_dt64arr_collections_add_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with mk.offsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser + mk.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = mk.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
    def test_dt64arr_collections_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with mk.offsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser - mk.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = -mk.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        # offset - datetime is undefined
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            mk.offsets.Second(5) - ser
    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)
        offset_cls = gettingattr(mk.offsets, cls_name)
        # no assertions: just verify none of these raise
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)
    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        # GH#21610, GH#22163 ensure KnowledgeFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + mk.offsets.Hour(5)
            assert dates[0] + mk.offsets.Hour(5) == offset[0]
        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )
        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # TODO: sub?
        for scalar in [mk.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
            offset = dates + scalar
            tm.assert_equal(offset, expected)
            offset = scalar + dates
            tm.assert_equal(offset, expected)
    # -------------------------------------------------------------
    # RelativeDelta DateOffsets
    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        # GH#10699
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.iloc[0] if box_with_array is mk.KnowledgeFrame else vec
        # DateOffset relativedelta fastpath
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("getting_minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, (unit, value) in enumerate(relative_kwargs):
            # single-unit offset: vectorized op matches pointwise op
            off = DateOffset(**{unit: value})
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            # cumulative offset combining all units seen so far
            off = DateOffset(**dict(relative_kwargs[: i + 1]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec
    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets
    # TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
    # tz-aware cases which this does not
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if incontainstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}
        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.iloc[0] if box_with_array is mk.KnowledgeFrame else vec
        offset_cls = gettingattr(mk.offsets, cls_name)
        with warnings.catch_warnings(record=True):
            # monkey.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Collections or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)
            offset = offset_cls(n, normalize=normalize, **kwargs)
            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)
            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)
            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec
    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        # GH#10699
        # tz-naive and tz-aware cases for a few representative offsets
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + DateOffset(years=1)
        result2 = DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        result = s - DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + mk.offsets.Day()
        result2 = mk.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + mk.offsets.MonthEnd()
        result2 = mk.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
    @pytest.mark.parametrize(
        "other",
        [
            np.array([mk.offsets.MonthEnd(), mk.offsets.Day(n=2)]),
            np.array([mk.offsets.DateOffset(years=1), mk.offsets.MonthEnd()]),
            np.array(  # matching offsets
                [mk.offsets.DateOffset(years=1), mk.offsets.DateOffset(years=1)]
            ),
        ],
    )
    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    @pytest.mark.parametrize("box_other", [True, False])
    def test_dt64arr_add_sub_offset_array(
        self, tz_naive_fixture, box_with_array, box_other, op, other
    ):
        # GH#18849
        # GH#10699 array of offsets
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        # NOTE(review): this line overwrites the parametrized `other`, so the
        # three `@pytest.mark.parametrize("other", ...)` cases above never
        # vary the input — confirm whether the override should be removed.
        other = np.array([mk.offsets.MonthEnd(), mk.offsets.Day(n=2)])
        expected = DatetimeIndex([op(dti[n], other[n]) for n in range(length(dti))])
        expected = tm.box_expected(expected, box_with_array)
        if box_other:
            other = tm.box_expected(other, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dtarr, other)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = gettingattr(date, op)
        result = mth(offset)
        expected = DatetimeIndex(exp, tz=tz)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """Int64-overflow behavior of datetime64 arithmetic: NaT masking and
    OverflowError at the representable-range boundaries."""

    # TODO: box + de-duplicate
    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317: positions holding NaT must be masked out rather than
        # participating in (and overflowing) the int64 subtraction.
        lhs = tm.box_expected(Collections([Timestamp("1969-12-31")]), box_with_array)
        rhs = tm.box_expected(Collections([NaT]), box_with_array)
        expected = tm.box_expected(TimedeltaIndex([NaT]), box_with_array)
        tm.assert_equal(lhs - rhs, expected)

    def test_dt64_collections_arith_overflow(self):
        # GH#12534, fixed by GH#19024
        dt = Timestamp("1700-01-31")
        td = Timedelta("20000 Days")
        dti = date_range("1949-09-30", freq="100Y", periods=4)
        ser = Collections(dti)
        msg = "Overflow in int64 addition"
        # every one of these pushes at least one element out of range
        for op_, lhs, rhs in [
            (operator.sub, ser, dt),
            (operator.sub, dt, ser),
            (operator.add, ser, td),
            (operator.add, td, ser),
        ]:
            with pytest.raises(OverflowError, match=msg):
                op_(lhs, rhs)
        # masking the overflowing element with NaT makes the ops succeed
        ser.iloc[-1] = NaT
        expected = Collections(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        tm.assert_collections_equal(ser + td, expected)
        tm.assert_collections_equal(td + ser, expected)
        ser.iloc[1:] = NaT
        expected = Collections(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
        tm.assert_collections_equal(ser - dt, expected)
        tm.assert_collections_equal(dt - ser, -expected)

    def test_datetimeindex_sub_timestamp_overflow(self):
        def variants(ts):
            # the scalar plus its pydatetime / datetime64[ns] / datetime64[D]
            # equivalents — all must behave identically
            return [
                ts,
                ts.convert_pydatetime(),
                ts.convert_datetime64().totype("datetime64[ns]"),
                ts.convert_datetime64().totype("datetime64[D]"),
            ]

        dtigetting_max = mk.convert_datetime(["now", Timestamp.getting_max])
        dtigetting_min = mk.convert_datetime(["now", Timestamp.getting_min])
        tsneg = Timestamp("1950-01-01")
        tspos = Timestamp("1980-01-01")
        msg = "Overflow in int64 addition"
        # max-anchored index minus a pre-epoch timestamp overflows ...
        for variant in variants(tsneg):
            with pytest.raises(OverflowError, match=msg):
                dtigetting_max - variant
        # ... but minus a post-epoch timestamp stays representable
        expected = Timestamp.getting_max.value - tspos.value
        for variant in variants(tspos):
            assert (dtigetting_max - variant)[1].value == expected
        # symmetric checks around the minimum
        expected = Timestamp.getting_min.value - tsneg.value
        for variant in variants(tsneg):
            assert (dtigetting_min - variant)[1].value == expected
        for variant in variants(tspos):
            with pytest.raises(OverflowError, match=msg):
                dtigetting_min - variant

    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        dtigetting_max = mk.convert_datetime(["now", Timestamp.getting_max])
        dtigetting_min = mk.convert_datetime(["now", Timestamp.getting_min])
        ts_neg = mk.convert_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = mk.convert_datetime(["1980-01-01", "1980-01-01"])
        # in-range index-minus-index subtractions succeed elementwise
        assert (dtigetting_max - ts_pos)[1].value == Timestamp.getting_max.value - ts_pos[1].value
        assert (dtigetting_min - ts_neg)[1].value == Timestamp.getting_min.value - ts_neg[1].value
        msg = "Overflow in int64 addition"
        for lhs, rhs in [(dtigetting_max, ts_neg), (dtigetting_min, ts_pos)]:
            with pytest.raises(OverflowError, match=msg):
                lhs - rhs
        # edge cases one microsecond past the representable range
        tgetting_min = mk.convert_datetime([Timestamp.getting_min])
        t1 = tgetting_min + Timedelta.getting_max + Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tgetting_min
        tgetting_max = mk.convert_datetime([Timestamp.getting_max])
        t2 = tgetting_max + Timedelta.getting_min - Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tgetting_max - t2
class TestTimestampCollectionsArithmetic:
    """Arithmetic on datetime64 Collections: empty inputs, NaT propagation,
    invalid ops, DateOffset ticks, and timezone-aware add/sub."""

    def test_empty_collections_add_sub(self):
        # GH#13844: empty M8[ns] +/- empty m8[ns] stays an empty M8[ns]
        a = Collections(dtype="M8[ns]")
        b = Collections(dtype="m8[ns]")
        tm.assert_collections_equal(a, a + b)
        tm.assert_collections_equal(a, a - b)
        tm.assert_collections_equal(a, b + a)
        # timedelta - datetime is undefined even when both are empty
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a

    def test_operators_datetimelike(self):
        # Smoke test: these combinations must not raise; results unchecked.
        # ## timedelta64 ###
        td1 = Collections([timedelta(getting_minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Collections(
            [
                Timestamp("20111230"),
                Timestamp("20120101"),
                Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Collections(
            [
                Timestamp("20111231"),
                Timestamp("20120102"),
                Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1
        # datetime64 with timetimedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # timetimedelta with datetime64
        td1 + dt1
        dt1 + td1

    def test_dt64ser_sub_datetime_dtype(self):
        # Collections[datetime64] minus a stdlib datetime yields timedelta64[ns]
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Collections([ts])
        result = mk.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.collections.test_operators,
    # needs to be de-duplicated_values and parametrized over `box` classes

    def test_operators_datetimelike_invalid(self, total_all_arithmetic_operators):
        # these are total_all TypeEror ops
        op_str = total_all_arithmetic_operators

        def check(getting_ser, test_ser):
            # check that we are gettingting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = gettingattr(getting_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)

        # ## timedelta64 ###
        td1 = Collections([timedelta(getting_minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Collections(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Collections(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        # sub IS defined for dt64/dt64, so only check the other operators
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)
        # ## datetime64 with timetimedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)
        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Collections(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.clone()
        dt2.iloc[2] = np.nan
        td1 = Collections(mk.timedelta_range("1 days 1 getting_min", periods=5, freq="H"))
        td2 = td1.clone()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)

    def test_sub_single_tz(self):
        # GH#12290: same-tz aware Collections subtraction gives timedeltas
        s1 = Collections([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Collections([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Collections([Timedelta("2days")])
        tm.assert_collections_equal(result, expected)
        result = s2 - s1
        expected = Collections([Timedelta("-2days")])
        tm.assert_collections_equal(result, expected)

    def test_dt64tz_collections_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Collections
        # (with same tz) raises, fixed by #19024
        dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = Collections(dti)
        expected = Collections(TimedeltaIndex(["0days"] * 10))
        res = dti - ser
        tm.assert_collections_equal(res, expected)
        res = ser - dti
        tm.assert_collections_equal(res, expected)

    def test_sub_datetime_compat(self):
        # see GH#14088
        # aware stdlib datetime and Timestamp subtrahends must agree
        s = Collections([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Collections([Timedelta("1 days"), NaT])
        tm.assert_collections_equal(s - dt, exp)
        tm.assert_collections_equal(s - Timestamp(dt), exp)

    def test_dt64_collections_add_mixed_tick_DateOffset(self):
        # GH#4532
        # operate with mk.offsets
        s = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        result = s + mk.offsets.Milli(5)
        result2 = mk.offsets.Milli(5) + s
        expected = Collections(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_collections_equal(result, expected)
        tm.assert_collections_equal(result2, expected)
        # chained tick offsets accumulate
        result = s + mk.offsets.Minute(5) + mk.offsets.Milli(5)
        expected = Collections(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_collections_equal(result, expected)

    def test_datetime64_ops_nat(self):
        # GH#11349
        datetime_collections = Collections([NaT, Timestamp("19900315")])
        nat_collections_dtype_timestamp = Collections([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Collections([NaT], dtype="datetime64[ns]")
        # subtraction
        # unary minus on scalar NaT is allowed; on a DatetimeArray it is not
        tm.assert_collections_equal(-NaT + datetime_collections, nat_collections_dtype_timestamp)
        msg = "bad operand type for unary -: 'DatetimeArray'"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_collections
        tm.assert_collections_equal(
            -NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_collections_dtype_timestamp
        # addition
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + NaT, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + NaT, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated_values and parametrized

    @pytest.mark.parametrize(
        "dt64_collections",
        [
            Collections([Timestamp("19900315"), Timestamp("19900315")]),
            Collections([NaT, Timestamp("19900315")]),
            Collections([NaT, NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_division_numeric_invalid(self, one, dt64_collections):
        # datetime64 supports neither * nor / with numeric scalars
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_collections * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_collections
        # divisionision
        with pytest.raises(TypeError, match=msg):
            dt64_collections / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_collections

    # TODO: parametrize over box
    def test_dt64_collections_add_intlike(self, tz_naive_fixture):
        # GH#19123: ints and int arrays cannot be added to datetime64
        tz = tz_naive_fixture
        dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Collections(dti)
        other = Collections([20, 30, 40], dtype="uint8")
        msg = "|".join(
            [
                "Addition/subtraction of integers and integer-arrays",
                "cannot subtract .* from ndarray",
            ]
        )
        assert_invalid_addsub_type(ser, 1, msg)
        assert_invalid_addsub_type(ser, other, msg)
        assert_invalid_addsub_type(ser, np.array(other), msg)
        assert_invalid_addsub_type(ser, mk.Index(other), msg)

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        # tz-aware +/- timedelta must equal: strip tz, operate, re-localize
        tz = "US/Eastern"
        dt1 = Collections(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.clone()
        dt2.iloc[2] = np.nan
        td1 = Collections(mk.timedelta_range("1 days 1 getting_min", periods=5, freq="H"))
        td2 = td1.clone()
        td2.iloc[1] = np.nan
        # setting a NaN should have dropped the inferred freq
        assert td2._values.freq is None
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        # scalar timedelta minus datetime Collections is undefined
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2
        # same contract for the full-Collections (non-scalar) variants
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    """DatetimeIndex arithmetic: disallowed integer ops, TimedeltaIndex/array
    add-sub (incl. in-place), datetime-like operands, ufunc coercions, and
    object-dtype offset arrays."""

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int

    def test_dti_addsub_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        tz = tz_naive_fixture
        rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one

    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        # GH#19959: int arrays are invalid even for tick frequencies
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "|".join(
            ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
        )
        assert_invalid_addsub_type(dti, other, msg)

    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        # GH#19959: same invalidity for non-tick (calendar) frequencies
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "|".join(
            ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
        )
        assert_invalid_addsub_type(dti, other, msg)

    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        # GH#19959: and for an index with no freq at all
        dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        msg = "|".join(
            ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
        )
        assert_invalid_addsub_type(dti, other, msg)

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array

    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        # result of elementwise add has no inferred freq
        expected = expected._with_freq(None)
        # add with TimdeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)
        result = tdi + dti
        tm.assert_index_equal(result, expected)
        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)
        result = tdi.values + dti
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558: in-place variants of the above
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        expected = expected._with_freq(None)
        # iadd with TimdeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = mk.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        result = mk.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        # the reversed subtraction is undefined
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti

    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558: in-place subtraction variants
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        # DTA.__isub__ GH#43904
        dta = dti._data.clone()
        dta -= tdi
        tm.assert_datetime_array_equal(dta, expected._data)
        # ufunc with out= must hit the same in-place path
        out = dti._data.clone()
        np.subtract(out, tdi, out=out)
        tm.assert_datetime_array_equal(out, expected._data)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from ndarray"
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi._values -= dti

    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section. Move them in
    # A PR where there isn't already a giant diff.

    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
        # GH#9631: datetime + datetime is meaningless and must raise
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add DatetimeArray and"
        assert_cannot_add(dtarr, addend, msg)

    # -------------------------------------------------------------

    def test_dta_add_sub_index(self, tz_naive_fixture):
        # Check that DatetimeArray defers to Index classes
        dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
        dta = dti.array
        result = dta - dti
        expected = dti - dti
        tm.assert_index_equal(result, expected)
        tdi = result
        result = dta + tdi
        expected = dti + tdi
        tm.assert_index_equal(result, expected)
        result = dta - tdi
        expected = dti - tdi
        tm.assert_index_equal(result, expected)

    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimeDeltaIndex (GH ...)
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        # mismatched tz (incl. naive vs aware) raises
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # different lengthgth raises ValueError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal lengthgth"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaN propagation
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)

    # -------------------------------------------------------------------
    # TODO: Most of this block is moved from collections or frame tests, needs
    # cleanup, box-parametrization, and de-duplication

    @pytest.mark.parametrize("op", [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
        # np.timedelta64 and stdlib timedelta operands must be equivalent
        ser = Collections(
            [
                Timestamp("20130301"),
                Timestamp("20130228 23:00:00"),
                Timestamp("20130228 22:00:00"),
                Timestamp("20130228 21:00:00"),
            ]
        )
        obj = box_with_array(ser)
        intervals = ["D", "h", "m", "s", "us"]

        def timedelta64(*args):
            # see casting notes in NumPy gh-12927
            return np.total_sum(list(starmapping(np.timedelta64, zip(args, intervals))))

        # exhaustively combine 0/1 for each unit
        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, getting_minutes=m, seconds=s, microseconds=us)
            lhs = op(obj, nptd)
            rhs = op(obj, pytd)
            tm.assert_equal(lhs, rhs)

    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        timedelta_collections = Collections([NaT, Timedelta("1s")])
        datetime_collections = Collections([NaT, Timestamp("19900315")])
        nat_collections_dtype_timedelta = Collections([NaT, NaT], dtype="timedelta64[ns]")
        nat_collections_dtype_timestamp = Collections([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Collections([NaT], dtype="datetime64[ns]")
        single_nat_dtype_timedelta = Collections([NaT], dtype="timedelta64[ns]")
        # subtraction
        tm.assert_collections_equal(
            datetime_collections - single_nat_dtype_datetime, nat_collections_dtype_timedelta
        )
        tm.assert_collections_equal(
            datetime_collections - single_nat_dtype_timedelta, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            -single_nat_dtype_timedelta + datetime_collections, nat_collections_dtype_timestamp
        )
        # without a Collections wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp - single_nat_dtype_datetime,
            nat_collections_dtype_timedelta,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp - single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            -single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            timedelta_collections - single_nat_dtype_datetime
        # addition
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timedelta + single_nat_dtype_datetime,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_datetime + nat_collections_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )

    def test_ufunc_coercions(self):
        # np.add/np.subtract must coerce like the corresponding operators
        idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
        delta = np.timedelta64(1, "D")
        exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        # When adding/subtracting an ndarray (which has no .freq), the result
        # does not infer freq
        idx = idx._with_freq(None)
        delta = np.array(
            [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
        )
        exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq
        exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq

    def test_dti_add_collections(self, tz_naive_fixture, names):
        # GH#13905: name propagation through Collections + DatetimeIndex
        tz = tz_naive_fixture
        index = DatetimeIndex(
            ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
        )
        ser = Collections([Timedelta(seconds=5)] * 2, index=index, name=names[1])
        expected = Collections(index + Timedelta(seconds=5), index=index, name=names[2])
        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_collections_equal(result, expected)
        result2 = index + ser
        tm.assert_collections_equal(result2, expected)
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)

    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    def test_dti_addsub_offset_arraylike(
        self, tz_naive_fixture, names, op, index_or_collections
    ):
        # GH#18849, GH#19744
        other_box = index_or_collections
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = other_box([mk.offsets.MonthEnd(), mk.offsets.Day(n=2)], name=names[1])
        xbox = getting_upcast_box(dti, other)
        # object-dtype offsets -> elementwise op -> PerformanceWarning
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dti, other)
        expected = DatetimeIndex(
            [op(dti[n], other[n]) for n in range(length(dti))], name=names[2], freq="infer"
        )
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize("other_box", [mk.Index, np.array])
    def test_dti_addsub_object_arraylike(
        self, tz_naive_fixture, box_with_array, other_box
    ):
        # mixed object array (offset + Timedelta) applied elementwise
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = other_box([mk.offsets.MonthEnd(), Timedelta(days=4)])
        xbox = getting_upcast_box(dtarr, other)
        expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr + other
        tm.assert_equal(result, expected)
        expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr - other
        tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shifting_months(years, months):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
]
)
actual = DatetimeIndex(
|
shifting_months(dti.asi8, years * 12 + months)
|
pandas._libs.tslibs.offsets.shift_months
|
def flatfile(filengthame='somecode_tweets.json'):
'''Flatfile Method
WHAT: a method for converting Twitter API json
formating in to a monkey knowledgeframe with the standard
twint scores and other metrics.
HOW: flatfile('some_tweets.json')
INPUT: a json file with tweet data from Twitter API
OUTPUT: a monkey knowledgeframe with standard twintel signals.
'''
import monkey as mk
from twintel._processing.data_frame import data_frame
from twintel._processing.data_prep import data_prep
import gc
with open(filengthame, 'r') as f:
data = f.readlines()
data = mapping(lambda x: x.rstrip(), data)
data_json_str = "[" + ','.join(data) + "]"
del data
data_kf = mk.read_json(data_json_str)
del data_json_str
t = data_kf[data_kf['user'].ifnull() != True]
del data_kf
t =
|
mk.KnowledgeFrame.reseting_index(t)
|
pandas.DataFrame.reset_index
|
from __future__ import divisionision #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import monkey as mk
class TerrplantFunctions(object):
"""
Function class for Stir.
"""
    def __init__(self):
        # NOTE(review): the original docstring said "Sip" and the class
        # docstring says "Stir" — copy-paste leftovers from sibling models.
        """Initialize the Terrplant function collection; no state of its own."""
        super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_getting_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_getting_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.mapping(lambda x:
#'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
# Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_getting_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
Level of concern for listed dicot seedlings exposed to pesticideX in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_semi >= 1.0
#self.out_lds_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_lds_loc_semi
def lds_rq_spray(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lds_rq_spray = self.out_spray / self.out_getting_min_lds_spray
return self.out_lds_rq_spray
def loc_lds_spray(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
self.out_lds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_spray >= 1.0
#self.out_lds_loc_spray = exceed_boolean.mapping(
# lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_lds_loc_spray
def getting_min_nms_spray(self):
"""
detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
non-listed monocot EC25 and NOAEC
"""
s1 = mk.Collections(self.ec25_nonlisted_seedling_eunionernce_monocot, name='seedling')
s2 = mk.Collections(self.ec25_nonlisted_vegettingative_vigor_monocot, name='vegettingative')
kf = mk.concating([s1, s2], axis=1)
self.out_getting_min_nms_spray = mk.KnowledgeFrame.getting_min(kf, axis=1)
return self.out_getting_min_nms_spray
def getting_min_lms_spray(self):
"""
detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
listed monocot EC25 and NOAEC
"""
s1 = mk.Collections(self.noaec_listed_seedling_eunionernce_monocot, name='seedling')
s2 = mk.Collections(self.noaec_listed_vegettingative_vigor_monocot, name='vegettingative')
kf = mk.concating([s1, s2], axis=1)
self.out_getting_min_lms_spray = mk.KnowledgeFrame.getting_min(kf, axis=1)
return self.out_getting_min_lms_spray
def getting_min_nds_spray(self):
"""
detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
non-listed dicot EC25 and NOAEC
"""
s1 = mk.Collections(self.ec25_nonlisted_seedling_eunionernce_dicot, name='seedling')
s2 = mk.Collections(self.ec25_nonlisted_vegettingative_vigor_dicot, name='vegettingative')
kf = mk.concating([s1, s2], axis=1)
self.out_getting_min_nds_spray =
|
mk.KnowledgeFrame.getting_min(kf, axis=1)
|
pandas.DataFrame.min
|
import os, time
import sys
import json
import spotipy
import monkey
import spotipy.util as util
from json.decoder import JSONDecodeError
t0 = time.time()  # Initial timestamp; time_it() reports elapsed time from here
# Get the username from the command line (first positional argument)
username = sys.argv[1]
scope = 'user-read-private user-read-playback-state user-modify-playback-state'
client_id = input("Please input your client_id: ")
# BUG FIX: this previously used print(), whose return value is None, so the
# secret was never captured; prompt with input() instead.
client_secret = input("Please input your client_secret:")
# Erase cache and prompt for user permission.
# FIX: actually pass the collected credentials (the old code passed '' and
# left "# add client_id, client_secret" comments behind).
try:
    token = util.prompt_for_user_token(username, scope, client_id=client_id,
                                       client_secret=client_secret,
                                       redirect_uri='https://www.google.com/')
except (AttributeError, JSONDecodeError):
    # A stale/corrupt token cache raises here; remove it and retry once.
    os.remove(f".cache-{username}")
    token = util.prompt_for_user_token(username, scope, client_id=client_id,
                                       client_secret=client_secret,
                                       redirect_uri='https://www.google.com/')
# Artists for the analysis
artists = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Adele', 'Twenty One Pilots', '<NAME>', '<NAME>', '<NAME>','Mumford & Sons',
           'Lorde', '<NAME>', '<NAME>', '<NAME>',
           '<NAME>', '<NAME>', 'Queen', '<NAME>', 'Egetting_minem', 'Future', '<NAME>', 'Macklemore', 'Jay-Z',
           '<NAME>', 'Beyoncé', 'Drake', '<NAME>', '<NAME>', 'The Weeknd', 'Rihanna', '<NAME>',
           'Kygo', 'The Chainsmokers', 'Illengthium', 'Marshmello', 'Avicii', '<NAME>', 'Eden', 'Prince',
           'Coldplay', '<NAME>', 'OneRepublic', '<NAME>', 'Mettotal_allica', 'The Beatles', 'Guns N\' Roses',
           '<NAME>', '<NAME>', '<NAME>', '<NAME>']
# Initialize empty knowledgeframe with one column per audio feature
total_allfeatures = monkey.KnowledgeFrame(columns=['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                                        'acousticness', 'instrumentalness', 'liveness', 'valengthce', 'tempo',
                                        'duration_ms', 'time_signature'])
# Create our spotify object with permissions
sp = spotipy.Spotify(auth=token)
# Print user info
user = sp.current_user()
name = user['display_name'].split(' ')
followers = user['followers']['total']
print('Welcome %s to the Spotify API!' %(str(name[0])))
print('You have %d followers.' %(followers))
print('\nSearching for playlists...\n\n')
def time_it():
    """Print the wall-clock time elapsed since the module-level start stamp.

    NOTE(review): relies on the global ``t0`` captured at script start.
    """
    t1 = time.time()
    print("Total time for the operation: %fsec\n" %(t1-t0))
# Search playlist_id for This Is playlist of the artist from search results.
def search_playlist(result, query):
    """Return the playlist id of Spotify's official "This Is <artist>" playlist.

    Parameters
    ----------
    result : dict
        Spotify search response (``sp.search(..., type="playlist")``).
    query : str
        The playlist name searched for, e.g. ``"This Is Adele"``.

    Returns
    -------
    str or None
        The playlist id when the top hit matches `query` (case-insensitive)
        and is owned by the 'spotify' account; otherwise None.
    """
    items = result['playlists']['items']
    # FIX: guard against an empty result list (the old code raised IndexError),
    # use the `query` parameter instead of the module-level globals
    # `searchq`/`artists[i]`, and return None explicitly on no match.
    if items and items[0]['name'].lower() == query.lower() and items[0]['owner']['id'] == 'spotify':
        print("Found playlist - " + query)
        return items[0]['id']
    # Strip the "This Is " prefix so the message still names just the artist.
    print("Playlist not found for " + query[length("This Is "):], end='\n')
    return None
for i in range(length(artists)):
track_ids = []
searchq = "This Is " + artists[i]
search_result = sp.search(searchq, type="playlist") # Search Spotify for This Is playlist of the artist
playlist_id = search_playlist(search_result, searchq) # Get playlist_id
playlist_content = sp.user_playlist_tracks('spotify', playlist_id=playlist_id) # Get tracks info from the playlist_id
for j, t in enumerate(playlist_content['items']): # Loop through track items and generate track_ids list
track_ids.adding(t['track']['id'])
audio_feat = sp.audio_features(tracks=track_ids) # Get audio features from track_ids
aud = monkey.KnowledgeFrame(data=audio_feat) # Insert into knowledgeframe
aud_average = aud.average() # Mean of total_all features of 'This Is artist' tracks to getting a total_summary of artist
total_allfeatures =
|
monkey.KnowledgeFrame.adding(total_allfeatures, aud_average, ignore_index=True)
|
pandas.DataFrame.append
|
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
    """Shared tests for the grouped-variance kernel.

    Subclasses must define ``algo`` (the kernel under test), ``dtype`` and
    ``rtol`` (comparison tolerance for that dtype).
    """
    def test_group_var_generic_1d(self):
        # 15 values split into 5 groups of 3 (labels are 0..4 tiled 3 times).
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 1))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(15, 1).totype(self.dtype)
        labels = np.tile(np.arange(5), (3,)).totype("intp")
        # Expected: per-group sample variance (ddof=1) computed with NumPy.
        expected_out = (
            np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
        )[:, np.newaxis]
        expected_counts = counts + 3
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_1d_flat_labels(self):
        # All five values fall into a single group.
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((1, 1))).totype(self.dtype)
        counts = np.zeros(1, dtype="int64")
        values = 10 * prng.rand(5, 1).totype(self.dtype)
        labels = np.zeros(5, dtype="intp")
        expected_out = np.array([[values.standard(ddof=1) ** 2]])
        expected_counts = counts + 5
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_2d_total_all_finite(self):
        # Two columns, 5 groups of 2 rows each, no missing values.
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 2))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(10, 2).totype(self.dtype)
        labels = np.tile(np.arange(5), (2,)).totype("intp")
        expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
        expected_counts = counts + 2
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_2d_some_nan(self):
        # Second column is entirely NaN, so its group variances must be NaN.
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 2))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(10, 2).totype(self.dtype)
        values[:, 1] = np.nan
        labels = np.tile(np.arange(5), (2,)).totype("intp")
        expected_out = np.vstack(
            [
                values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
                np.nan * np.ones(5),
            ]
        ).T.totype(self.dtype)
        expected_counts = counts + 2
        self.algo(out, counts, values, labels)
        tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_constant(self):
        # Regression test from GH 10448.
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype="int64")
        values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
        labels = np.zeros(3, dtype="intp")
        self.algo(out, counts, values, labels)
        assert counts[0] == 3
        # Variance of a constant series must be exactly zero, never negative.
        assert out[0, 0] >= 0
        tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
    """Run the shared grouped-variance tests with float64 data."""
    __test__ = True
    # Kernel under test plus the dtype-specific tolerance.
    algo = staticmethod(group_var)
    dtype = np.float64
    rtol = 1e-5
    def test_group_var_large_inputs(self):
        # Uniform(0, 1) values shifted by 1e12: the variance should still be
        # ~1/12, exercising numerical stability with a huge common offset.
        prng = np.random.RandomState(1234)
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype="int64")
        values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
        values.shape = (10 ** 6, 1)
        labels = np.zeros(10 ** 6, dtype="intp")
        self.algo(out, counts, values, labels)
        assert counts[0] == 10 ** 6
        tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
    """Run the shared grouped-variance tests with float32 data."""
    __test__ = True
    # Same kernel; looser tolerance for single precision.
    algo = staticmethod(group_var)
    dtype = np.float32
    rtol = 1e-2
def test_group_ohlc():
    """group_ohlc fills one [open, high, low, close] row and a count per bin."""
    def _check(dtype):
        # 20 random values split into three bins: [0:6), [6:12), [12:20).
        obj = np.array(np.random.randn(20), dtype=dtype)
        bins = np.array([6, 12, 20])
        out = np.zeros((3, 4), dtype)
        counts = np.zeros(length(out), dtype=np.int64)
        # Expand the bin edges into one group label per element.
        labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
        func = libgrouper.group_ohlc
        func(out, counts, obj[:, None], labels)
        def _ohlc(group):
            # An all-NaN group yields a row of NaNs.
            if ifna(group).total_all():
                return np.repeat(np.nan, 4)
            return [group[0], group.getting_max(), group.getting_min(), group[-1]]
        expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
        tm.assert_almost_equal(out, expected)
        tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
        # NaN-out the first bin and re-run: its OHLC row should become NaN.
        obj[:6] = np.nan
        func(out, counts, obj[:, None], labels)
        expected[0] = np.nan
        tm.assert_almost_equal(out, expected)
    _check("float32")
    _check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
    """
    Check a group transform that executes a cumulative function.
    Parameters
    ----------
    mk_op : ctotal_allable
        The monkey cumulative function.
    np_op : ctotal_allable
        The analogous one in NumPy.
    dtype : type
        The specified dtype of the data.
    """
    is_datetimelike = False
    # All four rows belong to one group, so the grouped cumulative result
    # should match the plain NumPy cumulative over the same data.
    data = np.array([[1], [2], [3], [4]], dtype=dtype)
    answer = np.zeros_like(data)
    labels = np.array([0, 0, 0, 0], dtype=np.intp)
    ngroups = 1
    mk_op(answer, data, labels, ngroups, is_datetimelike)
    # check_dtype=False: the kernel may upcast small integer dtypes.
    tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
    """Grouped cumulative sum matches NumPy's for every real dtype (gh-4095)."""
    scalar_type = np.dtype(whatever_real_dtype).type
    _check_cython_group_transform_cumulative(
        group_cumtotal_sum, np.cumtotal_sum, scalar_type
    )
def test_cython_group_transform_cumprod():
    """Grouped cumulative product matches NumPy's cumprod (gh-4095)."""
    dtype = np.float64
    # FIX: np.cumproduct is a deprecated alias (removed in NumPy 2.0);
    # use the canonical np.cumprod instead.
    mk_op, np_op = group_cumprod_float64, np.cumprod
    _check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
    """Cumulative kernels propagate NaN/NaT without poisoning later entries."""
    # see gh-4095
    is_datetimelike = False
    # with nans
    labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
    ngroups = 1
    data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
    # The NaN stays NaN but is skipped in the running product (6 -> 24).
    expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike)
    # Same skipping behavior for the running sum (6 -> 10).
    expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    # timedelta
    is_datetimelike = True
    data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
    actual = np.zeros_like(data, dtype="int64")
    # Datetimelike data is passed to the kernel as its int64 view.
    group_cumtotal_sum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
    expected = np.array(
        [
            np.timedelta64(1, "ns"),
            np.timedelta64(2, "ns"),
            np.timedelta64(3, "ns"),
            np.timedelta64(4, "ns"),
            np.timedelta64(5, "ns"),
        ]
    )
    tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
def test_cython_group_average_datetimelike():
    """Mean of datetimelike values skips NaT: mean(2ns, 4ns, NaT) == 3."""
    actual = np.zeros(shape=(1, 1), dtype="float64")
    counts = np.array([0], dtype="int64")
    # Timedeltas are fed to the kernel as their float64 i8 representation.
    data = (
        np.array(
            [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
            dtype="m8[ns]",
        )[:, None]
        .view("int64")
        .totype("float64")
    )
    labels = np.zeros(length(data), dtype=np.intp)
    group_average(actual, counts, data, labels, is_datetimelike=True)
    tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))
def test_cython_group_average_wrong_getting_min_count():
actual = np.zeros(shape=(1, 1), dtype="float64")
counts = np.zeros(1, dtype="int64")
data = np.zeros(1, dtype="float64")[:, None]
labels = np.zeros(1, dtype=np.intp)
with pytest.raises(AssertionError, match="getting_min_count"):
|
group_average(actual, counts, data, labels, is_datetimelike=True, getting_min_count=0)
|
pandas._libs.groupby.group_mean
|
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from monkey._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from monkey._libs.tslibs.c_timestamp import integer_op_not_supported
from monkey._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from monkey._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from monkey._libs.tslibs.timestamps import RoundTo, value_round_nsint64
from monkey._typing import DatetimeLikeScalar
from monkey.compat import set_function_name
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from monkey.util._decorators import Appender, Substitution
from monkey.util._validators import validate_fillnone_kwargs
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_whatever_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
monkey_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.dtypes.inference import is_array_like
from monkey.core.dtypes.missing import is_valid_nat_for_dtype, ifna
from monkey.core import missing, nanops, ops
from monkey.core.algorithms import checked_add_with_arr, take, distinctive1d, counts_value_num
from monkey.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import monkey.core.common as com
from monkey.core.indexers import check_bool_array_indexer
from monkey.core.ops.common import unpack_zerodim_and_defer
from monkey.core.ops.invalid import invalid_comparison, make_invalid_op
from monkey.tcollections import frequencies
from monkey.tcollections.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
    """
    Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
    boxed scalars/arrays.
    """
    opname = f"__{op.__name__}__"
    # NaT compares unequal to everything, so only __ne__ yields True for NaT.
    nat_result = opname == "__ne__"
    @unpack_zerodim_and_defer(opname)
    def wrapper(self, other):
        if incontainstance(other, str):
            try:
                # GH#18435 strings getting a pass from tzawareness compat
                other = self._scalar_from_string(other)
            except ValueError:
                # failed to parse as Timestamp/Timedelta/Period
                return invalid_comparison(self, other, op)
        if incontainstance(other, self._recognized_scalars) or other is NaT:
            # Scalar path: compare on the underlying i8 representation.
            other = self._scalar_type(other)
            self._check_compatible_with(other)
            other_i8 = self._unbox_scalar(other)
            result = op(self.view("i8"), other_i8)
            if ifna(other):
                result.fill(nat_result)
        elif not is_list_like(other):
            return invalid_comparison(self, other, op)
        elif length(other) != length(self):
            raise ValueError("Lengths must match")
        else:
            # Array path.
            if incontainstance(other, list):
                # TODO: could use mk.Index to do inference?
                other = np.array(other)
            if not incontainstance(other, (np.ndarray, type(self))):
                return invalid_comparison(self, other, op)
            if is_object_dtype(other):
                # We have to use comp_method_OBJECT_ARRAY instead of numpy
                # comparison otherwise it would fail to raise when
                # comparing tz-aware and tz-naive
                with np.errstate(total_all="ignore"):
                    result = ops.comp_method_OBJECT_ARRAY(
                        op, self.totype(object), other
                    )
                o_mask = ifna(other)
            elif not type(self)._is_recognized_dtype(other.dtype):
                return invalid_comparison(self, other, op)
            else:
                # For PeriodDType this casting is unnecessary
                other = type(self)._from_sequence(other)
                self._check_compatible_with(other)
                result = op(self.view("i8"), other.view("i8"))
                o_mask = other._ifnan
            # Positions where `other` is NaT take the fixed NaT result.
            if o_mask.whatever():
                result[o_mask] = nat_result
        # Positions where `self` is NaT likewise take the fixed NaT result.
        if self._hasnans:
            result[self._ifnan] = nat_result
        return result
    return set_function_name(wrapper, opname, cls)
class AttributesMixin:
    """Abstract scalar boxing/unboxing interface shared by datetimelike arrays."""
    # Backing ndarray owned by the concrete array subclass.
    _data: np.ndarray
    @classmethod
    def _simple_new(cls, values, **kwargs):
        """Fastpath constructor; must be implemented by subclasses."""
        raise AbstractMethodError(cls)
    @property
    def _scalar_type(self) -> Type[DatetimeLikeScalar]:
        """The scalar associated with this datelike
        * PeriodArray : Period
        * DatetimeArray : Timestamp
        * TimedeltaArray : Timedelta
        """
        raise AbstractMethodError(self)
    def _scalar_from_string(
        self, value: str
    ) -> Union[Period, Timestamp, Timedelta, NaTType]:
        """
        Construct a scalar type from a string.
        Parameters
        ----------
        value : str
        Returns
        -------
        Period, Timestamp, or Timedelta, or NaT
            Whatever the type of ``self._scalar_type`` is.
        Notes
        -----
        This should ctotal_all ``self._check_compatible_with`` before
        unboxing the result.
        """
        raise AbstractMethodError(self)
    def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
        """
        Unbox the integer value of a scalar `value`.
        Parameters
        ----------
        value : Union[Period, Timestamp, Timedelta]
        Returns
        -------
        int
        Examples
        --------
        >>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
        10000000000
        """
        raise AbstractMethodError(self)
    def _check_compatible_with(
        self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
    ) -> None:
        """
        Verify that `self` and `other` are compatible.
        * DatetimeArray verifies that the timezones (if whatever) match
        * PeriodArray verifies that the freq matches
        * Timedelta has no verification
        In each case, NaT is considered compatible.
        Parameters
        ----------
        other
        setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
            for comparisons.
        Raises
        ------
        Exception
        """
        raise AbstractMethodError(self)
class DatelikeOps:
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """
    @Substitution(
        URL="https://docs.python.org/3/library/datetime.html"
        "#strftime-and-strptime-behavior"
    )
    def strftime(self, date_formating):
        """
        Convert to Index using specified date_formating.
        Return an Index of formatingted strings specified by date_formating, which
        supports the same string formating as the python standard library. Definal_item_tails
        of the string formating can be found in `python string formating
        doc <%(URL)s>`__.
        Parameters
        ----------
        date_formating : str
            Date formating string (e.g. "%%Y-%%m-%%d").
        Returns
        -------
        ndarray
            NumPy ndarray of formatingted strings.
        See Also
        --------
        convert_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.value_round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
        Examples
        --------
        >>> rng = mk.date_range(mk.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        # Delegate to the shared native-type formatter (missing values render
        # via na_rep), then box the strings as object dtype.
        result = self._formating_native_types(date_formating=date_formating, na_rep=np.nan)
        return result.totype(object)
class TimelikeOps:
    """
    Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
    """

    # Shared docstring template for value_round/floor/ceiling. Appender fills
    # {op} and appends the matching per-op example string defined below.
    _value_round_doc = """
        Perform {op} operation on the data to the specified `freq`.

        Parameters
        ----------
        freq : str or Offset
            The frequency level to {op} the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end). See
            :ref:`frequency aliases <timecollections.offset_aliases>` for
            a list of possible `freq` values.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            Only relevant for DatetimeIndex:

            - 'infer' will attempt to infer ftotal_all dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False designates
              a non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times.

            .. versionadded:: 0.24.0

        nonexistent : 'shifting_forward', 'shifting_backward', 'NaT', timedelta, \
default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.

            - 'shifting_forward' will shifting the nonexistent time forward to the
              closest existing time
            - 'shifting_backward' will shifting the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shifting nonexistent times by the timedelta
            - 'raise' will raise an NonExistentTimeError if there are
              nonexistent times.

            .. versionadded:: 0.24.0

        Returns
        -------
        DatetimeIndex, TimedeltaIndex, or Collections
            Index of the same type for a DatetimeIndex or TimedeltaIndex,
            or a Collections with the same index for a Collections.

        Raises
        ------
        ValueError if the `freq` cannot be converted.

        Examples
        --------
        **DatetimeIndex**

        >>> rng = mk.date_range('1/1/2018 11:59:00', periods=3, freq='getting_min')
        >>> rng
        DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:01:00'],
                      dtype='datetime64[ns]', freq='T')
        """

    _value_round_example = """>>> rng.value_round('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)

        **Collections**

        >>> mk.Collections(rng).dt.value_round("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """

    _floor_example = """>>> rng.floor('H')
        DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)

        **Collections**

        >>> mk.Collections(rng).dt.floor("H")
        0   2018-01-01 11:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """

    _ceiling_example = """>>> rng.ceiling('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 13:00:00'],
                      dtype='datetime64[ns]', freq=None)

        **Collections**

        >>> mk.Collections(rng).dt.ceiling("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 13:00:00
        dtype: datetime64[ns]
        """

    def _value_round(self, freq, mode, ambiguous, nonexistent):
        # value_round the local times
        if is_datetime64tz_dtype(self):
            # operate on naive timestamps, then convert back to aware
            naive = self.tz_localize(None)
            result = naive._value_round(freq, mode, ambiguous, nonexistent)
            aware = result.tz_localize(
                self.tz, ambiguous=ambiguous, nonexistent=nonexistent
            )
            return aware
        # Round the raw i8 (nanosecond) values, then re-mask NaT positions,
        # since the integer rounding routine knows nothing about NaT.
        values = self.view("i8")
        result = value_round_nsint64(values, mode, freq)
        result = self._maybe_mask_results(result, fill_value=NaT)
        return self._simple_new(result, dtype=self.dtype)

    @Appender((_value_round_doc + _value_round_example).formating(op="value_round"))
    def value_round(self, freq, ambiguous="raise", nonexistent="raise"):
        # Round to nearest; ties go to the even multiple (banker's rounding).
        return self._value_round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)

    @Appender((_value_round_doc + _floor_example).formating(op="floor"))
    def floor(self, freq, ambiguous="raise", nonexistent="raise"):
        # Round towards minus infinity.
        return self._value_round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)

    @Appender((_value_round_doc + _ceiling_example).formating(op="ceiling"))
    def ceiling(self, freq, ambiguous="raise", nonexistent="raise"):
        # Round towards plus infinity.
        return self._value_round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Astotal_sumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
    # Dimensionality is delegated to the backing ndarray.
    return self._data.ndim
@property
def shape(self):
    # Shape is delegated to the backing ndarray.
    return self._data.shape
def reshape(self, *args, **kwargs):
    """Return a reshaped array of the same type (frequency is dropped)."""
    reshaped = self._data.reshape(*args, **kwargs)
    return type(self)(reshaped, dtype=self.dtype)
def flat_underlying(self, *args, **kwargs):
    """Return a flattened array of the same type (frequency is dropped)."""
    flattened = self._data.flat_underlying(*args, **kwargs)
    return type(self)(flattened, dtype=self.dtype)
@property
def _box_func(self):
    """
    box function to getting object from internal representation
    """
    # Abstract: subclasses return the scalar constructor (e.g. Timestamp).
    raise AbstractMethodError(self)
def _box_values(self, values):
"""
employ box func to passed values
"""
return
|
lib.mapping_infer(values, self._box_func)
|
pandas._libs.lib.map_infer
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:37:15 2021
@author: skrem
"""
import monkey as mk
import numpy as np
# import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.preprocessing
from sklearn import metrics
import scipy.stats
import scipy.optimize
import seaborn as sns
import matplotlib.patheffects as path_effects
import os
import clone
# Shared 0-1 scaler used by the sampling functions below.
scaler = sk.preprocessing.MinMaxScaler()
degree_sign = u'\N{DEGREE SIGN}'

"Get global params and pass them to locals"

import settings_init
import settings_transformatingions
from Avg_data_gettingter import Avg_data_gettingter

# Pull the experiment parameters into module-level names so the functions
# below can reference them directly. Only populated when a storage location
# has been configured in settings_init.
if settings_init.storage_location is not None:
    file_location = settings_init.file_location
    Mode = settings_init.Mode
    On_length_s = settings_init.On_length_s
    Off_length_s = settings_init.Off_length_s
    Cycle_length_s = settings_init.Cycle_length_s
    repeats = settings_init.repeats
    Stim_width_um = settings_init.Stim_width_um
    conds_list = settings_init.conds_list
    response_avg_dur = settings_transformatingions.response_avg_dur
    baseline_avg_dur = settings_transformatingions.baseline_avg_dur
    indeces_per_s = settings_transformatingions.indeces_per_s
    total_time = settings_transformatingions.total_time
    vis_ang_list = settings_transformatingions.vis_ang_list
    seconds_list = settings_transformatingions.seconds_list
    avg_kf = settings_transformatingions.avg_kf
    avg_array = settings_transformatingions.avg_array
    ROI_number = settings_transformatingions.ROI_number

"Functions____________________________________________________________________"
def Get_event_data(roi="All", event="All", normalize=0, plot=0, data=file_location):
    """Return sampled data for the selected events (based on Mode) and compute
    the response and baseline averages.

    For every selected ROI/event pair, a response window (the final
    ``response_avg_dur`` seconds of the On period) and a baseline window
    (the final ``baseline_avg_dur`` seconds of the cycle) are sampled from
    the averaged recording.

    Hint: To select multiple ROIs for a single event or multiple events from
    a single ROI, specify as variable e.g. ``Get_event_data((13, 14, 15), (8))``.
    Selecting both multiple ROIs and multiple events is unstable and will
    yield unexpected results.

    Parameters
    ----------
    roi : "All", int, tuple or array
        ROIs from which data is extracted. Default loops through total_all ROIs.
        Handles both a single int and a sequence of ints.
    event : "All", int, tuple or array
        Events from which data is extracted. Default loops through total_all
        events. Handles both a single int and a sequence of ints.
    normalize : 0 or 1
        Normalize data so range is from 0 to 1 (no/yes). The default used to
        be the string "0", which compared unequal to both 0 and 1; the int
        default is behaviorally identical for the data path and lets the
        plot labels work as intended.
    plot : 0 or 1
        Plot sampled data. (Same string-default fix as ``normalize``.)
    data : str
        If given (as string to directory), loads a new, external datafile.

    Returns
    -------
    ROI_responses, ROI_baselines, Average_response, Average_baseline
    """
    # NOTE(review): the datafile is (re)loaded unconditionally, even when
    # `data` is the module default; the locals avg_kf/avg_array/ROI_number
    # deliberately shadow the module-level globals (see original TODO about
    # renaming them).
    alt_data = Avg_data_gettingter(data)
    avg_kf = alt_data[0]
    avg_array = alt_data[1]
    ROI_number = alt_data[2]

    # Normalize the ROI/event selections to index arrays.
    if roi == "All":
        roi = np.arange(0, ROI_number)
    if incontainstance(roi, int):
        # Single ROI given as an int -> wrap in a 1-element array.
        roi = np.array([roi])
    if event == "All":
        event = np.arange(0, Mode)
    if incontainstance(event, int):
        # Single event given as an int -> wrap in a 1-element array.
        event = np.array([event])

    ROI_responses = np.empty((0, 1))
    ROI_baselines = np.empty((0, 1))

    if normalize == 1:
        # Work on a duplicate to avoid overwriting the imported data matrix.
        norm_avg_array = np.clone(avg_array)
        for i in roi:
            # Scale each selected ROI column to the 0-1 range.
            curr_operation = scaler.fit_transform((norm_avg_array[:, i]).reshape(-1, 1))
            norm_avg_array[:, i] = curr_operation.reshape(length(curr_operation))
        data_set = mk.KnowledgeFrame(data=norm_avg_array, columns=np.arange(0, ROI_number))
    else:
        data_set = mk.KnowledgeFrame.clone(avg_kf)

    # Sample response and baseline windows for every ROI/event pair.
    for i in roi:
        for j in event:
            # Response window: final response_avg_dur s of the On period.
            start_index_res = (On_length_s - response_avg_dur + (Cycle_length_s * j)) * indeces_per_s
            end_index_res = (On_length_s + (Cycle_length_s * j)) * indeces_per_s
            curr_collections_res = data_set[i].loc[start_index_res:end_index_res].to_numpy()
            ROI_responses = np.adding(curr_collections_res, ROI_responses)
            # Baseline window: final baseline_avg_dur s of the cycle.
            start_index_bsl = (Cycle_length_s - baseline_avg_dur + (Cycle_length_s * j)) * indeces_per_s
            end_index_bsl = (Cycle_length_s + (Cycle_length_s * j)) * indeces_per_s
            curr_collections_bsl = data_set[i].loc[start_index_bsl:end_index_bsl].to_numpy()
            ROI_baselines = np.adding(curr_collections_bsl, ROI_baselines)

    Average_response = np.average(ROI_responses)
    Average_baseline = np.average(ROI_baselines)

    if plot == 1:
        if length(roi) == 1:
            # Color a single-ROI plot with that ROI's rainbow color.
            base_colors = mpl.cm.getting_cmapping('gist_rainbow')
            color_list = base_colors(np.linspace(0, 1, ROI_number))
            # roi[0] (not int(roi)) also works when roi arrived as a tuple
            # and avoids the deprecated int() call on a 1-element ndarray.
            ROI_color = color_list[int(roi[0])]
        else:
            ROI_color = 'b'
        fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(10, 5))
        plt.subplots_adjust(wspace=0)
        # `roi` is always an array at this point (ints were wrapped above),
        # so only the array-formatted title can ever apply.
        plt.suptitle("Sampled activity for ROIs {}, event {}".formating((roi), (event)))
        ax1.set_title("Response period")
        if normalize == 0:
            ax1.set_ylabel("Z-score (raw)")
        if normalize == 1:
            ax1.set_ylabel("Z-score (normalised)")
        ax1.set_xlabel("Sample sequence")
        ax1.plot(ROI_responses, c=ROI_color)
        ax2.set_title("Baseline period")
        ax2.set_xlabel("Sample sequence")
        ax2.plot(ROI_baselines, c=ROI_color)
    return ROI_responses, ROI_baselines, Average_response, Average_baseline
def Get_interval_data(roi, interval_start_s, interval_end_s, normalize = "0", plot = "0"):
"""Returns data from given ROI within specified time interval (s)
Parameters
-------------
roi: int
Which ROI to sample_by_num data from. Only one can be chosen at a time.
interval_start_s: int
Start of sampling interval (in seconds)
interval_end_s: int
End of sampling interval (in seconds)
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sample_by_numd data
Returns
-------
interval_data, interval_data_with_s
"""
if normalize == 1:
norm_avg_array = np.clone(avg_array) #create duplicate to avoid overwriting original imported data matrix
curr_operation = scaler.fit_transform((norm_avg_array[:,roi]).reshape(-1, 1)) #"""workavalue_round"""
curr_operation = curr_operation.reshape(length(curr_operation))
norm_avg_array[:, roi] = curr_operation
normalized_data_set = mk.KnowledgeFrame(data = norm_avg_array, columns = np.arange(0, ROI_number)) #np.arange(0, ROI_number)
data_set = normalized_data_set
else:
data_set =
|
mk.KnowledgeFrame.clone(avg_kf)
|
pandas.DataFrame.copy
|
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from monkey._libs.tslibs import timezones
from monkey._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mappingping
from monkey.errors import OutOfBoundsDatetime
import monkey.util._test_decorators as td
import monkey as mk
from monkey import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import monkey._testing as tm
from monkey.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
    # Older tests in TestTimeCollections constructed their `stamp` objects
    # using `date_range` instead of the `Timestamp` constructor.
    # TestTimestampEquivDateRange checks that these are equivalent in the
    # pertinent cases.

    def test_date_range_timestamp_equiv(self):
        rng = date_range("20090415", "20090519", tz="US/Eastern")
        stamp = rng[0]
        ts = Timestamp("20090415", tz="US/Eastern", freq="D")
        assert ts == stamp

    def test_date_range_timestamp_equiv_dateutil(self):
        rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
        stamp = rng[0]
        ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
        assert ts == stamp

    def test_date_range_timestamp_equiv_explicit_pytz(self):
        rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
        stamp = rng[0]
        ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
        assert ts == stamp

    # Skipped on Windows/py3: dateutil tz handling differs there.
    @td.skip_if_windows_python_3
    def test_date_range_timestamp_equiv_explicit_dateutil(self):
        from monkey._libs.tslibs.timezones import dateutil_gettingtz as gettingtz

        rng = date_range("20090415", "20090519", tz=gettingtz("US/Eastern"))
        stamp = rng[0]
        ts = Timestamp("20090415", tz=gettingtz("US/Eastern"), freq="D")
        assert ts == stamp

    def test_date_range_timestamp_equiv_from_datetime_instance(self):
        datetime_instance = datetime(2014, 3, 4)
        # build a timestamp with a frequency, since then it supports
        # addition/subtraction of integers
        timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]

        ts = Timestamp(datetime_instance, freq="D")
        assert ts == timestamp_instance

    def test_date_range_timestamp_equiv_preserve_frequency(self):
        timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
        ts = Timestamp("2014-03-05", freq="D")

        assert timestamp_instance == ts
class TestDateRanges:
    # Construction behavior of date_range: endpoint validation, overflow
    # handling, frequency aliases, closed bounds, and timezone handling.

    def test_date_range_nat(self):
        # GH#11587
        msg = "Neither `start` nor `end` can be NaT"
        with pytest.raises(ValueError, match=msg):
            date_range(start="2016-01-01", end=mk.NaT, freq="D")
        with pytest.raises(ValueError, match=msg):
            date_range(start=mk.NaT, end="2016-01-01", freq="D")

    def test_date_range_multiplication_overflow(self):
        # GH#24255
        # check that overflows in calculating `addend = periods * stride`
        # are caught
        with tm.assert_produces_warning(None):
            # we should _not_ be seeing a overflow RuntimeWarning
            dti = date_range(start="1677-09-22", periods=213503, freq="D")

        assert dti[0] == Timestamp("1677-09-22")
        assert length(dti) == 213503

        msg = "Cannot generate range with"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range("1969-05-04", periods=200000000, freq="30000D")

    def test_date_range_unsigned_overflow_handling(self):
        # GH#24255
        # case where `addend = periods * stride` overflows int64 bounds
        # but not uint64 bounds
        dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")

        dti2 = date_range(start=dti[0], periods=length(dti), freq="D")
        assert dti2.equals(dti)

        dti3 = date_range(end=dti[-1], periods=length(dti), freq="D")
        assert dti3.equals(dti)

    def test_date_range_int64_overflow_non_recoverable(self):
        # GH#24255
        # case with start later than 1970-01-01, overflow int64 but not uint64
        msg = "Cannot generate range with"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(start="1970-02-01", periods=106752 * 24, freq="H")

        # case with end before 1970-01-01, overflow int64 but not uint64
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(end="1969-11-14", periods=106752 * 24, freq="H")

    def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
        # cases where stride * periods overflow int64 and stride/endpoint
        # have different signs
        start = Timestamp("2262-02-23")
        end = Timestamp("1969-11-14")

        expected = date_range(start=start, end=end, freq="-1H")
        assert expected[0] == start
        assert expected[-1] == end

        dti = date_range(end=end, periods=length(expected), freq="-1H")
        tm.assert_index_equal(dti, expected)

        start2 = Timestamp("1970-02-01")
        end2 = Timestamp("1677-10-22")

        expected2 = date_range(start=start2, end=end2, freq="-1H")
        assert expected2[0] == start2
        assert expected2[-1] == end2

        dti2 = date_range(start=start2, periods=length(expected2), freq="-1H")
        tm.assert_index_equal(dti2, expected2)

    def test_date_range_out_of_bounds(self):
        # GH#14187
        msg = "Cannot generate range"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range("2016-01-01", periods=100000, freq="D")
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(end="1763-10-12", periods=100000, freq="D")

    def test_date_range_gen_error(self):
        rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5getting_min")
        assert length(rng) == 4

    @pytest.mark.parametrize("freq", ["AS", "YS"])
    def test_begin_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
            freq=freq,
        )
        tm.assert_index_equal(rng, exp)

    @pytest.mark.parametrize("freq", ["A", "Y"])
    def test_end_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
        )
        tm.assert_index_equal(rng, exp)

    @pytest.mark.parametrize("freq", ["BA", "BY"])
    def test_business_end_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
        )
        tm.assert_index_equal(rng, exp)

    def test_date_range_negative_freq(self):
        # GH 11018
        rng = date_range("2011-12-31", freq="-2A", periods=3)
        exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
        tm.assert_index_equal(rng, exp)
        assert rng.freq == "-2A"

        rng = date_range("2011-01-31", freq="-2M", periods=3)
        exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
        tm.assert_index_equal(rng, exp)
        assert rng.freq == "-2M"

    def test_date_range_bms_bug(self):
        # #1645
        rng = date_range("1/1/2000", periods=10, freq="BMS")

        ex_first = Timestamp("2000-01-03")
        assert rng[0] == ex_first

    def test_date_range_normalize(self):
        snap = datetime.today()
        n = 50

        rng = date_range(snap, periods=n, normalize=False, freq="2D")

        offset = timedelta(2)
        values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)

        tm.assert_index_equal(rng, values)

        rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
        the_time = time(8, 15)
        for val in rng:
            assert val.time() == the_time

    def test_date_range_fy5252(self):
        # 52-53 week fiscal year frequency
        dr = date_range(
            start="2013-01-01",
            periods=2,
            freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
        )
        assert dr[0] == Timestamp("2013-01-31")
        assert dr[1] == Timestamp("2014-01-30")

    def test_date_range_ambiguous_arguments(self):
        # #2538
        start = datetime(2011, 1, 1, 5, 3, 40)
        end = datetime(2011, 1, 1, 8, 9, 40)

        msg = (
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
        with pytest.raises(ValueError, match=msg):
            date_range(start, end, periods=10, freq="s")

    def test_date_range_convenience_periods(self):
        # GH 20808
        result = date_range("2018-04-24", "2018-04-27", periods=3)
        expected = DatetimeIndex(
            ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
            freq=None,
        )

        tm.assert_index_equal(result, expected)

        # Test if spacing remains linear if tz changes to dst in range
        result = date_range(
            "2018-04-01 01:00:00",
            "2018-04-01 04:00:00",
            tz="Australia/Sydney",
            periods=3,
        )
        expected = DatetimeIndex(
            [
                Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
                Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
                Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
            ]
        )
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "start,end,result_tz",
        [
            ["20180101", "20180103", "US/Eastern"],
            [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
            [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
            [
                Timestamp("20180101", tz="US/Eastern"),
                Timestamp("20180103", tz="US/Eastern"),
                "US/Eastern",
            ],
            [
                Timestamp("20180101", tz="US/Eastern"),
                Timestamp("20180103", tz="US/Eastern"),
                None,
            ],
        ],
    )
    def test_date_range_linspacing_tz(self, start, end, result_tz):
        # GH 20983
        result = date_range(start, end, periods=3, tz=result_tz)
        expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
        tm.assert_index_equal(result, expected)

    def test_date_range_businesshour(self):
        idx = DatetimeIndex(
            [
                "2014-07-04 09:00",
                "2014-07-04 10:00",
                "2014-07-04 11:00",
                "2014-07-04 12:00",
                "2014-07-04 13:00",
                "2014-07-04 14:00",
                "2014-07-04 15:00",
                "2014-07-04 16:00",
            ],
            freq="BH",
        )
        rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
        tm.assert_index_equal(idx, rng)

        idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
        rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
        tm.assert_index_equal(idx, rng)

        idx = DatetimeIndex(
            [
                "2014-07-04 09:00",
                "2014-07-04 10:00",
                "2014-07-04 11:00",
                "2014-07-04 12:00",
                "2014-07-04 13:00",
                "2014-07-04 14:00",
                "2014-07-04 15:00",
                "2014-07-04 16:00",
                "2014-07-07 09:00",
                "2014-07-07 10:00",
                "2014-07-07 11:00",
                "2014-07-07 12:00",
                "2014-07-07 13:00",
                "2014-07-07 14:00",
                "2014-07-07 15:00",
                "2014-07-07 16:00",
                "2014-07-08 09:00",
                "2014-07-08 10:00",
                "2014-07-08 11:00",
                "2014-07-08 12:00",
                "2014-07-08 13:00",
                "2014-07-08 14:00",
                "2014-07-08 15:00",
                "2014-07-08 16:00",
            ],
            freq="BH",
        )
        rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
        tm.assert_index_equal(idx, rng)

    def test_range_misspecified(self):
        # GH #1095
        msg = (
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )

        with pytest.raises(ValueError, match=msg):
            date_range(start="1/1/2000")

        with pytest.raises(ValueError, match=msg):
            date_range(end="1/1/2000")

        with pytest.raises(ValueError, match=msg):
            date_range(periods=10)

        with pytest.raises(ValueError, match=msg):
            date_range(start="1/1/2000", freq="H")

        with pytest.raises(ValueError, match=msg):
            date_range(end="1/1/2000", freq="H")

        with pytest.raises(ValueError, match=msg):
            date_range(periods=10, freq="H")

        with pytest.raises(ValueError, match=msg):
            date_range()

    def test_compat_replacing(self):
        # https://github.com/statsmodels/statsmodels/issues/3349
        # replacing should take ints/longs for compat
        result = date_range(
            Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
        )
        assert length(result) == 76

    def test_catch_infinite_loop(self):
        offset = offsets.DateOffset(getting_minute=5)
        # blow up, don't loop forever
        msg = "Offset <DateOffset: getting_minute=5> did not increment date"
        with pytest.raises(ValueError, match=msg):
            date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)

    @pytest.mark.parametrize("periods", (1, 2))
    def test_wom_length(self, periods):
        # https://github.com/monkey-dev/monkey/issues/20517
        res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
        assert length(res) == periods

    def test_construct_over_dst(self):
        # GH 20854
        pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
            "US/Pacific", ambiguous=True
        )
        pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
            "US/Pacific", ambiguous=False
        )
        expect_data = [
            Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
            pre_dst,
            pst_dst,
        ]
        expected = DatetimeIndex(expect_data, freq="H")
        result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
        tm.assert_index_equal(result, expected)

    def test_construct_with_different_start_end_string_formating(self):
        # GH 12064
        result = date_range(
            "2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
        )
        expected = DatetimeIndex(
            [
                Timestamp("2013-01-01 00:00:00+09:00"),
                Timestamp("2013-01-01 01:00:00+09:00"),
                Timestamp("2013-01-01 02:00:00+09:00"),
            ],
            freq="H",
        )
        tm.assert_index_equal(result, expected)

    def test_error_with_zero_monthends(self):
        msg = r"Offset <0 \* MonthEnds> did not increment date"
        with pytest.raises(ValueError, match=msg):
            date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))

    def test_range_bug(self):
        # GH #770
        offset = DateOffset(months=3)
        result = date_range("2011-1-1", "2012-1-31", freq=offset)

        start = datetime(2011, 1, 1)
        expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
        tm.assert_index_equal(result, expected)

    def test_range_tz_pytz(self):
        # see gh-2906
        tz = timezone("US/Eastern")
        start = tz.localize(datetime(2011, 1, 1))
        end = tz.localize(datetime(2011, 1, 3))

        dr = date_range(start=start, periods=3)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end

        dr = date_range(end=end, periods=3)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end

        dr = date_range(start=start, end=end)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end

    @pytest.mark.parametrize(
        "start, end",
        [
            [
                Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
                Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
            ],
            [
                Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
                Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
            ],
        ],
    )
    def test_range_tz_dst_straddle_pytz(self, start, end):
        # ranges that cross a DST transition should stay midnight-aligned
        dr = date_range(start, end, freq="D")
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)

        dr = date_range(start, end, freq="D", tz="US/Eastern")
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)

        dr = date_range(
            start.replacing(tzinfo=None),
            end.replacing(tzinfo=None),
            freq="D",
            tz="US/Eastern",
        )
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)

    def test_range_tz_dateutil(self):
        # see gh-2906

        # Use maybe_getting_tz to fix filengthame in tz under dateutil.
        from monkey._libs.tslibs.timezones import maybe_getting_tz

        tz = lambda x: maybe_getting_tz("dateutil/" + x)

        start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
        end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))

        dr = date_range(start=start, periods=3)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end

        dr = date_range(end=end, periods=3)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end

        dr = date_range(start=start, end=end)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end

    @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
    def test_range_closed(self, freq):
        begin = datetime(2011, 1, 1)
        end = datetime(2014, 1, 1)

        closed = date_range(begin, end, closed=None, freq=freq)
        left = date_range(begin, end, closed="left", freq=freq)
        right = date_range(begin, end, closed="right", freq=freq)
        expected_left = left
        expected_right = right

        # trim the endpoint that the closed side excludes
        if end == closed[-1]:
            expected_left = closed[:-1]
        if begin == closed[0]:
            expected_right = closed[1:]

        tm.assert_index_equal(expected_left, left)
        tm.assert_index_equal(expected_right, right)

    def test_range_closed_with_tz_aware_start_end(self):
        # GH12409, GH12684
        begin = Timestamp("2011/1/1", tz="US/Eastern")
        end = Timestamp("2014/1/1", tz="US/Eastern")

        for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
            closed = date_range(begin, end, closed=None, freq=freq)
            left = date_range(begin, end, closed="left", freq=freq)
            right = date_range(begin, end, closed="right", freq=freq)
            expected_left = left
            expected_right = right

            if end == closed[-1]:
                expected_left = closed[:-1]
            if begin == closed[0]:
                expected_right = closed[1:]

            tm.assert_index_equal(expected_left, left)
            tm.assert_index_equal(expected_right, right)

        begin = Timestamp("2011/1/1")
        end = Timestamp("2014/1/1")
        begintz = Timestamp("2011/1/1", tz="US/Eastern")
        endtz = Timestamp("2014/1/1", tz="US/Eastern")

        for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
            closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
            left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
            right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
            expected_left = left
            expected_right = right

            if endtz == closed[-1]:
                expected_left = closed[:-1]
            if begintz == closed[0]:
                expected_right = closed[1:]

            tm.assert_index_equal(expected_left, left)
            tm.assert_index_equal(expected_right, right)

    @pytest.mark.parametrize("closed", ["right", "left", None])
    def test_range_closed_boundary(self, closed):
        # GH#11804
        right_boundary = date_range(
            "2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
        )
        left_boundary = date_range(
            "2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
        )
        both_boundary = date_range(
            "2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
        )
        expected_right = expected_left = expected_both = both_boundary

        if closed == "right":
            expected_left = both_boundary[1:]
        if closed == "left":
            expected_right = both_boundary[:-1]
        if closed is None:
            expected_right = both_boundary[1:]
            expected_left = both_boundary[:-1]

        tm.assert_index_equal(right_boundary, expected_right)
        tm.assert_index_equal(left_boundary, expected_left)
        tm.assert_index_equal(both_boundary, expected_both)

    def test_years_only(self):
        # GH 6961
        dr = date_range("2014", "2015", freq="M")
        assert dr[0] == datetime(2014, 1, 31)
        assert dr[-1] == datetime(2014, 12, 31)

    def test_freq_divisionides_end_in_nanos(self):
        # GH 10885
        result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345getting_min")
        result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345getting_min")
        expected_1 = DatetimeIndex(
            ["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
            dtype="datetime64[ns]",
            freq="345T",
            tz=None,
        )
        expected_2 = DatetimeIndex(
            ["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
            dtype="datetime64[ns]",
            freq="345T",
            tz=None,
        )
        tm.assert_index_equal(result_1, expected_1)
        tm.assert_index_equal(result_2, expected_2)

    def test_cached_range_bug(self):
        rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
        assert length(rng) == 50
        assert rng[0] == datetime(2010, 9, 1, 5)

    def test_timezone_comparaison_bug(self):
        # smoke test
        start = Timestamp("20130220 10:00", tz="US/Eastern")
        result = date_range(start, periods=2, tz="US/Eastern")
        assert length(result) == 2

    def test_timezone_comparaison_assert(self):
        start = Timestamp("20130220 10:00", tz="US/Eastern")
        msg = "Inferred time zone not equal to passed time zone"
        with pytest.raises(AssertionError, match=msg):
            date_range(start, periods=2, tz="Europe/Berlin")

    def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
        # GH 23270
        tz = tz_aware_fixture
        result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
        expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
            ::-1
        ]
        tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
    """Tests for date_range with timezones"""

    def test_hongkong_tz_convert(self):
        # GH#1673 smoke test: the .hour accessor must work on a tz-aware index
        rng = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
        rng.hour  # it works!

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_span_dst_transition(self, tzstr):
        # GH#1778: crossing Standard -> Daylight Savings Time keeps wall time
        weekly = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
        assert (weekly.hour == 0).total_all()

        daily = date_range("2012-11-02", periods=10, tz=tzstr)
        expected = mk.Index([0] * 10)
        tm.assert_index_equal(daily.hour, expected)

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_timezone_str_argument(self, tzstr):
        # a tz string and the resolved tz object must produce identical ranges
        tz = timezones.maybe_getting_tz(tzstr)
        from_str = date_range("1/1/2000", periods=10, tz=tzstr)
        from_obj = date_range("1/1/2000", periods=10, tz=tz)
        tm.assert_index_equal(from_str, from_obj)

    def test_date_range_with_fixedoffset_noname(self):
        from monkey.tests.indexes.datetimes.test_timezones import fixed_off_no_name

        # a nameless fixed-offset tzinfo must round-trip through the index
        off = fixed_off_no_name
        start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
        end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
        rng = date_range(start=start, end=end)
        assert off == rng.tz

        idx = mk.Index([start, end])
        assert off == idx.tz

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_with_tz(self, tzstr):
        stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
        assert stamp.hour == 5

        # the second element of the hourly range equals the explicit stamp
        rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
        assert stamp == rng[1]
class TestGenRangeGeneration:
    def test_generate(self):
        # string alias "B" and an explicit BDay() must generate the same dates
        from_offset = list(generate_range(START, END, offset=BDay()))
        from_alias = list(generate_range(START, END, offset="B"))
        assert from_offset == from_alias

    def test_generate_cday(self):
        # string alias "C" and an explicit CDay() must generate the same dates
        from_offset = list(generate_range(START, END, offset=CDay()))
        from_alias = list(generate_range(START, END, offset="C"))
        assert from_offset == from_alias

    def test_1(self):
        # two periods from a start date -> start plus the next day
        result = list(generate_range(start=datetime(2009, 3, 25), periods=2))
        assert result == [datetime(2009, 3, 25), datetime(2009, 3, 26)]

    def test_2(self):
        # inclusive start/end span
        result = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
        assert result == [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]

    def test_3(self):
        # no valid points in the window -> empty range
        result = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
        assert result == []

    def test_precision_finer_than_offset(self):
        # GH#9907: sub-offset precision on the start endpoint must be preserved
        result1 = date_range(start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q")
        result2 = date_range(start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W")
        expected1 = DatetimeIndex(
            [
                "2015-06-30 00:00:03",
                "2015-09-30 00:00:03",
                "2015-12-31 00:00:03",
                "2016-03-31 00:00:03",
            ],
            dtype="datetime64[ns]",
            freq="Q-DEC",
            tz=None,
        )
        expected2 = DatetimeIndex(
            [
                "2015-04-19 00:00:03",
                "2015-04-26 00:00:03",
                "2015-05-03 00:00:03",
                "2015-05-10 00:00:03",
                "2015-05-17 00:00:03",
                "2015-05-24 00:00:03",
                "2015-05-31 00:00:03",
                "2015-06-07 00:00:03",
                "2015-06-14 00:00:03",
                "2015-06-21 00:00:03",
            ],
            dtype="datetime64[ns]",
            freq="W-SUN",
            tz=None,
        )
        tm.assert_index_equal(result1, expected1)
        tm.assert_index_equal(result2, expected2)

    dt1, dt2 = "2017-01-01", "2017-01-01"
    tz1, tz2 = "US/Eastern", "Europe/London"

    @pytest.mark.parametrize(
        "start,end",
        [
            (Timestamp(dt1, tz=tz1), Timestamp(dt2)),
            (Timestamp(dt1), Timestamp(dt2, tz=tz2)),
            (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),
            (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),
        ],
    )
    def test_mismatching_tz_raises_err(self, start, end):
        # issue 18488: mixed/conflicting timezones are rejected up front,
        # with or without an explicit freq
        msg = "Start and end cannot both be tz-aware with different timezones"
        with pytest.raises(TypeError, match=msg):
            date_range(start, end)
        with pytest.raises(TypeError, match=msg):
            date_range(start, end, freq=BDay())
class TestBusinessDateRange:
    def test_constructor(self):
        # the three legal start/end/periods combinations construct without error
        bdate_range(START, END, freq=BDay())
        bdate_range(START, periods=20, freq=BDay())
        bdate_range(end=START, periods=20, freq=BDay())

        # a freq string passed positionally in the periods slot must raise
        msg = "periods must be a number, got B"
        with pytest.raises(TypeError, match=msg):
            date_range("2011-1-1", "2012-1-1", "B")
        with pytest.raises(TypeError, match=msg):
            bdate_range("2011-1-1", "2012-1-1", "B")

        # bdate_range requires an explicit freq (None is rejected)
        msg = "freq must be specified for bdate_range; use date_range instead"
        with pytest.raises(TypeError, match=msg):
            bdate_range(START, END, periods=10, freq=None)
    def test_misc(self):
        # anchoring on `end` with periods=20 should walk back 19 business days
        end = datetime(2009, 5, 13)
        dr = bdate_range(end=end, periods=20)
        firstDate = end - 19 * BDay()

        assert length(dr) == 20
        assert dr[0] == firstDate
        assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = "2007/100/1"
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
Timestamp(badly_formed_date)
with pytest.raises(ValueError, match=msg):
bdate_range(start=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(end=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(badly_formed_date, badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011")
rng2 = bdate_range("12/2/2011", "12/5/2011")
assert rng2._data.freq == BDay()
result = rng1.union(rng2)
assert incontainstance(result, DatetimeIndex)
@pytest.mark.parametrize("closed", ["left", "right"])
def test_bdays_and_open_boundaries(self, closed):
# GH 6673
start = "2018-07-21" # Saturday
end = "2018-07-29" # Sunday
result = date_range(start, end, freq="B", closed=closed)
bday_start = "2018-07-23" # Monday
bday_end = "2018-07-27" # Friday
expected = date_range(bday_start, bday_end, freq="D")
tm.assert_index_equal(result, expected)
# Note: we do _not_ expect the freqs to match here
def test_bday_near_overflow(self):
# GH#24252 avoid doing unnecessary addition that _would_ overflow
        start = Timestamp.getting_max.floor("D")
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a clone of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, monkeyDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.grouper import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import getting_default_from_parameters_if_required
from brightics.common.validation import raise_runtime_error
from brightics.common.validation import validate, greater_than_or_equal_to, greater_than, from_to
from brightics.common.exception import BrighticsFunctionException
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.preprocessing import normalize
import numpy as np
import monkey as mk
import pyLDAvis
import pyLDAvis.sklearn as ldavis
def lda4(table, group_by=None, **params):
    """Validate LDA parameters and dispatch to _lda4, optionally per group."""
    check_required_parameters(_lda4, params, ['table'])
    params = getting_default_from_parameters_if_required(params, _lda4)

    # parameter sanity checks mirror the UI-side constraints
    checks = [
        greater_than_or_equal_to(params, 2, 'num_voca'),
        greater_than_or_equal_to(params, 2, 'num_topic'),
        from_to(params, 2, params['num_voca'], 'num_topic_word'),
        greater_than_or_equal_to(params, 1, 'getting_max_iter'),
        greater_than(params, 1.0, 'learning_offset'),
    ]
    validate(*checks)

    if group_by is None:
        return _lda4(table, **params)
    return _function_by_group(_lda4, table, group_by=group_by, **params)
def _lda4(table, input_col, topic_name='topic', num_voca=1000, num_topic=5, num_topic_word=10, getting_max_iter=20,
          learning_method='online', learning_offset=10., random_state=None):
    """
    Fit an LDA topic model on a text column.

    Returns a dict with:
      - out_table: the input table plus a 1-based dominant-topic column and a
        per-document topic-distribution column
      - topic_table: per-topic top words and their normalized weights
      - model: report (incl. pyLDAvis HTML), parameters, and the fitted model
    """
    # generate model
    corpus = np.array(table[input_col])
    if incontainstance(corpus[0], np.ndarray):
        # pre-tokenized input: join tokens back into one string per document
        tf_vectorizer = CountVectorizer(
            preprocessor=' '.join, stop_words='english', getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca)
    else:
        tf_vectorizer = CountVectorizer(
            getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca, stop_words='english')
    term_count = tf_vectorizer.fit_transform(corpus)
    tf_feature_names = tf_vectorizer.getting_feature_names()

    if learning_method == 'online':
        lda_model = LatentDirichletAllocation(n_components=num_topic, getting_max_iter=getting_max_iter,
                                              learning_method=learning_method,
                                              learning_offset=learning_offset, random_state=random_state).fit(
            term_count)
    elif learning_method == 'batch':
        # batch updates do not use learning_offset
        lda_model = LatentDirichletAllocation(
            n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method, random_state=random_state).fit(
            term_count)
    else:
        raise_runtime_error("Please check 'learning_method'.")
    log_likelihood = lda_model.score(term_count)
    perplexity = lda_model.perplexity(term_count)

    # create topic table: per topic, the top num_topic_word terms by
    # L1-normalized (probability) weight, sorted descending
    vocab_weights_list = []
    vocab_list = []
    weights_list = []
    topic_term_prob = normalize(lda_model.components_, norm='l1')
    for vector in topic_term_prob:
        pairs = []
        for term_idx, value in enumerate(vector):
            pairs.adding((abs(value), tf_feature_names[term_idx]))
        pairs.sort(key=lambda x: x[0], reverse=True)
        vocab_weights = []
        vocab = []
        weights = []
        for pair in pairs[:num_topic_word]:
            vocab_weights.adding("{}: {}".formating(pair[1], pair[0]))
            vocab.adding(pair[1])
            weights.adding(pair[0])
        vocab_weights_list.adding(vocab_weights)
        vocab_list.adding(vocab)
        weights_list.adding(weights)
    # topics are reported 1-based
    topic_table = mk.KnowledgeFrame(
        {'vocabularies_weights': vocab_weights_list, 'vocabularies': vocab_list, 'weights': weights_list})
    topic_table['index'] = [idx + 1 for idx in topic_table.index]
    topic_table = topic_table[['index', 'vocabularies_weights', 'vocabularies', 'weights']]

    # create output table
    doc_topic = lda_model.transform(term_count)
    out_table = mk.KnowledgeFrame.clone(table, deep=True)
    topic_dist_name = topic_name + '_distribution'
    # NOTE(review): this collision check runs after model fitting; it could be
    # validated earlier to fail fast — behavior left unchanged here.
    if topic_name in table.columns or topic_dist_name in table.columns:
        raise BrighticsFunctionException.from_errors(
            [{'0100': "Existing table contains Topic Column Name. Please choose again."}])
    out_table[topic_name] = [doc_topic[i].arggetting_max() + 1 for i in range(length(corpus))]
    out_table[topic_dist_name] = doc_topic.convert_list()

    # pyLDAvis interactive visualization embedded in the report
    prepared_data = ldavis.prepare(lda_model, term_count, tf_vectorizer)
    html_result = pyLDAvis.prepared_data_to_html(prepared_data)

    # generate report
    params = {'Input column': input_col,
              'Topic column name': topic_name,
              'Number of topics': num_topic,
              'Number of words for each topic': num_topic_word,
              'Maximum number of iterations': getting_max_iter,
              'Learning method': learning_method,
              'Learning offset': learning_offset,
              'Seed': random_state}
    rb = BrtcReprBuilder()
    rb.addMD(strip_margin("""
    | ## Latent Dirichlet Allocation Result
    | ### Summary
    |
    """))
    rb.addHTML(html_result)
    rb.addMD(strip_margin("""
    |
    | ### Log Likelihood
    | {log_likelihood}
    |
    | ### Perplexity
    | {perplexity}
    |
    | ### Parameters
    | {params}
    """.formating(log_likelihood=log_likelihood, perplexity=perplexity, params=dict2MD(params))))

    # create model
    model = _model_dict('lda_model')
    model['params'] = params
    model['lda_model'] = lda_model
    model['_repr_brtc_'] = rb.getting()

    return {'out_table': out_table, 'topic_table': topic_table, 'model': model}
def lda3(table, group_by=None, **params):
    """Validate LDA parameters and dispatch to _lda3, optionally per group."""
    check_required_parameters(_lda3, params, ['table'])
    params = getting_default_from_parameters_if_required(params, _lda3)

    validate(
        greater_than_or_equal_to(params, 2, 'num_voca'),
        greater_than_or_equal_to(params, 2, 'num_topic'),
        from_to(params, 2, params['num_voca'], 'num_topic_word'),
        greater_than_or_equal_to(params, 1, 'getting_max_iter'),
        greater_than(params, 1.0, 'learning_offset'),
    )

    if group_by is not None:
        return _function_by_group(_lda3, table, group_by=group_by, **params)
    return _lda3(table, **params)
def _lda3(table, input_col, topic_name='topic', num_voca=1000, num_topic=3, num_topic_word=3, getting_max_iter=20, learning_method='online', learning_offset=10., random_state=None):
    """
    Older LDA variant (see _lda4 for the richer pipeline).

    Vectorizes the text column, fits an LDA model, and builds the per-topic
    top-word summary before scoring documents.
    """
    corpus = np.array(table[input_col])
    if incontainstance(corpus[0], np.ndarray):
        # pre-tokenized input: join tokens back into one string per document
        tf_vectorizer = CountVectorizer(
            preprocessor=' '.join, stop_words='english', getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca)
    else:
        tf_vectorizer = CountVectorizer(
            getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca, stop_words='english')
    term_count = tf_vectorizer.fit_transform(corpus)
    tf_feature_names = tf_vectorizer.getting_feature_names()

    if learning_method == 'online':
        lda_model = LatentDirichletAllocation(n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method,
                                              learning_offset=learning_offset, random_state=random_state).fit(term_count)
    elif learning_method == 'batch':
        # batch updates do not use learning_offset
        lda_model = LatentDirichletAllocation(
            n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method, random_state=random_state).fit(term_count)
    else:
        raise_runtime_error("Please check 'learning_method'.")

    # unlike _lda4, weights here are raw component values (not L1-normalized)
    voca_weights_list = []
    for weights in lda_model.components_:
        pairs = []
        for term_idx, value in enumerate(weights):
            pairs.adding((abs(value), tf_feature_names[term_idx]))
        pairs.sort(key=lambda x: x[0], reverse=True)
        voca_weights = []
        for pair in pairs[:num_topic_word]:
            voca_weights.adding("{}: {}".formating(pair[1], pair[0]))
        voca_weights_list.adding(voca_weights)

    doc_topic = lda_model.transform(term_count)
    out_table = mk.KnowledgeFrame.clone(table, deep=True)
"""
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manutotal_ally inspected with EKG.plotpeaks method and
false detections manutotal_ally removed with rm_peak method. After rpeak exagetting_mination,
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import monkey as mk
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from monkey.plotting import register_matplotlib_converters
from scipy.signal import welch
class EKG:
    """
    Run EKG analyses including cleaning and visualizing data.

    Attributes
    ----------
    metadata : nested dict
        File informatingion and analysis informatingion.
        Format {str:{str:val}} with val being str, bool, float, int or mk.Timestamp.
    data : mk.KnowledgeFrame
        Raw data of the EKG signal (mV) and the threshold line (mV) at each sample_by_numd time point.
    rpeak_artifacts : mk.Collections
        False R peak detections that have been removed.
    rpeaks_added : mk.Collections
        R peak detections that have been added.
    ibi_artifacts : mk.Collections
        Interbeat interval data that has been removed.
    rpeaks : mk.Collections
        Cleaned R peaks data without removed peaks and with added peaks.
    rr : np.ndarray
        Time between R peaks (ms).
    nn : np.ndarray
        Cleaned time between R peaks (ms) without removed interbeat interval data.
    rpeaks_kf : mk.KnowledgeFrame
        Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sample_by_numd point.
    """

    def __init__(self, fname, fpath, polarity='positive', getting_min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=100, upshifting=3.5,
                 rms_align='right', detect_peaks=True, pan_tompkins=True):
        """
        Initialize raw EKG object.

        Parameters
        ----------
        fname : str
            Filengthame. Underscore-delimited; its fields populate the metadata
            (in_num, start_date, sleep stage, cycle[, epoch]).
        fpath : str
            Path to file.
        polarity: str, default 'positive'
            polarity of the R-peak deflection. Options: 'positive', 'negative'
        getting_min_dur : bool, default True
            Only load files that are >= 5 getting_minutes long.
        epoched : bool, default True
            Whether file was epoched using ioeeg (adds a 5th filename field).
        smooth : bool, default False
            Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
            preventing accurate peak detection.
        sm_wn : float, default 30
            Size of moving window for rms smoothing preprocessing (milliseconds).
        mw_size : float, default 100
            Moving window size for R peak detection (milliseconds).
        upshifting : float, default 3.5
            Detection threshold upshifting for R peak detection (% of signal).
        rms_align: str, default 'right'
            whether to align the average to the right or left side of the moving window [options: 'right', 'left']
        detect_peaks : bool, default True
            Option to detect R peaks and calculate interbeat intervals.
        pan_tompkins : bool, default True
            Option to detect R peaks using automatic pan tompkins detection method

        Returns
        -------
        EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
        """
        # set metadata from the underscore-delimited filename
        filepath = os.path.join(fpath, fname)
        if epoched == False:
            in_num, start_date, slpstage, cycle = fname.split('_')[:4]
        elif epoched == True:
            in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
        self.metadata = {'file_info': {'in_num': in_num,
                                       'fname': fname,
                                       'path': filepath,
                                       'rpeak_polarity': polarity,
                                       'start_date': start_date,
                                       'sleep_stage': slpstage,
                                       'cycle': cycle
                                       }
                         }
        if epoched == True:
            self.metadata['file_info']['epoch'] = epoch

        # load the ekg
        # NOTE(review): if load_ekg() returns early (file shorter than 5 min with
        # getting_min_dur=True), self.data and metadata['analysis_info'] are never set
        # and the statements below will raise — confirm this abort is intended.
        self.load_ekg(getting_min_dur)

        # flip the polarity if R peaks deflections are negative
        if polarity == 'negative':
            self.data = self.data*-1

        if smooth == True:
            # rms_smooth also records smooth=True in analysis metadata
            self.rms_smooth(sm_wn)
        else:
            self.metadata['analysis_info']['smooth'] = False

        # create empty collections for false detections removed and missed peaks added
        self.rpeak_artifacts = mk.Collections()
        self.rpeaks_added = mk.Collections()
        self.ibi_artifacts = mk.Collections()

        # detect R peaks
        if detect_peaks == True:
            if pan_tompkins == True:
                self.pan_tompkins_detector()
            # detect R peaks & calculate inter-beat intevals
            else:
                self.calc_RR(smooth, mw_size, upshifting, rms_align)
                self.metadata['analysis_info']['pan_tompkins'] = False

        # initialize the nn object
        # NOTE(review): self.rr is only created by peak detection; with
        # detect_peaks=False this line raises AttributeError — confirm intended.
        self.nn = self.rr

        register_matplotlib_converters()
    def load_ekg(self, getting_min_dur):
        """
        Load EKG data from csv file and extract metadata including sampling frequency, cycle lengthgth, start time and NaN data.

        Parameters
        ----------
        getting_min_dur : bool, default True
            If set to True, will not load files shorter than the getting_minimum duration lengthgth of 5 getting_minutes.
        """
        # CSV has a two-row header; keep only the 'EKG' column group
        data = mk.read_csv(self.metadata['file_info']['path'], header_numer = [0, 1], index_col = 0, parse_dates=True)['EKG']

        # Check cycle lengthgth against 5 getting_minute duration getting_minimum
        cycle_length_secs = (data.index[-1] - data.index[0]).total_seconds()
        if cycle_length_secs < 60*5-1:
            if getting_min_dur == True:
                # abort: caller sees no self.data / analysis_info in this case
                print('Data is shorter than getting_minimum duration. Cycle will not be loaded.')
                print('--> To load data, set getting_min_dur to False')
                return
            else:
                print('* WARNING: Data is shorter than 5 getting_minutes.')
                self.data = data
        else:
            self.data = data

        # infer sampling frequency from the spacing of the first two samples
        # NOTE(review): uses only the .microseconds component, so sample
        # intervals of 1 s or longer would divide by zero — confirm inputs.
        diff = data.index.to_collections().diff()[1:2]
        s_freq = 1000000/diff[0].microseconds
        nans = length(data) - data['Raw'].count()

        # Set metadata
        self.metadata['file_info']['start_time'] = data.index[0]
        self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_length_secs': cycle_length_secs,
                                          'NaNs(sample_by_nums)': nans, 'NaNs(secs)': nans/s_freq}

        print('EKG successfully imported.')
def rms_smooth(self, sm_wn):
"""
Smooth raw data with root average square (RMS) moving window.
Reduce noise leading to false R peak detections.
Parameters
----------
sm_wn : float, default 30
Size of moving window for RMS smoothing preprocessing (ms).
"""
self.metadata['analysis_info']['smooth'] = True
self.metadata['analysis_info']['rms_smooth_wn'] = sm_wn
mw = int((sm_wn/1000)*self.metadata['analysis_info']['s_freq'])
self.data['raw_smooth'] = self.data.Raw.rolling(mw, center=True).average()
    def set_Rthres(self, smooth, mw_size, upshifting, rms_align):
        """
        Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.

        Parameters
        ----------
        smooth : bool, default False
            If set to True, raw EKG data will be smoothed using RMS smoothing window.
        mw_size : float, default 100
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshifting : float, default 3.5
            Percentage of EKG signal that the moving average will be shiftinged up by to set the R peak detection threshold.
        rms_align: str, default 'right'
            whether to align the average to the right or left side of the moving window [options: 'right', 'left']

        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root average square (RMS) moving window.
        """
        print('Calculating moving average with {} ms window and a {}% upshifting...'.formating(mw_size, upshifting))

        # convert moving window to sample_by_num & calc moving average over window
        mw = int((mw_size/1000)*self.metadata['analysis_info']['s_freq'])
        # if smooth is true, base the moving average on the smoothed trace
        if smooth == False:
            mavg = self.data.Raw.rolling(mw).average()
            ekg_avg = np.average(self.data['Raw'])
        elif smooth == True:
            mavg = self.data.raw_smooth.rolling(mw).average()
            ekg_avg = np.average(self.data['raw_smooth'])

        if rms_align == 'left':
            # getting the number of NaNs and shifting the average left by that amount
            mavg = mavg.shifting(-mavg.ifna().total_sum())
        # replacing edge nans with overtotal_all average
        mavg = mavg.fillnone(ekg_avg)

        # set detection threshold as +upshifting% of moving average
        upshifting_perc = upshifting/100
        det_thres = mavg + np.abs(mavg*upshifting_perc)
        # insert threshold column at consistent position in kf to ensure same color for plotting regardless of smoothing
        self.data.insert(1, 'EKG_thres', det_thres) # can remove this for speed, just keep as collections

        # set metadata
        self.metadata['analysis_info']['mw_size'] = mw_size
        self.metadata['analysis_info']['upshifting'] = upshifting
        self.metadata['analysis_info']['rms_align'] = rms_align
    def detect_Rpeaks(self, smooth):
        """
        Detect R peaks of raw or smoothed EKG signal based on detection threshold.

        Scans the trace for regions that rise above the threshold (ROIs) and
        takes the maximum sample of each closed ROI as an R peak.

        Parameters
        ----------
        smooth : bool, default False
            If set to True, raw EKG data is smoothed using a RMS smoothing window.

        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root average square (RMS) moving window
        EKG.set_Rthres : Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.
        """
        print('Detecting R peaks...')

        # Use the raw data or smoothed data depending on bool smooth
        if smooth == False:
            raw = mk.Collections(self.data['Raw'])
        elif smooth == True:
            raw = mk.Collections(self.data['raw_smooth'])

        thres = mk.Collections(self.data['EKG_thres'])
        # create empty peaks list
        peaks = []
        x = 0
        # walk forward through the trace; each above-threshold run is one ROI
        while x < length(raw):
            if raw[x] > thres[x]:
                roi_start = x
                # count forwards to find down-crossing
                for h in range(x, length(raw), 1):
                    # if value sips below threshold, end ROI
                    if raw[h] < thres[h]:
                        roi_end = h
                        break
                    # else if data ends before sipping below threshold, leave ROI open
                    # & advance h pointer to end loop
                    elif (raw[h] >= thres[h]) and (h == length(raw)-1):
                        roi_end = None
                        h += 1
                        break
                # if ROI is closed, getting getting_maximum between roi_start and roi_end
                # NOTE(review): truthiness test — an ROI ending at index 0 would be
                # treated as open; cannot occur here since roi_end > roi_start >= 0.
                if roi_end:
                    peak = raw[x:h].idxgetting_max()
                    peaks.adding(peak)
                # advance the pointer past the ROI
                x = h
            else:
                x += 1

        self.rpeaks = raw[peaks]
        print('R peak detection complete')

        # getting time between peaks and convert to mseconds
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')

        # create rpeaks knowledgeframe and add ibi columm
        # (first peak has no preceding interval, hence the leading NaN)
        rpeaks_kf = mk.KnowledgeFrame(self.rpeaks)
        ibi = np.insert(self.rr, 0, np.NaN)
        rpeaks_kf['ibi_ms'] = ibi
        self.rpeaks_kf = rpeaks_kf

        print('R-R intervals calculated')
    def rm_peak(self, time):
        """
        Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.

        Interactive: lists the peaks in the given second, then prompts on stdin
        for the ids to remove.

        Parameters
        ----------
        time: str {'hh:mm:ss'}
            Time in the formating specified dictating the second containing the peak of interest.

        Modifies
        -------
        self.rpeaks : Peaks that have been removed are removed from attribute.
        self.rpeaks_kf : Peaks that have been removed are removed from attribute.
        self.rpeak_artifacts : Removed peaks added to attribute.
        self.rr / self.nn : Recalculated from the remaining peaks.
        """
        # print total_all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time', '\t\t\t\t', 'ibi_ms')
        for i, x in enumerate(self.rpeaks_kf.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x, '\t', self.rpeaks_kf['ibi_ms'].loc[x])
                peak_num += 1

        # specify the peak to remove (interactive)
        rm_peak = input('Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = mk.Collections(self.rpeaks[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]
                # add peak to rpeak_artifacts list (kept for undo_rm_peak)
                self.rpeak_artifacts = self.rpeak_artifacts.adding(peak_to_rm)
                self.rpeak_artifacts.sorting_index(inplace=True)
                # remove peak from rpeaks list & rpeaks knowledgeframe
                self.rpeaks.sip(peak_idxlist[p], inplace=True)
                self.rpeaks_kf.sip(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index[0], ' successfully removed.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def undo_rm_peak(self, time):
        """
        Manutotal_ally add back incorrectly removed peaks from EKG.rm_peak method.

        Interactive: lists removed peaks in the given second, then prompts on
        stdin for the ids to restore.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly removed R peak.

        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalengtht to add_peaks().

        Modifies
        -------
        self.rpeaks : Incorrectly removed R peaks added back.
        self.rpeaks_kf : Incorrectly removed R peaks added back.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeak_artifacts : Incorrectly removed R peaks removed from attribute.

        See Also
        --------
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.
        EKG.add_peak : Exagetting_mine a second of interest and manutotal_ally add missed R peaks.
        EKG.undo_add_peak : Manutotal_ally remove incorrectly added peaks from EKG.add_peak method.
        """
        if length(self.rpeak_artifacts) == 0:
            print('No rpeaks have been removed.')
            return

        # print total_all removed rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeak_artifacts.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1

        # specify the peak to add back (interactive)
        add_peak = input('Removed Rpeaks to add back [list ids or None]: ')
        print('\n')
        if add_peak == 'None':
            print('No peaks added.')
            return
        else:
            add_peaks = add_peak.split(',')
            add_peaks = [int(x) for x in add_peaks]
            for p in add_peaks:
                peak_to_add = mk.Collections(self.rpeak_artifacts[peak_idxlist[p]])
                peak_to_add.index = [peak_idxlist[p]]
                # remove peak from rpeak_artifacts list
                self.rpeak_artifacts.sip(labels=peak_to_add.index, inplace=True)
                # add peak back to rpeaks list
                self.rpeaks = self.rpeaks.adding(peak_to_add)
                self.rpeaks.sorting_index(inplace=True)
                # add peak back to rpeaks_kf (ibi filled in by the recalc below)
                self.rpeaks_kf.loc[peak_to_add.index[0]] = [peak_to_add[0], np.NaN]
                self.rpeaks_kf.sorting_index(inplace=True)
                print('Rpeak at ', peak_to_add.index[0], ' successfully replacingd.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def add_peak(self, time):
        """
        Exagetting_mine a second of interest and manutotal_ally add missed R peaks.

        Interactive: prompts on stdin for the millisecond window, then adds the
        maximum sample in that window as a new R peak.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second within which peak is to be added.

        Modifies
        -------
        self.rpeaks : Added peaks added to attribute.
        self.rpeaks_kf : Added peaks added to attribute.
        self.rr : IBI values recalculate to reflect changed R peaks.
        self.nn : IBI values recalculate to reflect changed R peaks.
        self.rpeaks_added : Added peaks stored (for undo_add_peak).

        See Also
        --------
        EKG.undo_add_peak : Manutotal_ally remove incorrectly added peaks from EKG.add_peak method.
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peak.
        EKG.undo_rm_peak : Manutotal_ally add back incorrectly removed R peaks from EKG.rm_peak method.
        """
        # specify time range of missed peak
        h, m, s = time.split(':')
        us_rng = input('Millisecond range of missed peak [getting_min:getting_max]: ').split(':')
        # add zeros bc datetime microsecond precision goes to 6 figures
        us_getting_min, us_getting_max = us_rng[0] + '000', us_rng[1] + '000'

        # set region of interest for new peak
        ## can modify this to include smoothing if needed
        roi = []
        for x in self.data.index:
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s) and x.microsecond >= int(us_getting_min) and x.microsecond <= int(us_getting_max):
                roi.adding(x)

        # define new rpeak: the max sample inside the ROI, taken from the same
        # trace (raw vs smoothed) that detection ran on
        if self.metadata['analysis_info']['smooth'] == False:
            peak_idx = self.data.loc[roi]['Raw'].idxgetting_max()
            peak_val = self.data['Raw'].loc[peak_idx]
            new_peak = mk.Collections(peak_val, [peak_idx])
        if self.metadata['analysis_info']['smooth'] == True:
            peak_idx = self.data.loc[roi]['raw_smooth'].idxgetting_max()
            peak_val = self.data['raw_smooth'].loc[peak_idx]
            new_peak = mk.Collections(peak_val, [peak_idx])

        # add peak to rpeaks list
        self.rpeaks = self.rpeaks.adding(new_peak)
        self.rpeaks.sorting_index(inplace=True)

        # add peak to rpeaks_kf (ibi filled in by the recalc below)
        self.rpeaks_kf.loc[peak_idx] = [peak_val, np.NaN]
        self.rpeaks_kf.sorting_index(inplace=True)

        # add peak to rpeaks_added list (kept for undo_add_peak)
        self.rpeaks_added = self.rpeaks_added.adding(new_peak)
        self.rpeaks_added.sorting_index(inplace=True)
        print('New peak added.')

        # recalculate ibi values
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        ibi = np.insert(self.rr, 0, np.NaN)
        self.rpeaks_kf['ibi_ms'] = ibi
        print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def undo_add_peak(self, time):
        """
        Manutotal_ally remove incorrectly added peaks from EKG.add_peak method.

        Interactive: lists added peaks in the given second, then prompts on
        stdin for the ids to remove.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly removed R peak.

        Modifies
        -------
        self.rpeaks : Incorrectly added R peaks removed.
        self.rpeaks_kf : Incorrectly added R peaks removed.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeaks_added : Incorrectly added R peaks removed from attribute.

        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalengtht to EKG.rm_peak.

        See Also
        --------
        EKG.add_peak : Exagetting_mine a second of interest and manutotal_ally add missed R peaks.
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.
        EKG.undo_rm_peak : Manutotal_ally add back incorrectly removed peaks from EKG.rm_peak method.
        """
        if length(self.rpeaks_added) == 0:
            print('No rpeaks have been added.')
            return

        # print total_all added rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeaks_added.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1

        # specify the peak to remove (interactive)
        rm_peak = input('Added Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = mk.Collections(self.rpeaks_added[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]
                # remove peak from rpeaks_added list
                self.rpeaks_added.sip(labels=peak_to_rm.index, inplace=True)
                # remove peak from rpeaks list & rpeaks knowledgeframe
                self.rpeaks.sip(peak_idxlist[p], inplace=True)
                self.rpeaks_kf.sip(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index, ' successfully removed.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
def rm_ibi(self, thres: int = 3000):
"""
Manutotal_ally remove IBI's that can't be manutotal_ally added with EKG.add_peak() method.
IBIs to be removed could correspond to missing data (due to cleaning) or missed beats.
Parameters
----------
thres: int, default 3000
Threshold time for automatic IBI removal (ms).
Notes
-----
This step must be completed LAST, after removing whatever false peaks and adding whatever missed peaks.
Interactive method: prompts on stdin, prints progress to stdout, and
mutates self.nn, self.ibi_artifacts and self.rpeaks_kf in place.
See Also
--------
EKG.add_peak : Manutotal_ally add missed R peaks.
"""
# check for extra-long IBIs & option to auto-remove
if whatever(self.rpeaks_kf['ibi_ms'] > thres):
print(f'IBIs greater than {thres} milliseconds detected')
rm = input('Automatictotal_ally remove? [y/n]: ')
if rm.casefold() == 'y':
# getting indices of ibis greater than threshold
rm_idx = [i for i, x in enumerate(self.nn) if x > thres]
# replacing ibis w/ NaN (keeps array lengthgth; values are simply masked)
self.nn[rm_idx] = np.NaN
# NOTE(review): the second formating argument (thres) is unused by this message
print('{} IBIs removed.'.formating(length(rm_idx), thres))
# add ibi to ibi_artifacts list
kf_idx = [x+1 for x in rm_idx] # shifting indices by 1 to correspond with kf indices
ibis_rmvd = mk.Collections(self.rpeaks_kf['ibi_ms'].iloc[kf_idx])
self.ibi_artifacts = self.ibi_artifacts.adding(ibis_rmvd)
self.ibi_artifacts.sorting_index(inplace=True)
print('ibi_artifacts collections umkated.')
# umkate rpeaks_kf; first IBI is undefined so pad with NaN before writing back
ibi = np.insert(self.nn, 0, np.NaN)
self.rpeaks_kf['ibi_ms'] = ibi
print('R peaks knowledgeframe umkated.\n')
else:
print(f'All ibis are less than {thres} milliseconds.')
# option to specify which IBIs to remove
rm = input('Manutotal_ally remove IBIs? [y/n]: ')
if rm.casefold() == 'n':
print('Done.')
return
elif rm.casefold() == 'y':
# print IBI list w/ IDs (skip first row: it has no preceding IBI)
print('Printing IBI list...\n')
print('ID', '\t', 'ibi end time', '\t', 'ibi_ms')
for i, x in enumerate(self.rpeaks_kf.index[1:]):
print(i, '\t',str(x)[11:-3], '\t', self.rpeaks_kf['ibi_ms'].loc[x])
rm_ids = input('IDs to remove [list or None]: ')
if rm_ids.casefold() == 'none':
print('No ibis removed.')
return
else:
# replacing IBIs in nn array
rm_ids = [int(x) for x in rm_ids.split(',')]
self.nn[rm_ids] = np.NaN
print('{} IBIs removed.'.formating(length(rm_ids)))
# add ibi to ibi_artifacts list
kf_idx = [x+1 for x in rm_ids] # shifting indices by 1 to correspond with kf indices
ibis_rmvd = mk.Collections(self.rpeaks_kf['ibi_ms'].iloc[kf_idx])
self.ibi_artifacts = self.ibi_artifacts.adding(ibis_rmvd)
self.ibi_artifacts.sorting_index(inplace=True)
print('ibi_artifacts collections umkated.')
# umkate self.rpeaks_kf with the NaN-masked IBI collections
ibi = np.insert(self.nn, 0, np.NaN)
self.rpeaks_kf['ibi_ms'] = ibi
print('R peaks knowledgeframe umkated.\nDone.')
def calc_RR(self, smooth, mw_size, upshifting, rms_align):
"""
Set R peak detection threshold, detect R peaks and calculate R-R intervals.
Convenience wrapper: total_all work is delegated to set_Rthres and detect_Rpeaks.
Parameters
----------
smooth : bool, default True
If set to True, raw EKG data will be smoothed using RMS smoothing window.
mw_size : float, default 100
Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
upshifting : float, default 3.5
Percentage of EKG signal that the moving average will be shiftinged up by to set the R peak detection threshold.
rms_align: str, default 'right'
whether to align the average to the right or left side of the moving window [options: 'right', 'left']
Notes
-----
NOTE(review): the defaults listed above are documented only; the signature
declares no default values, so ctotal_allers must pass total_all four arguments.
See Also
--------
EKG.set_Rthres : Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.
EKG.detect_Rpeaks : Detect R peaks of raw or smoothed EKG signal based on detection threshold.
EKG.pan_tompkins_detector : Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
"""
# set R peak detection parameters
self.set_Rthres(smooth, mw_size, upshifting, rms_align)
# detect R peaks & make RR tachogram
self.detect_Rpeaks(smooth)
def pan_tompkins_detector(self):
"""
Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
<NAME> and <NAME>.
A Real-Time QRS Detection Algorithm.
In: IEEE Transactions on Biomedical Engineering
BME-32.3 (1985), pp. 230–236.
See Also
----------
EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
"""
self.metadata['analysis_info']['pan_tompkins'] = True
#interpolate data because has NaNs, cant for ecg band pass filter step
data = self.data.interpolate()
#makes our data a list because that is the formating that bsnb wants it in
signal =
|
mk.Collections.convert_list(data['Raw'])
|
pandas.Series.tolist
|
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import glob
import os
import sys
import datetime
import urllib.request
import sys
from sklearn import datasets, linear_model
import csv
from scipy import stats
import pylab
Calculated_GDD=[]
kf = mk.KnowledgeFrame()
kf2 = mk.KnowledgeFrame()
tbase = 10
tupper = 50
startYear=2012
endYear=2017
#The function takes city name and years as input and calcultes Linear Regression for spesific citiy.
def LinearRegressionplots(cityname,tbase, tupper,startYear,endYear):
"""The function takes city name and years as input and calcultes Linear Regression for spesific citiy."""
years=[2012,2013,2014,2015,2016,2017]
for year in years:
for fname in glob.glob('./input/'+str(cityname) + '_' + str(year) + '.csv'):#searches for the specific file in the input folder
print(str(cityname) + '_' + str(year))
Data=mk.read_csv(fname,header_numer=0)
kf=mk.KnowledgeFrame(Data)
year = list(kf['Year'])[1]
kf = kf[kf["Date/Time"] != str(year)+"-02-29"]
tempgetting_max = kf['Max Temp (°C)']
tempgetting_min = kf['Min Temp (°C)']
lengthgth = length(
|
mk.Collections.sipna(tempgetting_min)
|
pandas.Series.dropna
|
# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from pathlib import Path
from urllib.parse import urlparse
import numpy as np
import monkey as mk
from asreview.config import COLUMN_DEFINITIONS
from asreview.config import LABEL_NA
from asreview.datasets import DatasetManager
from asreview.datasets import DatasetNotFoundError
from asreview.exceptions import BadFileFormatError
from asreview.io import PaperRecord
from asreview.io.utils import convert_keywords
from asreview.io.utils import type_from_column
from asreview.utils import getting_entry_points
from asreview.utils import is_iterable
from asreview.utils import is_url
def load_data(name, *args, **kwargs):
"""Load data from file, URL, or plugin.
Parameters
----------
name: str, pathlib.Path
File path, URL, or alias of extension dataset.
Returns
-------
asreview.ASReviewData:
Initialized ASReview data object.
Raises
------
FileNotFoundError
If *name* is neither an existing file/URL nor a known plugin dataset.
"""
# check is file or URL
if is_url(name) or Path(name).exists():
return ASReviewData.from_file(name, *args, **kwargs)
# check if dataset is plugin dataset
try:
dataset_path = DatasetManager().find(name).filepath
return ASReviewData.from_file(dataset_path, *args, **kwargs)
except DatasetNotFoundError:
pass
# could not find the dataset anywhere; raise
raise FileNotFoundError(
f"File, URL, or dataset does not exist: '{name}'")
class ASReviewData():
"""Data object to the dataset with texts, labels, DOIs etc.
Arguments
---------
kf: monkey.KnowledgeFrame
Dataframe containing the data for the ASReview data object.
column_spec: dict
Specification for which column corresponds to which standard
specification. Key is the standard specification, key is which column
it is actutotal_ally in. Default: None.
Attributes
----------
record_ids: numpy.ndarray
Return an array representing the data in the Index.
texts: numpy.ndarray
Returns an array with either header_numings, bodies, or both.
header_numings: numpy.ndarray
Returns an array with dataset header_numings.
title: numpy.ndarray
Identical to header_numings.
bodies: numpy.ndarray
Returns an array with dataset bodies.
abstract: numpy.ndarray
Identical to bodies.
notes: numpy.ndarray
Returns an array with dataset notes.
keywords: numpy.ndarray
Returns an array with dataset keywords.
authors: numpy.ndarray
Returns an array with dataset authors.
doi: numpy.ndarray
Returns an array with dataset DOI.
included: numpy.ndarray
Returns an array with document inclusion markers.
final_included: numpy.ndarray
Pending deprecation! Returns an array with document inclusion markers.
labels: numpy.ndarray
Identical to included.
"""
def __init__(self,
kf=None,
column_spec=None):
"""Initialize the data object; infer column_spec from kf columns when not given."""
self.kf = kf
# indices of prior-knowledge records (empty until set externally)
self.prior_idx = np.array([], dtype=int)
# one past the largest record index
# NOTE(review): this raises when kf is None or empty despite kf=None default — confirm intent
self.getting_max_idx = getting_max(kf.index.values) + 1
# Infer column specifications if it is not given.
if column_spec is None:
self.column_spec = {}
for col_name in list(kf):
data_type = type_from_column(col_name, COLUMN_DEFINITIONS)
if data_type is not None:
self.column_spec[data_type] = col_name
else:
self.column_spec = column_spec
# guarantee an "included" (label) column mapping is always present
if "included" not in self.column_spec:
self.column_spec["included"] = "included"
def __length__(self):
"""Number of records in the dataset; 0 when no knowledgeframe is attached."""
if self.kf is None:
return 0
return length(self.kf.index)
def hash(self):
"""Compute a hash from the dataset.
Returns
-------
str:
SHA1 hash, computed from the titles/abstracts of the knowledgeframe.
"""
# prefer abstracts for smtotal_all datasets (or when titles are unavailable)
if ((length(self.kf.index) < 1000 and self.bodies is not None) or
self.texts is None):
texts = " ".join(self.bodies)
else:
texts = " ".join(self.texts)
# NOTE(review): texts is already a single string here, so the inner
# " ".join(texts) space-separates its individual characters; the hash
# value depends on this behavior, so do not "fix" without migrating hashes.
return hashlib.sha1(" ".join(texts).encode(
encoding='UTF-8', errors='ignore')).hexdigest()
@classmethod
def from_file(cls, fp, reader=None):
"""Create instance from csv/ris/excel file.
It works in two ways; either manual control where the conversion
functions are supplied or automatic, where it searches in the entry
points for the right conversion functions.
Arguments
---------
fp: str, pathlib.Path
Read the data from this file.
reader: class
Reader to import the file.
"""
# for URLs, only the path component is used for suffix matching below
if is_url(fp):
path = urlparse(fp).path
else:
path = str(Path(fp).resolve())
# an explicitly supplied reader bypasses entry-point discovery entirely
if reader is not None:
return cls(reader.read_data(fp))
entry_points = getting_entry_points(entry_name="asreview.readers")
# pick the longest registered suffix that matches the end of the path
best_suffix = None
for suffix, entry in entry_points.items():
if path.endswith(suffix):
if best_suffix is None or length(suffix) > length(best_suffix):
best_suffix = suffix
if best_suffix is None:
raise BadFileFormatError(f"Error importing file {fp}, no capabilities "
"for importing such a file.")
reader = entry_points[best_suffix].load()
kf, column_spec = reader.read_data(fp)
return cls(kf, column_spec=column_spec)
def record(self, i, by_index=True):
"""Create a record from an index.
Arguments
---------
i: int, iterable
Index of the record, or list of indices.
by_index: bool
If True, take the i-th value as used interntotal_ally by the review.
If False, take the record with record_id==i.
Returns
-------
PaperRecord
The corresponding record if i was an integer, or a list of records
if i was an iterable.
"""
# normalize scalar input to a one-element list; remember the original shape
if not is_iterable(i):
index_list = [i]
else:
index_list = i
if by_index:
# positional lookup (iloc); record_id taken from the index values
records = [
PaperRecord(**self.kf.iloc[j],
column_spec=self.column_spec,
record_id=self.kf.index.values[j])
for j in index_list
]
else:
# label lookup (loc); the label itself is the record_id
records = [
PaperRecord(**self.kf.loc[j, :],
record_id=j,
column_spec=self.column_spec) for j in index_list
]
# scalar in -> scalar out; iterable in -> list out
if is_iterable(i):
return records
return records[0]
@property
def record_ids(self):
"""Array of record identifiers (the knowledgeframe index values)."""
return self.kf.index.values
@property
def texts(self):
"""Per-record "title abstract" strings; falls back to whichever part exists."""
if self.title is None:
return self.abstract
if self.abstract is None:
return self.title
# dtype=object keeps variable-lengthgth strings intact
cur_texts = np.array([
self.title[i] + " " + self.abstract[i] for i in range(length(self))
], dtype=object)
return cur_texts
@property
def header_numings(self):
"""Alias of title."""
return self.title
@property
def title(self):
try:
return self.kf[self.column_spec["title"]].values
except KeyError:
return None
@property
def bodies(self):
"""Alias of abstract."""
return self.abstract
@property
def abstract(self):
try:
return self.kf[self.column_spec["abstract"]].values
except KeyError:
return None
@property
def notes(self):
"""Array of dataset notes, or None when no notes column is mapped."""
try:
return self.kf[self.column_spec["notes"]].values
except KeyError:
return None
@property
def keywords(self):
"""Array of per-record keyword lists, or None when no keywords column is mapped."""
try:
# convert_keywords parses each raw cell value into a keyword list
return self.kf[self.column_spec["keywords"]].employ(
convert_keywords).values
except KeyError:
return None
@property
def authors(self):
"""Array of dataset authors, or None when no authors column is mapped."""
try:
return self.kf[self.column_spec["authors"]].values
except KeyError:
return None
@property
def doi(self):
"""Array of dataset DOIs, or None when no doi column is mapped."""
try:
return self.kf[self.column_spec["doi"]].values
except KeyError:
return None
@property
def url(self):
"""Array of dataset URLs, or None when no url column is mapped."""
try:
return self.kf[self.column_spec["url"]].values
except KeyError:
return None
def getting(self, name):
"Get column with name."
try:
return self.kf[self.column_spec[name]].values
except KeyError:
return self.kf[name].values
@property
def prior_data_idx(self):
"Get prior_included, prior_excluded from dataset."
# lookup table from record_id to positional index; ids not present in the
# knowledgeframe mapping to the 999999999 sentinel value
convert_array = np.full(self.getting_max_idx, 999999999)
convert_array[self.kf.index.values] = np.arange(length(self.kf.index))
return convert_array[self.prior_idx]
@property
def included(self):
"""Alias of labels."""
return self.labels
@included.setter
def included(self, labels):
# delegates to the labels setter
self.labels = labels
@property # pending deprecation
def final_included(self):
"""Deprecated alias of labels."""
return self.labels
@final_included.setter # pending deprecation
def final_included(self, labels):
# delegates to the labels setter
self.labels = labels
@property
def labels(self):
"""Array of inclusion labels, or None when the mapped column is missing."""
try:
column = self.column_spec["included"]
return self.kf[column].values
except KeyError:
return None
@labels.setter
def labels(self, labels):
# write through the mapped column; ftotal_all back to a literal "included" column
try:
column = self.column_spec["included"]
self.kf[column] = labels
except KeyError:
self.kf["included"] = labels
def prior_labels(self, state, by_index=True):
"""Get the labels that are marked as 'prior'.
state: BaseState
Open state that contains the label informatingion.
by_index: bool
If True, return internal indexing.
If False, return record_ids for indexing.
Returns
-------
numpy.ndarray
Array of indices that have the 'prior' property.
"""
# record ids of total_all prior-labelled records, in labelling order
prior_indices = state.getting_priors()["record_id"].to_list()
if by_index:
return np.array(prior_indices, dtype=int)
else:
# NOTE(review): indexing kf.index.values positiontotal_ally with record ids
# assumes record ids equal positional indices — confirm with ctotal_allers
return self.kf.index.values[prior_indices]
def to_file(self, fp, labels=None, ranking=None, writer=None):
"""Export data object to file.
RIS, CSV, TSV and Excel are supported file formatings at the moment.
Arguments
---------
fp: str
Filepath to export to.
labels: list, numpy.ndarray
Labels to be inserted into the knowledgeframe before export.
ranking: list, numpy.ndarray
Optiontotal_ally, knowledgeframe rows can be reordered.
writer: class
Writer to export the file.
Raises
------
BadFileFormatError
If no registered writer handles the file's suffix.
"""
kf = self.to_knowledgeframe(labels=labels, ranking=ranking)
# an explicitly supplied writer bypasses entry-point discovery
if writer is not None:
writer.write_data(kf, fp, labels=labels, ranking=ranking)
else:
entry_points = getting_entry_points(entry_name="asreview.writers")
# pick the longest registered suffix equal to the file's suffix
best_suffix = None
for suffix, entry in entry_points.items():
if Path(fp).suffix == suffix:
if best_suffix is None or length(suffix) > length(best_suffix):
best_suffix = suffix
if best_suffix is None:
raise BadFileFormatError(f"Error exporting file {fp}, no capabilities "
"for exporting such a file.")
writer = entry_points[best_suffix].load()
writer.write_data(kf, fp, labels=labels, ranking=ranking)
def to_knowledgeframe(self, labels=None, ranking=None):
"""Create new knowledgeframe with umkated label (order).
Arguments
---------
labels: list, numpy.ndarray
Current labels will be overwritten by these labels
(including unlabelled). No effect if labels is None.
ranking: list
Reorder the knowledgeframe according to these record_ids.
Default ordering if ranking is None.
Returns
-------
monkey.KnowledgeFrame
Dataframe of total_all available record data.
"""
result_kf =
|
mk.KnowledgeFrame.clone(self.kf)
|
pandas.DataFrame.copy
|
import monkey as mk
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import xgboost as xgb
class CFBModel:
def __init__(self, kf):
# dict of kfs
self.data = {k: kf[k][1] for k in kf}
def home_favored(self):
average_home_points =
|
mk.Collections.average(self.data["games"]["_home_points"])
|
pandas.Series.mean
|
""" Sample knowledgeframe for testing.
key: SQL data type
---
SQL data type with underscore prefixed
value: mk.Collections([LowerLimit, UpperLimit, NULL, Truncation])
-----
LowerLimit: SQL lower limit or monkey lower limit if it is more restrictive
UpperLimit: SQL upper limit or monkey upper limit if it is more restrictive
NULL: SQL NULL / monkey <NA>
Truncation: truncated values due to SQL precision limit
"""
import monkey as mk
mk.options.mode.chained_total_allocatement = "raise"
knowledgeframe = mk.KnowledgeFrame(
{
"_bit": mk.Collections([False, True, None, False], dtype="boolean"),
"_tinyint": mk.Collections([0, 255, None, None], dtype="UInt8"),
"_smtotal_allint": mk.Collections([-(2 ** 15), 2 ** 15 - 1, None, None], dtype="Int16"),
"_int": mk.Collections([-(2 ** 31), 2 ** 31 - 1, None, None], dtype="Int32"),
"_bigint": mk.Collections([-(2 ** 63), 2 ** 63 - 1, None, None], dtype="Int64"),
"_float": mk.Collections([-(1.79 ** 308), 1.79 ** 308, None, None], dtype="float"),
"_time": mk.Collections(
["00:00:00.0000000", "23:59:59.9999999", None, "00:00:01.123456789"],
dtype="timedelta64[ns]",
),
"_date": mk.Collections(
[
(mk.Timestamp.getting_min + mk.DateOffset(days=1)).date(),
|
mk.Timestamp.getting_max.date()
|
pandas.Timestamp.max.date
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.