body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def deploySQL(self):
    """
    ---------------------------------------------------------------------------
    Returns the SQL code needed to deploy the model.

    Returns
    -------
    str
        the SQL code needed to deploy the model.
    """
    d, D, s = self.parameters['d'], self.parameters['D'], self.parameters['s']
    sql = self.deploy_predict_
    # Inverse-differencing terms are only needed when the model was fitted
    # with a regular (d) or seasonal (D with period s) difference.
    if d > 0 or (D > 0 and s > 0):
        seasonal_span = max((D + 1) * min(1, s), 1)
        for i in range(d + 1):
            for k in range(seasonal_span):
                if (k, i) == (0, 0):
                    continue
                # Binomial coefficients C(d, i) and C(D, k) of the expansion
                # of (1 - B)^d (1 - B^s)^D.
                comb_i_d = math.factorial(d) / math.factorial(d - i) / math.factorial(i)
                comb_k_D = math.factorial(D) / math.factorial(D - k) / math.factorial(k)
                coefficient = ((-1) ** (i + k + 1)) * comb_i_d * comb_k_D
                sql += ' + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format(coefficient, i + s * k)
    return sql
| 196,671,721,592,775,420
|
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
|
verticapy/learn/tsa.py
|
deploySQL
|
MiConnell/VerticaPy
|
python
|
def deploySQL(self):
    """
    ---------------------------------------------------------------------------
    Returns the SQL code needed to deploy the model.

    Returns
    -------
    str
        the SQL code needed to deploy the model.
    """
    d, D, s = self.parameters['d'], self.parameters['D'], self.parameters['s']
    sql = self.deploy_predict_
    # Inverse-differencing terms are only needed when the model was fitted
    # with a regular (d) or seasonal (D with period s) difference.
    if d > 0 or (D > 0 and s > 0):
        seasonal_span = max((D + 1) * min(1, s), 1)
        for i in range(d + 1):
            for k in range(seasonal_span):
                if (k, i) == (0, 0):
                    continue
                # Binomial coefficients C(d, i) and C(D, k) of the expansion
                # of (1 - B)^d (1 - B^s)^D.
                comb_i_d = math.factorial(d) / math.factorial(d - i) / math.factorial(i)
                comb_k_D = math.factorial(D) / math.factorial(D - k) / math.factorial(k)
                coefficient = ((-1) ** (i + k + 1)) * comb_i_d * comb_k_D
                sql += ' + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format(coefficient, i + s * k)
    return sql
|
def fpredict(self, L: list):
    """
    ---------------------------------------------------------------------------
    Computes the prediction.

    Parameters
    ----------
    L: list
        List containing the data. It must be a two-dimensional list containing
        multiple rows. Each row must include as first element the ordered predictor
        and as nth elements the nth - 1 exogenous variable (nth > 2).

    Returns
    -------
    float
        the prediction.
    """
    def sub_arp(L: list):
        # Subtracts the fitted intercept / AR contribution from every element
        # of L, producing the residual series consumed by fepsilon below.
        L_final = []
        for i in range(len(L)):
            result = L[(- i)]  # NOTE(review): L[-0] is L[0], not the last element — confirm the intended indexing
            # NOTE(review): this inner loop rebinds `i` (shadowing the outer
            # loop variable); the outer `for` restores it on the next pass.
            for i in range(len(self.coef_.values['coefficient'])):
                elem = self.coef_.values['predictor'][i]
                if (elem.lower() == 'intercept'):
                    result -= self.coef_.values['coefficient'][i]
                elif (elem.lower()[0:2] == 'ar'):
                    nb = int(elem[2:])
                    try:
                        result -= (self.coef_.values['coefficient'][i] * L[(- nb)])
                    except:
                        # Not enough history for this AR lag: mark as unusable.
                        result = None
            L_final = ([result] + L_final)
        return L_final
    def fepsilon(L: list):
        # Estimates the most recent innovation (epsilon) using the pi-weight
        # approximation of the MA polynomial stored in self.ma_piq_.
        if ((self.parameters['p'] > 0) or (self.parameters['P'] > 0)):
            L_tmp = sub_arp(L)
        else:
            L_tmp = L
        try:
            result = (L_tmp[(- 1)] - self.ma_avg_)
            for i in range(1, self.parameters['max_pik']):
                result -= (self.ma_piq_.values['coefficient'][i] * (L_tmp[(- i)] - self.ma_avg_))
            return result
        except:
            # Not enough history (or no fitted MA part): assume zero innovation.
            return 0
    # Degenerate model (no AR, MA, differencing, or exogenous part): the
    # prediction is simply the training mean.
    if ((self.parameters['p'] == 0) and (self.parameters['q'] == 0) and (self.parameters['d'] == 0) and (self.parameters['s'] == 0) and (not self.exogenous)):
        return self.ma_avg_
    try:
        # yt: response series (first element of each row); yt_copy keeps the
        # undifferenced values for the inverse-differencing step at the end.
        yt = [elem[0] for elem in L]
        yt_copy = [elem[0] for elem in L]
        yt.reverse()
        # Apply d regular differences, then D seasonal differences of period s.
        if (self.parameters['d'] > 0):
            for i in range(self.parameters['d']):
                yt = [(yt[(i - 1)] - yt[i]) for i in range(1, len(yt))]
        if ((self.parameters['D'] > 0) and (self.parameters['s'] > 0)):
            for i in range(self.parameters['D']):
                yt = [(yt[(i - self.parameters['s'])] - yt[i]) for i in range(self.parameters['s'], len(yt))]
        yt.reverse()
        # Accumulate every fitted coefficient: intercept, AR lags on the
        # differenced series, MA terms on estimated innovations, and finally
        # the exogenous columns of the last row (indexed by j).
        (result, j) = (0, 1)
        for i in range(len(self.coef_.values['coefficient'])):
            elem = self.coef_.values['predictor'][i]
            if (elem.lower() == 'intercept'):
                result += self.coef_.values['coefficient'][i]
            elif (elem.lower()[0:2] == 'ar'):
                nb = int(elem[2:])
                result += (self.coef_.values['coefficient'][i] * yt[(- nb)])
            elif (elem.lower()[0:2] == 'ma'):
                nb = int(elem[2:])
                result += (self.coef_.values['coefficient'][i] * fepsilon(yt[:((- nb) - 1)]))
            else:
                result += (self.coef_.values['coefficient'][i] * L[(- 1)][j])
                j += 1
        # Invert the differencing: add back the binomial expansion of
        # (1 - B)^d (1 - B^s)^D evaluated on the stored original values.
        for i in range(0, (self.parameters['d'] + 1)):
            for k in range(0, max(((self.parameters['D'] + 1) * min(1, self.parameters['s'])), 1)):
                if ((k, i) != (0, 0)):
                    comb_i_d = ((math.factorial(self.parameters['d']) / math.factorial((self.parameters['d'] - i))) / math.factorial(i))
                    comb_k_D = ((math.factorial(self.parameters['D']) / math.factorial((self.parameters['D'] - k))) / math.factorial(k))
                    result += (((((- 1) ** ((i + k) + 1)) * comb_i_d) * comb_k_D) * yt_copy[(- (i + (self.parameters['s'] * k)))])
        return result
    except:
        # Any failure (insufficient history, None residuals, ...) yields no prediction.
        return None
| -3,849,201,933,801,486,000
|
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
List containing the data. It must be a two-dimensional list containing
multiple rows. Each row must include as first element the ordered predictor
and as nth elements the nth - 1 exogenous variable (nth > 2).
Returns
-------
float
the prediction.
|
verticapy/learn/tsa.py
|
fpredict
|
MiConnell/VerticaPy
|
python
|
def fpredict(self, L: list):
    """
    ---------------------------------------------------------------------------
    Computes the prediction.

    Parameters
    ----------
    L: list
        List containing the data. It must be a two-dimensional list containing
        multiple rows. Each row must include as first element the ordered predictor
        and as nth elements the nth - 1 exogenous variable (nth > 2).

    Returns
    -------
    float
        the prediction.
    """
    def sub_arp(L: list):
        # Subtracts the fitted intercept / AR contribution from every element
        # of L, producing the residual series consumed by fepsilon below.
        L_final = []
        for i in range(len(L)):
            result = L[(- i)]  # NOTE(review): L[-0] is L[0], not the last element — confirm the intended indexing
            # NOTE(review): this inner loop rebinds `i` (shadowing the outer
            # loop variable); the outer `for` restores it on the next pass.
            for i in range(len(self.coef_.values['coefficient'])):
                elem = self.coef_.values['predictor'][i]
                if (elem.lower() == 'intercept'):
                    result -= self.coef_.values['coefficient'][i]
                elif (elem.lower()[0:2] == 'ar'):
                    nb = int(elem[2:])
                    try:
                        result -= (self.coef_.values['coefficient'][i] * L[(- nb)])
                    except:
                        # Not enough history for this AR lag: mark as unusable.
                        result = None
            L_final = ([result] + L_final)
        return L_final
    def fepsilon(L: list):
        # Estimates the most recent innovation (epsilon) using the pi-weight
        # approximation of the MA polynomial stored in self.ma_piq_.
        if ((self.parameters['p'] > 0) or (self.parameters['P'] > 0)):
            L_tmp = sub_arp(L)
        else:
            L_tmp = L
        try:
            result = (L_tmp[(- 1)] - self.ma_avg_)
            for i in range(1, self.parameters['max_pik']):
                result -= (self.ma_piq_.values['coefficient'][i] * (L_tmp[(- i)] - self.ma_avg_))
            return result
        except:
            # Not enough history (or no fitted MA part): assume zero innovation.
            return 0
    # Degenerate model (no AR, MA, differencing, or exogenous part): the
    # prediction is simply the training mean.
    if ((self.parameters['p'] == 0) and (self.parameters['q'] == 0) and (self.parameters['d'] == 0) and (self.parameters['s'] == 0) and (not self.exogenous)):
        return self.ma_avg_
    try:
        # yt: response series (first element of each row); yt_copy keeps the
        # undifferenced values for the inverse-differencing step at the end.
        yt = [elem[0] for elem in L]
        yt_copy = [elem[0] for elem in L]
        yt.reverse()
        # Apply d regular differences, then D seasonal differences of period s.
        if (self.parameters['d'] > 0):
            for i in range(self.parameters['d']):
                yt = [(yt[(i - 1)] - yt[i]) for i in range(1, len(yt))]
        if ((self.parameters['D'] > 0) and (self.parameters['s'] > 0)):
            for i in range(self.parameters['D']):
                yt = [(yt[(i - self.parameters['s'])] - yt[i]) for i in range(self.parameters['s'], len(yt))]
        yt.reverse()
        # Accumulate every fitted coefficient: intercept, AR lags on the
        # differenced series, MA terms on estimated innovations, and finally
        # the exogenous columns of the last row (indexed by j).
        (result, j) = (0, 1)
        for i in range(len(self.coef_.values['coefficient'])):
            elem = self.coef_.values['predictor'][i]
            if (elem.lower() == 'intercept'):
                result += self.coef_.values['coefficient'][i]
            elif (elem.lower()[0:2] == 'ar'):
                nb = int(elem[2:])
                result += (self.coef_.values['coefficient'][i] * yt[(- nb)])
            elif (elem.lower()[0:2] == 'ma'):
                nb = int(elem[2:])
                result += (self.coef_.values['coefficient'][i] * fepsilon(yt[:((- nb) - 1)]))
            else:
                result += (self.coef_.values['coefficient'][i] * L[(- 1)][j])
                j += 1
        # Invert the differencing: add back the binomial expansion of
        # (1 - B)^d (1 - B^s)^D evaluated on the stored original values.
        for i in range(0, (self.parameters['d'] + 1)):
            for k in range(0, max(((self.parameters['D'] + 1) * min(1, self.parameters['s'])), 1)):
                if ((k, i) != (0, 0)):
                    comb_i_d = ((math.factorial(self.parameters['d']) / math.factorial((self.parameters['d'] - i))) / math.factorial(i))
                    comb_k_D = ((math.factorial(self.parameters['D']) / math.factorial((self.parameters['D'] - k))) / math.factorial(k))
                    result += (((((- 1) ** ((i + k) + 1)) * comb_i_d) * comb_k_D) * yt_copy[(- (i + (self.parameters['s'] * k)))])
        return result
    except:
        # Any failure (insufficient history, None residuals, ...) yields no prediction.
        return None
|
def fit(self, input_relation: Union["vDataFrame", str], y: str, ts: str, X: list=[], test_relation: Union["vDataFrame", str]=''):
    """
    ---------------------------------------------------------------------------
    Trains the model.

    Parameters
    ----------
    input_relation: str/vDataFrame
        Training relation.
    y: str
        Response column.
    ts: str
        vcolumn used to order the data.
    X: list, optional
        exogenous columns used to fit the model.
    test_relation: str/vDataFrame, optional
        Relation used to test the model.

    Returns
    -------
    object
        model
    """
    check_types([('input_relation', input_relation, [str, vDataFrame]), ('y', y, [str]), ('test_relation', test_relation, [str, vDataFrame]), ('ts', ts, [str])])
    self.cursor = check_cursor(self.cursor, input_relation, True)[0]
    does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
    self.input_relation = (input_relation if isinstance(input_relation, str) else input_relation.__genSQL__())
    # Default the test relation to the training relation when not supplied.
    if isinstance(test_relation, vDataFrame):
        self.test_relation = test_relation.__genSQL__()
    elif test_relation:
        self.test_relation = test_relation
    else:
        self.test_relation = self.input_relation
    (self.y, self.ts, self.deploy_predict_) = (str_column(y), str_column(ts), '')
    self.coef_ = tablesample({'predictor': [], 'coefficient': []})
    (self.ma_avg_, self.ma_piq_) = (None, None)
    (X, schema) = ([str_column(elem) for elem in X], schema_relation(self.name)[0])
    (self.X, self.exogenous) = ([], X)
    # [VerticaPy_y], [VerticaPy_ts] and [VerticaPy_key_columns] are placeholders
    # substituted with the real column names just before each query is executed.
    relation = '(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE '
    model = LinearRegression(name=self.name, solver=self.parameters['solver'], max_iter=self.parameters['max_iter'], tol=self.parameters['tol'])
    # Degenerate model (no AR/MA/differencing/exogenous part): fit is the mean.
    if ((self.parameters['p'] == 0) and (self.parameters['q'] == 0) and (self.parameters['d'] == 0) and (self.parameters['s'] == 0) and (not self.exogenous)):
        query = 'SELECT AVG({}) FROM {}'.format(self.y, self.input_relation)
        self.ma_avg_ = self.cursor.execute(query).fetchone()[0]
        self.deploy_predict_ = str(self.ma_avg_)
    # Apply d regular differences, then D seasonal differences of period s,
    # by nesting LAG-based subqueries around the relation.
    if (self.parameters['d'] > 0):
        for i in range(self.parameters['d']):
            relation = '(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(relation)
    if ((self.parameters['D'] > 0) and (self.parameters['s'] > 0)):
        for i in range(self.parameters['D']):
            relation = '(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(self.parameters['s'], relation)
    def drop_temp_elem(self, schema):
        # Best-effort cleanup of the session-scoped temporary view.
        try:
            with warnings.catch_warnings(record=True) as w:
                drop('{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), cursor=self.cursor, method='view')
        except:
            pass
    # AR part: regress the (differenced) series on its own regular and
    # seasonal lags, plus the exogenous columns.
    if ((self.parameters['p'] > 0) or (self.parameters['P'] > 0)):
        columns = ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}'.format(i, i) for i in range(1, (self.parameters['p'] + 1))]
        AR = ['AR{}'.format(i) for i in range(1, (self.parameters['p'] + 1))]
        if (self.parameters['s'] > 0):
            for i in range(1, (self.parameters['P'] + 1)):
                # Skip seasonal lags already covered by the regular AR lags.
                if ((i * self.parameters['s']) not in range(1, (self.parameters['p'] + 1))):
                    columns += ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}'.format((i * self.parameters['s']), (i * self.parameters['s']))]
                    AR += ['AR{}'.format((i * self.parameters['s']))]
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        drop_temp_elem(self, schema)
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), relation.format(self.input_relation).replace('[VerticaPy_ts]', self.ts).replace('[VerticaPy_y]', self.y).replace('[VerticaPy_key_columns]', (', ' + ', '.join(([self.ts] + X)))))
        try:
            self.cursor.execute(query)
            self.X += (AR + X)
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=self.X, y=self.y)
        except:
            # Drop the temporary view before propagating the failure.
            drop_temp_elem(self, schema)
            raise
        drop_temp_elem(self, schema)
        self.coef_.values['predictor'] = model.coef_.values['predictor']
        self.coef_.values['coefficient'] = model.coef_.values['coefficient']
        alphaq = model.coef_.values['coefficient']
        model.drop()
        # Build both the residual (epsilon) expression and the deployable
        # prediction expression from the fitted coefficients.
        epsilon_final = ((('[VerticaPy_y] - ' + str(alphaq[0])) + ' - ') + ' - '.join([((str(alphaq[i]) + ' * ') + 'LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])'.format(i)) for i in range(1, (self.parameters['p'] + 1))]))
        self.deploy_predict_ = ((str(alphaq[0]) + ' + ') + ' + '.join([((str(alphaq[i]) + ' * ') + 'LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format(i)) for i in range(1, (self.parameters['p'] + 1))]))
        if ((self.parameters['s'] > 0) and (self.parameters['P'] > 0)):
            epsilon_final += (' - ' + ' - '.join([((str(alphaq[i]) + ' * ') + 'LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])'.format((i * self.parameters['s']))) for i in range((self.parameters['p'] + 1), ((self.parameters['p'] + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1))]))
            self.deploy_predict_ += (' + ' + ' + '.join([((str(alphaq[i]) + ' * ') + 'LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format((i * self.parameters['s']))) for i in range((self.parameters['p'] + 1), ((self.parameters['p'] + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1))]))
        for (idx, elem) in enumerate(X):
            epsilon_final += ' - {} * [X{}]'.format(alphaq[(((idx + self.parameters['p']) + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1)], idx)
            self.deploy_predict_ += ' + {} * [X{}]'.format(alphaq[(((idx + self.parameters['p']) + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1)], idx)
        relation = '(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(epsilon_final, ', '.join(AR), relation)
    # MA part: approximate the innovations with a long autoregression (ARq),
    # then derive the theta (MA) and pi weights from its coefficients.
    if ((self.parameters['q'] > 0) or ((self.parameters['Q'] > 0) and (self.parameters['s'] > 0))):
        transform_relation = relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
        transform_relation = transform_relation.replace('[VerticaPy_key_columns]', (', ' + ', '.join((X + [ts]))))
        for (idx, elem) in enumerate(X):
            transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
        query = 'SELECT COUNT(*), AVG({}) FROM {}'.format(self.y, transform_relation.format(self.input_relation))
        result = self.cursor.execute(query).fetchone()
        self.ma_avg_ = result[1]
        n = result[0]
        # Heuristic order for the approximating autoregression: bounded by the
        # cube root of the row count, papprox_ma, q and Q * s.
        n = max(max(min(max((n ** (1.0 / 3.0)), 8), self.parameters['papprox_ma']), self.parameters['q']), ((self.parameters['Q'] * self.parameters['s']) + 1))
        n = int(n)
        columns = ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}'.format(i, i) for i in range(1, n)]
        ARq = ['ARq{}'.format(i) for i in range(1, n)]
        tmp_relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        for (idx, elem) in enumerate(X):
            tmp_relation = tmp_relation.replace('[X{}]'.format(idx), elem)
        drop_temp_elem(self, schema)
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), tmp_relation.format(self.input_relation).replace('[VerticaPy_ts]', self.ts).replace('[VerticaPy_y]', self.y).replace('[VerticaPy_key_columns]', (', ' + ', '.join(([self.ts] + X)))))
        try:
            self.cursor.execute(query)
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=ARq, y=self.y)
        except:
            # Drop the temporary view before propagating the failure.
            drop_temp_elem(self, schema)
            raise
        drop_temp_elem(self, schema)
        # Pure-MA model: the catalog still needs an intercept entry.
        if (not self.coef_.values['predictor']):
            self.coef_.values['predictor'] += ['Intercept']
            self.coef_.values['coefficient'] += [self.ma_avg_]
            self.deploy_predict_ = str(self.ma_avg_)
        alphaq = model.coef_.values['coefficient'][1:]
        model.drop()
        # Recursively convert the long-AR coefficients into MA theta weights.
        (thetaq, piq) = ([], ([(- 1)] + []))
        for j in range(0, len(alphaq)):
            thetaq += [(sum([(alphaq[((j - i) - 1)] * thetaq[i]) for i in range(0, j)]) + alphaq[j])]
        for j in range(self.parameters['q']):
            self.coef_.values['predictor'] += ['ma{}'.format((j + 1))]
            self.coef_.values['coefficient'] += [thetaq[j]]
            self.deploy_predict_ += ' + {} * MA{}'.format(thetaq[j], (j + 1))
        if (self.parameters['s'] > 0):
            for j in range(1, (self.parameters['Q'] + 1)):
                self.coef_.values['predictor'] += ['ma{}'.format((self.parameters['s'] * j))]
                self.coef_.values['coefficient'] += [thetaq[((self.parameters['s'] * j) - 1)]]
                self.deploy_predict_ += ' + {} * MA{}'.format(thetaq[((self.parameters['s'] * j) - 1)], (self.parameters['s'] * j))
        # pi weights of the inverted MA polynomial, used at prediction time.
        for j in range(0, self.parameters['max_pik']):
            piq_tmp = 0
            for i in range(0, self.parameters['q']):
                if ((j - i) > 0):
                    piq_tmp -= (thetaq[i] * piq[(j - i)])
                elif ((j - i) == 0):
                    piq_tmp -= thetaq[i]
            piq = (piq + [piq_tmp])
        self.ma_piq_ = tablesample({'coefficient': piq})
        # Epsilon (MA0) expression plus its lagged MA columns.
        epsilon = ((('[VerticaPy_y] - ' + str(self.ma_avg_)) + ' - ') + ' - '.join([((str(piq[i]) + ' * ') + 'LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])'.format(self.ma_avg_, i)) for i in range(1, self.parameters['max_pik'])]))
        epsilon += ' AS MA0'
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(epsilon, relation)
        columns = ['LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}'.format(i, i) for i in range(1, (self.parameters['q'] + 1))]
        MA = ['MA{}'.format(i) for i in range(1, (self.parameters['q'] + 1))]
        if (self.parameters['s'] > 0):
            columns += ['LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}'.format((i * self.parameters['s']), (i * self.parameters['s'])) for i in range(1, (self.parameters['Q'] + 1))]
            MA += ['MA{}'.format((i * self.parameters['s'])) for i in range(1, (self.parameters['Q'] + 1))]
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        self.X += MA
    # Resolve the placeholders and persist the model in the VerticaPy catalog.
    transform_relation = relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
    transform_relation = transform_relation.replace('[VerticaPy_key_columns]', (', ' + ', '.join((X + [ts]))))
    for (idx, elem) in enumerate(X):
        transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
    self.transform_relation = relation
    model_save = {'type': 'SARIMAX', 'input_relation': self.input_relation, 'test_relation': self.test_relation, 'transform_relation': self.transform_relation, 'deploy_predict': self.deploy_predict_, 'ma_avg': self.ma_avg_, 'ma_piq': (self.ma_piq_.values if self.ma_piq_ else None), 'X': self.X, 'y': self.y, 'ts': self.ts, 'exogenous': self.exogenous, 'coef': self.coef_.values, 'p': self.parameters['p'], 'd': self.parameters['d'], 'q': self.parameters['q'], 'P': self.parameters['P'], 'D': self.parameters['D'], 'Q': self.parameters['Q'], 's': self.parameters['s'], 'tol': self.parameters['tol'], 'max_iter': self.parameters['max_iter'], 'solver': self.parameters['solver'], 'max_pik': self.parameters['max_pik'], 'papprox_ma': self.parameters['papprox_ma']}
    insert_verticapy_schema(model_name=self.name, model_type='SARIMAX', model_save=model_save, cursor=self.cursor)
    return self
| -788,220,881,952,957,400
|
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
y: str
Response column.
ts: str
vcolumn used to order the data.
X: list, optional
exogenous columns used to fit the model.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
model
|
verticapy/learn/tsa.py
|
fit
|
MiConnell/VerticaPy
|
python
|
def fit(self, input_relation: Union["vDataFrame", str], y: str, ts: str, X: list=[], test_relation: Union["vDataFrame", str]=''):
    """
    ---------------------------------------------------------------------------
    Trains the model.

    Parameters
    ----------
    input_relation: str/vDataFrame
        Training relation.
    y: str
        Response column.
    ts: str
        vcolumn used to order the data.
    X: list, optional
        exogenous columns used to fit the model.
    test_relation: str/vDataFrame, optional
        Relation used to test the model.

    Returns
    -------
    object
        model
    """
    # FIX: the empty-string literals for the `test_relation` default and the
    # `deploy_predict_` initialization were dropped (leaving a SyntaxError and
    # a 2-into-3 tuple unpack); both are restored below.
    check_types([('input_relation', input_relation, [str, vDataFrame]), ('y', y, [str]), ('test_relation', test_relation, [str, vDataFrame]), ('ts', ts, [str])])
    self.cursor = check_cursor(self.cursor, input_relation, True)[0]
    does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
    self.input_relation = (input_relation if isinstance(input_relation, str) else input_relation.__genSQL__())
    # Default the test relation to the training relation when not supplied.
    if isinstance(test_relation, vDataFrame):
        self.test_relation = test_relation.__genSQL__()
    elif test_relation:
        self.test_relation = test_relation
    else:
        self.test_relation = self.input_relation
    (self.y, self.ts, self.deploy_predict_) = (str_column(y), str_column(ts), '')
    self.coef_ = tablesample({'predictor': [], 'coefficient': []})
    (self.ma_avg_, self.ma_piq_) = (None, None)
    (X, schema) = ([str_column(elem) for elem in X], schema_relation(self.name)[0])
    (self.X, self.exogenous) = ([], X)
    # [VerticaPy_y], [VerticaPy_ts] and [VerticaPy_key_columns] are placeholders
    # substituted with the real column names just before each query is executed.
    relation = '(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE '
    model = LinearRegression(name=self.name, solver=self.parameters['solver'], max_iter=self.parameters['max_iter'], tol=self.parameters['tol'])
    # Degenerate model (no AR/MA/differencing/exogenous part): fit is the mean.
    if ((self.parameters['p'] == 0) and (self.parameters['q'] == 0) and (self.parameters['d'] == 0) and (self.parameters['s'] == 0) and (not self.exogenous)):
        query = 'SELECT AVG({}) FROM {}'.format(self.y, self.input_relation)
        self.ma_avg_ = self.cursor.execute(query).fetchone()[0]
        self.deploy_predict_ = str(self.ma_avg_)
    # Apply d regular differences, then D seasonal differences of period s,
    # by nesting LAG-based subqueries around the relation.
    if (self.parameters['d'] > 0):
        for i in range(self.parameters['d']):
            relation = '(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(relation)
    if ((self.parameters['D'] > 0) and (self.parameters['s'] > 0)):
        for i in range(self.parameters['D']):
            relation = '(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(self.parameters['s'], relation)
    def drop_temp_elem(self, schema):
        # Best-effort cleanup of the session-scoped temporary view.
        try:
            with warnings.catch_warnings(record=True) as w:
                drop('{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), cursor=self.cursor, method='view')
        except:
            pass
    # AR part: regress the (differenced) series on its own regular and
    # seasonal lags, plus the exogenous columns.
    if ((self.parameters['p'] > 0) or (self.parameters['P'] > 0)):
        columns = ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}'.format(i, i) for i in range(1, (self.parameters['p'] + 1))]
        AR = ['AR{}'.format(i) for i in range(1, (self.parameters['p'] + 1))]
        if (self.parameters['s'] > 0):
            for i in range(1, (self.parameters['P'] + 1)):
                # Skip seasonal lags already covered by the regular AR lags.
                if ((i * self.parameters['s']) not in range(1, (self.parameters['p'] + 1))):
                    columns += ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}'.format((i * self.parameters['s']), (i * self.parameters['s']))]
                    AR += ['AR{}'.format((i * self.parameters['s']))]
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        drop_temp_elem(self, schema)
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), relation.format(self.input_relation).replace('[VerticaPy_ts]', self.ts).replace('[VerticaPy_y]', self.y).replace('[VerticaPy_key_columns]', (', ' + ', '.join(([self.ts] + X)))))
        try:
            self.cursor.execute(query)
            self.X += (AR + X)
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=self.X, y=self.y)
        except:
            # Drop the temporary view before propagating the failure.
            drop_temp_elem(self, schema)
            raise
        drop_temp_elem(self, schema)
        self.coef_.values['predictor'] = model.coef_.values['predictor']
        self.coef_.values['coefficient'] = model.coef_.values['coefficient']
        alphaq = model.coef_.values['coefficient']
        model.drop()
        # Build both the residual (epsilon) expression and the deployable
        # prediction expression from the fitted coefficients.
        epsilon_final = ((('[VerticaPy_y] - ' + str(alphaq[0])) + ' - ') + ' - '.join([((str(alphaq[i]) + ' * ') + 'LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])'.format(i)) for i in range(1, (self.parameters['p'] + 1))]))
        self.deploy_predict_ = ((str(alphaq[0]) + ' + ') + ' + '.join([((str(alphaq[i]) + ' * ') + 'LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format(i)) for i in range(1, (self.parameters['p'] + 1))]))
        if ((self.parameters['s'] > 0) and (self.parameters['P'] > 0)):
            epsilon_final += (' - ' + ' - '.join([((str(alphaq[i]) + ' * ') + 'LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])'.format((i * self.parameters['s']))) for i in range((self.parameters['p'] + 1), ((self.parameters['p'] + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1))]))
            self.deploy_predict_ += (' + ' + ' + '.join([((str(alphaq[i]) + ' * ') + 'LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])'.format((i * self.parameters['s']))) for i in range((self.parameters['p'] + 1), ((self.parameters['p'] + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1))]))
        for (idx, elem) in enumerate(X):
            epsilon_final += ' - {} * [X{}]'.format(alphaq[(((idx + self.parameters['p']) + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1)], idx)
            self.deploy_predict_ += ' + {} * [X{}]'.format(alphaq[(((idx + self.parameters['p']) + (self.parameters['P'] if (self.parameters['s'] > 0) else 0)) + 1)], idx)
        relation = '(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE'.format(epsilon_final, ', '.join(AR), relation)
    # MA part: approximate the innovations with a long autoregression (ARq),
    # then derive the theta (MA) and pi weights from its coefficients.
    if ((self.parameters['q'] > 0) or ((self.parameters['Q'] > 0) and (self.parameters['s'] > 0))):
        transform_relation = relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
        transform_relation = transform_relation.replace('[VerticaPy_key_columns]', (', ' + ', '.join((X + [ts]))))
        for (idx, elem) in enumerate(X):
            transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
        query = 'SELECT COUNT(*), AVG({}) FROM {}'.format(self.y, transform_relation.format(self.input_relation))
        result = self.cursor.execute(query).fetchone()
        self.ma_avg_ = result[1]
        n = result[0]
        # Heuristic order for the approximating autoregression: bounded by the
        # cube root of the row count, papprox_ma, q and Q * s.
        n = max(max(min(max((n ** (1.0 / 3.0)), 8), self.parameters['papprox_ma']), self.parameters['q']), ((self.parameters['Q'] * self.parameters['s']) + 1))
        n = int(n)
        columns = ['LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}'.format(i, i) for i in range(1, n)]
        ARq = ['ARq{}'.format(i) for i in range(1, n)]
        tmp_relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        for (idx, elem) in enumerate(X):
            tmp_relation = tmp_relation.replace('[X{}]'.format(idx), elem)
        drop_temp_elem(self, schema)
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), tmp_relation.format(self.input_relation).replace('[VerticaPy_ts]', self.ts).replace('[VerticaPy_y]', self.y).replace('[VerticaPy_key_columns]', (', ' + ', '.join(([self.ts] + X)))))
        try:
            self.cursor.execute(query)
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=ARq, y=self.y)
        except:
            # Drop the temporary view before propagating the failure.
            drop_temp_elem(self, schema)
            raise
        drop_temp_elem(self, schema)
        # Pure-MA model: the catalog still needs an intercept entry.
        if (not self.coef_.values['predictor']):
            self.coef_.values['predictor'] += ['Intercept']
            self.coef_.values['coefficient'] += [self.ma_avg_]
            self.deploy_predict_ = str(self.ma_avg_)
        alphaq = model.coef_.values['coefficient'][1:]
        model.drop()
        # Recursively convert the long-AR coefficients into MA theta weights.
        (thetaq, piq) = ([], ([(- 1)] + []))
        for j in range(0, len(alphaq)):
            thetaq += [(sum([(alphaq[((j - i) - 1)] * thetaq[i]) for i in range(0, j)]) + alphaq[j])]
        for j in range(self.parameters['q']):
            self.coef_.values['predictor'] += ['ma{}'.format((j + 1))]
            self.coef_.values['coefficient'] += [thetaq[j]]
            self.deploy_predict_ += ' + {} * MA{}'.format(thetaq[j], (j + 1))
        if (self.parameters['s'] > 0):
            for j in range(1, (self.parameters['Q'] + 1)):
                self.coef_.values['predictor'] += ['ma{}'.format((self.parameters['s'] * j))]
                self.coef_.values['coefficient'] += [thetaq[((self.parameters['s'] * j) - 1)]]
                self.deploy_predict_ += ' + {} * MA{}'.format(thetaq[((self.parameters['s'] * j) - 1)], (self.parameters['s'] * j))
        # pi weights of the inverted MA polynomial, used at prediction time.
        for j in range(0, self.parameters['max_pik']):
            piq_tmp = 0
            for i in range(0, self.parameters['q']):
                if ((j - i) > 0):
                    piq_tmp -= (thetaq[i] * piq[(j - i)])
                elif ((j - i) == 0):
                    piq_tmp -= thetaq[i]
            piq = (piq + [piq_tmp])
        self.ma_piq_ = tablesample({'coefficient': piq})
        # Epsilon (MA0) expression plus its lagged MA columns.
        epsilon = ((('[VerticaPy_y] - ' + str(self.ma_avg_)) + ' - ') + ' - '.join([((str(piq[i]) + ' * ') + 'LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])'.format(self.ma_avg_, i)) for i in range(1, self.parameters['max_pik'])]))
        epsilon += ' AS MA0'
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(epsilon, relation)
        columns = ['LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}'.format(i, i) for i in range(1, (self.parameters['q'] + 1))]
        MA = ['MA{}'.format(i) for i in range(1, (self.parameters['q'] + 1))]
        if (self.parameters['s'] > 0):
            columns += ['LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}'.format((i * self.parameters['s']), (i * self.parameters['s'])) for i in range(1, (self.parameters['Q'] + 1))]
            MA += ['MA{}'.format((i * self.parameters['s'])) for i in range(1, (self.parameters['Q'] + 1))]
        relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), relation)
        self.X += MA
    # Resolve the placeholders and persist the model in the VerticaPy catalog.
    transform_relation = relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
    transform_relation = transform_relation.replace('[VerticaPy_key_columns]', (', ' + ', '.join((X + [ts]))))
    for (idx, elem) in enumerate(X):
        transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
    self.transform_relation = relation
    model_save = {'type': 'SARIMAX', 'input_relation': self.input_relation, 'test_relation': self.test_relation, 'transform_relation': self.transform_relation, 'deploy_predict': self.deploy_predict_, 'ma_avg': self.ma_avg_, 'ma_piq': (self.ma_piq_.values if self.ma_piq_ else None), 'X': self.X, 'y': self.y, 'ts': self.ts, 'exogenous': self.exogenous, 'coef': self.coef_.values, 'p': self.parameters['p'], 'd': self.parameters['d'], 'q': self.parameters['q'], 'P': self.parameters['P'], 'D': self.parameters['D'], 'Q': self.parameters['Q'], 's': self.parameters['s'], 'tol': self.parameters['tol'], 'max_iter': self.parameters['max_iter'], 'solver': self.parameters['solver'], 'max_pik': self.parameters['max_pik'], 'papprox_ma': self.parameters['papprox_ma']}
    insert_verticapy_schema(model_name=self.name, model_type='SARIMAX', model_save=model_save, cursor=self.cursor)
    return self
|
def plot(self, vdf: vDataFrame=None, y: str='', ts: str='', X: list=[], dynamic: bool=False, one_step: bool=True, observed: bool=True, confidence: bool=True, nlead: int=10, nlast: int=0, limit: int=1000, ax=None, **style_kwds):
    '\n ---------------------------------------------------------------------------\n Draws the SARIMAX model.\n\n Parameters\n ----------\n vdf: vDataFrame, optional\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n dynamic: bool, optional\n If set to True, the dynamic forecast will be drawn.\n one_step: bool, optional\n If set to True, the one step ahead forecast will be drawn.\n observed: bool, optional\n If set to True, the observation will be drawn.\n confidence: bool, optional\n If set to True, the confidence ranges will be drawn.\n nlead: int, optional\n Number of predictions computed by the dynamic forecast after\n the last ts date.\n nlast: int, optional\n The dynamic forecast will start nlast values before the last\n ts date.\n limit: int, optional\n Maximum number of past elements to use.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n '
    # Default to the training relation when no vDataFrame is supplied.
    if (not vdf):
        vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
    check_types([('limit', limit, [int, float]), ('nlead', nlead, [int, float]), ('dynamic', dynamic, [bool]), ('observed', observed, [bool]), ('one_step', one_step, [bool]), ('confidence', confidence, [bool]), ('vdf', vdf, [vDataFrame])])
    # Enlarge `limit` so enough lagged history exists for the AR/seasonal terms
    # plus the `nlast` back-shifted dynamic start; keep the user's original
    # limit so we can later trim the display window via `delta_limit`.
    (delta_limit, limit) = (limit, max(max(limit, ((self.parameters['p'] + 1) + nlast), (((self.parameters['P'] * self.parameters['s']) + 1) + nlast)), 200))
    delta_limit = max(((limit - delta_limit) - nlast), 0)
    assert (dynamic or one_step or observed), ParameterError('No option selected.\n You should set either dynamic, one_step or observed to True.')
    assert (((nlead + nlast) > 0) or (not dynamic)), ParameterError("Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.")
    if dynamic:
        # Dynamic (recursive) forecasting cannot feed future exogenous values.
        assert (not self.exogenous), Exception('Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX.')
    if (not y):
        y = self.y
    if (not ts):
        ts = self.ts
    if (not X):
        X = self.exogenous
    result = self.predict(vdf=vdf, y=y, ts=ts, X=X, nlead=0, name='_verticapy_prediction_')
    # 95% confidence half-width from the model RMSE (1.96 ~ normal quantile).
    error_eps = (1.96 * math.sqrt(self.score(method='mse')))
    # Temporarily silence VerticaPy progress output while materializing values;
    # restored in both the success and failure paths below.
    print_info = verticapy.options['print_info']
    verticapy.options['print_info'] = False
    try:
        result = result.select([ts, y, '_verticapy_prediction_']).dropna().sort([ts]).tail(limit).values
    except:
        # Restore the option before propagating any error.
        verticapy.options['print_info'] = print_info
        raise
    verticapy.options['print_info'] = print_info
    columns = [elem for elem in result]
    # Parse the ts column to datetimes when it came back as strings.
    if isinstance(result[columns[0]][0], str):
        result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
    true_value = [result[columns[0]], result[columns[1]]]
    one_step_ahead = [result[columns[0]], result[columns[2]]]
    # One-step-ahead confidence band; None predictions stay None.
    (lower_osa, upper_osa) = ([((float(elem) - error_eps) if (elem != None) else None) for elem in one_step_ahead[1]], [((float(elem) + error_eps) if (elem != None) else None) for elem in one_step_ahead[1]])
    if dynamic:
        # Time step inferred from the last two observations; assumes a regular
        # ts spacing — TODO confirm for irregular series.
        deltat = (result[columns[0]][(- 1)] - result[columns[0]][(- 2)])
        lead_time_list = []
        if (nlast > 0):
            lead_list = [[elem] for elem in result[columns[1]][:(- nlast)]]
        else:
            lead_list = [[elem] for elem in result[columns[1]]]
        # Recursively forecast over the last `nlast` observed dates.
        for i in range(nlast):
            lead_list += [[self.fpredict(lead_list)]]
            lead_time_list += [result[columns[0]][(i - nlast)]]
        if lead_time_list:
            start_time = lead_time_list[(- 1)]
        else:
            start_time = result[columns[0]][(- 1)]
        # Then extend `nlead` steps past the last date.
        for i in range(nlead):
            lead_list += [[self.fpredict(lead_list)]]
            lead_time_list += [(start_time + ((i + 1) * deltat))]
        # Prepend the last anchored observation so the dynamic curve connects
        # visually to the observed series.
        dynamic_forecast = (([result[columns[0]][((- nlast) - 1)]] + lead_time_list), ([result[columns[1]][((- nlast) - 1)]] + [elem[0] for elem in lead_list[((- nlast) - nlead):]]))
        (lower_d, upper_d) = ([], [])
        for i in range(len(dynamic_forecast[1])):
            # Pure seasonal model: uncertainty grows per completed season;
            # otherwise it grows per step.
            if ((self.parameters['s'] > 0) and (self.parameters['p'] == 0) and (self.parameters['d'] == 0) and (self.parameters['q'] == 0)):
                delta_error = (error_eps * math.sqrt((int((i / self.parameters['s'])) + 1)))
            else:
                delta_error = (error_eps * math.sqrt((i + 1)))
            lower_d += [(float(dynamic_forecast[1][i]) - delta_error)]
            upper_d += [(float(dynamic_forecast[1][i]) + delta_error)]
    else:
        (lower_d, upper_d, dynamic_forecast) = ([], [], ([], []))
    # NOTE(review): `alpha` is assigned here but never used in this method.
    alpha = 0.3
    if (not ax):
        (fig, ax) = plt.subplots()
        if isnotebook():
            fig.set_size_inches(10, 6)
    ax.grid()
    colors = gen_colors()
    # Line styles: observed (solid), one-step (dotted), dynamic (dashed).
    param1 = {'color': colors[2], 'linewidth': 2}
    param2 = {'color': colors[3], 'linewidth': 2, 'linestyle': ':'}
    param3 = {'color': colors[0], 'linewidth': 2, 'linestyle': 'dashed'}
    if dynamic:
        # Lightly shade the entire forecast region.
        ax.fill_between(dynamic_forecast[0], (1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), alpha=0.04, color=updated_dict(param3, style_kwds, 2)['color'])
        if confidence:
            ax.fill_between(dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color='#555555')
            ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color='#000000')
            ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color='#000000')
        ax.plot(dynamic_forecast[0], dynamic_forecast[1], label='Dynamic Forecast', **updated_dict(param3, style_kwds, 2))
    if one_step:
        if confidence:
            ax.fill_between(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#555555')
            ax.plot(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], alpha=0.04, color='#000000')
            ax.plot(one_step_ahead[0][delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#000000')
        ax.plot(one_step_ahead[0][delta_limit:], one_step_ahead[1][delta_limit:], label='One-step ahead Forecast', **updated_dict(param2, style_kwds, 1))
    if observed:
        ax.plot(true_value[0][delta_limit:], true_value[1][delta_limit:], label='Observed', **updated_dict(param1, style_kwds, 0))
    ax.set_title('SARIMAX({},{},{})({},{},{})_{}'.format(self.parameters['p'], self.parameters['d'], self.parameters['q'], self.parameters['P'], self.parameters['D'], self.parameters['Q'], self.parameters['s']))
    ax.set_xlabel(ts)
    ax.legend(loc='center left', bbox_to_anchor=[1, 0.5])
    ax.set_ylim((1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))))
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    return ax
| -6,783,633,173,507,660,000
|
---------------------------------------------------------------------------
Draws the SARIMAX model.
Parameters
----------
vdf: vDataFrame, optional
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
|
verticapy/learn/tsa.py
|
plot
|
MiConnell/VerticaPy
|
python
|
def plot(self, vdf: vDataFrame=None, y: str=, ts: str=, X: list=[], dynamic: bool=False, one_step: bool=True, observed: bool=True, confidence: bool=True, nlead: int=10, nlast: int=0, limit: int=1000, ax=None, **style_kwds):
'\n ---------------------------------------------------------------------------\n Draws the SARIMAX model.\n\n Parameters\n ----------\n vdf: vDataFrame, optional\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n dynamic: bool, optional\n If set to True, the dynamic forecast will be drawn.\n one_step: bool, optional\n If set to True, the one step ahead forecast will be drawn.\n observed: bool, optional\n If set to True, the observation will be drawn.\n confidence: bool, optional\n If set to True, the confidence ranges will be drawn.\n nlead: int, optional\n Number of predictions computed by the dynamic forecast after\n the last ts date.\n nlast: int, optional\n The dynamic forecast will start nlast values before the last\n ts date.\n limit: int, optional\n Maximum number of past elements to use.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n '
if (not vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types([('limit', limit, [int, float]), ('nlead', nlead, [int, float]), ('dynamic', dynamic, [bool]), ('observed', observed, [bool]), ('one_step', one_step, [bool]), ('confidence', confidence, [bool]), ('vdf', vdf, [vDataFrame])])
(delta_limit, limit) = (limit, max(max(limit, ((self.parameters['p'] + 1) + nlast), (((self.parameters['P'] * self.parameters['s']) + 1) + nlast)), 200))
delta_limit = max(((limit - delta_limit) - nlast), 0)
assert (dynamic or one_step or observed), ParameterError('No option selected.\n You should set either dynamic, one_step or observed to True.')
assert (((nlead + nlast) > 0) or (not dynamic)), ParameterError("Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.")
if dynamic:
assert (not self.exogenous), Exception('Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX.')
if (not y):
y = self.y
if (not ts):
ts = self.ts
if (not X):
X = self.exogenous
result = self.predict(vdf=vdf, y=y, ts=ts, X=X, nlead=0, name='_verticapy_prediction_')
error_eps = (1.96 * math.sqrt(self.score(method='mse')))
print_info = verticapy.options['print_info']
verticapy.options['print_info'] = False
try:
result = result.select([ts, y, '_verticapy_prediction_']).dropna().sort([ts]).tail(limit).values
except:
verticapy.options['print_info'] = print_info
raise
verticapy.options['print_info'] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
(lower_osa, upper_osa) = ([((float(elem) - error_eps) if (elem != None) else None) for elem in one_step_ahead[1]], [((float(elem) + error_eps) if (elem != None) else None) for elem in one_step_ahead[1]])
if dynamic:
deltat = (result[columns[0]][(- 1)] - result[columns[0]][(- 2)])
lead_time_list = []
if (nlast > 0):
lead_list = [[elem] for elem in result[columns[1]][:(- nlast)]]
else:
lead_list = [[elem] for elem in result[columns[1]]]
for i in range(nlast):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [result[columns[0]][(i - nlast)]]
if lead_time_list:
start_time = lead_time_list[(- 1)]
else:
start_time = result[columns[0]][(- 1)]
for i in range(nlead):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [(start_time + ((i + 1) * deltat))]
dynamic_forecast = (([result[columns[0]][((- nlast) - 1)]] + lead_time_list), ([result[columns[1]][((- nlast) - 1)]] + [elem[0] for elem in lead_list[((- nlast) - nlead):]]))
(lower_d, upper_d) = ([], [])
for i in range(len(dynamic_forecast[1])):
if ((self.parameters['s'] > 0) and (self.parameters['p'] == 0) and (self.parameters['d'] == 0) and (self.parameters['q'] == 0)):
delta_error = (error_eps * math.sqrt((int((i / self.parameters['s'])) + 1)))
else:
delta_error = (error_eps * math.sqrt((i + 1)))
lower_d += [(float(dynamic_forecast[1][i]) - delta_error)]
upper_d += [(float(dynamic_forecast[1][i]) + delta_error)]
else:
(lower_d, upper_d, dynamic_forecast) = ([], [], ([], []))
alpha = 0.3
if (not ax):
(fig, ax) = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {'color': colors[2], 'linewidth': 2}
param2 = {'color': colors[3], 'linewidth': 2, 'linestyle': ':'}
param3 = {'color': colors[0], 'linewidth': 2, 'linestyle': 'dashed'}
if dynamic:
ax.fill_between(dynamic_forecast[0], (1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), alpha=0.04, color=updated_dict(param3, style_kwds, 2)['color'])
if confidence:
ax.fill_between(dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color='#555555')
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color='#000000')
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color='#000000')
ax.plot(dynamic_forecast[0], dynamic_forecast[1], label='Dynamic Forecast', **updated_dict(param3, style_kwds, 2))
if one_step:
if confidence:
ax.fill_between(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#555555')
ax.plot(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], alpha=0.04, color='#000000')
ax.plot(one_step_ahead[0][delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#000000')
ax.plot(one_step_ahead[0][delta_limit:], one_step_ahead[1][delta_limit:], label='One-step ahead Forecast', **updated_dict(param2, style_kwds, 1))
if observed:
ax.plot(true_value[0][delta_limit:], true_value[1][delta_limit:], label='Observed', **updated_dict(param1, style_kwds, 0))
ax.set_title('SARIMAX({},{},{})({},{},{})_{}'.format(self.parameters['p'], self.parameters['d'], self.parameters['q'], self.parameters['P'], self.parameters['D'], self.parameters['Q'], self.parameters['s']))
ax.set_xlabel(ts)
ax.legend(loc='center left', bbox_to_anchor=[1, 0.5])
ax.set_ylim((1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))))
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
|
def predict(self, vdf: vDataFrame, y: str='', ts: str='', X: list=[], nlead: int=0, name: str=''):
    '\n ---------------------------------------------------------------------------\n Predicts using the input relation.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n nlead: int, optional\n Number of records to predict after the last ts date.\n name: str, optional\n Name of the added vcolumn. If empty, a name will be generated.\n\n Returns\n -------\n vDataFrame\n object including the prediction.\n '
    check_types([('name', name, [str]), ('y', y, [str]), ('ts', ts, [str]), ('X', X, [list]), ('nlead', nlead, [int, float]), ('vdf', vdf, [vDataFrame])])
    # Fall back to the columns the model was trained with.
    if (not y):
        y = self.y
    if (not ts):
        ts = self.ts
    if (not X):
        X = self.exogenous
    columns_check([y, ts], vdf)
    (y, ts) = vdf_columns_names([y, ts], vdf)
    # Auto-generate the prediction column name from the model name if needed.
    name = (('{}_'.format(self.type) + ''.join((ch for ch in self.name if ch.isalnum()))) if (not name) else name)
    key_columns = (', ' + ', '.join(vdf.get_columns(exclude_columns=[y])))
    # Substitute the stored placeholders ([VerticaPy_y], [VerticaPy_ts], [Xi])
    # with the actual column names in the transformation and deployment SQL.
    transform_relation = self.transform_relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
    transform_relation = transform_relation.replace('[VerticaPy_key_columns]', key_columns)
    predictSQL = (self.deploySQL().replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts) + ' AS {}'.format(name))
    for (idx, elem) in enumerate(X):
        transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
        predictSQL = predictSQL.replace('[X{}]'.format(idx), elem)
    columns = ((vdf.get_columns(exclude_columns=[y]) + [predictSQL]) + ['VerticaPy_y_copy AS {}'.format(y)])
    relation = vdf.__genSQL__()
    # Iteratively append `nlead` forecasted rows: each loop derives the next
    # timestamp from the last observed step, predicts y for it, and unions the
    # new row into the working relation so the following step can use it.
    for i in range(nlead):
        query = 'SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1'.format(ts, ts, ts, relation, ts)
        deltat = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
        query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(ts, deltat, relation)
        next_t = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
        # Remember the first forecasted timestamp so the observed y can be
        # nulled out past it at the end.
        if (i == 0):
            first_t = next_t
        new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(next_t, ts, ', '.join(['NULL AS {}'.format(column) for column in vdf.get_columns(exclude_columns=[ts])]))
        relation_tmp = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts] + vdf.get_columns(exclude_columns=[ts]))), relation, new_line)
        query = 'SELECT {} FROM {} ORDER BY {} DESC LIMIT 1'.format(self.deploySQL().replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts), transform_relation.format(relation_tmp), ts)
        prediction = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
        columns_tmp = vdf.get_columns(exclude_columns=[ts, y])
        new_line = "SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}".format(next_t, ts, prediction, y, ((', ' if columns_tmp else '') + ', '.join(['NULL AS {}'.format(column) for column in columns_tmp])))
        relation = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts, y] + vdf.get_columns(exclude_columns=[ts, y]))), relation, new_line)
    final_relation = '(SELECT {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), transform_relation.format(relation))
    result = vdf_from_relation(final_relation, 'SARIMAX', self.cursor)
    if (nlead > 0):
        # Forecast rows have no observed y: mask y from the first lead date on.
        result[y].apply("CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, '{}'))
    return result
| 4,688,031,092,358,843,000
|
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
nlead: int, optional
Number of records to predict after the last ts date.
name: str, optional
Name of the added vcolumn. If empty, a name will be generated.
Returns
-------
vDataFrame
object including the prediction.
|
verticapy/learn/tsa.py
|
predict
|
MiConnell/VerticaPy
|
python
|
def predict(self, vdf: vDataFrame, y: str=, ts: str=, X: list=[], nlead: int=0, name: str=):
'\n ---------------------------------------------------------------------------\n Predicts using the input relation.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n nlead: int, optional\n Number of records to predict after the last ts date.\n name: str, optional\n Name of the added vcolumn. If empty, a name will be generated.\n\n Returns\n -------\n vDataFrame\n object including the prediction.\n '
check_types([('name', name, [str]), ('y', y, [str]), ('ts', ts, [str]), ('X', X, [list]), ('nlead', nlead, [int, float]), ('vdf', vdf, [vDataFrame])])
if (not y):
y = self.y
if (not ts):
ts = self.ts
if (not X):
X = self.exogenous
columns_check([y, ts], vdf)
(y, ts) = vdf_columns_names([y, ts], vdf)
name = (('{}_'.format(self.type) + .join((ch for ch in self.name if ch.isalnum()))) if (not name) else name)
key_columns = (', ' + ', '.join(vdf.get_columns(exclude_columns=[y])))
transform_relation = self.transform_relation.replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts)
transform_relation = transform_relation.replace('[VerticaPy_key_columns]', key_columns)
predictSQL = (self.deploySQL().replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts) + ' AS {}'.format(name))
for (idx, elem) in enumerate(X):
transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
predictSQL = predictSQL.replace('[X{}]'.format(idx), elem)
columns = ((vdf.get_columns(exclude_columns=[y]) + [predictSQL]) + ['VerticaPy_y_copy AS {}'.format(y)])
relation = vdf.__genSQL__()
for i in range(nlead):
query = 'SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1'.format(ts, ts, ts, relation, ts)
deltat = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(ts, deltat, relation)
next_t = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
if (i == 0):
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(next_t, ts, ', '.join(['NULL AS {}'.format(column) for column in vdf.get_columns(exclude_columns=[ts])]))
relation_tmp = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts] + vdf.get_columns(exclude_columns=[ts]))), relation, new_line)
query = 'SELECT {} FROM {} ORDER BY {} DESC LIMIT 1'.format(self.deploySQL().replace('[VerticaPy_y]', y).replace('[VerticaPy_ts]', ts), transform_relation.format(relation_tmp), ts)
prediction = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
columns_tmp = vdf.get_columns(exclude_columns=[ts, y])
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}".format(next_t, ts, prediction, y, ((', ' if columns_tmp else ) + ', '.join(['NULL AS {}'.format(column) for column in columns_tmp])))
relation = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts, y] + vdf.get_columns(exclude_columns=[ts, y]))), relation, new_line)
final_relation = '(SELECT {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), transform_relation.format(relation))
result = vdf_from_relation(final_relation, 'SARIMAX', self.cursor)
if (nlead > 0):
result[y].apply("CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, '{}'))
return result
|
def deploySQL(self):
    '\n ---------------------------------------------------------------------------\n Returns the SQL code needed to deploy the model.\n\n Returns\n -------\n str\n the SQL code needed to deploy the model.\n '
    # One deployment expression per response column: the intercept followed by
    # every "coefficient * predictor" term joined with '+'.
    deployment = []
    for coefs in self.coef_:
        values = coefs.values['coefficient']
        names = coefs.values['predictor']
        terms = [('{} * {}'.format(values[j], names[j])) for j in range(1, len(values))]
        deployment.append('{} + {}'.format(values[0], ' + '.join(terms)))
    return deployment
| 6,084,383,689,124,714,000
|
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
|
verticapy/learn/tsa.py
|
deploySQL
|
MiConnell/VerticaPy
|
python
|
def deploySQL(self):
'\n ---------------------------------------------------------------------------\n Returns the SQL code needed to deploy the model.\n\n Returns\n -------\n str\n the SQL code needed to deploy the model.\n '
sql = []
for (idx, coefs) in enumerate(self.coef_):
coefs_tmp = coefs.values['coefficient']
predictors_tmp = coefs.values['predictor']
sql += [((str(coefs_tmp[0]) + ' + ') + ' + '.join([((str(coefs_tmp[i]) + ' * ') + str(predictors_tmp[i])) for i in range(1, len(coefs_tmp))]))]
return sql
|
def features_importance(self, X_idx: int=0, ax=None, show: bool=True, **style_kwds):
    "\n ---------------------------------------------------------------------------\n Computes the model's features importance.\n\n Parameters\n ----------\n X_idx: int/str, optional\n Index of the main vector vcolumn used to draw the features importance.\n It can also be the name of a predictor vcolumn.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n show: bool\n If set to True, draw the features importance.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n "
    check_types([('X_idx', X_idx, [int, float, str]), ('show', show, [bool])])
    # Accept a predictor name and resolve it to its positional index.
    if isinstance(X_idx, str):
        X_idx = str_column(X_idx).lower()
        for (idx, elem) in enumerate(self.X):
            if (str_column(elem).lower() == X_idx):
                X_idx = idx
                break
    assert (isinstance(X_idx, (float, int)) and (len(self.X) > X_idx >= 0)), ParameterError("The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(len(self.X)))
    relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts).format(self.test_relation)
    for (idx, elem) in enumerate(self.X):
        relation = relation.replace('[X{}]'.format(idx), elem)
    # NOTE(review): `relation` above is built but not used below; min/max are
    # computed on the raw input relation instead — confirm this is intended.
    min_max = vdf_from_relation(relation=self.input_relation, cursor=self.cursor).agg(func=['min', 'max'], columns=self.X).transpose()
    coefficient = self.coef_[X_idx].values
    coeff_importances = {}
    coeff_sign = {}
    # Skip index 0 (the intercept); weight each coefficient by its predictor's
    # observed range so importances are scale-aware.
    for (idx, coef) in enumerate(coefficient['predictor']):
        if (idx > 0):
            # Predictor names follow the 'AR<col>_<lag>' pattern built in fit.
            predictor = int(coef.split('_')[0].replace('ar', ''))
            predictor = str_column(self.X[predictor])
            (minimum, maximum) = min_max[predictor]
            val = coefficient['coefficient'][idx]
            coeff_importances[coef] = (abs(val) * (maximum - minimum))
            coeff_sign[coef] = (1 if (val >= 0) else (- 1))
    # Normalize importances to percentages.
    total = sum([coeff_importances[elem] for elem in coeff_importances])
    for elem in coeff_importances:
        coeff_importances[elem] = ((100 * coeff_importances[elem]) / total)
    if show:
        plot_importance(coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds)
    importances = {'index': ['importance', 'sign']}
    for elem in coeff_importances:
        importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
    return tablesample(values=importances).transpose()
| -3,235,239,240,650,255,400
|
---------------------------------------------------------------------------
Computes the model's features importance.
Parameters
----------
X_idx: int/str, optional
Index of the main vector vcolumn used to draw the features importance.
It can also be the name of a predictor vcolumn.
ax: Matplotlib axes object, optional
The axes to plot on.
show: bool
If set to True, draw the features importance.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
|
verticapy/learn/tsa.py
|
features_importance
|
MiConnell/VerticaPy
|
python
|
def features_importance(self, X_idx: int=0, ax=None, show: bool=True, **style_kwds):
"\n ---------------------------------------------------------------------------\n Computes the model's features importance.\n\n Parameters\n ----------\n X_idx: int/str, optional\n Index of the main vector vcolumn used to draw the features importance.\n It can also be the name of a predictor vcolumn.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n show: bool\n If set to True, draw the features importance.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n "
check_types([('X_idx', X_idx, [int, float, str]), ('show', show, [bool])])
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for (idx, elem) in enumerate(self.X):
if (str_column(elem).lower() == X_idx):
X_idx = idx
break
assert (isinstance(X_idx, (float, int)) and (len(self.X) > X_idx >= 0)), ParameterError("The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(len(self.X)))
relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts).format(self.test_relation)
for (idx, elem) in enumerate(self.X):
relation = relation.replace('[X{}]'.format(idx), elem)
min_max = vdf_from_relation(relation=self.input_relation, cursor=self.cursor).agg(func=['min', 'max'], columns=self.X).transpose()
coefficient = self.coef_[X_idx].values
coeff_importances = {}
coeff_sign = {}
for (idx, coef) in enumerate(coefficient['predictor']):
if (idx > 0):
predictor = int(coef.split('_')[0].replace('ar', ))
predictor = str_column(self.X[predictor])
(minimum, maximum) = min_max[predictor]
val = coefficient['coefficient'][idx]
coeff_importances[coef] = (abs(val) * (maximum - minimum))
coeff_sign[coef] = (1 if (val >= 0) else (- 1))
total = sum([coeff_importances[elem] for elem in coeff_importances])
for elem in coeff_importances:
coeff_importances[elem] = ((100 * coeff_importances[elem]) / total)
if show:
plot_importance(coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds)
importances = {'index': ['importance', 'sign']}
for elem in coeff_importances:
importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
return tablesample(values=importances).transpose()
|
def fit(self, input_relation: Union[(vDataFrame, str)], X: list, ts: str, test_relation: Union[(vDataFrame, str)]=''):
    '\n ---------------------------------------------------------------------------\n Trains the model.\n\n Parameters\n ----------\n input_relation: str/vDataFrame\n Training relation.\n X: list\n List of the response columns.\n ts: str\n vcolumn used to order the data.\n test_relation: str/vDataFrame, optional\n Relation used to test the model.\n\n Returns\n -------\n object\n self\n '
    check_types([('input_relation', input_relation, [str, vDataFrame]), ('X', X, [list]), ('ts', ts, [str]), ('test_relation', test_relation, [str, vDataFrame])])
    self.cursor = check_cursor(self.cursor, input_relation, True)[0]
    does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
    self.input_relation = (input_relation if isinstance(input_relation, str) else input_relation.__genSQL__())
    # Default the test relation to the training relation when not supplied.
    if isinstance(test_relation, vDataFrame):
        self.test_relation = test_relation.__genSQL__()
    elif test_relation:
        self.test_relation = test_relation
    else:
        self.test_relation = self.input_relation
    (self.ts, self.deploy_predict_) = (str_column(ts), [])
    (self.X, schema) = ([str_column(elem) for elem in X], schema_relation(self.name)[0])
    # A VAR(p) model is trained as one linear regression per response column,
    # all sharing the same lagged-predictor design.
    model = LinearRegression(name=self.name, solver=self.parameters['solver'], max_iter=self.parameters['max_iter'], tol=self.parameters['tol'])
    (columns, AR) = ([], [])
    # Build AR<col>_<lag> lag features for every response column, lags 1..p.
    for (idx, elem) in enumerate(self.X):
        for i in range(1, (self.parameters['p'] + 1)):
            columns += ['LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}'.format(idx, i, idx, i)]
            AR += ['AR{}_{}'.format(idx, i)]
    self.transform_relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), '{}')
    relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts).format(self.input_relation)
    for (idx, elem) in enumerate(self.X):
        relation = relation.replace('[X{}]'.format(idx), elem)
    # Local helper (not a bound method: `self` is passed explicitly) that
    # best-effort drops the session-scoped temporary training view.
    def drop_temp_elem(self, schema):
        try:
            with warnings.catch_warnings(record=True) as w:
                drop('{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), cursor=self.cursor, method='view')
        except:
            # Ignore failures: the view may simply not exist yet.
            pass
    drop_temp_elem(self, schema)
    try:
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), relation)
        self.cursor.execute(query)
        self.coef_ = []
        # Fit one regression per response column; drop the in-database model
        # after harvesting its coefficients so the name can be reused.
        for elem in X:
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=AR, y=elem)
            self.coef_ += [model.coef_]
            model.drop()
    except:
        # Clean up the temp view before propagating any training error.
        drop_temp_elem(self, schema)
        raise
    drop_temp_elem(self, schema)
    model_save = {'type': 'VAR', 'input_relation': self.input_relation, 'test_relation': self.test_relation, 'transform_relation': self.transform_relation, 'deploy_predict': self.deploy_predict_, 'X': self.X, 'ts': self.ts, 'p': self.parameters['p'], 'tol': self.parameters['tol'], 'max_iter': self.parameters['max_iter'], 'solver': self.parameters['solver']}
    for (idx, elem) in enumerate(self.coef_):
        model_save['coef_{}'.format(idx)] = elem.values
    insert_verticapy_schema(model_name=self.name, model_type='VAR', model_save=model_save, cursor=self.cursor)
    return self
| 4,751,591,637,537,357,000
|
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
X: list
List of the response columns.
ts: str
vcolumn used to order the data.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
self
|
verticapy/learn/tsa.py
|
fit
|
MiConnell/VerticaPy
|
python
|
def fit(self, input_relation: Union[(vDataFrame, str)], X: list, ts: str, test_relation: Union[(vDataFrame, str)] = ''):
    """
    ---------------------------------------------------------------------------
    Trains the model.

    Parameters
    ----------
    input_relation: str/vDataFrame
        Training relation.
    X: list
        List of the response columns.
    ts: str
        vcolumn used to order the data.
    test_relation: str/vDataFrame, optional
        Relation used to test the model. Defaults to the training relation.

    Returns
    -------
    object
        self
    """
    # Fix: the default value for test_relation was missing (''), which made
    # the signature invalid; an empty string falls through to input_relation.
    check_types([('input_relation', input_relation, [str, vDataFrame]), ('X', X, [list]), ('ts', ts, [str]), ('test_relation', test_relation, [str, vDataFrame])])
    self.cursor = check_cursor(self.cursor, input_relation, True)[0]
    does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
    self.input_relation = (input_relation if isinstance(input_relation, str) else input_relation.__genSQL__())
    if isinstance(test_relation, vDataFrame):
        self.test_relation = test_relation.__genSQL__()
    elif test_relation:
        self.test_relation = test_relation
    else:
        self.test_relation = self.input_relation
    (self.ts, self.deploy_predict_) = (str_column(ts), [])
    (self.X, schema) = ([str_column(elem) for elem in X], schema_relation(self.name)[0])
    model = LinearRegression(name=self.name, solver=self.parameters['solver'], max_iter=self.parameters['max_iter'], tol=self.parameters['tol'])
    (columns, AR) = ([], [])
    # Build the autoregressive features: AR<idx>_<i> is response column idx
    # lagged by i steps (1..p), ordered by the time column.
    for (idx, elem) in enumerate(self.X):
        for i in range(1, (self.parameters['p'] + 1)):
            columns += ['LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}'.format(idx, i, idx, i)]
            AR += ['AR{}_{}'.format(idx, i)]
    self.transform_relation = '(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), '{}')
    relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts).format(self.input_relation)
    for (idx, elem) in enumerate(self.X):
        relation = relation.replace('[X{}]'.format(idx), elem)
    def drop_temp_elem(self, schema):
        # Best-effort cleanup of the temporary training view; failures are
        # deliberately ignored (the view may simply not exist yet).
        try:
            with warnings.catch_warnings(record=True) as w:
                drop('{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), cursor=self.cursor, method='view')
        except Exception:
            pass
    drop_temp_elem(self, schema)
    try:
        query = 'CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}'.format(schema, get_session(self.cursor), relation)
        self.cursor.execute(query)
        self.coef_ = []
        # Fit one linear regression per response column on the shared AR features.
        for elem in X:
            model.fit(input_relation='{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}'.format(schema, get_session(self.cursor)), X=AR, y=elem)
            self.coef_ += [model.coef_]
            model.drop()
    except:
        # Always remove the temporary view before propagating the error.
        drop_temp_elem(self, schema)
        raise
    drop_temp_elem(self, schema)
    model_save = {'type': 'VAR', 'input_relation': self.input_relation, 'test_relation': self.test_relation, 'transform_relation': self.transform_relation, 'deploy_predict': self.deploy_predict_, 'X': self.X, 'ts': self.ts, 'p': self.parameters['p'], 'tol': self.parameters['tol'], 'max_iter': self.parameters['max_iter'], 'solver': self.parameters['solver']}
    for (idx, elem) in enumerate(self.coef_):
        model_save['coef_{}'.format(idx)] = elem.values
    insert_verticapy_schema(model_name=self.name, model_type='VAR', model_save=model_save, cursor=self.cursor)
    return self
|
def fpredict(self, L: list):
    """
    ---------------------------------------------------------------------------
    Computes the prediction.

    Parameters
    ----------
    L: list
        List containing the data. It must be a two-dimensional list containing
        multiple rows. Each row must include as first element the ordered
        predictor and as nth elements the nth - 1 exogenous variable (nth > 2).

    Returns
    -------
    float
        the prediction (None if it cannot be computed from L).
    """
    try:
        result = []
        # One prediction per response column of the trained VAR model.
        for i in range(len(self.X)):
            result_tmp = 0
            for j in range(len(self.coef_[i].values['coefficient'])):
                elem = self.coef_[i].values['predictor'][j]
                if (elem.lower() == 'intercept'):
                    result_tmp += self.coef_[i].values['coefficient'][j]
                else:
                    # Predictor names look like 'AR<ni>_<nj>': ni is the
                    # response-column index, nj the lag order.
                    (ni, nj) = elem[2:].split('_')
                    (ni, nj) = (int(ni), int(nj))
                    result_tmp += (self.coef_[i].values['coefficient'][j] * L[(- nj)][ni])
            result += [result_tmp]
        return result
    except Exception:
        # Fix: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
        # Returning None on failure is deliberate (callers such as plot() use
        # it as "not enough history yet"), so only Exception subclasses are
        # absorbed. Also removed a dead 'result_tmp = 0' before the loop.
        return None
| 1,739,647,055,321,242,600
|
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
List containing the data. It must be a two-dimensional list containing
multiple rows. Each row must include as first element the ordered predictor
and as nth elements the nth - 1 exogenous variable (nth > 2).
Returns
-------
float
the prediction.
|
verticapy/learn/tsa.py
|
fpredict
|
MiConnell/VerticaPy
|
python
|
def fpredict(self, L: list):
    """
    ---------------------------------------------------------------------------
    Computes the prediction.

    Parameters
    ----------
    L: list
        Two-dimensional list of rows. The first element of each row is the
        ordered predictor; the following elements are the exogenous variables.

    Returns
    -------
    float
        the prediction (None when it cannot be computed).
    """
    try:
        predictions = []
        for col_idx in range(len(self.X)):
            coefficients = self.coef_[col_idx].values['coefficient']
            predictors = self.coef_[col_idx].values['predictor']
            acc = 0
            for term_idx, weight in enumerate(coefficients):
                term = predictors[term_idx]
                if term.lower() == 'intercept':
                    acc += weight
                else:
                    # 'AR<col>_<lag>': column index and lag order.
                    col, lag = term[2:].split('_')
                    acc += weight * L[-int(lag)][int(col)]
            predictions.append(acc)
        return predictions
    except:
        return None
|
def plot(self, vdf: vDataFrame=None, X: list=[], ts: str='', X_idx: int=0, dynamic: bool=False, one_step: bool=True, observed: bool=True, confidence: bool=True, nlead: int=10, nlast: int=0, limit: int=1000, ax=None, **style_kwds):
    """
    ---------------------------------------------------------------------------
    Draws the VAR model.

    Parameters
    ----------
    vdf: vDataFrame
        Object to use to run the prediction.
    X: list, optional
        List of the response columns.
    ts: str, optional
        vcolumn used to order the data.
    X_idx: int, optional
        Index of the main vector vcolumn to draw. It can also be the name of
        a predictor vcolumn.
    dynamic: bool, optional
        If set to True, the dynamic forecast will be drawn.
    one_step: bool, optional
        If set to True, the one step ahead forecast will be drawn.
    observed: bool, optional
        If set to True, the observation will be drawn.
    confidence: bool, optional
        If set to True, the confidence ranges will be drawn.
    nlead: int, optional
        Number of predictions computed by the dynamic forecast after
        the last ts date.
    nlast: int, optional
        The dynamic forecast will start nlast values before the last
        ts date.
    limit: int, optional
        Maximum number of past elements to use.
    ax: Matplotlib axes object, optional
        The axes to plot on.
    **style_kwds
        Any optional parameter to pass to the Matplotlib functions.

    Returns
    -------
    ax
        Matplotlib axes object
    """
    # NOTE(review): the mutable defaults X=[] are only rebound, never mutated,
    # so they are benign here — confirm before reusing this pattern elsewhere.
    if (not vdf):
        vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
    check_types([('limit', limit, [int, float]), ('nlead', nlead, [int, float]), ('X_idx', X_idx, [int, float, str]), ('dynamic', dynamic, [bool]), ('observed', observed, [bool]), ('one_step', one_step, [bool]), ('confidence', confidence, [bool]), ('vdf', vdf, [vDataFrame])])
    # Widen the fetched history so at least p + 1 + nlast rows (min 200) are
    # available; delta_limit later crops the drawn window back to the request.
    (delta_limit, limit) = (limit, max(max(limit, ((self.parameters['p'] + 1) + nlast)), 200))
    delta_limit = max(((limit - delta_limit) - nlast), 0)
    if (not ts):
        ts = self.ts
    if (not X):
        X = self.X
    assert (dynamic or one_step or observed), ParameterError('No option selected.\n You should set either dynamic, one_step or observed to True.')
    assert (((nlead + nlast) > 0) or (not dynamic)), ParameterError("Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.")
    # X_idx may be passed as a column name: resolve it to a positional index.
    if isinstance(X_idx, str):
        X_idx = str_column(X_idx).lower()
        for (idx, elem) in enumerate(X):
            if (str_column(elem).lower() == X_idx):
                X_idx = idx
                break
    assert (isinstance(X_idx, (float, int)) and (len(self.X) > X_idx >= 0)), ParameterError("The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(len(self.X)))
    # One-step-ahead predictions for every response column of the model.
    result_all = self.predict(vdf=vdf, X=X, ts=ts, nlead=0, name=['_verticapy_prediction_{}_'.format(idx) for idx in range(len(self.X))])
    (y, prediction) = (X[X_idx], '_verticapy_prediction_{}_'.format(X_idx))
    # 1.96 * sqrt(MSE): half-width of the ~95% confidence band for this column.
    error_eps = (1.96 * math.sqrt(self.score(method='mse').values['mse'][X_idx]))
    # Temporarily silence VerticaPy info output while fetching the data.
    print_info = verticapy.options['print_info']
    verticapy.options['print_info'] = False
    try:
        result = result_all.select([ts, y, prediction]).dropna().sort([ts]).tail(limit).values
    except:
        verticapy.options['print_info'] = print_info
        raise
    verticapy.options['print_info'] = print_info
    columns = [elem for elem in result]
    # Timestamps may come back as strings: parse them into datetimes.
    if isinstance(result[columns[0]][0], str):
        result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
    true_value = [result[columns[0]], result[columns[1]]]
    one_step_ahead = [result[columns[0]], result[columns[2]]]
    (lower_osa, upper_osa) = ([((float(elem) - error_eps) if (elem != None) else None) for elem in one_step_ahead[1]], [((float(elem) + error_eps) if (elem != None) else None) for elem in one_step_ahead[1]])
    if dynamic:
        # Re-fetch all response columns to seed the recursive dynamic forecast.
        print_info = verticapy.options['print_info']
        verticapy.options['print_info'] = False
        try:
            result = result_all.select(([ts] + X)).dropna().sort([ts]).tail(limit).values
        except:
            verticapy.options['print_info'] = print_info
            raise
        verticapy.options['print_info'] = print_info
        columns = [elem for elem in result]
        if isinstance(result[columns[0]][0], str):
            result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
        # Time step inferred from the last two observed timestamps.
        deltat = (result[columns[0]][(- 1)] - result[columns[0]][(- 2)])
        (lead_time_list, lead_list) = ([], [])
        # Seed with observed rows (excluding the last nlast ones if requested),
        # then extend recursively with fpredict (each step feeds the next).
        if (nlast > 0):
            for i in range(len(result[columns[0]][:(- nlast)])):
                lead_list += [[result[elem][i] for elem in columns[1:]]]
        else:
            for i in range(len(result[columns[0]])):
                lead_list += [[result[elem][i] for elem in columns[1:]]]
        for i in range(nlast):
            lead_list += [self.fpredict(lead_list)]
            lead_time_list += [result[columns[0]][(i - nlast)]]
        if lead_time_list:
            start_time = lead_time_list[(- 1)]
        else:
            start_time = result[columns[0]][(- 1)]
        for i in range(nlead):
            lead_list += [self.fpredict(lead_list)]
            lead_time_list += [(start_time + ((i + 1) * deltat))]
        dynamic_forecast = (([result[columns[0]][((- nlast) - 1)]] + lead_time_list), ([result[columns[(1 + X_idx)]][((- nlast) - 1)]] + [elem[X_idx] for elem in lead_list[((- nlast) - nlead):]]))
        # Confidence band widens with the forecast horizon (sqrt of steps).
        (lower_d, upper_d) = ([], [])
        for i in range(len(dynamic_forecast[1])):
            delta_error = (error_eps * math.sqrt((i + 1)))
            lower_d += [(float(dynamic_forecast[1][i]) - delta_error)]
            upper_d += [(float(dynamic_forecast[1][i]) + delta_error)]
    else:
        (lower_d, upper_d, dynamic_forecast) = ([], [], ([], []))
    alpha = 0.3
    if (not ax):
        (fig, ax) = plt.subplots()
        if isnotebook():
            fig.set_size_inches(10, 6)
        ax.grid()
    colors = gen_colors()
    # param1 = observed series, param2 = one-step-ahead, param3 = dynamic.
    param1 = {'color': colors[2], 'linewidth': 2}
    param2 = {'color': colors[3], 'linewidth': 2, 'linestyle': ':'}
    param3 = {'color': colors[0], 'linewidth': 2, 'linestyle': 'dashed'}
    if dynamic:
        # Lightly shade the whole dynamic-forecast region, then its band.
        ax.fill_between(dynamic_forecast[0], (1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), alpha=0.04, color=updated_dict(param3, style_kwds, 2)['color'])
        if confidence:
            ax.fill_between(dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color='#555555')
            ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color='#000000')
            ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color='#000000')
        ax.plot(dynamic_forecast[0], dynamic_forecast[1], label='Dynamic Forecast', **updated_dict(param3, style_kwds, 2))
    if one_step:
        if confidence:
            ax.fill_between(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#555555')
            ax.plot(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], alpha=0.04, color='#000000')
            ax.plot(one_step_ahead[0][delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#000000')
        ax.plot(one_step_ahead[0][delta_limit:], one_step_ahead[1][delta_limit:], label='One-step ahead Forecast', **updated_dict(param2, style_kwds, 1))
    if observed:
        ax.plot(true_value[0][delta_limit:], true_value[1][delta_limit:], label='Observed', **updated_dict(param1, style_kwds, 0))
    ax.set_title('VAR({}) [{}]'.format(self.parameters['p'], y))
    ax.set_xlabel(ts)
    ax.legend(loc='center left', bbox_to_anchor=[1, 0.5])
    ax.set_ylim((1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))))
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    return ax
| 2,941,692,781,028,544
|
---------------------------------------------------------------------------
Draws the VAR model.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
X_idx: int, optional
Index of the main vector vcolumn to draw. It can also be the name of a
predictor vcolumn.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
|
verticapy/learn/tsa.py
|
plot
|
MiConnell/VerticaPy
|
python
|
def plot(self, vdf: vDataFrame=None, X: list=[], ts: str=, X_idx: int=0, dynamic: bool=False, one_step: bool=True, observed: bool=True, confidence: bool=True, nlead: int=10, nlast: int=0, limit: int=1000, ax=None, **style_kwds):
'\n ---------------------------------------------------------------------------\n Draws the VAR model.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n X: list, optional\n List of the response columns.\n ts: str, optional\n vcolumn used to order the data.\n X_idx: int, optional\n Index of the main vector vcolumn to draw. It can also be the name of a \n predictor vcolumn.\n dynamic: bool, optional\n If set to True, the dynamic forecast will be drawn.\n one_step: bool, optional\n If set to True, the one step ahead forecast will be drawn.\n observed: bool, optional\n If set to True, the observation will be drawn.\n confidence: bool, optional\n If set to True, the confidence ranges will be drawn.\n nlead: int, optional\n Number of predictions computed by the dynamic forecast after\n the last ts date.\n nlast: int, optional\n The dynamic forecast will start nlast values before the last\n ts date.\n limit: int, optional\n Maximum number of past elements to use.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax \n Matplotlib axes object\n '
if (not vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types([('limit', limit, [int, float]), ('nlead', nlead, [int, float]), ('X_idx', X_idx, [int, float, str]), ('dynamic', dynamic, [bool]), ('observed', observed, [bool]), ('one_step', one_step, [bool]), ('confidence', confidence, [bool]), ('vdf', vdf, [vDataFrame])])
(delta_limit, limit) = (limit, max(max(limit, ((self.parameters['p'] + 1) + nlast)), 200))
delta_limit = max(((limit - delta_limit) - nlast), 0)
if (not ts):
ts = self.ts
if (not X):
X = self.X
assert (dynamic or one_step or observed), ParameterError('No option selected.\n You should set either dynamic, one_step or observed to True.')
assert (((nlead + nlast) > 0) or (not dynamic)), ParameterError("Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.")
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for (idx, elem) in enumerate(X):
if (str_column(elem).lower() == X_idx):
X_idx = idx
break
assert (isinstance(X_idx, (float, int)) and (len(self.X) > X_idx >= 0)), ParameterError("The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(len(self.X)))
result_all = self.predict(vdf=vdf, X=X, ts=ts, nlead=0, name=['_verticapy_prediction_{}_'.format(idx) for idx in range(len(self.X))])
(y, prediction) = (X[X_idx], '_verticapy_prediction_{}_'.format(X_idx))
error_eps = (1.96 * math.sqrt(self.score(method='mse').values['mse'][X_idx]))
print_info = verticapy.options['print_info']
verticapy.options['print_info'] = False
try:
result = result_all.select([ts, y, prediction]).dropna().sort([ts]).tail(limit).values
except:
verticapy.options['print_info'] = print_info
raise
verticapy.options['print_info'] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
(lower_osa, upper_osa) = ([((float(elem) - error_eps) if (elem != None) else None) for elem in one_step_ahead[1]], [((float(elem) + error_eps) if (elem != None) else None) for elem in one_step_ahead[1]])
if dynamic:
print_info = verticapy.options['print_info']
verticapy.options['print_info'] = False
try:
result = result_all.select(([ts] + X)).dropna().sort([ts]).tail(limit).values
except:
verticapy.options['print_info'] = print_info
raise
verticapy.options['print_info'] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
deltat = (result[columns[0]][(- 1)] - result[columns[0]][(- 2)])
(lead_time_list, lead_list) = ([], [])
if (nlast > 0):
for i in range(len(result[columns[0]][:(- nlast)])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
else:
for i in range(len(result[columns[0]])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
for i in range(nlast):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [result[columns[0]][(i - nlast)]]
if lead_time_list:
start_time = lead_time_list[(- 1)]
else:
start_time = result[columns[0]][(- 1)]
for i in range(nlead):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [(start_time + ((i + 1) * deltat))]
dynamic_forecast = (([result[columns[0]][((- nlast) - 1)]] + lead_time_list), ([result[columns[(1 + X_idx)]][((- nlast) - 1)]] + [elem[X_idx] for elem in lead_list[((- nlast) - nlead):]]))
(lower_d, upper_d) = ([], [])
for i in range(len(dynamic_forecast[1])):
delta_error = (error_eps * math.sqrt((i + 1)))
lower_d += [(float(dynamic_forecast[1][i]) - delta_error)]
upper_d += [(float(dynamic_forecast[1][i]) + delta_error)]
else:
(lower_d, upper_d, dynamic_forecast) = ([], [], ([], []))
alpha = 0.3
if (not ax):
(fig, ax) = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {'color': colors[2], 'linewidth': 2}
param2 = {'color': colors[3], 'linewidth': 2, 'linestyle': ':'}
param3 = {'color': colors[0], 'linewidth': 2, 'linestyle': 'dashed'}
if dynamic:
ax.fill_between(dynamic_forecast[0], (1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), alpha=0.04, color=updated_dict(param3, style_kwds, 2)['color'])
if confidence:
ax.fill_between(dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color='#555555')
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color='#000000')
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color='#000000')
ax.plot(dynamic_forecast[0], dynamic_forecast[1], label='Dynamic Forecast', **updated_dict(param3, style_kwds, 2))
if one_step:
if confidence:
ax.fill_between(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#555555')
ax.plot(one_step_ahead[0][delta_limit:], lower_osa[delta_limit:], alpha=0.04, color='#000000')
ax.plot(one_step_ahead[0][delta_limit:], upper_osa[delta_limit:], alpha=0.04, color='#000000')
ax.plot(one_step_ahead[0][delta_limit:], one_step_ahead[1][delta_limit:], label='One-step ahead Forecast', **updated_dict(param2, style_kwds, 1))
if observed:
ax.plot(true_value[0][delta_limit:], true_value[1][delta_limit:], label='Observed', **updated_dict(param1, style_kwds, 0))
ax.set_title('VAR({}) [{}]'.format(self.parameters['p'], y))
ax.set_xlabel(ts)
ax.legend(loc='center left', bbox_to_anchor=[1, 0.5])
ax.set_ylim((1.02 * float(min(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))), (1.02 * float(max(((true_value[1] + dynamic_forecast[1]) + one_step_ahead[1])))))
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
|
def predict(self, vdf: vDataFrame, X: list=[], ts: str='', nlead: int=0, name: list=[]):
    """
    ---------------------------------------------------------------------------
    Predicts using the input relation.

    Parameters
    ----------
    vdf: vDataFrame
        Object to use to run the prediction.
    X: list, optional
        List of the response columns.
    ts: str, optional
        vcolumn used to order the data.
    nlead: int, optional
        Number of records to predict after the last ts date.
    name: list, optional
        Names of the added vcolumns. If empty, names will be generated.

    Returns
    -------
    vDataFrame
        object including the prediction.
    """
    check_types([('name', name, [list]), ('ts', ts, [str]), ('nlead', nlead, [int, float]), ('X', X, [list]), ('vdf', vdf, [vDataFrame])])
    if (not ts):
        ts = self.ts
    if (not X):
        X = self.X
    columns_check((X + [ts]), vdf)
    X = vdf_columns_names(X, vdf)
    ts = vdf_columns_names([ts], vdf)[0]
    (all_pred, names) = ([], [])
    transform_relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts)
    # One prediction expression per response column; generate a name unless
    # the caller supplied exactly one name per column.
    for (idx, elem) in enumerate(X):
        name_tmp = (('{}_'.format(self.type) + ''.join((ch for ch in elem if ch.isalnum()))) if (len(name) != len(X)) else name[idx])
        all_pred += ['{} AS {}'.format(self.deploySQL()[idx], name_tmp)]
        transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
    columns = (vdf.get_columns() + all_pred)
    relation = vdf.__genSQL__()
    # Append nlead forecast rows: each pass predicts one step past the current
    # last timestamp and UNIONs the new row back into the working relation.
    for i in range(nlead):
        # Time step inferred from the last two timestamps of the relation.
        query = 'SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1'.format(ts, ts, ts, relation, ts)
        deltat = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
        query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(ts, deltat, relation)
        next_t = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
        # Remember the first forecast timestamp so observed columns beyond it
        # can be masked out at the end.
        if (i == 0):
            first_t = next_t
        # Add a placeholder row at next_t, run the model over it, then append
        # the predicted values as a real row of the relation.
        new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(next_t, ts, ', '.join(['NULL AS {}'.format(column) for column in vdf.get_columns(exclude_columns=[ts])]))
        relation_tmp = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts] + vdf.get_columns(exclude_columns=[ts]))), relation, new_line)
        query = 'SELECT {} FROM {} ORDER BY {} DESC LIMIT 1'.format(', '.join(self.deploySQL()), transform_relation.format(relation_tmp), ts)
        prediction = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()
        for (idx, elem) in enumerate(X):
            prediction[idx] = '{} AS {}'.format(prediction[idx], elem)
        columns_tmp = vdf.get_columns(exclude_columns=([ts] + X))
        new_line = "SELECT '{}'::TIMESTAMP AS {}, {} {}".format(next_t, ts, ', '.join(prediction), ((', ' if columns_tmp else '') + ', '.join(['NULL AS {}'.format(column) for column in columns_tmp])))
        relation = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join((([ts] + X) + vdf.get_columns(exclude_columns=([ts] + X)))), relation, new_line)
    final_relation = '(SELECT {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), transform_relation.format(relation))
    result = vdf_from_relation(final_relation, 'VAR', self.cursor)
    # Mask observed response values on the appended forecast rows (>= first_t).
    if (nlead > 0):
        for elem in X:
            result[elem].apply("CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, '{}'))
    return result
| 344,865,179,313,746,200
|
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
nlead: int, optional
Number of records to predict after the last ts date.
name: list, optional
Names of the added vcolumns. If empty, names will be generated.
Returns
-------
vDataFrame
object including the prediction.
|
verticapy/learn/tsa.py
|
predict
|
MiConnell/VerticaPy
|
python
|
def predict(self, vdf: vDataFrame, X: list=[], ts: str=, nlead: int=0, name: list=[]):
'\n ---------------------------------------------------------------------------\n Predicts using the input relation.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n X: list, optional\n List of the response columns.\n ts: str, optional\n vcolumn used to order the data.\n nlead: int, optional\n Number of records to predict after the last ts date.\n name: list, optional\n Names of the added vcolumns. If empty, names will be generated.\n\n Returns\n -------\n vDataFrame\n object including the prediction.\n '
check_types([('name', name, [list]), ('ts', ts, [str]), ('nlead', nlead, [int, float]), ('X', X, [list]), ('vdf', vdf, [vDataFrame])])
if (not ts):
ts = self.ts
if (not X):
X = self.X
columns_check((X + [ts]), vdf)
X = vdf_columns_names(X, vdf)
ts = vdf_columns_names([ts], vdf)[0]
(all_pred, names) = ([], [])
transform_relation = self.transform_relation.replace('[VerticaPy_ts]', self.ts)
for (idx, elem) in enumerate(X):
name_tmp = (('{}_'.format(self.type) + .join((ch for ch in elem if ch.isalnum()))) if (len(name) != len(X)) else name[idx])
all_pred += ['{} AS {}'.format(self.deploySQL()[idx], name_tmp)]
transform_relation = transform_relation.replace('[X{}]'.format(idx), elem)
columns = (vdf.get_columns() + all_pred)
relation = vdf.__genSQL__()
for i in range(nlead):
query = 'SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1'.format(ts, ts, ts, relation, ts)
deltat = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(ts, deltat, relation)
next_t = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()[0]
if (i == 0):
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(next_t, ts, ', '.join(['NULL AS {}'.format(column) for column in vdf.get_columns(exclude_columns=[ts])]))
relation_tmp = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join(([ts] + vdf.get_columns(exclude_columns=[ts]))), relation, new_line)
query = 'SELECT {} FROM {} ORDER BY {} DESC LIMIT 1'.format(', '.join(self.deploySQL()), transform_relation.format(relation_tmp), ts)
prediction = vdf._VERTICAPY_VARIABLES_['cursor'].execute(query).fetchone()
for (idx, elem) in enumerate(X):
prediction[idx] = '{} AS {}'.format(prediction[idx], elem)
columns_tmp = vdf.get_columns(exclude_columns=([ts] + X))
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} {}".format(next_t, ts, ', '.join(prediction), ((', ' if columns_tmp else ) + ', '.join(['NULL AS {}'.format(column) for column in columns_tmp])))
relation = '(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE'.format(', '.join((([ts] + X) + vdf.get_columns(exclude_columns=([ts] + X)))), relation, new_line)
final_relation = '(SELECT {} FROM {}) VERTICAPY_SUBTABLE'.format(', '.join(columns), transform_relation.format(relation))
result = vdf_from_relation(final_relation, 'VAR', self.cursor)
if (nlead > 0):
for elem in X:
result[elem].apply("CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, '{}'))
return result
|
def test_status_view_not_logged_in(self):
    """Anonymous access to the status page redirects to the login form."""
    target = '/config/status/'
    login_url = '/login/?next=' + urllib.parse.quote(target, safe='')
    response = self.client.get(target, follow=True)
    self.assertRedirects(response, login_url, status_code=302, target_status_code=200)
| 3,453,664,318,599,190,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_not_logged_in
|
cclauss/dfirtrack
|
python
|
def test_status_view_not_logged_in(self):
' '
destination = ('/login/?next=' + urllib.parse.quote('/config/status/', safe=))
response = self.client.get('/config/status/', follow=True)
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
|
def test_status_view_logged_in(self):
    """An authenticated user gets HTTP 200 from the status page."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    status_response = self.client.get('/config/status/')
    self.assertEqual(status_response.status_code, 200)
| 8,133,930,087,436,671,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_logged_in
|
cclauss/dfirtrack
|
python
|
def test_status_view_logged_in(self):
    """Status page responds with 200 for an authenticated session."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    self.assertEqual(self.client.get('/config/status/').status_code, 200)
|
def test_status_view_template(self):
    """The status page renders the dedicated status template."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    page = self.client.get('/config/status/')
    self.assertTemplateUsed(page, 'dfirtrack_config/status/status.html')
| -8,359,420,942,218,433,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_template
|
cclauss/dfirtrack
|
python
|
def test_status_view_template(self):
    """Status view must use the expected template file."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    self.assertTemplateUsed(self.client.get('/config/status/'), 'dfirtrack_config/status/status.html')
|
def test_status_view_get_user_context(self):
    """The logged-in user is exposed in the template context."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    ctx_response = self.client.get('/config/status/')
    self.assertEqual(str(ctx_response.context['user']), 'testuser_status')
| -5,865,377,632,099,459,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_get_user_context
|
cclauss/dfirtrack
|
python
|
def test_status_view_get_user_context(self):
    """Context 'user' matches the authenticated username."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    reply = self.client.get('/config/status/')
    self.assertEqual(str(reply.context['user']), 'testuser_status')
|
def test_status_view_redirect(self):
    """A request without the trailing slash is permanently redirected."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    expected = urllib.parse.quote('/config/status/', safe='/')
    reply = self.client.get('/config/status', follow=True)
    self.assertRedirects(reply, expected, status_code=301, target_status_code=200)
| 5,283,357,441,722,809,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_redirect
|
cclauss/dfirtrack
|
python
|
def test_status_view_redirect(self):
    """Missing trailing slash triggers a 301 to the canonical URL."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    canonical = urllib.parse.quote('/config/status/', safe='/')
    self.assertRedirects(self.client.get('/config/status', follow=True), canonical, status_code=301, target_status_code=200)
|
def test_status_view_get_object_context(self):
    """Status view context carries object counts and ordered status querysets."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    resp = self.client.get('/config/status/')
    # object counts created in the test fixtures
    for context_key, expected_count in (
        ('artifacts_number', 2),
        ('cases_number', 4),
        ('systems_number', 3),
        ('tasks_number', 1),
    ):
        self.assertEqual(resp.context[context_key], expected_count)
    # every status / priority queryset must be present with the queryset type
    for context_key, queryset in (
        ('analysisstatus_all', Analysisstatus.objects.all().order_by('analysisstatus_name')),
        ('artifactpriority_all', Artifactpriority.objects.all().order_by('artifactpriority_name')),
        ('artifactstatus_all', Artifactstatus.objects.all().order_by('artifactstatus_name')),
        ('casepriority_all', Casepriority.objects.all().order_by('casepriority_name')),
        ('casestatus_all', Casestatus.objects.all().order_by('casestatus_name')),
        ('systemstatus_all', Systemstatus.objects.all().order_by('systemstatus_name')),
        ('taskpriority_all', Taskpriority.objects.all().order_by('taskpriority_name')),
        ('taskstatus_all', Taskstatus.objects.all().order_by('taskstatus_name')),
    ):
        self.assertEqual(type(resp.context[context_key]), type(queryset))
| 9,154,925,588,400,285,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_get_object_context
|
cclauss/dfirtrack
|
python
|
def test_status_view_get_object_context(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
analysisstatus_all = Analysisstatus.objects.all().order_by('analysisstatus_name')
artifactpriority_all = Artifactpriority.objects.all().order_by('artifactpriority_name')
artifactstatus_all = Artifactstatus.objects.all().order_by('artifactstatus_name')
casepriority_all = Casepriority.objects.all().order_by('casepriority_name')
casestatus_all = Casestatus.objects.all().order_by('casestatus_name')
systemstatus_all = Systemstatus.objects.all().order_by('systemstatus_name')
taskstatus_all = Taskstatus.objects.all().order_by('taskstatus_name')
taskpriority_all = Taskpriority.objects.all().order_by('taskpriority_name')
self.assertEqual(response.context['artifacts_number'], 2)
self.assertEqual(response.context['cases_number'], 4)
self.assertEqual(response.context['systems_number'], 3)
self.assertEqual(response.context['tasks_number'], 1)
self.assertEqual(type(response.context['analysisstatus_all']), type(analysisstatus_all))
self.assertEqual(type(response.context['artifactpriority_all']), type(artifactpriority_all))
self.assertEqual(type(response.context['artifactstatus_all']), type(artifactstatus_all))
self.assertEqual(type(response.context['casepriority_all']), type(casepriority_all))
self.assertEqual(type(response.context['casestatus_all']), type(casestatus_all))
self.assertEqual(type(response.context['systemstatus_all']), type(systemstatus_all))
self.assertEqual(type(response.context['taskpriority_all']), type(taskpriority_all))
self.assertEqual(type(response.context['taskstatus_all']), type(taskstatus_all))
|
def test_status_view_get_statushistory_entry_numbers_context(self):
    """Status view exposes the status history in reversed order."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    resp = self.client.get('/config/status/')
    # only the type is compared -- reversed() yields an iterator, not a queryset
    expected = reversed(Statushistory.objects.all())
    self.assertEqual(type(resp.context['statushistory_all']), type(expected))
| 4,925,314,560,294,860,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_view_get_statushistory_entry_numbers_context
|
cclauss/dfirtrack
|
python
|
def test_status_view_get_statushistory_entry_numbers_context(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
self.assertEqual(type(response.context['statushistory_all']), type(reversed(Statushistory.objects.all())))
|
def test_status_detail_view_not_logged_in(self):
    """Anonymous detail requests are redirected to the login page."""
    # look up the history entry created at this fixed timestamp
    history_time = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    history_id = Statushistory.objects.get(statushistory_time=history_time).statushistory_id
    detail_url = ('/config/status/' + str(history_id)) + '/'
    target = '/login/?next=' + urllib.parse.quote(detail_url, safe='')
    resp = self.client.get(detail_url, follow=True)
    self.assertRedirects(resp, target, status_code=302, target_status_code=200)
| -80,654,200,140,861,870
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_detail_view_not_logged_in
|
cclauss/dfirtrack
|
python
|
def test_status_detail_view_not_logged_in(self):
' '
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
destination = ('/login/?next=' + urllib.parse.quote((('/config/status/' + str(statushistory_id)) + '/'), safe=''))
response = self.client.get((('/config/status/' + str(statushistory_id)) + '/'), follow=True)
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
|
def test_status_detail_view_logged_in(self):
    """Authenticated users can open the status detail view."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    history_time = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    history_id = Statushistory.objects.get(statushistory_time=history_time).statushistory_id
    resp = self.client.get('/config/status/' + str(history_id) + '/')
    self.assertEqual(resp.status_code, 200)
| -7,726,723,892,825,041,000
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_detail_view_logged_in
|
cclauss/dfirtrack
|
python
|
def test_status_detail_view_logged_in(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get((('/config/status/' + str(statushistory_id)) + '/'))
self.assertEqual(response.status_code, 200)
|
def test_status_detail_view_template(self):
    """Status detail view renders the expected template."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    history_time = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    history_id = Statushistory.objects.get(statushistory_time=history_time).statushistory_id
    resp = self.client.get('/config/status/' + str(history_id) + '/')
    self.assertTemplateUsed(resp, 'dfirtrack_config/status/status_detail.html')
| 1,164,327,776,074,919,400
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_detail_view_template
|
cclauss/dfirtrack
|
python
|
def test_status_detail_view_template(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get((('/config/status/' + str(statushistory_id)) + '/'))
self.assertTemplateUsed(response, 'dfirtrack_config/status/status_detail.html')
|
def test_status_detail_view_get_user_context(self):
    """Status detail view puts the authenticated user into the context."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    history_time = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    history_id = Statushistory.objects.get(statushistory_time=history_time).statushistory_id
    resp = self.client.get('/config/status/' + str(history_id) + '/')
    self.assertEqual(str(resp.context['user']), 'testuser_status')
| 3,040,392,916,661,167,600
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_detail_view_get_user_context
|
cclauss/dfirtrack
|
python
|
def test_status_detail_view_get_user_context(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get((('/config/status/' + str(statushistory_id)) + '/'))
self.assertEqual(str(response.context['user']), 'testuser_status')
|
def test_status_detail_view_redirect(self):
    """Detail request without the trailing slash is permanently redirected."""
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    history_time = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    history_id = Statushistory.objects.get(statushistory_time=history_time).statushistory_id
    target = urllib.parse.quote('/config/status/' + str(history_id) + '/', safe='/')
    resp = self.client.get('/config/status/' + str(history_id), follow=True)
    self.assertRedirects(resp, target, status_code=301, target_status_code=200)
| 2,169,419,817,809,991,700
|
test status view
|
dfirtrack_config/tests/status/test_status_views.py
|
test_status_detail_view_redirect
|
cclauss/dfirtrack
|
python
|
def test_status_detail_view_redirect(self):
' '
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
destination = urllib.parse.quote((('/config/status/' + str(statushistory_id)) + '/'), safe='/')
response = self.client.get(('/config/status/' + str(statushistory_id)), follow=True)
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
|
def find_format_specifiers(s):
    """Find all format specifiers in a string.

    Scans *s* for '%' characters and collects the single character that
    follows each one (e.g. 's' for '%s', '1' for Qt-style '%1').

    :param s: string to scan
    :returns: list of one-character specifiers, in order of appearance
    """
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        try:
            # the specifier is the single character right after '%'
            specifiers.append(s[percent + 1])
        except IndexError:
            # '%' was the final character of the string; nothing follows it.
            # Catch only IndexError -- the original bare except hid real bugs.
            print('Failed to get specifier')
        pos = percent + 2
    return specifiers
| -4,348,967,928,584,775,700
|
Find all format specifiers in a string.
|
contrib/devtools/update-translations.py
|
find_format_specifiers
|
ALLMINER/elli
|
python
|
def find_format_specifiers(s):
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if (percent < 0):
break
try:
specifiers.append(s[(percent + 1)])
except:
print('Failed to get specifier')
pos = (percent + 2)
return specifiers
|
def split_format_specifiers(specifiers):
    """Split format specifiers between numeric (Qt) and others (strprintf)."""
    digits = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}
    numeric, other = [], []
    for spec in specifiers:
        (numeric if spec in digits else other).append(spec)
    # When any numeric (Qt-style) specifier is present, the non-numeric ones
    # are discarded entirely.
    if numeric:
        other = []
    return (set(numeric), other)
| 1,703,264,572,144,296,000
|
Split format specifiers between numeric (Qt) and others (strprintf)
|
contrib/devtools/update-translations.py
|
split_format_specifiers
|
ALLMINER/elli
|
python
|
def split_format_specifiers(specifiers):
numeric = []
other = []
for s in specifiers:
if (s in {'1', '2', '3', '4', '5', '6', '7', '8', '9'}):
numeric.append(s)
else:
other.append(s)
if numeric:
other = []
return (set(numeric), other)
|
def sanitize_string(s):
    """Sanitize string for printing by flattening newlines to spaces."""
    return ' '.join(s.split('\n'))
| -720,585,957,357,829,600
|
Sanitize string for printing
|
contrib/devtools/update-translations.py
|
sanitize_string
|
ALLMINER/elli
|
python
|
def sanitize_string(s):
return s.replace('\n', ' ')
|
def remove_invalid_characters(s):
    'Remove invalid characters from translation string'
    # FIX_RE is a module-level compiled regex (defined outside this view);
    # every match is deleted by substituting the empty byte string.
    # NOTE(review): the b'' replacement implies FIX_RE is a bytes pattern and
    # callers must pass bytes, not str -- confirm against the definition.
    return FIX_RE.sub(b'', s)
| 4,520,350,627,527,509,500
|
Remove invalid characters from translation string
|
contrib/devtools/update-translations.py
|
remove_invalid_characters
|
ALLMINER/elli
|
python
|
def remove_invalid_characters(s):
return FIX_RE.sub(b'', s)
|
def test_constructor_must_create_object_when_analytical_data_has_float_values(self, linearity_validator_obj):
    """Given analytical data, the LinearityValidator should store it
    as lists of floats."""
    # fixture data must be stored on the validator unchanged
    expected_analytical = [0.1, 0.2, 0.15]
    expected_concentration = [0.1, 0.2, 0.3]
    assert linearity_validator_obj.analytical_data == expected_analytical
    assert linearity_validator_obj.concentration_data == expected_concentration
| -6,116,320,948,205,417,000
|
Given analytical data
The LinearityValidator
Should create a list of floats
|
tests/unit/test_validators/test_linearity_validator.py
|
test_constructor_must_create_object_when_analytical_data_has_float_values
|
abxsantos/analytical-validation
|
python
|
def test_constructor_must_create_object_when_analytical_data_has_float_values(self, linearity_validator_obj):
'Given analytical data\n The LinearityValidator\n Should create a list of floats\n '
assert (linearity_validator_obj.analytical_data == [0.1, 0.2, 0.15])
assert (linearity_validator_obj.concentration_data == [0.1, 0.2, 0.3])
|
def test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data(self, ordinary_least_squares_regression_mock, add_constant_mock, linearity_validator_obj):
    """Given float concentration values, ordinary_least_squares_linear_regression
    must fit an OLS model and store the result."""
    ols_mock = ordinary_least_squares_regression_mock
    linearity_validator_obj.ordinary_least_squares_linear_regression()
    # the fitted result must come from OLS(...).fit()
    assert linearity_validator_obj.fitted_result == ols_mock.return_value.fit.return_value
    assert ols_mock.called
    assert ols_mock.call_args_list == [call(linearity_validator_obj.analytical_data, add_constant_mock.return_value)]
    # the concentration data must be run through add_constant first
    assert add_constant_mock.called
    assert add_constant_mock.call_args_list == [call(linearity_validator_obj.concentration_data)]
| 8,376,493,372,959,138,000
|
Given concentration values = float
The ordinary_least_squares_linear_regression
Then must set properties
|
tests/unit/test_validators/test_linearity_validator.py
|
test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data
|
abxsantos/analytical-validation
|
python
|
def test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data(self, ordinary_least_squares_regression_mock, add_constant_mock, linearity_validator_obj):
'Given concentration values = float\n The ordinary_least_squares_linear_regression\n Then must set properties'
linearity_validator_obj.ordinary_least_squares_linear_regression()
assert (linearity_validator_obj.fitted_result == ordinary_least_squares_regression_mock.return_value.fit.return_value)
assert ordinary_least_squares_regression_mock.called
assert (ordinary_least_squares_regression_mock.call_args_list == [call(linearity_validator_obj.analytical_data, add_constant_mock.return_value)])
assert add_constant_mock.called
assert (add_constant_mock.call_args_list == [call(linearity_validator_obj.concentration_data)])
|
def test_regression_residues_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
    """Given a fitted regression model, regression_residues must expose the
    model residues as a list."""
    expected_residues = fitted_result_obj.resid.tolist()
    assert linearity_validator_obj.regression_residues == expected_residues
| -5,910,100,960,835,499,000
|
Given a regression model
when regression_residues is called
the regression residues must be created
|
tests/unit/test_validators/test_linearity_validator.py
|
test_regression_residues_exists_when_fitted_result_not_none
|
abxsantos/analytical-validation
|
python
|
def test_regression_residues_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
'Given a regression model\n when regression_residues is called\n the regression residues must be created'
assert (linearity_validator_obj.regression_residues == fitted_result_obj.resid.tolist())
|
@pytest.mark.parametrize('param_anova_f_pvalue, param_alpha, expected_result', [(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.1, True)])
def test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990(self, param_alpha, linearity_validator_obj, param_anova_f_pvalue, expected_result):
    """The ANOVA F p-value is valid only when it is below alpha."""
    validator = linearity_validator_obj
    validator.alpha = param_alpha
    validator.fitted_result.f_pvalue = param_anova_f_pvalue
    assert validator.valid_anova_f_pvalue is expected_result
| 2,461,911,986,618,303,000
|
Given data with an aceptable regression model
When valid_anova_f_pvalue is called
Then anova_f_pvalue < alpha must assert true
|
tests/unit/test_validators/test_linearity_validator.py
|
test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('param_anova_f_pvalue, param_alpha, expected_result', [(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.1, True)])
def test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990(self, param_alpha, linearity_validator_obj, param_anova_f_pvalue, expected_result):
'Given data with an aceptable regression model\n When valid_anova_f_pvalue is called\n Then anova_f_pvalue < alpha must assert true'
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.f_pvalue = param_anova_f_pvalue
assert (linearity_validator_obj.valid_anova_f_pvalue is expected_result)
|
@pytest.mark.parametrize('param_significant_slope, param_alpha, expected_result', [(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.1, True)])
def test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha(self, linearity_validator_obj, param_significant_slope, param_alpha, expected_result):
    """The slope is significant when its p-value is below alpha."""
    validator = linearity_validator_obj
    validator.alpha = param_alpha
    # pvalues = (intercept_pvalue, slope_pvalue); only the slope entry matters here
    validator.fitted_result.pvalues = ('mock value', param_significant_slope)
    assert validator.significant_slope is expected_result
| 7,304,967,778,565,503,000
|
Given homokedastic data
When check_hypothesis is called
Then slope_is_significant must assert true
|
tests/unit/test_validators/test_linearity_validator.py
|
test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('param_significant_slope, param_alpha, expected_result', [(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.1, True)])
def test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha(self, linearity_validator_obj, param_significant_slope, param_alpha, expected_result):
'Given homokedastic data\n When check_hypothesis is called\n Then slope_is_significant must assert true'
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = ('mock value', param_significant_slope)
assert (linearity_validator_obj.significant_slope is expected_result)
|
@pytest.mark.parametrize('param_insignificant_intercept, param_alpha, expected_result', [(0.051, 0.05, True), (10, 0.1, True), (0.049, 0.05, False), (0.001, 0.1, False)])
def test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha(self, linearity_validator_obj, param_alpha, param_insignificant_intercept, expected_result):
    """The intercept is insignificant when its p-value exceeds alpha."""
    validator = linearity_validator_obj
    validator.alpha = param_alpha
    # pvalues = (intercept_pvalue, slope_pvalue); only the intercept entry matters here
    validator.fitted_result.pvalues = (param_insignificant_intercept, 'mock value')
    assert validator.insignificant_intercept is expected_result
| 3,910,188,703,419,715,000
|
Given homokedastic data
When check_hypothesis is called
Then intercept_not_significant must assert true
|
tests/unit/test_validators/test_linearity_validator.py
|
test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('param_insignificant_intercept, param_alpha, expected_result', [(0.051, 0.05, True), (10, 0.1, True), (0.049, 0.05, False), (0.001, 0.1, False)])
def test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha(self, linearity_validator_obj, param_alpha, param_insignificant_intercept, expected_result):
'Given homokedastic data\n When check_hypothesis is called\n Then intercept_not_significant must assert true'
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = (param_insignificant_intercept, 'mock value')
assert (linearity_validator_obj.insignificant_intercept is expected_result)
|
@pytest.mark.parametrize('param_r_squared, expected_result', [(1, True), (0.99, True), (0.98, False)])
def test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990(self, linearity_validator_obj, param_r_squared, expected_result):
    """R squared is accepted when it is at least 0.990."""
    validator = linearity_validator_obj
    validator.fitted_result.rsquared = param_r_squared
    assert validator.valid_r_squared is expected_result
| -1,165,562,256,004,436,000
|
Given homokedastic data
When check_hypothesis is called
Then r_squared > 0.990 must assert true
|
tests/unit/test_validators/test_linearity_validator.py
|
test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('param_r_squared, expected_result', [(1, True), (0.99, True), (0.98, False)])
def test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990(self, linearity_validator_obj, param_r_squared, expected_result):
'Given homokedastic data\n When check_hypothesis is called\n Then r_squared > 0.990 must assert true'
linearity_validator_obj.fitted_result.rsquared = param_r_squared
assert (linearity_validator_obj.valid_r_squared is expected_result)
|
def test_run_breusch_pagan_test_must_raise_exception_when_model_is_none(self):
    """Without a fitted model, run_breusch_pagan_test must raise DataWasNotFitted."""
    analytical_data = [[0.1, 0.2, 0.15]]
    concentration_data = [[0.2, 0.2, 0.3]]
    validator = LinearityValidator(analytical_data, concentration_data)
    with pytest.raises(DataWasNotFitted):
        validator.run_breusch_pagan_test()
| 3,710,939,284,436,963,300
|
Not given a model parameter
The check_homokedasticity
Should raise exception
|
tests/unit/test_validators/test_linearity_validator.py
|
test_run_breusch_pagan_test_must_raise_exception_when_model_is_none
|
abxsantos/analytical-validation
|
python
|
def test_run_breusch_pagan_test_must_raise_exception_when_model_is_none(self):
'Not given a model parameter\n The check_homokedasticity\n Should raise exception'
analytical_data = [[0.1, 0.2, 0.15]]
concentration_data = [[0.2, 0.2, 0.3]]
with pytest.raises(DataWasNotFitted):
LinearityValidator(analytical_data, concentration_data).run_breusch_pagan_test()
|
def test_run_breusch_pagan_test(self, linearity_validator_obj, het_breuschpagan_mock):
    """run_breusch_pagan_test must store the p-value from het_breuschpagan."""
    validator = linearity_validator_obj
    validator.run_breusch_pagan_test()
    # the mock fixture is configured to return 42 as the p-value
    assert validator.breusch_pagan_pvalue == 42
    assert het_breuschpagan_mock.called
    assert het_breuschpagan_mock.call_args_list == [call(validator.fitted_result.resid, validator.fitted_result.model.exog)]
| -7,879,011,320,087,054,000
|
Given heterokedastic data
When check_homokedasticity is called
Then must return false
|
tests/unit/test_validators/test_linearity_validator.py
|
test_run_breusch_pagan_test
|
abxsantos/analytical-validation
|
python
|
def test_run_breusch_pagan_test(self, linearity_validator_obj, het_breuschpagan_mock):
'Given heterokedastic data\n When check_homokedasticity is called\n Then must return false'
linearity_validator_obj.run_breusch_pagan_test()
assert (linearity_validator_obj.breusch_pagan_pvalue == 42)
assert het_breuschpagan_mock.called
assert (het_breuschpagan_mock.call_args_list == [call(linearity_validator_obj.fitted_result.resid, linearity_validator_obj.fitted_result.model.exog)])
|
@pytest.mark.parametrize('durbin_watson_pvalue', [0.1, 1, 2, 2.5, 3, 3.9])
def test_check_residual_autocorrelation(self, linearity_validator_obj, durbin_watson_mock, durbin_watson_pvalue):
    """check_residual_autocorrelation must store the Durbin-Watson statistic."""
    durbin_watson_mock.return_value = durbin_watson_pvalue
    validator = linearity_validator_obj
    validator.check_residual_autocorrelation()
    assert validator.durbin_watson_value == durbin_watson_mock.return_value
    assert durbin_watson_mock.called
    # the statistic must be computed from the fitted model's residues
    assert durbin_watson_mock.call_args_list == [call(validator.fitted_result.resid)]
| 2,716,756,573,364,205,000
|
Given data
When residual_autocorrelation is called
Then must create durbin_watson_value
|
tests/unit/test_validators/test_linearity_validator.py
|
test_check_residual_autocorrelation
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('durbin_watson_pvalue', [0.1, 1, 2, 2.5, 3, 3.9])
def test_check_residual_autocorrelation(self, linearity_validator_obj, durbin_watson_mock, durbin_watson_pvalue):
'Given data\n When residual_autocorrelation is called\n Then must create durbin_watson_value'
durbin_watson_mock.return_value = durbin_watson_pvalue
linearity_validator_obj.check_residual_autocorrelation()
assert (linearity_validator_obj.durbin_watson_value == durbin_watson_mock.return_value)
assert durbin_watson_mock.called
assert (durbin_watson_mock.call_args_list == [call(linearity_validator_obj.fitted_result.resid)])
|
def test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted(self, linearity_validator_obj):
    """Without a fitted regression, the autocorrelation check must raise."""
    linearity_validator_obj.fitted_result = None
    with pytest.raises(DataWasNotFitted):
        linearity_validator_obj.check_residual_autocorrelation()
| 9,115,932,334,498,802,000
|
Given data,
if no regression was calculated
Should raise an exception
|
tests/unit/test_validators/test_linearity_validator.py
|
test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted
|
abxsantos/analytical-validation
|
python
|
def test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted(self, linearity_validator_obj):
'Given data,\n if no regression was calculated\n Should raise an exception'
linearity_validator_obj.fitted_result = None
with pytest.raises(DataWasNotFitted):
linearity_validator_obj.check_residual_autocorrelation()
|
@pytest.mark.parametrize('durbin_watson_pvalue', [(-1), 10, 4.1])
def test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4(self, linearity_validator_obj, durbin_watson_mock, durbin_watson_pvalue):
    """Out-of-range Durbin-Watson values must leave durbin_watson_value unset."""
    durbin_watson_mock.return_value = durbin_watson_pvalue
    # the check is never run here, so the attribute stays at its initial None
    assert linearity_validator_obj.durbin_watson_value is None
| 977,011,992,682,152,000
|
Given data,
When check_residual is called
after fitting the model
Should pass creating
0 < durbin_watson_value < 4
|
tests/unit/test_validators/test_linearity_validator.py
|
test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4
|
abxsantos/analytical-validation
|
python
|
@pytest.mark.parametrize('durbin_watson_pvalue', [(- 1), 10, 4.1])
def test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4(self, linearity_validator_obj, durbin_watson_mock, durbin_watson_pvalue):
'Given data,\n When check_residual is called\n after fitting the model\n Should pass creating\n 0 < durbin_watson_value < 4'
durbin_watson_mock.return_value = durbin_watson_pvalue
assert (linearity_validator_obj.durbin_watson_value is None)
|
def import_chart(slc_to_import: Slice, slc_to_override: Optional[Slice], import_time: Optional[int] = None) -> int:
    """Inserts or overrides slc in the database.

    remote_id and import_time fields in params_dict are set to track the
    slice origin and ensure correct overrides for multiple imports.
    Slice.perm is used to find the datasources and connect them.

    :param Slice slc_to_import: Slice object to import
    :param Slice slc_to_override: Slice to replace, id matches remote_id
    :param import_time: timestamp recorded in params for provenance tracking
    :returns: The resulting id for the imported slice
    :rtype: int
    """
    session = db.session
    # Detach from any session the object may already belong to so it can be
    # treated as a brand new row.
    make_transient(slc_to_import)
    slc_to_import.dashboards = []
    # Record provenance so later imports can match this slice by remote_id.
    slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
    slc_to_import = slc_to_import.copy()
    slc_to_import.reset_ownership()
    params = slc_to_import.params_dict
    # Re-resolve the datasource on the importing instance by
    # name / schema / database (ids differ between instances).
    datasource = ConnectorRegistry.get_datasource_by_name(session, slc_to_import.datasource_type, params['datasource_name'], params['schema'], params['database_name'])
    slc_to_import.datasource_id = datasource.id
    if slc_to_override:
        # An existing slice matched remote_id: overwrite it in place and
        # return the existing row's id.
        slc_to_override.override(slc_to_import)
        session.flush()
        return slc_to_override.id
    # No match: insert as a new slice.
    session.add(slc_to_import)
    logger.info('Final slice: %s', str(slc_to_import.to_json()))
    session.flush()
    return slc_to_import.id
| -3,424,731,141,977,397,000
|
Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
|
superset/dashboards/commands/importers/v0.py
|
import_chart
|
Jacob-ru/superset
|
python
|
def import_chart(slc_to_import: Slice, slc_to_override: Optional[Slice], import_time: Optional[int]=None) -> int:
'Inserts or overrides slc in the database.\n\n remote_id and import_time fields in params_dict are set to track the\n slice origin and ensure correct overrides for multiple imports.\n Slice.perm is used to find the datasources and connect them.\n\n :param Slice slc_to_import: Slice object to import\n :param Slice slc_to_override: Slice to replace, id matches remote_id\n :returns: The resulting id for the imported slice\n :rtype: int\n '
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
slc_to_import.reset_ownership()
params = slc_to_import.params_dict
datasource = ConnectorRegistry.get_datasource_by_name(session, slc_to_import.datasource_type, params['datasource_name'], params['schema'], params['database_name'])
slc_to_import.datasource_id = datasource.id
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logger.info('Final slice: %s', str(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
|
def import_dashboard(dashboard_to_import: Dashboard, dataset_id_mapping: Optional[Dict[(int, int)]]=None, import_time: Optional[int]=None, database_id: Optional[int]=None) -> int:
"Imports the dashboard from the object to the database.\n\n Once dashboard is imported, json_metadata field is extended and stores\n remote_id and import_time. It helps to decide if the dashboard has to\n be overridden or just copies over. Slices that belong to this\n dashboard will be wired to existing tables. This function can be used\n to import/export dashboards between multiple superset instances.\n Audit metadata isn't copied over.\n "
def alter_positions(dashboard: Dashboard, old_to_new_slc_id_dict: Dict[(int, int)]) -> None:
'Updates slice_ids in the position json.\n\n Sample position_json data:\n {\n "DASHBOARD_VERSION_KEY": "v2",\n "DASHBOARD_ROOT_ID": {\n "type": "DASHBOARD_ROOT_TYPE",\n "id": "DASHBOARD_ROOT_ID",\n "children": ["DASHBOARD_GRID_ID"]\n },\n "DASHBOARD_GRID_ID": {\n "type": "DASHBOARD_GRID_TYPE",\n "id": "DASHBOARD_GRID_ID",\n "children": ["DASHBOARD_CHART_TYPE-2"]\n },\n "DASHBOARD_CHART_TYPE-2": {\n "type": "CHART",\n "id": "DASHBOARD_CHART_TYPE-2",\n "children": [],\n "meta": {\n "width": 4,\n "height": 50,\n "chartId": 118\n }\n },\n }\n '
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and value.get('meta', {}).get('chartId')):
old_slice_id = value['meta']['chartId']
if (old_slice_id in old_to_new_slc_id_dict):
value['meta']['chartId'] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
def alter_native_filters(dashboard: Dashboard) -> None:
json_metadata = json.loads(dashboard.json_metadata)
native_filter_configuration = json_metadata.get('native_filter_configuration')
if (not native_filter_configuration):
return
for native_filter in native_filter_configuration:
for target in native_filter.get('targets', []):
old_dataset_id = target.get('datasetId')
if (dataset_id_mapping and (old_dataset_id is not None)):
target['datasetId'] = dataset_id_mapping.get(old_dataset_id, old_dataset_id)
dashboard.json_metadata = json.dumps(json_metadata)
logger.info('Started import of the dashboard: %s', dashboard_to_import.to_json())
session = db.session
logger.info('Dashboard has %d slices', len(dashboard_to_import.slices))
slices = copy(dashboard_to_import.slices)
dashboard_to_import.slug = None
old_json_metadata = json.loads((dashboard_to_import.json_metadata or '{}'))
old_to_new_slc_id_dict: Dict[(int, int)] = {}
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
new_filter_scopes = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {slc.params_dict['remote_id']: slc for slc in session.query(Slice).filter(Slice.datasource_id.in_(list(dataset_id_mapping.values()))).all() if ('remote_id' in slc.params_dict)}
for slc in slices:
logger.info('Importing slice %s from the dashboard: %s', slc.to_json(), dashboard_to_import.dashboard_title)
if database_id:
database_name = session.query(Database).filter((Database.id == database_id)).first().name
slc.alter_params(database_name=database_name)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
new_slc_id_str = str(new_slc_id)
old_slc_id_str = str(slc.id)
if (('timed_refresh_immune_slices' in i_params_dict) and (old_slc_id_str in i_params_dict['timed_refresh_immune_slices'])):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (('expanded_slices' in i_params_dict) and (old_slc_id_str in i_params_dict['expanded_slices'])):
new_expanded_slices[new_slc_id_str] = i_params_dict['expanded_slices'][old_slc_id_str]
filter_scopes = {}
if (('filter_immune_slices' in i_params_dict) or ('filter_immune_slice_fields' in i_params_dict)):
filter_scopes = convert_filter_scopes(old_json_metadata, slices)
if ('filter_scopes' in i_params_dict):
filter_scopes = old_json_metadata.get('filter_scopes')
if filter_scopes:
new_filter_scopes = copy_filter_scopes(old_to_new_slc_id_dict=old_to_new_slc_id_dict, old_filter_scopes=filter_scopes)
existing_dashboard = None
for dash in session.query(Dashboard).all():
if (('remote_id' in dash.params_dict) and (dash.params_dict['remote_id'] == dashboard_to_import.id)):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
dashboard_to_import.remove_params(param_to_remove='filter_immune_slices')
dashboard_to_import.remove_params(param_to_remove='filter_immune_slice_fields')
if new_filter_scopes:
dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(timed_refresh_immune_slices=new_timed_refresh_immune_slices)
alter_native_filters(dashboard_to_import)
new_slices = session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
dashboard_to_import.slices = new_slices
session.add(dashboard_to_import)
session.flush()
return dashboard_to_import.id
| 7,049,112,800,936,609,000
|
Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copies over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
|
superset/dashboards/commands/importers/v0.py
|
import_dashboard
|
Jacob-ru/superset
|
python
|
def import_dashboard(dashboard_to_import: Dashboard, dataset_id_mapping: Optional[Dict[(int, int)]]=None, import_time: Optional[int]=None, database_id: Optional[int]=None) -> int:
"Imports the dashboard from the object to the database.\n\n Once dashboard is imported, json_metadata field is extended and stores\n remote_id and import_time. It helps to decide if the dashboard has to\n be overridden or just copies over. Slices that belong to this\n dashboard will be wired to existing tables. This function can be used\n to import/export dashboards between multiple superset instances.\n Audit metadata isn't copied over.\n "
def alter_positions(dashboard: Dashboard, old_to_new_slc_id_dict: Dict[(int, int)]) -> None:
'Updates slice_ids in the position json.\n\n Sample position_json data:\n {\n "DASHBOARD_VERSION_KEY": "v2",\n "DASHBOARD_ROOT_ID": {\n "type": "DASHBOARD_ROOT_TYPE",\n "id": "DASHBOARD_ROOT_ID",\n "children": ["DASHBOARD_GRID_ID"]\n },\n "DASHBOARD_GRID_ID": {\n "type": "DASHBOARD_GRID_TYPE",\n "id": "DASHBOARD_GRID_ID",\n "children": ["DASHBOARD_CHART_TYPE-2"]\n },\n "DASHBOARD_CHART_TYPE-2": {\n "type": "CHART",\n "id": "DASHBOARD_CHART_TYPE-2",\n "children": [],\n "meta": {\n "width": 4,\n "height": 50,\n "chartId": 118\n }\n },\n }\n '
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and value.get('meta', {}).get('chartId')):
old_slice_id = value['meta']['chartId']
if (old_slice_id in old_to_new_slc_id_dict):
value['meta']['chartId'] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
def alter_native_filters(dashboard: Dashboard) -> None:
json_metadata = json.loads(dashboard.json_metadata)
native_filter_configuration = json_metadata.get('native_filter_configuration')
if (not native_filter_configuration):
return
for native_filter in native_filter_configuration:
for target in native_filter.get('targets', []):
old_dataset_id = target.get('datasetId')
if (dataset_id_mapping and (old_dataset_id is not None)):
target['datasetId'] = dataset_id_mapping.get(old_dataset_id, old_dataset_id)
dashboard.json_metadata = json.dumps(json_metadata)
logger.info('Started import of the dashboard: %s', dashboard_to_import.to_json())
session = db.session
logger.info('Dashboard has %d slices', len(dashboard_to_import.slices))
slices = copy(dashboard_to_import.slices)
dashboard_to_import.slug = None
old_json_metadata = json.loads((dashboard_to_import.json_metadata or '{}'))
old_to_new_slc_id_dict: Dict[(int, int)] = {}
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
new_filter_scopes = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {slc.params_dict['remote_id']: slc for slc in session.query(Slice).filter(Slice.datasource_id.in_(list(dataset_id_mapping.values()))).all() if ('remote_id' in slc.params_dict)}
for slc in slices:
logger.info('Importing slice %s from the dashboard: %s', slc.to_json(), dashboard_to_import.dashboard_title)
if database_id:
database_name = session.query(Database).filter((Database.id == database_id)).first().name
slc.alter_params(database_name=database_name)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
new_slc_id_str = str(new_slc_id)
old_slc_id_str = str(slc.id)
if (('timed_refresh_immune_slices' in i_params_dict) and (old_slc_id_str in i_params_dict['timed_refresh_immune_slices'])):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (('expanded_slices' in i_params_dict) and (old_slc_id_str in i_params_dict['expanded_slices'])):
new_expanded_slices[new_slc_id_str] = i_params_dict['expanded_slices'][old_slc_id_str]
filter_scopes = {}
if (('filter_immune_slices' in i_params_dict) or ('filter_immune_slice_fields' in i_params_dict)):
filter_scopes = convert_filter_scopes(old_json_metadata, slices)
if ('filter_scopes' in i_params_dict):
filter_scopes = old_json_metadata.get('filter_scopes')
if filter_scopes:
new_filter_scopes = copy_filter_scopes(old_to_new_slc_id_dict=old_to_new_slc_id_dict, old_filter_scopes=filter_scopes)
existing_dashboard = None
for dash in session.query(Dashboard).all():
if (('remote_id' in dash.params_dict) and (dash.params_dict['remote_id'] == dashboard_to_import.id)):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
dashboard_to_import.remove_params(param_to_remove='filter_immune_slices')
dashboard_to_import.remove_params(param_to_remove='filter_immune_slice_fields')
if new_filter_scopes:
dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(timed_refresh_immune_slices=new_timed_refresh_immune_slices)
alter_native_filters(dashboard_to_import)
new_slices = session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
dashboard_to_import.slices = new_slices
session.add(dashboard_to_import)
session.flush()
return dashboard_to_import.id
|
def decode_dashboards(o: Dict[(str, Any)]) -> Any:
'\n Function to be passed into json.loads obj_hook parameter\n Recreates the dashboard object from a json representation.\n '
from superset.connectors.druid.models import DruidCluster, DruidColumn, DruidDatasource, DruidMetric
if ('__Dashboard__' in o):
return Dashboard(**o['__Dashboard__'])
if ('__Slice__' in o):
return Slice(**o['__Slice__'])
if ('__TableColumn__' in o):
return TableColumn(**o['__TableColumn__'])
if ('__SqlaTable__' in o):
return SqlaTable(**o['__SqlaTable__'])
if ('__SqlMetric__' in o):
return SqlMetric(**o['__SqlMetric__'])
if ('__DruidCluster__' in o):
return DruidCluster(**o['__DruidCluster__'])
if ('__DruidColumn__' in o):
return DruidColumn(**o['__DruidColumn__'])
if ('__DruidDatasource__' in o):
return DruidDatasource(**o['__DruidDatasource__'])
if ('__DruidMetric__' in o):
return DruidMetric(**o['__DruidMetric__'])
if ('__datetime__' in o):
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
return o
| 5,721,995,801,095,082,000
|
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
|
superset/dashboards/commands/importers/v0.py
|
decode_dashboards
|
Jacob-ru/superset
|
python
|
def decode_dashboards(o: Dict[(str, Any)]) -> Any:
'\n Function to be passed into json.loads obj_hook parameter\n Recreates the dashboard object from a json representation.\n '
from superset.connectors.druid.models import DruidCluster, DruidColumn, DruidDatasource, DruidMetric
if ('__Dashboard__' in o):
return Dashboard(**o['__Dashboard__'])
if ('__Slice__' in o):
return Slice(**o['__Slice__'])
if ('__TableColumn__' in o):
return TableColumn(**o['__TableColumn__'])
if ('__SqlaTable__' in o):
return SqlaTable(**o['__SqlaTable__'])
if ('__SqlMetric__' in o):
return SqlMetric(**o['__SqlMetric__'])
if ('__DruidCluster__' in o):
return DruidCluster(**o['__DruidCluster__'])
if ('__DruidColumn__' in o):
return DruidColumn(**o['__DruidColumn__'])
if ('__DruidDatasource__' in o):
return DruidDatasource(**o['__DruidDatasource__'])
if ('__DruidMetric__' in o):
return DruidMetric(**o['__DruidMetric__'])
if ('__datetime__' in o):
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
return o
|
def import_dashboards(session: Session, content: str, database_id: Optional[int]=None, import_time: Optional[int]=None) -> None:
'Imports dashboards from a stream to databases'
current_tt = int(time.time())
import_time = (current_tt if (import_time is None) else import_time)
data = json.loads(content, object_hook=decode_dashboards)
if (not data):
raise DashboardImportException(_('No data in file'))
dataset_id_mapping: Dict[(int, int)] = {}
for table in data['datasources']:
new_dataset_id = import_dataset(table, database_id, import_time=import_time)
params = json.loads(table.params)
dataset_id_mapping[params['remote_id']] = new_dataset_id
session.commit()
for dashboard in data['dashboards']:
import_dashboard(dashboard, dataset_id_mapping, import_time=import_time, database_id=database_id)
session.commit()
| -3,609,365,525,933,547,500
|
Imports dashboards from a stream to databases
|
superset/dashboards/commands/importers/v0.py
|
import_dashboards
|
Jacob-ru/superset
|
python
|
def import_dashboards(session: Session, content: str, database_id: Optional[int]=None, import_time: Optional[int]=None) -> None:
current_tt = int(time.time())
import_time = (current_tt if (import_time is None) else import_time)
data = json.loads(content, object_hook=decode_dashboards)
if (not data):
raise DashboardImportException(_('No data in file'))
dataset_id_mapping: Dict[(int, int)] = {}
for table in data['datasources']:
new_dataset_id = import_dataset(table, database_id, import_time=import_time)
params = json.loads(table.params)
dataset_id_mapping[params['remote_id']] = new_dataset_id
session.commit()
for dashboard in data['dashboards']:
import_dashboard(dashboard, dataset_id_mapping, import_time=import_time, database_id=database_id)
session.commit()
|
def alter_positions(dashboard: Dashboard, old_to_new_slc_id_dict: Dict[(int, int)]) -> None:
'Updates slice_ids in the position json.\n\n Sample position_json data:\n {\n "DASHBOARD_VERSION_KEY": "v2",\n "DASHBOARD_ROOT_ID": {\n "type": "DASHBOARD_ROOT_TYPE",\n "id": "DASHBOARD_ROOT_ID",\n "children": ["DASHBOARD_GRID_ID"]\n },\n "DASHBOARD_GRID_ID": {\n "type": "DASHBOARD_GRID_TYPE",\n "id": "DASHBOARD_GRID_ID",\n "children": ["DASHBOARD_CHART_TYPE-2"]\n },\n "DASHBOARD_CHART_TYPE-2": {\n "type": "CHART",\n "id": "DASHBOARD_CHART_TYPE-2",\n "children": [],\n "meta": {\n "width": 4,\n "height": 50,\n "chartId": 118\n }\n },\n }\n '
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and value.get('meta', {}).get('chartId')):
old_slice_id = value['meta']['chartId']
if (old_slice_id in old_to_new_slc_id_dict):
value['meta']['chartId'] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
| -8,558,249,195,208,456,000
|
Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
|
superset/dashboards/commands/importers/v0.py
|
alter_positions
|
Jacob-ru/superset
|
python
|
def alter_positions(dashboard: Dashboard, old_to_new_slc_id_dict: Dict[(int, int)]) -> None:
'Updates slice_ids in the position json.\n\n Sample position_json data:\n {\n "DASHBOARD_VERSION_KEY": "v2",\n "DASHBOARD_ROOT_ID": {\n "type": "DASHBOARD_ROOT_TYPE",\n "id": "DASHBOARD_ROOT_ID",\n "children": ["DASHBOARD_GRID_ID"]\n },\n "DASHBOARD_GRID_ID": {\n "type": "DASHBOARD_GRID_TYPE",\n "id": "DASHBOARD_GRID_ID",\n "children": ["DASHBOARD_CHART_TYPE-2"]\n },\n "DASHBOARD_CHART_TYPE-2": {\n "type": "CHART",\n "id": "DASHBOARD_CHART_TYPE-2",\n "children": [],\n "meta": {\n "width": 4,\n "height": 50,\n "chartId": 118\n }\n },\n }\n '
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and value.get('meta', {}).get('chartId')):
old_slice_id = value['meta']['chartId']
if (old_slice_id in old_to_new_slc_id_dict):
value['meta']['chartId'] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
|
def test_no_remote_user(self):
'\n Tests requests where no remote user is specified and insures that no\n users get created.\n '
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: None})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: ''})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
| 1,557,096,168,254,504,400
|
Tests requests where no remote user is specified and insures that no
users get created.
|
django/contrib/auth/tests/test_remote_user.py
|
test_no_remote_user
|
2roy999/django
|
python
|
def test_no_remote_user(self):
'\n Tests requests where no remote user is specified and insures that no\n users get created.\n '
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: None})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: })
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
|
def test_unknown_user(self):
'\n Tests the case where the username passed in the header does not exist\n as a User.\n '
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), (num_users + 1))
User.objects.get(username='newuser')
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(User.objects.count(), (num_users + 1))
| 8,109,131,588,071,182,000
|
Tests the case where the username passed in the header does not exist
as a User.
|
django/contrib/auth/tests/test_remote_user.py
|
test_unknown_user
|
2roy999/django
|
python
|
def test_unknown_user(self):
'\n Tests the case where the username passed in the header does not exist\n as a User.\n '
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), (num_users + 1))
User.objects.get(username='newuser')
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(User.objects.count(), (num_users + 1))
|
def test_known_user(self):
'\n Tests the case where the username passed in the header is a valid User.\n '
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: self.known_user2})
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
| 4,638,689,453,855,815,000
|
Tests the case where the username passed in the header is a valid User.
|
django/contrib/auth/tests/test_remote_user.py
|
test_known_user
|
2roy999/django
|
python
|
def test_known_user(self):
'\n \n '
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: self.known_user2})
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
|
def test_last_login(self):
"\n Tests that a user's last_login is set the first time they make a\n request but not updated in subsequent requests with the same session.\n "
user = User.objects.create(username='knownuser')
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(default_login, response.context['user'].last_login)
| -8,229,259,613,211,248,000
|
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
|
django/contrib/auth/tests/test_remote_user.py
|
test_last_login
|
2roy999/django
|
python
|
def test_last_login(self):
"\n Tests that a user's last_login is set the first time they make a\n request but not updated in subsequent requests with the same session.\n "
user = User.objects.create(username='knownuser')
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(default_login, response.context['user'].last_login)
|
def test_header_disappears(self):
'\n Tests that a logged in user is logged out automatically when\n the REMOTE_USER header disappears during the same browser session.\n '
User.objects.create(username='knownuser')
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
| -8,593,508,508,490,221,000
|
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
|
django/contrib/auth/tests/test_remote_user.py
|
test_header_disappears
|
2roy999/django
|
python
|
def test_header_disappears(self):
'\n Tests that a logged in user is logged out automatically when\n the REMOTE_USER header disappears during the same browser session.\n '
User.objects.create(username='knownuser')
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
|
def test_user_switch_forces_new_login(self):
'\n Tests that if the username in the header changes between requests\n that the original user is logged out\n '
User.objects.create(username='knownuser')
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
response = self.client.get('/remote_user/', **{self.header: 'newnewuser'})
self.assertNotEqual(response.context['user'].username, 'knownuser')
| -2,446,387,448,705,318,000
|
Tests that if the username in the header changes between requests
that the original user is logged out
|
django/contrib/auth/tests/test_remote_user.py
|
test_user_switch_forces_new_login
|
2roy999/django
|
python
|
def test_user_switch_forces_new_login(self):
'\n Tests that if the username in the header changes between requests\n that the original user is logged out\n '
User.objects.create(username='knownuser')
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
response = self.client.get('/remote_user/', **{self.header: 'newnewuser'})
self.assertNotEqual(response.context['user'].username, 'knownuser')
|
def tearDown(self):
'Restores settings to avoid breaking other tests.'
settings.MIDDLEWARE_CLASSES = self.curr_middleware
settings.AUTHENTICATION_BACKENDS = self.curr_auth
| 2,779,914,680,300,292,000
|
Restores settings to avoid breaking other tests.
|
django/contrib/auth/tests/test_remote_user.py
|
tearDown
|
2roy999/django
|
python
|
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.curr_middleware
settings.AUTHENTICATION_BACKENDS = self.curr_auth
|
def clean_username(self, username):
'\n Grabs username before the @ character.\n '
return username.split('@')[0]
| -2,728,894,029,981,582,300
|
Grabs username before the @ character.
|
django/contrib/auth/tests/test_remote_user.py
|
clean_username
|
2roy999/django
|
python
|
def clean_username(self, username):
'\n \n '
return username.split('@')[0]
|
def configure_user(self, user):
"\n Sets user's email address.\n "
user.email = 'example@example.com'
user.save()
return user
| 7,252,543,219,551,633,000
|
Sets user's email address.
|
django/contrib/auth/tests/test_remote_user.py
|
configure_user
|
2roy999/django
|
python
|
def configure_user(self, user):
"\n \n "
user.email = 'example@example.com'
user.save()
return user
|
def test_known_user(self):
'\n The strings passed in REMOTE_USER should be cleaned and the known users\n should not have been configured with an email address.\n '
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
| -7,770,742,196,895,024,000
|
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
|
django/contrib/auth/tests/test_remote_user.py
|
test_known_user
|
2roy999/django
|
python
|
def test_known_user(self):
'\n The strings passed in REMOTE_USER should be cleaned and the known users\n should not have been configured with an email address.\n '
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, )
self.assertEqual(User.objects.get(username='knownuser2').email, )
|
def test_unknown_user(self):
'\n The unknown user created should be configured with an email address.\n '
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, 'example@example.com')
| -3,766,192,825,234,334,000
|
The unknown user created should be configured with an email address.
|
django/contrib/auth/tests/test_remote_user.py
|
test_unknown_user
|
2roy999/django
|
python
|
def test_unknown_user(self):
'\n \n '
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, 'example@example.com')
|
def word_freq(self, text) -> dict:
'\n Create document word frequency table {w1:f1, ..., wN:fN}.\n Remove stop words, punct, etc. and lowercase\n :rtype: dict\n '
doc = self.nlp(text)
word_freq_table = {}
for token in doc:
ignore = (token.is_stop or token.is_punct or token.is_quote or token.is_oov or (token.text in ['.', ',', ';', ':', '%', '-']))
if ((not ignore) and (token.text in word_freq_table)):
word_freq_table[token.lower_] += 1
elif (not ignore):
word_freq_table[token.lower_] = 1
return word_freq_table
| -1,587,509,888,795,626,500
|
Create document word frequency table {w1:f1, ..., wN:fN}.
Remove stop words, punct, etc. and lowercase
:rtype: dict
|
TextSummarization/TF_IDF.py
|
word_freq
|
asehmi/Data-Science-Meetup-Oxford
|
python
|
def word_freq(self, text) -> dict:
'\n Create document word frequency table {w1:f1, ..., wN:fN}.\n Remove stop words, punct, etc. and lowercase\n :rtype: dict\n '
doc = self.nlp(text)
word_freq_table = {}
for token in doc:
ignore = (token.is_stop or token.is_punct or token.is_quote or token.is_oov or (token.text in ['.', ',', ';', ':', '%', '-']))
if ((not ignore) and (token.text in word_freq_table)):
word_freq_table[token.lower_] += 1
elif (not ignore):
word_freq_table[token.lower_] = 1
return word_freq_table
|
def sent_word_freq(self, text) -> dict:
'\n Create sentence word frequency table {s1:{w1:f1, ..., wN:fN}, ..., sN:{w1:f1, ..., wN:fN} }.\n :rtype: dict\n '
doc = self.nlp(text)
sent_word_freq_table = {}
for sent in doc.sents:
word_freq_table = self.word_freq(sent.lower_)
sent_word_freq_table[sent.lower_[:15]] = word_freq_table
return sent_word_freq_table
| 2,916,962,073,247,642,000
|
Create sentence word frequency table {s1:{w1:f1, ..., wN:fN}, ..., sN:{w1:f1, ..., wN:fN} }.
:rtype: dict
|
TextSummarization/TF_IDF.py
|
sent_word_freq
|
asehmi/Data-Science-Meetup-Oxford
|
python
|
def sent_word_freq(self, text) -> dict:
'\n Create sentence word frequency table {s1:{w1:f1, ..., wN:fN}, ..., sN:{w1:f1, ..., wN:fN} }.\n :rtype: dict\n '
doc = self.nlp(text)
sent_word_freq_table = {}
for sent in doc.sents:
word_freq_table = self.word_freq(sent.lower_)
sent_word_freq_table[sent.lower_[:15]] = word_freq_table
return sent_word_freq_table
|
def __init__(self, **kwargs):
    """
    Initializes a new WorkSubmissionKey object with values from keyword arguments.
    The following keyword arguments are supported (corresponding to the
    getters/setters of this class):

    :param work_submission_key:
        The value to assign to the work_submission_key property of this WorkSubmissionKey.
    :type work_submission_key: str
    """
    # Swagger metadata: attribute name -> declared type / JSON field name.
    self.swagger_types = dict(work_submission_key='str')
    self.attribute_map = dict(work_submission_key='workSubmissionKey')
    # Backing field for the work_submission_key property.
    self._work_submission_key = None
| -7,551,299,056,031,745,000
|
Initializes a new WorkSubmissionKey object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param work_submission_key:
The value to assign to the work_submission_key property of this WorkSubmissionKey.
:type work_submission_key: str
|
src/oci/management_agent/models/work_submission_key.py
|
__init__
|
LaudateCorpus1/oci-python-sdk
|
python
|
def __init__(self, **kwargs):
    """Initialize a new WorkSubmissionKey from keyword arguments.

    :param work_submission_key: value for the work_submission_key property.
    :type work_submission_key: str
    """
    # Swagger metadata: attribute name -> declared type / JSON field name.
    self.swagger_types = {'work_submission_key': 'str'}
    self.attribute_map = {'work_submission_key': 'workSubmissionKey'}
    # Backing field for the work_submission_key property.
    self._work_submission_key = None
|
@property
def work_submission_key(self):
    """
    **[Required]** Gets the work_submission_key of this WorkSubmissionKey.
    Work Submission Identifier.

    :return: The work_submission_key of this WorkSubmissionKey.
    :rtype: str
    """
    value = self._work_submission_key
    return value
| -6,673,513,871,980,366,000
|
**[Required]** Gets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:return: The work_submission_key of this WorkSubmissionKey.
:rtype: str
|
src/oci/management_agent/models/work_submission_key.py
|
work_submission_key
|
LaudateCorpus1/oci-python-sdk
|
python
|
@property
def work_submission_key(self):
    """**[Required]** Gets the work_submission_key of this WorkSubmissionKey.
    Work Submission Identifier.

    :return: The work_submission_key of this WorkSubmissionKey.
    :rtype: str
    """
    return self._work_submission_key
|
@work_submission_key.setter
def work_submission_key(self, work_submission_key):
    """
    Sets the work_submission_key of this WorkSubmissionKey.
    Work Submission Identifier.

    :param work_submission_key: The work_submission_key of this WorkSubmissionKey.
    :type: str
    """
    # Plain assignment to the backing field; no validation is performed.
    self._work_submission_key = work_submission_key
| -388,778,358,695,815,200
|
Sets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:param work_submission_key: The work_submission_key of this WorkSubmissionKey.
:type: str
|
src/oci/management_agent/models/work_submission_key.py
|
work_submission_key
|
LaudateCorpus1/oci-python-sdk
|
python
|
@work_submission_key.setter
def work_submission_key(self, work_submission_key):
    """Sets the work_submission_key of this WorkSubmissionKey.
    Work Submission Identifier.

    :param work_submission_key: The work_submission_key of this WorkSubmissionKey.
    :type: str
    """
    # Plain assignment to the backing field; no validation is performed.
    self._work_submission_key = work_submission_key
|
def process_request(self, request):
    """Resolve a djangoUrl.reverse request into the real view URL.

    Reads the url name, args and kwargs from GET parameters, reverses the
    url, and rewrites the request in place so it resolves exactly as if it
    had been made to the reversed view directly.

    Args and kwargs that arrive as empty strings (unsubstituted $resource
    placeholders such as ``djng_url_kwarg_id=``) are ignored, so e.g.
    ``?djng_url_name=orders&djng_url_kwarg_id=`` reverses to the plain
    'orders' url with no parameters.
    """
    # Only intercept requests aimed at the special reverse endpoint.
    if (request.path == self.ANGULAR_REVERSE):
        url_name = request.GET.get('djng_url_name')
        url_args = request.GET.getlist('djng_url_args', [])
        url_kwargs = {}
        # Drop empty-string positional args (unsubstituted placeholders).
        url_args = filter((lambda x: x), url_args)
        for param in request.GET:
            if param.startswith('djng_url_kwarg_'):
                # Keep only non-empty kwargs; strip the 15-char prefix
                # 'djng_url_kwarg_' to recover the kwarg name.
                if request.GET[param]:
                    url_kwargs[param[15:]] = request.GET[param]
        url = unquote(reverse(url_name, args=url_args, kwargs=url_kwargs))
        assert (not url.startswith(self.ANGULAR_REVERSE)), 'Prevent recursive requests'
        # Rewrite the request in place so URL resolution targets the
        # reversed view. Order matters: path/environ first, then GET.
        request.path = request.path_info = url
        request.environ['PATH_INFO'] = url
        query = request.GET.copy()
        # Strip all djng_url* bookkeeping params from the forwarded query.
        for key in request.GET:
            if key.startswith('djng_url'):
                query.pop(key, None)
        if six.PY3:
            request.environ['QUERY_STRING'] = query.urlencode()
        else:
            # Python 2 WSGI expects a byte string here.
            request.environ['QUERY_STRING'] = query.urlencode().encode('utf-8')
        # Rebuild request.GET from the cleaned query string.
        request.GET = http.QueryDict(request.environ['QUERY_STRING'])
| 8,338,263,217,393,306,000
|
Reads url name, args, kwargs from GET parameters, reverses the url and resolves view function
Returns the result of resolved view function, called with provided args and kwargs
Since the view function is called directly, it isn't ran through middlewares, so the middlewares must
be added manually
The final result is exactly the same as if the request was for the resolved view.
Parametrized urls:
djangoUrl.reverse can be used with parametrized urls of $resource
In that case the reverse url is something like: /angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=:id
$resource can either replace the ':id' part with say 2 and we can proceed as usual,
reverse with reverse('orders', kwargs={'id': 2}).
If it's not replaced we want to reverse to url we get a request to url
'/angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=' which
gives a request.GET QueryDict {u'djng_url_name': [u'orders'], u'djng_url_kwarg_id': [u'']}
In that case we want to ignore the id param and only reverse to url with name 'orders' and no params.
So we ignore args and kwargs that are empty strings.
|
djng/middleware.py
|
process_request
|
BluABK/django-angular
|
python
|
def process_request(self, request):
    """Resolve a djangoUrl.reverse request into the real view URL.

    Reads the url name, args and kwargs from GET parameters, reverses the
    url, and rewrites the request in place so it resolves exactly as if it
    had been made to the reversed view directly. Args and kwargs arriving
    as empty strings (unsubstituted $resource placeholders) are ignored.
    """
    # Only intercept requests aimed at the special reverse endpoint.
    if (request.path == self.ANGULAR_REVERSE):
        url_name = request.GET.get('djng_url_name')
        url_args = request.GET.getlist('djng_url_args', [])
        url_kwargs = {}
        # Drop empty-string positional args (unsubstituted placeholders).
        url_args = filter((lambda x: x), url_args)
        for param in request.GET:
            if param.startswith('djng_url_kwarg_'):
                # Strip the 15-char 'djng_url_kwarg_' prefix; skip empties.
                if request.GET[param]:
                    url_kwargs[param[15:]] = request.GET[param]
        url = unquote(reverse(url_name, args=url_args, kwargs=url_kwargs))
        assert (not url.startswith(self.ANGULAR_REVERSE)), 'Prevent recursive requests'
        # Rewrite the request in place; order matters: path/environ, then GET.
        request.path = request.path_info = url
        request.environ['PATH_INFO'] = url
        query = request.GET.copy()
        # Strip all djng_url* bookkeeping params from the forwarded query.
        for key in request.GET:
            if key.startswith('djng_url'):
                query.pop(key, None)
        if six.PY3:
            request.environ['QUERY_STRING'] = query.urlencode()
        else:
            # Python 2 WSGI expects a byte string here.
            request.environ['QUERY_STRING'] = query.urlencode().encode('utf-8')
        request.GET = http.QueryDict(request.environ['QUERY_STRING'])
|
def test_in_response_to_provided(self):
    """
    Looking up the previous response in the conversation should be
    skipped when a previous response is explicitly provided.
    """
    previous = 'Unique previous response.'
    self.chatbot.get_response(text='Hello', in_response_to=previous)
    statement = self.chatbot.storage.filter(text='Hello', in_response_to=previous)
    self.assertIsNotNone(statement)
| 4,884,029,123,685,717,000
|
Test that the process of looking up the previous response
in the conversation is ignored if a previous response is provided.
|
tests/test_chatbot.py
|
test_in_response_to_provided
|
nadimpayak/ChatBot
|
python
|
def test_in_response_to_provided(self):
    """Looking up the previous response in the conversation should be
    skipped when a previous response is explicitly provided.
    """
    self.chatbot.get_response(text='Hello', in_response_to='Unique previous response.')
    # The explicit in_response_to value must have been stored verbatim.
    statement = self.chatbot.storage.filter(text='Hello', in_response_to='Unique previous response.')
    self.assertIsNotNone(statement)
|
def test_get_initialization_functions(self):
    """
    The three NLTK download helpers should be reported as the
    chatbot's initialization functions.
    """
    functions = self.chatbot.get_initialization_functions()
    listing = str(functions)
    for expected in ('download_nltk_stopwords', 'download_nltk_wordnet',
                     'download_nltk_averaged_perceptron_tagger'):
        self.assertIn(expected, listing)
    self.assertIsLength(functions, 3)
| 1,874,049,852,437,501,200
|
Test that the initialization functions are returned.
|
tests/test_chatbot.py
|
test_get_initialization_functions
|
nadimpayak/ChatBot
|
python
|
def test_get_initialization_functions(self):
    """The three NLTK download helpers should be reported as the
    chatbot's initialization functions.
    """
    functions = self.chatbot.get_initialization_functions()
    self.assertIn('download_nltk_stopwords', str(functions))
    self.assertIn('download_nltk_wordnet', str(functions))
    self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
    self.assertIsLength(functions, 3)
|
def test_get_initialization_functions_spacy_similarity(self):
    """
    The initialization functions should still be returned when the
    first search algorithm compares with spacy_similarity.
    """
    from chatterbot.comparisons import spacy_similarity
    first_search = list(self.chatbot.search_algorithms.values())[0]
    first_search.compare_statements = spacy_similarity
    functions = self.chatbot.get_initialization_functions()
    listing = str(functions)
    for expected in ('download_nltk_stopwords', 'download_nltk_wordnet',
                     'download_nltk_averaged_perceptron_tagger'):
        self.assertIn(expected, listing)
    self.assertIsLength(functions, 3)
| -3,040,526,968,101,790,700
|
Test that the initialization functions are returned.
|
tests/test_chatbot.py
|
test_get_initialization_functions_spacy_similarity
|
nadimpayak/ChatBot
|
python
|
def test_get_initialization_functions_spacy_similarity(self):
    """The initialization functions should still be returned when the
    first search algorithm compares with spacy_similarity.
    """
    from chatterbot.comparisons import spacy_similarity
    # Swap the comparison function on the first registered search algorithm.
    list(self.chatbot.search_algorithms.values())[0].compare_statements = spacy_similarity
    functions = self.chatbot.get_initialization_functions()
    self.assertIn('download_nltk_stopwords', str(functions))
    self.assertIn('download_nltk_wordnet', str(functions))
    self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
    self.assertIsLength(functions, 3)
|
def test_get_initialization_functions_jaccard_similarity(self):
    """
    The initialization functions should still be returned when the
    first search algorithm compares with jaccard_similarity.
    """
    from chatterbot.comparisons import jaccard_similarity
    first_search = list(self.chatbot.search_algorithms.values())[0]
    first_search.compare_statements = jaccard_similarity
    functions = self.chatbot.get_initialization_functions()
    listing = str(functions)
    for expected in ('download_nltk_wordnet', 'download_nltk_stopwords',
                     'download_nltk_averaged_perceptron_tagger'):
        self.assertIn(expected, listing)
    self.assertIsLength(functions, 3)
| 4,063,653,913,070,919,000
|
Test that the initialization functions are returned.
|
tests/test_chatbot.py
|
test_get_initialization_functions_jaccard_similarity
|
nadimpayak/ChatBot
|
python
|
def test_get_initialization_functions_jaccard_similarity(self):
    """The initialization functions should still be returned when the
    first search algorithm compares with jaccard_similarity.
    """
    from chatterbot.comparisons import jaccard_similarity
    # Swap the comparison function on the first registered search algorithm.
    list(self.chatbot.search_algorithms.values())[0].compare_statements = jaccard_similarity
    functions = self.chatbot.get_initialization_functions()
    self.assertIn('download_nltk_wordnet', str(functions))
    self.assertIn('download_nltk_stopwords', str(functions))
    self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
    self.assertIsLength(functions, 3)
|
def test_no_statements_known(self):
    """
    With an empty database, the user's input is the only thing that
    can come back as a response (with zero confidence).
    """
    statement_text = 'How are you?'
    response = self.chatbot.get_response(statement_text)
    stored = list(self.chatbot.storage.filter(text=statement_text))
    self.assertEqual(response.text, statement_text)
    self.assertEqual(response.confidence, 0)
    self.assertIsLength(stored, 2)
    for entry in stored:
        self.assertEqual(entry.text, statement_text)
| 992,647,802,674,425,900
|
If there is no statements in the database, then the
user's input is the only thing that can be returned.
|
tests/test_chatbot.py
|
test_no_statements_known
|
nadimpayak/ChatBot
|
python
|
def test_no_statements_known(self):
    "\n        If there is no statements in the database, then the\n        user's input is the only thing that can be returned.\n        "
    statement_text = 'How are you?'
    response = self.chatbot.get_response(statement_text)
    results = list(self.chatbot.storage.filter(text=statement_text))
    self.assertEqual(response.text, statement_text)
    # No match was possible, so confidence must be zero.
    self.assertEqual(response.confidence, 0)
    # Both the input and the echoed response were stored.
    self.assertIsLength(results, 2)
    self.assertEqual(results[0].text, statement_text)
    self.assertEqual(results[1].text, statement_text)
|
def test_one_statement_known_no_response(self):
    """
    A single known statement that is not in response to anything should
    still be returned, with zero confidence.
    """
    self.chatbot.storage.create(text='Hello', in_response_to=None)
    reply = self.chatbot.get_response('Hi')
    self.assertEqual(reply.confidence, 0)
    self.assertEqual(reply.text, 'Hello')
| -5,050,156,685,655,720,000
|
Test the case where a single statement is known, but
it is not in response to any other statement.
|
tests/test_chatbot.py
|
test_one_statement_known_no_response
|
nadimpayak/ChatBot
|
python
|
def test_one_statement_known_no_response(self):
    """A single known statement that is not in response to anything
    should still be returned, with zero confidence.
    """
    self.chatbot.storage.create(text='Hello', in_response_to=None)
    response = self.chatbot.get_response('Hi')
    self.assertEqual(response.confidence, 0)
    self.assertEqual(response.text, 'Hello')
|
def test_one_statement_one_response_known(self):
    """
    A known response entry with no matching input statement in the
    database should be returned with zero confidence.
    """
    self.chatbot.storage.create(text='Hello', in_response_to='Hi')
    reply = self.chatbot.get_response('Hi')
    self.assertEqual(reply.confidence, 0)
    self.assertEqual(reply.text, 'Hello')
| 911,156,194,905,935,900
|
Test the case that one response is known and there is a response
entry for it in the database.
|
tests/test_chatbot.py
|
test_one_statement_one_response_known
|
nadimpayak/ChatBot
|
python
|
def test_one_statement_one_response_known(self):
    """A known response entry with no matching input statement in the
    database should be returned with zero confidence.
    """
    self.chatbot.storage.create(text='Hello', in_response_to='Hi')
    response = self.chatbot.get_response('Hi')
    self.assertEqual(response.confidence, 0)
    self.assertEqual(response.text, 'Hello')
|
def test_two_statements_one_response_known(self):
    """
    When both the input statement and a response to it exist in the
    database, the response is returned with full confidence.
    """
    self.chatbot.storage.create(text='Hi', in_response_to=None)
    self.chatbot.storage.create(text='Hello', in_response_to='Hi')
    reply = self.chatbot.get_response('Hi')
    self.assertEqual(reply.confidence, 1)
    self.assertEqual(reply.text, 'Hello')
| 867,824,115,065,216,400
|
Test the case that one response is known and there is a response
entry for it in the database.
|
tests/test_chatbot.py
|
test_two_statements_one_response_known
|
nadimpayak/ChatBot
|
python
|
def test_two_statements_one_response_known(self):
    """When both the input statement and a response to it exist in the
    database, the response is returned with full confidence.
    """
    self.chatbot.storage.create(text='Hi', in_response_to=None)
    self.chatbot.storage.create(text='Hello', in_response_to='Hi')
    response = self.chatbot.get_response('Hi')
    # An exact match was found, so confidence is 1.
    self.assertEqual(response.confidence, 1)
    self.assertEqual(response.text, 'Hello')
|
def test_statement_added_to_conversation(self):
    """
    An input statement should be added to the recent response list
    under its own conversation.
    """
    incoming = Statement(text='Wow!', conversation='test')
    outgoing = self.chatbot.get_response(incoming)
    self.assertEqual(incoming.text, outgoing.text)
    self.assertEqual(outgoing.conversation, 'test')
| -7,818,882,938,278,926,000
|
An input statement should be added to the recent response list.
|
tests/test_chatbot.py
|
test_statement_added_to_conversation
|
nadimpayak/ChatBot
|
python
|
def test_statement_added_to_conversation(self):
    """An input statement should be added to the recent response list
    under its own conversation.
    """
    statement = Statement(text='Wow!', conversation='test')
    response = self.chatbot.get_response(statement)
    self.assertEqual(statement.text, response.text)
    # The conversation label must carry over to the response.
    self.assertEqual(response.conversation, 'test')
|
def test_get_response_unicode(self):
    """
    A non-ASCII unicode input string should still produce a
    non-empty response.
    """
    reply = self.chatbot.get_response(u'سلام')
    self.assertGreater(len(reply.text), 0)
| -311,454,113,447,001,600
|
Test the case that a unicode string is passed in.
|
tests/test_chatbot.py
|
test_get_response_unicode
|
nadimpayak/ChatBot
|
python
|
def test_get_response_unicode(self):
    """A non-ASCII unicode input string should still produce a
    non-empty response.
    """
    response = self.chatbot.get_response(u'سلام')
    self.assertGreater(len(response.text), 0)
|
def test_get_response_emoji(self):
    """
    An input string containing an emoji should still produce a
    non-empty response.
    """
    reply = self.chatbot.get_response(u'💩 ')
    self.assertGreater(len(reply.text), 0)
| -1,424,362,772,954,990,300
|
Test the case that the input string contains an emoji.
|
tests/test_chatbot.py
|
test_get_response_emoji
|
nadimpayak/ChatBot
|
python
|
def test_get_response_emoji(self):
    """An input string containing an emoji should still produce a
    non-empty response.
    """
    response = self.chatbot.get_response(u'💩 ')
    self.assertGreater(len(response.text), 0)
|
def test_get_response_non_whitespace(self):
    """
    A non-whitespace C1 control-character string should still produce
    a non-empty response.
    """
    reply = self.chatbot.get_response(u'\x80\x81\x8e\x8f\x90\x91\x92')
    self.assertGreater(len(reply.text), 0)
| 3,280,001,702,079,089,700
|
Test the case that a non-whitespace C1 control string is passed in.
|
tests/test_chatbot.py
|
test_get_response_non_whitespace
|
nadimpayak/ChatBot
|
python
|
def test_get_response_non_whitespace(self):
    """A non-whitespace C1 control-character string should still
    produce a non-empty response.
    """
    response = self.chatbot.get_response(u'\x80\x81\x8e\x8f\x90\x91\x92')
    self.assertGreater(len(response.text), 0)
|
def test_get_response_two_byte_characters(self):
    """
    An input string of multi-byte (Japanese) characters should still
    produce a non-empty response.
    """
    reply = self.chatbot.get_response(u'田中さんにあげて下さい')
    self.assertGreater(len(reply.text), 0)
| 8,807,853,710,064,461,000
|
Test the case that a string containing two-byte characters is passed in.
|
tests/test_chatbot.py
|
test_get_response_two_byte_characters
|
nadimpayak/ChatBot
|
python
|
def test_get_response_two_byte_characters(self):
    """An input string of multi-byte (Japanese) characters should still
    produce a non-empty response.
    """
    response = self.chatbot.get_response(u'田中さんにあげて下さい')
    self.assertGreater(len(response.text), 0)
|
def test_get_response_corrupted_text(self):
    """
    An input string full of combining-character "zalgo" text should
    still produce a non-empty response.
    """
    reply = self.chatbot.get_response(u'Ṱ̺̺̕h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳.̨̹͈̣')
    self.assertGreater(len(reply.text), 0)
| 7,761,169,289,263,649,000
|
Test the case that a string contains "corrupted" text.
|
tests/test_chatbot.py
|
test_get_response_corrupted_text
|
nadimpayak/ChatBot
|
python
|
def test_get_response_corrupted_text(self):
    """An input string full of combining-character "zalgo" text should
    still produce a non-empty response.
    """
    response = self.chatbot.get_response(u'Ṱ̺̺̕h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳.̨̹͈̣')
    self.assertGreater(len(response.text), 0)
|
def test_response_with_tags_added(self):
    """
    Tags attached to an input statement should be saved with that
    statement, while the generated response stays untagged.
    """
    tagged = Statement(text='Hello', in_response_to='Hi', tags=['test'])
    self.chatbot.get_response(tagged)
    stored = list(self.chatbot.storage.filter(text='Hello'))
    self.assertIsLength(stored, 2)
    self.assertIn('test', stored[0].get_tags())
    self.assertEqual(stored[1].get_tags(), [])
| 3,352,653,989,076,648,000
|
If an input statement has tags added to it,
that data should saved with the input statement.
|
tests/test_chatbot.py
|
test_response_with_tags_added
|
nadimpayak/ChatBot
|
python
|
def test_response_with_tags_added(self):
    """Tags attached to an input statement should be saved with that
    statement, while the generated response stays untagged.
    """
    self.chatbot.get_response(Statement(text='Hello', in_response_to='Hi', tags=['test']))
    results = list(self.chatbot.storage.filter(text='Hello'))
    self.assertIsLength(results, 2)
    # The tagged input keeps its tag; the echoed response has none.
    self.assertIn('test', results[0].get_tags())
    self.assertEqual(results[1].get_tags(), [])
|
def test_get_response_does_not_add_new_statement(self):
    """
    No new statement should be learned when `read_only` is True.
    """
    self.chatbot.read_only = True
    self.chatbot.get_response('Hi!')
    stored = list(self.chatbot.storage.filter(text='Hi!'))
    self.assertIsLength(stored, 0)
| 4,665,780,331,034,966,000
|
Test that a new statement is not learned if `read_only` is set to True.
|
tests/test_chatbot.py
|
test_get_response_does_not_add_new_statement
|
nadimpayak/ChatBot
|
python
|
def test_get_response_does_not_add_new_statement(self):
    """No new statement should be learned when `read_only` is True."""
    self.chatbot.read_only = True
    self.chatbot.get_response('Hi!')
    # Nothing may have been written to storage.
    results = list(self.chatbot.storage.filter(text='Hi!'))
    self.assertIsLength(results, 0)
|
def test_search_text_results_after_training(self):
    """
    Filtering with the search_text parameter should return only close
    matches to the input string, in storage order.
    """
    self.chatbot.storage.create_many([
        Statement('Example A for search.'),
        Statement('Another example.'),
        Statement('Example B for search.'),
        Statement(text='Another statement.'),
    ])
    search_text = self.chatbot.storage.tagger.get_bigram_pair_string('Example A for search.')
    matches = list(self.chatbot.storage.filter(search_text=search_text))
    self.assertEqual('Example A for search.', matches[0].text)
    self.assertEqual('Example B for search.', matches[1].text)
    self.assertIsLength(matches, 2)
| -2,723,408,850,424,878,000
|
ChatterBot should return close matches to an input
string when filtering using the search_text parameter.
|
tests/test_chatbot.py
|
test_search_text_results_after_training
|
nadimpayak/ChatBot
|
python
|
def test_search_text_results_after_training(self):
    """Filtering with the search_text parameter should return only
    close matches to the input string, in storage order.
    """
    self.chatbot.storage.create_many([Statement('Example A for search.'), Statement('Another example.'), Statement('Example B for search.'), Statement(text='Another statement.')])
    # The stored search text is the bigram-pair representation of the input.
    results = list(self.chatbot.storage.filter(search_text=self.chatbot.storage.tagger.get_bigram_pair_string('Example A for search.')))
    self.assertEqual('Example A for search.', results[0].text)
    self.assertEqual('Example B for search.', results[1].text)
    self.assertIsLength(results, 2)
|
def test_sub_adapter_agreement(self):
    """
    When multiple adapters agree on a statement, that statement is
    returned with the highest confidence among the matching options.
    """
    self.chatbot.logic_adapters = [
        TestAdapterA(self.chatbot),
        TestAdapterB(self.chatbot),
        TestAdapterC(self.chatbot),
    ]
    result = self.chatbot.generate_response(Statement(text='Howdy!'))
    self.assertEqual(result.confidence, 0.5)
    self.assertEqual(result.text, 'Good morning.')
| -4,158,896,709,613,299,000
|
In the case that multiple adapters agree on a given
statement, this statement should be returned with the
highest confidence available from these matching options.
|
tests/test_chatbot.py
|
test_sub_adapter_agreement
|
nadimpayak/ChatBot
|
python
|
def test_sub_adapter_agreement(self):
    """When multiple adapters agree on a statement, that statement is
    returned with the highest confidence among the matching options.
    """
    self.chatbot.logic_adapters = [TestAdapterA(self.chatbot), TestAdapterB(self.chatbot), TestAdapterC(self.chatbot)]
    statement = self.chatbot.generate_response(Statement(text='Howdy!'))
    self.assertEqual(statement.confidence, 0.5)
    self.assertEqual(statement.text, 'Good morning.')
|
def test_response_persona_is_bot(self):
    """
    The persona on a returned response should be the chatbot's own name.
    """
    reply = self.chatbot.get_response('Hey everyone!')
    self.assertEqual(reply.persona, 'bot:Test Bot')
| -1,802,674,394,433,953,000
|
The response returned from the chatbot should be set to the name of the chatbot.
|
tests/test_chatbot.py
|
test_response_persona_is_bot
|
nadimpayak/ChatBot
|
python
|
def test_response_persona_is_bot(self):
    """The persona on a returned response should be the chatbot's own name."""
    response = self.chatbot.get_response('Hey everyone!')
    self.assertEqual(response.persona, 'bot:Test Bot')
|
def bind(self, bus):
    """Overrides the default bind method of MycroftSkill.

    Registers the common-query messagebus handlers during startup;
    skill authors do not need to call this themselves.
    """
    if not bus:
        return
    super().bind(bus)
    self.add_event('question:query', self.__handle_question_query)
    self.add_event('question:action', self.__handle_query_action)
| 7,592,947,664,780,378,000
|
Overrides the default bind method of MycroftSkill.
This registers messagebus handlers for the skill during startup
but is nothing the skill author needs to consider.
|
mycroft/skills/common_query_skill.py
|
bind
|
AIIX/mycroft-core
|
python
|
def bind(self, bus):
    """Overrides the default bind method of MycroftSkill.

    Registers the common-query messagebus handlers during startup;
    skill authors do not need to call this themselves.
    """
    if bus:
        super().bind(bus)
        self.add_event('question:query', self.__handle_question_query)
        self.add_event('question:action', self.__handle_query_action)
|
def __handle_query_action(self, message):
    """Message handler for question:action.

    Forwards the phrase and callback data to CQS_action, but only when
    this skill is the one that was selected.
    """
    if message.data['skill_id'] == self.skill_id:
        phrase = message.data['phrase']
        callback_data = message.data.get('callback_data')
        self.CQS_action(phrase, callback_data)
| -4,646,801,660,407,673,000
|
Message handler for question:action.
Extracts phrase and data from message forward this to the skills
CQS_action method.
|
mycroft/skills/common_query_skill.py
|
__handle_query_action
|
AIIX/mycroft-core
|
python
|
def __handle_query_action(self, message):
    """Message handler for question:action.

    Forwards the phrase and callback data to CQS_action, but only when
    this skill is the one that was selected.
    """
    if (message.data['skill_id'] != self.skill_id):
        # Another skill was selected; nothing to do.
        return
    phrase = message.data['phrase']
    data = message.data.get('callback_data')
    self.CQS_action(phrase, data)
|
@abstractmethod
def CQS_match_query_phrase(self, phrase):
    """Analyze phrase to see if it is a play-able phrase with this skill.

    Needs to be implemented by the skill.

    Arguments:
        phrase (str): User phrase, "What is an aardwark"

    Returns:
        (match, CQSMatchLevel[, callback_data]) or None: Tuple containing
        a string with the appropriate matching phrase, the PlayMatch
        type, and optionally data to return in the callback if the
        match is selected.
    """
    # Abstract stub: subclasses must override; the base returns no match.
    return None
| -7,376,553,259,035,771,000
|
Analyze phrase to see if it is a play-able phrase with this skill.
Needs to be implemented by the skill.
Arguments:
phrase (str): User phrase, "What is an aardwark"
Returns:
(match, CQSMatchLevel[, callback_data]) or None: Tuple containing
a string with the appropriate matching phrase, the PlayMatch
type, and optionally data to return in the callback if the
match is selected.
|
mycroft/skills/common_query_skill.py
|
CQS_match_query_phrase
|
AIIX/mycroft-core
|
python
|
@abstractmethod
def CQS_match_query_phrase(self, phrase):
    """Analyze phrase to see if it is a play-able phrase with this skill.

    Needs to be implemented by the skill.

    Arguments:
        phrase (str): User phrase, "What is an aardwark"

    Returns:
        (match, CQSMatchLevel[, callback_data]) or None: Tuple containing
        a string with the appropriate matching phrase, the PlayMatch
        type, and optionally data to return in the callback if the
        match is selected.
    """
    # Abstract stub: subclasses must override; the base returns no match.
    return None
|
def CQS_action(self, phrase, data):
    """Take additional action IF the skill is selected.

    The common-query framework handles the speech; override this hook to
    display media, set a context, or otherwise react once chosen.

    Args:
        phrase (str): User phrase uttered after "Play", e.g. "some music"
        data (dict): Callback data specified in match_query_phrase()
    """
    # Default hook: intentionally does nothing.
    return None
| -7,288,892,319,496,509,000
|
Take additional action IF the skill is selected.
The speech is handled by the common query but if the chosen skill
wants to display media, set a context or prepare for sending
information info over e-mail this can be implemented here.
Args:
phrase (str): User phrase uttered after "Play", e.g. "some music"
data (dict): Callback data specified in match_query_phrase()
|
mycroft/skills/common_query_skill.py
|
CQS_action
|
AIIX/mycroft-core
|
python
|
def CQS_action(self, phrase, data):
    """Take additional action IF the skill is selected.

    The common-query framework handles the speech; override this hook to
    display media, set a context, or otherwise react once chosen.

    Args:
        phrase (str): User phrase uttered after "Play", e.g. "some music"
        data (dict): Callback data specified in match_query_phrase()
    """
    # Default hook: intentionally does nothing.
    pass
|
def test_arraystringreader():
    """Round-trip a random float32 array through its comma-separated string form.

    https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test
    """
    size = 8
    sample_array = np.random.rand(size).astype('float32')
    text = ','.join(str(value) for value in sample_array)
    crafted_doc = ArrayStringReader().craft(text, 0)
    blob = crafted_doc['blob']
    assert (blob.shape[0] == size)
    np.testing.assert_array_equal(blob, sample_array)
| 2,348,139,695,834,759,000
|
here is my test code
https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test
|
crafters/numeric/ArrayStringReader/tests/test_arraystringreader.py
|
test_arraystringreader
|
Gracegrx/jina-hub
|
python
|
def test_arraystringreader():
    """Round-trip a random float32 array through its comma-separated string form.

    https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test
    """
    size = 8
    sample_array = np.random.rand(size).astype('float32')
    # Serialize the array exactly as the reader expects it: comma-separated.
    text = ','.join([str(x) for x in sample_array])
    reader = ArrayStringReader()
    crafted_doc = reader.craft(text, 0)
    assert (crafted_doc['blob'].shape[0] == size)
    np.testing.assert_array_equal(crafted_doc['blob'], sample_array)
|
def __str__(self):
    """Recursively render this structure and all of its children.

    Depth in the construct_id (number of dots) controls the leading-dot
    indentation of each line.
    """
    depth = self.construct_id.count('.')
    header = ('\n' + ('.' * depth)) + ('Struct: %s: %s' % (self.construct_id, self.structureType))
    return header + ''.join(str(child) for child in self.structureItems)
| 3,533,402,789,938,992,600
|
Recursively print CallStructure
|
simmbse/structure_item.py
|
__str__
|
tsherburne/ma-simpy
|
python
|
def __str__(self):
    """Recursively render this structure item, indenting by nesting depth."""
    prefix = '.' * self.construct_id.count('.')
    out = (('\n' + prefix) + ('Struct: %s: %s' % (self.construct_id, self.structureType)))
    # Append each child's own recursive rendering.
    for child in self.structureItems:
        out += str(child)
    return out
|
def assertGeneratorRunning(self, gen):
    """Check that a generator-based coroutine hasn't completed yet.

    Advancing it must succeed; next() raises StopIteration (failing the
    test) if the coroutine has already finished.
    """
    next(gen)
| 4,792,557,900,963,074,000
|
Check that a generator-based coroutine hasn't completed yet.
|
tests/utils.py
|
assertGeneratorRunning
|
LiaoSteve/websockets
|
python
|
def assertGeneratorRunning(self, gen):
    """Check that a generator-based coroutine hasn't completed yet.

    next() raises StopIteration -- and thereby fails the test -- when the
    coroutine has already run to completion.
    """
    next(gen)
|
def assertGeneratorReturns(self, gen):
    """Check that a generator-based coroutine completes and return its value.

    The return value of a generator is carried on the StopIteration it
    raises when exhausted.
    """
    with self.assertRaises(StopIteration) as caught:
        next(gen)
    return caught.exception.value
| -4,097,259,684,251,606,500
|
Check that a generator-based coroutine completes and return its value.
|
tests/utils.py
|
assertGeneratorReturns
|
LiaoSteve/websockets
|
python
|
def assertGeneratorReturns(self, gen):
    """Assert that the coroutine finishes now; return the value it produced."""
    # A finished generator signals its return value via StopIteration.value.
    with self.assertRaises(StopIteration) as ctx:
        next(gen)
    return ctx.exception.value
|
def __init_subclass__(cls, **kwargs):
    """Convert test coroutines to test functions.

    Runs for every subclass: each test method that is a coroutine function
    is replaced with a synchronous wrapper, supporting asynchronous tests
    transparently.
    """
    super().__init_subclass__(**kwargs)
    # Discover test method names the same way unittest itself does.
    for name in unittest.defaultTestLoader.getTestCaseNames(cls):
        test = getattr(cls, name)
        if asyncio.iscoroutinefunction(test):
            # Swap the coroutine for a wrapper that drives it on the test loop.
            setattr(cls, name, cls.convert_async_to_sync(test))
| 7,922,361,619,117,970,000
|
Convert test coroutines to test functions.
This supports asynchronous tests transparently.
|
tests/utils.py
|
__init_subclass__
|
LiaoSteve/websockets
|
python
|
def __init_subclass__(cls, **kwargs):
    """Convert test coroutines to test functions.

    This supports asynchronous tests transparently: every test method that
    is a coroutine function is rebound to a synchronous wrapper.
    """
    super().__init_subclass__(**kwargs)
    # Enumerate test methods using unittest's own discovery rules.
    for name in unittest.defaultTestLoader.getTestCaseNames(cls):
        test = getattr(cls, name)
        if asyncio.iscoroutinefunction(test):
            setattr(cls, name, cls.convert_async_to_sync(test))
|
@staticmethod
def convert_async_to_sync(test):
    """Convert a test coroutine into a synchronous test function.

    The wrapper drives the coroutine to completion on the test case's
    event loop (``self.loop``).
    """
    @functools.wraps(test)
    def run_sync(self, *args, **kwargs):
        return self.loop.run_until_complete(test(self, *args, **kwargs))
    return run_sync
| 7,222,491,553,220,178,000
|
Convert a test coroutine to a test function.
|
tests/utils.py
|
convert_async_to_sync
|
LiaoSteve/websockets
|
python
|
@staticmethod
def convert_async_to_sync(test):
    """Wrap an async test coroutine so it runs synchronously on self.loop."""
    @functools.wraps(test)
    def synchronous(self, *args, **kwargs):
        # Block until the coroutine completes; propagate its result/exception.
        coro = test(self, *args, **kwargs)
        return self.loop.run_until_complete(coro)
    return synchronous
return test_func
|
@contextlib.contextmanager
def assertNoLogs(self, logger='websockets', level=logging.ERROR):
    """Assert no message is logged on ``logger`` with at least ``level``.

    unittest's assertLogs fails when *nothing* is logged, so a sentinel
    'dummy' record is emitted first; afterwards the captured output must
    contain exactly that sentinel and nothing else.
    """
    with self.assertLogs(logger, level) as logs:
        # Sentinel record keeps assertLogs satisfied while the body runs.
        logging.getLogger(logger).log(level, 'dummy')
        (yield)
    # Anything beyond the sentinel is a real (unexpected) log record.
    level_name = logging.getLevelName(level)
    self.assertEqual(logs.output, [f'{level_name}:{logger}:dummy'])
| 5,522,975,056,834,048,000
|
No message is logged on the given logger with at least the given level.
|
tests/utils.py
|
assertNoLogs
|
LiaoSteve/websockets
|
python
|
@contextlib.contextmanager
def assertNoLogs(self, logger='websockets', level=logging.ERROR):
    """Assert that the body logs nothing on ``logger`` at ``level`` or above.

    Works around assertLogs failing on zero records: log a sentinel
    'dummy' first, then require the captured output to be only that
    sentinel.
    """
    with self.assertLogs(logger, level) as logs:
        logging.getLogger(logger).log(level, 'dummy')
        (yield)
    level_name = logging.getLevelName(level)
    self.assertEqual(logs.output, [f'{level_name}:{logger}:dummy'])
|
def assertDeprecationWarnings(self, recorded_warnings, expected_warnings):
    """Check recorded deprecation warnings match a list of expected messages."""
    # Same count, then pairwise comparison in order.
    self.assertEqual(len(recorded_warnings), len(expected_warnings))
    for index, expected in enumerate(expected_warnings):
        warning = recorded_warnings[index].message
        self.assertEqual(str(warning), expected)
        # Exact type check: subclasses of DeprecationWarning don't count.
        self.assertEqual(type(warning), DeprecationWarning)
| 5,966,202,690,485,853,000
|
Check recorded deprecation warnings match a list of expected messages.
|
tests/utils.py
|
assertDeprecationWarnings
|
LiaoSteve/websockets
|
python
|
def assertDeprecationWarnings(self, recorded_warnings, expected_warnings):
    """Verify the recorded warnings are exactly the expected deprecation messages."""
    self.assertEqual(len(recorded_warnings), len(expected_warnings))
    for entry, message in zip(recorded_warnings, expected_warnings):
        raised = entry.message
        self.assertEqual(str(raised), message)
        self.assertEqual(type(raised), DeprecationWarning)
|
@pytest.mark.parametrize('scenario', _generate_test_cases())
def test_decrypt_from_file(scenario):
    """Tests decrypt from known good files.

    Each scenario pairs a ciphertext fixture with its expected plaintext
    and the key IDs needed to decrypt it.
    """
    with open(scenario.ciphertext_filename, 'rb') as infile:
        ciphertext = infile.read()
    with open(scenario.plaintext_filename, 'rb') as infile:
        plaintext = infile.read()
    # Provider pre-loaded with the static master keys used to build the fixtures.
    key_provider = StaticStoredMasterKeyProvider()
    key_provider.add_master_keys_from_list(scenario.key_ids)
    (decrypted_ciphertext, _header) = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
    assert (decrypted_ciphertext == plaintext)
| 9,093,568,782,376,092,000
|
Tests decrypt from known good files.
|
test/functional/test_f_xcompat.py
|
test_decrypt_from_file
|
alex-chew/aws-encryption-sdk-python
|
python
|
@pytest.mark.parametrize('scenario', _generate_test_cases())
def test_decrypt_from_file(scenario):
    """Decrypt each known-good ciphertext fixture and compare to its plaintext."""
    with open(scenario.ciphertext_filename, 'rb') as infile:
        ciphertext = infile.read()
    with open(scenario.plaintext_filename, 'rb') as infile:
        plaintext = infile.read()
    # Static key provider seeded with the scenario's master keys.
    key_provider = StaticStoredMasterKeyProvider()
    key_provider.add_master_keys_from_list(scenario.key_ids)
    (decrypted_ciphertext, _header) = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
    assert (decrypted_ciphertext == plaintext)
|
def _get_raw_key(self, key_id):
    """Find a loaded raw wrapping key matching an encoded key ID.

    key_id is bytes of the form ``ALGORITHM.BITS.PADDING_ALG.PADDING_HASH``;
    any unknown component raises InvalidKeyIdError.
    """
    try:
        algorithm, key_bits, padding_algorithm, padding_hash = key_id.upper().split(b'.', 3)
        bits = int(key_bits)
        wrapping_algorithm = _WRAPPING_ALGORITHM_MAP[algorithm][bits][padding_algorithm][padding_hash]
        return WrappingKey(
            wrapping_algorithm=wrapping_algorithm,
            wrapping_key=_STATIC_KEYS[algorithm][bits],
            wrapping_key_type=_KEY_TYPES_MAP[algorithm],
        )
    except KeyError:
        # Any missing map entry means the ID doesn't name a loaded key.
        _LOGGER.exception('Unknown Key ID: %s', key_id)
        raise InvalidKeyIdError('Unknown Key ID: {}'.format(key_id))
| -7,431,842,842,781,503,000
|
Finds a loaded raw key.
|
test/functional/test_f_xcompat.py
|
_get_raw_key
|
alex-chew/aws-encryption-sdk-python
|
python
|
def _get_raw_key(self, key_id):
    """Resolve an encoded key ID to its loaded raw WrappingKey."""
    try:
        parts = key_id.upper().split(b'.', 3)
        algorithm, raw_bits, padding_algorithm, padding_hash = parts
        key_bits = int(raw_bits)
        return WrappingKey(
            wrapping_algorithm=_WRAPPING_ALGORITHM_MAP[algorithm][key_bits][padding_algorithm][padding_hash],
            wrapping_key=_STATIC_KEYS[algorithm][key_bits],
            wrapping_key_type=_KEY_TYPES_MAP[algorithm],
        )
    except KeyError:
        _LOGGER.exception('Unknown Key ID: %s', key_id)
        raise InvalidKeyIdError('Unknown Key ID: {}'.format(key_id))
|
@property
def key_id(self):
    """Build a key ID string from this scenario's parameters."""
    parts = (self.encryption_algorithm, str(self.key_bits), self.padding_algorithm, self.padding_hash)
    return '.'.join(parts)
| 2,958,328,642,598,319,600
|
Build a key ID from instance parameters.
|
test/functional/test_f_xcompat.py
|
key_id
|
alex-chew/aws-encryption-sdk-python
|
python
|
@property
def key_id(self):
    """Dot-joined key ID built from the instance's algorithm parameters."""
    bits = str(self.key_bits)
    return '.'.join([self.encryption_algorithm, bits, self.padding_algorithm, self.padding_hash])
|
def test_dimensions(self):
    """The cell must map (batch, input_dim) inputs to (batch, state_dim) states."""
    cell = LSTM2dCell(self.input_dim, self.cell_state_dim, self.device)
    next_c, next_s = cell.forward(x=self.x_j, s_prev_hor=self.s_prev_hor, s_prev_ver=self.s_prev_ver,
                                  c_prev_hor=self.c_prev_hor, c_prev_ver=self.c_prev_ver)
    expected = [self.batch_size, self.cell_state_dim]
    self.assertEqual(list(next_c.shape), expected, 'Next cell state has unexpected shape')
    self.assertEqual(list(next_s.shape), expected, 'Next hidden state has unexpected shape')
| -8,588,840,230,380,694,000
|
Tests if the input and output dimensions of the cell are as expected.
|
test/test_lstm2d_cell.py
|
test_dimensions
|
FlorianPfisterer/2D-LSTM-Seq2Seq
|
python
|
def test_dimensions(self):
    """Forward pass must preserve batch size and emit cell_state_dim-wide states."""
    cell = LSTM2dCell(self.input_dim, self.cell_state_dim, self.device)
    cell_state, hidden_state = cell.forward(x=self.x_j, s_prev_hor=self.s_prev_hor,
                                            s_prev_ver=self.s_prev_ver,
                                            c_prev_hor=self.c_prev_hor,
                                            c_prev_ver=self.c_prev_ver)
    self.assertEqual(list(cell_state.shape), [self.batch_size, self.cell_state_dim],
                     'Next cell state has unexpected shape')
    self.assertEqual(list(hidden_state.shape), [self.batch_size, self.cell_state_dim],
                     'Next hidden state has unexpected shape')
|
def test_same_over_batch(self):
    """Identical inputs repeated across the batch must yield identical outputs per row."""
    input_dim, batch_size, state_dim = 4, 7, 3
    x = torch.Tensor([1.5, 4.2, 3.1415, 2.71]).expand(batch_size, input_dim)
    s_hor = torch.Tensor([-0.4, 1.2, 42.195]).expand(batch_size, state_dim)
    s_ver = torch.Tensor([2.3, 7.12, -3.14]).expand(batch_size, state_dim)
    c_hor = torch.Tensor([-10.1, 4.5, -0.1]).expand(batch_size, state_dim)
    c_ver = torch.Tensor([17, 1.001, -2.23]).expand(batch_size, state_dim)
    cell = LSTM2dCell(input_dim, state_dim, self.device)
    c, s = cell.forward(x=x, s_prev_hor=s_hor, s_prev_ver=s_ver, c_prev_hor=c_hor, c_prev_ver=c_ver)
    # Every row saw the same input, so every row must equal the first row.
    repeated_c = c[0, :].expand(batch_size, c[0, :].shape[-1])
    self.assertTrue(repeated_c.allclose(c), 'Next cell state varies across same-input batch')
    repeated_s = s[0, :].expand(batch_size, s[0, :].shape[-1])
    self.assertTrue(repeated_s.allclose(s), 'Next hidden state varies across same-input batch')
| 8,572,458,883,497,263,000
|
Tests if the outputs of the cell are the same over the batch if the same input is fed in multiple times.
|
test/test_lstm2d_cell.py
|
test_same_over_batch
|
FlorianPfisterer/2D-LSTM-Seq2Seq
|
python
|
def test_same_over_batch(self):
    """Feeding the same vectors for every batch element must give batch-constant outputs."""
    dim_in, n_batch, dim_state = 4, 7, 3
    inputs = torch.Tensor([1.5, 4.2, 3.1415, 2.71]).expand(n_batch, dim_in)
    prev_s_h = torch.Tensor([-0.4, 1.2, 42.195]).expand(n_batch, dim_state)
    prev_s_v = torch.Tensor([2.3, 7.12, -3.14]).expand(n_batch, dim_state)
    prev_c_h = torch.Tensor([-10.1, 4.5, -0.1]).expand(n_batch, dim_state)
    prev_c_v = torch.Tensor([17, 1.001, -2.23]).expand(n_batch, dim_state)
    cell = LSTM2dCell(dim_in, dim_state, self.device)
    c, s = cell.forward(x=inputs, s_prev_hor=prev_s_h, s_prev_ver=prev_s_v,
                        c_prev_hor=prev_c_h, c_prev_ver=prev_c_v)
    # Compare each output against its first row broadcast over the batch.
    first_c = c[0, :]
    self.assertTrue(first_c.expand(n_batch, first_c.shape[-1]).allclose(c),
                    'Next cell state varies across same-input batch')
    first_s = s[0, :]
    self.assertTrue(first_s.expand(n_batch, first_s.shape[-1]).allclose(s),
                    'Next hidden state varies across same-input batch')
|
def rnnt_decoder_predictions_tensor(self, encoder_output: torch.Tensor, encoded_lengths: torch.Tensor, return_hypotheses: bool=False) -> (List[str], Optional[List[List[str]]], Optional[Union[(Hypothesis, NBestHypotheses)]]):
    '\n        Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.\n\n        Args:\n            encoder_output: torch.Tensor of shape [B, D, T].\n            encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].\n            return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses\n\n        Returns:\n            If `return_best_hypothesis` is set:\n                A tuple (hypotheses, None):\n                hypotheses - list of Hypothesis (best hypothesis per sample).\n                    Look at rnnt_utils.Hypothesis for more information.\n\n            If `return_best_hypothesis` is not set:\n                A tuple(hypotheses, all_hypotheses)\n                hypotheses - list of Hypothesis (best hypothesis per sample).\n                    Look at rnnt_utils.Hypothesis for more information.\n                all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted\n                    list of all the hypotheses of the model per sample.\n                    Look at rnnt_utils.NBestHypotheses for more information.\n        '
    # NOTE(review): the docstring's Returns section refers to
    # `return_best_hypothesis`, but the only flag in this signature is
    # `return_hypotheses` -- the docstring likely needs correcting.
    with torch.no_grad():
        # self.decoding returns a tuple; element 0 is the per-sample hypothesis list.
        hypotheses_list = self.decoding(encoder_output=encoder_output, encoded_lengths=encoded_lengths)
        hypotheses_list = hypotheses_list[0]
        prediction_list = hypotheses_list
        if isinstance(prediction_list[0], NBestHypotheses):
            # Beam-search path: each sample carries an n-best list.
            hypotheses = []
            all_hypotheses = []
            for nbest_hyp in prediction_list:
                n_hyps = nbest_hyp.n_best_hypotheses
                decoded_hyps = self.decode_hypothesis(n_hyps)
                # Per the docstring, n-best lists are sorted, so [0] is the best.
                hypotheses.append(decoded_hyps[0])
                all_hypotheses.append(decoded_hyps)
            if return_hypotheses:
                return (hypotheses, all_hypotheses)
            best_hyp_text = [h.text for h in hypotheses]
            all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
            return (best_hyp_text, all_hyp_text)
        else:
            # Greedy path: exactly one hypothesis per sample.
            hypotheses = self.decode_hypothesis(prediction_list)
            if return_hypotheses:
                return (hypotheses, None)
            best_hyp_text = [h.text for h in hypotheses]
            return (best_hyp_text, None)
| 5,355,723,997,895,747,000
|
Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.
Args:
encoder_output: torch.Tensor of shape [B, D, T].
encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].
return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses
Returns:
If `return_best_hypothesis` is set:
A tuple (hypotheses, None):
hypotheses - list of Hypothesis (best hypothesis per sample).
Look at rnnt_utils.Hypothesis for more information.
If `return_best_hypothesis` is not set:
A tuple(hypotheses, all_hypotheses)
hypotheses - list of Hypothesis (best hypothesis per sample).
Look at rnnt_utils.Hypothesis for more information.
all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted
list of all the hypotheses of the model per sample.
Look at rnnt_utils.NBestHypotheses for more information.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
rnnt_decoder_predictions_tensor
|
JINHXu/NeMo
|
python
|
def rnnt_decoder_predictions_tensor(self, encoder_output: torch.Tensor, encoded_lengths: torch.Tensor, return_hypotheses: bool=False) -> (List[str], Optional[List[List[str]]], Optional[Union[(Hypothesis, NBestHypotheses)]]):
    '\n        Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.\n\n        Args:\n            encoder_output: torch.Tensor of shape [B, D, T].\n            encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].\n            return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses\n\n        Returns:\n            If `return_best_hypothesis` is set:\n                A tuple (hypotheses, None):\n                hypotheses - list of Hypothesis (best hypothesis per sample).\n                    Look at rnnt_utils.Hypothesis for more information.\n\n            If `return_best_hypothesis` is not set:\n                A tuple(hypotheses, all_hypotheses)\n                hypotheses - list of Hypothesis (best hypothesis per sample).\n                    Look at rnnt_utils.Hypothesis for more information.\n                all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted\n                    list of all the hypotheses of the model per sample.\n                    Look at rnnt_utils.NBestHypotheses for more information.\n        '
    # NOTE(review): docstring mentions `return_best_hypothesis`; the actual
    # flag controlling the return shape below is `return_hypotheses`.
    with torch.no_grad():
        # Element 0 of the decoding result holds the per-sample hypotheses.
        hypotheses_list = self.decoding(encoder_output=encoder_output, encoded_lengths=encoded_lengths)
        hypotheses_list = hypotheses_list[0]
        prediction_list = hypotheses_list
        if isinstance(prediction_list[0], NBestHypotheses):
            # n-best branch (beam search).
            hypotheses = []
            all_hypotheses = []
            for nbest_hyp in prediction_list:
                n_hyps = nbest_hyp.n_best_hypotheses
                decoded_hyps = self.decode_hypothesis(n_hyps)
                hypotheses.append(decoded_hyps[0])
                all_hypotheses.append(decoded_hyps)
            if return_hypotheses:
                return (hypotheses, all_hypotheses)
            best_hyp_text = [h.text for h in hypotheses]
            all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
            return (best_hyp_text, all_hyp_text)
        else:
            # Single-hypothesis branch (greedy decoding).
            hypotheses = self.decode_hypothesis(prediction_list)
            if return_hypotheses:
                return (hypotheses, None)
            best_hyp_text = [h.text for h in hypotheses]
            return (best_hyp_text, None)
|
def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[(Hypothesis, NBestHypotheses)]]:
    """
    Decode a list of hypotheses into strings, in place.

    Args:
        hypotheses_list: List of Hypothesis whose ``y_sequence`` holds token ids
            (either a plain list or a tensor-like object with ``.tolist()``).

    Returns:
        The same list, with each hypothesis's ``text`` (and, when
        ``self.compute_hypothesis_token_set`` is set, ``tokens``) filled in.
    """
    for hypothesis in hypotheses_list:
        prediction = hypothesis.y_sequence
        # Normalize tensor-like sequences to a plain list of ints.
        # isinstance (not type(...) != list) also accepts list subclasses,
        # which the old exact-type check would have sent to .tolist().
        if not isinstance(prediction, list):
            prediction = prediction.tolist()
        # Drop blank tokens before decoding to text.
        prediction = [p for p in prediction if (p != self.blank_id)]
        hypothesis.text = self.decode_tokens_to_str(prediction)
        if self.compute_hypothesis_token_set:
            hypothesis.tokens = self.decode_ids_to_tokens(prediction)
    return hypotheses_list
| 1,439,692,469,110,841,900
|
Decode a list of hypotheses into a list of strings.
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of strings.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
decode_hypothesis
|
JINHXu/NeMo
|
python
|
def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[(Hypothesis, NBestHypotheses)]]:
    """
    Decode a list of hypotheses into strings, mutating each entry in place.

    Args:
        hypotheses_list: List of Hypothesis; ``y_sequence`` holds token ids.

    Returns:
        The same list with ``text`` (and optionally ``tokens``) populated.
    """
    for hypothesis in hypotheses_list:
        prediction = hypothesis.y_sequence
        # isinstance replaces the exact-type check, so list subclasses are
        # accepted directly instead of being sent to a nonexistent .tolist().
        if not isinstance(prediction, list):
            prediction = prediction.tolist()
        # Remove blank tokens before converting ids to text.
        prediction = [p for p in prediction if (p != self.blank_id)]
        hypothesis.text = self.decode_tokens_to_str(prediction)
        if self.compute_hypothesis_token_set:
            hypothesis.tokens = self.decode_ids_to_tokens(prediction)
    return hypotheses_list
|
@abstractmethod
def decode_tokens_to_str(self, tokens: List[int]) -> str:
    """Implemented by subclass in order to decode a token id list into a string.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A decoded string.
    """
    raise NotImplementedError()
| 592,417,000,429,037,200
|
Implemented by subclass in order to decode a token id list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
decode_tokens_to_str
|
JINHXu/NeMo
|
python
|
@abstractmethod
def decode_tokens_to_str(self, tokens: List[int]) -> str:
    """Decode a list of token ids into a string; must be implemented by subclasses.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A decoded string.
    """
    raise NotImplementedError()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.