hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0c3a3303f0c2028817456a2bcf4c72af0fc39e
| 95
|
py
|
Python
|
bastion/compute/common/image.py
|
laureanok/bas7ion
|
4cab1f5830e88beb208c4dfd564bf03eab1c2e8f
|
[
"Apache-2.0"
] | null | null | null |
bastion/compute/common/image.py
|
laureanok/bas7ion
|
4cab1f5830e88beb208c4dfd564bf03eab1c2e8f
|
[
"Apache-2.0"
] | null | null | null |
bastion/compute/common/image.py
|
laureanok/bas7ion
|
4cab1f5830e88beb208c4dfd564bf03eab1c2e8f
|
[
"Apache-2.0"
] | null | null | null |
class Image:
    """Value object identifying a compute image.

    Attributes:
        id:   provider-assigned image identifier.
        name: human-readable image name.
    """

    def __init__(self, id, name):
        self.id = id
        self.name = name

    def __repr__(self):
        # Debug-friendly representation; not part of any wire format.
        return "%s(id=%r, name=%r)" % (type(self).__name__, self.id, self.name)
| 13.571429
| 33
| 0.536842
|
4a0c3a7852e600b000419012aa868d710103996f
| 8,868
|
py
|
Python
|
Lib/dumbdbm.py
|
rbuzatu90/hyperv-python
|
82bf5a72b4d956ea05affe1644b47e378dec0f4e
|
[
"bzip2-1.0.6"
] | 195
|
2016-01-14T16:03:02.000Z
|
2021-12-29T09:15:02.000Z
|
Lib/dumbdbm.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 75
|
2016-01-14T16:03:02.000Z
|
2020-04-29T22:51:53.000Z
|
Lib/dumbdbm.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 24
|
2016-02-29T11:45:47.000Z
|
2021-12-24T08:41:37.000Z
|
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open  # module-level alias so _commit() can run at shutdown
_BLOCKSIZE = 512  # values in the .dat file start on 512-byte-aligned offsets
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
    """Dict-like store over two files (Python 2 code — uses __builtin__,
    iteritems, and Py2 raise syntax).

    The in-memory ``_index`` maps each string key to a ``(pos, siz)`` pair
    locating the value inside the ``.dat`` file; the ``.dir`` file is the
    persistent, line-per-entry form of that index.
    """
    # The on-disk directory and data files can remain in mutually
    # inconsistent states for an arbitrarily long time (see comments
    # at the end of __setitem__). This is only repaired when _commit()
    # gets called. One place _commit() gets called is from __del__(),
    # and if that occurs at program shutdown time, module globals may
    # already have gotten rebound to None. Since it's crucial that
    # _commit() finish successfully, we can't ignore shutdown races
    # here, and _commit() must not reference any globals.
    _os = _os # for _commit()
    _open = _open # for _commit()
    def __init__(self, filebasename, mode):
        """Open (creating if needed) the .dat file and load the index.

        filebasename -- path prefix; '.dir', '.dat', '.bak' are appended.
        mode -- permission bits applied to files this object creates.
        """
        self._mode = mode
        # The directory file is a text file. Each line looks like
        #    "%r, (%d, %d)\n" % (key, pos, siz)
        # where key is the string key, pos is the offset into the dat
        # file of the associated value's first byte, and siz is the number
        # of bytes in the associated value.
        self._dirfile = filebasename + _os.extsep + 'dir'
        # The data file is a binary file pointed into by the directory
        # file, and holds the values associated with keys. Each value
        # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
        # binary 8-bit string value.
        self._datfile = filebasename + _os.extsep + 'dat'
        self._bakfile = filebasename + _os.extsep + 'bak'
        # The index is an in-memory dict, mirroring the directory file.
        self._index = None # maps keys to (pos, siz) pairs
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            with _open(self._datfile, 'w') as f:
                self._chmod(self._datfile)
        else:
            f.close()
        self._update()
    # Read directory file into the in-memory index dict.
    def _update(self):
        """Rebuild self._index from the .dir file (empty if absent)."""
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            # No .dir file just means an empty database.
            pass
        else:
            with f:
                for line in f:
                    line = line.rstrip()
                    # eval of the repr written by _commit()/_addkey below;
                    # safe only as long as nothing else writes the .dir file.
                    key, pos_and_siz_pair = eval(line)
                    self._index[key] = pos_and_siz_pair
    # Write the index dict to the directory file. The original directory
    # file (if any) is renamed with a .bak extension first. If a .bak
    # file currently exists, it's deleted.
    def _commit(self):
        """Persist the in-memory index, rotating .dir -> .bak first."""
        # CAUTION: It's vital that _commit() succeed, and _commit() can
        # be called from __del__(). Therefore we must never reference a
        # global in this routine.
        if self._index is None:
            return # nothing to do
        try:
            self._os.unlink(self._bakfile)
        except self._os.error:
            pass
        try:
            self._os.rename(self._dirfile, self._bakfile)
        except self._os.error:
            pass
        with self._open(self._dirfile, 'w') as f:
            self._chmod(self._dirfile)
            for key, pos_and_siz_pair in self._index.iteritems():
                f.write("%r, %r\n" % (key, pos_and_siz_pair))
    sync = _commit
    def __getitem__(self, key):
        """Read the value bytes for key straight from the .dat file."""
        pos, siz = self._index[key] # may raise KeyError
        with _open(self._datfile, 'rb') as f:
            f.seek(pos)
            dat = f.read(siz)
        return dat
    # Append val to the data file, starting at a _BLOCKSIZE-aligned
    # offset. The data file is first padded with NUL bytes (if needed)
    # to get to an aligned offset. Return pair
    # (starting offset of val, len(val))
    def _addval(self, val):
        with _open(self._datfile, 'rb+') as f:
            f.seek(0, 2)
            pos = int(f.tell())
            npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
            f.write('\0'*(npos-pos))
            pos = npos
            f.write(val)
        return (pos, len(val))
    # Write val to the data file, starting at offset pos. The caller
    # is responsible for ensuring that there's enough room starting at
    # pos to hold val, without overwriting some other value. Return
    # pair (pos, len(val)).
    def _setval(self, pos, val):
        with _open(self._datfile, 'rb+') as f:
            f.seek(pos)
            f.write(val)
        return (pos, len(val))
    # key is a new key whose associated value starts in the data file
    # at offset pos and with length siz. Add an index record to
    # the in-memory index dict, and append one to the directory file.
    def _addkey(self, key, pos_and_siz_pair):
        self._index[key] = pos_and_siz_pair
        with _open(self._dirfile, 'a') as f:
            self._chmod(self._dirfile)
            f.write("%r, %r\n" % (key, pos_and_siz_pair))
    def __setitem__(self, key, val):
        """Store string val under string key, reusing space when possible."""
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if key not in self._index:
            self._addkey(key, self._addval(val))
        else:
            # See whether the new value is small enough to fit in the
            # (padded) space currently occupied by the old value.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                self._index[key] = self._setval(pos, val)
            else:
                # The new value doesn't fit in the (padded) space used
                # by the old value. The blocks used by the old value are
                # forever lost.
                self._index[key] = self._addval(val)
        # Note that _index may be out of synch with the directory
        # file now: _setval() and _addval() don't update the directory
        # file. This also means that the on-disk directory and data
        # files are in a mutually inconsistent state, and they'll
        # remain that way until _commit() is called. Note that this
        # is a disaster (for the database) if the program crashes
        # (so that _commit() never gets called).
    def __delitem__(self, key):
        """Drop key from the index; its data-file blocks are never reclaimed."""
        # The blocks used by the associated value are lost.
        del self._index[key]
        # XXX It's unclear why we do a _commit() here (the code always
        # XXX has, so I'm not changing it). _setitem__ doesn't try to
        # XXX keep the directory file in synch. Why should we? Or
        # XXX why shouldn't __setitem__?
        self._commit()
    def keys(self):
        return self._index.keys()
    def has_key(self, key):
        return key in self._index
    def __contains__(self, key):
        return key in self._index
    def iterkeys(self):
        return self._index.iterkeys()
    __iter__ = iterkeys
    def __len__(self):
        return len(self._index)
    def close(self):
        """Flush the index, then disable the object (attributes -> None)."""
        self._commit()
        self._index = self._datfile = self._dirfile = self._bakfile = None
    __del__ = close
    def _chmod (self, file):
        # No-op on platforms whose os module lacks chmod.
        if hasattr(self._os, 'chmod'):
            self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
    """Open the database file, filename, and return corresponding object.
    The flag argument, used to control how the database is opened in the
    other DBM implementations, is ignored in the dumbdbm module; the
    database is always opened for update, and will be created if it does
    not exist.
    The optional mode argument is the UNIX mode of the file, used only when
    the database has to be created. It defaults to octal code 0666 (and
    will be modified by the prevailing umask).
    """
    # flag argument is currently ignored
    # Modify mode depending on the umask
    try:
        # Read the umask without changing it (set, then restore).
        um = _os.umask(0)
        _os.umask(um)
    except AttributeError:
        # Platform without umask support; use mode as given.
        pass
    else:
        # Turn off any bits that are set in the umask
        mode = mode & (~um)
    return _Database(file, mode)
| 35.902834
| 78
| 0.610059
|
4a0c3b93bd1fc6f4b36b8e2f0bd8876b73c88316
| 8,918
|
py
|
Python
|
diversity_analysis_tool/graph_construction.py
|
BenevolentAI/benevolentai-dat
|
a68c2a438bb5b9651e4e0cb72cdbe02738cc88a9
|
[
"MIT"
] | 6
|
2020-09-23T08:06:44.000Z
|
2022-01-25T16:13:04.000Z
|
diversity_analysis_tool/graph_construction.py
|
BenevolentAI/benevolentai-dat
|
a68c2a438bb5b9651e4e0cb72cdbe02738cc88a9
|
[
"MIT"
] | 2
|
2021-03-03T15:29:41.000Z
|
2021-07-16T12:11:24.000Z
|
diversity_analysis_tool/graph_construction.py
|
BenevolentAI/benevolentai-dat
|
a68c2a438bb5b9651e4e0cb72cdbe02738cc88a9
|
[
"MIT"
] | 1
|
2021-03-25T07:32:41.000Z
|
2021-03-25T07:32:41.000Z
|
import os
import logging
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Module-level logger; the NullHandler keeps this library silent unless the
# host application configures logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class GraphUtility:
    """Renders demographic bar charts from a participant DataFrame.

    Every plotting method saves its figure under output_directory_path via
    matplotlib's implicit (global pyplot) state; call order therefore
    matters and methods are not thread-safe.
    """
    # =============
    # Graph Methods
    # =============
    def __init__(self, df, output_directory_path):
        """Store the participant DataFrame and the directory for saved figures.

        Args:
            df: pandas DataFrame, one row per participant.
            output_directory_path: existing directory charts are written to.
        """
        self.df = df
        self.output_directory_path = output_directory_path
    def build_graph(self):
        """Collect columns with predefined column names and map them
        through the graphing methods (single-feature bars, pairwise
        stacked bars, and a missingness chart)."""
        # Display labels for the known demographic columns.
        colname_dict = {
            "age_band": "Age Band",
            "ethnicity": "Ethnicity",
            "race": "Race",
            "sex": "Sex",
        }
        # Keep only the known columns actually present in the data.
        colname_dict = {
            k: v for k, v in colname_dict.items() if k in self.df.columns.values
        }
        # Add missing colnames
        missing_cols = list(set(self.df.columns.values) - set(colname_dict.keys()))
        colname_dict.update({colname: colname for colname in missing_cols})
        # Plot individual feature graphs
        for column_name in colname_dict.keys():
            self.generate_bar_graph(
                column_name,
                x_label="Number of participants",
                y_label=colname_dict[column_name],
            )
        # Two variable graphs
        if set(["age_band", "ethnicity"]).issubset(self.df.columns.values):
            self.generate_stacked_bar_graph(
                "age_band",
                "ethnicity",
                "Number of participants",
                colname_dict["age_band"],
                colname_dict["ethnicity"],
            )
        if set(["age_band", "race"]).issubset(self.df.columns.values):
            self.generate_stacked_bar_graph(
                "age_band",
                "race",
                "Number of participants",
                colname_dict["age_band"],
                colname_dict["race"],
            )
        if set(["race", "sex"]).issubset(self.df.columns.values):
            self.generate_stacked_bar_graph(
                "race",
                "sex",
                "Number of participants",
                colname_dict["race"],
                colname_dict["sex"],
            )
        if set(["ethnicity", "sex"]).issubset(self.df.columns.values):
            self.generate_stacked_bar_graph(
                "ethnicity",
                "sex",
                "Number of participants",
                colname_dict["ethnicity"],
                colname_dict["sex"],
            )
        if set(["age_band", "sex"]).issubset(self.df.columns.values):
            self.generate_stacked_bar_graph(
                "age_band",
                "sex",
                "Number of participants",
                colname_dict["age_band"],
                colname_dict["sex"],
            )
        self.plot_missing_rates(
            colname_dict, x_label="% of entries missing", show_fraction=True
        )
    def plot_missing_rates(
        self, colname_dict, x_label=None, y_label=None, show_fraction=True
    ):
        """
        generates a bar graph displaying the number or percentage of missing entries for each column in colname_dict.keys()
        Args:
            colname_dict: dictionary with column names to plot as keys and the label for visualisation as item
            x_label (optional): label for x axis. If none no x-axis label is shown.
            y_label (optional): label for y axis. If none the major_category_column_name is used as label
            show_fraction (optional): whether percentage or raw count should be shown, defaults to True.
        """
        # NOTE(review): indexing with dict_keys (not a list) relies on pandas
        # treating it as list-like; .rename(colname_dict) relabels the index
        # (rows) here, since the counts become the index of this frame.
        missing_rates = pd.DataFrame(
            self.df[colname_dict.keys()].isna().sum(axis=0)
        ).rename(colname_dict)
        if show_fraction:
            missing_rates = missing_rates / self.df.shape[0]
            # NOTE(review): xlim is set before .plot() creates its axes —
            # verify it actually applies to the new figure.
            plt.xlim(0, 1)
        sns.set(
            style="whitegrid",
            palette="colorblind",
            font="DejaVu Sans",
            font_scale=1,
            color_codes=True,
        )
        missing_rates.plot(kind="barh", stacked=False, legend=False)
        plt.xticks(rotation=-45)
        if x_label:
            plt.xlabel(x_label)
        if y_label:
            plt.ylabel(y_label)
        file_path = os.path.join(self.output_directory_path, f"Missingness_bar_chart")
        plt.savefig(file_path, bbox_inches="tight")
        logger.info(f"successfully saved missingness bar graph")
    def generate_bar_graph(self, column_name, x_label=None, y_label=None):
        """
        The graph functions can be called on a df and returns a visualization bar chart for one variable
        Args:
            column_name: specifies the column that is shown as bar chart
            x_label (optional): label for x axis. If none no x-axis label is shown.
            y_label (optional): label for y axis. If none the major_category_column_name is used as label
        """
        sns.set(
            style="whitegrid",
            palette="colorblind",
            font="DejaVu Sans",
            font_scale=1,
            color_codes=True,
        )
        # Sort values first so equal-count categories appear in a stable order,
        # then plot the per-category counts.
        self.df[column_name].sort_values(
            na_position="last", ascending=False
        ).value_counts(sort=False).plot(kind="barh", stacked=False, edgecolor="none")
        plt.xticks(rotation=-45)
        if x_label:
            plt.xlabel(x_label)
        if y_label:
            plt.ylabel(y_label)
        file_path = os.path.join(self.output_directory_path, f"{column_name}_bar_chart")
        plt.savefig(file_path, bbox_inches="tight")
        logger.info(f"successfully saved {column_name} bar graph")
    def generate_stacked_bar_graph(
        self,
        major_category_column_name,
        minor_category_column_name,
        x_label=None,
        y_label=None,
        legend_title=None,
    ):
        """
        generates a stacked bar graph. Each graph will be labelled by a value in the
        major_category_column_name. Within each bar, the height will be divided based on counts of
        values in the minor_category_column_name
        Args:
            major_category_column_name: provides labels for each separate bar in the graph
            minor_category_column_name: used to divide each bar into sections for each minor category label
            x_label (optional): label for x axis. If none no x-axis label is shown.
            y_label (optional): label for y axis. If none the major_category_column_name is used as label
            legend_title (optional): title for legend. If none the minor_category_column_name is used as title
        """
        sns.set(
            style="whitegrid",
            palette="colorblind",
            font="DejaVu Sans",
            font_scale=1,
            color_codes=True,
        )
        stacked_bar_graph_df = self.df[
            [major_category_column_name, minor_category_column_name]
        ]
        # Missing minor-category entries become their own visible segment.
        stacked_bar_graph_df = stacked_bar_graph_df.fillna(
            {minor_category_column_name: "not provided"}
        )
        # margins=True appends an "All" row/column of totals; the totals row
        # feeds the single-bar chart below, and is dropped for the main chart.
        results_df = pd.crosstab(
            stacked_bar_graph_df[major_category_column_name],
            stacked_bar_graph_df[minor_category_column_name],
            margins=True,
        )
        all_df = pd.DataFrame(results_df["All"]).T.drop(columns="All")
        filtered = results_df.drop(labels="All").drop(columns=["All"])
        # plot stacked major/minor
        filename = f"{major_category_column_name}_{minor_category_column_name}_stacked_bar_chart"
        self._create_stacked_figure(filtered)
        plt.xticks(rotation=-45)
        if x_label:
            plt.xlabel(x_label)
        if y_label:
            plt.ylabel(y_label)
        if legend_title:
            plt.legend(title=legend_title, bbox_to_anchor=(1.05, 1), loc="upper left")
        file_path = os.path.join(self.output_directory_path, filename)
        plt.savefig(file_path, bbox_inches="tight")
        # plot only major
        filename = f"{major_category_column_name}_stacked_bar_chart"
        # removing index name so it doesn't appear as label 'all'
        all_df.index = [""]
        self._create_stacked_figure(all_df)
        if x_label:
            plt.xlabel(x_label)
        if y_label:
            plt.legend(title=y_label, bbox_to_anchor=(1.05, 1), loc="upper left")
        file_path = os.path.join(self.output_directory_path, filename)
        plt.savefig(file_path, bbox_inches="tight")
        logger.info(
            (
                "successfully saved stacked bar graph for "
                f"{major_category_column_name} and {minor_category_column_name}"
            )
        )
    def _create_stacked_figure(self, frames):
        """Plot frames as a horizontal stacked bar chart and return the axes."""
        fig = frames.plot(kind="barh", stacked=True, edgecolor="none")
        plt.legend(title=frames.columns.name)
        plt.gcf().subplots_adjust(bottom=0.30)
        return fig
| 37.313808
| 123
| 0.595986
|
4a0c3bde8bff4fd364732088cc8d3df97b4000ae
| 1,099
|
py
|
Python
|
Main.py
|
RackManyLoafs/pygame-wordsearch
|
7bfb26e73e55c1db8773501aae11c894d6a65b13
|
[
"MIT"
] | null | null | null |
Main.py
|
RackManyLoafs/pygame-wordsearch
|
7bfb26e73e55c1db8773501aae11c894d6a65b13
|
[
"MIT"
] | null | null | null |
Main.py
|
RackManyLoafs/pygame-wordsearch
|
7bfb26e73e55c1db8773501aae11c894d6a65b13
|
[
"MIT"
] | null | null | null |
# File name: Main.py
# Programmer: Sebastien Marleau
# Description: imports puzzle data and starts the game
# Date: April 9th, 2019
import pygame
pygame.init()
from Game import *
class PuzzleData:
    """Record describing one word-search puzzle parsed from puzzles.txt.

    Game reads all five attributes directly; they are stored verbatim.
    """

    def __init__(self, title, rowCount, columnCount, letters, words):
        self.title, self.rowCount, self.columnCount = title, rowCount, columnCount
        self.letters, self.words = letters, words
# Parse every puzzle in puzzles.txt into PuzzleData records keyed by title,
# then hand them to the game. File layout: puzzle count, then per puzzle:
# title, column count, row count, <rowCount> space-separated letter rows,
# word count, and one word per line.
puzzleDataDict = dict()
# BUG FIX: the original opened the file and never closed it; `with` closes
# it deterministically.
with open("puzzles.txt", 'r') as fi:
    amountOfPuzzles = int(fi.readline().strip())
    for puzzle in range(amountOfPuzzles):
        title = fi.readline().strip()
        columnCount = int(fi.readline().strip())
        rowCount = int(fi.readline().strip())
        letters = []
        for row in range(rowCount):
            letters += fi.readline().strip().split(' ')
        words = []
        wordCount = int(fi.readline().strip())
        for word in range(wordCount):
            words.append(fi.readline().strip())
        puzzleDataDict[title] = PuzzleData(title=title, rowCount=rowCount,
                                           columnCount=columnCount,
                                           letters=letters, words=words)
game = Game(puzzleDataDict)
game.start()
| 23.891304
| 124
| 0.6697
|
4a0c3be65585257c1d05aa38f08fe45561c91174
| 3,208
|
py
|
Python
|
python/play/random_forest_on_token_features.py
|
antlr/groom
|
909c04b386c6d384344cd0d060dd1e3b4bde77a2
|
[
"BSD-2-Clause"
] | 408
|
2016-04-21T09:40:08.000Z
|
2022-03-22T02:05:29.000Z
|
python/play/random_forest_on_token_features.py
|
antlr/groom
|
909c04b386c6d384344cd0d060dd1e3b4bde77a2
|
[
"BSD-2-Clause"
] | 25
|
2016-01-24T17:28:49.000Z
|
2021-05-05T19:17:55.000Z
|
python/play/random_forest_on_token_features.py
|
antlr/groom
|
909c04b386c6d384344cd0d060dd1e3b4bde77a2
|
[
"BSD-2-Clause"
] | 78
|
2016-02-14T07:22:21.000Z
|
2022-02-10T08:23:12.000Z
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import matplotlib.pyplot as plt
import sys
def print_importance(forest):
    """Print the forest's features ranked by impurity-based importance.

    NOTE(review): relies on module globals X_training and token_features
    being populated before the call — confirm call order in the script.
    (Python 2 code: uses print statements.)
    """
    importances = forest.feature_importances_
    indices = np.argsort(importances)[::-1]
    # Compute stddev of forest's feature importances over all trees
    std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
    for f in range(X_training.shape[1]):
        print "%d. feature %s col %d (%f, inter-tree variability=%f)" % \
            (f + 1,
             token_features[indices[f]],
             indices[f],
             importances[indices[f]],
             std[indices[f]])
def graph_importance(forest):
    """Show a bar chart of feature importances with inter-tree error bars.

    NOTE(review): relies on module globals X_training and token_features —
    confirm call order in the script. Blocks on plt.show().
    """
    importances = forest.feature_importances_
    # Error bars: stddev of each feature's importance across the trees.
    std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
    indices = np.argsort(importances)[::-1]
    fig, ax = plt.subplots(1,1)
    plt.title("Feature importances")
    xlabels = [token_features[int(i)] for i in indices]
    plt.bar(range(X_training.shape[1]), importances[indices],
            color="r", yerr=std[indices], align="center")
    plt.xticks(range(X_training.shape[1]), xlabels, rotation=15)
    plt.xlim([-1, X_training.shape[1]])
    plt.ylim([0, 1])
    # Hide the tick marks and right-align the rotated labels.
    for tick in ax.xaxis.get_major_ticks():
        tick.tick1line.set_markersize(0)
        tick.tick2line.set_markersize(0)
        tick.label1.set_horizontalalignment('right')
    plt.show()
data = np.loadtxt("samples/stringtemplate4/style.csv", delimiter=",", skiprows=1)
token_features = []
with open("samples/stringtemplate4/style.csv", 'r') as f:
token_features = f.readline().strip().split(', ')
token_features = token_features[1:] # first col is predictor var
X = data[0::,1::] # features
Y = data[0::,0] # prediction class
# get first 80% as training data, 20% as testing data
n = len(data)
last_training_index = n * 0.80
X_training = X[0:last_training_index]
X_testing = X[last_training_index:]
Y_training = Y[0:last_training_index]
Y_testing = Y[last_training_index:]
print "there are %d records, %d training and %d testing" % (len(data), len(X_training), len(X_testing))
print "a priori 'inject newline' rate is %3d/%4d = %f" % (sum(Y), len(Y), sum(Y)/float(len(Y)))
# transform categorical values
index_of_cat_features = [0, 3, 4, 5]
# todo
forest = RandomForestClassifier(n_estimators = 100)
forest = forest.fit(X_training, Y_training)
Y_predictions = forest.predict(X_testing)
print "expected 'inject newline' rate is %3d/%4d = %f" % \
(sum(Y_testing), len(Y_testing), sum(Y_testing)/float(len(Y_testing)))
print "prediction 'inject newline' rate is %3d/%4d = %f" % \
(sum(Y_predictions), len(Y_predictions), sum(Y_predictions)/float(len(Y_predictions)))
# print "predictions:"
# print Y_predictions
#
# print "actual:"
# print Y_testing
# print "diff"
# print Y_testing-Y_predictions
misclassified = int(abs(sum(Y_testing - Y_predictions)))
print "number misclassified: %d/%d=%f%%" %\
(misclassified, len(Y_testing), misclassified/float(len(Y_testing))*100)
print_importance(forest)
# now graph and show important features
graph_importance(forest)
| 34.12766
| 103
| 0.692643
|
4a0c3d3cf4cc5cbc747dbdc37d4ce8e8f777e27c
| 2,866
|
py
|
Python
|
data/yaws 1st ed--extracted/combine_the_data.py
|
doomphoenix-qxz/LavoisierCore.jl
|
46679c43d1da730b9e435c725791cc226c329854
|
[
"MIT"
] | 8
|
2019-07-25T21:33:14.000Z
|
2021-08-19T12:11:07.000Z
|
data/yaws 1st ed--extracted/combine_the_data.py
|
doomphoenix-qxz/LavoisierCore.jl
|
46679c43d1da730b9e435c725791cc226c329854
|
[
"MIT"
] | 8
|
2019-04-15T17:42:21.000Z
|
2019-11-16T00:45:55.000Z
|
data/yaws 1st ed--extracted/combine_the_data.py
|
doomphoenix-qxz/LavoisierCore.jl
|
46679c43d1da730b9e435c725791cc226c329854
|
[
"MIT"
] | 4
|
2019-04-17T19:57:12.000Z
|
2021-05-11T01:13:31.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 5 13:53:29 2019
@author: richar56
"""
import csv
import numpy as np
import pandas as pd
import os
from os import listdir
from os.path import isfile, join
import re
pattern = re.compile(r'\s+')  # any whitespace run -> one comma during cleanup
# Every regular file sitting next to this script (except the script itself)
# is treated as an input data file.
mypath = os.path.dirname(os.path.realpath("__file__"))
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
onlyfiles.remove("combine_the_data.py")
filename1 = "Yaws_ocr_cp_gas.csv"  # NOTE(review): not referenced below — confirm it is still needed
def separate_first(alist):
    """Split the leading digit run of alist[0] into its own element.

    Returns a new list [digit_prefix, remainder] + alist[1:]; the digit
    prefix is the empty string when alist[0] does not start with a digit.
    """
    head = alist[0]
    split_at = 0
    while split_at < len(head) and head[split_at].isdigit():
        split_at += 1
    return [head[:split_at], head[split_at:]] + list(alist[1:])
def combine_name(alist):
    """Re-join a chemical name that OCR split across two fields.

    If field 2 is a lone digit and field 3 is a purely alphabetic word,
    they are merged into a single name field ("7", "Water" -> "7 Water").
    Rows with fewer than 4 fields are dropped (empty list returned); all
    other rows are returned unchanged.

    BUG FIX: the original had an empty `if` body (a SyntaxError), and its
    non-merge paths fell off the end of the function returning None, which
    crashed the caller; every path now returns a list.
    """
    if len(alist) < 4:
        return []
    name1 = alist[2]
    name2 = alist[3]
    if len(name1) == 1 and name1[0].isdigit():
        # A second lone digit means name2 is data, not a name fragment:
        # leave the row untouched.
        if not (len(name2) == 1 and name2[0].isdigit()):
            if name2.isalpha() and len(name2) > 1:
                # name2 is the real (alphabetic) name; glue it to the digit.
                alist[2:4] = [name1 + " " + name2]
    return alist
def preprocessing(afilename):
    """Clean one OCR'd data file and write filtered rows to <stem>_out.csv.

    Per line: collapse whitespace runs to commas, strip OCR artefacts
    ("|", "[", quotes and stray ",.,"/",_,"/",,"/",=," separators), split a
    digit id fused onto the first field, drop empty fields, re-join split
    chemical names, and keep only rows whose first field is all digits
    (i.e. a chemical id).

    BUG FIXES vs the original:
    - removed the misplaced `global headflag` (a SyntaxError: the name was
      assigned before the declaration) — the flag was write-only anyway;
    - empty fields are filtered with a comprehension instead of calling
      list.remove() while iterating, which skipped adjacent empties;
    - both file handles are closed deterministically via `with`;
    - rows reduced to [] by combine_name no longer raise IndexError.
    """
    out_filename = afilename[:-4] + "_out.csv"
    with open(afilename) as source, open(out_filename, "w") as output:
        mywriter = csv.writer(output, delimiter=",")
        for line in source:
            sentence = re.sub(pattern, ',', line)
            # Strip OCR junk in the same order as the original chain of
            # .replace() calls (order matters for overlapping patterns).
            for junk, repl in ((",.,", ","), ("|", ""), ("[", ""),
                               (",_,", ","), (",,", ","), (",=,", ","),
                               ('"', "")):
                sentence = sentence.replace(junk, repl)
            fields = sentence.split(",")
            try:
                int(fields[0])
            except ValueError:
                # First field is not a pure number: a fused "123Name" id
                # needs to be split into its own column.
                fields = separate_first(fields)
            fields = [item for item in fields if item != '']
            fields = combine_name(fields)
            # combine_name returns [] for rows too short to be records.
            if fields and all(ch.isdigit() for ch in fields[0]):
                mywriter.writerow(fields)
# Clean every discovered data file sitting next to this script.
for data_filename in onlyfiles:
    preprocessing(data_filename)
| 27.825243
| 68
| 0.508723
|
4a0c3d7497ec12f9df34e996bbd7ac43bf2ed899
| 271
|
py
|
Python
|
cogs/test.py
|
VieAnonime/Verge-Discord
|
86aa524c3a1dec67151beacb0578b9e416bb5840
|
[
"MIT"
] | 8
|
2017-07-30T11:31:28.000Z
|
2021-04-09T19:44:21.000Z
|
cogs/test.py
|
VieAnonime/Verge-Discord
|
86aa524c3a1dec67151beacb0578b9e416bb5840
|
[
"MIT"
] | null | null | null |
cogs/test.py
|
VieAnonime/Verge-Discord
|
86aa524c3a1dec67151beacb0578b9e416bb5840
|
[
"MIT"
] | 42
|
2017-09-11T14:53:37.000Z
|
2022-03-22T07:38:18.000Z
|
import discord
from discord.ext import commands
class Test:
    """Minimal cog exposing a single sanity-check command."""

    def __init__(self, bot):
        # Keep a handle on the bot so the command can send messages.
        self.bot = bot

    @commands.command(pass_context=True)
    async def test(self, ctx):
        """Reply with "!balance" to confirm the bot is responsive."""
        await self.bot.say("!balance")
def setup(bot):
    """Extension entry point used by discord.py's load_extension."""
    cog = Test(bot)
    bot.add_cog(cog)
| 16.9375
| 40
| 0.656827
|
4a0c401b2a326e05b4bc5e087cdaa9a18cb91046
| 2,271
|
py
|
Python
|
tests/providers/amazon/aws/sensors/test_glue.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
tests/providers/amazon/aws/sensors/test_glue.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
tests/providers/amazon/aws/sensors/test_glue.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow import configuration
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.providers.amazon.aws.sensors.glue import AwsGlueJobSensor
class TestAwsGlueJobSensor(unittest.TestCase):
    """Unit tests for AwsGlueJobSensor.poke with mocked Glue hook calls."""

    @staticmethod
    def _build_sensor():
        # Shared fixture: both tests used an identical copy-pasted sensor;
        # construct it in one place instead.
        return AwsGlueJobSensor(
            task_id='test_glue_job_sensor',
            job_name='aws_test_glue_job',
            run_id='5152fgsfsjhsh61661',
            poke_interval=1,
            timeout=5,
            aws_conn_id='aws_default',
        )

    def setUp(self):
        configuration.load_test_config()

    @mock.patch.object(AwsGlueJobHook, 'get_conn')
    @mock.patch.object(AwsGlueJobHook, 'get_job_state')
    def test_poke(self, mock_get_job_state, mock_conn):
        # poke() returns True once the job reports SUCCEEDED.
        mock_conn.return_value.get_job_run()
        mock_get_job_state.return_value = 'SUCCEEDED'
        assert self._build_sensor().poke(None)

    @mock.patch.object(AwsGlueJobHook, 'get_conn')
    @mock.patch.object(AwsGlueJobHook, 'get_job_state')
    def test_poke_false(self, mock_get_job_state, mock_conn):
        # poke() returns False while the job is still RUNNING.
        mock_conn.return_value.get_job_run()
        mock_get_job_state.return_value = 'RUNNING'
        assert not self._build_sensor().poke(None)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 36.047619
| 70
| 0.707177
|
4a0c4189ae28dd4741590c5148d4186324d99124
| 3,507
|
py
|
Python
|
scripts/ingest_metadata.py
|
neurodata/boss-export
|
16e9d2b0319876e36e53d3a01622e83e75fc48f1
|
[
"Apache-2.0"
] | null | null | null |
scripts/ingest_metadata.py
|
neurodata/boss-export
|
16e9d2b0319876e36e53d3a01622e83e75fc48f1
|
[
"Apache-2.0"
] | null | null | null |
scripts/ingest_metadata.py
|
neurodata/boss-export
|
16e9d2b0319876e36e53d3a01622e83e75fc48f1
|
[
"Apache-2.0"
] | null | null | null |
#%%
import configparser
import json
import boto3
import pandas as pd
from botocore import exceptions
from boss_export.libs import ngprecomputed
#%%
# AWS wiring: a read-only client under the dev profile, plus a write-capable
# resource obtained by assuming the IAM role named in scripts/secrets.ini.
session = boto3.Session(profile_name="ben-boss-dev")
s3 = session.client("s3")
BUCKET_NAME = "open-neurodata"
PUBLIC_METADATA = "scripts/all_datasets_ids.csv"  # per-dataset metadata table
config = configparser.ConfigParser()
config.read("scripts/secrets.ini")
iam_role = config["DEFAULT"]["iam_role"]
s3_write_resource = ngprecomputed.assume_role_resource(iam_role, session)
#%%
def check_info(bucket_name, prefix):
    """Probe whether `prefix` is a dataset root.

    Returns 0 when "<prefix>/info" exists in the bucket, 1 otherwise
    (shell-style status codes, as the callers expect).
    """
    info_key = f"{prefix.strip('/')}/info"
    try:
        s3.head_object(Bucket=bucket_name, Key=info_key)
    except exceptions.ClientError:
        # head_object raises ClientError when the object is absent.
        return 1
    return 0
def get_subdirs(bucket_name, prefix):
    """Return the immediate "subdirectory" prefixes directly below prefix."""
    listing = s3.list_objects(Bucket=bucket_name, Prefix=prefix, Delimiter="/")
    return [entry["Prefix"] for entry in listing["CommonPrefixes"]]
def get_data_dirs(bucket_name, prefix, data_dirs):
    """Recursively collect the prefixes that contain an `info` object.

    Descends depth-first; a prefix with an `info` file is a dataset root
    and is recorded without descending further.
    """
    if check_info(bucket_name, prefix) == 0:
        # Dataset root found: record it and stop the descent here.
        return data_dirs + [prefix]
    for child in get_subdirs(bucket_name, prefix):
        data_dirs = get_data_dirs(bucket_name, child, data_dirs)
    return data_dirs
#%%
# Walk the public bucket for every precomputed dataset root (any prefix
# holding an `info` object).
prefixes = get_data_dirs(BUCKET_NAME, "", [])
#%%
# list out prefixes
print(prefixes)
#%%
# load the pandas dataframe w/ all the data
df = pd.read_csv(PUBLIC_METADATA, na_filter=False)
# filter the metadata for only what we need
exclude_metadata = ["to_be_deleted", "deleted_status", "downsample_arn"]
df.drop(exclude_metadata, axis=1, inplace=True)
# %%
# For each dataset prefix, upload a `provenance` JSON document built from
# the metadata table (skipping prefixes that already have one).
for prefix in prefixes:
    parts = prefix.strip("/").split("/")
    key = prefix + "provenance"
    try:
        # Already has a provenance file: nothing to do for this prefix.
        s3.head_object(Bucket=BUCKET_NAME, Key=key)
        continue
    except exceptions.ClientError:
        # Not found
        print("sending file...")
    # Prefixes are either collection/experiment/channel (3 parts) or
    # experiment/channel (2 parts).
    if len(parts) == 3:
        coll, exp, ch = parts
        if exp == "kasthuri14s1colEM" and ch == "anno":
            # renamed this:
            exp = "kasthuri14s1colANNO"
            ch = "annotations"
        row = df[(df["coll"] == coll) & (df["exp"] == exp) & (df["ch"] == ch)]
    elif len(parts) == 2:
        coll = None
        exp, ch = parts
        row = df[(df["exp"] == exp) & (df["ch"] == ch)]
    else:
        # Unexpected prefix depth: abort the whole run loudly.
        raise Exception
    if len(row) != 1:
        # No (or ambiguous) metadata row; only the templier collection gets
        # a hard-coded fallback provenance document.
        print(f"Missing metadata: {prefix}")
        if coll != "templier":
            continue
        else:
            print("using templier provenance")
            provenance = json.dumps(
                {
                    "owners": ["thomas.templier@epfl.ch"],
                    "description": "MagC, magnetic collection of ultrathin sections for volumetric correlative light and electron microscopy",
                    "sources": [],
                    "processing": [],
                    "url": "https://neurodata.io/data/templier2019/",
                }
            )
    else:
        provenance = row.iloc[0].to_json()
    # write the metadata to a provinance.json file at that prefix
    ngprecomputed.save_obj(
        s3_write_resource,
        BUCKET_NAME,
        key,
        provenance,
        storage_class="STANDARD",
        cache_control="no-cache",
        content_type="application/json",
    )
| 26.770992
| 142
| 0.615056
|
4a0c42bca628585a48d181dac41722015f0c47f6
| 475
|
py
|
Python
|
manage.py
|
Jay-68/personal-blog
|
003d09a712d311b3c2afdcd92df8e07a6ff440ed
|
[
"MIT"
] | null | null | null |
manage.py
|
Jay-68/personal-blog
|
003d09a712d311b3c2afdcd92df8e07a6ff440ed
|
[
"MIT"
] | 2
|
2021-06-08T20:32:27.000Z
|
2022-03-12T00:03:50.000Z
|
manage.py
|
Jay-68/personal-blog
|
003d09a712d311b3c2afdcd92df8e07a6ff440ed
|
[
"MIT"
] | null | null | null |
from flaskblog import create_app, db
from flask_script import Manager, Server
from flaskblog.models import User, Post
from flask_migrate import Migrate, MigrateCommand

# Management entry point: wires the Flask app to flask-script commands.
app = create_app('production')
manager = Manager(app)
migrate = Migrate(app, db)
# `python manage.py server` runs the dev server; `db` exposes migrations.
manager.add_command('server', Server)
manager.add_command('db', MigrateCommand)


@manager.shell
def make_shell_context():
    # Preload the commonly-used objects into the interactive shell.
    return dict(app=app, db=db, User=User, Post=Post)


if __name__ == '__main__':
    manager.run()
| 22.619048
| 53
| 0.764211
|
4a0c439946e00c10230ac03fe5a798507b254984
| 5,912
|
py
|
Python
|
test/unit/provisioner/lint/test_ansible_lint.py
|
dericcrago/molecule
|
cb4dec0a7d4993395f123b2c9b0590d41e9dd557
|
[
"MIT"
] | null | null | null |
test/unit/provisioner/lint/test_ansible_lint.py
|
dericcrago/molecule
|
cb4dec0a7d4993395f123b2c9b0590d41e9dd557
|
[
"MIT"
] | null | null | null |
test/unit/provisioner/lint/test_ansible_lint.py
|
dericcrago/molecule
|
cb4dec0a7d4993395f123b2c9b0590d41e9dd557
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
import sh
from molecule import config
from molecule.provisioner.lint import ansible_lint
@pytest.fixture
def _provisioner_lint_section_data():
    """Molecule config fragment enabling ansible-lint with custom options."""
    lint_options = {
        'foo': 'bar',
        'v': True,
        'exclude': ['foo', 'bar'],
        'x': ['foo', 'bar'],
    }
    lint_section = {
        'name': 'ansible-lint',
        'options': lint_options,
        'env': {'FOO': 'bar'},
    }
    return {'provisioner': {'name': 'ansible', 'lint': lint_section}}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
    # Build the AnsibleLint wrapper around the shared config fixture;
    # ``patched_config_validate`` (not used directly) disables
    # config.Config._validate side effects during construction.
    return ansible_lint.AnsibleLint(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': [],
'x': [],
}
assert x == _instance.default_options
def test_name_property(_instance):
assert 'ansible-lint' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True
)
def test_options_property(_instance):
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': ['foo', 'bar'],
'x': ['foo', 'bar'],
'foo': 'bar',
'v': True,
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True
)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {'debug': True}
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': ['foo', 'bar'],
'x': ['foo', 'bar'],
'foo': 'bar',
'v': True,
}
assert x == _instance.options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True
)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
assert 'ANSIBLE_CONFIG' in _instance.env
assert 'ANSIBLE_ROLES_PATH' in _instance.env
assert 'ANSIBLE_LIBRARY' in _instance.env
assert 'ANSIBLE_FILTER_PLUGINS' in _instance.env
@pytest.mark.parametrize(
    'config_instance', ['_provisioner_lint_section_data'], indirect=True
)
def test_bake(_instance):
    # Baking should fold defaults, config options and excludes into the
    # ansible-lint command line (comparison is order-insensitive).
    _instance.bake()
    ephemeral = _instance._config.scenario.ephemeral_directory
    expected = [
        str(sh.ansible_lint),
        '--foo=bar',
        '-v',
        '-x',
        '-x',
        '--exclude={}'.format(ephemeral),
        '--exclude=foo',
        '--exclude=bar',
        _instance._config.provisioner.playbooks.converge,
        'bar',
        'foo',
    ]
    actual = str(_instance._ansible_lint_command).split()
    assert sorted(expected) == sorted(actual)
def test_execute(
mocker, patched_run_command, patched_logger_info, patched_logger_success, _instance
):
_instance._ansible_lint_command = 'patched-ansiblelint-command'
_instance.execute()
patched_run_command.assert_called_once_with(
'patched-ansiblelint-command', debug=False
)
msg = 'Executing Ansible Lint on {}...'.format(
_instance._config.provisioner.playbooks.converge
)
patched_logger_info.assert_called_once_with(msg)
msg = 'Lint completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn, _instance):
c = _instance._config.config
c['provisioner']['lint']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, lint is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes(patched_run_command, _instance):
_instance.execute()
assert _instance._ansible_lint_command is not None
assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(
patched_run_command, patched_yamllint, _instance
):
patched_run_command.side_effect = sh.ErrorReturnCode_1(sh.ansible_lint, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
| 30.317949
| 87
| 0.69046
|
4a0c4690f59117c4fdd7280240490f93b96e44a2
| 48
|
py
|
Python
|
org/sfu/billing/utils/__init__.py
|
MehdiLebdi/Real-Time-Charging_system
|
9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10
|
[
"Apache-2.0"
] | 1
|
2020-08-15T08:34:36.000Z
|
2020-08-15T08:34:36.000Z
|
org/sfu/billing/utils/__init__.py
|
MehdiLebdi/Real-Time-Charging_system
|
9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10
|
[
"Apache-2.0"
] | null | null | null |
org/sfu/billing/utils/__init__.py
|
MehdiLebdi/Real-Time-Charging_system
|
9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10
|
[
"Apache-2.0"
] | null | null | null |
__all__= ['propertiesreader','configurations']
| 16
| 46
| 0.770833
|
4a0c47de21757cdf3bb86ca957ad2b1d6717ea5c
| 3,707
|
bzl
|
Python
|
python/tensorstore/pybind11_cc_test.bzl
|
google/tensorstore
|
8df16a67553debaec098698ceaa5404eaf79634a
|
[
"BSD-2-Clause"
] | 106
|
2020-04-02T20:00:18.000Z
|
2022-03-23T20:27:31.000Z
|
python/tensorstore/pybind11_cc_test.bzl
|
0xgpapad/tensorstore
|
dfc2972e54588a7b745afea8b9322b57b26b657a
|
[
"BSD-2-Clause"
] | 28
|
2020-04-12T02:04:47.000Z
|
2022-03-23T20:27:03.000Z
|
python/tensorstore/pybind11_cc_test.bzl
|
0xgpapad/tensorstore
|
dfc2972e54588a7b745afea8b9322b57b26b657a
|
[
"BSD-2-Clause"
] | 18
|
2020-04-08T06:41:30.000Z
|
2022-02-18T03:05:49.000Z
|
# Copyright 2021 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supports C++ tests that use the Python C API and pybind11.
To avoid the complexity of embedding Python into a normal C++ binary, instead we
compile the test suite as a Python extension module with a single function as
the entry point (`cc_test_driver.cc`), and use a Python script
(`cc_test_driver_main.py`) as a shim to invoke the entry point in the extension
module.
"""
# The _write_template rule copies a template file to a destination file,
# applying string replacements.
def _write_template_impl(ctx):
    # Expand `src` into `out`, applying the literal string substitutions
    # declared on the rule.
    ctx.actions.expand_template(
        template = ctx.file.src,
        output = ctx.outputs.out,
        substitutions = ctx.attr.substitutions,
    )
_write_template = rule(
attrs = {
"src": attr.label(
mandatory = True,
allow_single_file = True,
),
"substitutions": attr.string_dict(mandatory = True),
"out": attr.output(mandatory = True),
},
# output_to_genfiles is required for header files.
output_to_genfiles = True,
implementation = _write_template_impl,
)
def pybind11_cc_googletest_test(
        name,
        pybind11_cc_library_rule,
        pybind11_py_extension_rule,
        googletest_deps,
        py_deps = [],
        size = None,
        tags = [],
        **kwargs):
    """C++ GoogleTest suite that may use Python APIs.

    Args:
      name: Test target name.
      pybind11_cc_library_rule: The `pybind11_cc_library` rule function.
      pybind11_py_extension_rule: The `pybind11_py_extension` rule function.
      googletest_deps: Dependencies of the test runner, must include
        GoogleTest and pybind11.
      py_deps: Python library dependencies.
      size: Test size.
      tags: Tags to apply to test target.
      **kwargs: Additional arguments to `cc_library` rule.
    """

    # Derive unique target/file names from the test name so multiple suites
    # can coexist in one package.
    driver_module_name = name.replace("/", "_") + "_cc_test_driver"
    driver_module_cc_src = driver_module_name + ".cc"
    driver_module_py_src = driver_module_name + "_main.py"

    # Generate the C++ extension-module entry point from the shared template.
    _write_template(
        name = driver_module_cc_src + "_gen",
        src = "//python/tensorstore:cc_test_driver.cc",
        substitutions = {
            "CC_TEST_DRIVER_MODULE": driver_module_name,
        },
        out = driver_module_cc_src,
    )

    # Compile the user's test sources as a library.
    cc_library_name = name + "_lib"
    pybind11_cc_library_rule(
        name = cc_library_name,
        testonly = True,
        **kwargs
    )

    # Link the tests plus GoogleTest into a Python extension module.
    pybind11_py_extension_rule(
        name = driver_module_name,
        srcs = [driver_module_cc_src],
        deps = [
            cc_library_name,
        ] + googletest_deps,
        testonly = True,
    )

    # Generate the Python shim that invokes the extension's entry point.
    _write_template(
        name = driver_module_py_src + "_gen",
        src = "//python/tensorstore:cc_test_driver_main.py",
        substitutions = {
            "CC_TEST_DRIVER_MODULE": driver_module_name,
        },
        out = driver_module_py_src,
    )

    # The actual test target runs the shim under the Python interpreter.
    native.py_test(
        name = name,
        size = size,
        srcs = [driver_module_py_src],
        main = driver_module_py_src,
        python_version = "PY3",
        tags = tags,
        deps = [driver_module_name] + py_deps,
    )
| 31.415254
| 80
| 0.659563
|
4a0c47e55a1b56986e60b5b38ef5e0f0898e7ec7
| 19,954
|
py
|
Python
|
petl/util/base.py
|
arturponinski/petl
|
9215549d9351139b13c5b275515d0cc752ee72b8
|
[
"MIT"
] | null | null | null |
petl/util/base.py
|
arturponinski/petl
|
9215549d9351139b13c5b275515d0cc752ee72b8
|
[
"MIT"
] | null | null | null |
petl/util/base.py
|
arturponinski/petl
|
9215549d9351139b13c5b275515d0cc752ee72b8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
import re
from itertools import islice, chain, cycle, product,\
permutations, combinations, takewhile, dropwhile, \
starmap, groupby, tee
import operator
from collections import Counter, namedtuple, OrderedDict
from itertools import compress, combinations_with_replacement
from petl.compat import imap, izip, izip_longest, ifilter, ifilterfalse, \
reduce, next, string_types, text_type
from petl.errors import FieldSelectionError
from petl.comparison import comparable_itemgetter
class IterContainer(object):
    """Mixin giving any iterable class list-like conveniences.

    Subclasses only need to implement ``__iter__``; everything here is
    derived from repeated iteration, so most operations are O(n) and
    re-iterate the underlying source each time they are called.
    """

    def __contains__(self, item):
        # linear scan over a fresh iteration
        for o in self:
            if o == item:
                return True
        return False

    def __len__(self):
        # consumes one full iteration to count
        return sum(1 for _ in self)

    def __getitem__(self, item):
        # NOTE(review): key types other than int/slice fall through and
        # return None — confirm that is intended.
        if isinstance(item, int):
            try:
                return next(islice(self, item, item+1))
            except StopIteration:
                raise IndexError('index out of range')
        elif isinstance(item, slice):
            return islice(self, item.start, item.stop, item.step)

    def index(self, item):
        # position of the first occurrence, list.index-style
        for i, o in enumerate(self):
            if o == item:
                return i
        raise ValueError('%s is not in container' % item)

    def min(self, **kwargs):
        return min(self, **kwargs)

    def max(self, **kwargs):
        return max(self, **kwargs)

    def len(self):
        return len(self)

    def set(self):
        return set(self)

    def frozenset(self):
        return frozenset(self)

    def list(self):
        # avoid iterating twice
        return list(iter(self))

    def tuple(self):
        # avoid iterating twice
        return tuple(iter(self))

    def dict(self, **kwargs):
        return dict(self, **kwargs)

    def enumerate(self, start=0):
        return enumerate(self, start)

    def filter(self, function):
        return filter(function, self)

    def map(self, function):
        return map(function, self)

    def reduce(self, function, **kwargs):
        # NOTE(review): functools.reduce accepts no keyword arguments, so
        # any kwargs passed here raise TypeError — confirm intended.
        return reduce(function, self, **kwargs)

    def sum(self, *args, **kwargs):
        return sum(self, *args, **kwargs)

    def all(self):
        return all(self)

    def any(self):
        return any(self)

    def apply(self, function):
        # side-effect-only loop; returns None
        for item in self:
            function(item)

    def counter(self):
        return Counter(self)

    def ordereddict(self):
        return OrderedDict(self)

    def cycle(self):
        return cycle(self)

    def chain(self, *others):
        return chain(self, *others)

    def dropwhile(self, predicate):
        return dropwhile(predicate, self)

    def takewhile(self, predicate):
        return takewhile(predicate, self)

    def ifilter(self, predicate):
        return ifilter(predicate, self)

    def ifilterfalse(self, predicate):
        return ifilterfalse(predicate, self)

    def imap(self, function):
        return imap(function, self)

    def starmap(self, function):
        return starmap(function, self)

    def islice(self, *args):
        return islice(self, *args)

    def compress(self, selectors):
        return compress(self, selectors)

    def groupby(self, *args, **kwargs):
        return groupby(self, *args, **kwargs)

    def tee(self, *args, **kwargs):
        return tee(self, *args, **kwargs)

    def permutations(self, *args, **kwargs):
        return permutations(self, *args, **kwargs)

    def combinations(self, *args, **kwargs):
        return combinations(self, *args, **kwargs)

    def combinations_with_replacement(self, *args, **kwargs):
        return combinations_with_replacement(self, *args, **kwargs)

    def izip(self, *args, **kwargs):
        return izip(self, *args, **kwargs)

    def izip_longest(self, *args, **kwargs):
        return izip_longest(self, *args, **kwargs)

    def product(self, *args, **kwargs):
        return product(self, *args, **kwargs)

    def __add__(self, other):
        # `container + other` chains the two iterables lazily
        return chain(self, other)

    def __iadd__(self, other):
        return chain(self, other)
class Table(IterContainer):
    """Base class for table containers; a string key selects a column view."""

    def __getitem__(self, item):
        # Non-string keys (int/slice) fall back to positional row access
        # from IterContainer; a field name yields the column as a view.
        if not isinstance(item, string_types):
            return super(Table, self).__getitem__(item)
        return ValuesView(self, item)
def values(table, *field, **kwargs):
"""
Return a container supporting iteration over values in a given field or
fields. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', True],
... ['b'],
... ['b', True],
... ['c', False]]
>>> foo = etl.values(table1, 'foo')
>>> foo
foo: 'a', 'b', 'b', 'c'
>>> list(foo)
['a', 'b', 'b', 'c']
>>> bar = etl.values(table1, 'bar')
>>> bar
bar: True, None, True, False
>>> list(bar)
[True, None, True, False]
>>> # values from multiple fields
... table2 = [['foo', 'bar', 'baz'],
... [1, 'a', True],
... [2, 'bb', True],
... [3, 'd', False]]
>>> foobaz = etl.values(table2, 'foo', 'baz')
>>> foobaz
('foo', 'baz'): (1, True), (2, True), (3, False)
>>> list(foobaz)
[(1, True), (2, True), (3, False)]
The field argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes. Multiple fields can also be
provided as positional arguments.
If rows are uneven, the value of the keyword argument `missing` is returned.
"""
return ValuesView(table, *field, **kwargs)
Table.values = values
class ValuesView(IterContainer):
    """Lazy view over the values of one field (or a tuple of fields)."""

    def __init__(self, table, *field, **kwargs):
        self.table = table
        # A single positional field arg is unwrapped for backwards
        # compatibility; multiple args are kept as a tuple.
        self.field = field[0] if len(field) == 1 else field
        self.kwargs = kwargs

    def __iter__(self):
        return itervalues(self.table, self.field, **self.kwargs)

    def __repr__(self):
        # Peek at up to six values so we can show five plus an ellipsis.
        samples = [repr(v) for v in islice(self, 6)]
        shown = ', '.join(samples[:5])
        suffix = ', ...' if len(samples) > 5 else ''
        return text_type(self.field) + ': ' + shown + suffix
def itervalues(table, field, **kwargs):
    """Yield the value(s) of the selected field(s) for each data row.

    Rows too short for the selection yield the `missing` keyword value
    (for a single field) or a tuple padded with it (for several fields).
    """
    missing = kwargs.get('missing', None)
    rows = iter(table)
    hdr = next(rows)
    indices = asindices(hdr, field)
    assert len(indices) > 0, 'no field selected'
    getvalue = operator.itemgetter(*indices)
    for row in rows:
        try:
            yield getvalue(row)
        except IndexError:
            if len(indices) == 1:
                # single field requested and the row is too short
                yield missing
            else:
                # pad the unavailable positions one index at a time
                padded = [row[i] if i < len(row) else missing
                          for i in indices]
                yield tuple(padded)
class TableWrapper(Table):
def __init__(self, inner):
self.inner = inner
def __iter__(self):
return iter(self.inner)
wrap = TableWrapper
def asindices(hdr, spec):
"""Convert the given field `spec` into a list of field indices."""
flds = list(map(text_type, hdr))
indices = list()
if not isinstance(spec, (list, tuple)):
spec = (spec,)
for s in spec:
# spec could be a field index (takes priority)
if isinstance(s, int) and s < len(hdr):
indices.append(s) # index fields from 0
# spec could be a field
elif s in flds:
indices.append(flds.index(s))
else:
raise FieldSelectionError(s)
return indices
def rowitemgetter(hdr, spec):
    # Resolve the field spec to indices, then build a comparison-safe getter.
    return comparable_itemgetter(*asindices(hdr, spec))
def rowgetter(*indices):
    """Return a callable mapping a row to a tuple of the given positions.

    Unlike bare ``operator.itemgetter``, the result is always a tuple,
    even for zero or one index.
    """
    n = len(indices)
    if n == 0:
        return lambda row: ()
    if n == 1:
        # itemgetter with a single argument returns the bare value, but
        # callers expect a singleton tuple, so build it explicitly
        only = indices[0]
        return lambda row: (row[only],)
    # two or more indices: itemgetter already returns a tuple and is fastest
    return operator.itemgetter(*indices)
def header(table):
    """Return the header row of `table` as a tuple.

    E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
        >>> etl.header(table)
        ('foo', 'bar')

    The header is always returned as a tuple, regardless of the underlying
    row type.
    """
    return tuple(next(iter(table)))
Table.header = header
def fieldnames(table):
    """Return the header row with every field coerced to text.

    When the header already contains only strings this is equivalent to
    :func:`header`.
    """
    hdr = header(table)
    return tuple(text_type(f) for f in hdr)
Table.fieldnames = fieldnames
def data(table, *sliceargs):
"""
Return a container supporting iteration over data rows in a given table
(i.e., without the header). E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
>>> d = etl.data(table)
>>> list(d)
[['a', 1], ['b', 2]]
Positional arguments can be used to slice the data rows. The sliceargs
are passed to :func:`itertools.islice`.
"""
return DataView(table, *sliceargs)
Table.data = data
class DataView(Table):
def __init__(self, table, *sliceargs):
self.table = table
self.sliceargs = sliceargs
def __iter__(self):
return iterdata(self.table, *self.sliceargs)
def iterdata(table, *sliceargs):
    """Iterate over `table`'s data rows, skipping the header row.

    Optional `sliceargs` are applied to the data rows as with
    :func:`itertools.islice`.
    """
    rows = islice(table, 1, None)
    return islice(rows, *sliceargs) if sliceargs else rows
def dicts(table, *sliceargs, **kwargs):
"""
Return a container supporting iteration over rows as dicts. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
>>> d = etl.dicts(table)
>>> d
{'foo': 'a', 'bar': 1}
{'foo': 'b', 'bar': 2}
>>> list(d)
[{'foo': 'a', 'bar': 1}, {'foo': 'b', 'bar': 2}]
Short rows are padded with the value of the `missing` keyword argument.
"""
return DictsView(table, *sliceargs, **kwargs)
Table.dicts = dicts
class DictsView(IterContainer):
def __init__(self, table, *sliceargs, **kwargs):
self.table = table
self.sliceargs = sliceargs
self.kwargs = kwargs
def __iter__(self):
return iterdicts(self.table, *self.sliceargs, **self.kwargs)
def __repr__(self):
vreprs = list(map(repr, islice(self, 6)))
r = '\n'.join(vreprs[:5])
if len(vreprs) > 5:
r += '\n...'
return r
def iterdicts(table, *sliceargs, **kwargs):
missing = kwargs.get('missing', None)
it = iter(table)
hdr = next(it)
if sliceargs:
it = islice(it, *sliceargs)
for row in it:
yield asdict(hdr, row, missing)
def asdict(hdr, row, missing=None):
    """Map header fields to row values; short rows are padded with `missing`."""
    flds = [text_type(f) for f in hdr]
    try:
        # fast path: row is at least as long as the header
        return dict((flds[i], row[i]) for i in range(len(flds)))
    except IndexError:
        # short row: pair up what we have and pad the rest
        pairs = []
        for i, f in enumerate(flds):
            try:
                pairs.append((f, row[i]))
            except IndexError:
                pairs.append((f, missing))
        return dict(pairs)
def namedtuples(table, *sliceargs, **kwargs):
"""
View the table as a container of named tuples. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
>>> d = etl.namedtuples(table)
>>> d
row(foo='a', bar=1)
row(foo='b', bar=2)
>>> list(d)
[row(foo='a', bar=1), row(foo='b', bar=2)]
Short rows are padded with the value of the `missing` keyword argument.
The `name` keyword argument can be given to override the name of the
named tuple class (defaults to 'row').
"""
return NamedTuplesView(table, *sliceargs, **kwargs)
Table.namedtuples = namedtuples
class NamedTuplesView(IterContainer):
def __init__(self, table, *sliceargs, **kwargs):
self.table = table
self.sliceargs = sliceargs
self.kwargs = kwargs
def __iter__(self):
return iternamedtuples(self.table, *self.sliceargs, **self.kwargs)
def __repr__(self):
vreprs = list(map(repr, islice(self, 6)))
r = '\n'.join(vreprs[:5])
if len(vreprs) > 5:
r += '\n...'
return r
def iternamedtuples(table, *sliceargs, **kwargs):
missing = kwargs.get('missing', None)
name = kwargs.get('name', 'row')
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
nt = namedtuple(name, tuple(flds))
if sliceargs:
it = islice(it, *sliceargs)
for row in it:
yield asnamedtuple(nt, row, missing)
def asnamedtuple(nt, row, missing=None):
    """Build an `nt` instance from `row`.

    Short rows are padded with `missing`, long rows are truncated; any
    other construction failure is re-raised.
    """
    try:
        return nt(*row)
    except TypeError:
        expected = len(nt._fields)
        actual = len(row)
        if actual < expected:
            # short row: pad out to the expected width
            return nt(*(tuple(row) + (missing,) * (expected - actual)))
        if actual > expected:
            # long row: drop the extras
            return nt(*row[:expected])
        # right length but construction still failed: not a shape problem
        raise
class Record(tuple):
    """A row tuple that also supports lookup by field name (item or attribute).

    Positions absent from a short row resolve to the `missing` value
    instead of raising.
    """

    def __new__(cls, row, flds, missing=None):
        return super(Record, cls).__new__(cls, row)

    def __init__(self, row, flds, missing=None):
        self.flds = flds
        self.missing = missing

    def _index_of(self, f):
        # Map a field name to its position; unknown names raise KeyError.
        if f in self.flds:
            return self.flds.index(f)
        raise KeyError('item ' + repr(f) +
                       ' not in fields ' + repr(self.flds))

    def __getitem__(self, f):
        idx = f if isinstance(f, int) else self._index_of(f)
        try:
            return super(Record, self).__getitem__(idx)
        except IndexError:  # short row: fall back to the missing value
            return self.missing

    def __getattr__(self, f):
        if f not in self.flds:
            raise AttributeError('item ' + repr(f) +
                                 ' not in fields ' + repr(self.flds))
        try:
            return super(Record, self).__getitem__(self.flds.index(f))
        except IndexError:  # short row
            return self.missing

    def get(self, key, default=None):
        # dict-like access with a default instead of KeyError
        try:
            return self[key]
        except KeyError:
            return default
def records(table, *sliceargs, **kwargs):
"""
Return a container supporting iteration over rows as records, where a
record is a hybrid object supporting all possible ways of accessing values.
E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
>>> d = etl.records(table)
>>> d
('a', 1)
('b', 2)
>>> list(d)
[('a', 1), ('b', 2)]
>>> [r[0] for r in d]
['a', 'b']
>>> [r['foo'] for r in d]
['a', 'b']
>>> [r.foo for r in d]
['a', 'b']
Short rows are padded with the value of the `missing` keyword argument.
"""
return RecordsView(table, *sliceargs, **kwargs)
Table.records = records
class RecordsView(IterContainer):
def __init__(self, table, *sliceargs, **kwargs):
self.table = table
self.sliceargs = sliceargs
self.kwargs = kwargs
def __iter__(self):
return iterrecords(self.table, *self.sliceargs, **self.kwargs)
def __repr__(self):
vreprs = list(map(repr, islice(self, 6)))
r = '\n'.join(vreprs[:5])
if len(vreprs) > 5:
r += '\n...'
return r
def iterrecords(table, *sliceargs, **kwargs):
missing = kwargs.get('missing', None)
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
if sliceargs:
it = islice(it, *sliceargs)
for row in it:
yield Record(row, flds, missing=missing)
def expr(s):
    """Compile a record-expression string into a one-argument function.

    Substrings enclosed in curly braces become record lookups, so
    ``"{foo} * {bar}"`` compiles to ``lambda rec: rec['foo'] * rec['bar']``.

    .. note:: the template is passed to :func:`eval`, so only hand this
       trusted expression strings.
    """
    pattern = re.compile(r'\{([^}]+)\}')
    body = pattern.sub(lambda m: "rec['%s']" % m.group(1), s)
    return eval("lambda rec: " + body)
def rowgroupby(table, key, value=None):
    """Convenient adapter for :func:`itertools.groupby`. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['a', 1, True],
        ...           ['b', 3, True],
        ...           ['b', 2]]
        >>> # group entire rows
        ... for key, group in etl.rowgroupby(table1, 'foo'):
        ...     print(key, list(group))
        ...
        a [('a', 1, True)]
        b [('b', 3, True), ('b', 2)]
        >>> # group specific values
        ... for key, group in etl.rowgroupby(table1, 'foo', 'bar'):
        ...     print(key, list(group))
        ...
        a [1]
        b [3, 2]

    N.B., assumes the input table is already sorted by the given key.

    """

    it = iter(table)
    hdr = next(it)
    flds = list(map(text_type, hdr))
    # wrap rows as records
    it = (Record(row, flds) for row in it)
    # determine key function
    if callable(key):
        # caller-supplied key function operates on the Record directly
        getkey = key
        native_key = True
    else:
        # field spec: build a comparison-safe getter over the field indices
        kindices = asindices(hdr, key)
        getkey = comparable_itemgetter(*kindices)
        native_key = False
    # itertools.groupby only groups adjacent rows — hence the sort
    # requirement noted in the docstring
    git = groupby(it, key=getkey)
    if value is None:
        if native_key:
            return git
        else:
            # comparable_itemgetter wraps keys for safe comparison; .inner
            # presumably unwraps to the original key value — confirm in
            # petl.comparison
            return ((k.inner, vals) for (k, vals) in git)
    else:
        if callable(value):
            getval = value
        else:
            vindices = asindices(hdr, value)
            getval = operator.itemgetter(*vindices)
        if native_key:
            return ((k, (getval(v) for v in vals))
                    for (k, vals) in git)
        else:
            return ((k.inner, (getval(v) for v in vals))
                    for (k, vals) in git)
def iterpeek(it, n=1):
    """Return ``(peek, iterator)`` without losing any items.

    With ``n == 1`` `peek` is the first item (StopIteration propagates on
    an empty input); otherwise it is a list of up to `n` items.  The
    returned iterator replays the peeked items before the rest.
    """
    it = iter(it)  # make sure it's an iterator
    if n == 1:
        first = next(it)
        return first, chain([first], it)
    prefix = list(islice(it, n))
    return prefix, chain(prefix, it)
def empty():
"""
Return an empty table. Can be useful when building up a table from a set
of columns, e.g.::
>>> import petl as etl
>>> table = (
... etl
... .empty()
... .addcolumn('foo', ['A', 'B'])
... .addcolumn('bar', [1, 2])
... )
>>> table
+-----+-----+
| foo | bar |
+=====+=====+
| 'A' | 1 |
+-----+-----+
| 'B' | 2 |
+-----+-----+
"""
return EmptyTable()
class EmptyTable(Table):
    """A table with a zero-length header and no data rows."""

    def __iter__(self):
        # single empty header row, no data rows
        yield ()
| 25.880674
| 80
| 0.545054
|
4a0c49257e8820a7afb4888ca5b939055a1e87db
| 866
|
py
|
Python
|
votesim/utilities/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 8
|
2019-10-21T23:24:51.000Z
|
2021-09-14T03:04:59.000Z
|
votesim/utilities/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 2
|
2021-02-09T23:52:47.000Z
|
2021-02-10T04:08:35.000Z
|
votesim/utilities/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 1
|
2019-10-21T23:32:18.000Z
|
2019-10-21T23:32:18.000Z
|
# -*- coding: utf-8 -*-
"""
Miscellaneous utility functions such as file writers, decorators, etc.
"""
# from . import decorators
# from . import misc
# from . import write
# from . import recorder
# from .decorators import lazy_property, lazy_property2, clean_lazy_properties
# from .misc import (
# flatten_dict,
# unflatten_dict,
# create_file_dirs,
# detectfiles,
# )
from votesim.utilities import (
decorators,
misc,
write,
recorder,
)
from votesim.utilities.decorators import (
lazy_property,
lazy_property2,
clean_lazy_properties,
clean_some_lazy_properties,
modify_lazy_property,
reuse_doc
)
from votesim.utilities.misc import (
flatten_dict,
unflatten_dict,
create_file_dirs,
detectfiles,
)
| 22.789474
| 78
| 0.630485
|
4a0c495d5160a68736f174b1f10e29525a976236
| 1,878
|
py
|
Python
|
tempest/services/queuing/json/queuing_client.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/queuing/json/queuing_client.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/queuing/json/queuing_client.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class QueuingClientJSON(rest_client.RestClient):
    """JSON REST client for the queuing service's v1 queues API."""

    def __init__(self, auth_provider):
        super(QueuingClientJSON, self).__init__(auth_provider)
        self.service = CONF.queuing.catalog_type
        self.version = '1'
        self.uri_prefix = 'v{0}'.format(self.version)

    def _queues_uri(self, queue_name=None):
        # Build '<prefix>/queues' or '<prefix>/queues/<name>'.
        if queue_name is None:
            return '{0}/queues'.format(self.uri_prefix)
        return '{0}/queues/{1}'.format(self.uri_prefix, queue_name)

    def list_queues(self):
        resp, body = self.get(self._queues_uri())
        return resp, json.loads(body)

    def create_queue(self, queue_name):
        resp, body = self.put(self._queues_uri(queue_name), body=None)
        return resp, body

    def get_queue(self, queue_name):
        resp, body = self.get(self._queues_uri(queue_name))
        return resp, json.loads(body)

    def head_queue(self, queue_name):
        # NOTE(review): parses a JSON body out of a HEAD response — confirm
        # the service actually returns one.
        resp, body = self.head(self._queues_uri(queue_name))
        return resp, json.loads(body)

    def delete_queue(self, queue_name):
        return self.delete(self._queues_uri(queue_name))
| 31.830508
| 69
| 0.669862
|
4a0c49eb67a420a2a9210888b35ca1b73397c897
| 215
|
py
|
Python
|
src/__init__.py
|
isabelcachola/PreSumm
|
878daed85c0f817d32bcd97a0fdb265674afe8c3
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
isabelcachola/PreSumm
|
878daed85c0f817d32bcd97a0fdb265674afe8c3
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
isabelcachola/PreSumm
|
878daed85c0f817d32bcd97a0fdb265674afe8c3
|
[
"MIT"
] | null | null | null |
import sys,os
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from src import models
from src import others
from src import distributed
from src import preprocess
from src import train
| 26.875
| 77
| 0.809302
|
4a0c4ac690170bb3f35d4be8b844c90370524757
| 1,727
|
py
|
Python
|
ask_a_mentor/migrations/0001_initial.py
|
Four-fun/stu-do-list
|
8902e7456cb5d0031d5c2799fb750557436953ad
|
[
"Unlicense"
] | null | null | null |
ask_a_mentor/migrations/0001_initial.py
|
Four-fun/stu-do-list
|
8902e7456cb5d0031d5c2799fb750557436953ad
|
[
"Unlicense"
] | null | null | null |
ask_a_mentor/migrations/0001_initial.py
|
Four-fun/stu-do-list
|
8902e7456cb5d0031d5c2799fb750557436953ad
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-05 15:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration for the ask_a_mentor app: creates the Post and
    # Comment tables, both linked to the project's swappable user model.
    initial = True
    dependencies = [
        # The user foreign keys below require the auth user model first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A question posted by a user, tagged with a course (matkul).
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1000)),
                # Closed set of course codes; stored value is the short code.
                ('matkul', models.CharField(choices=[('Alin', 'Aljabar Linear'), ('MPPI', 'Metodologi Penelitian dan Penulisan Ilmiah'), ('PBP', 'Pemrograman Berbasis Platform'), ('SDA', 'Struktur Data & Algoritma'), ('SOSI', 'Sistem Operasi untuk Sistem Informasi')], max_length=200)),
                ('message', models.CharField(max_length=1000)),
                # Creation timestamp, set once and not editable afterwards.
                ('time', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A user's comment; NOTE(review): has no FK to Post in this
        # migration, so comments are not linked to a post here.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=1000)),
                ('time', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 44.282051
| 286
| 0.63231
|
4a0c4b84a5c681a8ca954ae752f9568f016849e7
| 6,623
|
py
|
Python
|
env/lib/python3.7/site-packages/docusign_admin/models/certificate_response.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/docusign_admin/models/certificate_response.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/docusign_admin/models/certificate_response.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
DocuSign Admin API
An API for an organization administrator to manage organizations, accounts and users # noqa: E501
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CertificateResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type of each model attribute.
    swagger_types = {
        'id': 'str',
        'issuer': 'str',
        'thumbprint': 'str',
        'expiration_date': 'datetime',
        'is_valid': 'bool',
        'links': 'list[LinkResponse]'
    }
    # Maps attribute names to their JSON keys (identical here).
    attribute_map = {
        'id': 'id',
        'issuer': 'issuer',
        'thumbprint': 'thumbprint',
        'expiration_date': 'expiration_date',
        'is_valid': 'is_valid',
        'links': 'links'
    }
    def __init__(self, id=None, issuer=None, thumbprint=None, expiration_date=None, is_valid=None, links=None):  # noqa: E501
        """CertificateResponse - a model defined in Swagger.

        All fields are optional; attributes are only assigned (through
        their property setters) when a non-None value is supplied.
        """  # noqa: E501
        self._id = None
        self._issuer = None
        self._thumbprint = None
        self._expiration_date = None
        self._is_valid = None
        self._links = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if issuer is not None:
            self.issuer = issuer
        if thumbprint is not None:
            self.thumbprint = thumbprint
        if expiration_date is not None:
            self.expiration_date = expiration_date
        if is_valid is not None:
            self.is_valid = is_valid
        if links is not None:
            self.links = links
    @property
    def id(self):
        """Gets the id of this CertificateResponse.  # noqa: E501

        :return: The id of this CertificateResponse.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this CertificateResponse.

        :param id: The id of this CertificateResponse.  # noqa: E501
        :type: str
        """
        self._id = id
    @property
    def issuer(self):
        """Gets the issuer of this CertificateResponse.  # noqa: E501

        :return: The issuer of this CertificateResponse.  # noqa: E501
        :rtype: str
        """
        return self._issuer
    @issuer.setter
    def issuer(self, issuer):
        """Sets the issuer of this CertificateResponse.

        :param issuer: The issuer of this CertificateResponse.  # noqa: E501
        :type: str
        """
        self._issuer = issuer
    @property
    def thumbprint(self):
        """Gets the thumbprint of this CertificateResponse.  # noqa: E501

        :return: The thumbprint of this CertificateResponse.  # noqa: E501
        :rtype: str
        """
        return self._thumbprint
    @thumbprint.setter
    def thumbprint(self, thumbprint):
        """Sets the thumbprint of this CertificateResponse.

        :param thumbprint: The thumbprint of this CertificateResponse.  # noqa: E501
        :type: str
        """
        self._thumbprint = thumbprint
    @property
    def expiration_date(self):
        """Gets the expiration_date of this CertificateResponse.  # noqa: E501

        :return: The expiration_date of this CertificateResponse.  # noqa: E501
        :rtype: datetime
        """
        return self._expiration_date
    @expiration_date.setter
    def expiration_date(self, expiration_date):
        """Sets the expiration_date of this CertificateResponse.

        :param expiration_date: The expiration_date of this CertificateResponse.  # noqa: E501
        :type: datetime
        """
        self._expiration_date = expiration_date
    @property
    def is_valid(self):
        """Gets the is_valid of this CertificateResponse.  # noqa: E501

        :return: The is_valid of this CertificateResponse.  # noqa: E501
        :rtype: bool
        """
        return self._is_valid
    @is_valid.setter
    def is_valid(self, is_valid):
        """Sets the is_valid of this CertificateResponse.

        :param is_valid: The is_valid of this CertificateResponse.  # noqa: E501
        :type: bool
        """
        self._is_valid = is_valid
    @property
    def links(self):
        """Gets the links of this CertificateResponse.  # noqa: E501

        :return: The links of this CertificateResponse.  # noqa: E501
        :rtype: list[LinkResponse]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this CertificateResponse.

        :param links: The links of this CertificateResponse.  # noqa: E501
        :type: list[LinkResponse]
        """
        self._links = links
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively converting nested
        # models (anything with to_dict) inside lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Dict-subclass support emitted by the generator; a no-op here.
        if issubclass(CertificateResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Attribute-wise comparison via __dict__; type must match first.
        if not isinstance(other, CertificateResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.922764
| 125
| 0.580402
|
4a0c4b95a5acf31b4de01ea705df32a30cdba655
| 537
|
py
|
Python
|
setup.py
|
aheck/testbench
|
d8dba885817a9053e9fbd37a517751f0fe30dd93
|
[
"MIT"
] | 7
|
2019-04-29T12:38:50.000Z
|
2019-05-16T14:18:55.000Z
|
setup.py
|
aheck/testbench
|
d8dba885817a9053e9fbd37a517751f0fe30dd93
|
[
"MIT"
] | null | null | null |
setup.py
|
aheck/testbench
|
d8dba885817a9053e9fbd37a517751f0fe30dd93
|
[
"MIT"
] | null | null | null |
from setuptools import setup

# Package metadata collected in one mapping, then handed to setup().
PACKAGE_METADATA = dict(
    name='testbench',
    version='0.1.2',
    description='Integration Testing with Python and VirtualBox ',
    url='https://github.com/aheck/testbench',
    author='Andreas Heck',
    author_email='aheck@gmx.de',
    license='MIT',
    packages=['testbench'],
    install_requires=['pyvbox', 'paramiko'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)

setup(**PACKAGE_METADATA)
| 29.833333
| 68
| 0.605214
|
4a0c4bfa9d234174397b8cf171a7e157e5c54d8d
| 857
|
py
|
Python
|
demo/Model/hudong_class.py
|
RayL0707/Finance_KG
|
c4614bfb12a7ce20e05b42ab81ab6038ab5143f4
|
[
"MIT"
] | null | null | null |
demo/Model/hudong_class.py
|
RayL0707/Finance_KG
|
c4614bfb12a7ce20e05b42ab81ab6038ab5143f4
|
[
"MIT"
] | null | null | null |
demo/Model/hudong_class.py
|
RayL0707/Finance_KG
|
c4614bfb12a7ce20e05b42ab81ab6038ab5143f4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
class HudongItem:
    """Container for one Hudong Baike encyclopedia entry.

    Built from an `answer` dict with keys: title, detail, image,
    openTypeList, baseInfoKeyList, baseInfoValueList. The three list
    fields arrive as '##'-separated strings and are split into lists.
    """

    # Class-level defaults; instances overwrite them in __init__.
    title = None
    detail = None
    image = None
    openTypeList = None
    baseInfoKeyList = None
    baseInfoValueList = None
    label = None  # classification label, later read from a file

    def __init__(self, answer):
        """Populate all fields from the `answer` dict."""
        self.title = answer['title']
        self.detail = answer['detail']
        self.image = answer['image']
        # BUG FIX: the original assigned the bare local name `label`,
        # which silently did nothing; set the instance attribute so
        # "-1 == not yet labeled" actually takes effect.
        self.label = -1
        # '##'-delimited strings become lists; empty string -> [].
        self.openTypeList = self._split_field(answer['openTypeList'])
        self.baseInfoKeyList = self._split_field(answer['baseInfoKeyList'])
        self.baseInfoValueList = self._split_field(answer['baseInfoValueList'])

    @staticmethod
    def _split_field(value):
        """Split a '##'-delimited string; return [] for an empty string."""
        return value.split('##') if len(value) > 0 else []
| 24.485714
| 49
| 0.663944
|
4a0c4c7c01420ee71449acf356f1b6512b219ff7
| 1,139
|
py
|
Python
|
tests/test_personal_information.py
|
andrewgy8/ident
|
9ac4ee3080600147e3bc8a996425e354000da4c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_personal_information.py
|
andrewgy8/ident
|
9ac4ee3080600147e3bc8a996425e354000da4c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_personal_information.py
|
andrewgy8/ident
|
9ac4ee3080600147e3bc8a996425e354000da4c0
|
[
"Apache-2.0"
] | null | null | null |
from src.blockchain import SecretInformation
import unittest
import jwt
class TestPersonalInformation(unittest.TestCase):
    """Unit tests for SecretInformation encoding and validation."""

    info = {
        'name': 'Andrew',
        'surname': 'Graham',
        'email': 'andrew@gmail.com',
        'phone_number': '123-456-7890',
    }
    secret_key = 'happy.lucky'

    def setUp(self):
        # A fresh SecretInformation instance for every test method.
        self.p_info = SecretInformation(self.secret_key, **self.info)

    def test_init(self):
        assert self.p_info.key == self.secret_key
        token = self.p_info.encoded_info
        info_str = self.p_info.info_str
        assert token
        assert isinstance(info_str, str)
        # The JWT payload must round-trip the original data.
        payload = jwt.decode(token, self.secret_key)
        assert payload.get('name') == self.info.get('name')

    def test_positive_validation(self):
        assert self.p_info.is_valid(**self.info)

    def test_negative_validation(self):
        other = {
            'name': 'John',
            'surname': 'Smith',
            'email': 'j.smith@gmail.com',
            'phone_number': '123-456-7890',
        }
        assert not self.p_info.is_valid(**other)
| 27.780488
| 58
| 0.588235
|
4a0c4d8fcfed106d133cc591b132850230fb0f52
| 166
|
py
|
Python
|
d3ct/plugins/out_yaml.py
|
metro-nom/d3ct
|
2d619b46fac7de29c031a3737570ca62e33d8c2f
|
[
"BSD-3-Clause"
] | null | null | null |
d3ct/plugins/out_yaml.py
|
metro-nom/d3ct
|
2d619b46fac7de29c031a3737570ca62e33d8c2f
|
[
"BSD-3-Clause"
] | null | null | null |
d3ct/plugins/out_yaml.py
|
metro-nom/d3ct
|
2d619b46fac7de29c031a3737570ca62e33d8c2f
|
[
"BSD-3-Clause"
] | null | null | null |
import yaml
from d3ct.plugins.base import PluginBase
class Generator(PluginBase):
    """Output plugin that renders the wrapped object's data as YAML."""

    @staticmethod
    def output(py_obj):
        # Serialize the payload, then emit it on stdout.
        rendered = yaml.dump(py_obj.data)
        print(rendered)
| 15.090909
| 40
| 0.716867
|
4a0c4d9a4de56e12bcb41da18d0eaa294dc1c6b3
| 676
|
py
|
Python
|
source/pkgsrc/databases/py-peewee/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/databases/py-peewee/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/databases/py-peewee/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-setup.py,v 1.3 2020/11/09 09:29:12 adam Exp $
Find libsqlite3.
--- setup.py.orig 2018-08-16 18:40:47.000000000 +0000
+++ setup.py
@@ -73,8 +73,9 @@ def _have_sqlite_extension_support():
success = False
try:
compiler.link_shared_object(
- compiler.compile([src_file], output_dir=tmp_dir),
+ compiler.compile([src_file], output_dir=tmp_dir, include_dirs=['@BUILDLINK_PREFIX.sqlite3@/include']),
bin_file,
+ library_dirs=['@BUILDLINK_PREFIX.sqlite3@/lib'],
libraries=['sqlite3'])
except CCompilerError:
print('unable to compile sqlite3 C extensions - missing headers?')
| 37.555556
| 115
| 0.64645
|
4a0c4eafd91802fc3e18abd64a28dc7da181b89e
| 2,888
|
py
|
Python
|
tests/unit_tests/test_near_field_cuda.py
|
KMCzajkowski/smuthi
|
a86e1c894ac2067a05c123b8e9a621597c198caa
|
[
"MIT"
] | 3
|
2020-02-29T14:54:45.000Z
|
2022-03-18T11:51:11.000Z
|
tests/unit_tests/test_near_field_cuda.py
|
KMCzajkowski/smuthi
|
a86e1c894ac2067a05c123b8e9a621597c198caa
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_near_field_cuda.py
|
KMCzajkowski/smuthi
|
a86e1c894ac2067a05c123b8e9a621597c198caa
|
[
"MIT"
] | 3
|
2021-05-15T06:54:48.000Z
|
2021-12-27T11:30:14.000Z
|
import smuthi.initial_field as init
import smuthi.particles as part
import smuthi.coordinates as coord
import smuthi.simulation as simul
import smuthi.layers as lay
import smuthi.scattered_field as sf
import smuthi.cuda_sources as cu
import numpy as np
# Simulation parameters: vacuum wavelength, dipole position and moment.
ld = 550
rD = [100, -100, 100]
D = [1e7, 2e7, 3e7]
# Complex contour for the k_parallel integration (values in units of neff).
waypoints = [0, 0.8, 0.8-0.1j, 2.1-0.1j, 2.1, 4]
neff_discr = 2e-2
coord.set_default_k_parallel(vacuum_wavelength = ld, neff_waypoints=waypoints, neff_resolution=neff_discr)
# initialize particle object
sphere1 = part.Sphere(position=[200, 200, 300], refractive_index=2.4 + 0.0j, radius=110, l_max=3, m_max=3)
sphere2 = part.Sphere(position=[-200, -200, 300], refractive_index=2.4 + 0.0j, radius=120, l_max=3, m_max=3)
sphere3 = part.Sphere(position=[-200, 200, 300], refractive_index=2.5 + 0.0j, radius=90, l_max=3, m_max=3)
part_list = [sphere1, sphere2, sphere3]
# initialize layer system object (thicknesses and refractive indices)
lay_sys = lay.LayerSystem([0, 400, 0], [1+6j, 2.3, 1.5])
# initialize dipole object
dipole = init.DipoleSource(vacuum_wavelength=ld, dipole_moment=D, position=rD)
# run simulation
simulation = simul.Simulation(layer_system=lay_sys, particle_list=part_list, initial_field=dipole, log_to_terminal=False)
simulation.run()
# Evaluation points for the field comparison below.
xarr = np.array([-300, 400, -100, 200])
yarr = np.array([200, -100, 400, 300])
zarr = np.array([-50, 200, 600, 700])
# First pass: evaluate scattered and initial fields on the CPU.
scat_fld_exp = sf.scattered_field_piecewise_expansion(ld, part_list, lay_sys)
e_x_scat_cpu, e_y_scat_cpu, e_z_scat_cpu = scat_fld_exp.electric_field(xarr, yarr, zarr)
e_x_init_cpu, e_y_init_cpu, e_z_init_cpu = simulation.initial_field.electric_field(xarr, yarr, zarr, lay_sys)
# Second pass: enable GPU acceleration and evaluate the same fields again.
cu.enable_gpu()
scat_fld_exp = sf.scattered_field_piecewise_expansion(ld, part_list, lay_sys)
e_x_scat_gpu, e_y_scat_gpu, e_z_scat_gpu = scat_fld_exp.electric_field(xarr, yarr, zarr)
e_x_init_gpu, e_y_init_gpu, e_z_init_gpu = simulation.initial_field.electric_field(xarr, yarr, zarr, lay_sys)
def test_electric_field():
    """Check that GPU field evaluation matches the CPU reference.

    For each Cartesian component of both the scattered and the initial
    field, the relative L2 error between the CPU and GPU results must
    stay below 1e-5.
    """
    def rel_err(reference, candidate):
        # Relative L2 deviation of `candidate` from `reference`.
        return np.linalg.norm(reference - candidate) / np.linalg.norm(reference)

    err_scat_x = rel_err(e_x_scat_cpu, e_x_scat_gpu)
    err_in_x = rel_err(e_x_init_cpu, e_x_init_gpu)
    err_scat_y = rel_err(e_y_scat_cpu, e_y_scat_gpu)
    err_in_y = rel_err(e_y_init_cpu, e_y_init_gpu)
    err_scat_z = rel_err(e_z_scat_cpu, e_z_scat_gpu)
    err_in_z = rel_err(e_z_init_cpu, e_z_init_gpu)
    print('scattered field component errors: ', err_scat_x, err_scat_y, err_scat_z)
    print('initial field component errors: ', err_in_x, err_in_y, err_in_z)
    assert err_scat_x < 1e-5
    assert err_scat_y < 1e-5
    assert err_scat_z < 1e-5
    assert err_in_x < 1e-5
    assert err_in_y < 1e-5
    assert err_in_z < 1e-5


if __name__ == '__main__':
    test_electric_field()
| 41.855072
| 121
| 0.754155
|
4a0c4f392a2f5bc7380eb63690e641187ecc8cf2
| 1,393
|
py
|
Python
|
nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py
|
dPys/nipype
|
75030b29297808e7c9a9e91b411b685154dff60b
|
[
"Apache-2.0"
] | 1
|
2019-03-25T14:11:18.000Z
|
2019-03-25T14:11:18.000Z
|
nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py
|
dPys/nipype
|
75030b29297808e7c9a9e91b411b685154dff60b
|
[
"Apache-2.0"
] | 1
|
2017-01-05T01:24:33.000Z
|
2017-01-05T01:24:33.000Z
|
nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py
|
wtriplett/nipype
|
388f140fceaf55438a987e9cdfa2a8e995428afd
|
[
"Apache-2.0"
] | 1
|
2020-12-16T16:36:48.000Z
|
2020-12-16T16:36:48.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..developer import JistLaminarProfileGeometry
def test_JistLaminarProfileGeometry_inputs():
    """Verify the trait metadata of the auto-generated input spec."""
    expected = dict(
        args=dict(argstr="%s",),
        environ=dict(nohash=True, usedefault=True,),
        inProfile=dict(argstr="--inProfile %s", extensions=None,),
        incomputed=dict(argstr="--incomputed %s",),
        inoutside=dict(argstr="--inoutside %f",),
        inregularization=dict(argstr="--inregularization %s",),
        insmoothing=dict(argstr="--insmoothing %f",),
        null=dict(argstr="--null %s",),
        outResult=dict(argstr="--outResult %s", hash_files=False,),
        xDefaultMem=dict(argstr="-xDefaultMem %d",),
        xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,),
        xPrefExt=dict(argstr="--xPrefExt %s",),
    )
    inputs = JistLaminarProfileGeometry.input_spec()
    for trait_name, metadata in expected.items():
        for meta_key, expected_value in metadata.items():
            assert getattr(inputs.traits()[trait_name], meta_key) == expected_value
def test_JistLaminarProfileGeometry_outputs():
    """Verify the trait metadata of the auto-generated output spec."""
    expected = dict(outResult=dict(extensions=None,),)
    outputs = JistLaminarProfileGeometry.output_spec()
    for trait_name, metadata in expected.items():
        for meta_key, expected_value in metadata.items():
            assert getattr(outputs.traits()[trait_name], meta_key) == expected_value
| 40.970588
| 69
| 0.664752
|
4a0c4f3efc9a7fdb4b43b70a61a7040e1b0e8517
| 19,928
|
py
|
Python
|
DL/text.py
|
sanchjain/Cicada-3301-Hackathon
|
40b332bde397894dda710b9443fa3fbbccfb128d
|
[
"MIT"
] | 1
|
2021-05-29T11:42:18.000Z
|
2021-05-29T11:42:18.000Z
|
DL/text.py
|
sanchjain/Cicada-3301-Hackathon
|
40b332bde397894dda710b9443fa3fbbccfb128d
|
[
"MIT"
] | null | null | null |
DL/text.py
|
sanchjain/Cicada-3301-Hackathon
|
40b332bde397894dda710b9443fa3fbbccfb128d
|
[
"MIT"
] | 3
|
2021-05-28T15:28:21.000Z
|
2021-05-28T15:39:27.000Z
|
from sentence_transformers import SentenceTransformer
from transformers import logging
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
logging.set_verbosity_error()
class TextFilter:
    """Semantic text filter built on sentence-BERT embeddings.

    A "filter" is the mean embedding of a set of reference texts stored
    as a .npy file; a new text passes when its cosine similarity to that
    mean exceeds `threshold`.
    """

    def __init__(self, threshold=0.45):
        # Loading the encoder model is the expensive step.
        self.model = SentenceTransformer('bert-base-nli-mean-tokens')
        self.threshold = threshold

    def run(self, path, text):
        """Return True iff `text` matches the filter stored at `path`."""
        stored_embeds = np.load(path)
        text_embeds = self.model.encode([text])
        probs = cosine_similarity(stored_embeds, text_embeds)
        # Idiom fix: compare directly instead of `True if ... else False`.
        return bool(probs.item(0) > self.threshold)

    def generateFilter(self, textList, path):
        """Encode `textList`, average the embeddings, and save to `path`.

        Returns the (1, dim) mean embedding that was saved.
        """
        text_embeds = self.model.encode(textList)
        mean_embeds = np.mean(text_embeds, axis=0)[np.newaxis, :]
        np.save(path, mean_embeds)
        return mean_embeds
if __name__ == '__main__':
sentences = [
"I always wrote this series off as being a complete stink-fest because Jim Belushi was involved in it, and heavily. But then one day a tragic happenstance occurred. After a White Sox game ended I realized that the remote was all the way on the other side of the room somehow. Now I could have just gotten up and walked across the room to get the remote, or even to the TV to turn the channel. But then why not just get up and walk across the country to watch TV in another state? ""Nuts to that"", I said. So I decided to just hang tight on the couch and take whatever Fate had in store for me. What Fate had in store was an episode of this show, an episode about which I remember very little except that I had once again made a very broad, general sweeping blanket judgment based on zero objective or experiential evidence with nothing whatsoever to back my opinions up with, and once again I was completely right! This show is a total crud-pie! Belushi has all the comedic delivery of a hairy lighthouse foghorn. The women are physically attractive but too Stepford-is to elicit any real feeling from the viewer. There is absolutely no reason to stop yourself from running down to the local TV station with a can of gasoline and a flamethrower and sending every copy of this mutt howling back to hell. <br /><br />Except.. <br /><br />Except for the wonderful comic sty lings of Larry Joe Campbell, America's Greatest Comic Character Actor. This guy plays Belushi's brother-in-law, Andy, and he is gold. How good is he really? Well, aside from being funny, his job is to make Belushi look good. That's like trying to make butt warts look good. But Campbell pulls it off with style. Someone should invent a Nobel Prize in Comic Buffoonery so he can win it every year. Without Larry Joe this show would consist of a slightly vacant looking Courtney Thorne-Smith smacking Belushi over the head with a frying pan while he alternately beats his chest and plays with the straw on the floor of his cage. 
5 stars for Larry Joe Campbell designated Comedic Bacon because he improves the flavor of everything he's in!",
"It's been about 14 years since Sharon Stone awarded viewers a leg-crossing that twisted many people's minds. And now, God knows why, she's in the game again. ""Basic Instinct 2"" is the sequel to the smash-hit erotica ""Basic Instinct"" featuring a sexy Stone and a vulnerable Michael Douglas. However, fans of the original might not even get close to this one, since ""Instinct 2"" is painful film-making, as the mediocre director Michael Caton-Jones assassinates the legacy of the first film.<br /><br />The plot of the movie starts when a car explosion breaks in right at the beginning. Catherine Tramell (Sharon Stone, trying to look forcefully sexy) is a suspect and appears to be involved in the murder. A psychiatrist (a horrible David Morrisey) is appointed to examine her, but eventually falls for an intimate game of seduction.<br /><br />And there it is, without no further explanations, the basic force that moves this ""Instinct"". Nothing much is explained and we have to sit through a sleazy, C-class erotic film. Sharon Stone stars in her first role where she is most of the time a turn-off. Part of it because of the amateurish writing, the careless direction, and terrifyingly low chemistry. The movie is full of vulgar dialogues and even more sexuality (a menage a trois scene was cut off so that this wouldn't be rated NC-17) than the first entrance in the series. ""Instinct"" is a compelling torture.<br /><br />To top it off, everything that made the original film a guilty pleasure is not found anywhere in the film. The acting here is really bad. Sharon Stone has some highlights, but here, she gets extremely obnoxious. David Morrisey stars in the worst role of his life, and seems to never make more than two expressions in the movie- confused and aroused. ""Instinct 2"" is a horrible way to continue an otherwise original series, that managed to put in thriller with erotica extremely well. 
Paul Verhoeven, how I miss you....<br /><br />""Basic Instinct 2"" never sounded like a good movie, and, indeed, it isn't. Some films should never get out of paper, and that is the feeling you get after watching this. Now, it is much easier to understand why Douglas and David Cronenberg dropped out, and why Sharon Stone was expecting a huge paycheck for this......-----3/10",
"someone needed to make a car payment... this is truly awful... makes jean Claude's cyborg look like gone with the wind... this is an hour I wish I could sue to get back... luckily it produced severe somnolence... from which I fell asleep. how can actors of this caliber create this dog? I would rather spend the time watching algae grow on the side of a fish tank than partake of this wholly awful concoction of several genre. I now use the DVD as a coaster on my coffee table. $5.99 at walmart is far too much to spend on this movie... if you really have to have it, wait till they throw them out after they have carried them on the inventory for several years and are frustrated that they would not sell.<br /><br />please for the love of god let this movie die of obscurity.",
"The Guidelines state that a comment must contain a minimum of four lines. That is the only reason I am saying anything more about Tomcats. Because after all, my one line summary really says everything there is to say. There is absolutely NOTHING remotely entertaining in this film.",
"This movie is a muddled mish-mash of clichés from recent cinema. There are some promising ideas in there, but while the director was clearly aiming to wind up with a hauntingly ambiguous film, what he ended up with was a confusing mess. Lead actor Daniel Wu does a fair job but with no central theme it seems as though he doesn't have much to work with. Furthermore, the movie is largely devoid of scares (although, in fairness, there are some creepy moments amid the drudgery).<br /><br />*MILD SPOILERS*<br /><br />We have the mysterious death of an estranged twin, diabolical librarians, ghostly love interests, identity confusion, death by savage monkeys, oedipal conflict, abusive stepfathers, sublimated homosexuality, and crime gang connections. The only real commonality these elements share seems to be that they cause the protagonist to express a vague sense of confusion and discontent. <br /><br />Perhaps the most disappointing aspect to this film is that despite the brother's death by monkeys being strongly featured on the DVD cover, the act itself is never directly portrayed. Instead, director Julian Lee uses what appears to be stock footage of monkeys - not very scary.<br /><br />*END SPOILERS*<br /><br />Avoid this one. For an excellent psychological, ambiguous horror tale, check out the Korean film A Tale of Two Sisters (2003)."
"Why did I waste 1.5 hours of my life watching this? Why was this film even made? Why am I even commenting on this film?<br /><br />One reviewer said this film took patience to watch and it was n't for everybody. I cannot figure out who this movie is for. maybe after dropping a hit of acid, SOMEBODY, SOMEWHERE could watch this and make some sense out of it. It is incoherent, it isn't experimental, it's plain and simple garbage. The film follows no plot line whatsoever, just when you think you have something, well.....you don't. <br /><br />I think the ending brought some finality to the film (no pun intended), the viewer gets a glimpse of what might have been going on. I don't think I put a spoiler in here, not that it would matter. This film is another must miss in the world of filmdom.",
"This film takes you on one family's impossible journey, and makes you feel every step of their odyssey. Beautifully acted and photographed, heartbreakingly real. Its last line, with its wistful hope, is one of the more powerful in memory.",
"The Russian space station 'Avna' with a crew of four Russians and two Americans is threatening to re-enter the Earth's atmosphere in a matter of days. Russia asks for NASA's help in rescuing the stranded crew and NASA scrambles the space shuttle Atlantis. The NSA also have an interest in the 'Prometheus', a prototype microwave power source being tested aboard 'Avna' and organise for one of their men to be placed on the mission.<br /><br />That's the plot. Onto less important things. The space station and the shuttle are the same, blatantly obvious models used in 'Fallout', 'Memorial Day' and 'Dark Breed' (and a handful of other films, I suspect). The model effects are so obvious throughout the entire movie and make the film look very 1960s. The sets are a little better but are far too '80s for what is supposedly a brand new station built by an American company (which later comes in as part of a conspiracy to destroy 'Avna' and the 'Prometheus' and claim the insurance. The script has a few good moments (including Yuri's farewell and the little spiel at the end) but is otherwise fairly bland and sub-standard. The acting is okay; the only real standout performance comes from Alex Veadov who offers up some of the film's better dialogue. Michael Dudikoff is, surprisingly, one of the best parts about this film. Ice-T is Ice-T. 'Nuff said. The film offers a few surprises, though, that I don't wish to spoil.<br /><br />Certainly one of the better low-grade, contemporary-set sci-fi films of the last six years, but not the best. The film is watchable but the special effects and plot will probably put a lot of viewers off. Rent the other 'Stranded' sci-fi film instead.",
"the more i think about it, there was nothing redeeming about this<br /><br />movie. i saw it 9 months ago, so my memory might have made it<br /><br />worse than it was, but i do know it was at least as bad as a 4 out of<br /><br />10. <br /><br />after seeing the movie, i met the director. he seemed so clueless<br /><br />as to what he was doing or what he had done, and as far as i<br /><br />could tell, he didn't care for the film either. even he agreed that he<br /><br />didn't really know what he was doing, and he was forced to do<br /><br />certain things because it was filmed digitally. <br /><br />i felt that the movie was trying to hard to fit in to the formula that it<br /><br />built for itself: ""9 people all have to be connected in some way. how<br /><br />can we get from point 'A' to point 'B'"" so in order get from the<br /><br />prostitute we see in the start and back to her at the end they 10<br /><br />minutes on each character's relationship to another person. it<br /><br />makes one feel choked by the 2 demensional, badly drawn<br /><br />characters.<br /><br />I just remembered the one redeeming part of the movie... Steve<br /><br />Bouchemi there is one scene where he is amazing. that's it. as i<br /><br />say... 4 out of 10.",
"This is very dated, but that's part of the charm with this 1933 movie. You can say the same for most Pre-Code films; they're just different, and usually in an interesting way.<br /><br />It was the short running time, the great acting of Spencer Tracy and the beautiful face and sweetness of Loretta Young's character which kept me watching and enjoying this stagy-but-intriguing film.<br /><br />You'd be hard-pressed to find a nicer girl than ""Trinna,"" played by the 20-year-old Young who was already into making her 50th movie! (She started acting as a small child. That, and the fact they made movies quickly back in the old days.) The camera, although in soft focus throughout much of the film, zoomed in on Loretta's face and eyes many times and I was mesmerized by her beauty.<br /><br />Playing a crotchety man with a cynical outlook on life, Tracy's ""Bill"" slowly transformed into a loving man, thanks to Trinna. Spencer delivered his lines here with such naturalness that you hardly knew he was acting.<br /><br />Although they have small roles, supporting actors Walter Connolly, Marjorie Rambeau, Arthur Hohl and Glenda Farrell leave lasting impressions long after viewing this 75-minute film. I was particularly fascinated with Connolly's role as the minister/father figure of the camp.<br /><br />The story is a little far-fetched but - hey - that's the movies. This story is about two lonely Great Depression victims trying to survive in a ""Hooverville""-type camp and it winds up to be a very touching tale.",
"A powerful adaptation of the best-selling book and the smash Broadway play about the lives of Bessie and Sadie Delany, two ""colored"" sisters who lived past the age of 100. Wonderfully played in their old age by Ruby Dee and Diahann Carroll, respectively, they tell their story in flashbacks to Amy Hill Hearth (played by Amy Madigan), a white New York Times reporter. The flashback and present-day scenes don't have as much inspirational value in them as in the book, but really are powerful. However, certain aspects of the sisters' lives, such as the inter-racial background of their mother and the reasons behind their father's stern personality are not presented clearly. You need to read the book to fully understand these things. Which is just as well, because the book's just as great! Aside from those flaws, it's wonderfully done and performed, especially by Dee and Carroll, and a very powerful and educational movie.",
"This movie's origins are a mystery to me, as I only know as much as IMDB did before I rented it. I assume that before ""Starship Troopers"", ""Killshot"" was one of the countless unaired pilots that never made it to network, cable, or otherwise. The new title of ""Kill Shot"" is comically thrown into the opening sequence, the first of many quick clues that this was not ever intended for the cinema. The quick cuts, cheesy ""Melrose Place"" music, and short 2-second close-up candid shots of the main actors let you know what you're in for.<br /><br />And I don't mind at all. I rented this movie seeing the repackaging that puts Casper Van Dien and Denise Richards on the cover in front of a volleyball net thinking it would be funny to see them in a movie besides the SciFi travesty of Starship Troopers (an excellent book, in my opinion, not so hot a movie - but that's another review). After looking it up on IMDB, my roommate and I surmised that the pilot was dragged up after the apparent success of Troopers and Richards own career (see Bond-Girl and Wild Things references here). They threw in a sex scene involving a minor character to reach the coveted R-rated status - coveted in suspense Video Rental sections, that is. In any event, they should have left it unrated if you're trying to sell it in the suspense/softcore porn section.<br /><br />All in all, it's entertaining. I hate to spoil the fun of telling you it's a TV pilot, though. That was the biggest pull while watching it - when you expect a cinematic movie and get a TV show, the differences between them make themselves more clear than usual.<br /><br />Would I rent it again? No. Would I watch this TV show? Well, why not - it's better than Baywatch. And their meager attempts at hitting all demographics would have done well back in the mid 90s. 
Token black guy (who's gay to avoid the TV taboo of inter-racial dating), token Asian (Japanese, I assume from the name Koji) more adept at science and computers than talking to women, beautiful, intelligent Latina pre-med student who has everything going for her except her family's bank account - this show probably would have done ok.<br /><br />But as a movie it just cracks me.<br /><br />I gave it a 7 out of 10, considering what it was and what it was forced to become. It made for a very enjoyable evening, and that's all I ask of rentals.",
"I really enjoyed the first film and when it turned up again, without thinking, or checking, I took a family of friends to see it. I was ashamed that I had enthused so much about it to them.<br /><br />Disney processed the original film just like the human body processes a delicious meal - takes in something good and turns out ... well, you know. And by having a dark-skinned person as the FBI man, the results of fingerprinting the informant were subdued.<br /><br />Taken as an isolated film, I suppose it is not too bad if one likes that weird sort of thing, but when one has read the book or seen the first film - horrible!"
]
text = "I had seen 'Kalifornia' before (must be about 10 years ago) and I still remember to be very impressed by it. That's why I wanted to see it again and all I can say is that it still hasn't lost its power, even though I'm used to a lot more when it comes to movies than that I was ten years ago.<br /><br />'Kalifornia' tells the tale of the writer Brian Kessler and his girlfriend Carrie Laughlin, a photographer, who want to move to California. But instead of stepping on a plain and flying right to the state where they say it never rains, they choose to make a trip by car. He wants to write a book about America's most famous serial killers and she will make the matching pictures. But because their car uses an enormous amount of petrol, they decide to take another couple with them, so they can spread the costs of the trip. Only one couple has answered the add, so they will automatically be the lucky ones. But they haven't met each other yet and when seeing the other couple for the first time, when their trip has already started, Carrie is shocked. Without wanting to be prejudiced, she can only conclude that Early Grayce and Adele Corners are poor white trailer park trash. She definitely doesn't want them in her car, but Brian doesn't really mind to take them with them and decides to stop and pick them up anyway. At first the couple doesn't seem to be that bad after all, but gradually Early Grayce changes from a trashy hillbilly into a remorseless murderer...<br /><br />Not only is the story very impressive, so is the acting from our four leads. Brad Pitt is incredible as Early Grayce. His performance in this movie may well be his best ever. The same for Juliette Lewis. She plays the childish and naive girlfriend that doesn't want to hear a bad word about her Early and does that really very well. But David Duchovny and Michelle Forbes are a surprise as well. 
They both did a very good job and I really wonder why we never heard anything from Forbes again since this movie, because she really proves to have a lot of talent.<br /><br />Overall this is a very good and impressive psychological thriller with a very powerful story, but because of the graphic violence, I can imagine that it may not be to everybody's taste (although I don't really see another way how to portray a serial killer in a believable way). Personally I really liked this movie a lot and the violence never bothered me (it's a part of the story that's too important to be left out). I reward this movie with an 8/10."
tf = TextFilter()
tf.generateFilter(sentences, './filter.npy')
print(tf.run('./filter.npy', text))
| 433.217391
| 2,527
| 0.764301
|
4a0c50595270adca2d82942e2a2192add377b708
| 3,152
|
py
|
Python
|
pyvault/crypto/encryption_utils.py
|
MattCCS/PyVault
|
c0e87a1bf731e0c7c800a599b0b33885e484b92b
|
[
"MIT"
] | null | null | null |
pyvault/crypto/encryption_utils.py
|
MattCCS/PyVault
|
c0e87a1bf731e0c7c800a599b0b33885e484b92b
|
[
"MIT"
] | null | null | null |
pyvault/crypto/encryption_utils.py
|
MattCCS/PyVault
|
c0e87a1bf731e0c7c800a599b0b33885e484b92b
|
[
"MIT"
] | null | null | null |
"""
This file contains a simple API for encrypting and
decrypting data with symmetric encryption, as well as
for generating AES keys and initialization vectors.
Author: Matthew Cotton
"""
# standard
import json
# custom
from pyvault.crypto import crypto_settings
from pyvault.crypto import errors
from pyvault.crypto import generic_utils
from pyvault.crypto import integrity_utils
from pyvault.crypto import packing_utils
# installed
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def encrypt(aes_key, data):
    """Serialize *data* as JSON and encrypt it under *aes_key*.

    Returns the packed ciphertext produced by :func:`_encrypt_hmac`.
    """
    # NOTE(review): json.dumps returns ``str`` on Python 3, while _encrypt
    # requires bytes — presumably this module targets Python 2; confirm
    # before porting.
    return _encrypt_hmac(aes_key, json.dumps(data))
def decrypt(aes_key, ciphertext):
    """Decrypt packed *ciphertext* under *aes_key* and parse it as JSON."""
    return json.loads(_decrypt_hmac(aes_key, ciphertext))
####################################
def _encrypt_hmac(aes_key, plaintext):
    """Encrypt *plaintext* and sign it; returns packed_ciphertext.

    The result packs (IV, ciphertext, HMAC-over-plaintext) into a single
    blob via packing_utils.pack.
    """
    iv = generic_utils.iv()
    # The HMAC is computed over the plaintext (encrypt-and-MAC); the
    # signature and the ciphertext are independent of each other.
    signature = integrity_utils.hmac_generate(aes_key, plaintext)
    encrypted = _encrypt(aes_key, iv, plaintext)
    return packing_utils.pack(iv, encrypted, signature)
def _decrypt_hmac(aes_key, packed_ciphertext):
    """Unpack, decrypt, and verify packed_ciphertext; returns plaintext.

    Raises SignatureVerificationFailedError when the HMAC check fails.
    NOTE(review): the MAC covers the plaintext (encrypt-and-MAC), so
    verification necessarily happens *after* decryption — confirm this
    matches the project's threat model.
    """
    iv, encrypted, signature = packing_utils.unpack(packed_ciphertext)
    plaintext = _decrypt(aes_key, iv, encrypted)
    if integrity_utils.hmac_verify(aes_key, plaintext, signature):
        return plaintext
    raise errors.SignatureVerificationFailedError()
####################################
def generate_key_and_iv(key_length=crypto_settings.AES_KEY_BYTES, iv_length=crypto_settings.AES_IV_BYTES):
    """Return a fresh random (AES key, IV) pair.

    Both values are drawn from os.urandom (via generic_utils.nonce), which
    is recommended for cryptographic use.  Lengths default to the values
    configured in crypto_settings.

    + produces AES key and IV (private!)
    """
    return (generic_utils.nonce(key_length), generic_utils.nonce(iv_length))
def _encrypt(aes_key, aes_iv, plaintext):
    """
    Encrypts the given plaintext with the given
    key and initialization vector using
    AES in Counter (CTR) mode (the key length selects AES-128/192/256).

    + produces ciphertext (public)

    Raises TypeError if plaintext is not bytes, and
    SymmetricEncryptionError on cipher failures (e.g. bad key/IV length).
    """
    # Explicit check instead of `assert`, which is silently stripped when
    # Python runs with -O.
    if not isinstance(plaintext, bytes):
        raise TypeError(
            "plaintext must be bytes, got %s" % type(plaintext).__name__
        )
    backend = default_backend()
    try:
        # AES in CTR mode
        cipher = Cipher(algorithms.AES(aes_key), modes.CTR(aes_iv), backend=backend)
        encryptor = cipher.encryptor()
        return encryptor.update(plaintext) + encryptor.finalize()
    except ValueError as err:
        raise errors.SymmetricEncryptionError(err)
def _decrypt(aes_key, aes_iv, ciphertext):
    """
    Decrypts the given ciphertext with the given
    key and initialization vector using
    AES in Counter (CTR) mode.

    + produces plaintext (private!)

    Raises SymmetricEncryptionError on cipher failures (e.g. bad key/IV
    length).
    """
    backend = default_backend()
    try:
        # AES in CTR mode — must mirror _encrypt exactly.
        cipher = Cipher(algorithms.AES(aes_key), modes.CTR(aes_iv), backend=backend)
        decryptor = cipher.decryptor()
        # finalize() is a no-op for the CTR stream cipher, but it completes
        # the cipher-context API contract and keeps symmetry with _encrypt.
        return decryptor.update(ciphertext) + decryptor.finalize()
    except ValueError as err:
        raise errors.SymmetricEncryptionError(err)
| 28.142857
| 106
| 0.716053
|
4a0c5095e8cd219b444c3ac2c626bf38b733db23
| 27,658
|
py
|
Python
|
labscript_devices/AndorSolis/andor_sdk/andor_utils.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2020-02-02T10:27:56.000Z
|
2020-05-05T12:36:39.000Z
|
labscript_devices/AndorSolis/andor_sdk/andor_utils.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 43
|
2020-05-12T20:34:36.000Z
|
2022-03-29T21:47:29.000Z
|
labscript_devices/AndorSolis/andor_sdk/andor_utils.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 23
|
2020-05-31T03:15:59.000Z
|
2022-02-25T14:36:45.000Z
|
import numpy as np
import time
from .andor_solis import *
from .andor_capabilities import *
from zprocess import rich_print
s, ms, us, ns = 1.0, 1e-3, 1e-6, 1e-9
class AndorCam(object):
default_acquisition_attrs = {
'acquisition': 'single',
'emccd': False,
'emccd_gain': 50,
'preamp': False,
'preamp_gain': 1.0,
'exposure_time': 20 * ms,
'shutter_output': 'low',
'int_shutter_mode': 'auto',
'ext_shutter_mode': 'auto',
'shutter_t_open': 100,
'shutter_t_close': 100,
'readout': 'full_image',
'crop': False,
'trigger': 'internal',
'trigger_edge': 'rising',
'number_accumulations': 1,
'accumulation_period': 3 * ms,
'number_kinetics': 1,
'kinetics_period': 30 * ms,
'xbin': 1,
'ybin': 1,
'center_row': None,
'height': 1024,
'width': 1024,
'left_start': 1,
'bottom_start': 1,
'v_offset': 0,
'acquisition_timeout': 5 / ms,
'cooldown': False,
'water_cooling': False,
'temperature': 20,
}
def __init__(self, name='andornymous'):
""" Methods of this class pack the sdk functions
and define more convenient functions to carry out
an acquisition """
# WhoamI?
self.name = name
# Do I want to know everything about you? Set this
# attribute to True if you want to print a bunch of
# information and configuration properties.
self.chatty = True
# State
self.cooling = False
self.preamp = False
self.emccd = False
self.emccd_gain = None
self.armed = False
self.initialize_camera()
    def initialize_camera(self):
        """ Initialization function; starts communication with the
        sensor, pulling information such as capabilities, which are
        useful for further acquisition settings """
        rich_print('Connecting to camera...', color='yellow')
        # Start the SDK session; must precede every other Get*/Set* call.
        Initialize()
        self.serial_number = GetCameraSerialNumber()
        # Pull model and other capabilities struct
        self.check_capabilities()
        # Pull hardware attributes
        self.head_name = GetHeadModel()
        self.x_size, self.y_size = GetDetector()
        self.x_pixel_size, self.y_pixel_size = GetPixelSize()
        self.hardware_version = GetHardwareVersion()
        # Pull software attributes
        self.software_version = GetSoftwareVersion()
        if self.chatty:
            rich_print(
                "Software version: ", self.software_version, color='cornflowerblue'
            )
        # Pull important capability ranges
        self.temperature_range = GetTemperatureRange()
        self.emccd_gain_range = GetEMGainRange()
        self.number_of_preamp_gains = GetNumberPreAmpGains()
        # Preamp gains are indexed 0..N-1; record the (first, last) gain
        # values as the advertised range.
        self.preamp_gain_range = (
            GetPreAmpGain(0),
            GetPreAmpGain(self.number_of_preamp_gains - 1),
        )
    def check_capabilities(self):
        """ Do checks based on the _AC dict

        Pulls the AndorCapabilities struct from the SDK and decodes each
        bitfield through the helpers in andor_capabilities, caching the
        results as instance attributes.  When ``chatty`` is set, the
        decoded capabilities are also pretty-printed.
        """
        # Pull the hardware noted capabilities
        self.andor_capabilities = GetCapabilities()
        self.model = camera_type.get_type(self.andor_capabilities.ulCameraType)
        self.acq_caps = acq_mode.check(self.andor_capabilities.ulAcqModes)
        self.read_caps = read_mode.check(self.andor_capabilities.ulReadModes)
        self.trig_caps = trigger_mode.check(self.andor_capabilities.ulTriggerModes)
        self.pixmode = pixel_mode.check(self.andor_capabilities.ulPixelMode)
        self.setfuncs = set_functions.check(self.andor_capabilities.ulSetFunctions)
        self.getfuncs = get_functions.check(self.andor_capabilities.ulGetFunctions)
        self.features = features.check(self.andor_capabilities.ulFeatures)
        self.emgain_caps = em_gain.check(self.andor_capabilities.ulEMGainCapability)
        if self.chatty:
            rich_print(f"Camera Capabilities", color='cornflowerblue')
            rich_print(f"	acq_caps: {self.acq_caps}", color='lightsteelblue')
            rich_print(f"	read_caps: {self.read_caps}", color='lightsteelblue')
            rich_print(f"	trig_caps: {self.trig_caps}", color='lightsteelblue')
            rich_print(f"	pixmode: {self.pixmode}", color='lightsteelblue')
            rich_print(f"	model: {self.model}", color='goldenrod')
            rich_print(f"	set funcs: {self.setfuncs}", color='firebrick')
            rich_print(f"	get funcs: {self.getfuncs}", color='firebrick')
            rich_print(f"	features: {self.features}", color='lightsteelblue')
            rich_print(f"	emgain_caps: {self.emgain_caps}", color='lightsteelblue')
    def enable_cooldown(
        self, temperature_setpoint=20, water_cooling=False, wait_until_stable=False
    ):
        """ Calls all the functions relative to temperature control
        and stabilization. Enables cooling down, waits for stabilization
        and finishes when the status first gets a stabilized setpoint

        temperature_setpoint : target sensor temperature in C, validated
            against the hardware-reported temperature_range.
        water_cooling : selects SDK fan mode 2 — presumably fan off when
            a water-cooling loop removes the heat; confirm with SDK docs.
        wait_until_stable : if True, block (polling every 10 s) until the
            SDK reports the setpoint reached and then stabilized.
        """
        if (
            temperature_setpoint < self.temperature_range[0]
            or temperature_setpoint > self.temperature_range[1]
        ):
            raise ValueError(
                f"""Invalid temperature setpoint; supported range is
                {self.temperature_range} C"""
            )
        # Set the thermal timeout to several seconds (realistic
        # thermalization will happen over this timescale)
        thermal_timeout = 10 * s
        # When cooling down, set fan depending on the cooling reservoir
        if water_cooling:
            SetFanMode(2)
        else:
            # Fan - low speed to minimize mechanical noise
            SetFanMode(1)
        # Set temperature and enable TEC
        SetTemperature(temperature_setpoint)
        CoolerON()
        # Pull initial temperature and cooling status
        self.temperature, self.temperature_status = GetTemperatureF()
        # Wait until stable
        if wait_until_stable:
            # Two phases: first wait for the setpoint to be reached at
            # all, then wait for the controller to report it stabilized.
            # Both poll the SDK status string every thermal_timeout.
            while 'TEMP_NOT_REACHED' in self.temperature_status:
                if self.chatty:
                    print(f"Temperature not reached: T = {self.temperature}")
                time.sleep(thermal_timeout)
                self.temperature, self.temperature_status = GetTemperatureF()
            while 'TEMP_STABILIZED' not in self.temperature_status:
                if self.chatty:
                    print(f"Temperature not stable: T = {self.temperature}")
                time.sleep(thermal_timeout)
                self.temperature, self.temperature_status = GetTemperatureF()
        self.cooling = True
        # Always return to ambient temperature on Shutdown
        SetCoolerMode(0)
def enable_preamp(self, preamp_gain):
""" Calls all the functions relative to the
preamplifier gain control. """
if not preamp_gain in np.linspace(self.preamp_gain_range[0],
self.preamp_gain_range[-1],
self.number_of_preamp_gains):
raise ValueError(f"Invalid preamp gain value..."+
f"valid range is {self.preamp_gain_range}")
# Get all preamp options, match and set
preamp_options = list(
[GetPreAmpGain(index) for index in range(self.number_of_preamp_gains)]
)
SetPreAmpGain(preamp_options.index(preamp_gain))
self.preamp_gain = preamp_gain
self.preamp = True
def enable_emccd(self, emccd_gain):
""" Calls all the functions relative to the
emccd gain control. """
if not emccd_gain in self.emccd_gain_range:
raise ValueError(
f"""Invalid emccd gain value, valid range is {self.emccd_gain_range}"""
)
if not self.cooling:
raise ValueError(
f"""Please enable the temperature control by setting the acquisition
attribute 'cooldown' to 'True' before enabling the EMCCD gain, as this
will prolong the lifetime of the sensor"""
)
SetEMCCDGain(emccd_gain)
self.emccd_gain = GetEMCCDGain()
self.emccd = True
    def setup_vertical_shift(self, custom_option=1):
        """ Calls the functions needed to adjust the vertical
        shifting speed on the sensor for a given acquisition

        NOTE(review): reads self.acquisition_mode, so setup_acquisition()
        must have set it before this runs — confirm call order.
        """
        # Sets to the slowest one by default to mitigate noise
        # unless the acquisition has been explicitly chosen
        # to be in fast kinetics mode, for which custom methods
        # are used and a custom_option shifts between available
        # speeds, 0 is fastest, 3 is slowest.
        # Example shift speed options from an iXon 888 USB (by E.A.)
        # 0 --> 0.6 us/pix
        # 1 --> 1.13 us/pix
        # 2 --> 2.2 us/pix
        # 3 --> 4.3 us/pix
        # Fastest default vertical shifting speed choice is left to hardware
        if custom_option is None:
            self.index_vs_speed, self.vs_speed = GetFastestRecommendedVSSpeed()
            SetVSSpeed(self.index_vs_speed)
        else:
            # Caller picked a speed index explicitly; validate it against
            # the count the SDK reports for the relevant mode.
            self.index_vs_speed = custom_option
            # For FastKinetics mode the calls are different
            if 'fast_kinetics' in self.acquisition_mode:
                number_fkvs_speeds = GetNumberFKVShiftSpeeds()
                if not custom_option in range(number_fkvs_speeds):
                    raise ValueError("Invalid vertical shift speed custom option value")
                SetFKVShiftSpeed(custom_option)
                self.vs_speed = GetFKVShiftSpeedF(custom_option)
            else:
                n_available_vertical_speeds = GetNumberVSSpeeds()
                if not custom_option in range(n_available_vertical_speeds):
                    raise ValueError("Invalid custom option for vertical shift speed")
                self.vs_speed = GetVSSpeed(custom_option)
                SetVSSpeed(self.index_vs_speed)
        # For the fastest shifting speed, the clock voltage amp needs
        # to be adjusted to prevent smearing during readout. Check the
        # hardware documentation to change this accordingly.
        # Default corresponds to an iXon 888 USB:
        if custom_option == 0:
            SetVSAmplitude(3)
def setup_horizontal_shift(self, custom_option=None):
""" Calls the functions needed to adjust the horizontal
shifting speed on the sensor for a given acquisition"""
# Sets to the fastest one by default to reduce download time
# but this probably plays down on the readout noise
intermediate_speed, self.index_hs_speed, ad_number = 0, 0, 0
for channel in range(GetNumberADChannels()):
n_allowed_speeds = GetNumberHSSpeeds(channel, 0)
for speed_index in range(n_allowed_speeds):
speed = GetHSSpeed(channel, 0, speed_index)
if speed > intermediate_speed:
intermediate_speed = speed
self.index_hs_speed = speed_index
ad_number = channel
self.hs_speed = intermediate_speed
SetADChannel(ad_number)
SetHSSpeed(0, self.index_hs_speed)
# Get actual horizontal shifting (i.e. digitization) speed
self.horizontal_shift_speed = GetHSSpeed(ad_number, 0, self.index_hs_speed)
    def setup_acquisition(self, added_attributes=None):
        """ Main acquisition configuration method.

        Merges ``added_attributes`` over ``self.default_acquisition_attrs``,
        then configures preamp/EMCCD gain, cooling, trigger, shift speeds,
        acquisition mode, exposure, shutter and readout. Finally reads back
        the actual hardware timings and arms the camera
        (``self.armed = True``).

        Args:
            added_attributes: dict of acquisition attributes that override
                the defaults (e.g. 'exposure_time', 'acquisition',
                'trigger'); None means use defaults unchanged.
        """
        if added_attributes is None:
            added_attributes = {}
        # Override default acquisition attrs with added ones
        self.acquisition_attributes = self.default_acquisition_attrs.copy()
        self.acquisition_attributes.update(added_attributes)
        self.acquisition_mode = self.acquisition_attributes['acquisition']
        # Optional gain / cooling stages, gated by the corresponding flags
        if self.acquisition_attributes['preamp']:
            self.enable_preamp(self.acquisition_attributes['preamp_gain'])
        if self.acquisition_attributes['emccd']:
            self.enable_emccd(self.acquisition_attributes['emccd_gain'])
        if self.acquisition_attributes['cooldown']:
            self.enable_cooldown(
                self.acquisition_attributes['temperature'],
                self.acquisition_attributes['water_cooling'],
                wait_until_stable = True,
            )
        # Get current temperature and temperature status
        self.temperature, self.temperature_status = GetTemperatureF()
        if self.chatty:
            rich_print(
                f"""At setup_acquisition the temperature is:
                {self.temperature}; with status {self.temperature_status}""",
                color='magenta',
            )
        # SDK integer codes for the available acquisition modes
        modes = {
            'single': 1,
            'accumulate': 2,
            'kinetic_series': 3,
            'fast_kinetics': 4,
            'run_till_abort': 5,
        }
        self.setup_trigger(**self.acquisition_attributes)
        # Configure horizontal shifting (serial register clocks)
        self.setup_horizontal_shift()
        # Configure vertical shifting (image and storage area clocks)
        self.setup_vertical_shift()
        SetAcquisitionMode(modes[self.acquisition_mode])
        # Dispatch to the mode-specific configuration method
        if 'accumulate' in self.acquisition_mode:
            self.configure_accumulate(**self.acquisition_attributes)
        elif 'kinetic_series' in self.acquisition_mode:
            self.configure_kinetic_series(**self.acquisition_attributes)
        elif 'fast_kinetics' in self.acquisition_mode:
            self.configure_fast_kinetics(**self.acquisition_attributes)
        elif 'run_till_abort' in self.acquisition_mode:
            self.configure_run_till_abort(**self.acquisition_attributes)
        # Set exposure time, note that this may be overridden
        # by the readout, trigger or shutter timings thereafter
        SetExposureTime(self.acquisition_attributes['exposure_time'])
        self.setup_shutter(**self.acquisition_attributes)
        self.setup_readout(**self.acquisition_attributes)
        # Read back the timings the hardware actually applied
        self.exposure_time, self.accum_timing, self.kinetics_timing = GetAcquisitionTimings()
        if 'fast_kinetics' in self.acquisition_mode:
            self.exposure_time = GetFKExposureTime()
        # Arm sensor
        self.armed = True
        self.keepClean_time = GetKeepCleanTime()
        # Note: The GetReadOutTime call breaks in FK mode for unknown reasons
        if 'fast_kinetics' not in self.acquisition_mode:
            self.readout_time = GetReadOutTime()
        else:
            # Made up number, somehow FK doesn't work with GetReadOutTime()
            self.readout_time = 1000.0
def configure_accumulate(self, **attrs):
""" Takes a sequence of single scans and adds them together """
SetNumberAccumulations(attrs['number_accumulations'])
# In External Trigger mode the delay between each scan making up
# the acquisition is not under the control of the Andor system but
# is synchronized to an externally generated trigger pulse.
if 'internal' in attrs['trigger']:
SetAccumulationCycleTime(attrs['accumulation_period'])
def configure_kinetic_series(self, **attrs):
""" Captures a sequence of single scans, or possibly, depending on
the camera, a sequence of accumulated scans """
SetNumberKinetics(attrs['number_kinetics'])
if attrs['trigger'] == 'internal' and attrs['number_kinetics'] > 1:
SetKineticCycleTime(attrs['kinetics_period'])
# Setup accumulations for the series if necessary
if attrs['number_accumulations'] > 1:
self.configure_accumulate(**attrs)
else:
SetNumberAccumulations(1)
def configure_fast_kinetics(self, **attrs):
""" Special readout mode that uses the actual sensor as a temporary
storage medium and allows an extremely fast sequence of images to be
captured """
fk_modes = {'FVB': 0, 'full_image': 4}
self.number_fast_kinetics = self.acquisition_attributes['number_kinetics']
if 'exposed_rows' not in attrs.keys():
# Assume that fast kinetics series fills CCD maximally,
# and compute the number of exposed rows per exposure
exposed_rows = int(self.y_size / attrs['number_kinetics'])
else:
exposed_rows = attrs['exposed_rows']
SetFastKineticsEx(
exposed_rows,
attrs['number_kinetics'],
attrs['exposure_time'],
fk_modes[attrs['readout']],
attrs['xbin'],
attrs['ybin'],
attrs['v_offset'],
)
def configure_run_till_abort(self, **attrs):
""" Continually performs scans of the CCD until aborted """
if 'internal' in attrs['trigger']:
SetKineticCycleTime(0)
else:
raise Exception("Can't run_till_abort mode if external trigger")
def setup_trigger(self, **attrs):
""" Sets different aspects of the trigger"""
# Available modes
modes = {
'internal': 0,
'external': 1,
'external_start': 6,
'external_exposure': 7,
}
edge_modes = {'rising': 0, 'falling': 1}
# TODO: Maybe check the self.trig_caps attribute here
# before attempting to set a trigger mode that may not
# even be present.
SetTriggerMode(modes[attrs['trigger']])
# Specify edge if invertible trigger capability is present
if 'INVERT' in self.trig_caps:
SetTriggerInvert(edge_modes[attrs['trigger_edge']])
if attrs['trigger'] == 'external':
SetFastExtTrigger(1)
def setup_shutter(self, **attrs):
""" Sets different aspects of the shutter and exposure"""
# Available modes
modes = {
'auto': 0,
'perm_open': 1,
'perm_closed': 2,
'open_FVB_series': 4,
'open_any_series': 5,
}
shutter_outputs = {'low': 0, 'high': 1}
# TODO: Add SetShutterEX support for labscript
SetShutter(
shutter_outputs[attrs['shutter_output']],
modes[attrs['int_shutter_mode']],
attrs['shutter_t_close'] + int(round(attrs['exposure_time'] / ms)),
attrs['shutter_t_open'],
)
    def setup_readout(self, **attrs):
        """ Sets different aspects of the data readout, including
        image shape, readout mode and amplification during readout options.

        Reads from attrs: 'readout', 'height', 'width', 'xbin', 'ybin',
        'left_start', 'bottom_start', and for single-track readout also
        'center_row'. Sets ``self.image_shape`` to
        (height // ybin, width // xbin).
        """
        # SDK integer codes for the available readout modes
        modes = {
            'FVB': 0,
            'multi_track': 1,
            'random_track': 2,
            'single_track': 3,
            'full_image': 4,
        }
        SetReadMode(modes[attrs['readout']])
        if attrs['readout'] == 'single_track':
            SetSingleTrack(attrs['center_row'], attrs['height'])
        # For full vertical binning setup a 1d-array shape.
        # NOTE(review): FVB forces width=1, making image_shape
        # (height // ybin, 1 // xbin) -- confirm this is the intended layout.
        if attrs['readout'] == 'FVB':
            attrs['width'] = 1
        self.image_shape = (
            attrs['height'] // attrs['ybin'],
            attrs['width'] // attrs['xbin'],
        )
        # For a full-frame kinetic series, we simply set the frame for readout.
        # If we use crop mode, we need to enable the second output amplifier since
        # crop mode is only available then. The default EM gain mode is 0, where
        # the gain is controlled by internal DAC settings in the default range (see
        # self.emgain_caps). For higher ranges use SetEMGainMode(). We also need to
        # enable the frame transfer mode.
        if self.acquisition_mode == 'kinetic_series':
            if self.acquisition_attributes['crop']:
                SetOutputAmplifier(0)
                SetFrameTransferMode(1)
                SetIsolatedCropModeEx(
                    int(1),
                    int(attrs['height']),
                    int(attrs['width']),
                    attrs['ybin'],
                    attrs['xbin'],
                    attrs['left_start'],
                    attrs['bottom_start'],
                )
            else:
                # Crop disabled: frame transfer off and crop mode explicitly inactive
                SetFrameTransferMode(0)
                SetIsolatedCropModeEx(
                    int(0),
                    int(attrs['height']),
                    int(attrs['width']),
                    attrs['ybin'],
                    attrs['xbin'],
                    attrs['left_start'],
                    attrs['bottom_start'],
                )
        # Define the binned readout region of interest on the sensor
        SetImage(
            attrs['xbin'],
            attrs['ybin'],
            attrs['left_start'],
            attrs['width'] + attrs['left_start'] - 1,
            attrs['bottom_start'],
            attrs['height'] + attrs['bottom_start'] - 1,
        )
    def acquire(self):
        """ Carries down the acquisition, if the camera is armed, and
        waits for the acquisition to finish for up to
        ``acquisition_attributes['acquisition_timeout']`` milliseconds.

        Raises:
            Exception: if the camera has not been armed via setup_acquisition.
            AndorException: if the acquisition did not return to idle before
                the timeout; the acquisition is aborted in that case.
        """
        acquisition_timeout = self.acquisition_attributes['acquisition_timeout']
        def homemade_wait_for_acquisition():
            # Polls the driver status every 50 ms until it reports DRV_IDLE
            # or the timeout (given in ms) elapses.
            self.acquisition_status = ''
            start_wait = time.time()
            while self.acquisition_status != 'DRV_IDLE':
                self.acquisition_status = GetStatus()
                # TODO: Also count the number of acquired (buffered) images and
                # stop when it matches the expected number of frames.
                t0 = time.time() - start_wait
                if t0 > acquisition_timeout * ms:
                    rich_print(
                        "homemade_wait_for_acquisition: timeout occured",
                        color='firebrick',
                    )
                    break
                time.sleep(0.05)
            if self.chatty:
                rich_print(
                    f"Leaving homemade_wait with status {self.acquisition_status} ",
                    color='goldenrod',
                )
                rich_print(
                    f"homemade_wait_for_acquisition: elapsed time {t0/ms} ms, out of max {acquisition_timeout} ms",
                    color='goldenrod',
                )
        if not self.armed:
            raise Exception("Cannot start acquisition until armed")
        else:
            self.acquisition_status = GetStatus()
            # Only start when the driver is idle; otherwise fall through to
            # the final status check below
            if 'DRV_IDLE' in self.acquisition_status:
                StartAcquisition()
                if self.chatty:
                    rich_print(
                        f"Waiting for {acquisition_timeout} ms for timeout ...",
                        color='yellow',
                    )
                homemade_wait_for_acquisition()
        # Last chance, check if the acquisition is finished, update
        # acquisition status otherwise, abort and raise an error
        self.acquisition_status = GetStatus()
        self.armed = False
        if self.acquisition_status != 'DRV_IDLE':
            AbortAcquisition()
            raise AndorException('Acquisition aborted due to timeout')
    def download_acquisition(self):
        """ Download buffered acquisition. For fast kinetics, returns a 3D
        array of shape (N_fast_kinetics, Ny//N, Nx). Otherwise, returns array
        of shape (N_exposures, Ny, Nx).

        If the circular buffer does not hold exactly the expected number of
        images, an all-zeros array of the expected shape is returned instead.
        """
        N = self.acquisition_attributes['number_kinetics']
        # In FK mode the sensor rows are split among the N sub-exposures
        if 'fast_kinetics' in self.acquisition_mode:
            shape = (N, self.image_shape[0] // N, self.image_shape[1])
        else:
            shape = (N, self.image_shape[0], self.image_shape[1])
        # self.abort_acquisition # This seems like a bad thing to do here...
        # Lets see what we have in memory.
        # NOTE(review): available_images is indexed [0]/[1] below --
        # presumably a (first_index, last_index) pair; confirm against the SDK.
        available_images = GetNumberAvailableImages()
        if self.chatty:
            print(
                f"Number of available images in the circular buffer is {available_images}."
            )
        if (available_images[1] - available_images[0]) + 1 == N:
            # Special save format for FK frames
            if 'fast_kinetics' in self.acquisition_mode:
                data = GetAcquiredData(shape)
                if self.chatty:
                    print(
                        f"Shape of the downloaded images in FK mode are {data.shape}"
                    )
            # Regular save format for other acquisition modes
            else:
                print("Shape passed to GetAcquiredData is", shape)
                data = GetAcquiredData(shape)
                # data = GetOldestImage16(shape)
                print("Data shape and dtype is:", data.shape, data.dtype)
        else:
            # Buffer count mismatch: warn and return a zero-filled placeholder
            print(
                f"""------> Incorrect number of images to download:
                {available_images}, expecting: {N}."""
            )
            data = np.zeros(shape)
        # Optional clear buffer
        # FreeInternalMemory()
        return data
def abort_acquisition(self):
"""Abort"""
if self.chatty:
rich_print("Debug: Abort Called", color='yellow')
AbortAcquisition()
def shutdown(self):
""" Shuts camera down, if unarmed """
if self.armed:
raise ValueError(
"""Cannot shutdown while the camera is armed, please
finish or abort the current acquisition before shutdown"""
)
else:
ShutDown()
if __name__ == '__main__':
    # Fix: was ``if __name__ in '__main__':`` -- a substring-membership test,
    # which would also be true for e.g. __name__ == 'main' or '_'.
    # Equality is the correct script-guard idiom.
    pass
    # cam = AndorCam()
    # # First test should arm with default attrs and go
    # cam.setup_acquisition(added_attributes={'exposure_time':25*ms,})
    # cam.acquire()
    # single_acq_image = cam.download_acquisition()
    # # Second test, configure 3-shot kinetic series, trigger internally,
    # # sequence is similar to absorption imaging series
    # internal_kinetics_attrs = {
    # 'exposure_time':20*ms,
    # 'acquisition':'kinetic_series',
    # 'number_kinetics':3,
    # 'kinetics_period':20*ms,
    # 'readout':'full_image',
    # 'int_shutter_mode':'perm_open',
    # }
    # cam.setup_acquisition(internal_kinetics_attrs)
    # cam.acquire()
    # kinetics_series_images = cam.download_acquisition()
    # # Third test, 10-shot fast kinetics, internal trigger and no binning.
    # fast_kinetics_attrs = {
    # 'exposure_time':1*ms,
    # 'acquisition':'fast_kinetics',
    # 'number_kinetics':16,
    # 'readout_shape':(1, cam.x_size, cam.y_size),
    # 'readout':'full_image',
    # 'int_shutter_mode':'perm_open',
    # }
    # cam.setup_acquisition(fast_kinetics_attrs)
    # cam.snap()
    # fast_kinetics_image = cam.grab_acquisition()
    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.imshow(single_acq_image[0], cmap='seismic')
    # plt.figure()
    # ax = plt.subplot(311)
    # ax.imshow(kinetics_series_images[0], cmap='seismic')
    # ax = plt.subplot(312)
    # ax.imshow(kinetics_series_images[1], cmap='seismic')
    # ax = plt.subplot(313)
    # ax.imshow(kinetics_series_images[2], cmap='seismic')
    # plt.figure()
    # plt.imshow(fast_kinetics_image[0], cmap='seismic')
| 38.574616
| 115
| 0.596934
|
4a0c50a47aaca582243e6cd75df9cba4c00c4478
| 2,972
|
py
|
Python
|
plugins/custom_thumbnail.py
|
AJTITAN23/AJURL
|
33143368ecf39500af2484d9bd654b65bfc114cd
|
[
"MIT"
] | 1
|
2021-06-13T02:25:16.000Z
|
2021-06-13T02:25:16.000Z
|
plugins/custom_thumbnail.py
|
AJTITAN23/AJURL
|
33143368ecf39500af2484d9bd654b65bfc114cd
|
[
"MIT"
] | null | null | null |
plugins/custom_thumbnail.py
|
AJTITAN23/AJURL
|
33143368ecf39500af2484d9bd654b65bfc114cd
|
[
"MIT"
] | null | null | null |
import os
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from pyrogram import Client, filters
from translation import Translation
import database.database as sql
from database.database import *
@Client.on_message(filters.private & filters.photo)
async def save_photo(bot, update):
    """Handler for private photos: saves the photo as the user's custom
    thumbnail.

    Albums (media groups) are downloaded into a per-group directory; single
    photos are saved as ``<user_id>.jpg``. In both cases the message id is
    recorded via ``sql.df_thumb``. A confirmation message is only sent for
    the single-photo case.
    """
    if update.media_group_id is not None:
        # album is sent
        download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + "/" + str(update.media_group_id) + "/"
        # create download directory, if not exist
        if not os.path.isdir(download_location):
            os.makedirs(download_location)
        await sql.df_thumb(update.from_user.id, update.message_id)
        await bot.download_media(
            message=update,
            file_name=download_location
        )
    else:
        # received single photo
        download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + ".jpg"
        await sql.df_thumb(update.from_user.id, update.message_id)
        await bot.download_media(
            message=update,
            file_name=download_location
        )
        await bot.send_message(
            chat_id=update.chat.id,
            text="**Thumbnail saved successfully**",
            reply_to_message_id=update.message_id
        )
@Client.on_message(filters.private & filters.command(["delthumb"]))
async def delete_thumbnail(bot, update):
    """/delthumb handler: deletes the user's saved custom thumbnail
    (the DB record and the cached file) and sends a confirmation."""
    thumb_image_path = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + ".jpg"
    try:
        await sql.del_thumb(update.from_user.id)
        os.remove(thumb_image_path)
    except Exception:
        # Best-effort cleanup: a missing DB row or file is not an error.
        # Fix: was a bare ``except:``, which inside a coroutine also swallows
        # asyncio.CancelledError (and KeyboardInterrupt), breaking task
        # cancellation.
        pass
    await bot.send_message(
        chat_id=update.chat.id,
        text="**✅ Custom Thumbnail cleared succesfully @ajmovieweb**",
        reply_to_message_id=update.message_id
    )
@Client.on_message(filters.private & filters.command(["showthumb"]))
async def show_thumb(bot, update):
    """/showthumb handler: sends the user's saved custom thumbnail back to
    them, re-downloading it from the stored Telegram message if the cached
    file is missing; replies "no thumbnail found" when there is none."""
    thumb_image_path = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + ".jpg"
    if not os.path.exists(thumb_image_path):
        # No cached file: try to re-fetch it via the stored message id
        mes = await thumb(update.from_user.id)
        if mes is not None:  # fix: was ``mes != None``
            m = await bot.get_messages(update.chat.id, mes.msg_id)
            await m.download(file_name=thumb_image_path)
            # fix: removed no-op ``thumb_image_path = thumb_image_path``
        else:
            thumb_image_path = None
    if thumb_image_path is not None:
        try:
            await bot.send_photo(
                chat_id=update.chat.id,
                photo=thumb_image_path
            )
        except Exception:
            # Fix: narrowed from a bare ``except:`` which would also swallow
            # asyncio.CancelledError; send failures are still ignored.
            pass
    else:
        # fix: was ``elif thumb_image_path is None`` -- redundant condition
        await bot.send_message(
            chat_id=update.chat.id,
            text="no thumbnail found",
            reply_to_message_id=update.message_id
        )
| 33.022222
| 126
| 0.64031
|
4a0c50e3502d046a6c085614745de79c6b9ab202
| 113,883
|
py
|
Python
|
hops/featurestore_impl/core.py
|
davitbzh/hops-util-py
|
f4c150d71e690b608c1cae10542a3f6bbb13f1ac
|
[
"Apache-2.0"
] | 1
|
2020-07-28T11:20:21.000Z
|
2020-07-28T11:20:21.000Z
|
hops/featurestore_impl/core.py
|
davitbzh/hops-util-py
|
f4c150d71e690b608c1cae10542a3f6bbb13f1ac
|
[
"Apache-2.0"
] | null | null | null |
hops/featurestore_impl/core.py
|
davitbzh/hops-util-py
|
f4c150d71e690b608c1cae10542a3f6bbb13f1ac
|
[
"Apache-2.0"
] | null | null | null |
"""
Featurestore Core Implementation
Module hierarchy of featurestore implementation:
- featurestore
|
--- core
|
----dao
----exceptions
----query_planner
----rest
----util
----featureframes
----visualizations
"""
import json
from hops import constants, util, hdfs, tls
from hops.featurestore_impl.dao.common.featurestore_metadata import FeaturestoreMetadata
from hops.featurestore_impl.dao.datasets.training_dataset import TrainingDataset
from hops.featurestore_impl.dao.featuregroups.featuregroup import Featuregroup
from hops.featurestore_impl.dao.stats.statistics import Statistics
from hops.featurestore_impl.dao.storageconnectors.jdbc_connector import JDBCStorageConnector
from hops.featurestore_impl.exceptions.exceptions import FeaturegroupNotFound, HiveDatabaseNotFound, \
TrainingDatasetNotFound, CouldNotConvertDataframe, TFRecordSchemaNotFound, FeatureDistributionsNotComputed, \
FeatureCorrelationsNotComputed, FeatureClustersNotComputed, DescriptiveStatisticsNotComputed, HiveNotEnabled, \
StorageConnectorNotFound, CannotInsertIntoOnDemandFeatureGroup, CannotUpdateStatisticsOfOnDemandFeatureGroup, \
CannotGetPartitionsOfOnDemandFeatureGroup, StorageConnectorTypeNotSupportedForFeatureImport, \
CannotDisableOnlineFeatureServingForOnDemandFeatureGroup, CannotEnableOnlineFeatureServingForOnDemandFeatureGroup, \
OnlineFeaturestoreNotEnabled
from hops.featurestore_impl.featureframes.FeatureFrame import FeatureFrame
from hops.featurestore_impl.query_planner import query_planner
from hops.featurestore_impl.query_planner.f_query import FeatureQuery, FeaturesQuery
from hops.featurestore_impl.query_planner.fg_query import FeaturegroupQuery
from hops.featurestore_impl.query_planner.logical_query_plan import LogicalQueryPlan
from hops.featurestore_impl.rest import rest_rpc
from hops.featurestore_impl.util import fs_utils
from hops.featurestore_impl.visualizations import statistics_plots
from hops.featurestore_impl.online_featurestore import online_featurestore
# for backwards compatibility
try:
import h5py
except:
pass
# in case importing in %%local
try:
from pyspark.sql import SQLContext
from pyspark.sql.utils import AnalysisException
from py4j.java_gateway import java_import
except:
pass
metadata_cache = None
def _get_featurestore_id(featurestore):
    """
    Looks up the numeric id of a feature store.
    (Temporary workaround until HOPSWORKS-860, after which resources are
    referred to by name.)

    Args:
        :featurestore: the featurestore to get the id for

    Returns:
        the id of the feature store
    """
    # Refresh the module-level metadata cache when missing or stale
    cache_is_stale = metadata_cache is None or featurestore != metadata_cache.featurestore
    if cache_is_stale:
        _get_featurestore_metadata(featurestore, update_cache=True)
    return metadata_cache.featurestore.id
def _use_featurestore(spark, featurestore=None):
    """
    Switches the active Hive database in Spark to the given feature store.

    Args:
        :spark: the spark session
        :featurestore: the name of the database, defaults to the project's featurestore

    Returns:
        None

    Raises:
        :HiveDatabaseNotFound: when no hive database with the provided featurestore name exist
    """
    # Fall back to this project's feature store when none is given
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    try:
        _run_and_log_sql(spark, "use " + featurestore)
    except AnalysisException:
        raise HiveDatabaseNotFound((
            "A hive database for the featurestore {} was not found, have you enabled the "
            "featurestore service in your project?".format(
                featurestore)))
def _get_featurestore_metadata(featurestore=None, update_cache=False):
    """
    Fetches (and caches in the module-level ``metadata_cache``) the full
    metadata of a feature store — feature groups and training datasets —
    via a REST call to the Hopsworks app service.

    Args:
        :featurestore: the name of the database, defaults to the project's featurestore
        :update_cache: if true the cache is updated

    Returns:
        feature store metadata object
    """
    global metadata_cache
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    # Only hit the REST API when forced to, or when nothing is cached yet
    if update_cache or metadata_cache is None:
        raw_response = rest_rpc._get_featurestore_metadata(featurestore)
        metadata_cache = FeaturestoreMetadata(raw_response)
    return metadata_cache
def _convert_field_to_feature_json(field_dict, primary_key, partition_by, online=False, online_types = None):
    """
    Helper function that converts a field in a spark dataframe to a feature dict that is compatible with the
    featurestore API

    Args:
        :field_dict: the dict of spark field to convert
        :primary_key: list of the names of the primary key
        :partition_by: a list of columns to partition_by, defaults to the empty list
        :online: boolean flag whether the feature is to be used for online serving
        :online_types: dict with feature name --> online_type. If the field name is present in this dict,
                       the type will be taken from the dict rather than inferred from the spark-dataframe-type

    Returns:
        a feature dict that is compatible with the featurestore API
    """
    f_name = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_NAME]
    # Offline (Hive) type is always derived from the spark dtype
    f_type = fs_utils._convert_spark_dtype_to_hive_dtype(field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE])
    # Online (MySQL) type: explicit override wins, otherwise inferred
    if online:
        if online_types is not None and f_name in online_types:
            f_type_online = online_types[f_name]
        else:
            f_type_online = \
                fs_utils._convert_spark_dtype_to_mysql_dtype(field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE])
    else:
        f_type_online = None
    f_desc = ""
    if f_name in primary_key:
        f_primary = True
    else:
        f_primary = False
    # Pick up a description from the spark field metadata if present
    if constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION in field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA]:
        f_desc = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA][
            constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION]
    if f_desc == "":
        f_desc = "-"  # comment must be non-empty
    f_partition = f_name in partition_by
    # Provenance: the feature group (and version) this feature originated from,
    # when recorded in the spark field metadata
    f_fg = ""
    if constants.FEATURE_STORE.TRAINING_DATASET_PROVENANCE_FEATUREGROUP \
            in field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA]:
        f_fg = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA][
            constants.FEATURE_STORE.TRAINING_DATASET_PROVENANCE_FEATUREGROUP]
    f_version = None
    if constants.FEATURE_STORE.TRAINING_DATASET_PROVENANCE_VERSION \
            in field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA]:
        f_version = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA][
            constants.FEATURE_STORE.TRAINING_DATASET_PROVENANCE_VERSION]
    return {
        constants.REST_CONFIG.JSON_FEATURE_NAME: f_name,
        constants.REST_CONFIG.JSON_FEATURE_TYPE: f_type,
        constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION: f_desc,
        constants.REST_CONFIG.JSON_FEATURE_PRIMARY: f_primary,
        constants.REST_CONFIG.JSON_FEATURE_PARTITION: f_partition,
        constants.REST_CONFIG.JSON_FEATURE_ONLINE_TYPE: f_type_online,
        constants.REST_CONFIG.JSON_FEATURE_FEATUREGROUP: f_fg,
        constants.REST_CONFIG.JSON_FEATURE_VERSION: f_version
    }
def _parse_spark_features_schema(spark_schema, primary_key=None, partition_by=None, online=False, online_types=None):
    """
    Helper function for parsing the schema of a spark dataframe into a list of feature-dicts

    Args:
        :spark_schema: the spark schema to parse
        :primary_key: list of the columns in the dataframe that should be the primary key
                      (defaults to no primary-key columns)
        :partition_by: a list of columns to partition by (defaults to no partition columns)
        :online: whether the features are to be used for online serving
        :online_types: a dict with feature_name --> online_type, if a feature is present in this dict,
                       the online_type will be taken from the dict rather than inferred from the spark dataframe.

    Returns:
        A list of the parsed features
    """
    # Fix: the defaults were mutable lists ([]); normalize None instead
    if primary_key is None:
        primary_key = []
    if partition_by is None:
        partition_by = []
    raw_schema = json.loads(spark_schema.json())
    raw_fields = raw_schema[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS]
    # Comprehension instead of map(lambda ...) for readability
    return [
        _convert_field_to_feature_json(field, primary_key, partition_by,
                                       online=online, online_types=online_types)
        for field in raw_fields
    ]
def _compute_dataframe_stats(spark_df, name, version=1, descriptive_statistics=True,
                             feature_correlation=True, feature_histograms=True, cluster_analysis=True,
                             stat_columns=None, num_bins=20, num_clusters=5,
                             corr_method='pearson'):
    """
    Helper function that computes statistics of a featuregroup or training dataset using spark.

    Each of the four statistics sections is best-effort: a failure is logged
    and that section's result is returned as None instead of raising.

    Args:
        :name: the featuregroup or training dataset to update statistics for
        :spark_df: If a spark df is provided it will be used to compute statistics, otherwise the dataframe of the
                   featuregroup will be fetched dynamically from the featurestore
        :version: the version of the featuregroup/training dataset (defaults to 1)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc)
                                 for the featuregroup/training dataset
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the featuregroup/training dataset
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the
                             featuregroup/training dataset
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                           featuregroup/training dataset
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :num_clusters: the number of clusters to use for cluster analysis (k-means)
        :corr_method: the method to compute feature correlation with (pearson or spearman)

    Returns:
        feature_corr_data, desc_stats_data, features_histograms_data, cluster_analysis
        (each element is None when disabled or when its computation failed)
    """
    # Restrict to the requested columns, if any
    if stat_columns:
        spark_df = spark_df.select(stat_columns)
    feature_corr_data = None
    desc_stats_data = None
    features_histograms_data = None
    cluster_analysis_data = None
    spark = util._find_spark()
    _verify_hive_enabled(spark)
    # NOTE(review): this only logs on an empty dataframe and then proceeds;
    # the per-section try/except blocks below will catch any resulting errors
    if spark_df.rdd.isEmpty():
        fs_utils._log("Cannot compute statistics on an empty dataframe, the provided dataframe is empty")
    # --- Descriptive statistics (min/max/mean/etc on numeric columns) ---
    if descriptive_statistics:
        try:
            fs_utils._log("computing descriptive statistics for : {}, version: {}".format(name, version))
            spark.sparkContext.setJobGroup("Descriptive Statistics Computation",
                                           "Analyzing Dataframe Statistics for : {}, version: {}".format(name, version))
            spark_df_filtered = fs_utils._filter_spark_df_numeric(spark_df)
            desc_stats_json = fs_utils._compute_descriptive_statistics(spark_df_filtered)
            desc_stats_data = fs_utils._structure_descriptive_stats_json(desc_stats_json)
            spark.sparkContext.setJobGroup("", "")
        except Exception as e:
            fs_utils._log(
                "Could not compute descriptive statistics for: {}, version: {}, set the optional argument "
                "descriptive_statistics=False to skip this step,\n error: {}".format(
                    name, version, str(e)))
            desc_stats_data = None
    # --- Feature correlation matrix (numeric columns only) ---
    if feature_correlation:
        try:
            fs_utils._log("computing feature correlation for: {}, version: {}".format(name, version))
            spark.sparkContext.setJobGroup("Feature Correlation Computation",
                                           "Analyzing Feature Correlations for: {}, version: {}".format(name, version))
            spark_df_filtered = fs_utils._filter_spark_df_numeric(spark_df)
            pd_corr_matrix = fs_utils._compute_corr_matrix(spark_df_filtered, corr_method=corr_method)
            feature_corr_data = fs_utils._structure_feature_corr_json(pd_corr_matrix.to_dict())
            spark.sparkContext.setJobGroup("", "")
        except Exception as e:
            fs_utils._log(
                "Could not compute feature correlation for: {}, version: {}, set the optional argument "
                "feature_correlation=False to skip this step,\n error: {}".format(
                    name, version, str(e)))
            feature_corr_data = None
    # --- Per-feature histograms (numeric columns only) ---
    if feature_histograms:
        try:
            fs_utils._log("computing feature histograms for: {}, version: {}".format(name, version))
            spark.sparkContext.setJobGroup("Feature Histogram Computation",
                                           "Analyzing Feature Distributions for: {}, version: {}".format(name, version))
            spark_df_filtered = fs_utils._filter_spark_df_numeric(spark_df)
            features_histogram_list = fs_utils._compute_feature_histograms(spark_df_filtered, num_bins)
            features_histograms_data = fs_utils._structure_feature_histograms_json(features_histogram_list)
            spark.sparkContext.setJobGroup("", "")
        except Exception as e:
            fs_utils._log(
                "Could not compute feature histograms for: {}, version: {}, "
                "set the optional argument feature_histograms=False "
                "to skip this step,\n error: {}".format(
                    name, version, str(e)))
            features_histograms_data = None
    # --- K-means cluster analysis (numeric columns only) ---
    if cluster_analysis:
        try:
            fs_utils._log("computing cluster analysis for: {}, version: {}".format(name, version))
            spark.sparkContext.setJobGroup("Feature Cluster Analysis",
                                           "Analyzing Feature Clusters for: {}, version: {}".format(name, version))
            spark_df_filtered = fs_utils._filter_spark_df_numeric(spark_df)
            cluster_analysis_raw = fs_utils._compute_cluster_analysis(spark_df_filtered, num_clusters)
            cluster_analysis_data = fs_utils._structure_cluster_analysis_json(cluster_analysis_raw)
            spark.sparkContext.setJobGroup("", "")
        except Exception as e:
            fs_utils._log(
                "Could not compute cluster analysis for: {}, version: {}, "
                "set the optional argument cluster_analysis=False "
                "to skip this step,\n error: {}".format(
                    name, version, str(e)))
            cluster_analysis_data = None
    return feature_corr_data, desc_stats_data, features_histograms_data, cluster_analysis_data
def _get_featuregroup_id(featurestore, featuregroup_name, featuregroup_version):
    """
    Resolves the id of a feature group from its name and version.
    (Temporary workaround until HOPSWORKS-860 where we use Name to refer to resources.)

    Args:
        :featurestore: the featurestore where the featuregroup belongs
        :featuregroup_name: the featuregroup to get the id for
        :featuregroup_version: the version of the featuregroup

    Returns:
        the id of the featuregroup

    Raises:
        :FeaturegroupNotFound: when the requested featuregroup could not be found in the metadata
    """
    metadata = _get_featurestore_metadata(featurestore, update_cache=False)
    # Refresh the cache if it is empty or belongs to a different feature store
    if metadata is None or featurestore != metadata.featurestore:
        metadata = _get_featurestore_metadata(featurestore, update_cache=True)
    _missing = object()
    fg_id = next(
        (fg.id for fg in metadata.featuregroups.values()
         if fg.name == featuregroup_name and fg.version == featuregroup_version),
        _missing)
    if fg_id is not _missing:
        return fg_id
    raise FeaturegroupNotFound("The featuregroup {} with version: {} "
                               "was not found in the feature store {}".format(featuregroup_name, featuregroup_version,
                                                                              featurestore))
def _do_get_storage_connector(storage_connector_name, featurestore):
    """
    Looks up the metadata of a storage connector given a name

    Args:
        :storage_connector_name: the storage connector name
        :featurestore: the featurestore to query

    Returns:
        the storage connector metadata object

    Raises:
        :StorageConnectorNotFound: when the requested storage connector could not be found in the metadata
    """
    metadata = _get_featurestore_metadata(featurestore, update_cache=False)
    if metadata is None or featurestore != metadata.featurestore:
        metadata = _get_featurestore_metadata(featurestore, update_cache=True)
    try:
        return metadata.storage_connectors[storage_connector_name]
    except KeyError:
        # Fix: the cached metadata may simply be stale -- refresh and retry
        # the lookup. (The original code refreshed the metadata inside the
        # except block but never retried the lookup, so it fell through and
        # implicitly returned None.)
        metadata = _get_featurestore_metadata(featurestore, update_cache=True)
        try:
            return metadata.storage_connectors[storage_connector_name]
        except KeyError:
            # Fix: storage_connectors is a name-keyed dict, so iterating it
            # yields the name strings directly (the original mapped
            # ``sc.name`` over the keys, which would raise AttributeError)
            storage_connector_names = list(metadata.storage_connectors.keys())
            raise StorageConnectorNotFound("Could not find the requested storage connector with name: {} " \
                                           ", among the list of available storage connectors: {}".format(
                storage_connector_name,
                storage_connector_names))
def _do_get_feature(feature, featurestore_metadata, featurestore=None, featuregroup=None, featuregroup_version=1,
                    dataframe_type="spark", jdbc_args=None, online=False):
    """
    Gets a particular feature (column) from a featurestore, if no featuregroup is specified it queries
    hopsworks metastore to see if the feature exists in any of the featuregroups in the featurestore.
    If the user knows which featuregroup contain the feature, it should be specified as it will improve performance
    of the query.

    Args:
        :feature: the feature name to get
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup: (Optional) the featuregroup where the feature resides
        :featuregroup_version: (Optional) the version of the featuregroup
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
        :featurestore_metadata: the metadata of the featurestore to query
        :jdbc_args: jdbc arguments for fetching on-demand feature groups (optional)
        :online: a boolean flag whether to fetch the online feature or the offline one (assuming that the
                 feature group that the feature is stored in has online serving enabled)
                 (for cached feature groups only)

    Returns:
        A spark dataframe with the feature

    Raises:
        :OnlineFeaturestoreNotEnabled: when online serving is requested but not enabled
    """
    # Fix: avoid a mutable default argument ({}); normalize None instead
    if jdbc_args is None:
        jdbc_args = {}
    if online and ((not featurestore_metadata.settings.online_enabled) or
                   (not featurestore_metadata.featurestore.online_enabled)):
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    spark = util._find_spark()
    _verify_hive_enabled(spark)
    _use_featurestore(spark, featurestore)
    spark.sparkContext.setJobGroup("Fetching Feature",
                                   "Getting feature: {} from the featurestore {}".format(feature, featurestore))
    # Plan the query, register any on-demand feature groups as temp tables,
    # then build and run the SQL
    feature_query = FeatureQuery(feature, featurestore_metadata, featurestore, featuregroup, featuregroup_version)
    logical_query_plan = LogicalQueryPlan(feature_query)
    logical_query_plan.create_logical_plan()
    on_demand_featuregroups = list(filter(
        lambda fg: fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type,
        logical_query_plan.featuregroups))
    _register_on_demand_featuregroups_as_temp_tables(on_demand_featuregroups, featurestore, jdbc_args)
    logical_query_plan.construct_sql()
    feature_to_featuregroup_mapping = query_planner._get_feature_featuregroup_mapping(logical_query_plan, featurestore,
                                                                                      featurestore_metadata)
    result = _run_and_log_sql(spark, logical_query_plan.sql_str, online=online, featurestore=featurestore)
    # Attach provenance (origin feature group / version) metadata to the result
    result_w_provenance = fs_utils._add_provenance_metadata_to_dataframe(result, feature_to_featuregroup_mapping)
    spark.sparkContext.setJobGroup("", "")
    return fs_utils._return_dataframe_type(result_w_provenance, dataframe_type)
def _run_and_log_sql(spark, sql_str, online=False, featurestore=None):
    """
    Runs and logs an SQL query with sparkSQL

    Args:
        :spark: the spark session
        :sql_str: the query to run
        :online: if true, run the query using online feature store JDBC connector
        :featurestore: name of the featurestore

    Returns:
        the result of the SQL query
    """
    if not online:
        fs_utils._log("Running sql: {} against offline feature store".format(sql_str))
        return spark.sql(sql_str)
    # Online path: run the query against the online store through the JDBC connector
    fs_utils._log("Running sql: {} against online feature store".format(sql_str))
    metadata = _get_featurestore_metadata(featurestore, update_cache=False)
    online_supported = metadata.settings.online_enabled and metadata.featurestore.online_enabled
    if not online_supported:
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    connector = _do_get_online_featurestore_connector(featurestore, metadata)
    return online_featurestore._read_jdbc_dataframe(spark, connector, "({}) tmp".format(sql_str))
def _write_featuregroup_hive(spark_df, featuregroup, featurestore, featuregroup_version, mode):
    """
    Writes the contents of a spark dataframe to a feature group Hive table

    Args:
        :spark_df: the data to write
        :featuregroup: the featuregroup to write to
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :mode: the write mode (append or overwrite)

    Returns:
        None

    Raises:
        :ValueError: when the provided write mode does not match the supported write modes (append and overwrite)
    """
    # Validate the write mode up-front, before creating any Spark/Hive session state or
    # deleting table contents, so an invalid mode has no side effects at all.
    if mode not in (constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE,
                    constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE):
        raise ValueError(
            "The provided write mode {} does not match "
            "the supported modes: ['{}', '{}']".format(mode,
                                                       constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE,
                                                       constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE))
    spark = util._find_spark()
    _verify_hive_enabled(spark)
    sc = spark.sparkContext
    sqlContext = SQLContext(sc)
    # Enable dynamic partitioning so rows are routed to the correct Hive partitions on insert
    sqlContext.setConf("hive.exec.dynamic.partition", "true")
    sqlContext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")
    spark.sparkContext.setJobGroup("Inserting dataframe into featuregroup",
                                   "Inserting into featuregroup: {} in the featurestore {}".format(featuregroup,
                                                                                                   featurestore))
    _use_featurestore(spark, featurestore)
    tbl_name = fs_utils._get_table_name(featuregroup, featuregroup_version)
    if mode == constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE:
        # Spark's native 'overwrite' would drop and re-create the Hive table, which would also
        # drop all featuregroup metadata due to ON DELETE CASCADE. To simulate "overwrite" we
        # instead ask Hopsworks to clear the table contents (keeping metadata) and then append.
        _delete_table_contents(featurestore, featuregroup, featuregroup_version)
    mode = constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE
    # Specify format hive as it is managed table
    write_format = "hive"  # renamed from 'format' to avoid shadowing the builtin
    # Insert into featuregroup (hive table) with dynamic partitions
    spark_df.write.format(write_format).mode(mode).insertInto(tbl_name)
    spark.sparkContext.setJobGroup("", "")
def _do_insert_into_featuregroup(df, featuregroup_name, featurestore_metadata, featurestore=None,
                                 featuregroup_version=1, mode="append", online=False, offline=True):
    """
    Saves the given dataframe to the specified featuregroup. Defaults to the project-featurestore
    This will append to the featuregroup. To overwrite a featuregroup, create a new version of the featuregroup
    from the UI and append to that table.

    Args:
        :df: the dataframe containing the data to insert into the featuregroup
        :featuregroup_name: the name of the featuregroup (hive table name)
        :featurestore_metadata: metadata of the feature store
        :featurestore: the featurestore to save the featuregroup to (hive database)
        :featuregroup_version: the version of the featuregroup (defaults to 1)
        :mode: the write mode, only 'overwrite' and 'append' are supported
        :online: boolean flag whether to insert the data in the online version of the featuregroup
                 (assuming the featuregroup already has online feature serving enabled)
        :offline boolean flag whether to insert the data in the offline version of the featuregroup

    Returns:
        None

    Raises:
        :CouldNotConvertDataframe: in case the provided dataframe could not be converted to a spark dataframe
    """
    if online and ((not featurestore_metadata.settings.online_enabled) or
                           (not featurestore_metadata.featurestore.online_enabled)):
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    # Fail fast on a no-op request before doing the (potentially expensive) dataframe conversion
    if not online and not offline:
        raise ValueError("online=False and offline=False, nothing to insert. "
                         "Please set online=True to insert data in the online feature group (MySQL), "
                         "and set offline=True to insert in the offline feature group (Hive)")
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    fg = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name, featuregroup_version)
    if fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        raise CannotInsertIntoOnDemandFeatureGroup("The feature group with name: {} , and version: {} "
                                                   "is an on-demand feature group and cannot be inserted into. "
                                                   "Insert operation is only supported for cached feature groups."
                                                   .format(featuregroup_name, featuregroup_version))
    try:
        spark_df = fs_utils._convert_dataframe_to_spark(df)
    except Exception as e:
        # Chain the original exception so the root cause is preserved in the traceback
        raise CouldNotConvertDataframe(
            "Could not convert the provided dataframe to a spark dataframe which is required in order to save it to "
            "the Feature Store, error: {}".format(str(e))) from e
    if offline:
        fs_utils._log("Inserting data into offline feature group {}...".format(featuregroup_name))
        _write_featuregroup_hive(spark_df, featuregroup_name, featurestore, featuregroup_version, mode)
        fs_utils._log("Inserting data into offline feature group {}... [COMPLETE]".format(featuregroup_name))
    if online:
        fs_utils._log("Inserting data into online feature group {}...".format(featuregroup_name))
        if not fg.is_online():
            raise ValueError("Parameter `online` was set to True, but online feature serving is not enabled "
                             "for this featuregroup. Enable online feature serving with "
                             "`enable_featuregroup_online()` function.")
        storage_connector = _do_get_online_featurestore_connector(featurestore, featurestore_metadata)
        tbl_name = fs_utils._get_table_name(featuregroup_name, featuregroup_version)
        online_featurestore._write_jdbc_dataframe(spark_df, storage_connector, tbl_name,
                                                  write_mode=mode)
        fs_utils._log("Inserting data into online feature group {}... [COMPLETE]".format(featuregroup_name))
    # update cache because 'overwrite' mode drops entire featuregroup and recreates it with new id
    featurestore_metadata = _get_featurestore_metadata(featurestore, update_cache=True)
    _do_update_featuregroup_stats(featuregroup_name, featurestore_metadata, featuregroup_version=featuregroup_version,
                                  featurestore=featurestore, descriptive_statistics=None, feature_correlation=None,
                                  feature_histograms=None, cluster_analysis=None, stat_columns=None, num_bins=None,
                                  num_clusters=None, corr_method=None)
    fs_utils._log("Insertion into feature group was successful")
def _do_get_features(features, featurestore_metadata, featurestore=None, featuregroups_version_dict=None, join_key=None,
                     dataframe_type="spark", jdbc_args=None, online=False):
    """
    Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
    metastore to find where the features are stored.

    Args:
        :features: a list of features to get from the featurestore
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroups_version_dict: (Optional) a dict with (fg --> version) for all the featuregroups where the
                                     features reside
        :join_key: (Optional) column name to join on
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
        :featurestore_metadata: the metadata of the featurestore
        :jdbc_args: jdbc arguments for fetching on-demand feature groups (optional)
        :online: a boolean flag whether to fetch the online version of the features (assuming that the
                 feature groups where the features reside have online serving enabled) (for cached feature groups only)

    Returns:
        A spark dataframe with all the features
    """
    # Avoid shared mutable default arguments (dicts created at def-time would be shared across calls)
    if featuregroups_version_dict is None:
        featuregroups_version_dict = {}
    if jdbc_args is None:
        jdbc_args = {}
    if online and ((not featurestore_metadata.settings.online_enabled) or
                           (not featurestore_metadata.featurestore.online_enabled)):
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    spark = util._find_spark()
    _verify_hive_enabled(spark)
    _use_featurestore(spark, featurestore)
    spark.sparkContext.setJobGroup("Fetching Features",
                                   "Getting features: {} from the featurestore {}".format(features, featurestore))
    features_query = FeaturesQuery(features, featurestore_metadata, featurestore, featuregroups_version_dict, join_key)
    logical_query_plan = LogicalQueryPlan(features_query)
    logical_query_plan.create_logical_plan()
    # On-demand feature groups must be fetched via JDBC and exposed as temp tables before running the SQL
    on_demand_featuregroups = list(filter(
        lambda fg: fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type,
        logical_query_plan.featuregroups))
    _register_on_demand_featuregroups_as_temp_tables(on_demand_featuregroups, featurestore, jdbc_args)
    logical_query_plan.construct_sql()
    feature_to_featuregroup_mapping = query_planner._get_feature_featuregroup_mapping(logical_query_plan, featurestore,
                                                                                      featurestore_metadata)
    result = _run_and_log_sql(spark, logical_query_plan.sql_str, online=online, featurestore=featurestore)
    result_w_provenance = fs_utils._add_provenance_metadata_to_dataframe(result, feature_to_featuregroup_mapping)
    spark.sparkContext.setJobGroup("", "")
    return fs_utils._return_dataframe_type(result_w_provenance, dataframe_type)
def _delete_table_contents(featurestore, featuregroup, featuregroup_version):
    """
    Sends a request to clear the contents of a featuregroup by dropping the featuregroup and recreating it with
    the same metadata.

    Args:
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup: the featuregroup to clear
        :featuregroup_version: version of the featuregroup

    Returns:
        The JSON response
    """
    featuregroup_id = _get_featuregroup_id(featurestore, featuregroup, featuregroup_version)
    featurestore_id = _get_featurestore_id(featurestore)
    response_object = rest_rpc._delete_table_contents(featuregroup_id, featurestore_id)
    # update metadata cache since clearing featuregroup will update its id.
    try:
        _get_featurestore_metadata(featurestore, update_cache=True)
    except Exception:
        # Best-effort cache refresh: a failed refresh must not fail the delete operation itself.
        # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not swallowed.)
        pass
    return response_object
def _register_on_demand_featuregroups_as_temp_tables(on_demand_featuregroups, featurestore, jdbc_args=None):
    """
    Registers a list of on-demand featuregroups as temp tables in SparkSQL. Fetches the on-demand featuregroups using
    JDBC and the provided SQL string and then registers the resulting spark dataframes as temporary tables with the name
    featuregroupname_featuregroupversion

    Args:
        :on_demand_featuregroups: metadata of the on-demand feature group to register
        :featurestore: the featurestore to query
        :jdbc_args: jdbc arguments for fetching the on-demand feature group, keyed by temp-table name

    Returns:
        None
    """
    # Avoid a shared mutable default argument
    if jdbc_args is None:
        jdbc_args = {}
    for fg in on_demand_featuregroups:
        # Compute the temp-table name once; it is used for the jdbc-args lookup, registration, and logging
        table_name = fs_utils._get_table_name(fg.name, fg.version)
        j_args = jdbc_args.get(table_name, {})
        spark_df = _do_get_on_demand_featuregroup(fg, featurestore, j_args)
        spark_df.registerTempTable(table_name)
        fs_utils._log("Registered on-demand feature group: {} with version: {} as temporary table: {}"
                      .format(fg.name, fg.version, table_name))
def _do_get_on_demand_featuregroup(featuregroup, featurestore, jdbc_args=None):
    """
    Gets an on-demand featuregroup from a featurestore as a spark dataframe. Uses the JDBC connector to connect
    to the storage backend with Spark and then applies the SQL string for the on-demand feature group and return
    the result

    Args:
        :featuregroup: featuregroup metadata
        :featurestore: the featurestore to query
        :jdbc_args: a dict of argument_name -> value with jdbc connection string arguments to be filled in
                    dynamically at runtime

    Returns:
        a spark dataframe with the contents of the feature group
    """
    # Avoid a shared mutable default argument
    if jdbc_args is None:
        jdbc_args = {}
    jdbc_connector = _do_get_storage_connector(featuregroup.on_demand_featuregroup.jdbc_connector_name,
                                               featurestore=featurestore)
    connection_string = jdbc_connector.connection_string
    connection_string_arguments = jdbc_connector.arguments.split(constants.DELIMITERS.COMMA_DELIMITER)
    # TLS-related arguments are resolved from the project's certificate material at runtime.
    # NOTE(review): the truststore password is resolved with get_key_store_pwd(), matching the
    # previous implementation -- presumably keystore and truststore share a password; confirm.
    tls_resolvers = {
        constants.FEATURE_STORE.JDBC_TRUSTSTORE_ARG: tls.get_trust_store,
        constants.FEATURE_STORE.JDBC_TRUSTSTORE_PW_ARG: tls.get_key_store_pwd,
        constants.FEATURE_STORE.JDBC_KEYSTORE_ARG: tls.get_key_store,
        constants.FEATURE_STORE.JDBC_KEYSTORE_PW_ARG: tls.get_key_store_pwd,
    }
    # Fill in connection string arguments at runtime; user-supplied jdbc_args take precedence
    for connection_string_arg in connection_string_arguments:
        if connection_string_arg in jdbc_args:
            arg_value = jdbc_args[connection_string_arg]
        elif connection_string_arg in tls_resolvers:
            arg_value = tls_resolvers[connection_string_arg]()
        else:
            # No value available for this argument: leave the connection string unchanged
            continue
        connection_string = connection_string + connection_string_arg + \
                            constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER + \
                            arg_value + \
                            constants.DELIMITERS.JDBC_CONNECTION_STRING_DELIMITER
    # Add custom JDBC dialects
    spark = util._find_spark()
    gw = spark.sparkContext._gateway
    java_import(gw.jvm, constants.FEATURE_STORE.IMPORT_HOPS_UTIL_FEATURESTORE_HELPER)
    gw.jvm.org.apache.spark.sql.jdbc.JdbcDialects.registerDialect(
        gw.jvm.io.hops.util.featurestore.FeaturestoreHelper.getHiveJdbcDialect())
    # Read using Spark, the JDBC connector, and the SQL query of the on-demand fg
    spark_df = spark.read.format(constants.SPARK_CONFIG.SPARK_JDBC_FORMAT) \
        .option(constants.SPARK_CONFIG.SPARK_JDBC_URL, connection_string) \
        .option(constants.SPARK_CONFIG.SPARK_JDBC_DBTABLE, "(" + featuregroup.on_demand_featuregroup.query + ") fs_q") \
        .load()
    # Remove the "fs_q." prefixes that the subquery alias adds to the column names
    column_names = \
        list(map(lambda field: field[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_NAME].replace("fs_q.", ""),
                 json.loads(spark_df.schema.json())[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS]))
    renamed_spark_df = spark_df.toDF(*column_names)
    spark.sparkContext.setJobGroup("", "")
    return renamed_spark_df
def _do_get_featuregroup(featuregroup_name, featurestore_metadata, featurestore=None,
                         featuregroup_version=1, dataframe_type="spark", jdbc_args=None, online=False):
    """
    Gets a featuregroup from a featurestore as a spark dataframe

    Args:
        :featuregroup_name: name of the featuregroup to get
        :featurestore_metadata: featurestore metadata
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup_version: (Optional) the version of the featuregroup
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
        :jdbc_args: a dict of argument_name -> value with jdbc connection string arguments to be filled in
                    dynamically at runtime for fetching on-demand feature groups
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled) (for cached feature groups only)

    Returns:
        a spark dataframe with the contents of the featurestore
    """
    if online and ((not featurestore_metadata.settings.online_enabled) or
                           (not featurestore_metadata.featurestore.online_enabled)):
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    # Avoid a shared mutable default argument
    if jdbc_args is None:
        jdbc_args = {}
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    fg = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name, featuregroup_version)
    spark = util._find_spark()
    spark.sparkContext.setJobGroup("Fetching Feature group",
                                   "Getting feature group: {} from the featurestore {}".format(featuregroup_name,
                                                                                               featurestore))
    # Dispatch on the feature group type: on-demand groups are fetched over JDBC, cached ones via SQL
    if fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        return _do_get_on_demand_featuregroup(fg, featurestore, jdbc_args)
    if fg.featuregroup_type == featurestore_metadata.settings.cached_featuregroup_type:
        return _do_get_cached_featuregroup(featuregroup_name, featurestore_metadata,
                                           featurestore, featuregroup_version, dataframe_type, online=online)
    raise ValueError("The feature group type: "
                     + fg.featuregroup_type + " was not recognized. Recognized types include: {} and {}" \
                     .format(featurestore_metadata.settings.on_demand_featuregroup_type,
                             featurestore_metadata.settings.cached_featuregroup_type))
def _do_get_cached_featuregroup(featuregroup_name, featurestore_metadata, featurestore=None,
                                featuregroup_version=1, dataframe_type="spark", online=False):
    """
    Reads a cached featuregroup from the feature store by building a query plan for the feature
    group, executing the generated SQL (offline Hive or online MySQL), and attaching provenance
    metadata to the resulting dataframe.

    Args:
        :featuregroup_name: name of the featuregroup to get
        :featurestore_metadata: metadata of the featurestore
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup_version: (Optional) the version of the featuregroup
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        a spark dataframe with the contents of the feature group
    """
    spark_session = util._find_spark()
    _verify_hive_enabled(spark_session)
    _use_featurestore(spark_session, featurestore)
    # Build the logical query plan for reading the feature group and derive its SQL
    fg_query = FeaturegroupQuery(featuregroup_name, featurestore, featuregroup_version)
    plan = LogicalQueryPlan(fg_query)
    plan.create_logical_plan()
    plan.construct_sql()
    feature_mapping = query_planner._get_feature_featuregroup_mapping(plan, featurestore,
                                                                      featurestore_metadata)
    raw_df = _run_and_log_sql(spark_session, plan.sql_str, online=online, featurestore=featurestore)
    annotated_df = fs_utils._add_provenance_metadata_to_dataframe(raw_df, feature_mapping)
    spark_session.sparkContext.setJobGroup("", "")
    return fs_utils._return_dataframe_type(annotated_df, dataframe_type)
def _do_get_training_dataset(training_dataset_name, featurestore_metadata, training_dataset_version=1,
                             dataframe_type="spark", featurestore=None):
    """
    Reads a training dataset into a spark dataframe

    Args:
        :training_dataset_name: the name of the training dataset to read
        :featurestore_metadata: metadata of the featurestore
        :training_dataset_version: the version of the training dataset
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
        :featurestore: the featurestore where the training dataset resides

    Returns:
        A spark dataframe with the given training dataset data
    """
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    td = query_planner._find_training_dataset(featurestore_metadata.training_datasets,
                                              training_dataset_name,
                                              training_dataset_version)
    if td.training_dataset_type == featurestore_metadata.settings.hopsfs_training_dataset_type:
        # Training dataset materialized on HopsFS: resolve the absolute path to the dataset files
        path = util.abspath(td.location + constants.DELIMITERS.SLASH_DELIMITER + td.name)
    else:
        # External (e.g. S3) training dataset: set up credentials and build the external path
        connector = _do_get_storage_connector(td.connector_name, featurestore)
        fs_utils._setup_s3_credentials_for_spark(connector.access_key, connector.secret_key, util._find_spark())
        path = fs_utils._get_external_training_dataset_path(td.location)
    featureframe = FeatureFrame.get_featureframe(path=path, dataframe_type=dataframe_type,
                                                 data_format=td.data_format,
                                                 training_dataset=td)
    return featureframe.read_featureframe(util._find_spark())
def _do_create_training_dataset(df, training_dataset, description="", featurestore=None,
                                data_format="tfrecords", training_dataset_version=1,
                                jobs=None, descriptive_statistics=True, feature_correlation=True,
                                feature_histograms=True, cluster_analysis=True, stat_columns=None, num_bins=20,
                                corr_method='pearson', num_clusters=5, petastorm_args=None, fixed=True,
                                storage_connector=None, path=None):
    """
    Creates a new training dataset from a dataframe, saves metadata about the training dataset to the database
    and saves the materialized dataset on hdfs

    Args:
        :df: the dataframe to create the training dataset from
        :training_dataset: the name of the training dataset
        :description: a description of the training dataset
        :featurestore: the featurestore that the training dataset is linked to
        :data_format: the format of the materialized training dataset
        :training_dataset_version: the version of the training dataset (defaults to 1)
        :jobs: list of job names linked to the training dataset
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc)
                                 for the featuregroup
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the featuregroup
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                          featuregroup
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :num_clusters: number of clusters to use for cluster analysis
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :petastorm_args: a dict containing petastorm parameters for serializing a dataset in the
                         petastorm format. Required parameters are: 'schema'
        :fixed: boolean flag indicating whether array columns should be treated with fixed size or variable size
        :storage_connector: the storage connector where the training dataset is stored
        :path: path to complement the sink storage connector with, e.g if the storage connector points to an
               S3 bucket, this path can be used to define a sub-directory inside the bucket to place the training
               dataset.

    Returns:
        None

    Raises:
        :CouldNotConvertDataframe: in case the provided dataframe could not be converted to a spark dataframe
    """
    # Avoid shared mutable default arguments: normalize to fresh objects per call
    if jobs is None:
        jobs = []
    if petastorm_args is None:
        petastorm_args = {}
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    try:
        spark_df = fs_utils._convert_dataframe_to_spark(df)
    except Exception as e:
        # Chain the original exception so the root cause is preserved in the traceback
        raise CouldNotConvertDataframe(
            "Could not convert the provided dataframe to a spark dataframe which is required in order "
            "to save it to the Feature Store, error: {}".format(
                str(e))) from e
    featurestore_metadata = _get_featurestore_metadata(featurestore, update_cache=False)
    features_schema = _parse_spark_features_schema(spark_df.schema)
    fs_utils._validate_metadata(training_dataset, features_schema, description, featurestore_metadata.settings)
    # Compute the requested statistics over the dataframe before materializing it
    feature_corr_data, training_dataset_desc_stats_data, features_histogram_data, cluster_analysis_data = \
        _compute_dataframe_stats(
            spark_df, training_dataset, version=training_dataset_version,
            descriptive_statistics=descriptive_statistics, feature_correlation=feature_correlation,
            feature_histograms=feature_histograms, cluster_analysis=cluster_analysis, stat_columns=stat_columns,
            num_bins=num_bins,
            corr_method=corr_method,
            num_clusters=num_clusters)
    featurestore_id = _get_featurestore_id(featurestore)
    # A training dataset is "external" unless its sink is a HopsFS connector
    external = not (storage_connector.type == featurestore_metadata.settings.hopsfs_connector_type)
    training_dataset_type = fs_utils._get_training_dataset_type_info(featurestore_metadata, external)
    # Register the training dataset metadata with Hopsworks before writing the data
    td_json = rest_rpc._create_training_dataset_rest(
        training_dataset, featurestore_id, description, training_dataset_version,
        data_format, jobs, features_schema, feature_corr_data, training_dataset_desc_stats_data,
        features_histogram_data, cluster_analysis_data, training_dataset_type, featurestore_metadata.settings,
        connector_id = storage_connector.id, path=path)
    td = TrainingDataset(td_json)
    if td.training_dataset_type == featurestore_metadata.settings.hopsfs_training_dataset_type:
        path = util.abspath(td.location)
        if data_format == constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT:
            # Best-effort: persist the inferred tfrecords schema next to the dataset so readers can use it
            try:
                tf_record_schema_json = fs_utils._get_dataframe_tf_record_schema_json(spark_df, fixed=fixed)[1]
                fs_utils._store_tf_record_schema_hdfs(tf_record_schema_json, path)
            except Exception as e:
                fs_utils._log("Could not infer tfrecords schema for the dataframe, {}".format(str(e)))
        featureframe = FeatureFrame.get_featureframe(path=path + constants.DELIMITERS.SLASH_DELIMITER + td.name,
                                                     data_format=data_format, df=spark_df,
                                                     write_mode=constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE,
                                                     training_dataset=td,
                                                     petastorm_args=petastorm_args)
    else:
        # External sink (e.g. S3): configure credentials and build the external path
        s3_connector = _do_get_storage_connector(td.connector_name, featurestore)
        fs_utils._setup_s3_credentials_for_spark(s3_connector.access_key, s3_connector.secret_key, util._find_spark())
        path = fs_utils._get_external_training_dataset_path(td.location)
        featureframe = FeatureFrame.get_featureframe(path=path,
                                                     data_format=data_format, df=spark_df,
                                                     write_mode=constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE,
                                                     training_dataset=td,
                                                     petastorm_args=petastorm_args)
    spark = util._find_spark()
    spark.sparkContext.setJobGroup("Materializing dataframe as training dataset",
                                   "Saving training dataset in path: {} in format {}".format(path, data_format))
    featureframe.write_featureframe()
    spark.sparkContext.setJobGroup("", "")
    # update metadata cache (best-effort; narrowed from a bare `except:`)
    try:
        _get_featurestore_metadata(featurestore, update_cache=True)
    except Exception:
        pass
    fs_utils._log("Training Dataset created successfully")
def _do_update_featuregroup_stats(featuregroup_name, featurestore_metadata, spark_df=None, featuregroup_version=1,
                                  featurestore=None, descriptive_statistics=True,
                                  feature_correlation=True, feature_histograms=True, cluster_analysis=True,
                                  stat_columns=None, num_bins=20, num_clusters=5, corr_method='pearson'):
    """
    Recomputes the statistics of a cached featuregroup (over its current contents or a provided dataframe)
    and persists the updated statistics to Hopsworks via the REST API.

    Args:
        :featuregroup_name: name of the featuregroup to update the statistics for
        :featurestore_metadata: metadata of the featurestore
        :spark_df: if not None, use this dataframe to compute the statistics, otherwise read the feature group
        :featuregroup_version: the version of the featuregroup (defaults to 1)
        :featurestore: the featurestore where the featuregroup resides (defaults to the project's featurestore)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc)
                                 for the featuregroup
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the featuregroup
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                          featuregroup
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :num_clusters: the number of clusters to use in clustering analysis (k-means)
        :corr_method: the method to compute feature correlation with (pearson or spearman)

    Returns:
        DTO of the created feature group

    Raises:
        :CannotUpdateStatisticsOfOnDemandFeatureGroup: if the target feature group is on-demand
    """
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    # No dataframe supplied: read the cached feature group contents to compute the statistics over
    if spark_df is None:
        spark_df = _do_get_cached_featuregroup(featuregroup_name, featurestore_metadata, featurestore,
                                               featuregroup_version,
                                               dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK)
    # Statistics are only stored for cached feature groups; reject on-demand ones
    fg = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name, featuregroup_version)
    if fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        raise CannotUpdateStatisticsOfOnDemandFeatureGroup("The feature group with name: {} , and version: {} "
                                                           "is an on-demand feature group and therefore there are no "
                                                           "statistics stored about it in Hopsworks that can be updated."
                                                           "Update statistics operation is only supported for cached "
                                                           "feature groups."
                                                           .format(featuregroup_name, featuregroup_version))
    # Sanitize input and check for changed settings
    group_settings = fs_utils._do_prepare_stats_settings(
        featuregroup_name, featuregroup_version, featurestore_metadata, descriptive_statistics, feature_correlation,
        feature_histograms, cluster_analysis, stat_columns, num_bins, num_clusters, corr_method)
    # Compute only the statistics enabled by the sanitized settings
    feature_corr_data, featuregroup_desc_stats_data, features_histogram_data, cluster_analysis_data = \
        _compute_dataframe_stats(spark_df,
                                 featuregroup_name, version=featuregroup_version,
                                 descriptive_statistics=group_settings["desc_stats_enabled"],
                                 feature_correlation=group_settings["feat_corr_enabled"],
                                 feature_histograms=group_settings["feat_hist_enabled"],
                                 cluster_analysis=group_settings["cluster_analysis_enabled"],
                                 stat_columns=group_settings["stat_columns"],
                                 num_bins=group_settings["num_bins"], corr_method=group_settings["corr_method"],
                                 num_clusters=group_settings["num_clusters"])
    featuregroup_id = fg.id
    featurestore_id = featurestore_metadata.featurestore.id
    featuregroup_type, featuregroup_type_dto = fs_utils._get_cached_featuregroup_type_info(featurestore_metadata)
    # Link the current Hopsworks job (if any) to this statistics update
    jobs = []
    if util.get_job_name() is not None:
        jobs.append(util.get_job_name())
    # Persist the new statistics and settings to Hopsworks and wrap the response in a DTO
    featuregroup = Featuregroup(rest_rpc._update_featuregroup_stats_rest(
        featuregroup_id, featurestore_id, feature_corr_data, featuregroup_desc_stats_data, features_histogram_data,
        cluster_analysis_data, group_settings["desc_stats_enabled"], group_settings["feat_corr_enabled"],
        group_settings["feat_hist_enabled"], group_settings["cluster_analysis_enabled"], group_settings["stat_columns"],
        group_settings["num_bins"], group_settings["num_clusters"], group_settings["corr_method"], featuregroup_type,
        featuregroup_type_dto, jobs))
    return featuregroup
def _do_update_training_dataset_stats(training_dataset_name, featurestore_metadata, spark_df=None, featurestore=None,
                                      training_dataset_version=1, descriptive_statistics=True, feature_correlation=True,
                                      feature_histograms=True, cluster_analysis=True, stat_columns=None, num_bins=20,
                                      corr_method='pearson', num_clusters=5):
    """
    Recomputes the statistics of a training dataset and stores them in Hopsworks via a REST call.

    Args:
        :training_dataset_name: the training dataset to update the statistics for
        :featurestore_metadata: metadata of the feature store
        :spark_df: if not None, use this dataframe to compute the statistics, otherwise read the training dataset
        :featurestore: the featurestore where the training dataset resides (defaults to the project's featurestore)
        :training_dataset_version: the version of the training dataset (defaults to 1)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc) for
                                 the training dataset
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the training dataset
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the
                             training dataset
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in
                           the training dataset
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :num_clusters: the number of clusters to use in clustering analysis (k-means)

    Returns:
        DTO of the updated training dataset
    """
    # NOTE(review): the `featurestore` argument is accepted but never referenced in this body;
    # presumably the callees resolve the project's featurestore themselves -- confirm.
    # If the caller did not supply a dataframe, read the training dataset back from the feature store.
    if spark_df is None:
        spark_df = _do_get_training_dataset(training_dataset_name, featurestore_metadata,
                                            training_dataset_version=training_dataset_version,
                                            dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK)
    training_dataset = query_planner._find_training_dataset(featurestore_metadata.training_datasets,
                                                            training_dataset_name, training_dataset_version)
    # Compute the requested statistics locally on the dataframe (disabled statistics come back as None).
    feature_corr_data, training_dataset_desc_stats_data, features_histogram_data, cluster_analysis_data = \
        _compute_dataframe_stats(
            spark_df, training_dataset_name, version=training_dataset_version,
            descriptive_statistics=descriptive_statistics, feature_correlation=feature_correlation,
            feature_histograms=feature_histograms, cluster_analysis=cluster_analysis, stat_columns=stat_columns,
            num_bins=num_bins, corr_method=corr_method,
            num_clusters=num_clusters)
    training_dataset_id = training_dataset.id
    featurestore_id = featurestore_metadata.featurestore.id
    # Distinguish HopsFS-backed vs external training datasets so the REST payload carries the right type info.
    training_dataset_type = \
        fs_utils._get_training_dataset_type_info(featurestore_metadata,
                                                 external=(training_dataset.training_dataset_type ==
                                                           featurestore_metadata.settings.external_training_dataset_type))
    # Link the currently running Hopsworks job (if any) to the training dataset.
    jobs = []
    if util.get_job_name() is not None:
        jobs.append(util.get_job_name())
    # Push the new statistics to Hopsworks and wrap the response in a TrainingDataset DTO.
    td = TrainingDataset(rest_rpc._update_training_dataset_stats_rest(
        training_dataset_id, featurestore_id, feature_corr_data, training_dataset_desc_stats_data,
        features_histogram_data, cluster_analysis_data, training_dataset_type, jobs))
    return td
def _do_insert_into_training_dataset(
        df, training_dataset_name, featurestore_metadata, featurestore=None, training_dataset_version=1,
        descriptive_statistics=True, feature_correlation=True,
        feature_histograms=True, cluster_analysis=True, stat_columns=None, num_bins=20, corr_method='pearson',
        num_clusters=5, write_mode="overwrite", fixed=True):
    """
    Inserts the data in a training dataset from a spark dataframe (append or overwrite)

    Args:
        :df: the dataframe to write
        :training_dataset_name: the name of the training dataset
        :featurestore_metadata: metadata of the featurestore
        :featurestore: the featurestore that the training dataset is linked to
        :training_dataset_version: the version of the training dataset (defaults to 1)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc)
                                 for the training dataset
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the training dataset
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the
                             training dataset
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in
                           the training dataset
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :num_clusters: number of clusters to use for cluster analysis
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :write_mode: spark write mode ('append' or 'overwrite'). Note: append is not supported for tfrecords datasets.
        :fixed: boolean flag indicating whether array columns should be treated with fixed size or variable size

    Returns:
        None

    Raises:
        :CouldNotConvertDataframe: in case the provided dataframe could not be converted to a spark dataframe
    """
    # Consistency fix: use the featurestore logging helper instead of a bare print,
    # matching the fs_utils._log usage further down in this function.
    fs_utils._log("insert_into_training_dataset")
    try:
        spark_df = fs_utils._convert_dataframe_to_spark(df)
    except Exception as e:
        raise CouldNotConvertDataframe(
            "Could not convert the provided dataframe to a spark dataframe which is required in order to save it to "
            "the Feature Store, error: {}".format(str(e)))
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    # Recompute and persist the statistics for the new data; also returns the training dataset DTO
    # (location, format, type) needed to materialize the dataframe.
    td = _do_update_training_dataset_stats(training_dataset_name, featurestore_metadata, spark_df=spark_df,
                                           featurestore=featurestore, training_dataset_version=training_dataset_version,
                                           descriptive_statistics=descriptive_statistics,
                                           feature_correlation=feature_correlation,
                                           feature_histograms=feature_histograms,
                                           cluster_analysis=cluster_analysis, stat_columns=stat_columns,
                                           num_bins=num_bins,
                                           corr_method=corr_method, num_clusters=num_clusters)
    data_format = td.data_format
    if td.training_dataset_type == featurestore_metadata.settings.hopsfs_training_dataset_type:
        # HopsFS-backed training dataset: write to the dataset's HDFS location.
        path = util.abspath(td.location)
        if data_format == constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT:
            # Append is not supported for tfrecords; force overwrite and store the schema alongside the data.
            write_mode = constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE
            try:
                tf_record_schema_json = fs_utils._get_dataframe_tf_record_schema_json(spark_df, fixed=fixed)[1]
                fs_utils._store_tf_record_schema_hdfs(tf_record_schema_json, path)
            except Exception as e:
                # Best-effort: a missing schema file should not abort the data write.
                fs_utils._log("Could not infer tfrecords schema for the dataframe, {}".format(str(e)))
        featureframe = FeatureFrame.get_featureframe(path=path + constants.DELIMITERS.SLASH_DELIMITER + td.name,
                                                     data_format=data_format, df=spark_df,
                                                     write_mode=write_mode,
                                                     training_dataset=td)
    else:
        # External (e.g. S3) training dataset: set up credentials and write to the external path.
        s3_connector = _do_get_storage_connector(td.connector_name, featurestore)
        fs_utils._setup_s3_credentials_for_spark(s3_connector.access_key, s3_connector.secret_key, util._find_spark())
        path = fs_utils._get_external_training_dataset_path(td.location)
        featureframe = FeatureFrame.get_featureframe(path=path,
                                                     data_format=data_format, df=spark_df,
                                                     write_mode=write_mode,
                                                     training_dataset=td)
    spark = util._find_spark()
    _verify_hive_enabled(spark)
    spark.sparkContext.setJobGroup("Materializing dataframe as training dataset",
                                   "Saving training dataset in path: {} in format {}".format(path, data_format))
    fs_utils._log("Writing Feature Frame, data format: {}".format(data_format))
    featureframe.write_featureframe()
    # Reset the job group so subsequent spark jobs are not mislabeled.
    spark.sparkContext.setJobGroup("", "")
def _get_training_dataset_id(featurestore, training_dataset_name, training_dataset_version):
    """
    Looks up the id of a training dataset by name and version
    (temporary workaround until HOPSWORKS-860 where we use Name to refer to resources).

    Args:
        :featurestore: the featurestore where the training dataset belongs
        :training_dataset_name: the training dataset to get the id for
        :training_dataset_version: the version of the training dataset

    Returns:
        the id of the training dataset

    Raises:
        :TrainingDatasetNotFound: if the requested trainining dataset could not be found
    """
    # Try the cached metadata first; refresh the cache if it is missing or for another featurestore.
    metadata = _get_featurestore_metadata(featurestore, update_cache=False)
    if metadata is None or featurestore != metadata.featurestore.name:
        metadata = _get_featurestore_metadata(featurestore, update_cache=True)
    match = next((td for td in metadata.training_datasets.values()
                  if td.name == training_dataset_name and td.version == training_dataset_version), None)
    if match is not None:
        return match.id
    raise TrainingDatasetNotFound("The training dataset {} with version: {} "
                                  "was not found in the feature store {}".format(
        training_dataset_name, training_dataset_version, featurestore))
def _do_get_training_datasets(featurestore_metadata):
    """
    Lists all training datasets in a featurestore, in table-name form (name_version).

    Args:
        :featurestore_metadata: metadata of the featurestore

    Returns:
        A list of names of the training datasets in this featurestore
    """
    return [fs_utils._get_table_name(td.name, td.version)
            for td in featurestore_metadata.training_datasets.values()]
def _do_get_storage_connectors(featurestore_metadata):
"""
Gets a list of all storage connectors and their type in a featurestore
Args:
:featurestore_metadata: metadata of the featurestore
Returns:
A list of names of the storage connectors in this featurestore and their type
"""
return list(map(lambda sc: (sc.name, sc.type), featurestore_metadata.storage_connectors.values()))
def _do_get_training_dataset_path(training_dataset_name, featurestore_metadata, training_dataset_version=1):
    """
    Resolves the HDFS path of a training dataset with a given name and version.

    Args:
        :training_dataset_name: name of the training dataset
        :featurestore_metadata: metadata of the featurestore
        :training_dataset_version: version of the training dataset

    Returns:
        The HDFS path to the training dataset
    """
    td = query_planner._find_training_dataset(featurestore_metadata.training_datasets,
                                              training_dataset_name,
                                              training_dataset_version)
    path = td.location + constants.DELIMITERS.SLASH_DELIMITER + td.name
    fmt = td.data_format
    # npy/hdf5 datasets are single files with a suffix; image datasets live in the dataset folder itself.
    if fmt == constants.FEATURE_STORE.TRAINING_DATASET_NPY_FORMAT:
        path = path + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX
    elif fmt == constants.FEATURE_STORE.TRAINING_DATASET_HDF5_FORMAT:
        path = path + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX
    elif fmt == constants.FEATURE_STORE.TRAINING_DATASET_IMAGE_FORMAT:
        path = td.location
    # abspath prepends "hdfs://namenode:port/" to the path
    return util.abspath(path)
def _do_get_training_dataset_tf_record_schema(training_dataset_name, featurestore_metadata, training_dataset_version=1,
                                              featurestore=None):
    """
    Loads the tf record schema of a training dataset stored in tfrecords format.

    Args:
        :training_dataset_name: the training dataset to get the tfrecords schema for
        :featurestore_metadata: metadata of the featurestore
        :training_dataset_version: the version of the training dataset
        :featurestore: the featurestore where the training dataset resides

    Returns:
        the tf records schema

    Raises:
        :TFRecordSchemaNotFound: if a tfrecord schema for the given training dataset could not be found
    """
    td = query_planner._find_training_dataset(featurestore_metadata.training_datasets,
                                              training_dataset_name,
                                              training_dataset_version)
    if td.data_format != constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT:
        raise TFRecordSchemaNotFound(
            "Cannot fetch tf records schema for a training dataset that is not stored in tfrecords format, "
            "this training dataset is stored in format: {}".format(
                td.data_format))
    # The schema is stored as a JSON file next to the data in HDFS.
    schema_path = (util.abspath(td.location) + constants.DELIMITERS.SLASH_DELIMITER +
                   constants.FEATURE_STORE.TRAINING_DATASET_TF_RECORD_SCHEMA_FILE_NAME)
    schema_json = json.loads(hdfs.load(schema_path))
    return fs_utils._convert_tf_record_schema_json_to_dict(schema_json)
def _do_get_featuregroup_partitions(featuregroup_name, featurestore_metadata, featurestore=None, featuregroup_version=1,
                                    dataframe_type="spark"):
    """
    Fetches the Hive partitions of a cached feature group by running a SHOW PARTITIONS query.

    Args:
        :featuregroup_name: the featuregroup to get partitions for
        :featurestore_metadata: metadata of the featurestore
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup_version: the version of the featuregroup, defaults to 1
        :dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)

    Returns:
        a dataframe with the partitions of the featuregroup

    Raises:
        :CannotGetPartitionsOfOnDemandFeatureGroup: if the feature group is on-demand (no Hive table exists)
    """
    feature_group = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name,
                                                     featuregroup_version)
    # On-demand feature groups have no backing Hive table, hence no partitions.
    if feature_group.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        raise CannotGetPartitionsOfOnDemandFeatureGroup("The feature group with name: {} , and version: {} "
                                                        "is an on-demand feature group. "
                                                        "Get partitions operation is only supported for "
                                                        "cached feature groups."
                                                        .format(featuregroup_name, featuregroup_version))
    spark_session = util._find_spark()
    _verify_hive_enabled(spark_session)
    spark_session.sparkContext.setJobGroup("Fetching Partitions of a Featuregroup",
                                           "Getting partitions for feature group: {} from the featurestore {}".format(
                                               featuregroup_name, featurestore))
    _use_featurestore(spark_session, featurestore)
    partitions_query = "SHOW PARTITIONS " + fs_utils._get_table_name(featuregroup_name, featuregroup_version)
    partitions_df = _run_and_log_sql(spark_session, partitions_query)
    # Reset the job group so subsequent spark jobs are not mislabeled.
    spark_session.sparkContext.setJobGroup("", "")
    return fs_utils._return_dataframe_type(partitions_df, dataframe_type)
def _do_visualize_featuregroup_distributions(featuregroup_name, featurestore=None, featuregroup_version=1,
                                             figsize=(16, 12), color='lightblue', log=False, align="center"):
    """
    Creates a matplotlib figure of the feature distributions in a featuregroup in the featurestore.

    1. Fetches the stored statistics for the featuregroup
    2. If the feature distributions have been computed for the featuregroup, create the figure

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: size of the figure
        :color: the color of the histograms
        :log: whether to use log-scaling on the y-axis or not
        :align: how to align the bars, defaults to center.

    Returns:
        Matplotlib figure with the feature distributions

    Raises:
        :FeatureDistributionsNotComputed: if the feature distributions to visualize have not been computed.
    """
    stats = _do_get_featuregroup_statistics(featuregroup_name, featurestore=featurestore,
                                            featuregroup_version=featuregroup_version)
    if stats.feature_histograms is None or stats.feature_histograms.feature_distributions is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureDistributionsNotComputed("Cannot visualize the feature distributions for the "
                                              "feature group: {} with version: {} in featurestore: {} since the "
                                              "feature distributions have not been computed for this featuregroup."
                                              " To compute the feature distributions, call "
                                              "featurestore.update_featuregroup_stats(featuregroup_name)"
                                              .format(featuregroup_name, featuregroup_version, featurestore))
    fig = statistics_plots._visualize_feature_distributions(stats.feature_histograms.feature_distributions,
                                                            figsize=figsize, color=color, log=log, align=align)
    return fig
def _do_visualize_featuregroup_correlations(featuregroup_name, featurestore=None, featuregroup_version=1,
                                            figsize=(16, 12), cmap="coolwarm", annot=True, fmt=".2f", linewidths=.05):
    """
    Creates a matplotlib figure of the feature correlations in a featuregroup in the featurestore.

    1. Fetches the stored statistics for the featuregroup
    2. If the feature correlations have been computed for the featuregroup, create the figure

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: the size of the figure
        :cmap: the color map
        :annot: whether to annotate the heatmap
        :fmt: how to format the annotations
        :linewidths: line width in the plot

    Returns:
        Matplotlib figure with the feature correlations

    Raises:
        :FeatureCorrelationsNotComputed: if the feature correlations to visualize have not been computed.
    """
    stats = _do_get_featuregroup_statistics(featuregroup_name, featurestore=featurestore,
                                            featuregroup_version=featuregroup_version)
    if stats.correlation_matrix is None or stats.correlation_matrix.feature_correlations is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureCorrelationsNotComputed("Cannot visualize the feature correlations for the "
                                             "feature group: {} with version: {} in featurestore: {} since the "
                                             "feature correlations have not been computed for this featuregroup."
                                             " To compute the feature correlations, call "
                                             "featurestore.update_featuregroup_stats(featuregroup_name)"
                                             .format(featuregroup_name, featuregroup_version, featurestore))
    fig = statistics_plots._visualize_feature_correlations(stats.correlation_matrix.feature_correlations,
                                                           figsize=figsize, cmap=cmap, annot=annot, fmt=fmt,
                                                           linewidths=linewidths)
    return fig
def _do_visualize_featuregroup_clusters(featuregroup_name, featurestore=None, featuregroup_version=1, figsize=(16, 12)):
    """
    Creates a matplotlib figure of the feature clusters in a featuregroup in the featurestore.

    1. Fetches the stored statistics for the featuregroup
    2. If the feature clusters have been computed for the featuregroup, create the figure

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: the size of the figure

    Returns:
        Matplotlib figure with the feature clusters

    Raises:
        :FeatureClustersNotComputed: if the feature clusters to visualize have not been computed.
    """
    stats = _do_get_featuregroup_statistics(featuregroup_name, featurestore=featurestore,
                                            featuregroup_version=featuregroup_version)
    if stats.cluster_analysis is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureClustersNotComputed("Cannot visualize the feature clusters for the "
                                         "feature group: {} with version: {} in featurestore: {} since the "
                                         "feature clusters have not been computed for this featuregroup."
                                         " To compute the feature clusters, call "
                                         "featurestore.update_featuregroup_stats(featuregroup_name)"
                                         .format(featuregroup_name, featuregroup_version, featurestore))
    fig = statistics_plots._visualize_feature_clusters(stats.cluster_analysis, figsize=figsize)
    return fig
def _do_visualize_featuregroup_descriptive_stats(featuregroup_name, featurestore=None,
                                                 featuregroup_version=1):
    """
    Creates a pandas dataframe of the descriptive statistics of a featuregroup in the featurestore.

    1. Fetches the stored statistics for the featuregroup
    2. If the descriptive statistics have been computed for the featuregroup, create the pandas dataframe

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        Pandas dataframe with the descriptive statistics

    Raises:
        :DescriptiveStatisticsNotComputed: if the descriptive statistics to visualize have not been computed.
    """
    stats = _do_get_featuregroup_statistics(featuregroup_name, featurestore=featurestore,
                                            featuregroup_version=featuregroup_version)
    if stats.descriptive_stats is None or stats.descriptive_stats.descriptive_stats is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise DescriptiveStatisticsNotComputed("Cannot visualize the descriptive statistics for the "
                                               "featuregroup: {} with version: {} in featurestore: {} since the "
                                               "descriptive statistics have not been computed for this featuregroup."
                                               " To compute the descriptive statistics, call "
                                               "featurestore.update_featuregroup_stats(featuregroup_name)"
                                               .format(featuregroup_name, featuregroup_version, featurestore))
    df = statistics_plots._visualize_descriptive_stats(stats.descriptive_stats.descriptive_stats)
    return df
def _do_visualize_training_dataset_distributions(training_dataset_name, featurestore=None, training_dataset_version=1,
                                                 figsize=(16, 12), color='lightblue', log=False, align="center"):
    """
    Creates a matplotlib figure of the feature distributions in a training dataset in the featurestore.

    1. Fetches the stored statistics for the training dataset
    2. If the feature distributions have been computed for the training dataset, create the figure

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: size of the figure
        :color: the color of the histograms
        :log: whether to use log-scaling on the y-axis or not
        :align: how to align the bars, defaults to center.

    Returns:
        Matplotlib figure with the feature distributions

    Raises:
        :FeatureDistributionsNotComputed: if the feature distributions to visualize have not been computed.
    """
    stats = _do_get_training_dataset_statistics(training_dataset_name, featurestore=featurestore,
                                                training_dataset_version=training_dataset_version)
    if stats.feature_histograms is None or stats.feature_histograms.feature_distributions is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureDistributionsNotComputed("Cannot visualize the feature distributions for the "
                                              "training dataset: {} with version: {} in featurestore: {} since the "
                                              "feature distributions have not been computed for this training dataset."
                                              " To compute the feature distributions, call "
                                              "featurestore.update_training_dataset_stats(training_dataset_name)"
                                              .format(training_dataset_name, training_dataset_version, featurestore))
    fig = statistics_plots._visualize_feature_distributions(stats.feature_histograms.feature_distributions,
                                                            figsize=figsize, color=color, log=log, align=align)
    return fig
def _do_visualize_training_dataset_correlations(training_dataset_name, featurestore=None, training_dataset_version=1,
                                                figsize=(16, 12), cmap="coolwarm", annot=True, fmt=".2f",
                                                linewidths=.05):
    """
    Creates a matplotlib figure of the feature correlations in a training dataset in the featurestore.

    1. Fetches the stored statistics for the training dataset
    2. If the feature correlations have been computed for the training dataset, create the figure

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: the size of the figure
        :cmap: the color map
        :annot: whether to annotate the heatmap
        :fmt: how to format the annotations
        :linewidths: line width in the plot

    Returns:
        Matplotlib figure with the feature correlations

    Raises:
        :FeatureCorrelationsNotComputed: if the feature correlations to visualize have not been computed.
    """
    stats = _do_get_training_dataset_statistics(training_dataset_name, featurestore=featurestore,
                                                training_dataset_version=training_dataset_version)
    if stats.correlation_matrix is None or stats.correlation_matrix.feature_correlations is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureCorrelationsNotComputed("Cannot visualize the feature correlations for the "
                                             "training dataset: {} with version: {} in featurestore: {} since the "
                                             "feature correlations have not been computed for this training dataset."
                                             " To compute the feature correlations, call "
                                             "featurestore.update_training_dataset_stats(training_dataset_name)"
                                             .format(training_dataset_name, training_dataset_version, featurestore))
    fig = statistics_plots._visualize_feature_correlations(stats.correlation_matrix.feature_correlations,
                                                           figsize=figsize, cmap=cmap, annot=annot, fmt=fmt,
                                                           linewidths=linewidths)
    return fig
def _do_visualize_training_dataset_clusters(training_dataset_name, featurestore=None, training_dataset_version=1,
                                            figsize=(16, 12)):
    """
    Creates a matplotlib figure of the feature clusters in a training dataset in the featurestore.

    1. Fetches the stored statistics for the training dataset
    2. If the feature clusters have been computed for the training dataset, create the figure

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: the size of the figure

    Returns:
        Matplotlib figure with the feature clusters

    Raises:
        :FeatureClustersNotComputed: if the feature clusters to visualize have not been computed.
    """
    stats = _do_get_training_dataset_statistics(training_dataset_name, featurestore=featurestore,
                                                training_dataset_version=training_dataset_version)
    if stats.cluster_analysis is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise FeatureClustersNotComputed("Cannot visualize the feature clusters for the "
                                         "training dataset: {} with version: {} in featurestore: {} since the "
                                         "feature clusters have not been computed for this training dataset."
                                         " To compute the feature clusters, call "
                                         "featurestore.update_training_dataset_stats(training_dataset_name)"
                                         .format(training_dataset_name, training_dataset_version, featurestore))
    fig = statistics_plots._visualize_feature_clusters(stats.cluster_analysis, figsize=figsize)
    return fig
def _do_visualize_training_dataset_descriptive_stats(training_dataset_name, featurestore=None,
                                                     training_dataset_version=1):
    """
    Creates a pandas dataframe of the descriptive statistics of a training dataset in the featurestore.

    1. Fetches the stored statistics for the training dataset
    2. If the descriptive statistics have been computed for the training dataset, create the pandas dataframe

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset

    Returns:
        Pandas dataframe with the descriptive statistics

    Raises:
        :DescriptiveStatisticsNotComputed: if the descriptive statistics to visualize have not been computed.
    """
    stats = _do_get_training_dataset_statistics(training_dataset_name, featurestore=featurestore,
                                                training_dataset_version=training_dataset_version)
    if stats.descriptive_stats is None or stats.descriptive_stats.descriptive_stats is None:
        # Bug fix: the message previously contained unfilled '{}' placeholders (missing .format() call)
        raise DescriptiveStatisticsNotComputed("Cannot visualize the descriptive statistics for the "
                                               "training dataset: {} with version: {} in featurestore: {} since the "
                                               "descriptive statistics have not been computed for this training dataset."
                                               " To compute the descriptive statistics, call "
                                               "featurestore.update_training_dataset_stats(training_dataset_name)"
                                               .format(training_dataset_name, training_dataset_version, featurestore))
    df = statistics_plots._visualize_descriptive_stats(stats.descriptive_stats.descriptive_stats)
    return df
def _do_get_featuregroup_statistics(featuregroup_name, featurestore=None, featuregroup_version=1):
    """
    Fetches the computed statistics (if any) of a featuregroup from Hopsworks.

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        A Statistics Object
    """
    featuregroup_id = _get_featuregroup_id(featurestore, featuregroup_name, featuregroup_version)
    featurestore_id = _get_featurestore_id(featurestore)
    response_object = rest_rpc._get_featuregroup_rest(featuregroup_id, featurestore_id)
    # dict.get() returns None for missing keys, so absent statistics simply stay None
    stat_keys = (constants.REST_CONFIG.JSON_FEATUREGROUP_DESC_STATS,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURE_CORRELATION,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_HISTOGRAM,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_CLUSTERS)
    desc_stats, correlations, histograms, clusters = (response_object.get(k) for k in stat_keys)
    return Statistics(desc_stats, correlations, histograms, clusters)
def _do_get_training_dataset_statistics(training_dataset_name, featurestore=None, training_dataset_version=1):
    """
    Fetches the computed statistics (if any) of a training dataset from Hopsworks.

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset

    Returns:
        A Statistics Object
    """
    training_dataset_id = _get_training_dataset_id(featurestore, training_dataset_name, training_dataset_version)
    featurestore_id = _get_featurestore_id(featurestore)
    response_object = rest_rpc._get_training_dataset_rest(training_dataset_id, featurestore_id)
    # dict.get() returns None for missing keys, so absent statistics simply stay None
    stat_keys = (constants.REST_CONFIG.JSON_FEATUREGROUP_DESC_STATS,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURE_CORRELATION,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_HISTOGRAM,
                 constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_CLUSTERS)
    desc_stats, correlations, histograms, clusters = (response_object.get(k) for k in stat_keys)
    return Statistics(desc_stats, correlations, histograms, clusters)
def _verify_hive_enabled(spark):
    """
    Verifies that Hive is enabled on the given spark session.

    Args:
        :spark: the spark session to verfiy

    Returns:
        None

    Raises:
        :HiveNotEnabled: when hive is not enabled on the provided spark session
    """
    # Guard clause: nothing to do when Hive is already the catalog implementation
    if fs_utils._is_hive_enabled(spark):
        return
    raise HiveNotEnabled((
        "Hopsworks Featurestore Depends on Hive. Hive is not enabled for the current spark session. "
        "Make sure to enable hive before using the featurestore API."
        " The current SparkSQL catalog implementation is: {}, it should be: {}".format(
            fs_utils._get_spark_sql_catalog_impl(spark), constants.SPARK_CONFIG.SPARK_SQL_CATALOG_HIVE)))
def _sync_hive_table_with_featurestore(featuregroup, featurestore_metadata, description="", featurestore=None,
                                       featuregroup_version=1, jobs=None, feature_corr_data=None,
                                       featuregroup_desc_stats_data=None, features_histogram_data=None,
                                       cluster_analysis_data=None):
    """
    Synchronizes an existing Hive table with a Feature Store.

    Args:
        :featuregroup: name of the featuregroup to synchronize with the hive table.
                       The hive table should have a naming scheme of `featuregroup_featuregroupversion`
        :featurestore_metadata: metadata of the feature store
        :description: description of the feature group
        :featurestore: the feature store where the hive table is stored
        :featuregroup_version: version of the feature group
        :jobs: list of jobs used to compute the feature group (optional)
        :feature_corr_data: feature correlation statistics (optional)
        :featuregroup_desc_stats_data: descriptive statistics (optional)
        :features_histogram_data: feature histograms statistics (optional)
        :cluster_analysis_data: cluster analysis statistics (optional)

    Returns:
        None
    """
    # Bug fix: the default was a shared mutable list (jobs=[]); use None and
    # materialize a fresh list per call instead.
    if jobs is None:
        jobs = []
    featuregroup_type, featuregroup_type_dto = fs_utils._get_cached_featuregroup_type_info(featurestore_metadata)
    featurestore_id = _get_featurestore_id(featurestore)
    rest_rpc._sync_hive_table_with_featurestore_rest(featuregroup, featurestore_id, description, featuregroup_version, jobs,
                                                     feature_corr_data, featuregroup_desc_stats_data, features_histogram_data,
                                                     cluster_analysis_data, featuregroup_type, featuregroup_type_dto)
def _do_get_s3_featuregroup(storage_connector_name, dataset_path, featurestore_metadata,
                            featurestore=None, data_format="parquet"):
    """
    Gets a feature dataset stored externally (e.g in a S3 bucket)

    Args:
        :storage_connector_name: the storage connector to the external data store
        :dataset_path: the path to the dataset in the external datastore
        :featurestore_metadata: featurestore metadata
        :featurestore: the featurestore of the storage connector
        :data_format: the data format (so that we know how to read the dataset)

    Returns:
        A spark dataframe of the external feature group

    Raises:
        :StorageConnectorTypeNotSupportedForFeatureImport: if the connector type cannot be used for feature import
    """
    storage_connector = _do_get_storage_connector(storage_connector_name, featurestore)
    if storage_connector.type == featurestore_metadata.settings.s3_connector_type:
        # Connectors backed by IAM roles carry no access key; only set explicit credentials when present
        if storage_connector.access_key is not None:
            fs_utils._setup_s3_credentials_for_spark(storage_connector.access_key,
                                                     storage_connector.secret_key, util._find_spark())
        path = fs_utils._get_bucket_path(storage_connector.bucket, dataset_path)
        featureframe = FeatureFrame.get_featureframe(path=path,
                                                     dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
                                                     data_format=data_format)
        return featureframe.read_featureframe(util._find_spark())
    elif storage_connector.type not in metadata_cache.settings.feature_import_connectors:
        # Bug fix: the original message ended with "are: " but never listed the supported
        # connectors (the second format placeholder and argument were missing); also fixed
        # the "conector" typo in the same broken message.
        raise StorageConnectorTypeNotSupportedForFeatureImport("The storage connector type: {} is not supported for "
                                                               "feature importation. Supported feature storage "
                                                               "connectors for importation are: {}"
                                                               .format(storage_connector.type,
                                                                       metadata_cache.settings.feature_import_connectors))
    # NOTE(review): a connector that is in feature_import_connectors but not of S3 type falls
    # through and returns None here -- confirm whether other connector types should be handled.
def _do_get_online_featurestore_connector(featurestore, featurestore_metadata):
    """
    Gets the JDBC connector for the online featurestore.

    Args:
        :featurestore: the featurestore name
        :featurestore_metadata: the featurestore metadata

    Returns:
        a JDBC connector DTO object for the online featurestore
    """
    # Prefer the connector cached in the metadata; fall back to a REST lookup.
    cached_connector = None
    if featurestore_metadata is not None:
        cached_connector = featurestore_metadata.online_featurestore_connector
    if cached_connector is not None:
        return cached_connector
    featurestore_id = _get_featurestore_id(featurestore)
    response_object = rest_rpc._get_online_featurestore_jdbc_connector_rest(featurestore_id)
    return JDBCStorageConnector(response_object)
def _do_create_featuregroup(df, featurestore_metadata, featuregroup, primary_key=None, description="", featurestore=None,
                            featuregroup_version=1, jobs=None,
                            descriptive_statistics=True, feature_correlation=True,
                            feature_histograms=True, cluster_analysis=True, stat_columns=None, num_bins=20,
                            corr_method='pearson', num_clusters=5, partition_by=None, online=False,
                            online_types=None, offline=True):
    """
    Creates a new cached featuregroup from a dataframe of features (sends the metadata to Hopsworks with a REST call
    to create the Hive table and store the metadata and then inserts the data of the spark dataframe into the newly
    created table) If online=True, a MySQL table will be created for online feature data

    Args:
        :df: the dataframe to create the featuregroup for (used to infer the schema)
        :featurestore_metadata: metadata of the featurestore
        :featuregroup: the name of the new featuregroup
        :primary_key: list of names of columns to be the primary key of the new featuregroup, if not specified,
                      the first column in the dataframe will be used as primary
        :description: a description of the featuregroup
        :featurestore: the featurestore of the featuregroup (defaults to the project's featurestore)
        :featuregroup_version: the version of the featuregroup (defaults to 1)
        :jobs: list of Hopsworks jobs linked to the feature group
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc) for the
                                 featuregroup
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns in
                              the featuregroup
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                           featuregroup
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :num_clusters: the number of clusters to use for cluster analysis
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :partition_by: a list of columns to partition_by, defaults to the empty list
        :online: boolean flag, if this is set to true, a MySQL table for online feature data will be created in
                 addition to the Hive table for offline feature data
        :online_types: a dict with feature_name --> online_type, if a feature is present in this dict,
                       the online_type will be taken from the dict rather than inferred from the spark dataframe.
        :offline: boolean flag whether to insert the data in the offline version of the featuregroup

    Returns:
        None

    Raises:
        :CouldNotConvertDataframe: in case the provided dataframe could not be converted to a spark dataframe
        :OnlineFeaturestoreNotEnabled: if online=True but the online feature store is not enabled
    """
    # BUGFIX: primary_key/jobs/partition_by previously defaulted to mutable lists ([]).
    # `jobs.append(...)` below then mutated the *shared* default object, so job names
    # accumulated across calls within the same interpreter. None-defaults plus a
    # defensive copy keep every call independent (and avoid mutating caller lists).
    primary_key = list(primary_key) if primary_key is not None else []
    jobs = list(jobs) if jobs is not None else []
    partition_by = list(partition_by) if partition_by is not None else []
    if online and ((not featurestore_metadata.settings.online_enabled) or
                           (not featurestore_metadata.featurestore.online_enabled)):
        raise OnlineFeaturestoreNotEnabled("Online Feature Store is not enabled for this project or cluster, "
                                           "talk to an administrator to enable it")
    try:
        spark_df = fs_utils._convert_dataframe_to_spark(df)
    except Exception as e:
        raise CouldNotConvertDataframe(
            "Could not convert the provided dataframe to a spark dataframe which is required in order to save it to "
            "the Feature Store, error: {}".format(
                str(e)))

    if len(primary_key) == 0:
        # No primary key supplied: fall back to a default inferred from the dataframe.
        primary_key = [fs_utils._get_default_primary_key(spark_df)]
    if util.get_job_name() is not None:
        # Link the currently running Hopsworks job (if any) to the feature group.
        jobs.append(util.get_job_name())

    fs_utils._validate_primary_key(spark_df, primary_key)
    features_schema = _parse_spark_features_schema(spark_df.schema, primary_key, partition_by, online=online,
                                                   online_types=online_types)
    fs_utils._validate_metadata(featuregroup, features_schema, description, featurestore_metadata.settings)
    feature_corr_data, featuregroup_desc_stats_data, features_histogram_data, cluster_analysis_data = \
        _compute_dataframe_stats(
            spark_df, featuregroup, version=featuregroup_version,
            descriptive_statistics=descriptive_statistics, feature_correlation=feature_correlation,
            feature_histograms=feature_histograms, cluster_analysis=cluster_analysis, stat_columns=stat_columns,
            num_bins=num_bins,
            corr_method=corr_method,
            num_clusters=num_clusters)
    featurestore_id = _get_featurestore_id(featurestore)
    featuregroup_type, featuregroup_type_dto = fs_utils._get_cached_featuregroup_type_info(featurestore_metadata)
    fs_utils._log("Registering feature metadata...")
    # The REST call creates the Hive table (and the MySQL table when online=True)
    # and registers the metadata; the response body itself is not needed here.
    rest_rpc._create_featuregroup_rest(featuregroup, featurestore_id, description, featuregroup_version, jobs,
                                       features_schema, feature_corr_data, featuregroup_desc_stats_data,
                                       features_histogram_data, cluster_analysis_data, feature_correlation,
                                       descriptive_statistics, feature_histograms, cluster_analysis, stat_columns,
                                       num_bins, num_clusters, corr_method, featuregroup_type, featuregroup_type_dto,
                                       None, None, online)
    fs_utils._log("Registering feature metadata... [COMPLETE]")
    if offline:
        fs_utils._log("Writing feature data to offline feature group (Hive)...")
        _write_featuregroup_hive(spark_df, featuregroup, featurestore, featuregroup_version,
                                 constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE)
        fs_utils._log("Writing feature data to offline feature group (Hive)... [COMPLETE]")
    if online:
        fs_utils._log("Writing feature data to online feature group (MySQL)...")
        storage_connector = _do_get_online_featurestore_connector(featurestore, featurestore_metadata)
        tbl_name = fs_utils._get_table_name(featuregroup, featuregroup_version)
        online_featurestore._write_jdbc_dataframe(spark_df, storage_connector, tbl_name,
                                                  write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE)
        fs_utils._log("Writing feature data to online feature group (MySQL)... [COMPLETE]")
def _do_enable_featuregroup_online(featuregroup_name, featuregroup_version, featurestore_metadata, featurestore=None,
                                  online_types=None):
    """
    Enables online serving for a featuregroup

    Creates the online (MySQL) representation of an existing cached feature group,
    inferring its schema from the current offline (Hive) data.

    Args:
        :featuregroup_name: name of the featuregroup
        :featuregroup_version: version of the featuregroup
        :featurestore_metadata: metadata about the featurestore
        :featurestore: the featurestore to query (defaults to the project featurestore)
        :online_types: a dict with feature_name --> online_type, if a feature is present in this dict,
                       the online_type will be taken from the dict rather than inferred from the spark dataframe.

    Returns:
        None

    Raises:
        :CannotEnableOnlineFeatureServingForOnDemandFeatureGroup: if the target is an on-demand feature group
    """
    if featurestore is None:
        featurestore = fs_utils._do_get_project_featurestore()
    fg = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name, featuregroup_version)
    if fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        message = ("The feature group with name: {} , and version: {} "
                   "is an on-demand feature group. Online feature serving "
                   "operation is only supported for cached feature groups."
                   .format(featuregroup_name, featuregroup_version))
        raise CannotEnableOnlineFeatureServingForOnDemandFeatureGroup(message)
    # Read the offline data so the online schema can be derived from the Spark dataframe.
    spark_df = _do_get_cached_featuregroup(featuregroup_name, featurestore_metadata, featurestore, featuregroup_version,
                                           constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK, online=False)
    primary_key = [feature.name for feature in fg.features if feature.primary]
    features_schema = _parse_spark_features_schema(spark_df.schema, primary_key, [], online=True,
                                                   online_types=online_types)
    fg_type, fg_type_dto = fs_utils._get_cached_featuregroup_type_info(featurestore_metadata)
    rest_rpc._enable_featuregroup_online_rest(featuregroup_name, featuregroup_version, fg.id,
                                              featurestore_metadata.featurestore.id, fg_type_dto, fg_type,
                                              features_schema)
def _do_disable_featuregroup_online(featuregroup_name, featuregroup_version, featurestore_metadata):
    """
    Disable online serving for a featuregroup

    Notifies Hopsworks through the REST API to turn off the online (MySQL)
    representation of a cached feature group.

    Args:
        :featuregroup_name: name of the featuregroup
        :featuregroup_version: version of the featuregroup
        :featurestore_metadata: metadata about the featurestore

    Returns:
        None

    Raises:
        :CannotEnableOnlineFeatureServingForOnDemandFeatureGroup: if the target is an on-demand feature group
    """
    fg = query_planner._find_featuregroup(featurestore_metadata.featuregroups, featuregroup_name, featuregroup_version)
    # Online serving only applies to cached feature groups, reject on-demand ones.
    if fg.featuregroup_type == featurestore_metadata.settings.on_demand_featuregroup_type:
        message = ("The feature group with name: {} , and version: {} "
                   "is an on-demand feature group. Online feature serving "
                   "operation is only supported for cached feature groups."
                   .format(featuregroup_name, featuregroup_version))
        raise CannotEnableOnlineFeatureServingForOnDemandFeatureGroup(message)
    fg_type, fg_type_dto = fs_utils._get_cached_featuregroup_type_info(featurestore_metadata)
    rest_rpc._disable_featuregroup_online_rest(featuregroup_name, featuregroup_version, fg.id,
                                               featurestore_metadata.featurestore.id, fg_type_dto, fg_type)
def _do_get_redshift_featuregroup(storage_connector_name, query, featurestore_metadata, featurestore):
    """
    Gets a feature dataset stored in Redshift

    Args:
        :storage_connector_name: the storage connector to the external data store
        :query: the query extracting the data from Redshift
        :featurestore_metadata: featurestore metadata
        :featurestore: the featurestore of the storage connector

    Returns:
        A spark dataframe of the Redshift feature group

    Raises:
        :StorageConnectorTypeNotSupportedForFeatureImport: if the connector type is not among the
                                                           supported feature-import connector types
    """
    storage_connector = _do_get_storage_connector(storage_connector_name, featurestore)
    if storage_connector.type == featurestore_metadata.settings.jdbc_connector_type:
        # Redshift connectors are stored as JDBC connectors; temporary credentials are
        # resolved from the cluster information encoded in the JDBC URL.
        region_name, cluster_identifier, database, user = util.parse_redhift_jdbc_url(
            storage_connector.connection_string + '?' + storage_connector.arguments)
        user, password = util.get_redshift_username_password(region_name, cluster_identifier, user, database)
        return util._find_spark().read.format(constants.SPARK_CONFIG.SPARK_JDBC_FORMAT) \
            .option(constants.SPARK_CONFIG.SPARK_JDBC_URL, storage_connector.connection_string) \
            .option(constants.SPARK_CONFIG.SPARK_JDBC_DBTABLE, '(' + query + ') tmp') \
            .option(constants.SPARK_CONFIG.SPARK_JDBC_USER, user) \
            .option(constants.SPARK_CONFIG.SPARK_JDBC_PW, password) \
            .load()
    elif storage_connector.type not in metadata_cache.settings.feature_import_connectors:
        # BUGFIX: the previous message had a typo ("conector") and ended with
        # "importation are: " without ever formatting in the supported list.
        raise StorageConnectorTypeNotSupportedForFeatureImport(
            "The storage connector type: {} is not supported for "
            "feature importation. Supported feature storage "
            "connectors for importation are: {}"
            .format(storage_connector.type, metadata_cache.settings.feature_import_connectors))
    # NOTE(review): a connector whose type is in feature_import_connectors but is not
    # the JDBC type falls through both branches and implicitly returns None — confirm
    # whether that is intended or should also raise.
def _do_add_metadata(featuregroup_name, name, value, featurestore=None, featuregroup_version=1):
    """
    Attach custom metadata to a feature group

    Resolves the numeric IDs of the feature group and its feature store, then
    stores the key/value pair through the Hopsworks REST API.

    Args:
        :featuregroup_name: the name of the featuregroup
        :name: the name (key) of the extended metadata
        :value: the value of the extended metadata
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        None
    """
    group_id = _get_featuregroup_id(featurestore, featuregroup_name, featuregroup_version)
    store_id = _get_featurestore_id(featurestore)
    rest_rpc._add_metadata(store_id, group_id, name, value)
def _do_get_metadata(featuregroup_name, name=None, featurestore=None, featuregroup_version=1):
    """
    Get the custom metadata attached to a feature group

    Resolves the numeric IDs of the feature group and its feature store, then
    fetches the metadata through the Hopsworks REST API.

    Args:
        :featuregroup_name: the name of the featuregroup
        :name: the name (key) of the extended metadata; None fetches all entries
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        A dictionary containing the metadata attached to the featuregroup
    """
    group_id = _get_featuregroup_id(featurestore, featuregroup_name, featuregroup_version)
    store_id = _get_featurestore_id(featurestore)
    return rest_rpc._get_metadata(store_id, group_id, name)
def _do_remove_metadata(featuregroup_name, name, featurestore=None, featuregroup_version=1):
    """
    Remove the custom metadata attached to a feature group

    Resolves the numeric IDs of the feature group and its feature store, then
    deletes the named metadata entry through the Hopsworks REST API.

    Args:
        :featuregroup_name: the name of the featuregroup
        :name: the name (key) of the extended metadata to remove
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        None
    """
    group_id = _get_featuregroup_id(featurestore, featuregroup_name, featuregroup_version)
    store_id = _get_featurestore_id(featurestore)
    rest_rpc._remove_metadata(store_id, group_id, name)
# Fetch the featurestore metadata on module import and cache it on the client so
# later API calls do not have to re-fetch it.
try:
    metadata_cache = _get_featurestore_metadata(featurestore=fs_utils._do_get_project_featurestore())
except Exception:
    # BUGFIX: the previous bare `except: pass` swallowed SystemExit/KeyboardInterrupt
    # and left `metadata_cache` undefined on failure (NameError at first use).
    # The fetch can legitimately fail (e.g. module imported outside a Hopsworks
    # project), so treat it as best-effort and leave the cache explicitly empty.
    metadata_cache = None
| 56.517618
| 144
| 0.683719
|
4a0c543eb940e76b0405a88a855aff248d415827
| 13,406
|
py
|
Python
|
cryptoapis/model/inline_response40331.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
cryptoapis/model/inline_response40331.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
cryptoapis/model/inline_response40331.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model class at call time and register it in module globals.

    Part of the OpenAPI Generator pattern: the import is deferred until first use
    (presumably to avoid circular imports between generated model modules — the
    class is consumed via ``globals()`` lookups in ``openapi_types``).
    """
    from cryptoapis.model.get_xrp_ripple_block_details_by_block_hash_e403 import GetXRPRippleBlockDetailsByBlockHashE403
    globals()['GetXRPRippleBlockDetailsByBlockHashE403'] = GetXRPRippleBlockDetailsByBlockHashE403
class InlineResponse40331(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This response model declares no enum-restricted properties.
    allowed_values = {
    }

    # No length/range/regex constraints on any property.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'api_version': (str,), # noqa: E501
            'request_id': (str,), # noqa: E501
            'error': (GetXRPRippleBlockDetailsByBlockHashE403,), # noqa: E501
            'context': (str,), # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model does not participate in polymorphic (discriminator-based) deserialization.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'api_version': 'apiVersion', # noqa: E501
        'request_id': 'requestId', # noqa: E501
        'error': 'error', # noqa: E501
        'context': 'context', # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, api_version, request_id, error, *args, **kwargs): # noqa: E501
        """InlineResponse40331 - a model defined in OpenAPI

        Args:
            api_version (str): Specifies the version of the API that incorporates this endpoint.
            request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
            error (GetXRPRippleBlockDetailsByBlockHashE403):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass OpenApiModel.__new__ bookkeeping; attributes are filled in manually below.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.api_version = api_version
        self.request_id = request_id
        self.error = error
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that every instance must carry.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, api_version, request_id, error, *args, **kwargs): # noqa: E501
        """InlineResponse40331 - a model defined in OpenAPI

        Args:
            api_version (str): Specifies the version of the API that incorporates this endpoint.
            request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
            error (GetXRPRippleBlockDetailsByBlockHashE403):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.api_version = api_version
        self.request_id = request_id
        self.error = error
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, the public constructor rejects read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 47.038596
| 484
| 0.598016
|
4a0c5514c9d7d0496d68ce7f6b0d6132974e6ad3
| 16,469
|
py
|
Python
|
eZmaxApi/model/ezsigndocument_request.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/ezsigndocument_request.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/ezsigndocument_request.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from eZmaxApi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from eZmaxApi.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model class at call time and register it in module globals.

    Part of the OpenAPI Generator pattern: the import is deferred until first use
    (presumably to avoid circular imports between generated model modules — the
    class is consumed via ``globals()`` lookups in ``openapi_types``).
    """
    from eZmaxApi.model.field_pki_language_id import FieldPkiLanguageID
    globals()['FieldPkiLanguageID'] = FieldPkiLanguageID
class EzsigndocumentRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('e_ezsigndocument_source',): {
'BASE64': "Base64",
'URL': "Url",
},
('e_ezsigndocument_format',): {
'PDF': "Pdf",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'e_ezsigndocument_source': (str,), # noqa: E501
'e_ezsigndocument_format': (str,), # noqa: E501
'fki_ezsignfolder_id': (int,), # noqa: E501
'dt_ezsigndocument_duedate': (str,), # noqa: E501
'fki_language_id': (FieldPkiLanguageID,), # noqa: E501
's_ezsigndocument_name': (str,), # noqa: E501
's_ezsigndocument_base64': (str,), # noqa: E501
's_ezsigndocument_url': (str,), # noqa: E501
'b_ezsigndocument_forcerepair': (bool,), # noqa: E501
's_ezsigndocument_password': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'e_ezsigndocument_source': 'eEzsigndocumentSource', # noqa: E501
'e_ezsigndocument_format': 'eEzsigndocumentFormat', # noqa: E501
'fki_ezsignfolder_id': 'fkiEzsignfolderID', # noqa: E501
'dt_ezsigndocument_duedate': 'dtEzsigndocumentDuedate', # noqa: E501
'fki_language_id': 'fkiLanguageID', # noqa: E501
's_ezsigndocument_name': 'sEzsigndocumentName', # noqa: E501
's_ezsigndocument_base64': 'sEzsigndocumentBase64', # noqa: E501
's_ezsigndocument_url': 'sEzsigndocumentUrl', # noqa: E501
'b_ezsigndocument_forcerepair': 'bEzsigndocumentForcerepair', # noqa: E501
's_ezsigndocument_password': 'sEzsigndocumentPassword', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, e_ezsigndocument_source, fki_ezsignfolder_id, dt_ezsigndocument_duedate, fki_language_id, s_ezsigndocument_name, *args, **kwargs): # noqa: E501
"""EzsigndocumentRequest - a model defined in OpenAPI
Args:
e_ezsigndocument_source (str): Indicates where to look for the document binary content.
fki_ezsignfolder_id (int): The unique ID of the Ezsignfolder
dt_ezsigndocument_duedate (str): The maximum date and time at which the document can be signed.
fki_language_id (FieldPkiLanguageID):
s_ezsigndocument_name (str): The name of the document that will be presented to Ezsignfoldersignerassociations
Keyword Args:
e_ezsigndocument_format (str): Indicates the format of the document.. defaults to "Pdf", must be one of ["Pdf", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
s_ezsigndocument_base64 (str): The Base64 encoded binary content of the document. This field is Required when eEzsigndocumentSource = Base64.. [optional] # noqa: E501
s_ezsigndocument_url (str): The url where the document content resides. This field is Required when eEzsigndocumentSource = Url.. [optional] # noqa: E501
b_ezsigndocument_forcerepair (bool): Try to repair the document or flatten it if it cannot be used for electronic signature. . [optional] if omitted the server will use the default value of True # noqa: E501
s_ezsigndocument_password (str): If the source document is password protected, the password to open/modify it.. [optional] if omitted the server will use the default value of "" # noqa: E501
"""
e_ezsigndocument_format = kwargs.get('e_ezsigndocument_format', "Pdf")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.e_ezsigndocument_source = e_ezsigndocument_source
self.e_ezsigndocument_format = e_ezsigndocument_format
self.fki_ezsignfolder_id = fki_ezsignfolder_id
self.dt_ezsigndocument_duedate = dt_ezsigndocument_duedate
self.fki_language_id = fki_language_id
self.s_ezsigndocument_name = s_ezsigndocument_name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
    @convert_js_args_to_python_args
    def __init__(self, e_ezsigndocument_source, fki_ezsignfolder_id, dt_ezsigndocument_duedate, fki_language_id, s_ezsigndocument_name, *args, **kwargs):  # noqa: E501
        """EzsigndocumentRequest - a model defined in OpenAPI

        Args:
            e_ezsigndocument_source (str): Indicates where to look for the document binary content.
            fki_ezsignfolder_id (int): The unique ID of the Ezsignfolder
            dt_ezsigndocument_duedate (str): The maximum date and time at which the document can be signed.
            fki_language_id (FieldPkiLanguageID):
            s_ezsigndocument_name (str): The name of the document that will be presented to Ezsignfoldersignerassociations

        Keyword Args:
            e_ezsigndocument_format (str): Indicates the format of the document.. defaults to "Pdf", must be one of ["Pdf", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            s_ezsigndocument_base64 (str): The Base64 encoded binary content of the document.  This field is Required when eEzsigndocumentSource = Base64.. [optional]  # noqa: E501
            s_ezsigndocument_url (str): The url where the document content resides.  This field is Required when eEzsigndocumentSource = Url.. [optional]  # noqa: E501
            b_ezsigndocument_forcerepair (bool): Try to repair the document or flatten it if it cannot be used for electronic signature. . [optional] if omitted the server will use the default value of True  # noqa: E501
            s_ezsigndocument_password (str): If the source document is password protected, the password to open/modify it.. [optional] if omitted the server will use the default value of ""  # noqa: E501
        """

        # NOTE(review): .get (not .pop) is used here, so if the caller passed
        # e_ezsigndocument_format explicitly it remains in kwargs and is also
        # re-assigned by the generic loop below — harmless but intentional in
        # the generator template.
        e_ezsigndocument_format = kwargs.get('e_ezsigndocument_format', "Pdf")
        # Pop the framework-internal keyword arguments first so the remaining
        # kwargs contain only (potential) model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # This model declares no positional varargs; anything left in *args
        # is a caller error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required properties are assigned explicitly (setattr goes through
        # the model's validating __setattr__ machinery).
        self.e_ezsigndocument_source = e_ezsigndocument_source
        self.e_ezsigndocument_format = e_ezsigndocument_format
        self.fki_ezsignfolder_id = fki_ezsignfolder_id
        self.dt_ezsigndocument_duedate = dt_ezsigndocument_duedate
        self.fki_language_id = fki_language_id
        self.s_ezsigndocument_name = s_ezsigndocument_name
        # Remaining kwargs: optional/unknown properties. Unknown keys are
        # silently dropped only when the configuration opts in.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be populated via
            # _from_openapi_data (server responses), never by client code.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 50.830247
| 220
| 0.616674
|
4a0c55d4bfb0f7bc2037c6071944b5e5f84dc85e
| 3,940
|
py
|
Python
|
python_koans/python3/koans/about_iteration.py
|
kipel/koans
|
752ce65a99c27a0e766276039bf48b4a5e98e62a
|
[
"MIT"
] | null | null | null |
python_koans/python3/koans/about_iteration.py
|
kipel/koans
|
752ce65a99c27a0e766276039bf48b4a5e98e62a
|
[
"MIT"
] | null | null | null |
python_koans/python3/koans/about_iteration.py
|
kipel/koans
|
752ce65a99c27a0e766276039bf48b4a5e98e62a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
    """Koans exploring iterators, map/filter/reduce and iteration idioms."""

    def test_iterators_are_a_type(self):
        it = iter(range(1, 6))
        fib = 0
        for num in it:
            fib += num
        self.assertEqual(15, fib)

    def test_iterating_with_next(self):
        stages = iter(['alpha', 'beta', 'gamma'])
        try:
            self.assertEqual('alpha', next(stages))
            next(stages)
            self.assertEqual('gamma', next(stages))
            next(stages)
        except StopIteration as ex:
            err_msg = 'Ran out of iterations'
            # FIX: assertRegexpMatches was deprecated in Python 3.2 and
            # removed in Python 3.12; assertRegex is the supported name.
            self.assertRegex(err_msg, 'Ran out of iterations')

    # ------------------------------------------------------------------

    def add_ten(self, item):
        return item + 10

    def test_map_transforms_elements_of_a_list(self):
        seq = [1, 2, 3]
        mapped_seq = list()

        mapping = map(self.add_ten, seq)

        self.assertNotEqual(list, mapping.__class__)
        self.assertEqual(map, mapping.__class__)
        # In Python 3 built in iterator funcs return iterable view objects
        # instead of lists
        for item in mapping:
            mapped_seq.append(item)

        self.assertEqual([11, 12, 13], mapped_seq)

        # Note, iterator methods actually return objects of iter type in
        # python 3. In python 2 map() would give you a list.

    def test_filter_selects_certain_items_from_a_list(self):
        def is_even(item):
            return (item % 2) == 0

        seq = [1, 2, 3, 4, 5, 6]
        even_numbers = list()

        for item in filter(is_even, seq):
            even_numbers.append(item)

        self.assertEqual([2, 4, 6], even_numbers)

    def test_just_return_first_item_found(self):
        def is_big_name(item):
            return len(item) > 4

        names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
        name = None

        iterator = filter(is_big_name, names)
        try:
            name = next(iterator)
        except StopIteration:
            msg = 'Ran out of big names'

        self.assertEqual("Clarence", name)

    # ------------------------------------------------------------------

    def add(self, accum, item):
        return accum + item

    def multiply(self, accum, item):
        return accum * item

    def test_reduce_will_blow_your_mind(self):
        import functools
        # As of Python 3 reduce() has been demoted from a builtin function
        # to the functools module.

        result = functools.reduce(self.add, [2, 3, 4])
        self.assertEqual(int, result.__class__)
        # Reduce() syntax is same as Python 2

        self.assertEqual(9, result)

        result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
        self.assertEqual(24, result2)

        # Extra Credit:
        # Describe in your own words what reduce does.

    # ------------------------------------------------------------------

    def test_use_pass_for_iterations_with_no_body(self):
        for num in range(1, 5):
            pass

        self.assertEqual(4, num)

    # ------------------------------------------------------------------

    def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
        # Ranges are an iterable sequence
        result = map(self.add_ten, range(1, 4))
        self.assertEqual([11, 12, 13], list(result))

        try:
            file = open("example_file.txt")

            try:
                def make_upcase(line):
                    return line.strip().upper()
                upcase_lines = map(make_upcase, file.readlines())
                self.assertEqual(["THIS", "IS", "A", "TEST"], list(upcase_lines))
            finally:
                # Arg, this is ugly.
                # We will figure out how to fix this later.
                file.close()
        except IOError:
            # should never happen
            self.fail()
| 28.550725
| 81
| 0.535025
|
4a0c560280165b751cabb04de5c60a539863e800
| 285,836
|
py
|
Python
|
databricks/koalas/frame.py
|
HG1112/koalas
|
580f48c81d3d2236c399063ce453f9170d88b954
|
[
"Apache-2.0"
] | 1
|
2019-12-06T05:01:34.000Z
|
2019-12-06T05:01:34.000Z
|
databricks/koalas/frame.py
|
HG1112/koalas
|
580f48c81d3d2236c399063ce453f9170d88b954
|
[
"Apache-2.0"
] | null | null | null |
databricks/koalas/frame.py
|
HG1112/koalas
|
580f48c81d3d2236c399063ce453f9170d88b954
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType)
from pyspark.sql.utils import AnalysisException
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function, align_diff_frames
from databricks.koalas.generic import _Frame
from databricks.koalas.internal import _InternalFrame, IndexMap, SPARK_INDEX_NAME_FORMAT
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import column_index_level, scol_for
from databricks.koalas.typedef import _infer_return_type, as_spark_type, as_python_type
from databricks.koalas.plot import KoalasFramePlotMethods
from databricks.koalas.config import get_option
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Both patterns extract the "[N rows x M columns]" footer that pandas appends
# to a truncated repr, so it can be rewritten with the true Koalas dimensions.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
    r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.floordiv(10)
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.rfloordiv(10)
angles degrees
circle NaN 0
triangle 3.0 0
rectangle 2.0 0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
# Type variable used for DataFrame's (pseudo-variadic) generic parameter.
T = TypeVar('T')

if (3, 5) <= sys.version_info < (3, 7):
    from typing import GenericMeta

    # This is a workaround to support variadic generic in DataFrame in Python 3.5+.
    # See https://github.com/python/typing/issues/193
    # We wrap the input params by a tuple to mimic variadic generic.
    old_getitem = GenericMeta.__getitem__  # type: ignore

    def new_getitem(self, params):
        # Only DataFrame subscription (marked via "is_dataframe") gets the
        # tuple-wrapping treatment; all other generics behave normally.
        if hasattr(self, "is_dataframe"):
            return old_getitem(self, Tuple[params])
        else:
            return old_getitem(self, params)

    GenericMeta.__getitem__ = new_getitem  # type: ignore
class DataFrame(_Frame, Generic[T]):
"""
Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, and a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
    def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
        # Dispatch on the type of `data`. For the already-structured inputs
        # (_InternalFrame, Spark DataFrame, Koalas Series) the pandas-style
        # constructor arguments are meaningless, so they must not be passed.
        if isinstance(data, _InternalFrame):
            # Adopt the internal frame as-is.
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            super(DataFrame, self).__init__(data)
        elif isinstance(data, spark.DataFrame):
            # Wrap a raw Spark DataFrame in a fresh internal frame.
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            super(DataFrame, self).__init__(_InternalFrame(data))
        elif isinstance(data, ks.Series):
            # Promote a Koalas Series to a single-column DataFrame.
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            data = data.to_dataframe()
            super(DataFrame, self).__init__(data._internal)
        else:
            # Everything else (dict, ndarray, pandas DataFrame, ...) goes
            # through pandas first, then is converted to an internal frame.
            if isinstance(data, pd.DataFrame):
                assert index is None
                assert columns is None
                assert dtype is None
                assert not copy
                pdf = data
            else:
                pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
            super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
    @property
    def _sdf(self) -> spark.DataFrame:
        """The underlying Spark DataFrame held by the internal frame."""
        return self._internal.sdf
    def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
        """
        Applies sfun to each column and returns a pd.Series where the number of rows equals the
        number of columns.

        Parameters
        ----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
            a 2-arg function that takes a Column and its DataType and returns a Column.
        name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'.
        numeric_only : boolean, default False
            If True, sfun is applied on numeric columns (including booleans) only.
        """
        from inspect import signature
        from databricks.koalas import Series
        if axis in ('index', 0, None):
            exprs = []
            # sfun's arity decides whether the column's DataType is passed too.
            num_args = len(signature(sfun).parameters)
            for idx in self._internal.column_index:
                col_sdf = self._internal.scol_for(idx)
                col_type = self._internal.spark_type_for(idx)

                is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
                min_or_max = sfun.__name__ in ('min', 'max')
                keep_column = not numeric_only or is_numeric_or_boolean or min_or_max

                if keep_column:
                    if isinstance(col_type, BooleanType) and not min_or_max:
                        # Stat functions cannot be used with boolean values by default
                        # Thus, cast to integer (true to 1 and false to 0)
                        # Exclude the min and max methods though since those work with booleans
                        col_sdf = col_sdf.cast('integer')
                    if num_args == 1:
                        # Only pass in the column if sfun accepts only one arg
                        col_sdf = sfun(col_sdf)
                    else:  # must be 2
                        assert num_args == 2
                        # Pass in both the column and its data type if sfun accepts two args
                        col_sdf = sfun(col_sdf, col_type)
                    exprs.append(col_sdf.alias(str(idx) if len(idx) > 1 else idx[0]))

            sdf = self._sdf.select(*exprs)
            pdf = sdf.toPandas()
            if self._internal.column_index_level > 1:
                pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_index)
            # A single aggregate row is expected; its values become the Series.
            assert len(pdf) == 1, (sdf, pdf)
            row = pdf.iloc[0]
            row.name = None
            # TODO: return Koalas series.
            return row  # Return first row as a Series
        elif axis in ('columns', 1):
            # Here we execute with the first 1000 to get the return type.
            # If the records were less than 1000, it uses pandas API directly for a shortcut.
            limit = get_option("compute.shortcut_limit")
            pdf = self.head(limit + 1)._to_internal_pandas()
            pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
            if len(pdf) <= limit:
                return Series(pser)

            # Otherwise compute row-wise with a pandas UDF, using the dtype
            # inferred from the shortcut sample as the Spark return type.
            @pandas_udf(returnType=as_spark_type(pser.dtype.type))
            def calculate_columns_axis(*cols):
                return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)

            df = self._sdf.select(calculate_columns_axis(*self._internal.data_scols).alias("0"))
            return DataFrame(df)["0"]
        else:
            raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
    # Arithmetic Operators
    def _map_series_op(self, op, other):
        """Apply the Series-level operator named ``op`` column by column
        against ``other`` (a scalar, Series, or another DataFrame)."""
        from databricks.koalas.base import IndexOpsMixin
        if not isinstance(other, DataFrame) and (isinstance(other, IndexOpsMixin) or
                                                 is_sequence(other)):
            raise ValueError(
                "%s with a sequence is currently not supported; "
                "however, got %s." % (op, type(other)))

        if isinstance(other, DataFrame) and self is not other:
            if self._internal.column_index_level != other._internal.column_index_level:
                raise ValueError('cannot join with no overlapping index names')

            # Different DataFrames: align the two frames on their indexes and
            # apply the operator pairwise over the matched columns.
            def apply_op(kdf, this_column_index, that_column_index):
                for this_idx, that_idx in zip(this_column_index, that_column_index):
                    yield (getattr(kdf[this_idx], op)(kdf[that_idx]), this_idx)

            return align_diff_frames(apply_op, self, other, fillna=True, how="full")
        else:
            # DataFrame and Series (or scalar, or self-with-self): no
            # alignment needed — apply per column over the same frame.
            applied = []
            for idx in self._internal.column_index:
                applied.append(getattr(self[idx], op)(other))
            sdf = self._sdf.select(
                self._internal.index_scols + [c._scol for c in applied])
            internal = self._internal.copy(sdf=sdf,
                                           data_columns=[c._internal.data_columns[0]
                                                         for c in applied],
                                           column_index=[c._internal.column_index[0]
                                                         for c in applied])
            return DataFrame(internal)
    # Each binary arithmetic dunder (and its reflected "r" variant) simply
    # delegates to _map_series_op with the matching Series operator name.
    def __add__(self, other):
        return self._map_series_op("add", other)

    def __radd__(self, other):
        return self._map_series_op("radd", other)

    def __div__(self, other):
        return self._map_series_op("div", other)

    def __rdiv__(self, other):
        return self._map_series_op("rdiv", other)

    def __truediv__(self, other):
        return self._map_series_op("truediv", other)

    def __rtruediv__(self, other):
        return self._map_series_op("rtruediv", other)

    def __mul__(self, other):
        return self._map_series_op("mul", other)

    def __rmul__(self, other):
        return self._map_series_op("rmul", other)

    def __sub__(self, other):
        return self._map_series_op("sub", other)

    def __rsub__(self, other):
        return self._map_series_op("rsub", other)

    def __pow__(self, other):
        return self._map_series_op("pow", other)

    def __rpow__(self, other):
        return self._map_series_op("rpow", other)

    def __mod__(self, other):
        return self._map_series_op("mod", other)

    def __rmod__(self, other):
        return self._map_series_op("rmod", other)

    def __floordiv__(self, other):
        return self._map_series_op("floordiv", other)

    def __rfloordiv__(self, other):
        return self._map_series_op("rfloordiv", other)
    def add(self, other):
        return self + other

    # create accessor for plot; the docstrings of hist/kde are borrowed from
    # the accessor methods so help() shows the full plotting documentation.
    plot = CachedAccessor("plot", KoalasFramePlotMethods)

    def hist(self, bins=10, **kwds):
        return self.plot.hist(bins, **kwds)
    hist.__doc__ = KoalasFramePlotMethods.hist.__doc__

    def kde(self, bw_method=None, ind=None, **kwds):
        return self.plot.kde(bw_method, ind, **kwds)
    kde.__doc__ = KoalasFramePlotMethods.kde.__doc__

    # Flex-arithmetic docstrings are generated from the shared template.
    add.__doc__ = _flex_doc_FRAME.format(
        desc='Addition',
        op_name='+',
        equiv='dataframe + other',
        reverse='radd')
    # Flexible arithmetic wrappers (pandas-style): each method forwards to
    # the corresponding operator, and its docstring is generated from the
    # shared _flex_doc_FRAME template.
    def radd(self, other):
        return other + self

    radd.__doc__ = _flex_doc_FRAME.format(
        desc='Addition',
        op_name="+",
        equiv="other + dataframe",
        reverse='add')

    def div(self, other):
        return self / other

    div.__doc__ = _flex_doc_FRAME.format(
        desc='Floating division',
        op_name="/",
        equiv="dataframe / other",
        reverse='rdiv')

    divide = div

    def rdiv(self, other):
        return other / self

    rdiv.__doc__ = _flex_doc_FRAME.format(
        desc='Floating division',
        op_name="/",
        equiv="other / dataframe",
        reverse='div')

    def truediv(self, other):
        return self / other

    truediv.__doc__ = _flex_doc_FRAME.format(
        desc='Floating division',
        op_name="/",
        equiv="dataframe / other",
        reverse='rtruediv')

    def rtruediv(self, other):
        return other / self

    rtruediv.__doc__ = _flex_doc_FRAME.format(
        desc='Floating division',
        op_name="/",
        equiv="other / dataframe",
        reverse='truediv')

    def mul(self, other):
        return self * other

    mul.__doc__ = _flex_doc_FRAME.format(
        desc='Multiplication',
        op_name="*",
        equiv="dataframe * other",
        reverse='rmul')

    multiply = mul

    def rmul(self, other):
        return other * self

    rmul.__doc__ = _flex_doc_FRAME.format(
        desc='Multiplication',
        op_name="*",
        equiv="other * dataframe",
        reverse='mul')

    def sub(self, other):
        return self - other

    sub.__doc__ = _flex_doc_FRAME.format(
        desc='Subtraction',
        op_name="-",
        equiv="dataframe - other",
        reverse='rsub')

    subtract = sub

    def rsub(self, other):
        return other - self

    rsub.__doc__ = _flex_doc_FRAME.format(
        desc='Subtraction',
        op_name="-",
        equiv="other - dataframe",
        reverse='sub')

    def mod(self, other):
        return self % other

    mod.__doc__ = _flex_doc_FRAME.format(
        desc='Modulo',
        op_name='%',
        equiv='dataframe % other',
        reverse='rmod')

    def rmod(self, other):
        return other % self

    rmod.__doc__ = _flex_doc_FRAME.format(
        desc='Modulo',
        op_name='%',
        equiv='other % dataframe',
        reverse='mod')

    def pow(self, other):
        return self ** other

    pow.__doc__ = _flex_doc_FRAME.format(
        desc='Exponential power of series',
        op_name='**',
        equiv='dataframe ** other',
        reverse='rpow')

    def rpow(self, other):
        return other ** self

    rpow.__doc__ = _flex_doc_FRAME.format(
        desc='Exponential power',
        op_name='**',
        equiv='other ** dataframe',
        reverse='pow')

    def floordiv(self, other):
        return self // other

    floordiv.__doc__ = _flex_doc_FRAME.format(
        desc='Integer division',
        op_name='//',
        equiv='dataframe // other',
        reverse='rfloordiv')

    def rfloordiv(self, other):
        return other // self

    rfloordiv.__doc__ = _flex_doc_FRAME.format(
        desc='Integer division',
        op_name='//',
        equiv='other // dataframe',
        reverse='floordiv')
    # Comparison Operators
    # NOTE: __eq__/__ne__ return element-wise boolean DataFrames (pandas
    # semantics), not a single bool — DataFrame is therefore not hashable
    # in the usual sense.
    def __eq__(self, other):
        return self._map_series_op("eq", other)

    def __ne__(self, other):
        return self._map_series_op("ne", other)

    def __lt__(self, other):
        return self._map_series_op("lt", other)

    def __le__(self, other):
        return self._map_series_op("le", other)

    def __ge__(self, other):
        return self._map_series_op("ge", other)

    def __gt__(self, other):
        return self._map_series_op("gt", other)
    def eq(self, other):
        """
        Compare if the current value is equal to the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.eq(1)
               a     b
        a   True  True
        b  False  None
        c  False  True
        d  False  None
        """
        return self == other

    # NOTE(review): aliasing `equals` to element-wise `eq` diverges from
    # pandas, where DataFrame.equals returns a single bool — confirm this is
    # intentional before relying on it.
    equals = eq

    def gt(self, other):
        """
        Compare if the current value is greater than the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.gt(2)
               a      b
        a  False  False
        b  False   None
        c   True  False
        d   True   None
        """
        return self > other

    def ge(self, other):
        """
        Compare if the current value is greater than or equal to the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.ge(1)
              a     b
        a  True  True
        b  True  None
        c  True  True
        d  True  None
        """
        return self >= other

    def lt(self, other):
        """
        Compare if the current value is less than the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.lt(1)
               a      b
        a  False  False
        b  False   None
        c  False  False
        d  False   None
        """
        return self < other

    def le(self, other):
        """
        Compare if the current value is less than or equal to the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.le(2)
               a     b
        a   True  True
        b   True  None
        c  False  True
        d  False  None
        """
        return self <= other

    def ne(self, other):
        """
        Compare if the current value is not equal to the other.

        >>> df = ks.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.ne(1)
               a      b
        a  False  False
        b   True   None
        c   True  False
        d   True   None
        """
        return self != other
    def applymap(self, func):
        """
        Apply a function to a Dataframe elementwise.

        This method applies a function that accepts and returns a scalar
        to every element of a DataFrame.

        .. note:: this API executes the function once to infer the type which is
             potentially expensive, for instance, when the dataset is created after
             aggregations or sorting.

             To avoid this, specify return type in ``func``, for instance, as below:

             >>> def square(x) -> np.int32:
             ...     return x ** 2

             Koalas uses return type hint and does not try to infer the type.

        Parameters
        ----------
        func : callable
            Python function, returns a single value from a single value.

        Returns
        -------
        DataFrame
            Transformed DataFrame.

        Examples
        --------
        >>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
        >>> df
               0      1
        0  1.000  2.120
        1  3.356  4.567

        >>> def str_len(x) -> int:
        ...     return len(str(x))
        >>> df.applymap(str_len)
           0  1
        0  3  4
        1  5  5

        >>> def power(x) -> float:
        ...     return x ** 2
        >>> df.applymap(power)
                   0          1
        0   1.000000   4.494400
        1  11.262736  20.857489

        You can omit the type hint and let Koalas infer its type.

        >>> df.applymap(lambda x: x ** 2)
                   0          1
        0   1.000000   4.494400
        1  11.262736  20.857489
        """
        applied = []
        for idx in self._internal.column_index:
            # TODO: We can implement shortcut theoretically since it creates new DataFrame
            # anyway and we don't have to worry about operations on different DataFrames.
            # Delegates element-wise application to Series.apply per column.
            applied.append(self[idx].apply(func))

        sdf = self._sdf.select(
            self._internal.index_scols + [c._scol for c in applied])

        internal = self._internal.copy(sdf=sdf,
                                       data_columns=[c._internal.data_columns[0] for c in applied],
                                       column_index=[c._internal.column_index[0] for c in applied])

        return DataFrame(internal)
    # TODO: not all arguments are implemented comparing to Pandas' for now.
    def aggregate(self, func: Union[List[str], Dict[str, List[str]]]):
        """Aggregate using one or more operations over the specified axis.

        Parameters
        ----------
        func : dict or a list
             a dict mapping from column name (string) to
             aggregate functions (list of strings).
             If a list is given, the aggregation is performed against
             all columns.

        Returns
        -------
        DataFrame

        Notes
        -----
        `agg` is an alias for `aggregate`. Use the alias.

        See Also
        --------
        databricks.koalas.Series.groupby
        databricks.koalas.DataFrame.groupby

        Examples
        --------
        >>> df = ks.DataFrame([[1, 2, 3],
        ...                    [4, 5, 6],
        ...                    [7, 8, 9],
        ...                    [np.nan, np.nan, np.nan]],
        ...                   columns=['A', 'B', 'C'])

        >>> df
             A    B    C
        0  1.0  2.0  3.0
        1  4.0  5.0  6.0
        2  7.0  8.0  9.0
        3  NaN  NaN  NaN

        Aggregate these functions over the rows.

        >>> df.agg(['sum', 'min'])[['A', 'B', 'C']]
                A     B     C
        min   1.0   2.0   3.0
        sum  12.0  15.0  18.0

        Different aggregations per column.

        >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']]
                A    B
        max   NaN  8.0
        min   1.0  2.0
        sum  12.0  NaN
        """
        from databricks.koalas.groupby import GroupBy
        # Normalize a list of function names into the dict form
        # {column -> [functions]} applied to every column.
        if isinstance(func, list):
            if all((isinstance(f, str) for f in func)):
                func = dict([
                    (column, func) for column in self.columns])
            else:
                raise ValueError("If the given function is a list, it "
                                 "should only contains function names as strings.")

        if not isinstance(func, dict) or \
            not all(isinstance(key, str) and
                    (isinstance(value, str) or
                     isinstance(value, list) and all(isinstance(v, str) for v in value))
                    for key, value in func.items()):
            raise ValueError("aggs must be a dict mapping from column name (string) to aggregate "
                             "functions (list of strings).")

        kdf = DataFrame(GroupBy._spark_groupby(self, func, ()))  # type: DataFrame

        # The codes below basically converts:
        #
        #    A         B
        #  sum  min  min  max
        #  12.0  1.0  2.0  8.0
        #
        # to:
        #       A    B
        # max  NaN  8.0
        # min  1.0  2.0
        # sum  12.0  NaN
        #
        # Aggregated output is usually pretty much small. So it is fine to directly use pandas API.
        pdf = kdf.to_pandas().stack()
        pdf.index = pdf.index.droplevel()
        pdf.columns.names = [None]
        pdf.index.names = [None]

        return DataFrame(pdf[list(func.keys())])

    agg = aggregate
    def corr(self, method='pearson'):
        """
        Compute pairwise correlation of columns, excluding NA/null values.
        Parameters
        ----------
        method : {'pearson', 'spearman'}
            * pearson : standard correlation coefficient
            * spearman : Spearman rank correlation
        Returns
        -------
        y : pandas.DataFrame
        See Also
        --------
        Series.corr
        Examples
        --------
        >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df.corr('pearson')
                  dogs      cats
        dogs  1.000000 -0.851064
        cats -0.851064  1.000000
        >>> df.corr('spearman')
                  dogs      cats
        dogs  1.000000 -0.948683
        cats -0.948683  1.000000
        Notes
        -----
        There are behavior differences between Koalas and pandas.
        * the `method` argument only accepts 'pearson', 'spearman'
        * the data should not contain NaNs. Koalas will return an error.
        * Koalas doesn't support the following argument(s).
          * `min_periods` argument is not supported
        """
        # Delegates to the free function ``corr`` (presumably imported at the
        # top of this module — not the method itself); no validation happens here.
        return corr(self, method)
def iteritems(self) -> Iterable:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
    def items(self) -> Iterable:
        """This is an alias of ``iteritems``: iterate over (column name, Series) pairs."""
        return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LateX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import get_option, set_option
>>> set_option('compute.max_rows', 1000)
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more then the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' "
"to retrieve to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive."
.format(max_compute_count))
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
pairs = F.explode(F.array(*[
F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
[self[idx]._scol.alias("value")]
) for idx in self._internal.column_index]))
exploded_df = self._sdf.withColumn("pairs", pairs).select(
[F.to_json(F.struct(F.array([scol.cast('string')
for scol in self._internal.index_scols])
.alias('a'))).alias('index'),
F.col("pairs.*")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [SPARK_INDEX_NAME_FORMAT(i)
for i in range(self._internal.column_index_level)]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index')
transposed_df = pivoted_df.agg(F.first(F.col("value")))
new_data_columns = list(filter(lambda x: x not in internal_index_columns,
transposed_df.columns))
internal = self._internal.copy(
sdf=transposed_df,
data_columns=new_data_columns,
index_map=[(col, None) for col in internal_index_columns],
column_index=[tuple(json.loads(col)['a']) for col in new_data_columns],
column_index_names=None)
return DataFrame(internal)
T = property(transpose)
    def transform(self, func):
        """
        Call ``func`` on self producing a Series with transformed values
        and that has the same length as its input.
        .. note:: this API executes the function once to infer the type which is
            potentially expensive, for instance, when the dataset is created after
            aggregations or sorting.
            To avoid this, specify return type in ``func``, for instance, as below:
            >>> def square(x) -> ks.Series[np.int32]:
            ...     return x ** 2
            Koalas uses return type hint and does not try to infer the type.
        .. note:: the series within ``func`` is actually a pandas series, and
            the length of each series is not guaranteed.
        Parameters
        ----------
        func : function
            Function to use for transforming the data. It must work when pandas Series
            is passed.
        Returns
        -------
        DataFrame
            A DataFrame that must have the same length as self.
        Raises
        ------
        Exception : If the returned DataFrame has a different length than self.
        Examples
        --------
        >>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
        >>> df
           A  B
        0  0  1
        1  1  2
        2  2  3
        >>> def square(x) -> ks.Series[np.int32]:
        ...     return x ** 2
        >>> df.transform(square)
           A  B
        0  0  1
        1  1  4
        2  4  9
        You can omit the type hint and let Koalas infer its type.
        >>> df.transform(lambda x: x ** 2)
           A  B
        0  0  1
        1  1  4
        2  4  9
        For multi-index columns:
        >>> df.columns = [('X', 'A'), ('X', 'B')]
        >>> df.transform(square)  # doctest: +NORMALIZE_WHITESPACE
           X
           A  B
        0  0  1
        1  1  4
        2  4  9
        >>> df.transform(lambda x: x ** 2)  # doctest: +NORMALIZE_WHITESPACE
           X
           A  B
        0  0  1
        1  1  4
        2  4  9
        """
        assert callable(func), "the first argument should be a callable function."
        spec = inspect.getfullargspec(func)
        return_sig = spec.annotations.get("return", None)
        # No return-type annotation means the output schema must be discovered
        # by actually running ``func`` on sampled data (potentially expensive).
        should_infer_schema = return_sig is None
        if should_infer_schema:
            # Here we execute with the first 1000 to get the return type.
            # If the records were less than 1000, it uses pandas API directly for a shortcut.
            limit = get_option("compute.shortcut_limit")
            # One extra row tells us whether the frame fits entirely in the sample.
            pdf = self.head(limit + 1)._to_internal_pandas()
            transformed = pdf.transform(func)
            kdf = DataFrame(transformed)
            if len(pdf) <= limit:
                # The whole frame fit in the sample, so the pandas result is exact.
                return kdf
            applied = []
            for input_idx, output_idx in zip(
                    self._internal.column_index, kdf._internal.column_index):
                # Re-wrap ``func`` with the per-column return type inferred from
                # the sampled run so Spark does not have to infer it again.
                wrapped = ks.pandas_wraps(
                    func,
                    return_col=as_python_type(kdf[output_idx].spark_type))
                applied.append(wrapped(self[input_idx]).rename(input_idx))
        else:
            # A return type hint was given; trust it and skip the sampling pass.
            wrapped = ks.pandas_wraps(func)
            applied = []
            for idx in self._internal.column_index:
                applied.append(wrapped(self[idx]).rename(idx))
        # Assemble a new frame from the original index plus the transformed columns.
        sdf = self._sdf.select(
            self._internal.index_scols + [c._scol for c in applied])
        internal = self._internal.copy(sdf=sdf,
                                       data_columns=[c._internal.data_columns[0] for c in applied],
                                       column_index=[c._internal.column_index[0] for c in applied])
        return DataFrame(internal)
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._internal = self.drop(item)._internal
return result
# TODO: add axis parameter can work when '1' or 'columns'
    def xs(self, key, axis=0, level=None):
        """
        Return cross-section from the DataFrame.
        This method takes a `key` argument to select data at a particular
        level of a MultiIndex.
        Parameters
        ----------
        key : label or tuple of label
            Label contained in the index, or partially in a MultiIndex.
        axis : 0 or 'index', default 0
            Axis to retrieve cross-section on.
            currently only support 0 or 'index'
        level : object, defaults to first n levels (n=1 or len(key))
            In case of a key partially contained in a MultiIndex, indicate
            which levels are used. Levels can be referred by label or position.
        Returns
        -------
        DataFrame
            Cross-section from the original DataFrame
            corresponding to the selected index levels.
        See Also
        --------
        DataFrame.loc : Access a group of rows and columns
            by label(s) or a boolean array.
        DataFrame.iloc : Purely integer-location based indexing
            for selection by position.
        Examples
        --------
        >>> d = {'num_legs': [4, 4, 2, 2],
        ...      'num_wings': [0, 0, 2, 2],
        ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
        ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
        ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
        >>> df = ks.DataFrame(data=d)
        >>> df = df.set_index(['class', 'animal', 'locomotion'])
        >>> df  # doctest: +NORMALIZE_WHITESPACE
                                   num_legs  num_wings
        class  animal  locomotion
        mammal cat     walks              4          0
               dog     walks              4          0
               bat     flies              2          2
        bird   penguin walks              2          2
        Get values at specified index
        >>> df.xs('mammal')  # doctest: +NORMALIZE_WHITESPACE
                           num_legs  num_wings
        animal locomotion
        cat    walks              4          0
        dog    walks              4          0
        bat    flies              2          2
        Get values at several indexes
        >>> df.xs(('mammal', 'dog'))  # doctest: +NORMALIZE_WHITESPACE
                    num_legs  num_wings
        locomotion
        walks              4          0
        Get values at specified index and level
        >>> df.xs('cat', level=1)  # doctest: +NORMALIZE_WHITESPACE
                           num_legs  num_wings
        class  locomotion
        mammal walks              4          0
        """
        from databricks.koalas.series import _col
        # --- argument validation -------------------------------------------------
        if not isinstance(key, (str, tuple)):
            raise ValueError("'key' should be string or tuple that contains strings")
        # NOTE: iterating a str yields single-character strings, so a plain
        # string key always passes this check; it effectively constrains tuples.
        if not all(isinstance(index, str) for index in key):
            raise ValueError("'key' should have index names as only strings "
                             "or a tuple that contain index names as only strings")
        # Only the row axis is supported here.
        if axis not in [0, 'index']:
            raise ValueError('axis should be either 0 or "index" currently.')
        # Normalize a scalar key to a one-element tuple of index labels.
        if isinstance(key, str):
            key = (key,)
        if len(key) > len(self._internal.index_scols):
            raise KeyError("Key length ({}) exceeds index depth ({})"
                           .format(len(key), len(self._internal.index_scols)))
        if level is None:
            level = 0
        # Keep every Spark column except the index levels consumed by ``key``
        # (the window [level, level + len(key))).
        scols = self._internal.scols[:level] + self._internal.scols[level+len(key):]
        # One equality predicate per consumed index level.
        rows = [self._internal.scols[lvl] == index
                for lvl, index in enumerate(key, level)]
        # AND all predicates together to filter down to the cross-section.
        sdf = self._sdf.select(scols) \
            .where(reduce(lambda x, y: x & y, rows))
        if len(key) == len(self._internal.index_scols):
            # All index levels were consumed: the single remaining row is
            # transposed and returned as a Series named after the key.
            result = _col(DataFrame(_InternalFrame(sdf=sdf)).T)
            result.name = key
        else:
            # Some index levels remain: return a DataFrame whose index map
            # drops the consumed levels.
            internal = self._internal.copy(
                sdf=sdf,
                index_map=self._internal.index_map[:level] +
                self._internal.index_map[level+len(key):])
            result = DataFrame(internal)
        return result
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.column_index) == 0 or self._sdf.rdd.isEmpty()
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation fo the DataFrame.
.. note:: currently it collects top 1000 rows and return its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ks.range(1001).style # doctest: +ELLIPSIS
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option('compute.max_rows')
pdf = self.head(max_results + 1).to_pandas()
if len(pdf) > max_results:
warnings.warn(
"'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(self, keys, drop=True, append=False, inplace=False):
    """Set the DataFrame index (row labels) using one or more existing columns.

    Set the DataFrame index (row labels) using one or more existing
    columns or arrays (of the correct length). The index can replace the
    existing index or expand on it.

    Parameters
    ----------
    keys : label or array-like or list of labels/arrays
        This parameter can be either a single column key, a single array of
        the same length as the calling DataFrame, or a list containing an
        arbitrary combination of column keys and arrays. Here, "array"
        encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
    drop : bool, default True
        Delete columns to be used as the new index.
    append : bool, default False
        Whether to append columns to existing index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).

    Returns
    -------
    DataFrame
        Changed row labels.

    See Also
    --------
    DataFrame.reset_index : Opposite of set_index.

    Examples
    --------
    >>> df = ks.DataFrame({'month': [1, 4, 7, 10],
    ...                    'year': [2012, 2014, 2013, 2014],
    ...                    'sale': [55, 40, 84, 31]},
    ...                   columns=['month', 'year', 'sale'])
    >>> df
       month  year  sale
    0      1  2012    55
    1      4  2014    40
    2      7  2013    84
    3     10  2014    31

    Set the index to become the 'month' column:

    >>> df.set_index('month')  # doctest: +NORMALIZE_WHITESPACE
           year  sale
    month
    1      2012    55
    4      2014    40
    7      2013    84
    10     2014    31

    Create a MultiIndex using columns 'year' and 'month':

    >>> df.set_index(['year', 'month'])  # doctest: +NORMALIZE_WHITESPACE
                sale
    year  month
    2012  1     55
    2014  4     40
    2013  7     84
    2014  10    31
    """
    # Normalize `keys` to a list; a single str or tuple label is one key.
    if isinstance(keys, (str, tuple)):
        keys = [keys]
    else:
        keys = list(keys)
    # Validate every key against the existing column labels up front, so a
    # bad key fails before any internal state is rebuilt.
    columns = set(self.columns)
    for key in keys:
        if key not in columns:
            raise KeyError(key)
    # Internally, column labels are always tuples (to support MultiIndex columns).
    keys = [key if isinstance(key, tuple) else (key,) for key in keys]

    if drop:
        # The promoted columns are removed from the data columns.
        column_index = [idx for idx in self._internal.column_index if idx not in keys]
    else:
        column_index = self._internal.column_index
    if append:
        # Extend the existing index map rather than replacing it.
        index_map = self._internal.index_map + [(self._internal.column_name_for(idx), idx)
                                                for idx in keys]
    else:
        index_map = [(self._internal.column_name_for(idx), idx) for idx in keys]

    # Rebuild the internal frame; the underlying Spark DataFrame is untouched,
    # only the index/column bookkeeping changes.
    internal = self._internal.copy(index_map=index_map,
                                   column_index=column_index,
                                   data_columns=[self._internal.column_name_for(idx)
                                                 for idx in column_index])

    if inplace:
        # In-place: swap the internal frame; returns None like pandas.
        self._internal = internal
    else:
        return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
    """Reset the index, or a level of it.

    For DataFrame with multi-level index, return new DataFrame with labeling information in
    the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
    For a standard index, the index name will be used (if set), otherwise a default 'index' or
    'level_0' (if 'index' is already taken) will be used.

    Parameters
    ----------
    level : int, str, tuple, or list, default None
        Only remove the given levels from the index. Removes all levels by
        default.
    drop : bool, default False
        Do not try to insert index into dataframe columns. This resets
        the index to the default integer index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).
    col_level : int or str, default 0
        If the columns have multiple levels, determines which level the
        labels are inserted into. By default it is inserted into the first
        level.
    col_fill : object, default ''
        If the columns have multiple levels, determines how the other
        levels are named. If None then the index name is repeated.

    Returns
    -------
    DataFrame
        DataFrame with the new index.

    See Also
    --------
    DataFrame.set_index : Opposite of reset_index.

    Examples
    --------
    >>> df = ks.DataFrame([('bird', 389.0),
    ...                    ('bird', 24.0),
    ...                    ('mammal', 80.5),
    ...                    ('mammal', np.nan)],
    ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
    ...                   columns=('class', 'max_speed'))
    >>> df
             class  max_speed
    falcon    bird      389.0
    parrot    bird       24.0
    lion    mammal       80.5
    monkey  mammal        NaN

    When we reset the index, the old index is added as a column. Unlike pandas, Koalas
    does not automatically add a sequential index. The following 0, 1, 2, 3 are only
    there when we display the DataFrame.

    >>> df.reset_index()
        index   class  max_speed
    0  falcon    bird      389.0
    1  parrot    bird       24.0
    2    lion  mammal       80.5
    3  monkey  mammal        NaN

    We can use the `drop` parameter to avoid the old index being added as
    a column:

    >>> df.reset_index(drop=True)
        class  max_speed
    0    bird      389.0
    1    bird       24.0
    2  mammal       80.5
    3  mammal        NaN

    You can also use `reset_index` with `MultiIndex`.

    >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
    ...                                    ('bird', 'parrot'),
    ...                                    ('mammal', 'lion'),
    ...                                    ('mammal', 'monkey')],
    ...                                   names=['class', 'name'])
    >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
    ...                                      ('species', 'type')])
    >>> df = ks.DataFrame([(389.0, 'fly'),
    ...                    ( 24.0, 'fly'),
    ...                    ( 80.5, 'run'),
    ...                    (np.nan, 'jump')],
    ...                   index=index,
    ...                   columns=columns)
    >>> df  # doctest: +NORMALIZE_WHITESPACE
                   speed species
                     max    type
    class  name
    bird   falcon  389.0     fly
           parrot   24.0     fly
    mammal lion     80.5     run
           monkey    NaN    jump

    If the index has multiple levels, we can reset a subset of them:

    >>> df.reset_index(level='class')  # doctest: +NORMALIZE_WHITESPACE
             class  speed species
                      max    type
    name
    falcon    bird  389.0     fly
    parrot    bird   24.0     fly
    lion    mammal   80.5     run
    monkey  mammal    NaN    jump

    If we are not dropping the index, by default, it is placed in the top
    level. We can place it in another level:

    >>> df.reset_index(level='class', col_level=1)  # doctest: +NORMALIZE_WHITESPACE
                    speed species
             class    max    type
    name
    falcon    bird  389.0     fly
    parrot    bird   24.0     fly
    lion    mammal   80.5     run
    monkey  mammal    NaN    jump

    When the index is inserted under another level, we can specify under
    which one with the parameter `col_fill`:

    >>> df.reset_index(level='class', col_level=1,
    ...                col_fill='species')  # doctest: +NORMALIZE_WHITESPACE
                  species  speed species
                    class    max    type
    name
    falcon           bird  389.0     fly
    parrot           bird   24.0     fly
    lion           mammal   80.5     run
    monkey         mammal    NaN    jump

    If we specify a nonexistent level for `col_fill`, it is created:

    >>> df.reset_index(level='class', col_level=1,
    ...                col_fill='genus')  # doctest: +NORMALIZE_WHITESPACE
                    genus  speed species
                    class    max    type
    name
    falcon           bird  389.0     fly
    parrot           bird   24.0     fly
    lion           mammal   80.5     run
    monkey         mammal    NaN    jump
    """
    multi_index = len(self._internal.index_map) > 1

    def rename(index):
        # Default column label for an unnamed index level: 'level_<n>' for a
        # MultiIndex, 'index' for a single-level index unless 'index' is
        # already a column label, in which case fall back to 'level_<n>'.
        if multi_index:
            return ('level_{}'.format(index),)
        else:
            if ('index',) not in self._internal.column_index:
                return ('index',)
            else:
                return ('level_{}'.format(index),)

    if level is None:
        # Reset every level: all index columns become data columns.
        new_index_map = [(column, name if name is not None else rename(i))
                         for i, (column, name) in enumerate(self._internal.index_map)]
        index_map = []
    else:
        # Normalize `level` to a list of positions (idx) into the index map.
        if isinstance(level, (int, str)):
            level = [level]
        level = list(level)

        if all(isinstance(l, int) for l in level):
            for lev in level:
                if lev >= len(self._internal.index_map):
                    raise IndexError('Too many levels: Index has only {} level, not {}'
                                     .format(len(self._internal.index_map), lev + 1))
            idx = level
        elif all(isinstance(lev, str) for lev in level):
            idx = []
            for l in level:
                try:
                    i = self._internal.index_columns.index(l)
                    idx.append(i)
                except ValueError:
                    if multi_index:
                        raise KeyError('Level unknown not found')
                    else:
                        raise KeyError('Level unknown must be same as name ({})'
                                       .format(self._internal.index_columns[0]))
        else:
            # Mixing ints and strings in `level` is not supported.
            raise ValueError('Level should be all int or all string.')
        idx.sort()

        # Split the index map: the selected levels move to `new_index_map`
        # (they become data columns); the rest stay as the frame's index.
        new_index_map = []
        index_map = self._internal.index_map.copy()
        for i in idx:
            info = self._internal.index_map[i]
            index_column, index_name = info
            new_index_map.append(
                (index_column,
                 index_name if index_name is not None else rename(i)))
            index_map.remove(info)

    # The promoted index columns are re-aliased by their (stringified) label.
    new_data_scols = [
        self._internal.scol_for(column).alias(str(name)) for column, name in new_index_map]

    if len(index_map) > 0:
        index_scols = [scol_for(self._sdf, column) for column, _ in index_map]
        sdf = self._sdf.select(
            index_scols + new_data_scols + self._internal.data_scols)
    else:
        sdf = self._sdf.select(new_data_scols + self._internal.data_scols)

        # Now, new internal Spark columns are named as same as index name.
        new_index_map = [(column, name) for column, name in new_index_map]

        # No index levels remain, so attach the default sequential index.
        index_map = [(SPARK_INDEX_NAME_FORMAT(0), None)]
        sdf = _InternalFrame.attach_default_index(sdf)

    if drop:
        # `drop=True`: discard the old index values instead of inserting them.
        new_index_map = []

    internal = self._internal.copy(
        sdf=sdf,
        data_columns=[str(name) for _, name in new_index_map] + self._internal.data_columns,
        index_map=index_map,
        column_index=None)

    if self._internal.column_index_level > 1:
        # Multi-level columns: pad each inserted label with `col_fill` so it
        # lands at `col_level` with the correct total depth.
        column_depth = len(self._internal.column_index[0])
        if col_level >= column_depth:
            raise IndexError('Too many levels: Index has only {} levels, not {}'
                             .format(column_depth, col_level + 1))
        if any(col_level + len(name) > column_depth for _, name in new_index_map):
            raise ValueError('Item must have length equal to number of levels.')
        columns = pd.MultiIndex.from_tuples(
            [tuple(([col_fill] * col_level)
                   + list(name)
                   + ([col_fill] * (column_depth - (len(name) + col_level))))
             for _, name in new_index_map]
            + self._internal.column_index)
    else:
        columns = [name for _, name in new_index_map] + self._internal.column_index

    if inplace:
        self._internal = internal
        self.columns = columns
    else:
        kdf = DataFrame(internal)
        kdf.columns = columns
        return kdf
def isnull(self):
    """
    Detects missing values for items in the current Dataframe.

    Return a boolean same-sized Dataframe indicating if the values are NA.
    NA values, such as None or numpy.NaN, gets mapped to True values.
    Everything else gets mapped to False values.

    See Also
    --------
    Dataframe.notnull

    Examples
    --------
    >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
    >>> df.isnull()
           0      1
    0  False  False
    1  False   True
    2  False   True
    3  False  False

    >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
    >>> df.isnull()
           0      1      2
    0   True  False   True
    1  False   True  False
    """
    # Apply the element-wise null test column by column on a copy.
    result = self.copy()
    for label, col in result.iteritems():
        result[label] = col.isnull()
    return result

isna = isnull
def notnull(self):
    """
    Detects non-missing values for items in the current Dataframe.

    This function takes a dataframe and indicates whether it's
    values are valid (not missing, which is ``NaN`` in numeric
    datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).

    See Also
    --------
    Dataframe.isnull

    Examples
    --------
    >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
    >>> df.notnull()
          0      1
    0  True   True
    1  True  False
    2  True  False
    3  True   True

    >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
    >>> df.notnull()
          0      1     2
    0  True   True  True
    1  True  False  True
    """
    # Apply the element-wise non-null test column by column on a copy.
    result = self.copy()
    for label, col in result.iteritems():
        result[label] = col.notnull()
    return result

notna = notnull
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None):
    """
    Shift DataFrame by desired number of periods.

    .. note:: the current implementation of shift uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    periods : int
        Number of periods to shift. Can be positive or negative.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        The default depends on the dtype of self. For numeric data, np.nan is used.

    Returns
    -------
    Copy of input DataFrame, shifted.

    Examples
    --------
    >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
    ...                    'Col2': [13, 23, 18, 33, 48],
    ...                    'Col3': [17, 27, 22, 37, 52]},
    ...                   columns=['Col1', 'Col2', 'Col3'])

    >>> df.shift(periods=3)
       Col1  Col2  Col3
    0   NaN   NaN   NaN
    1   NaN   NaN   NaN
    2   NaN   NaN   NaN
    3  10.0  13.0  17.0
    4  20.0  23.0  27.0

    >>> df.shift(periods=3, fill_value=0)
       Col1  Col2  Col3
    0     0     0     0
    1     0     0     0
    2     0     0     0
    3    10    13    17
    4    20    23    27
    """
    # Shift each column independently; the per-column Series.shift does the
    # actual windowed computation.
    shifted = [self[idx].shift(periods, fill_value)
               for idx in self._internal.column_index]
    sdf = self._sdf.select(
        self._internal.index_scols + [col._scol for col in shifted])
    internal = self._internal.copy(
        sdf=sdf,
        data_columns=[col._internal.data_columns[0] for col in shifted],
        column_index=[col._internal.column_index[0] for col in shifted])
    return DataFrame(internal)
# TODO: axis should support 1 or 'columns' either at this moment
def diff(self, periods: int = 1, axis: Union[int, str] = 0):
    """
    First discrete difference of element.

    Calculates the difference of a DataFrame element compared with another element in the
    DataFrame (default is the element in the same column of the previous row).

    .. note:: the current implementation of diff uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for calculating difference, accepts negative values.
    axis : int, default 0 or 'index'
        Can only be set to 0 at the moment.

    Returns
    -------
    diffed : DataFrame

    Examples
    --------
    >>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
    ...                    'b': [1, 1, 2, 3, 5, 8],
    ...                    'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
    >>> df
       a  b   c
    0  1  1   1
    1  2  1   4
    2  3  2   9
    3  4  3  16
    4  5  5  25
    5  6  8  36

    >>> df.diff()
         a    b     c
    0  NaN  NaN   NaN
    1  1.0  0.0   3.0
    2  1.0  1.0   5.0
    3  1.0  1.0   7.0
    4  1.0  2.0   9.0
    5  1.0  3.0  11.0

    Difference with previous column

    >>> df.diff(periods=3)
         a    b     c
    0  NaN  NaN   NaN
    1  NaN  NaN   NaN
    2  NaN  NaN   NaN
    3  3.0  2.0  15.0
    4  3.0  4.0  21.0
    5  3.0  6.0  27.0

    Difference with following row

    >>> df.diff(periods=-1)
         a    b     c
    0 -1.0  0.0  -3.0
    1 -1.0 -1.0  -5.0
    2 -1.0 -1.0  -7.0
    3 -1.0 -2.0  -9.0
    4 -1.0 -3.0 -11.0
    5  NaN  NaN   NaN
    """
    # Only row-wise differences are implemented so far.
    if axis not in (0, 'index'):
        raise ValueError('axis should be either 0 or "index" currently.')

    # Delegate the actual windowed difference to Series.diff, per column.
    diffed = [self[idx].diff(periods) for idx in self._internal.column_index]
    sdf = self._sdf.select(
        self._internal.index_scols + [col._scol for col in diffed])
    internal = self._internal.copy(
        sdf=sdf,
        data_columns=[col._internal.data_columns[0] for col in diffed],
        column_index=[col._internal.column_index[0] for col in diffed])
    return DataFrame(internal)
# TODO: axis should support 1 or 'columns' either at this moment
def nunique(self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False,
            rsd: float = 0.05) -> pd.Series:
    """
    Return number of unique elements in the object.

    Excludes NA values by default.

    Parameters
    ----------
    axis : int, default 0 or 'index'
        Can only be set to 0 at the moment.
    dropna : bool, default True
        Don’t include NaN in the count.
    approx: bool, default False
        If False, will use the exact algorithm and return the exact number of unique.
        If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
        for large amount of data.
        Note: This parameter is specific to Koalas and is not found in pandas.
    rsd: float, default 0.05
        Maximum estimation error allowed in the HyperLogLog algorithm.
        Note: Just like ``approx`` this parameter is specific to Koalas.

    Returns
    -------
    The number of unique values per column as a pandas Series.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
    >>> df.nunique()
    A    3
    B    1
    Name: 0, dtype: int64

    >>> df.nunique(dropna=False)
    A    3
    B    2
    Name: 0, dtype: int64

    On big data, we recommend using the approximate algorithm to speed up this function.
    The result will be very close to the exact unique count.

    >>> df.nunique(approx=True)
    A    3
    B    1
    Name: 0, dtype: int64
    """
    if axis not in (0, 'index'):
        raise ValueError('axis should be either 0 or "index" currently.')

    # One distinct-count expression per column, evaluated in a single Spark job.
    count_exprs = [self[column]._nunique(dropna, approx, rsd) for column in self.columns]
    counts = self._sdf.select(count_exprs)
    # The single-row result is transposed into a column-indexed pandas Series.
    return counts.toPandas().T.iloc[:, 0]
def round(self, decimals=0):
    """
    Round a DataFrame to a variable number of decimal places.

    Parameters
    ----------
    decimals : int, dict, Series
        Number of decimal places to round each column to. If an int is
        given, round each column to the same number of places.
        Otherwise dict and Series round to variable numbers of places.
        Column names should be in the keys if `decimals` is a
        dict-like, or in the index if `decimals` is a Series. Any
        columns not included in `decimals` will be left as is. Elements
        of `decimals` which are not columns of the input will be
        ignored.

    Returns
    -------
    DataFrame

    See Also
    --------
    Series.round

    Examples
    --------
    >>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
    ...                    'B':[0.992815, 0.645646, 0.149370],
    ...                    'C':[0.173891, 0.577595, 0.491027]},
    ...                   columns=['A', 'B', 'C'],
    ...                   index=['first', 'second', 'third'])
    >>> df
                   A         B         C
    first   0.028208  0.992815  0.173891
    second  0.038683  0.645646  0.577595
    third   0.877076  0.149370  0.491027

    >>> df.round(2)
               A     B     C
    first   0.03  0.99  0.17
    second  0.04  0.65  0.58
    third   0.88  0.15  0.49

    >>> df.round({'A': 1, 'C': 2})
              A         B     C
    first   0.0  0.992815  0.17
    second  0.0  0.645646  0.58
    third   0.9  0.149370  0.49

    >>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
    >>> df.round(decimals)
              A    B     C
    first   0.0  1.0  0.17
    second  0.0  1.0  0.58
    third   0.9  0.0  0.49
    """
    # Normalize `decimals` into (column-label-tuple, places) pairs.
    if isinstance(decimals, ks.Series):
        pairs = [(k if isinstance(k, tuple) else (k,), v)
                 for k, v in decimals._to_internal_pandas().items()]
    elif isinstance(decimals, dict):
        pairs = [(k if isinstance(k, tuple) else (k,), v)
                 for k, v in decimals.items()]
    elif isinstance(decimals, int):
        pairs = [(idx, decimals) for idx in self._internal.column_index]
    else:
        raise ValueError("decimals must be an integer, a dict-like or a Series")

    sdf = self._sdf
    for idx, places in pairs:
        # Labels not present in the frame are silently ignored, as documented.
        if idx not in self._internal.column_index:
            continue
        name = self._internal.column_name_for(idx)
        sdf = sdf.withColumn(name, F.round(scol_for(sdf, name), places))
    return DataFrame(self._internal.copy(sdf=sdf))
def duplicated(self, subset=None, keep='first'):
    """
    Return boolean Series denoting duplicate rows, optionally only considering certain columns.

    Parameters
    ----------
    subset : column label or sequence of labels, optional
        Only consider certain columns for identifying duplicates,
        by default use all of the columns
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
        - False : Mark all duplicates as ``True``.

    Returns
    -------
    duplicated : Series

    Raises
    ------
    ValueError
        If the DataFrame has a multi-level index, or if `keep` is not one of
        'first', 'last' or False.
    KeyError
        If any label in `subset` is not a column of the DataFrame.

    Examples
    --------
    >>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
    ...                   columns = ['a', 'b', 'c'])
    >>> df
       a  b  c
    0  1  1  1
    1  1  1  1
    2  1  1  1
    3  3  4  5

    >>> df.duplicated().sort_index()
    0    False
    1     True
    2     True
    3    False
    Name: 0, dtype: bool

    Mark duplicates as ``True`` except for the last occurrence.

    >>> df.duplicated(keep='last').sort_index()
    0     True
    1     True
    2    False
    3    False
    Name: 0, dtype: bool

    Mark all duplicates as ``True``.

    >>> df.duplicated(keep=False).sort_index()
    0     True
    1     True
    2     True
    3    False
    Name: 0, dtype: bool
    """
    from databricks.koalas.series import _col
    if len(self._internal.index_names) > 1:
        # Fixed garbled error message (was: "Now we don't support multi-index Now.").
        raise ValueError("We don't support multi-index now.")
    # Normalize `subset` to a list of column-label tuples; default is all columns.
    if subset is None:
        subset = self._internal.column_index
    else:
        if isinstance(subset, str):
            subset = [(subset,)]
        elif isinstance(subset, tuple):
            subset = [subset]
        else:
            subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
        diff = set(subset).difference(set(self._internal.column_index))
        if len(diff) > 0:
            raise KeyError(', '.join([str(d) if len(d) > 1 else d[0] for d in diff]))
    group_cols = [self._internal.column_name_for(idx) for idx in subset]
    index_column = self._internal.index_columns[0]
    if self._internal.index_names[0] is not None:
        name = self._internal.index_names[0]
    else:
        name = ('0',)
    column = str(name) if len(name) > 1 else name[0]
    sdf = self._sdf
    if column == index_column:
        # The result column would collide with the index column name; re-alias
        # the index to the internal default index name to avoid the clash.
        index_column = SPARK_INDEX_NAME_FORMAT(0)
        sdf = sdf.select([self._internal.index_scols[0].alias(index_column)]
                         + self._internal.data_scols)

    if keep == 'first' or keep == 'last':
        if keep == 'first':
            ord_func = spark.functions.asc
        else:
            ord_func = spark.functions.desc
        # Within each group of identical subset values, only the row ranked
        # first by index order (asc for 'first', desc for 'last') is kept as
        # non-duplicate; every other row gets row_number > 1.
        window = Window.partitionBy(group_cols).orderBy(ord_func(index_column)).rowsBetween(
            Window.unboundedPreceding, Window.currentRow)
        sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
    elif not keep:
        # keep=False: any row whose group has more than one member is a duplicate.
        window = Window.partitionBy(group_cols).orderBy(scol_for(sdf, index_column).desc())\
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
        sdf = sdf.withColumn(column, F.count(scol_for(sdf, index_column)).over(window) > 1)
    else:
        # Fixed grammar of the error message (was: "only support").
        raise ValueError("'keep' only supports 'first', 'last' and False")
    return _col(DataFrame(_InternalFrame(sdf=sdf.select(scol_for(sdf, index_column),
                                                        scol_for(sdf, column)),
                                         data_columns=[column],
                                         column_index=[name],
                                         index_map=[(index_column,
                                                     self._internal.index_names[0])])))
def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None):
    """
    Converts the existing DataFrame into a Koalas DataFrame.

    This method is monkey-patched into Spark's DataFrame and can be used
    to convert a Spark DataFrame into a Koalas DataFrame. If running on
    an existing Koalas DataFrame, the method returns itself.

    If a Koalas DataFrame is converted to a Spark DataFrame and then back
    to Koalas, it will lose the index information and the original index
    will be turned into a normal column.

    Parameters
    ----------
    index_col: str or list of str, optional, default: None
        Index column of table in Spark.

    See Also
    --------
    DataFrame.to_spark

    Examples
    --------
    >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
    >>> df
       col1  col2
    0     1     3
    1     2     4

    >>> spark_df = df.to_spark()
    >>> spark_df
    DataFrame[col1: bigint, col2: bigint]

    >>> kdf = spark_df.to_koalas()
    >>> kdf
       col1  col2
    0     1     3
    1     2     4

    We can specify the index columns.

    >>> kdf = spark_df.to_koalas(index_col='col1')
    >>> kdf  # doctest: +NORMALIZE_WHITESPACE
          col2
    col1
    1        3
    2        4

    Calling to_koalas on a Koalas DataFrame simply returns itself.

    >>> df.to_koalas()
       col1  col2
    0     1     3
    1     2     4
    """
    # Already a Koalas frame: identity. Otherwise `self` is a Spark DataFrame
    # (this method is monkey-patched onto pyspark's DataFrame).
    if isinstance(self, DataFrame):
        return self
    assert isinstance(self, spark.DataFrame), type(self)
    from databricks.koalas.namespace import _get_index_map
    return DataFrame(
        _InternalFrame(sdf=self, index_map=_get_index_map(self, index_col)))
def cache(self):
    """
    Yields and caches the current DataFrame.

    The Koalas DataFrame is yielded as a protected resource and its corresponding
    data is cached which gets uncached after execution goes of the context.

    Examples
    --------
    >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
    ...                   columns=['dogs', 'cats'])
    >>> df
       dogs  cats
    0   0.2   0.3
    1   0.0   0.6
    2   0.6   0.0
    3   0.2   0.1

    >>> with df.cache() as cached_df:
    ...     print(cached_df.count())
    ...
    dogs    4
    cats    4
    dtype: int64

    >>> df = df.cache()
    >>> df.to_pandas().mean(axis=1)
    0    0.25
    1    0.30
    2    0.30
    3    0.15
    dtype: float64

    To uncache the dataframe, use `unpersist` function

    >>> df.unpersist()
    """
    # _CachedDataFrame persists the underlying Spark data and doubles as a
    # context manager that unpersists on exit.
    cached = _CachedDataFrame(self._internal)
    return cached
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
             partition_cols: Union[str, List[str], None] = None,
             **options):
    """
    Write the DataFrame into a Spark table.

    Parameters
    ----------
    name : str, required
        Table name in Spark.
    format : string, optional
        Specifies the output data source format. Some common ones are:

        - 'delta'
        - 'parquet'
        - 'orc'
        - 'json'
        - 'csv'
    mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
        Specifies the behavior of the save operation when the table exists already.

        - 'append': Append the new data to existing data.
        - 'overwrite': Overwrite existing data.
        - 'ignore': Silently ignore this operation if data already exists.
        - 'error' or 'errorifexists': Throw an exception if data already exists.
    partition_cols : str or list of str, optional, default None
        Names of partitioning columns
    options
        Additional options passed directly to Spark.

    See Also
    --------
    read_table
    DataFrame.to_spark_io
    DataFrame.to_parquet

    Examples
    --------
    >>> df = ks.DataFrame(dict(
    ...    date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
    ...    country=['KR', 'US', 'JP'],
    ...    code=[1, 2 ,3]), columns=['date', 'country', 'code'])
    >>> df
                     date country  code
    0 2012-01-31 12:00:00      KR     1
    1 2012-02-29 12:00:00      US     2
    2 2012-03-31 12:00:00      JP     3

    >>> df.to_table('%s.my_table' % db, partition_cols='date')
    """
    # Convert to a plain Spark DataFrame (index dropped) and delegate to
    # Spark's DataFrameWriter.
    writer = self.to_spark().write
    writer.saveAsTable(name=name, format=format, mode=mode,
                       partitionBy=partition_cols, options=options)
def to_delta(self, path: str, mode: str = 'error',
             partition_cols: Union[str, List[str], None] = None, **options):
    """
    Write the DataFrame out as a Delta Lake table.

    Parameters
    ----------
    path : str, required
        Path to write to.
    mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
        Specifies the behavior of the save operation when the destination exists already.

        - 'append': Append the new data to existing data.
        - 'overwrite': Overwrite existing data.
        - 'ignore': Silently ignore this operation if data already exists.
        - 'error' or 'errorifexists': Throw an exception if data already exists.
    partition_cols : str or list of str, optional, default None
        Names of partitioning columns
    options : dict
        All other options passed directly into Delta Lake.

    See Also
    --------
    read_delta
    DataFrame.to_parquet
    DataFrame.to_table
    DataFrame.to_spark_io

    Examples
    --------
    >>> df = ks.DataFrame(dict(
    ...    date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
    ...    country=['KR', 'US', 'JP'],
    ...    code=[1, 2 ,3]), columns=['date', 'country', 'code'])
    >>> df
                     date country  code
    0 2012-01-31 12:00:00      KR     1
    1 2012-02-29 12:00:00      US     2
    2 2012-03-31 12:00:00      JP     3

    Create a new Delta Lake table, partitioned by one column:

    >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')

    Partitioned by two columns:

    >>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])

    Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:

    >>> df.to_delta('%s/to_delta/bar' % path,
    ...             mode='overwrite', replaceWhere='date >= "2019-01-01"')
    """
    # Delta is just a specific format of the generic Spark writer path.
    self.to_spark_io(path=path, format="delta", mode=mode,
                     partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
               partition_cols: Union[str, List[str], None] = None,
               compression: Optional[str] = None):
    """
    Write the DataFrame out as a Parquet file or directory.

    Parameters
    ----------
    path : str, required
        Path to write to.
    mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
        Specifies the behavior of the save operation when the destination exists already.

        - 'append': Append the new data to existing data.
        - 'overwrite': Overwrite existing data.
        - 'ignore': Silently ignore this operation if data already exists.
        - 'error' or 'errorifexists': Throw an exception if data already exists.
    partition_cols : str or list of str, optional, default None
        Names of partitioning columns
    compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
        Compression codec to use when saving to file. If None is set, it uses the
        value specified in `spark.sql.parquet.compression.codec`.

    See Also
    --------
    read_parquet
    DataFrame.to_delta
    DataFrame.to_table
    DataFrame.to_spark_io

    Examples
    --------
    >>> df = ks.DataFrame(dict(
    ...    date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
    ...    country=['KR', 'US', 'JP'],
    ...    code=[1, 2 ,3]), columns=['date', 'country', 'code'])
    >>> df
                     date country  code
    0 2012-01-31 12:00:00      KR     1
    1 2012-02-29 12:00:00      US     2
    2 2012-03-31 12:00:00      JP     3

    >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')

    >>> df.to_parquet(
    ...     '%s/to_parquet/foo.parquet' % path,
    ...     mode = 'overwrite',
    ...     partition_cols=['date', 'country'])
    """
    # Convert to a plain Spark DataFrame (index dropped) and use the native
    # Parquet writer so `compression` is honored.
    writer = self.to_spark().write
    writer.parquet(path=path, mode=mode,
                   partitionBy=partition_cols, compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
                mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
                **options):
    """Write the DataFrame out to a Spark data source.

    Parameters
    ----------
    path : string, optional
        Path to the data source.
    format : string, optional
        Specifies the output data source format. Some common ones are:

        - 'delta'
        - 'parquet'
        - 'orc'
        - 'json'
        - 'csv'
    mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
        Specifies the behavior of the save operation when data already.

        - 'append': Append the new data to existing data.
        - 'overwrite': Overwrite existing data.
        - 'ignore': Silently ignore this operation if data already exists.
        - 'error' or 'errorifexists': Throw an exception if data already exists.
    partition_cols : str or list of str, optional
        Names of partitioning columns
    options : dict
        All other options passed directly into Spark's data source.

    See Also
    --------
    read_spark_io
    DataFrame.to_delta
    DataFrame.to_parquet
    DataFrame.to_table

    Examples
    --------
    >>> df = ks.DataFrame(dict(
    ...    date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
    ...    country=['KR', 'US', 'JP'],
    ...    code=[1, 2 ,3]), columns=['date', 'country', 'code'])
    >>> df
                     date country  code
    0 2012-01-31 12:00:00      KR     1
    1 2012-02-29 12:00:00      US     2
    2 2012-03-31 12:00:00      JP     3

    >>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
    """
    # Convert to a plain Spark DataFrame (index dropped) and hand everything
    # to Spark's generic DataFrameWriter.save.
    writer = self.to_spark().write
    writer.save(path=path, format=format, mode=mode,
                partitionBy=partition_cols, options=options)
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None):
    """
    Return the current DataFrame as a Spark DataFrame.

    Parameters
    ----------
    index_col: str or list of str, optional, default: None
        Column names to be used in Spark to represent Koalas' index. The index name
        in Koalas is ignored. By default, the index is always lost.

    See Also
    --------
    DataFrame.to_koalas

    Examples
    --------
    By default, this method loses the index as below.

    >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
    >>> df.to_spark().show()  # doctest: +NORMALIZE_WHITESPACE
    +---+---+---+
    |  a|  b|  c|
    +---+---+---+
    |  1|  4|  7|
    |  2|  5|  8|
    |  3|  6|  9|
    +---+---+---+

    If `index_col` is set, it keeps the index column as specified.

    >>> df.to_spark(index_col="index").show()  # doctest: +NORMALIZE_WHITESPACE
    +-----+---+---+---+
    |index|  a|  b|  c|
    +-----+---+---+---+
    |    0|  1|  4|  7|
    |    1|  2|  5|  8|
    |    2|  3|  6|  9|
    +-----+---+---+---+

    Keeping index column is useful when you want to call some Spark APIs and
    convert it back to Koalas DataFrame without creating a default index, which
    can affect performance.

    >>> spark_df = df.to_spark(index_col="index")
    >>> spark_df = spark_df.filter("a == 2")
    >>> spark_df.to_koalas(index_col="index")  # doctest: +NORMALIZE_WHITESPACE
           a  b  c
    index
    1      2  5  8

    In case of multi-index, specify a list to `index_col`.

    >>> new_df = df.set_index("a", append=True)
    >>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
    >>> new_spark_df.show()  # doctest: +NORMALIZE_WHITESPACE
    +-------+-------+---+---+
    |index_1|index_2|  b|  c|
    +-------+-------+---+---+
    |      0|      1|  4|  7|
    |      1|      2|  5|  8|
    |      2|      3|  6|  9|
    +-------+-------+---+---+

    Likewise, can be converted to back to Koalas DataFrame.

    >>> new_spark_df.to_koalas(
    ...     index_col=["index_1", "index_2"])  # doctest: +NORMALIZE_WHITESPACE
                     b  c
    index_1 index_2
    0       1        4  7
    1       2        5  8
    2       3        6  9
    """
    if index_col is None:
        # Default: drop the index entirely and return only the data columns.
        return self._internal.spark_df
    else:
        if isinstance(index_col, str):
            index_col = [index_col]

        data_column_names = []
        data_columns = []
        data_columns_column_index = \
            zip(self._internal._data_columns, self._internal.column_index)
        # TODO: this code is similar with _InternalFrame.spark_df. Might have to deduplicate.
        # Re-alias each data column by its user-facing label (tuple labels are
        # stringified; a None label falls back to the positional index).
        for i, (column, idx) in enumerate(data_columns_column_index):
            scol = self._internal.scol_for(idx)
            name = str(i) if idx is None else str(idx) if len(idx) > 1 else idx[0]
            data_column_names.append(name)
            if column != name:
                scol = scol.alias(name)
            data_columns.append(scol)

        old_index_scols = self._internal.index_scols
        # `index_col` must supply exactly one name per index level.
        if len(index_col) != len(old_index_scols):
            raise ValueError(
                "length of index columns is %s; however, the length of the given "
                "'index_col' is %s." % (len(old_index_scols), len(index_col)))

        # Index names must not collide with the (re-aliased) data column names.
        if any(col in data_column_names for col in index_col):
            raise ValueError(
                "'index_col' cannot be overlapped with other columns.")

        sdf = self._internal.spark_internal_df
        new_index_scols = [
            index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)]
        return sdf.select(new_index_scols + data_columns)
def to_pandas(self):
    """
    Return a pandas DataFrame.

    .. note:: This method should only be used if the resulting Pandas DataFrame is expected
        to be small, as all the data is loaded into the driver's memory.

    Examples
    --------
    >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
    ...                   columns=['dogs', 'cats'])
    >>> df.to_pandas()
       dogs  cats
    0   0.2   0.3
    1   0.0   0.6
    2   0.6   0.0
    3   0.2   0.1
    """
    # Materialize the distributed frame on the driver, then return an independent
    # copy so callers can mutate the result without affecting internal state.
    pdf = self._internal.pandas_df
    return pdf.copy()

# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
    """
    Assign new columns to a DataFrame.

    Returns a new object with all original columns in addition to new ones.
    Existing columns that are re-assigned will be overwritten.

    Parameters
    ----------
    **kwargs : dict of {str: callable or Series}
        The column names are keywords. If the values are
        callable, they are computed on the DataFrame and
        assigned to the new columns. The callable must not
        change input DataFrame (though Koalas doesn't check it).
        If the values are not callable, (e.g. a Series or a literal),
        they are simply assigned.

    Returns
    -------
    DataFrame
        A new DataFrame with the new columns in addition to
        all the existing columns.

    Examples
    --------
    >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
    ...                   index=['Portland', 'Berkeley'])
    >>> df
              temp_c
    Portland    17.0
    Berkeley    25.0

    Where the value is a callable, evaluated on `df`:

    >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
              temp_c  temp_f
    Portland    17.0    62.6
    Berkeley    25.0    77.0

    Alternatively, the same behavior can be achieved by directly
    referencing an existing Series or sequence and you can also
    create multiple columns within the same assign.

    >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
    ...                      temp_k=df['temp_c'] + 273.15)
    >>> assigned[['temp_c', 'temp_f', 'temp_k']]
              temp_c  temp_f  temp_k
    Portland    17.0    62.6  290.15
    Berkeley    25.0    77.0  298.15

    Notes
    -----
    Assigning multiple columns within the same ``assign`` is possible
    but you cannot refer to newly created or modified columns. This
    feature is supported in pandas for Python 3.6 and later but not in
    Koalas. In Koalas, all items are computed first, and then assigned.
    """
    # Hand the keyword mapping to the shared helper, which performs validation,
    # evaluates callables and rebuilds the internal frame. A fresh dict is passed
    # because the helper may rewrite its values in place.
    assignments = dict(kwargs)
    return self._assign(assignments)
def _assign(self, kwargs):
    # Internal implementation of ``assign``: ``kwargs`` maps column labels to
    # Series, Spark Columns, callables, or scalar literals. Existing columns
    # whose label (or label prefix, for multi-index columns) matches a key are
    # overwritten; unmatched keys become new columns appended on the right.
    assert isinstance(kwargs, dict)
    from databricks.koalas.series import Series
    for k, v in kwargs.items():
        if not (isinstance(v, (Series, spark.Column)) or
                callable(v) or pd.api.types.is_scalar(v)):
            raise TypeError("Column assignment doesn't support type "
                            "{0}".format(type(v).__name__))
        if callable(v):
            # Evaluate callables against this DataFrame first. Only values (not
            # keys) of ``kwargs`` are rebound, so iterating while assigning is safe.
            kwargs[k] = v(self)
    # Normalize keys to tuples and values to Spark Columns.
    pairs = {(k if isinstance(k, tuple) else (k,)):
             (v._scol if isinstance(v, Series)
              else v if isinstance(v, spark.Column)
              else F.lit(v))
             for k, v in kwargs.items()}
    scols = []
    for idx in self._internal.column_index:
        # Replace an existing column if any prefix of its label tuple matches
        # an assigned key; otherwise keep the original Spark column.
        for i in range(len(idx)):
            if idx[:len(idx)-i] in pairs:
                name = self._internal.column_name_for(idx)
                scol = pairs[idx[:len(idx)-i]].alias(name)
                break
        else:
            scol = self._internal.scol_for(idx)
        scols.append(scol)
    # Keys that matched no existing label prefix are appended as new columns.
    adding_data_columns = []
    adding_column_index = []
    for idx, scol in pairs.items():
        if idx not in set(i[:len(idx)] for i in self._internal.column_index):
            name = str(idx) if len(idx) > 1 else idx[0]
            scols.append(scol.alias(name))
            adding_data_columns.append(name)
            adding_column_index.append(idx)
    sdf = self._sdf.select(self._internal.index_scols + scols)
    # Pad shorter labels with empty strings so every new label has as many
    # levels as the existing column index.
    level = self._internal.column_index_level
    adding_column_index = [tuple(list(idx) + ([''] * (level - len(idx))))
                           for idx in adding_column_index]
    internal = self._internal.copy(
        sdf=sdf,
        data_columns=(self._internal.data_columns + adding_data_columns),
        column_index=(self._internal.column_index + adding_column_index))
    return DataFrame(internal)
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
                 index: Union[str, list, np.array] = None, exclude: list = None,
                 columns: list = None, coerce_float: bool = False, nrows: int = None) \
        -> 'DataFrame':
    """
    Convert structured or record ndarray to DataFrame.

    Parameters
    ----------
    data : ndarray (structured dtype), list of tuples, dict, or DataFrame
    index : string, list of fields, array-like
        Field of array to use as the index, alternately a specific set of input labels to use
    exclude : sequence, default None
        Columns or fields to exclude
    columns : sequence, default None
        Column names to use. If the passed data do not have names associated with them, this
        argument provides names for the columns. Otherwise this argument indicates the order of
        the columns in the result (any names not found in the data will become all-NA columns)
    coerce_float : boolean, default False
        Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
        floating point, useful for SQL result sets
    nrows : int, default None
        Number of rows to read if data is an iterator

    Returns
    -------
    df : DataFrame

    Examples
    --------
    Use dict as input

    >>> ks.DataFrame.from_records({'A': [1, 2, 3]})
       A
    0  1
    1  2
    2  3

    Use list of tuples as input

    >>> ks.DataFrame.from_records([(1, 2), (3, 4)])
       0  1
    0  1  2
    1  3  4

    Use NumPy array as input

    >>> ks.DataFrame.from_records(np.eye(3))
         0    1    2
    0  1.0  0.0  0.0
    1  0.0  1.0  0.0
    2  0.0  0.0  1.0
    """
    # Let pandas do the parsing on the driver, then wrap the resulting pandas
    # DataFrame into a distributed Koalas DataFrame.
    pdf = pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)
    return DataFrame(pdf)
def to_records(self, index=True, convert_datetime64=None,
               column_dtypes=None, index_dtypes=None):
    """
    Convert DataFrame to a NumPy record array.

    Index will be included as the first field of the record array if
    requested.

    .. note:: This method should only be used if the resulting NumPy ndarray is
        expected to be small, as all the data is loaded into the driver's memory.

    Parameters
    ----------
    index : bool, default True
        Include index in resulting record array, stored in 'index'
        field or using the index label, if set.
    convert_datetime64 : bool, default None
        Whether to convert the index to datetime.datetime if it is a
        DatetimeIndex.
    column_dtypes : str, type, dict, default None
        If a string or type, the data type to store all columns. If
        a dictionary, a mapping of column names and indices (zero-indexed)
        to specific data types.
    index_dtypes : str, type, dict, default None
        If a string or type, the data type to store all index levels. If
        a dictionary, a mapping of index level names and indices
        (zero-indexed) to specific data types.
        This mapping is applied only if `index=True`.

    Returns
    -------
    numpy.recarray
        NumPy ndarray with the DataFrame labels as fields and each row
        of the DataFrame as entries.

    See Also
    --------
    DataFrame.from_records: Convert structured or record ndarray
        to DataFrame.
    numpy.recarray: An ndarray that allows field access using
        attributes, analogous to typed columns in a
        spreadsheet.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
    ...                   index=['a', 'b'])
    >>> df
       A     B
    a  1  0.50
    b  2  0.75

    >>> df.to_records() # doctest: +SKIP
    rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
              dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])

    The index can be excluded from the record array:

    >>> df.to_records(index=False) # doctest: +SKIP
    rec.array([(1, 0.5 ), (2, 0.75)],
              dtype=[('A', '<i8'), ('B', '<f8')])

    Specification of dtype for columns is new in Pandas 0.24.0.
    Data types can be specified for the columns:

    >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
    rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
              dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])

    Specification of dtype for index is new in Pandas 0.24.0.
    Data types can also be specified for the index:

    >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
    rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
              dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
    """
    # ``locals()`` must be captured before any other local variable is bound so
    # that it contains exactly the caller-supplied arguments.
    args = locals()
    kdf = self
    # Validate the arguments against pandas' signature, then run pandas'
    # implementation on the driver-side pandas DataFrame.
    return validate_arguments_and_invoke_function(
        kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
    """
    Make a copy of this object's indices and data.

    Returns
    -------
    copy : DataFrame

    Examples
    --------
    >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
    ...                   columns=['x', 'y', 'z', 'w'])
    >>> df
       x  y  z  w
    0  1  3  5  7
    1  2  4  6  8
    >>> df_copy = df.copy()
    >>> df_copy
       x  y  z  w
    0  1  3  5  7
    1  2  4  6  8
    """
    # Duplicate the internal frame metadata and wrap it in a new DataFrame.
    duplicated_internal = self._internal.copy()
    return DataFrame(duplicated_internal)
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
    """
    Remove missing values.

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        Determine if rows or columns which contain missing values are
        removed.
        * 0, or 'index' : Drop rows which contain missing values.
    how : {'any', 'all'}, default 'any'
        Determine if row or column is removed from DataFrame, when we have
        at least one NA or all NA.
        * 'any' : If any NA values are present, drop that row or column.
        * 'all' : If all values are NA, drop that row or column.
    thresh : int, optional
        Require that many non-NA values.
    subset : array-like, optional
        Labels along other axis to consider, e.g. if you are dropping rows
        these would be a list of columns to include.
    inplace : bool, default False
        If True, do operation inplace and return None.

    Returns
    -------
    DataFrame
        DataFrame with NA entries dropped from it.

    See Also
    --------
    DataFrame.drop : Drop specified labels from columns.
    DataFrame.isnull: Indicate missing values.
    DataFrame.notnull : Indicate existing (non-missing) values.

    Examples
    --------
    >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
    ...                    "toy": [None, 'Batmobile', 'Bullwhip'],
    ...                    "born": [None, "1940-04-25", None]},
    ...                   columns=['name', 'toy', 'born'])
    >>> df
           name        toy        born
    0    Alfred       None        None
    1    Batman  Batmobile  1940-04-25
    2  Catwoman   Bullwhip        None

    Drop the rows where at least one element is missing.

    >>> df.dropna()
         name        toy        born
    1  Batman  Batmobile  1940-04-25

    Drop the rows where all elements are missing.

    >>> df.dropna(how='all')
           name        toy        born
    0    Alfred       None        None
    1    Batman  Batmobile  1940-04-25
    2  Catwoman   Bullwhip        None

    Keep only the rows with at least 2 non-NA values.

    >>> df.dropna(thresh=2)
           name        toy        born
    1    Batman  Batmobile  1940-04-25
    2  Catwoman   Bullwhip        None

    Define in which columns to look for missing values.

    >>> df.dropna(subset=['name', 'born'])
         name        toy        born
    1  Batman  Batmobile  1940-04-25

    Keep the DataFrame with valid entries in the same variable.

    >>> df.dropna(inplace=True)
    >>> df
         name        toy        born
    1  Batman  Batmobile  1940-04-25
    """
    if axis == 0 or axis == 'index':
        if subset is not None:
            # Normalize ``subset`` into a list of tuple labels (multi-index form)
            # and fail fast on labels that do not exist.
            if isinstance(subset, str):
                idxes = [(subset,)]
            elif isinstance(subset, tuple):
                idxes = [subset]
            else:
                idxes = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
            invalids = [idx for idx in idxes
                        if idx not in self._internal.column_index]
            if len(invalids) > 0:
                raise KeyError(invalids)
        else:
            idxes = self._internal.column_index
        # Per-row count of non-null values among the considered columns.
        cnt = reduce(lambda x, y: x + y,
                     [F.when(self[idx].notna()._scol, 1).otherwise(0)
                      for idx in idxes],
                     F.lit(0))
        if thresh is not None:
            # ``thresh`` takes precedence over ``how`` (matching pandas): keep
            # rows with at least ``thresh`` non-null values.
            pred = cnt >= F.lit(int(thresh))
        elif how == 'any':
            # Keep only rows in which every considered column is non-null.
            pred = cnt == F.lit(len(idxes))
        elif how == 'all':
            # Keep rows with at least one non-null value.
            pred = cnt > F.lit(0)
        else:
            if how is not None:
                raise ValueError('invalid how option: {h}'.format(h=how))
            else:
                raise TypeError('must specify how or thresh')
        sdf = self._sdf.filter(pred)
        internal = self._internal.copy(sdf=sdf)
        if inplace:
            self._internal = internal
        else:
            return DataFrame(internal)
    else:
        raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
    """Fill NA/NaN values.

    .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
        without specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    value : scalar, dict, Series
        Value to use to fill holes. alternately a dict/Series of values
        specifying which value to use for each column.
        DataFrame is not supported.
    method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed Series pad / ffill: propagate last valid
        observation forward to next valid backfill / bfill:
        use NEXT valid observation to fill gap
    axis : {0 or `index`}
        1 and `columns` are not supported.
    inplace : boolean, default False
        Fill in place (do not create a new object)
    limit : int, default None
        If method is specified, this is the maximum number of consecutive NaN values to
        forward/backward fill. In other words, if there is a gap with more than this number of
        consecutive NaNs, it will only be partially filled. If method is not specified,
        this is the maximum number of entries along the entire axis where NaNs will be filled.
        Must be greater than 0 if not None

    Returns
    -------
    DataFrame
        DataFrame with NA entries filled.

    Examples
    --------
    >>> df = ks.DataFrame({
    ...     'A': [None, 3, None, None],
    ...     'B': [2, 4, None, 3],
    ...     'C': [None, None, None, 1],
    ...     'D': [0, 1, 5, 4]
    ...     },
    ...     columns=['A', 'B', 'C', 'D'])
    >>> df
         A    B    C  D
    0  NaN  2.0  NaN  0
    1  3.0  4.0  NaN  1
    2  NaN  NaN  NaN  5
    3  NaN  3.0  1.0  4

    Replace all NaN elements with 0s.

    >>> df.fillna(0)
         A    B    C  D
    0  0.0  2.0  0.0  0
    1  3.0  4.0  0.0  1
    2  0.0  0.0  0.0  5
    3  0.0  3.0  1.0  4

    We can also propagate non-null values forward or backward.

    >>> df.fillna(method='ffill')
         A    B    C  D
    0  NaN  2.0  NaN  0
    1  3.0  4.0  NaN  1
    2  3.0  4.0  NaN  5
    3  3.0  3.0  1.0  4

    Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
    2, and 3 respectively.

    >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    >>> df.fillna(value=values)
         A    B    C  D
    0  0.0  2.0  2.0  0
    1  3.0  4.0  2.0  1
    2  0.0  1.0  2.0  5
    3  0.0  3.0  1.0  4
    """
    sdf = self._sdf
    if value is not None:
        # Scalar/dict/Series fill: delegated to Spark's native ``fillna``.
        if axis is None:
            axis = 0
        if not (axis == 0 or axis == "index"):
            raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
        if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
            raise TypeError("Unsupported type %s" % type(value))
        if isinstance(value, pd.Series):
            # A Series of per-column fill values behaves like a dict.
            value = value.to_dict()
        if isinstance(value, dict):
            for v in value.values():
                if not isinstance(v, (float, int, str, bool)):
                    raise TypeError("Unsupported type %s" % type(v))
            # Translate user-facing labels to internal Spark column names.
            value = {self._internal.column_name_for(key): value for key, value in value.items()}
        if limit is not None:
            raise ValueError('limit parameter for value is not support now')
        sdf = sdf.fillna(value)
        internal = self._internal.copy(sdf=sdf)
    else:
        # Method-based fill (ffill/bfill): applied column-by-column via Series.fillna.
        if method is None:
            raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
        applied = []
        for idx in self._internal.column_index:
            applied.append(self[idx].fillna(value=value, method=method, axis=axis,
                                            inplace=False, limit=limit))
        sdf = self._sdf.select(self._internal.index_scols + [col._scol for col in applied])
        internal = self._internal.copy(sdf=sdf,
                                       data_columns=[col._internal.data_columns[0]
                                                     for col in applied],
                                       column_index=[col._internal.column_index[0]
                                                     for col in applied])
    if inplace:
        self._internal = internal
    else:
        return DataFrame(internal)
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
    """
    Synonym for `DataFrame.fillna()` with ``method='bfill'``.

    .. note:: the current implementation of 'bfill' uses Spark's Window
        without specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    axis : {0 or `index`}
        1 and `columns` are not supported.
    inplace : boolean, default False
        Fill in place (do not create a new object)
    limit : int, default None
        If method is specified, this is the maximum number of consecutive NaN values to
        forward/backward fill. In other words, if there is a gap with more than this number of
        consecutive NaNs, it will only be partially filled. If method is not specified,
        this is the maximum number of entries along the entire axis where NaNs will be filled.
        Must be greater than 0 if not None

    Returns
    -------
    DataFrame
        DataFrame with NA entries filled.

    Examples
    --------
    >>> df = ks.DataFrame({
    ...     'A': [None, 3, None, None],
    ...     'B': [2, 4, None, 3],
    ...     'C': [None, None, None, 1],
    ...     'D': [0, 1, 5, 4]
    ...     },
    ...     columns=['A', 'B', 'C', 'D'])
    >>> df
         A    B    C  D
    0  NaN  2.0  NaN  0
    1  3.0  4.0  NaN  1
    2  NaN  NaN  NaN  5
    3  NaN  3.0  1.0  4

    Propagate non-null values backward.

    >>> df.bfill()
         A    B    C  D
    0  3.0  2.0  1.0  0
    1  3.0  4.0  1.0  1
    2  NaN  3.0  1.0  5
    3  NaN  3.0  1.0  4
    """
    # Thin convenience wrapper around ``fillna`` with the backfill method.
    return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
    """
    Synonym for `DataFrame.fillna()` with ``method='ffill'``.

    .. note:: the current implementation of 'ffill' uses Spark's Window
        without specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    axis : {0 or `index`}
        1 and `columns` are not supported.
    inplace : boolean, default False
        Fill in place (do not create a new object)
    limit : int, default None
        If method is specified, this is the maximum number of consecutive NaN values to
        forward/backward fill. In other words, if there is a gap with more than this number of
        consecutive NaNs, it will only be partially filled. If method is not specified,
        this is the maximum number of entries along the entire axis where NaNs will be filled.
        Must be greater than 0 if not None

    Returns
    -------
    DataFrame
        DataFrame with NA entries filled.

    Examples
    --------
    >>> df = ks.DataFrame({
    ...     'A': [None, 3, None, None],
    ...     'B': [2, 4, None, 3],
    ...     'C': [None, None, None, 1],
    ...     'D': [0, 1, 5, 4]
    ...     },
    ...     columns=['A', 'B', 'C', 'D'])
    >>> df
         A    B    C  D
    0  NaN  2.0  NaN  0
    1  3.0  4.0  NaN  1
    2  NaN  NaN  NaN  5
    3  NaN  3.0  1.0  4

    Propagate non-null values forward.

    >>> df.ffill()
         A    B    C  D
    0  NaN  2.0  NaN  0
    1  3.0  4.0  NaN  1
    2  3.0  4.0  NaN  5
    3  3.0  3.0  1.0  4
    """
    # Thin convenience wrapper around ``fillna`` with the forward-fill method.
    return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
            limit=None, regex=False, method='pad'):
    """
    Returns a new DataFrame replacing a value with another value.

    Parameters
    ----------
    to_replace : int, float, string, or list
        Value to be replaced. If the value is a dict, then value is ignored and
        to_replace must be a mapping from column name (string) to replacement value.
        The value to be replaced must be an int, float, or string.
    value : int, float, string, or list
        Value to use to replace holes. The replacement value must be an int, float,
        or string. If value is a list, value should be of the same length with to_replace.
    subset : string, list
        Optional list of column names to consider. Columns specified in subset that
        do not have matching data type are ignored. For example, if value is a string,
        and subset contains a non-string column, then the non-string column is simply ignored.
    inplace : boolean, default False
        Fill in place (do not create a new object)

    Returns
    -------
    DataFrame
        Object after replacement.

    Examples
    --------
    >>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
    ...                    "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
    ...                   columns=['name', 'weapon'])
    >>> df
                  name   weapon
    0          Ironman  Mark-45
    1  Captain America   Shield
    2             Thor  Mjolnir
    3             Hulk    Smash

    Scalar `to_replace` and `value`

    >>> df.replace('Ironman', 'War-Machine')
                  name   weapon
    0      War-Machine  Mark-45
    1  Captain America   Shield
    2             Thor  Mjolnir
    3             Hulk    Smash

    List like `to_replace` and `value`

    >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
    >>> df
          name   weapon
    0   Rescue  Mark-45
    1  Hawkeye   Shield
    2     Thor  Mjolnir
    3     Hulk    Smash

    Replacing value by specifying column

    >>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
          name       weapon
    0   Rescue      Mark-45
    1  Hawkeye       Shield
    2     Thor  Stormbuster
    3     Hulk        Smash

    Dict like `to_replace`

    >>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
    ...                    'B': [5, 6, 7, 8, 9],
    ...                    'C': ['a', 'b', 'c', 'd', 'e']},
    ...                   columns=['A', 'B', 'C'])
    >>> df.replace({'A': {0: 100, 4: 400}})
         A  B  C
    0  100  5  a
    1    1  6  b
    2    2  7  c
    3    3  8  d
    4  400  9  e

    >>> df.replace({'A': 0, 'B': 5}, 100)
         A    B  C
    0  100  100  a
    1    1    6  b
    2    2    7  c
    3    3    8  d
    4    4    9  e

    Notes
    -----
    One difference between this implementation and pandas is that it is necessary
    to specify the column name when you are passing dictionary in `to_replace`
    parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
    throw an error. Instead specify column-name like `df.replace({'A': {0: 10, 1: 100}})`.
    """
    # Reject the pandas options that have no Spark-backed implementation yet.
    if method != 'pad':
        raise NotImplementedError("replace currently works only for method='pad'")
    if limit is not None:
        raise NotImplementedError("replace currently works only when limit=None")
    if regex is not False:
        raise NotImplementedError("replace currently doesn't support regex")
    if value is not None and not isinstance(value, (int, float, str, list, dict)):
        raise TypeError("Unsupported type {}".format(type(value)))
    if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
        raise TypeError("Unsupported type {}".format(type(to_replace)))

    if isinstance(value, list) and isinstance(to_replace, list):
        if len(value) != len(to_replace):
            raise ValueError('Length of to_replace and value must be same')

    # TODO: Do we still need to support this argument?
    # Normalize ``subset`` into a list of internal Spark column names.
    if subset is None:
        subset = self._internal.column_index
    elif isinstance(subset, str):
        subset = [(subset,)]
    elif isinstance(subset, tuple):
        subset = [subset]
    else:
        subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
    subset = [self._internal.column_name_for(idx) for idx in subset]

    sdf = self._sdf
    if isinstance(to_replace, dict) and value is None and \
            (not any(isinstance(i, dict) for i in to_replace.values())):
        # Flat {old: new} mapping: Spark's native replace handles it directly.
        sdf = sdf.replace(to_replace, value, subset)
    elif isinstance(to_replace, dict):
        # Per-column replacement: {column: {old: new}} nested dicts, or
        # {column: old} combined with a scalar ``value``.
        for name, replacement in to_replace.items():
            if isinstance(name, str):
                name = (name,)
            df_column = self._internal.column_name_for(name)
            if isinstance(replacement, dict):
                sdf = sdf.replace(replacement, subset=df_column)
            else:
                sdf = sdf.withColumn(df_column,
                                     F.when(scol_for(sdf, df_column) == replacement, value)
                                     .otherwise(scol_for(sdf, df_column)))
    else:
        # Scalar or list ``to_replace``: again a direct Spark replace.
        sdf = sdf.replace(to_replace, value, subset)

    internal = self._internal.copy(sdf=sdf)
    if inplace:
        self._internal = internal
    else:
        return DataFrame(internal)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
        -> 'DataFrame':
    """
    Trim values at input threshold(s).

    Assigns values outside boundary to boundary values.

    Parameters
    ----------
    lower : float or int, default None
        Minimum threshold value. All values below this threshold will be set to it.
    upper : float or int, default None
        Maximum threshold value. All values above this threshold will be set to it.

    Returns
    -------
    DataFrame
        DataFrame with the values outside the clip boundaries replaced.

    Examples
    --------
    >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
       A
    0  1
    1  2
    2  3

    Notes
    -----
    One difference between this implementation and pandas is that running
    pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
    between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
    will output the original DataFrame, simply ignoring the incompatible types.
    """
    if is_list_like(lower) or is_list_like(upper):
        raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
                         "moment")

    # No bounds given: nothing to do.
    if lower is None and upper is None:
        return self

    numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
                     ShortType)

    # Clip only numeric columns; non-numeric columns pass through untouched.
    clipped_scols = []
    passthrough_scols = []
    for column in self.columns:
        scol = self._internal.scol_for(column)
        if isinstance(self._internal.spark_type_for(column), numeric_types):
            if lower is not None:
                scol = F.when(scol < lower, lower).otherwise(scol).alias(column)
            if upper is not None:
                scol = F.when(scol > upper, upper).otherwise(scol).alias(column)
            clipped_scols.append(scol)
        else:
            passthrough_scols.append(scol)

    # Select numeric columns first (matching the original projection order),
    # then restore the caller-visible column order via reindexing.
    sdf = self._sdf.select(clipped_scols + passthrough_scols)
    return ks.DataFrame(sdf)[list(self.columns)]
def head(self, n=5):
    """
    Return the first `n` rows.

    This function returns the first `n` rows for the object based
    on position. It is useful for quickly testing if your object
    has the right type of data in it.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    obj_head : same type as caller
        The first `n` rows of the caller object.

    Examples
    --------
    >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
    ...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
    >>> df
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    6      shark
    7      whale
    8      zebra

    Viewing the first 5 lines

    >>> df.head()
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey

    Viewing the first `n` lines (three in this case)

    >>> df.head(3)
          animal
    0  alligator
    1        bee
    2     falcon
    """
    # Limit the underlying Spark DataFrame and rebuild the frame metadata on top.
    limited_sdf = self._sdf.limit(n)
    return DataFrame(self._internal.copy(sdf=limited_sdf))
def pivot_table(self, values=None, index=None, columns=None,
                aggfunc='mean', fill_value=None):
    """
    Create a spreadsheet-style pivot table as a DataFrame. The levels in
    the pivot table will be stored in MultiIndex objects (hierarchical
    indexes) on the index and columns of the result DataFrame.

    Parameters
    ----------
    values : column to aggregate.
        They should be either a list less than three or a string.
    index : column (string) or list of columns
        If an array is passed, it must be the same length as the data.
        The list should contain string.
    columns : column
        Columns used in the pivot operation. Only one column is supported and
        it should be a string.
    aggfunc : function (string), dict, default mean
        If dict is passed, the resulting pivot table will have
        columns concatenated by "_" where the first part is the value
        of columns and the second part is the column name in values
        If dict is passed, the key is column to aggregate and value
        is function or list of functions.
    fill_value : scalar, default None
        Value to replace missing values with.

    Returns
    -------
    table : DataFrame

    Examples
    --------
    >>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
    ...                          "bar", "bar", "bar", "bar"],
    ...                    "B": ["one", "one", "one", "two", "two",
    ...                          "one", "one", "two", "two"],
    ...                    "C": ["small", "large", "large", "small",
    ...                          "small", "large", "small", "small",
    ...                          "large"],
    ...                    "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
    ...                    "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
    ...                   columns=['A', 'B', 'C', 'D', 'E'])
    >>> df
         A    B      C  D  E
    0  foo  one  small  1  2
    1  foo  one  large  2  4
    2  foo  one  large  2  5
    3  foo  two  small  3  5
    4  foo  two  small  3  6
    5  bar  one  large  4  6
    6  bar  one  small  5  8
    7  bar  two  small  6  9
    8  bar  two  large  7  9

    This first example aggregates values by taking the sum.

    >>> table = df.pivot_table(values='D', index=['A', 'B'],
    ...                        columns='C', aggfunc='sum')
    >>> table  # doctest: +NORMALIZE_WHITESPACE
    C        large  small
    A   B
    foo one    4.0      1
        two    NaN      6
    bar two    7.0      6
        one    4.0      5

    We can also fill missing values using the `fill_value` parameter.

    >>> table = df.pivot_table(values='D', index=['A', 'B'],
    ...                        columns='C', aggfunc='sum', fill_value=0)
    >>> table  # doctest: +NORMALIZE_WHITESPACE
    C        large  small
    A   B
    foo one      4      1
        two      0      6
    bar two      7      6
        one      4      5

    We can also calculate multiple types of aggregations for any given
    value column.

    >>> table = df.pivot_table(values=['D'], index =['C'],
    ...                        columns="A", aggfunc={'D': 'mean'})
    >>> table  # doctest: +NORMALIZE_WHITESPACE
             D
    A      bar       foo
    C
    small  5.5  2.333333
    large  5.5  2.000000

    The next example aggregates on multiple values.

    >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
    ...                        aggfunc={'D': 'mean', 'E': 'sum'})
    >>> table # doctest: +NORMALIZE_WHITESPACE
             D             E
    A      bar       foo bar foo
    C
    small  5.5  2.333333  17  13
    large  5.5  2.000000  15   9
    """
    # --- argument validation -------------------------------------------------
    if not isinstance(columns, (str, tuple)):
        raise ValueError("columns should be string or tuple.")
    if not isinstance(values, (str, tuple)) and not isinstance(values, list):
        raise ValueError('values should be string or list of one column.')
    if not isinstance(aggfunc, str) and \
            (not isinstance(aggfunc, dict) or
             not all(isinstance(key, (str, tuple)) and isinstance(value, str)
                     for key, value in aggfunc.items())):
        raise ValueError("aggfunc must be a dict mapping from column name (string or tuple) "
                         "to aggregate functions (string).")
    if isinstance(aggfunc, dict) and index is None:
        raise NotImplementedError("pivot_table doesn't support aggfunc"
                                  " as dict and without index.")
    if isinstance(values, list) and index is None:
        raise NotImplementedError("values can't be a list without index.")
    if columns not in self.columns:
        raise ValueError("Wrong columns {}.".format(columns))
    # Normalize ``values`` to tuple label form and require numeric types.
    if isinstance(values, list):
        values = [col if isinstance(col, tuple) else (col,) for col in values]
        if not all(isinstance(self._internal.spark_type_for(col), NumericType)
                   for col in values):
            raise TypeError('values should be a numeric type.')
    else:
        values = values if isinstance(values, tuple) else (values,)
        if not isinstance(self._internal.spark_type_for(values), NumericType):
            raise TypeError('values should be a numeric type.')
    # --- build the aggregation expressions -----------------------------------
    if isinstance(aggfunc, str):
        # One aggregate function applied to every value column.
        if isinstance(values, list):
            agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
                               .format(self._internal.column_name_for(value), aggfunc))
                        for value in values]
        else:
            agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
                               .format(self._internal.column_name_for(values), aggfunc))]
    elif isinstance(aggfunc, dict):
        # Per-column aggregate functions; keys must match ``values`` exactly.
        aggfunc = {key if isinstance(key, tuple) else (key,): value
                   for key, value in aggfunc.items()}
        agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
                           .format(self._internal.column_name_for(key), value))
                    for key, value in aggfunc.items()]
        agg_columns = [key for key, _ in aggfunc.items()]
        if set(agg_columns) != set(values):
            raise ValueError("Columns in aggfunc must be the same as values.")
    # --- run the Spark pivot -------------------------------------------------
    if index is None:
        sdf = self._sdf.groupBy() \
            .pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
    elif isinstance(index, list):
        index = [idx if isinstance(idx, tuple) else (idx,) for idx in index]
        sdf = self._sdf.groupBy([self._internal.scol_for(idx) for idx in index]) \
            .pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
    else:
        raise ValueError("index should be a None or a list of columns.")
    if fill_value is not None and isinstance(fill_value, (int, float)):
        sdf = sdf.fillna(fill_value)
    # --- rebuild Koalas frame metadata on top of the pivoted Spark frame -----
    if index is not None:
        if isinstance(values, list):
            index_columns = [self._internal.column_name_for(idx) for idx in index]
            data_columns = [column for column in sdf.columns if column not in index_columns]
            if len(values) > 1:
                # If we have two values, Spark will return column's name
                # in this format: column_values, where column contains
                # their values in the DataFrame and values is
                # the column list passed to the pivot_table().
                # E.g. if column is b and values is ['b','e'],
                # then ['2_b', '2_e', '3_b', '3_e'].
                # We sort the columns of Spark DataFrame by values.
                data_columns.sort(key=lambda x: x.split('_', 1)[1])
                sdf = sdf.select(index_columns + data_columns)
                column_name_to_index = dict(zip(self._internal.data_columns,
                                                self._internal.column_index))
                column_index = [tuple(list(column_name_to_index[name.split('_')[1]])
                                      + [name.split('_')[0]])
                                for name in data_columns]
                index_map = list(zip(index_columns, index))
                column_index_names = (([None] * column_index_level(values))
                                      + [str(columns) if len(columns) > 1 else columns[0]])
                internal = _InternalFrame(sdf=sdf,
                                          index_map=index_map,
                                          data_columns=data_columns,
                                          column_index=column_index,
                                          column_index_names=column_index_names)
                kdf = DataFrame(internal)
            else:
                # A single value column: pivoted column names are the pivot
                # values themselves; prepend the value label as the outer level.
                column_index = [tuple(list(values[0]) + [column]) for column in data_columns]
                index_map = list(zip(index_columns, index))
                column_index_names = (([None] * len(values[0]))
                                      + [str(columns) if len(columns) > 1 else columns[0]])
                internal = _InternalFrame(sdf=sdf,
                                          index_map=index_map,
                                          data_columns=data_columns,
                                          column_index=column_index,
                                          column_index_names=column_index_names)
                kdf = DataFrame(internal)
            return kdf
        else:
            # Scalar ``values`` with an index: pivoted columns keep their names.
            index_columns = [self._internal.column_name_for(idx) for idx in index]
            index_map = list(zip(index_columns, index))
            data_columns = [column for column in sdf.columns if column not in index_columns]
            column_index_names = [str(columns) if len(columns) > 1 else columns[0]]
            internal = _InternalFrame(sdf=sdf,
                                      index_map=index_map, data_columns=data_columns,
                                      column_index_names=column_index_names)
            return DataFrame(internal)
    else:
        # No index: synthesize index columns holding the value label as literals.
        if isinstance(values, list):
            index_values = values[-1]
        else:
            index_values = values
        index_map = []
        for i, index_value in enumerate(index_values):
            colname = SPARK_INDEX_NAME_FORMAT(i)
            sdf = sdf.withColumn(colname, F.lit(index_value))
            index_map.append((colname, None))
        column_index_names = [str(columns) if len(columns) > 1 else columns[0]]
        internal = _InternalFrame(sdf=sdf,
                                  index_map=index_map,
                                  column_index_names=column_index_names)
        return DataFrame(internal)
    def pivot(self, index=None, columns=None, values=None):
        """
        Return reshaped DataFrame organized by given index / column values.
        Reshape data (produce a "pivot" table) based on column values. Uses
        unique values from specified `index` / `columns` to form axes of the
        resulting DataFrame. This function does not support data
        aggregation.
        Parameters
        ----------
        index : string, optional
            Column to use to make new frame's index. If None, uses
            existing index.
        columns : string
            Column to use to make new frame's columns.
        values : string, object or a list of the previous
            Column(s) to use for populating new frame's values.
        Returns
        -------
        DataFrame
            Returns reshaped DataFrame.
        See Also
        --------
        DataFrame.pivot_table : Generalization of pivot that can handle
            duplicate values for one index/column pair.
        Examples
        --------
        >>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
        ...                            'two'],
        ...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
        ...                    'baz': [1, 2, 3, 4, 5, 6],
        ...                    'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
        ...                   columns=['foo', 'bar', 'baz', 'zoo'])
        >>> df
           foo bar  baz zoo
        0  one   A    1   x
        1  one   B    2   y
        2  one   C    3   z
        3  two   A    4   q
        4  two   B    5   w
        5  two   C    6   t
        >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
        ... # doctest: +NORMALIZE_WHITESPACE
        bar  A  B  C
        foo
        one  1  2  3
        two  4  5  6
        >>> df.pivot(columns='bar', values='baz').sort_index()  # doctest: +NORMALIZE_WHITESPACE
        bar  A    B    C
        0  1.0  NaN  NaN
        1  NaN  2.0  NaN
        2  NaN  NaN  3.0
        3  4.0  NaN  NaN
        4  NaN  5.0  NaN
        5  NaN  NaN  6.0
        Notice that, unlike pandas raises an ValueError when duplicated values are found,
        Koalas' pivot still works with its first value it meets during operation because pivot
        is an expensive operation and it is preferred to permissively execute over failing fast
        when processing large data.
        >>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
        ...                    "bar": ['A', 'A', 'B', 'C'],
        ...                    "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
        >>> df
           foo bar  baz
        0  one   A    1
        1  one   A    2
        2  two   B    3
        3  two   C    4
        >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
        ... # doctest: +NORMALIZE_WHITESPACE
        bar    A    B    C
        foo
        one  1.0  NaN  NaN
        two  NaN  3.0  4.0
        """
        # Implemented on top of pivot_table with aggfunc='first': duplicates keep
        # the first value encountered instead of raising like pandas does.
        if columns is None:
            raise ValueError("columns should be set.")
        if values is None:
            raise ValueError("values should be set.")
        should_use_existing_index = index is not None
        if should_use_existing_index:
            df = self
            index = [index]
        else:
            # No explicit index: materialize a monotonically increasing dummy
            # column so every row survives the pivot, then push the current
            # index levels back into regular columns for pivot_table to use.
            df = self.copy()
            df['__DUMMY__'] = F.monotonically_increasing_id()
            df.set_index('__DUMMY__', append=True, inplace=True)
            df.reset_index(level=range(len(df._internal.index_map) - 1), inplace=True)
            index = df._internal.column_index[:len(df._internal.index_map)]
        df = df.pivot_table(
            index=index, columns=columns, values=values, aggfunc='first')
        if should_use_existing_index:
            return df
        else:
            index_columns = df._internal.index_columns
            # Note that the existing indexing column won't exist in the pivoted DataFrame.
            internal = df._internal.copy(
                index_map=[(index_column, None) for index_column in index_columns])
            return DataFrame(internal)
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_index_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_index)
else:
columns = pd.Index([idx[0] for idx in self._internal.column_index])
if self._internal.column_index_names is not None:
columns.names = self._internal.column_index_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_index = columns.tolist()
old_names = self._internal.column_index
if len(old_names) != len(column_index):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_index)))
column_index_names = columns.names
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
else:
old_names = self._internal.column_index
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
column_index = [col if isinstance(col, tuple) else (col,) for col in columns]
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[idx].dtype for idx in self._internal.column_index],
index=pd.Index([idx if len(idx) > 1 else idx[0]
for idx in self._internal.column_index]))
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
columns = []
column_index = []
for idx in self._internal.column_index:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[idx].dtype.name) in include_numpy_type or
self._internal.spark_type_for(idx) in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[idx].dtype.name) in exclude_numpy_type or
self._internal.spark_type_for(idx) in exclude_spark_type)
if should_include:
columns.append(self._internal.column_name_for(idx))
column_index.append(idx)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col) for col in columns]),
data_columns=columns, column_index=column_index))
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
    def drop(self, labels=None, axis=1,
             columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None):
        """
        Drop specified labels from columns.
        Remove columns by specifying label names and axis=1 or columns.
        When specifying both labels and columns, only labels will be dropped.
        Removing rows is yet to be implemented.
        Parameters
        ----------
        labels : single label or list-like
            Column labels to drop.
        axis : {1 or 'columns'}, default 1
            .. dropna currently only works for axis=1 'columns'
               axis=0 is yet to be implemented.
        columns : single label or list-like
            Alternative to specifying axis (``labels, axis=1``
            is equivalent to ``columns=labels``).
        Returns
        -------
        dropped : DataFrame
        See Also
        --------
        Series.dropna
        Examples
        --------
        >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
        ...                   columns=['x', 'y', 'z', 'w'])
        >>> df
           x  y  z  w
        0  1  3  5  7
        1  2  4  6  8
        >>> df.drop('x', axis=1)
           y  z  w
        0  3  5  7
        1  4  6  8
        >>> df.drop(['y', 'z'], axis=1)
           x  w
        0  1  7
        1  2  8
        >>> df.drop(columns=['y', 'z'])
           x  w
        0  1  7
        1  2  8
        Also support for MultiIndex
        >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
        ...                   columns=['x', 'y', 'z', 'w'])
        >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
        >>> df.columns = pd.MultiIndex.from_tuples(columns)
        >>> df  # doctest: +NORMALIZE_WHITESPACE
           a     b
           x  y  z  w
        0  1  3  5  7
        1  2  4  6  8
        >>> df.drop('a')  # doctest: +NORMALIZE_WHITESPACE
           b
           z  w
        0  5  7
        1  6  8
        Notes
        -----
        Currently only axis = 1 is supported in this function,
        axis = 0 is yet to be implemented.
        """
        if labels is not None:
            # `labels` takes precedence: re-dispatch through the `columns` path.
            axis = self._validate_axis(axis)
            if axis == 1:
                return self.drop(columns=labels)
            raise NotImplementedError("Drop currently only works for axis=1")
        elif columns is not None:
            # Normalize `columns` to a list of label tuples so plain labels and
            # MultiIndex labels are handled uniformly below.
            if isinstance(columns, str):
                columns = [(columns,)]  # type: ignore
            elif isinstance(columns, tuple):
                columns = [columns]
            else:
                columns = [col if isinstance(col, tuple) else (col,)  # type: ignore
                           for col in columns]
            # Prefix match: dropping ('a',) on a MultiIndex removes every
            # column whose label starts with 'a' (see the doctest above).
            drop_column_index = set(idx for idx in self._internal.column_index
                                    for col in columns
                                    if idx[:len(col)] == col)
            if len(drop_column_index) == 0:
                raise KeyError(columns)
            # Keep (column name, label) pairs that were not matched for removal.
            cols, idxes = zip(*((column, idx)
                                for column, idx
                                in zip(self._internal.data_columns, self._internal.column_index)
                                if idx not in drop_column_index))
            internal = self._internal.copy(
                sdf=self._sdf.select(
                    self._internal.index_scols + [self._internal.scol_for(idx) for idx in idxes]),
                data_columns=list(cols),
                column_index=list(idxes))
            return DataFrame(internal)
        else:
            raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def _sort(self, by: List[Column], ascending: Union[bool, List[bool]],
inplace: bool, na_position: str):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
by = [self[colname]._scol for colname in by]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_scols
elif is_list_like(level):
by = [self._internal.index_scols[l] for l in level] # type: ignore
else:
by = [self._internal.index_scols[level]]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "a".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._internal.scol_for(col)
.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._internal.scol_for(col).isin(list(values)).alias(col)
for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
    def merge(self, right: 'DataFrame', how: str = 'inner',
              on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
              left_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
              right_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
              left_index: bool = False, right_index: bool = False,
              suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
        """
        Merge DataFrame objects with a database-style join.
        The index of the resulting DataFrame will be one of the following:
            - 0...n if no index is used for merging
            - Index of the left DataFrame if merged only on the index of the right DataFrame
            - Index of the right DataFrame if merged only on the index of the left DataFrame
            - All involved indices if merged using the indices of both DataFrames
                e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
                be an index (x, a, b)
        Parameters
        ----------
        right: Object to merge with.
        how: Type of merge to be performed.
            {'left', 'right', 'outer', 'inner'}, default 'inner'
            left: use only keys from left frame, similar to a SQL left outer join; preserve key
                order.
            right: use only keys from right frame, similar to a SQL right outer join; preserve key
                order.
            outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
                lexicographically.
            inner: use intersection of keys from both frames, similar to a SQL inner join;
                preserve the order of the left keys.
        on: Column or index level names to join on. These must be found in both DataFrames. If on
            is None and not merging on indexes then this defaults to the intersection of the
            columns in both DataFrames.
        left_on: Column or index level names to join on in the left DataFrame. Can also
            be an array or list of arrays of the length of the left DataFrame.
            These arrays are treated as if they are columns.
        right_on: Column or index level names to join on in the right DataFrame. Can also
            be an array or list of arrays of the length of the right DataFrame.
            These arrays are treated as if they are columns.
        left_index: Use the index from the left DataFrame as the join key(s). If it is a
            MultiIndex, the number of keys in the other DataFrame (either the index or a number of
            columns) must match the number of levels.
        right_index: Use the index from the right DataFrame as the join key. Same caveats as
            left_index.
        suffixes: Suffix to apply to overlapping column names in the left and right side,
            respectively.
        Returns
        -------
        DataFrame
            A DataFrame of the two merged objects.
        Examples
        --------
        >>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
        ...                     'value': [1, 2, 3, 5]},
        ...                    columns=['lkey', 'value'])
        >>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
        ...                     'value': [5, 6, 7, 8]},
        ...                    columns=['rkey', 'value'])
        >>> df1
          lkey  value
        0  foo      1
        1  bar      2
        2  baz      3
        3  foo      5
        >>> df2
          rkey  value
        0  foo      5
        1  bar      6
        2  baz      7
        3  foo      8
        Merge df1 and df2 on the lkey and rkey columns. The value columns have
        the default suffixes, _x and _y, appended.
        >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
        >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
          lkey  value_x rkey  value_y
        0  bar        2  bar        6
        5  baz        3  baz        7
        1  foo        1  foo        5
        2  foo        1  foo        8
        3  foo        5  foo        5
        4  foo        5  foo        8
        >>> left_kdf = ks.DataFrame({'A': [1, 2]})
        >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
        >>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
           A  B
        1  2  x
        >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
           A     B
        0  1  None
        1  2     x
        >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
             A  B
        1  2.0  x
        2  NaN  y
        >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
             A     B
        0  1.0  None
        1  2.0     x
        2  NaN     y
        Notes
        -----
        As described in #263, joining string columns currently returns None for missing values
        instead of NaN.
        """
        # Normalize key specs into lists of label tuples; None passes through
        # so "no keys given" stays distinguishable from "empty list of keys".
        _to_list = lambda os: (os if os is None
                               else [os] if isinstance(os, tuple)
                               else [(os,)] if isinstance(os, str)
                               else [o if isinstance(o, tuple) else (o,)  # type: ignore
                                     for o in os])
        if isinstance(right, ks.Series):
            right = right.to_frame()
        # Resolve the join keys for each side: `on`, explicit left_on/right_on,
        # the index columns, or (as a last resort) the common column names.
        if on:
            if left_on or right_on:
                raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
                                 'not a combination of both.')
            left_keys = _to_list(on)
            right_keys = _to_list(on)
        else:
            # TODO: need special handling for multi-index.
            if left_index:
                left_keys = self._internal.index_columns
            else:
                left_keys = _to_list(left_on)
            if right_index:
                right_keys = right._internal.index_columns
            else:
                right_keys = _to_list(right_on)
            if left_keys and not right_keys:
                raise ValueError('Must pass right_on or right_index=True')
            if right_keys and not left_keys:
                raise ValueError('Must pass left_on or left_index=True')
        if not left_keys and not right_keys:
            common = list(self.columns.intersection(right.columns))
            if len(common) == 0:
                raise ValueError(
                    'No common columns to perform merge on. Merge options: '
                    'left_on=None, right_on=None, left_index=False, right_index=False')
            left_keys = _to_list(common)
            right_keys = _to_list(common)
        if len(left_keys) != len(right_keys):  # type: ignore
            raise ValueError('len(left_keys) must equal len(right_keys)')
        # Map the pandas 'outer' spelling onto Spark's 'full' join type.
        if how == 'full':
            warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
                          "instead to be compatible with the pandas merge API", UserWarning)
        if how == 'outer':
            # 'outer' in pandas equals 'full' in Spark
            how = 'full'
        if how not in ('inner', 'left', 'right', 'full'):
            raise ValueError("The 'how' parameter has to be amongst the following values: ",
                             "['inner', 'left', 'right', 'outer']")
        # Alias both sides so identically named columns stay distinguishable
        # in the join condition and in the select expressions below.
        left_table = self._sdf.alias('left_table')
        right_table = right._sdf.alias('right_table')
        left_scol_for = lambda idx: scol_for(left_table, self._internal.column_name_for(idx))
        right_scol_for = lambda idx: scol_for(right_table, right._internal.column_name_for(idx))
        left_key_columns = [left_scol_for(idx) for idx in left_keys]  # type: ignore
        right_key_columns = [right_scol_for(idx) for idx in right_keys]  # type: ignore
        # Equi-join on each key pair, AND-ed together.
        join_condition = reduce(lambda x, y: x & y,
                                [lkey == rkey for lkey, rkey
                                 in zip(left_key_columns, right_key_columns)])
        joined_table = left_table.join(right_table, join_condition, how=how)
        # Unpack suffixes tuple for convenience
        left_suffix = suffixes[0]
        right_suffix = suffixes[1]
        # Append suffixes to columns with the same name to avoid conflicts later
        duplicate_columns = (set(self._internal.column_index)
                             & set(right._internal.column_index))
        exprs = []
        data_columns = []
        column_index = []
        # Left side: keep join-key columns unsuffixed (coalescing with the
        # right side for right/full joins); suffix other duplicates.
        for idx in self._internal.column_index:
            col = self._internal.column_name_for(idx)
            scol = left_scol_for(idx)
            if idx in duplicate_columns:
                if idx in left_keys and idx in right_keys:  # type: ignore
                    right_scol = right_scol_for(idx)
                    if how == 'right':
                        scol = right_scol
                    elif how == 'full':
                        scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
                    else:
                        pass
                else:
                    col = col + left_suffix
                    scol = scol.alias(col)
                    idx = tuple([idx[0] + left_suffix] + list(idx[1:]))
            exprs.append(scol)
            data_columns.append(col)
            column_index.append(idx)
        # Right side: shared join keys were already emitted above; suffix the
        # remaining duplicates.
        for idx in right._internal.column_index:
            col = right._internal.column_name_for(idx)
            scol = right_scol_for(idx)
            if idx in duplicate_columns:
                if idx in left_keys and idx in right_keys:  # type: ignore
                    continue
                else:
                    col = col + right_suffix
                    scol = scol.alias(col)
                    idx = tuple([idx[0] + right_suffix] + list(idx[1:]))
            exprs.append(scol)
            data_columns.append(col)
            column_index.append(idx)
        left_index_scols = self._internal.index_scols
        right_index_scols = right._internal.index_scols
        # Retain indices if they are used for joining
        if left_index:
            if right_index:
                if how in ('inner', 'left'):
                    exprs.extend(left_index_scols)
                    index_map = self._internal.index_map
                elif how == 'right':
                    exprs.extend(right_index_scols)
                    index_map = right._internal.index_map
                else:
                    # Full outer join on both indices: coalesce each index
                    # level pair so unmatched rows keep whichever side exists.
                    index_map = []
                    for (col, name), left_scol, right_scol in zip(self._internal.index_map,
                                                                  left_index_scols,
                                                                  right_index_scols):
                        scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
                        exprs.append(scol.alias(col))
                        index_map.append((col, name))
            else:
                exprs.extend(right_index_scols)
                index_map = right._internal.index_map
        elif right_index:
            exprs.extend(left_index_scols)
            index_map = self._internal.index_map
        else:
            index_map = []
        selected_columns = joined_table.select(*exprs)
        internal = _InternalFrame(sdf=selected_columns,
                                  index_map=index_map if index_map else None,
                                  data_columns=data_columns,
                                  column_index=column_index)
        return DataFrame(internal)
def join(self, right: 'DataFrame',
         on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None,
         how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
    """
    Join columns of another DataFrame.

    Join columns with `right` DataFrame either on index or on a key column. Efficiently join
    multiple DataFrame objects by index at once by passing a list.

    Parameters
    ----------
    right: DataFrame, Series
    on: str, list of str, or array-like, optional
        Column or index level name(s) in the caller to join on the index in `right`, otherwise
        joins index-on-index. If multiple values given, the `right` DataFrame must have a
        MultiIndex. Can pass an array as the join key if it is not already contained in the
        calling DataFrame. Like an Excel VLOOKUP operation.
    how: {'left', 'right', 'outer', 'inner'}, default 'left'
        How to handle the operation of the two objects.

        * left: use `left` frame’s index (or column if on is specified).
        * right: use `right`’s index.
        * outer: form union of `left` frame’s index (or column if on is specified) with
          right’s index, and sort it. lexicographically.
        * inner: form intersection of `left` frame’s index (or column if on is specified)
          with `right`’s index, preserving the order of the `left`’s one.
    lsuffix : str, default ''
        Suffix to use from left frame's overlapping columns.
    rsuffix : str, default ''
        Suffix to use from `right` frame's overlapping columns.

    Returns
    -------
    DataFrame
        A dataframe containing columns from both the `left` and `right`.

    See Also
    --------
    DataFrame.merge: For column(s)-on-columns(s) operations.

    Notes
    -----
    Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
    objects.

    Examples
    --------
    >>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
    ...                      'A': ['A0', 'A1', 'A2', 'A3']},
    ...                     columns=['key', 'A'])
    >>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
    ...                      'B': ['B0', 'B1', 'B2']},
    ...                     columns=['key', 'B'])
    >>> kdf1
      key   A
    0  K0  A0
    1  K1  A1
    2  K2  A2
    3  K3  A3
    >>> kdf2
      key   B
    0  K0  B0
    1  K1  B1
    2  K2  B2

    Join DataFrames using their indexes.

    >>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
    >>> join_kdf.sort_values(by=join_kdf.columns)
      key_left   A key_right     B
    0       K0  A0        K0    B0
    1       K1  A1        K1    B1
    2       K2  A2        K2    B2
    3       K3  A3      None  None

    If we want to join using the key columns, we need to set key to be the index in both df and
    right. The joined DataFrame will have key as its index.

    >>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
    >>> join_kdf.sort_values(by=join_kdf.columns)  # doctest: +NORMALIZE_WHITESPACE
          A     B
    key
    K0   A0    B0
    K1   A1    B1
    K2   A2    B2
    K3   A3  None

    Another option to join using the key columns is to use the on parameter. DataFrame.join
    always uses right’s index but we can use any column in df. This method preserves the
    original DataFrame’s index in the result.

    >>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
    >>> join_kdf.sort_index()
      key   A     B
    0  K3  A3  None
    1  K0  A0    B0
    2  K1  A1    B1
    3  K2  A2    B2
    """
    # Detect overlapping column labels between both sides; pandas requires suffixes
    # in that case, and so do we.
    if isinstance(right, ks.Series):
        common = list(self.columns.intersection([right.name]))
    else:
        common = list(self.columns.intersection(right.columns))
    if len(common) > 0 and not lsuffix and not rsuffix:
        raise ValueError(
            "columns overlap but no suffix specified: "
            "{rename}".format(rename=common))

    if on:
        # Joining on a caller column: temporarily promote it to the index so the
        # index-on-index merge below lines up, then restore it as a column.
        # NOTE: rebinding `self` only shadows the local name; the caller's frame
        # is not mutated.
        self = self.set_index(on)
        join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
                              suffixes=(lsuffix, rsuffix)).reset_index()
    else:
        # Plain index-on-index join, delegated entirely to merge().
        join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
                              suffixes=(lsuffix, rsuffix))
    return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
           verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
    """
    Append rows of other to the end of caller, returning a new object.

    Columns in other that are not in the caller are added as new columns.

    Parameters
    ----------
    other : DataFrame or Series/dict-like object, or list of these
        The data to append.
    ignore_index : boolean, default False
        If True, do not use the index labels.
    verify_integrity : boolean, default False
        If True, raise ValueError on creating index with duplicates.
    sort : boolean, default False
        Currently not supported.

    Returns
    -------
    appended : DataFrame

    Examples
    --------
    >>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))

    >>> df.append(df)
       A  B
    0  1  2
    1  3  4
    0  1  2
    1  3  4

    >>> df.append(df, ignore_index=True)
       A  B
    0  1  2
    1  3  4
    2  1  2
    3  3  4
    """
    # Reject unsupported call shapes up front.
    if isinstance(other, ks.Series):
        raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
    if sort:
        raise ValueError("The 'sort' parameter is currently not supported")

    if not ignore_index:
        this_index_scols = self._internal.index_scols
        that_index_scols = other._internal.index_scols
        if len(this_index_scols) != len(that_index_scols):
            raise ValueError("Both DataFrames have to have the same number of index levels")

        if verify_integrity and len(this_index_scols) > 0:
            # Any common index value between the two frames is an integrity violation.
            overlapping = (self._sdf.select(this_index_scols)
                           .intersect(other._sdf.select(that_index_scols))
                           .count())
            if overlapping > 0:
                raise ValueError("Indices have overlapping values")

    # Lazy import to avoid circular dependency issues
    from databricks.koalas.namespace import concat

    return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
    """
    Modify in place using non-NA values from another DataFrame.
    Aligns on indices. There is no return value.

    Parameters
    ----------
    other : DataFrame, or Series
    join : 'left', default 'left'
        Only left join is implemented, keeping the index and columns of the original object.
    overwrite : bool, default True
        How to handle non-NA values for overlapping keys:

        * True: overwrite original DataFrame's values with values from `other`.
        * False: only update values that are NA in the original DataFrame.

    Returns
    -------
    None : method directly changes calling object

    See Also
    --------
    DataFrame.merge : For column(s)-on-columns(s) operations.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
    >>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
    >>> df.update(new_df)
    >>> df
       A  B
    0  1  4
    1  2  5
    2  3  6

    The DataFrame's length does not increase as a result of the update,
    only values at matching index/column labels are updated.

    >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
    >>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
    >>> df.update(new_df)
    >>> df
       A  B
    0  a  d
    1  b  e
    2  c  f

    For Series, it's name attribute must be set.

    >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
    >>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
    >>> df.update(new_column)
    >>> df
       A  B
    0  a  d
    1  b  y
    2  c  e

    If `other` contains None the corresponding values are not updated in the original dataframe.

    >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
    >>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
    >>> df.update(new_df)
    >>> df
       A      B
    0  1    4.0
    1  2  500.0
    2  3    6.0
    """
    if join != 'left':
        raise NotImplementedError("Only left join is supported")
    if isinstance(other, ks.Series):
        other = DataFrame(other)

    # Only column labels present in BOTH frames can be updated.
    update_columns = list(set(self._internal.column_index)
                          .intersection(set(other._internal.column_index)))
    # Left-join `other` on the index; overlapping columns from `other` are suffixed
    # with '_new' so old and new values sit side by side in one Spark DataFrame.
    update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
    for column_index in update_columns:
        column_name = self._internal.column_name_for(column_index)
        old_col = scol_for(update_sdf, column_name)
        new_col = scol_for(update_sdf, other._internal.column_name_for(column_index) + '_new')
        if overwrite:
            # Take the new value, except where `other` has null — nulls in `other`
            # never clobber existing data (see last docstring example).
            update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
                                               .otherwise(new_col))
        else:
            # Fill-gaps mode: only replace values that are null in the original.
            update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
                                               .otherwise(old_col))
    # Re-select only the original columns (dropping the temporary '_new' ones)
    # and mutate this frame in place; no value is returned.
    internal = self._internal.copy(sdf=update_sdf.select([scol_for(update_sdf, col)
                                                          for col in self._internal.columns]))
    self._internal = internal
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
           random_state: Optional[int] = None) -> 'DataFrame':
    """
    Return a random sample of items from an axis of object.

    Please call this function using named argument by specifying the ``frac`` argument.

    You can use `random_state` for reproducibility. However, note that different from pandas,
    specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
    result set depends on not only the seed, but also how the data is distributed across
    machines and to some extent network randomness when shuffle operations are involved. Even
    in the simplest case, the result set will depend on the system's CPU core count.

    Parameters
    ----------
    n : int, optional
        Number of items to return. This is currently NOT supported. Use frac instead.
    frac : float, optional
        Fraction of axis items to return.
    replace : bool, default False
        Sample with or without replacement.
    random_state : int, optional
        Seed for the random number generator (if int).

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing the sampled items.

    Examples
    --------
    >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
    ...                    'num_wings': [2, 0, 0, 0],
    ...                    'num_specimen_seen': [10, 2, 1, 8]},
    ...                   index=['falcon', 'dog', 'spider', 'fish'],
    ...                   columns=['num_legs', 'num_wings', 'num_specimen_seen'])
    >>> df  # doctest: +SKIP
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    dog            4          0                  2
    spider         8          0                  1
    fish           0          0                  8

    A random 25% sample of the ``DataFrame``.
    Note that we use `random_state` to ensure the reproducibility of
    the examples.

    >>> df.sample(frac=0.25, random_state=1)  # doctest: +SKIP
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    fish           0          0                  8

    Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
    so the same items could appear more than once.

    >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1)  # doctest: +SKIP
    falcon    2
    spider    8
    spider    8
    Name: num_legs, dtype: int64

    Specifying the exact number of items to return is not supported at the moment.

    >>> df.sample(n=5)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    NotImplementedError: Function sample currently does not support specifying ...
    """
    # Note: we don't run any of the doctests because the result can change depending on the
    # system's core count.
    if n is not None:
        raise NotImplementedError("Function sample currently does not support specifying "
                                  "exact number of items to return. Use frac instead.")
    if frac is None:
        raise ValueError("frac must be specified.")

    # Delegate the actual sampling to Spark and wrap the result without touching
    # index/column metadata.
    sampled_sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
    return DataFrame(self._internal.copy(sdf=sampled_sdf))
def astype(self, dtype) -> 'DataFrame':
    """
    Cast a pandas object to a specified dtype ``dtype``.

    Parameters
    ----------
    dtype : data type, or dict of column name -> data type
        Use a numpy.dtype or Python type to cast entire pandas object to
        the same type. Alternatively, use {col: dtype, ...}, where col is a
        column label and dtype is a numpy.dtype or Python type to cast one
        or more of the DataFrame's columns to column-specific types.

    Returns
    -------
    casted : same type as caller

    See Also
    --------
    to_datetime : Convert argument to datetime.

    Examples
    --------
    >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
    >>> df
       a  b
    0  1  1
    1  2  2
    2  3  3

    Convert to float type:

    >>> df.astype('float')
         a    b
    0  1.0  1.0
    1  2.0  2.0
    2  3.0  3.0

    Convert to int64 type back:

    >>> df.astype('int64')
       a  b
    0  1  1
    1  2  2
    2  3  3

    Convert column a to float type:

    >>> df.astype({'a': float})
         a  b
    0  1.0  1
    1  2.0  2
    2  3.0  3
    """
    if is_dict_like(dtype):
        # Validate all keys up front so a bad mapping fails before any cast happens.
        for col_name in dtype.keys():
            if col_name not in self.columns:
                raise KeyError('Only a column name can be used for the '
                               'key in a dtype mappings argument.')
        # Cast only the mapped columns; pass the rest through unchanged.
        applied = [col.astype(dtype=dtype[col_name]) if col_name in dtype else col
                   for col_name, col in self.items()]
    else:
        # A single dtype applies to every column.
        applied = [col.astype(dtype=dtype) for _, col in self.items()]

    sdf = self._sdf.select(
        self._internal.index_scols + [ser._scol for ser in applied])
    return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
    """
    Prefix labels with string `prefix`.

    For Series, the row labels are prefixed.
    For DataFrame, the column labels are prefixed.

    Parameters
    ----------
    prefix : str
        The string to add before each label.

    Returns
    -------
    DataFrame
        New DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    Series.add_suffix: Suffix row labels with string `suffix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_prefix('col_')
       col_A  col_B
    0      1      3
    1      2      4
    2      3      5
    3      4      6
    """
    assert isinstance(prefix, str)
    internal = self._internal
    # New physical column names, derived by prepending the prefix.
    renamed = [prefix + internal.column_name_for(idx) for idx in internal.column_index]
    sdf = self._sdf.select(
        internal.index_scols +
        [internal.scol_for(idx).alias(new_name)
         for idx, new_name in zip(internal.column_index, renamed)])
    # Prefix every level of each column label as well.
    new_index = [tuple(prefix + part for part in idx) for idx in internal.column_index]
    return DataFrame(internal.copy(sdf=sdf,
                                   data_columns=renamed,
                                   column_index=new_index))
def add_suffix(self, suffix):
    """
    Suffix labels with string `suffix`.

    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    DataFrame
        New DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    Series.add_suffix: Suffix row labels with string `suffix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_suffix('_col')
       A_col  B_col
    0      1      3
    1      2      4
    2      3      5
    3      4      6
    """
    assert isinstance(suffix, str)
    internal = self._internal
    # New physical column names, derived by appending the suffix.
    renamed = [internal.column_name_for(idx) + suffix for idx in internal.column_index]
    sdf = self._sdf.select(
        internal.index_scols +
        [internal.scol_for(idx).alias(new_name)
         for idx, new_name in zip(internal.column_index, renamed)])
    # Suffix every level of each column label as well.
    new_index = [tuple(part + suffix for part in idx) for idx in internal.column_index]
    return DataFrame(internal.copy(sdf=sdf,
                                   data_columns=renamed,
                                   column_index=new_index))
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
    """
    Generate descriptive statistics that summarize the central tendency,
    dispersion and shape of a dataset's distribution, excluding
    ``NaN`` values.

    Analyzes both numeric and object series, as well
    as ``DataFrame`` column sets of mixed data types. The output
    will vary depending on what is provided. Refer to the notes
    below for more detail.

    Parameters
    ----------
    percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
        A list of percentiles to be computed.

    Returns
    -------
    Series or DataFrame
        Summary statistics of the Series or Dataframe provided.

    See Also
    --------
    DataFrame.count: Count number of non-NA/null observations.
    DataFrame.max: Maximum of the values in the object.
    DataFrame.min: Minimum of the values in the object.
    DataFrame.mean: Mean of the values.
    DataFrame.std: Standard deviation of the observations.

    Notes
    -----
    For numeric data, the result's index will include ``count``,
    ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.

    Currently only numeric data is supported.

    Examples
    --------
    Describing a numeric ``Series``.

    >>> s = ks.Series([1, 2, 3])
    >>> s.describe()
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    25%      1.0
    50%      2.0
    75%      3.0
    max      3.0
    Name: 0, dtype: float64

    Describing a ``DataFrame``. Only numeric fields are returned.

    >>> df = ks.DataFrame({'numeric1': [1, 2, 3],
    ...                    'numeric2': [4.0, 5.0, 6.0],
    ...                    'object': ['a', 'b', 'c']
    ...                   },
    ...                   columns=['numeric1', 'numeric2', 'object'])
    >>> df.describe()
           numeric1  numeric2
    count       3.0       3.0
    mean        2.0       5.0
    std         1.0       1.0
    min         1.0       4.0
    25%         1.0       4.0
    50%         2.0       5.0
    75%         3.0       6.0
    max         3.0       6.0

    Describing a ``DataFrame`` and selecting custom percentiles.

    >>> df = ks.DataFrame({'numeric1': [1, 2, 3],
    ...                    'numeric2': [4.0, 5.0, 6.0]
    ...                   },
    ...                   columns=['numeric1', 'numeric2'])
    >>> df.describe(percentiles = [0.85, 0.15])
           numeric1  numeric2
    count       3.0       3.0
    mean        2.0       5.0
    std         1.0       1.0
    min         1.0       4.0
    15%         1.0       4.0
    50%         2.0       5.0
    85%         3.0       6.0
    max         3.0       6.0

    Describing a column from a ``DataFrame`` by accessing it as
    an attribute.

    >>> df.numeric1.describe()
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    25%      1.0
    50%      2.0
    75%      3.0
    max      3.0
    Name: numeric1, dtype: float64

    Describing a column from a ``DataFrame`` by accessing it as
    an attribute and selecting custom percentiles.

    >>> df.numeric1.describe(percentiles = [0.85, 0.15])
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    15%      1.0
    50%      2.0
    85%      3.0
    max      3.0
    Name: numeric1, dtype: float64
    """
    # Collect only numeric columns; non-numeric columns are silently dropped
    # (see "Only numeric fields are returned" in the docstring).
    exprs = []
    data_columns = []
    for col in self.columns:
        kseries = self[col]
        spark_type = kseries.spark_type
        if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
            # Map NaN to null first so Spark's summary() excludes them, matching
            # pandas' NaN-excluding statistics.
            exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
            data_columns.append(kseries.name)
        elif isinstance(spark_type, NumericType):
            exprs.append(kseries._scol)
            data_columns.append(kseries.name)

    if len(exprs) == 0:
        raise ValueError("Cannot describe a DataFrame without columns")

    if percentiles is not None:
        if any((p < 0.0) or (p > 1.0) for p in percentiles):
            raise ValueError("Percentiles should all be in the interval [0, 1]")
        # appending 50% if not in percentiles already
        percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
    else:
        percentiles = [0.25, 0.5, 0.75]
    # Spark's summary() takes percentile specs as strings like "25%".
    formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
    stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]

    sdf = self._sdf.select(*exprs).summary(stats)

    # Rename Spark's "stddev" row to pandas' "std", use the stat-name column as
    # the index, and cast everything to float64 for pandas parity.
    internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
                              data_columns=data_columns,
                              index_map=[('summary', None)])

    return DataFrame(internal).astype('float64')
def _cum(self, func, skipna: bool):
    """Shared driver for cummin/cummax/cumsum/cumprod: apply the per-Series
    cumulative op to every column and reassemble a DataFrame."""
    # Translate the aggregate function into the matching Series method name.
    if func == F.min:
        func = "cummin"
    elif func == F.max:
        func = "cummax"
    elif func == F.sum:
        func = "cumsum"
    elif func.__name__ == "cumprod":
        func = "cumprod"

    applied = [getattr(self[column], func)(skipna) for column in self.columns]

    sdf = self._sdf.select(
        self._internal.index_scols + [kser._scol for kser in applied])
    internal = self._internal.copy(
        sdf=sdf,
        data_columns=[kser._internal.data_columns[0] for kser in applied],
        column_index=[kser._internal.column_index[0] for kser in applied])
    return DataFrame(internal)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
    """
    Return DataFrame with duplicate rows removed, optionally only
    considering certain columns.

    Parameters
    ----------
    subset : column label or sequence of labels, optional
        Only consider certain columns for identifying duplicates, by
        default use all of the columns
    inplace : boolean, default False
        Whether to drop duplicates in place or to return a copy

    Returns
    -------
    DataFrame

    >>> df = ks.DataFrame(
    ...     {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
    >>> df
       a  b
    0  1  a
    1  2  a
    2  2  a
    3  2  c
    4  3  d

    >>> df.drop_duplicates().sort_values(['a', 'b'])
       a  b
    0  1  a
    1  2  a
    3  2  c
    4  3  d

    >>> df.drop_duplicates('a').sort_values(['a', 'b'])
       a  b
    0  1  a
    1  2  a
    4  3  d

    >>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
       a  b
    0  1  a
    1  2  a
    3  2  c
    4  3  d
    """
    # Normalize `subset` to a list of column-label tuples.
    if subset is None:
        keys = self._internal.column_index
    elif isinstance(subset, str):
        keys = [(subset,)]
    elif isinstance(subset, tuple):
        keys = [subset]
    else:
        keys = [key if isinstance(key, tuple) else (key,) for key in subset]

    # Spark's drop_duplicates works on physical column names.
    deduped_sdf = self._sdf.drop_duplicates(
        subset=[self._internal.column_name_for(idx) for idx in keys])
    internal = self._internal.copy(sdf=deduped_sdf)

    if inplace:
        self._internal = internal
    else:
        return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
            columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
            copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
    """
    Conform DataFrame to new index with optional filling logic, placing
    NA/NaN in locations having no value in the previous index. A new object
    is produced unless the new index is equivalent to the current one and
    ``copy=False``.

    Parameters
    ----------
    labels: array-like, optional
        New labels / index to conform the axis specified by ‘axis’ to.
    index, columns: array-like, optional
        New labels / index to conform to, should be specified using keywords.
        Preferably an Index object to avoid duplicating data
    axis: int or str, optional
        Axis to target. Can be either the axis name (‘index’, ‘columns’) or
        number (0, 1).
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.

    Returns
    -------
    DataFrame with changed index.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.

    Examples
    --------
    ``DataFrame.reindex`` supports two calling conventions

    * ``(index=index_labels, columns=column_labels, ...)``
    * ``(labels, axis={'index', 'columns'}, ...)``

    We *highly* recommend using keyword arguments to clarify your
    intent.

    Create a dataframe with some fictional data.

    >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
    >>> df = ks.DataFrame({
    ...      'http_status': [200, 200, 404, 404, 301],
    ...      'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
    ...       index=index,
    ...       columns=['http_status', 'response_time'])
    >>> df
               http_status  response_time
    Firefox            200           0.04
    Chrome             200           0.02
    Safari             404           0.07
    IE10               404           0.08
    Konqueror          301           1.00

    Create a new index and reindex the dataframe. By default
    values in the new index that do not have corresponding
    records in the dataframe are assigned ``NaN``.

    >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
    ...             'Chrome']
    >>> df.reindex(new_index).sort_index()
    ... # doctest: +NORMALIZE_WHITESPACE
                   http_status  response_time
    Chrome               200.0           0.02
    Comodo Dragon          NaN            NaN
    IE10                 404.0           0.08
    Iceweasel              NaN            NaN
    Safari               404.0           0.07

    We can fill in the missing values by passing a value to
    the keyword ``fill_value``.

    >>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
    ... # doctest: +NORMALIZE_WHITESPACE
                   http_status  response_time
    Chrome                 200           0.02
    Comodo Dragon            0           0.00
    IE10                   404           0.08
    Iceweasel                0           0.00
    Safari                 404           0.07

    We can also reindex the columns.

    >>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
    ... # doctest: +NORMALIZE_WHITESPACE
                   http_status  user_agent
    Chrome                 200         NaN
    Comodo Dragon            0         NaN
    IE10                   404         NaN
    Iceweasel                0         NaN
    Safari                 404         NaN

    Or we can use "axis-style" keyword arguments

    >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
    ... # doctest: +NORMALIZE_WHITESPACE
                   http_status  user_agent
    Chrome                 200         NaN
    Comodo Dragon            0         NaN
    IE10                   404         NaN
    Iceweasel                0         NaN
    Safari                 404         NaN

    To further illustrate the filling functionality in
    ``reindex``, we will create a dataframe with a
    monotonically increasing index (for example, a sequence
    of dates).

    >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
    >>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
    ...                    index=date_index)
    >>> df2.sort_index()  # doctest: +NORMALIZE_WHITESPACE
                prices
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0

    Suppose we decide to expand the dataframe to cover a wider
    date range.

    >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
    >>> df2.reindex(date_index2).sort_index()  # doctest: +NORMALIZE_WHITESPACE
                prices
    2009-12-29     NaN
    2009-12-30     NaN
    2009-12-31     NaN
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN
    """
    # The two calling conventions are mutually exclusive.
    if axis is not None and (index is not None or columns is not None):
        raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")

    # Axis-style call: route `labels` to the axis it targets.
    if labels is not None:
        if axis in ('index', 0, None):
            index = labels
        elif axis in ('columns', 1):
            columns = labels
        else:
            raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))

    if index is not None and not is_list_like(index):
        raise TypeError("Index must be called with a collection of some kind, "
                        "%s was passed" % type(index))

    if columns is not None and not is_list_like(columns):
        raise TypeError("Columns must be called with a collection of some kind, "
                        "%s was passed" % type(columns))

    df = self.copy()

    # Reindex each requested axis independently.
    if index is not None:
        df = DataFrame(df._reindex_index(index))

    if columns is not None:
        df = DataFrame(df._reindex_columns(columns))

    # Process missing values.
    if fill_value is not None:
        df = df.fillna(fill_value)

    # Copy
    if copy:
        return df.copy()
    else:
        # copy=False updates this frame in place and returns it.
        self._internal = df._internal
        return self
def _reindex_index(self, index):
    """Reindex the rows against `index`, returning a new ``_InternalFrame``.

    Mimics pandas' row reindexing with a right outer join of this frame
    against a one-column Spark DataFrame holding the requested labels: rows
    whose label is absent come back as all-null, and rows not in `index`
    are dropped.

    Parameters
    ----------
    index : array-like
        The new index labels to conform to.

    Returns
    -------
    _InternalFrame
        The reindexed internal frame (wrapped in a DataFrame by the caller).
    """
    # When axis is index, we can mimic pandas' by a right outer join.
    index_column = self._internal.index_columns
    assert len(index_column) <= 1, "Index should be single column or not set."
    if len(index_column) == 1:
        kser = ks.Series(list(index))
        index_column = index_column[0]
        labels = kser._kdf._sdf.select(kser._scol.alias(index_column))
    else:
        index_column = None
        labels = ks.Series(index).to_frame()._sdf

    joined_df = self._sdf.join(labels, on=index_column, how="right")
    # Bug fix: this previously used `x not in index_column`, which is a substring
    # test when `index_column` is a string (wrongly dropping any data column whose
    # name is a substring of the index column's name) and raises TypeError when
    # `index_column` is None. Compare names for equality instead.
    new_data_columns = [col for col in joined_df.columns if col != index_column]
    if index_column is not None:
        index_map = [(index_column, None)]  # type: List[IndexMap]
        internal = self._internal.copy(
            sdf=joined_df,
            data_columns=new_data_columns,
            index_map=index_map)
    else:
        internal = self._internal.copy(
            sdf=joined_df,
            data_columns=new_data_columns)

    return internal
def _reindex_columns(self, columns):
    """Reindex the columns against `columns`, returning a new ``_InternalFrame``.

    Existing columns are kept (in the requested order); requested columns that
    do not exist are added as all-NaN double columns, mirroring pandas.

    Parameters
    ----------
    columns : array-like
        The new column labels; tuples are required when the frame has a
        MultiIndex on columns.

    Returns
    -------
    _InternalFrame
        The reindexed internal frame (wrapped in a DataFrame by the caller).

    Raises
    ------
    TypeError
        If the columns are a MultiIndex and a non-tuple label is given.
    ValueError
        If a label's length does not match the column index level.
    """
    level = self._internal.column_index_level
    if level > 1:
        label_columns = list(columns)
        for col in label_columns:
            if not isinstance(col, tuple):
                raise TypeError('Expected tuple, got {}'.format(type(col)))
    else:
        label_columns = [(col,) for col in columns]

    for col in label_columns:
        if len(col) != level:
            raise ValueError("shape (1,{}) doesn't match the shape (1,{})"
                             .format(len(col), level))

    scols, columns, idx = [], [], []
    for label in label_columns:
        if label in self._internal.column_index:
            scols.append(self._internal.scol_for(label))
            columns.append(self._internal.column_name_for(label))
        else:
            # Missing columns materialize as NaN literals, like pandas.
            scols.append(F.lit(np.nan).alias(str(label)))
            columns.append(str(label))
        idx.append(label)

    # Bug fix: previously the select/return only happened when at least one new
    # (NaN) column was introduced, so reindexing to a subset or reordering of
    # existing columns returned None (or hit an undefined `sdf`). Always select
    # and return.
    sdf = self._sdf.select(self._internal.index_scols + list(scols))
    return self._internal.copy(sdf=sdf, data_columns=columns, column_index=idx)
def melt(self, id_vars=None, value_vars=None, var_name=None,
         value_name='value'):
    """
    Unpivot a DataFrame from wide format to long format, optionally
    leaving identifier variables set.

    This function is useful to massage a DataFrame into a format where one
    or more columns are identifier variables (`id_vars`), while all other
    columns, considered measured variables (`value_vars`), are "unpivoted" to
    the row axis, leaving just two non-identifier columns, 'variable' and
    'value'.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot. If not specified, uses all columns that
        are not set as `id_vars`.
    var_name : scalar, default 'variable'
        Name to use for the 'variable' column. If None it uses `frame.columns.name` or
        ‘variable’.
    value_name : scalar, default 'value'
        Name to use for the 'value' column.

    Returns
    -------
    DataFrame
        Unpivoted DataFrame.

    Examples
    --------
    >>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
    ...                    'B': {0: 1, 1: 3, 2: 5},
    ...                    'C': {0: 2, 1: 4, 2: 6}},
    ...                   columns=['A', 'B', 'C'])
    >>> df
       A  B  C
    0  a  1  2
    1  b  3  4
    2  c  5  6

    >>> ks.melt(df)
      variable value
    0        A     a
    1        B     1
    2        C     2
    3        A     b
    4        B     3
    5        C     4
    6        A     c
    7        B     5
    8        C     6

    >>> df.melt(id_vars='A')
       A variable  value
    0  a        B      1
    1  a        C      2
    2  b        B      3
    3  b        C      4
    4  c        B      5
    5  c        C      6

    >>> ks.melt(df, id_vars=['A', 'B'])
       A  B variable  value
    0  a  1        C      2
    1  b  3        C      4
    2  c  5        C      6

    >>> df.melt(id_vars=['A'], value_vars=['C'])
       A variable  value
    0  a        C      2
    1  b        C      4
    2  c        C      6

    The names of 'variable' and 'value' columns can be customized:

    >>> ks.melt(df, id_vars=['A'], value_vars=['B'],
    ...         var_name='myVarname', value_name='myValname')
       A myVarname  myValname
    0  a         B          1
    1  b         B          3
    2  c         B          5
    """
    # Normalize `id_vars` to a list of column-label tuples.
    if id_vars is None:
        id_vars = []
    elif isinstance(id_vars, str):
        id_vars = [(id_vars,)]
    elif isinstance(id_vars, tuple):
        if self._internal.column_index_level == 1:
            id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars]
        else:
            raise ValueError('id_vars must be a list of tuples when columns are a MultiIndex')
    else:
        id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars]

    column_index = self._internal.column_index

    # Normalize `value_vars` the same way; empty means "all non-id_vars columns".
    if value_vars is None:
        value_vars = []
    elif isinstance(value_vars, str):
        value_vars = [(value_vars,)]
    elif isinstance(value_vars, tuple):
        value_vars = [value_vars]
    else:
        value_vars = [valv if isinstance(valv, tuple) else (valv,) for valv in value_vars]

    if len(value_vars) == 0:
        value_vars = column_index

    # Candidate columns to unpivot: everything that is not an identifier.
    column_index = [idx for idx in column_index if idx not in id_vars]

    sdf = self._sdf

    # One 'variable' name per column-index level (pandas names multi-level
    # variable columns variable_0, variable_1, ...).
    if var_name is None:
        if self._internal.column_index_names is not None:
            var_name = self._internal.column_index_names
        elif self._internal.column_index_level == 1:
            var_name = ['variable']
        else:
            var_name = ['variable_{}'.format(i)
                        for i in range(self._internal.column_index_level)]
    elif isinstance(var_name, str):
        var_name = [var_name]

    # Unpivot by building an array of (variable..., value) structs — one per
    # melted column — and exploding it into rows.
    pairs = F.explode(F.array(*[
        F.struct(*(
            [F.lit(c).alias(name) for c, name in zip(idx, var_name)] +
            [self._internal.scol_for(idx).alias(value_name)])
        ) for idx in column_index if idx in value_vars]))

    # id_vars pass through unchanged, followed by the unpacked struct fields.
    columns = ([self._internal.scol_for(idx).alias(str(idx) if len(idx) > 1 else idx[0])
                for idx in id_vars] +
               [F.col("pairs.%s" % name)
                for name in var_name[:self._internal.column_index_level]] +
               [F.col("pairs.%s" % value_name)])
    exploded_df = sdf.withColumn("pairs", pairs).select(columns)

    # The result gets a fresh default index (matching pandas' RangeIndex output).
    return DataFrame(exploded_df)
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> 'ks.Series':
    """
    Return whether all elements are True.

    Returns True unless there is at least one element within a series that is
    False or equivalent (e.g. zero or empty)

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        Indicate which axis or axes should be reduced.

        * 0 / 'index' : reduce the index, return a Series whose index is the
          original column labels.

    Examples
    --------
    Create a dataframe from a dictionary.

    >>> df = ks.DataFrame({
    ...    'col1': [True, True, True],
    ...    'col2': [True, False, False],
    ...    'col3': [0, 0, 0],
    ...    'col4': [1, 2, 3],
    ...    'col5': [True, True, None],
    ...    'col6': [True, False, None]},
    ...    columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])

    Default behaviour checks if column-wise values all return a boolean.

    >>> df.all()
    col1     True
    col2    False
    col3    False
    col4     True
    col5     True
    col6    False
    Name: all, dtype: bool

    Returns
    -------
    Series
    """
    if axis not in [0, 'index']:
        raise ValueError('axis should be either 0 or "index" currently.')

    applied = []
    column_index = self._internal.column_index
    for idx in column_index:
        col = self[idx]._scol
        # F.min over booleans is a logical AND; nulls are coalesced to True
        # so missing values do not affect the result (pandas skipna behavior).
        all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
        # An all-null (or empty) column yields a null min, which maps to True.
        applied.append(F.when(all_col.isNull(), True).otherwise(all_col))

    # TODO: there is a similar logic to transpose in, for instance,
    #   DataFrame.any, Series.quantile. Maybe we should deduplicate it.
    # Transpose the single row of per-column results into a column of rows:
    # pack each (column label..., value) into a struct, explode the array of
    # structs, then unpack the struct fields.
    sdf = self._sdf
    value_column = "value"
    cols = []
    for idx, applied_col in zip(column_index, applied):
        cols.append(F.struct(
            [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
            [applied_col.alias(value_column)]))

    sdf = sdf.select(
        F.array(*cols).alias("arrays")
    ).select(F.explode(F.col("arrays")))

    sdf = sdf.selectExpr("col.*")

    # The original column labels become the index of the resulting Series.
    index_column_name = lambda i: (None if self._internal.column_index_names is None
                                   else (self._internal.column_index_names[i],))
    internal = self._internal.copy(
        sdf=sdf,
        data_columns=[value_column],
        index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
                   for i in range(self._internal.column_index_level)],
        column_index=None,
        column_index_names=None)

    return DataFrame(internal)[value_column].rename("all")
    # TODO: axis, skipna, and many arguments should be implemented.
    def any(self, axis: Union[int, str] = 0) -> bool:
        """
        Return whether any element is True.

        Returns False unless there is at least one element within a series that is
        True or equivalent (e.g. non-zero or non-empty).

        Parameters
        ----------
        axis : {0 or 'index'}, default 0
            Indicate which axis or axes should be reduced.

            * 0 / 'index' : reduce the index, return a Series whose index is the
              original column labels.

        Returns
        -------
        Series

        Examples
        --------
        Create a dataframe from a dictionary.

        >>> df = ks.DataFrame({
        ...    'col1': [False, False, False],
        ...    'col2': [True, False, False],
        ...    'col3': [0, 0, 1],
        ...    'col4': [0, 1, 2],
        ...    'col5': [False, False, None],
        ...    'col6': [True, False, None]},
        ...    columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])

        Default behaviour checks if column-wise values all return a boolean.

        >>> df.any()
        col1    False
        col2     True
        col3     True
        col4     True
        col5    False
        col6     True
        Name: any, dtype: bool
        """
        if axis not in [0, 'index']:
            raise ValueError('axis should be either 0 or "index" currently.')
        applied = []
        column_index = self._internal.column_index
        for idx in column_index:
            col = self[idx]._scol
            # Per column: cast to boolean, treat nulls as False, then MAX acts as
            # a logical OR over the column's rows.
            all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
            # MAX over an empty column is null; an empty column has no True, so False.
            applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
        # TODO: there is a similar logic to transpose in, for instance,
        # DataFrame.all, Series.quantile. Maybe we should deduplicate it.
        # The remainder "transposes" the one-row aggregate into a Series whose
        # index is the original column labels: pack each (label, value) into a
        # struct, explode the array of structs into rows, then flatten.
        sdf = self._sdf
        value_column = "value"
        cols = []
        for idx, applied_col in zip(column_index, applied):
            cols.append(F.struct(
                [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
                [applied_col.alias(value_column)]))
        sdf = sdf.select(
            F.array(*cols).alias("arrays")
        ).select(F.explode(F.col("arrays")))
        sdf = sdf.selectExpr("col.*")
        # Preserve the column-index names (if any) as the new index names.
        index_column_name = lambda i: (None if self._internal.column_index_names is None
                                       else (self._internal.column_index_names[i],))
        internal = self._internal.copy(
            sdf=sdf,
            data_columns=[value_column],
            index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
                       for i in range(self._internal.column_index_level)],
            column_index=None,
            column_index_names=None)
        # NOTE(review): annotated `-> bool` but actually returns a Series, as the
        # docstring says — confirm and fix the annotation separately.
        return DataFrame(internal)[value_column].rename("any")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it use lowest rank in group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it use highest rank in group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps in group.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].rank(method=method, ascending=ascending))
sdf = self._sdf.select(self._internal.index_columns + [column._scol for column in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
if axis not in ('index', 0, 'columns', 1, None):
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
index_scols = self._internal.index_scols
sdf = self._sdf
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
sdf = sdf.filter(col)
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
return self[items]
elif like is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].contains(like))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
column_index = self._internal.column_index
output_idx = [idx for idx in column_index if any(like in i for i in idx)]
return self[output_idx]
elif regex is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].rlike(regex))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
column_index = self._internal.column_index
matcher = re.compile(regex)
output_idx = [idx for idx in column_index
if any(matcher.search(i) is not None for i in idx)]
return self[output_idx]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
    def rename(self,
               mapper=None,
               index=None,
               columns=None,
               axis='index',
               inplace=False,
               level=None,
               errors='ignore'):
        """
        Alter axes labels.

        Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
        will be left as-is. Extra labels listed don't throw an error.

        Parameters
        ----------
        mapper : dict-like or function
            Dict-like or functions transformations to apply to that axis' values.
            Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
            and `columns`.
        index : dict-like or function
            Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
        columns : dict-like or function
            Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
        axis : int or str, default 'index'
            Axis to target with mapper. Can be either the axis name ('index', 'columns') or
            number (0, 1).
        inplace : bool, default False
            Whether to return a new DataFrame.
        level : int or level name, default None
            In case of a MultiIndex, only rename labels in the specified level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
            contains labels that are not present in the Index being transformed. If 'ignore',
            existing keys will be renamed and extra keys will be ignored.

        Returns
        -------
        DataFrame with the renamed axis labels.

        Raises
        ------
        `KeyError`
            If any of the labels is not found in the selected axis and "errors='raise'".

        Examples
        --------
        >>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        >>> kdf1.rename(columns={"A": "a", "B": "c"})  # doctest: +NORMALIZE_WHITESPACE
           a  c
        0  1  4
        1  2  5
        2  3  6

        >>> kdf1.rename(index={1: 10, 2: 20})  # doctest: +NORMALIZE_WHITESPACE
            A  B
        0   1  4
        10  2  5
        20  3  6

        >>> def str_lower(s) -> str:
        ...     return str.lower(s)
        >>> kdf1.rename(str_lower, axis='columns')  # doctest: +NORMALIZE_WHITESPACE
           a  b
        0  1  4
        1  2  5
        2  3  6

        >>> def mul10(x) -> int:
        ...     return x * 10
        >>> kdf1.rename(mul10, axis='index')  # doctest: +NORMALIZE_WHITESPACE
            A  B
        0   1  4
        10  2  5
        20  3  6

        >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
        >>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
        >>> kdf2.rename(columns=str_lower, level=0)  # doctest: +NORMALIZE_WHITESPACE
           x     y
           A  B  C  D
        0  1  2  3  4
        1  5  6  7  8

        >>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
        >>> kdf3.rename(index=str_lower)  # doctest: +NORMALIZE_WHITESPACE
             a  b
        x a  1  2
          b  3  4
        y c  5  6
          d  7  8
        """
        def gen_mapper_fn(mapper):
            # Normalize a dict-like or callable `mapper` into
            # (mapper_fn, spark_return_type).
            if isinstance(mapper, dict):
                if len(mapper) == 0:
                    if errors == 'raise':
                        raise KeyError('Index include label which is not in the `mapper`.')
                    else:
                        # NOTE(review): this returns a DataFrame while every call
                        # site unpacks the result as a 2-tuple
                        # (`fn, stype = gen_mapper_fn(...)`); an empty dict with
                        # errors='ignore' would therefore raise at the call site
                        # unless the frame happens to unpack into two items —
                        # confirm intended behavior.
                        return DataFrame(self._internal)
                # All dict values must share one Python type so a single Spark
                # return type can be derived for the pandas UDF below.
                type_set = set(map(lambda x: type(x), mapper.values()))
                if len(type_set) > 1:
                    raise ValueError("Mapper dict should have the same value type.")
                spark_return_type = as_spark_type(list(type_set)[0])

                def mapper_fn(x):
                    if x in mapper:
                        return mapper[x]
                    else:
                        if errors == 'raise':
                            raise KeyError('Index include value which is not in the `mapper`')
                        return x
            elif callable(mapper):
                # Infer the Spark return type from the callable's annotation.
                spark_return_type = _infer_return_type(mapper).tpe

                def mapper_fn(x):
                    return mapper(x)
            else:
                raise ValueError("`mapper` or `index` or `columns` should be "
                                 "either dict-like or function type.")
            return mapper_fn, spark_return_type

        index_mapper_fn = None
        index_mapper_ret_stype = None
        columns_mapper_fn = None

        # `mapper` + `axis` takes precedence over the `index`/`columns` aliases.
        if mapper:
            if axis == 'index' or axis == 0:
                index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper)
            elif axis == 'columns' or axis == 1:
                columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper)
            else:
                raise ValueError("argument axis should be either the axis name "
                                 "(‘index’, ‘columns’) or number (0, 1)")
        else:
            if index:
                index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index)
            if columns:
                columns_mapper_fn, _ = gen_mapper_fn(columns)

            if not index and not columns:
                raise ValueError("Either `index` or `columns` should be provided.")

        internal = self._internal
        if index_mapper_fn:
            # rename index labels, if `level` is None, rename all index columns, otherwise only
            # rename the corresponding level index.
            # implement this by transform the underlying spark dataframe,
            # Example:
            # suppose the kdf index column in underlying spark dataframe is "index_0", "index_1",
            # if rename level 0 index labels, will do:
            #   ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
            # if rename all index labels (`level` is None), then will do:
            #   ```
            #   kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
            #           .withColumn("index_1", mapper_fn_udf(col("index_1"))
            #   ```
            index_columns = internal.index_columns
            num_indices = len(index_columns)
            if level:
                if level < 0 or level >= num_indices:
                    raise ValueError("level should be an integer between [0, num_indices)")

            def gen_new_index_column(level):
                # Build a pandas UDF that maps one index column element-wise.
                index_col_name = index_columns[level]

                index_mapper_udf = pandas_udf(lambda s: s.map(index_mapper_fn),
                                              returnType=index_mapper_ret_stype)
                return index_mapper_udf(scol_for(internal.sdf, index_col_name))

            sdf = internal.sdf
            if level is None:
                for i in range(num_indices):
                    sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i))
            else:
                sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level))
            internal = internal.copy(sdf=sdf)
        if columns_mapper_fn:
            # rename column name.
            # Will modify the `_internal._column_index` and transform underlying spark dataframe
            # to the same column name with `_internal._column_index`.
            if level:
                if level < 0 or level >= internal.column_index_level:
                    raise ValueError("level should be an integer between [0, column_index_level)")

            def gen_new_column_index_entry(column_index_entry):
                if isinstance(column_index_entry, tuple):
                    if level is None:
                        # rename all level columns
                        return tuple(map(columns_mapper_fn, column_index_entry))
                    else:
                        # only rename specified level column
                        entry_list = list(column_index_entry)
                        entry_list[level] = columns_mapper_fn(entry_list[level])
                        return tuple(entry_list)
                else:
                    return columns_mapper_fn(column_index_entry)

            new_column_index = list(map(gen_new_column_index_entry, internal.column_index))

            # Derive the flat Spark column names from the (possibly multi-level)
            # column index entries.
            if internal.column_index_level == 1:
                new_data_columns = [col[0] for col in new_column_index]
            else:
                new_data_columns = [str(col) for col in new_column_index]
            new_data_scols = [scol_for(internal.sdf, old_col_name).alias(new_col_name)
                              for old_col_name, new_col_name
                              in zip(internal.data_columns, new_data_columns)]
            sdf = internal.sdf.select(*(internal.index_scols + new_data_scols))
            internal = internal.copy(sdf=sdf, column_index=new_column_index,
                                     data_columns=new_data_columns)
        if inplace:
            self._internal = internal
            return self
        else:
            return DataFrame(internal)
def keys(self):
"""
Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object')
"""
return self.columns
    def _get_from_multiindex_column(self, key):
        """ Select columns from multi-index columns.

        :param key: the multi-index column keys represented by tuple
        :return: DataFrame or Series
        """
        from databricks.koalas.series import Series
        assert isinstance(key, tuple)
        # `indexes` pairs each original column label with the still-unmatched
        # tail of that label; each loop iteration consumes one level of `key`.
        indexes = [(idx, idx) for idx in self._internal.column_index]
        for k in key:
            indexes = [(index, idx[1:]) for index, idx in indexes if idx[0] == k]
            if len(indexes) == 0:
                raise KeyError(k)
        recursive = False
        if all(len(idx) > 0 and idx[0] == '' for _, idx in indexes):
            # If the head is '', drill down recursively.
            recursive = True
            for i, (col, idx) in enumerate(indexes):
                indexes[i] = (col, tuple([str(key), *idx[1:]]))

        column_index_names = None
        if self._internal.column_index_names is not None:
            # Keep only the column-index names for the levels that remain
            # after the selection above.
            level = column_index_level([idx for _, idx in indexes])
            column_index_names = self._internal.column_index_names[-level:]

        if all(len(idx) == 0 for _, idx in indexes):
            # Every level was consumed: the key addresses a single column,
            # so return it as a Series anchored on this frame.
            try:
                idxes = set(idx for idx, _ in indexes)
                assert len(idxes) == 1
                index = list(idxes)[0]
                kdf_or_ser = \
                    Series(self._internal.copy(scol=self._internal.scol_for(index),
                                               column_index=[index]),
                           anchor=self)
            except AnalysisException:
                raise KeyError(key)
        else:
            # Levels remain: return a narrower DataFrame over the matches.
            kdf_or_ser = DataFrame(self._internal.copy(
                data_columns=[self._internal.column_name_for(idx) for idx, _ in indexes],
                column_index=[idx for _, idx in indexes],
                column_index_names=column_index_names))
        if recursive:
            # Resolve the placeholder introduced for the '' head above.
            kdf_or_ser = kdf_or_ser._get_from_multiindex_column((str(key),))
        return kdf_or_ser
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
return self._get_from_multiindex_column((key,))
if isinstance(key, tuple):
return self._get_from_multiindex_column(key)
elif np.isscalar(key):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.pandas_df
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
# pandas 0.25.1 has a regression about HTML representation so 'bold_rows'
# has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204
bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__))
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows)
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True, bold_rows=bold_rows)
    def __getitem__(self, key):
        # All item access (df["col"], df[["a", "b"]], df[bool_series], ...) is
        # dispatched through a single type-based helper.
        return self._pd_getitem(key)
    def __setitem__(self, key, value):
        """Assign column(s): ``df[key] = value`` where value may come from another frame."""
        from databricks.koalas.series import Series

        if (isinstance(value, Series) and value._kdf is not self) or \
                (isinstance(value, DataFrame) and value is not self):
            # Different Series or DataFrames: the value must be aligned (joined)
            # with this frame before assignment.
            level = self._internal.column_index_level
            if isinstance(value, Series):
                # Lift the Series to a one-column frame whose column index has
                # the same number of levels as this frame (padding with '').
                value = value.to_frame()
                value.columns = pd.MultiIndex.from_tuples(
                    [tuple(list(value._internal.column_index[0]) + ([''] * (level - 1)))])
            else:
                assert isinstance(value, DataFrame)
                # Trim or pad the value's column-index levels to match ours.
                value_level = value._internal.column_index_level
                if value_level > level:
                    value.columns = pd.MultiIndex.from_tuples(
                        [idx[level:] for idx in value._internal.column_index])
                elif value_level < level:
                    value.columns = pd.MultiIndex.from_tuples(
                        [tuple(list(idx) + ([''] * (level - value_level)))
                         for idx in value._internal.column_index])

            # Normalize `key` to a list of label tuples.
            if isinstance(key, str):
                key = [(key,)]
            elif isinstance(key, tuple):
                key = [key]
            else:
                key = [k if isinstance(k, tuple) else (k,) for k in key]

            def assign_columns(kdf, this_column_index, that_column_index):
                assert len(key) == len(that_column_index)
                # `zip_longest` keeps iterating even after `this_column_index`
                # is exhausted, so brand-new columns from `that` are still yielded.
                for k, this_idx, that_idx \
                        in zip_longest(key, this_column_index, that_column_index):
                    yield (kdf[that_idx], tuple(['that', *k]))
                    if this_idx is not None and this_idx[1:] != k:
                        # Keep an existing column that is not being overwritten.
                        yield (kdf[this_idx], this_idx)

            kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
        elif isinstance(key, list):
            assert isinstance(value, DataFrame)
            # Same DataFrames.
            field_names = value.columns
            kdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
        else:
            # Same Series.
            kdf = self._assign({key: value})

        # Adopt the result in place so `self` reflects the assignment.
        self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self._get_from_multiindex_column((key,))
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
    def __len__(self):
        # Number of rows; note this triggers a Spark job (`count` is an action).
        return self._sdf.count()
    def __dir__(self):
        # Expose column names for tab-completion, skipping names containing a
        # space since those cannot be attribute-accessed anyway.
        fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
        return super(DataFrame, self).__dir__() + fields
    def __iter__(self):
        # Like pandas, iterating a DataFrame yields its column labels.
        return iter(self.columns)
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
    if sys.version_info >= (3, 7):
        def __class_getitem__(cls, params):
            # This is a workaround to support variadic generics in DataFrame on
            # Python 3.7. See https://github.com/python/typing/issues/193
            # We always wrap the given type hints in a tuple to mimic a
            # variadic generic.
            return super(cls, DataFrame).__class_getitem__(Tuple[params])
    elif (3, 5) <= sys.version_info < (3, 7):
        # This is a workaround to support variadic generics in DataFrame on
        # Python 3.5/3.6. The implementation lives in the metaclass, so this
        # flag is needed to distinguish a Koalas DataFrame there.
        is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
    """
    Perform a reduction on ``sdf`` where ``aggs`` are known SQL aggregate functions,
    returning the single row of aggregate results as a plain list.
    """
    assert isinstance(sdf, spark.DataFrame)
    aggregated = sdf.agg(*aggs)
    # head(2) lets us assert the aggregation produced exactly one row.
    rows = aggregated.head(2)
    assert len(rows) == 1, (sdf, rows)
    values = list(rows[0])
    assert len(values) == len(aggs), (rows[0], values)
    return values
class _CachedDataFrame(DataFrame):
    """
    Cached Koalas DataFrame. Logically it corresponds to a pandas DataFrame, but
    internally the backing Spark DataFrame is kept in Spark's cache.

    Instances are usable as context managers: leaving the ``with`` block
    uncaches the data automatically.
    """
    def __init__(self, internal):
        # Persist the Spark plan eagerly so that subsequent actions reuse it.
        self._cached = internal._sdf.cache()
        super(_CachedDataFrame, self).__init__(internal)

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.unpersist()

    def unpersist(self):
        """
        The `unpersist` function is used to uncache the Koalas DataFrame when it
        is not used with `with` statement.

        Examples
        --------
        >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df = df.cache()

        To uncache the dataframe, use `unpersist` function

        >>> df.unpersist()
        """
        if not self._cached.is_cached:
            return
        self._cached.unpersist()
| 37.097469
| 100
| 0.518644
|
4a0c565d78be0bf08c5f8a3ef971148ba4149931
| 1,510
|
py
|
Python
|
pytorch_lightning/trainer/connectors/model_connector.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1
|
2021-02-26T09:15:38.000Z
|
2021-02-26T09:15:38.000Z
|
pytorch_lightning/trainer/connectors/model_connector.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1
|
2021-03-01T17:32:12.000Z
|
2021-03-01T17:32:12.000Z
|
pytorch_lightning/trainer/connectors/model_connector.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1
|
2020-10-18T10:32:31.000Z
|
2020-10-18T10:32:31.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Root module for all distributed operations in Lightning.
Currently supports training on CPU, GPU (dp, ddp, ddp2, horovod) and TPU.
"""
from weakref import proxy
class ModelConnector:
def __init__(self, trainer):
self.trainer = trainer
def copy_trainer_model_properties(self, model):
ref_model = self.trainer.lightning_module or model
automatic_optimization = ref_model.automatic_optimization and self.trainer.train_loop.automatic_optimization
self.trainer.train_loop.automatic_optimization = automatic_optimization
for m in [model, ref_model]:
m.trainer = proxy(self.trainer)
m._device_type = str(self.trainer._device_type)
m._distrib_type = str(self.trainer._distrib_type)
m.use_amp = self.trainer.amp_backend is not None
m.testing = self.trainer.testing
m.precision = self.trainer.precision
| 37.75
| 116
| 0.727815
|
4a0c56c936c713b3d213687027fecddd3818ef21
| 36,597
|
py
|
Python
|
api/tacticalrmm/agents/tests.py
|
InsaneTechnologies/tacticalrmm
|
67e7156c4bbbbb4c82e00d94b6aa2cd3b1cf2f2b
|
[
"MIT"
] | 2
|
2021-04-29T13:34:07.000Z
|
2021-04-29T13:34:11.000Z
|
api/tacticalrmm/agents/tests.py
|
InsaneTechnologies/tacticalrmm
|
67e7156c4bbbbb4c82e00d94b6aa2cd3b1cf2f2b
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/agents/tests.py
|
InsaneTechnologies/tacticalrmm
|
67e7156c4bbbbb4c82e00d94b6aa2cd3b1cf2f2b
|
[
"MIT"
] | null | null | null |
import json
import os
from itertools import cycle
from unittest.mock import patch
from django.conf import settings
from model_bakery import baker
from packaging import version as pyver
from logs.models import PendingAction
from tacticalrmm.test import TacticalTestCase
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField
from .serializers import AgentSerializer
from .tasks import auto_self_agent_update_task
class TestAgentsList(TacticalTestCase):
    """Endpoint tests for the agent listing view (/agents/listagents/)."""

    def setUp(self):
        self.authenticate()
        self.setup_coresettings()

    def test_agents_list(self):
        url = "/agents/listagents/"

        # Build 36 agents spread over two clients and three sites.
        client_a = baker.make("clients.Client")
        client_b = baker.make("clients.Client")
        site_a1 = baker.make("clients.Site", client=client_a)
        site_a2 = baker.make("clients.Site", client=client_a)
        site_b1 = baker.make("clients.Site", client=client_b)
        baker.make_recipe(
            "agents.online_agent", site=site_a1, monitoring_type="server", _quantity=15
        )
        baker.make_recipe(
            "agents.online_agent",
            site=site_a2,
            monitoring_type="workstation",
            _quantity=10,
        )
        baker.make_recipe(
            "agents.online_agent",
            site=site_b1,
            monitoring_type="server",
            _quantity=4,
        )
        baker.make_recipe(
            "agents.online_agent",
            site=site_b1,
            monitoring_type="workstation",
            _quantity=7,
        )

        # No filter: all 36 agents come back.
        resp = self.client.patch(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.data), 36)  # type: ignore

        # Filtered by the first client: 15 + 10 agents.
        resp = self.client.patch(url, {"clientPK": client_a.pk}, format="json")  # type: ignore
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.data), 25)  # type: ignore

        # Filtered by the third site: 4 + 7 agents.
        resp = self.client.patch(url, {"sitePK": site_b1.pk}, format="json")  # type: ignore
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.data), 11)  # type: ignore

        self.check_not_authenticated("patch", url)
class TestAgentViews(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
client = baker.make("clients.Client", name="Google")
site = baker.make("clients.Site", client=client, name="LA Office")
self.agent = baker.make_recipe(
"agents.online_agent", site=site, version="1.1.1"
)
baker.make_recipe("winupdate.winupdate_policy", agent=self.agent)
def test_get_patch_policy(self):
# make sure get_patch_policy doesn't error out when agent has policy with
# an empty patch policy
policy = baker.make("automation.Policy")
self.agent.policy = policy
self.agent.save(update_fields=["policy"])
_ = self.agent.get_patch_policy()
self.agent.monitoring_type = "workstation"
self.agent.save(update_fields=["monitoring_type"])
_ = self.agent.get_patch_policy()
self.agent.policy = None
self.agent.save(update_fields=["policy"])
self.coresettings.server_policy = policy
self.coresettings.workstation_policy = policy
self.coresettings.save(update_fields=["server_policy", "workstation_policy"])
_ = self.agent.get_patch_policy()
self.agent.monitoring_type = "server"
self.agent.save(update_fields=["monitoring_type"])
_ = self.agent.get_patch_policy()
def test_get_agent_versions(self):
url = "/agents/getagentversions/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
assert any(i["hostname"] == self.agent.hostname for i in r.json()["agents"])
self.check_not_authenticated("get", url)
@patch("agents.tasks.send_agent_update_task.delay")
def test_update_agents(self, mock_task):
url = "/agents/updateagents/"
baker.make_recipe(
"agents.agent",
operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
version=settings.LATEST_AGENT_VER,
_quantity=15,
)
baker.make_recipe(
"agents.agent",
operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
version="1.3.0",
_quantity=15,
)
pks: list[int] = list(
Agent.objects.only("pk", "version").values_list("pk", flat=True)
)
data = {"pks": pks}
expected: list[int] = [
i.pk
for i in Agent.objects.only("pk", "version")
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_task.assert_called_with(pks=expected)
self.check_not_authenticated("post", url)
@patch("agents.models.Agent.nats_cmd")
def test_ping(self, nats_cmd):
url = f"/agents/{self.agent.pk}/ping/"
nats_cmd.return_value = "timeout"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
ret = {"name": self.agent.hostname, "status": "offline"}
self.assertEqual(r.json(), ret)
nats_cmd.return_value = "natsdown"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
ret = {"name": self.agent.hostname, "status": "offline"}
self.assertEqual(r.json(), ret)
nats_cmd.return_value = "pong"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
ret = {"name": self.agent.hostname, "status": "online"}
self.assertEqual(r.json(), ret)
nats_cmd.return_value = "asdasjdaksdasd"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
ret = {"name": self.agent.hostname, "status": "offline"}
self.assertEqual(r.json(), ret)
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
@patch("agents.views.reload_nats")
def test_uninstall(self, reload_nats, nats_cmd):
url = "/agents/uninstall/"
data = {"pk": self.agent.pk}
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 200)
nats_cmd.assert_called_with({"func": "uninstall"}, wait=False)
reload_nats.assert_called_once()
self.check_not_authenticated("delete", url)
@patch("agents.models.Agent.nats_cmd")
def test_get_processes(self, mock_ret):
agent = baker.make_recipe("agents.online_agent", version="1.2.0")
url = f"/agents/{agent.pk}/getprocs/"
with open(
os.path.join(settings.BASE_DIR, "tacticalrmm/test_data/procs.json")
) as f:
mock_ret.return_value = json.load(f)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
assert any(i["name"] == "Registry" for i in mock_ret.return_value)
assert any(i["membytes"] == 434655234324 for i in mock_ret.return_value)
mock_ret.return_value = "timeout"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
def test_kill_proc(self, nats_cmd):
url = f"/agents/{self.agent.pk}/8234/killproc/"
nats_cmd.return_value = "ok"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
nats_cmd.return_value = "timeout"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
nats_cmd.return_value = "process doesn't exist"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
def test_get_event_log(self, nats_cmd):
url = f"/agents/{self.agent.pk}/geteventlog/Application/22/"
with open(
os.path.join(settings.BASE_DIR, "tacticalrmm/test_data/appeventlog.json")
) as f:
nats_cmd.return_value = json.load(f)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
nats_cmd.assert_called_with(
{
"func": "eventlog",
"timeout": 30,
"payload": {
"logname": "Application",
"days": str(22),
},
},
timeout=32,
)
url = f"/agents/{self.agent.pk}/geteventlog/Security/6/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
nats_cmd.assert_called_with(
{
"func": "eventlog",
"timeout": 180,
"payload": {
"logname": "Security",
"days": str(6),
},
},
timeout=182,
)
nats_cmd.return_value = "timeout"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
def test_reboot_now(self, nats_cmd):
url = f"/agents/reboot/"
data = {"pk": self.agent.pk}
nats_cmd.return_value = "ok"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
nats_cmd.assert_called_with({"func": "rebootnow"}, timeout=10)
nats_cmd.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("post", url)
@patch("agents.models.Agent.nats_cmd")
def test_send_raw_cmd(self, mock_ret):
url = f"/agents/sendrawcmd/"
data = {
"pk": self.agent.pk,
"cmd": "ipconfig",
"shell": "cmd",
"timeout": 30,
}
mock_ret.return_value = "nt authority\system"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertIsInstance(r.data, str) # type: ignore
mock_ret.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("post", url)
@patch("agents.models.Agent.nats_cmd")
def test_reboot_later(self, nats_cmd):
url = f"/agents/reboot/"
data = {
"pk": self.agent.pk,
"datetime": "2025-08-29 18:41",
}
nats_cmd.return_value = "ok"
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["time"], "August 29, 2025 at 06:41 PM") # type: ignore
self.assertEqual(r.data["agent"], self.agent.hostname) # type: ignore
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "schedreboot",
"deleteafter": True,
"trigger": "once",
"name": r.data["task_name"], # type: ignore
"year": 2025,
"month": "August",
"day": 29,
"hour": 18,
"min": 41,
},
}
nats_cmd.assert_called_with(nats_data, timeout=10)
nats_cmd.return_value = "error creating task"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
data_invalid = {
"pk": self.agent.pk,
"datetime": "rm -rf /",
}
r = self.client.patch(url, data_invalid, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(r.data, "Invalid date") # type: ignore
self.check_not_authenticated("patch", url)
@patch("os.path.exists")
def test_install_agent(self, mock_file_exists):
url = "/agents/installagent/"
site = baker.make("clients.Site")
data = {
"client": site.client.id, # type: ignore
"site": site.id, # type: ignore
"arch": "64",
"expires": 23,
"installMethod": "manual",
"api": "https://api.example.com",
"agenttype": "server",
"rdp": 1,
"ping": 0,
"power": 0,
"fileName": "rmm-client-site-server.exe",
}
mock_file_exists.return_value = False
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 406)
mock_file_exists.return_value = True
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
data["arch"] = "32"
mock_file_exists.return_value = False
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 415)
data["arch"] = "64"
mock_file_exists.return_value = True
r = self.client.post(url, data, format="json")
self.assertIn("rdp", r.json()["cmd"])
self.assertNotIn("power", r.json()["cmd"])
data.update({"ping": 1, "power": 1})
r = self.client.post(url, data, format="json")
self.assertIn("power", r.json()["cmd"])
self.assertIn("ping", r.json()["cmd"])
data["installMethod"] = "powershell"
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
@patch("agents.models.Agent.nats_cmd")
def test_recover(self, nats_cmd):
from agents.models import RecoveryAction
RecoveryAction.objects.all().delete()
url = "/agents/recover/"
agent = baker.make_recipe("agents.online_agent")
# test mesh realtime
data = {"pk": agent.pk, "cmd": None, "mode": "mesh"}
nats_cmd.return_value = "ok"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.assert_called_with(
{"func": "recover", "payload": {"mode": "mesh"}}, timeout=10
)
nats_cmd.reset_mock()
# test mesh with agent rpc not working
data = {"pk": agent.pk, "cmd": None, "mode": "mesh"}
nats_cmd.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
mesh_recovery = RecoveryAction.objects.first()
self.assertEqual(mesh_recovery.mode, "mesh")
nats_cmd.reset_mock()
RecoveryAction.objects.all().delete()
# test tacagent realtime
data = {"pk": agent.pk, "cmd": None, "mode": "tacagent"}
nats_cmd.return_value = "ok"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.assert_called_with(
{"func": "recover", "payload": {"mode": "tacagent"}}, timeout=10
)
nats_cmd.reset_mock()
# test tacagent with rpc not working
data = {"pk": agent.pk, "cmd": None, "mode": "tacagent"}
nats_cmd.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.reset_mock()
# test shell cmd without command
data = {"pk": agent.pk, "cmd": None, "mode": "command"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(RecoveryAction.objects.count(), 0)
# test shell cmd
data = {"pk": agent.pk, "cmd": "shutdown /r /t 10 /f", "mode": "command"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
cmd_recovery = RecoveryAction.objects.first()
self.assertEqual(cmd_recovery.mode, "command")
self.assertEqual(cmd_recovery.command, "shutdown /r /t 10 /f")
def test_agents_agent_detail(self):
url = f"/agents/{self.agent.pk}/agentdetail/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
    def test_edit_agent(self):
        """Editing an agent persists core fields, its nested win update
        policy, and custom field values (create then update)."""
        # setup data
        site = baker.make("clients.Site", name="Ny Office")
        url = "/agents/editagent/"
        edit = {
            "id": self.agent.pk,
            "site": site.id,  # type: ignore
            "monitoring_type": "workstation",
            "description": "asjdk234andasd",
            "offline_time": 4,
            "overdue_time": 300,
            "check_interval": 60,
            "overdue_email_alert": True,
            "overdue_text_alert": False,
            "winupdatepolicy": [
                {
                    "critical": "approve",
                    "important": "approve",
                    "moderate": "manual",
                    "low": "ignore",
                    "other": "ignore",
                    "run_time_hour": 5,
                    "run_time_days": [2, 3, 6],
                    "reboot_after_install": "required",
                    "reprocess_failed": True,
                    "reprocess_failed_times": 13,
                    "email_if_fail": True,
                    "agent": self.agent.pk,
                }
            ],
        }
        r = self.client.patch(url, edit, format="json")
        self.assertEqual(r.status_code, 200)
        # verify via serializers that the agent and its policy were persisted
        agent = Agent.objects.get(pk=self.agent.pk)
        data = AgentSerializer(agent).data
        self.assertEqual(data["site"], site.id)  # type: ignore
        policy = WinUpdatePolicy.objects.get(agent=self.agent)
        data = WinUpdatePolicySerializer(policy).data
        self.assertEqual(data["run_time_days"], [2, 3, 6])
        # test adding custom fields
        field = baker.make("core.CustomField", model="agent", type="number")
        edit = {
            "id": self.agent.pk,
            "site": site.id,  # type: ignore
            "description": "asjdk234andasd",
            "custom_fields": [{"field": field.id, "string_value": "123"}],  # type: ignore
        }
        r = self.client.patch(url, edit, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertTrue(
            AgentCustomField.objects.filter(agent=self.agent, field=field).exists()
        )
        # test edit custom field (same field, new value overwrites)
        edit = {
            "id": self.agent.pk,
            "site": site.id,  # type: ignore
            "description": "asjdk234andasd",
            "custom_fields": [{"field": field.id, "string_value": "456"}],  # type: ignore
        }
        r = self.client.patch(url, edit, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            AgentCustomField.objects.get(agent=agent, field=field).value,
            "456",
        )
        self.check_not_authenticated("patch", url)
@patch("agents.models.Agent.get_login_token")
def test_meshcentral_tabs(self, mock_token):
url = f"/agents/{self.agent.pk}/meshcentral/"
mock_token.return_value = "askjh1k238uasdhk487234jadhsajksdhasd"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
# TODO
# decode the cookie
self.assertIn("&viewmode=13", r.data["file"]) # type: ignore
self.assertIn("&viewmode=12", r.data["terminal"]) # type: ignore
self.assertIn("&viewmode=11", r.data["control"]) # type: ignore
self.assertIn("&gotonode=", r.data["file"]) # type: ignore
self.assertIn("&gotonode=", r.data["terminal"]) # type: ignore
self.assertIn("&gotonode=", r.data["control"]) # type: ignore
self.assertIn("?login=", r.data["file"]) # type: ignore
self.assertIn("?login=", r.data["terminal"]) # type: ignore
self.assertIn("?login=", r.data["control"]) # type: ignore
self.assertEqual(self.agent.hostname, r.data["hostname"]) # type: ignore
self.assertEqual(self.agent.client.name, r.data["client"]) # type: ignore
self.assertEqual(self.agent.site.name, r.data["site"]) # type: ignore
self.assertEqual(r.status_code, 200)
mock_token.return_value = "err"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("get", url)
def test_overdue_action(self):
url = "/agents/overdueaction/"
payload = {"pk": self.agent.pk, "overdue_email_alert": True}
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
agent = Agent.objects.get(pk=self.agent.pk)
self.assertTrue(agent.overdue_email_alert)
self.assertEqual(self.agent.hostname, r.data) # type: ignore
payload = {"pk": self.agent.pk, "overdue_text_alert": False}
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
agent = Agent.objects.get(pk=self.agent.pk)
self.assertFalse(agent.overdue_text_alert)
self.assertEqual(self.agent.hostname, r.data) # type: ignore
self.check_not_authenticated("post", url)
def test_list_agents_no_detail(self):
url = "/agents/listagentsnodetail/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
def test_agent_edit_details(self):
url = f"/agents/{self.agent.pk}/agenteditdetails/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
url = "/agents/48234982/agenteditdetails/"
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
self.check_not_authenticated("get", url)
""" @patch("winupdate.tasks.bulk_check_for_updates_task.delay")
@patch("scripts.tasks.handle_bulk_script_task.delay")
@patch("scripts.tasks.handle_bulk_command_task.delay")
@patch("agents.models.Agent.salt_batch_async")
def test_bulk_cmd_script(
self, salt_batch_async, bulk_command, bulk_script, mock_update
):
url = "/agents/bulk/"
payload = {
"mode": "command",
"monType": "all",
"target": "agents",
"client": None,
"site": None,
"agentPKs": [
self.agent.pk,
],
"cmd": "gpupdate /force",
"timeout": 300,
"shell": "cmd",
}
r = self.client.post(url, payload, format="json")
bulk_command.assert_called_with([self.agent.pk], "gpupdate /force", "cmd", 300)
self.assertEqual(r.status_code, 200)
payload = {
"mode": "command",
"monType": "servers",
"target": "agents",
"client": None,
"site": None,
"agentPKs": [],
"cmd": "gpupdate /force",
"timeout": 300,
"shell": "cmd",
}
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 400)
payload = {
"mode": "command",
"monType": "workstations",
"target": "client",
"client": self.agent.client.id,
"site": None,
"agentPKs": [],
"cmd": "gpupdate /force",
"timeout": 300,
"shell": "cmd",
}
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
bulk_command.assert_called_with([self.agent.pk], "gpupdate /force", "cmd", 300)
payload = {
"mode": "command",
"monType": "all",
"target": "client",
"client": self.agent.client.id,
"site": self.agent.site.id,
"agentPKs": [
self.agent.pk,
],
"cmd": "gpupdate /force",
"timeout": 300,
"shell": "cmd",
}
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
bulk_command.assert_called_with([self.agent.pk], "gpupdate /force", "cmd", 300)
payload = {
"mode": "scan",
"monType": "all",
"target": "agents",
"client": None,
"site": None,
"agentPKs": [
self.agent.pk,
],
}
r = self.client.post(url, payload, format="json")
mock_update.assert_called_with(minions=[self.agent.salt_id])
self.assertEqual(r.status_code, 200)
payload = {
"mode": "install",
"monType": "all",
"target": "client",
"client": self.agent.client.id,
"site": None,
"agentPKs": [
self.agent.pk,
],
}
salt_batch_async.return_value = "ok"
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
payload["target"] = "all"
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 200)
payload["target"] = "asdasdsd"
r = self.client.post(url, payload, format="json")
self.assertEqual(r.status_code, 400)
# TODO mock the script
self.check_not_authenticated("post", url) """
@patch("agents.models.Agent.nats_cmd")
def test_recover_mesh(self, nats_cmd):
url = f"/agents/{self.agent.pk}/recovermesh/"
nats_cmd.return_value = "ok"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertIn(self.agent.hostname, r.data) # type: ignore
nats_cmd.assert_called_with(
{"func": "recover", "payload": {"mode": "mesh"}}, timeout=45
)
nats_cmd.return_value = "timeout"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
url = f"/agents/543656/recovermesh/"
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
self.check_not_authenticated("get", url)
@patch("agents.tasks.run_script_email_results_task.delay")
@patch("agents.models.Agent.run_script")
def test_run_script(self, run_script, email_task):
run_script.return_value = "ok"
url = "/agents/runscript/"
script = baker.make_recipe("scripts.script")
# test wait
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "wait",
"args": [],
"timeout": 15,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=[], timeout=18, wait=True
)
run_script.reset_mock()
# test email default
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "email",
"args": ["abc", "123"],
"timeout": 15,
"emailmode": "default",
"emails": ["admin@example.com", "bob@example.com"],
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
email_task.assert_called_with(
agentpk=self.agent.pk,
scriptpk=script.pk,
nats_timeout=18,
emails=[],
args=["abc", "123"],
)
email_task.reset_mock()
# test email overrides
data["emailmode"] = "custom"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
email_task.assert_called_with(
agentpk=self.agent.pk,
scriptpk=script.pk,
nats_timeout=18,
emails=["admin@example.com", "bob@example.com"],
args=["abc", "123"],
)
# test fire and forget
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "forget",
"args": ["hello", "world"],
"timeout": 22,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=["hello", "world"], timeout=25
)
class TestAgentViewsNew(TacticalTestCase):
    """Newer agent view tests (maintenance mode toggling)."""
    def setUp(self):
        self.authenticate()
        self.setup_coresettings()
    """ def test_agent_counts(self):
        url = "/agents/agent_counts/"

        # create some data
        baker.make_recipe(
            "agents.online_agent",
            monitoring_type=cycle(["server", "workstation"]),
            _quantity=6,
        )
        baker.make_recipe(
            "agents.overdue_agent",
            monitoring_type=cycle(["server", "workstation"]),
            _quantity=6,
        )

        # returned data should be this
        data = {
            "total_server_count": 6,
            "total_server_offline_count": 3,
            "total_workstation_count": 6,
            "total_workstation_offline_count": 3,
        }

        r = self.client.post(url, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.data, data)  # type: ignore

        self.check_not_authenticated("post", url) """
    def test_agent_maintenance_mode(self):
        """Maintenance mode can be toggled at Client, Site and Agent scope;
        an unknown scope type is rejected with 400."""
        url = "/agents/maintenance/"
        # setup data
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.agent", site=site)
        # Test client toggle maintenance mode
        data = {"type": "Client", "id": site.client.id, "action": True}  # type: ignore
        r = self.client.post(url, data, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertTrue(Agent.objects.get(pk=agent.pk).maintenance_mode)
        # Test site toggle maintenance mode
        data = {"type": "Site", "id": site.id, "action": False}  # type: ignore
        r = self.client.post(url, data, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertFalse(Agent.objects.get(pk=agent.pk).maintenance_mode)
        # Test agent toggle maintenance mode
        data = {"type": "Agent", "id": agent.id, "action": True}
        r = self.client.post(url, data, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertTrue(Agent.objects.get(pk=agent.pk).maintenance_mode)
        # Test invalid payload
        data = {"type": "Invalid", "id": agent.id, "action": True}
        r = self.client.post(url, data, format="json")
        self.assertEqual(r.status_code, 400)
        self.check_not_authenticated("post", url)
class TestAgentTasks(TacticalTestCase):
    """Celery task tests for the agent self-update machinery."""
    def setUp(self):
        self.authenticate()
        self.setup_coresettings()
    @patch("agents.utils.get_exegen_url")
    @patch("agents.models.Agent.nats_cmd")
    def test_agent_update(self, nats_cmd, get_exe):
        """agent_update: refuses agents with unknown arch or unsupported
        versions; otherwise queues a pending 'agentupdate' action and sends
        the download payload over nats. The download url comes from the
        github release when no code signing token is given, and from the
        exegen service (with token + arch) when one is."""
        from agents.tasks import agent_update
        # OS string carries no arch information -> cannot pick a binary
        agent_noarch = baker.make_recipe(
            "agents.agent",
            operating_system="Error getting OS",
            version=settings.LATEST_AGENT_VER,
        )
        r = agent_update(agent_noarch.pk)
        self.assertEqual(r, "noarch")
        # agents on 1.3.0 are below the minimum self-updatable version
        agent_130 = baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
            version="1.3.0",
        )
        r = agent_update(agent_130.pk)
        self.assertEqual(r, "not supported")
        # test __without__ code signing
        agent64_nosign = baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
            version="1.4.14",
        )
        r = agent_update(agent64_nosign.pk, None)
        self.assertEqual(r, "created")
        action = PendingAction.objects.get(agent__pk=agent64_nosign.pk)
        self.assertEqual(action.action_type, "agentupdate")
        self.assertEqual(action.status, "pending")
        self.assertEqual(
            action.details["url"],
            f"https://github.com/wh1te909/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
        )
        self.assertEqual(
            action.details["inno"], f"winagent-v{settings.LATEST_AGENT_VER}.exe"
        )
        self.assertEqual(action.details["version"], settings.LATEST_AGENT_VER)
        nats_cmd.assert_called_with(
            {
                "func": "agentupdate",
                "payload": {
                    "url": f"https://github.com/wh1te909/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
                    "version": settings.LATEST_AGENT_VER,
                    "inno": f"winagent-v{settings.LATEST_AGENT_VER}.exe",
                },
            },
            wait=False,
        )
        # test __with__ code signing (64 bit)
        codesign = baker.make("core.CodeSignToken", token="testtoken123")
        agent64_sign = baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
            version="1.4.14",
        )
        nats_cmd.return_value = "ok"
        get_exe.return_value = "https://exe.tacticalrmm.io"
        r = agent_update(agent64_sign.pk, codesign.token)  # type: ignore
        self.assertEqual(r, "created")
        nats_cmd.assert_called_with(
            {
                "func": "agentupdate",
                "payload": {
                    "url": f"https://exe.tacticalrmm.io/api/v1/winagents/?version={settings.LATEST_AGENT_VER}&arch=64&token=testtoken123",  # type: ignore
                    "version": settings.LATEST_AGENT_VER,
                    "inno": f"winagent-v{settings.LATEST_AGENT_VER}.exe",
                },
            },
            wait=False,
        )
        action = PendingAction.objects.get(agent__pk=agent64_sign.pk)
        self.assertEqual(action.action_type, "agentupdate")
        self.assertEqual(action.status, "pending")
        # test __with__ code signing (32 bit) - note the -x86 installer name
        agent32_sign = baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 32 bit (build 19041.450)",
            version="1.4.14",
        )
        nats_cmd.return_value = "ok"
        get_exe.return_value = "https://exe.tacticalrmm.io"
        r = agent_update(agent32_sign.pk, codesign.token)  # type: ignore
        self.assertEqual(r, "created")
        nats_cmd.assert_called_with(
            {
                "func": "agentupdate",
                "payload": {
                    "url": f"https://exe.tacticalrmm.io/api/v1/winagents/?version={settings.LATEST_AGENT_VER}&arch=32&token=testtoken123",  # type: ignore
                    "version": settings.LATEST_AGENT_VER,
                    "inno": f"winagent-v{settings.LATEST_AGENT_VER}-x86.exe",
                },
            },
            wait=False,
        )
        action = PendingAction.objects.get(agent__pk=agent32_sign.pk)
        self.assertEqual(action.action_type, "agentupdate")
        self.assertEqual(action.status, "pending")
    @patch("agents.tasks.agent_update")
    @patch("agents.tasks.sleep", return_value=None)
    def test_auto_self_agent_update_task(self, mock_sleep, agent_update):
        """The periodic auto-update task does nothing when disabled in core
        settings, and updates only the out-of-date agents when enabled."""
        # 23 agents already current, 33 on an old version
        baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
            version=settings.LATEST_AGENT_VER,
            _quantity=23,
        )
        baker.make_recipe(
            "agents.agent",
            operating_system="Windows 10 Pro, 64 bit (build 19041.450)",
            version="1.3.0",
            _quantity=33,
        )
        # auto update disabled -> nothing scheduled
        self.coresettings.agent_auto_update = False
        self.coresettings.save(update_fields=["agent_auto_update"])
        r = auto_self_agent_update_task.s().apply()
        self.assertEqual(agent_update.call_count, 0)
        # auto update enabled -> one update call per outdated agent
        self.coresettings.agent_auto_update = True
        self.coresettings.save(update_fields=["agent_auto_update"])
        r = auto_self_agent_update_task.s().apply()
        self.assertEqual(agent_update.call_count, 33)
| 34.854286
| 156
| 0.575047
|
4a0c57b5df7b948d5ca19641bad038331fa728d7
| 399
|
py
|
Python
|
setup.py
|
CoAxLab/glia_playing_atari
|
e0b69e69af2c4dd655cc4c39c03c20e216949616
|
[
"MIT"
] | 2
|
2019-11-13T14:41:46.000Z
|
2022-02-25T18:53:34.000Z
|
setup.py
|
CoAxLab/glia_playing_atari
|
e0b69e69af2c4dd655cc4c39c03c20e216949616
|
[
"MIT"
] | null | null | null |
setup.py
|
CoAxLab/glia_playing_atari
|
e0b69e69af2c4dd655cc4c39c03c20e216949616
|
[
"MIT"
] | null | null | null |
from setuptools import setup

# Experiment entry-point scripts installed alongside the package.
ENTRY_SCRIPTS = [
    'glia/exp/glia_digits.py',
    'glia/exp/glia_xor.py',
    'glia/exp/glia_fashion.py',
    'glia/exp/tune_digits.py',
]

setup(
    name='glia',
    version='0.0.1',
    description="Glia computers!",
    url='',
    author='Erik J. Peterson',
    author_email='erik.exists@gmail.com',
    license='MIT',
    packages=['glia'],
    scripts=ENTRY_SCRIPTS,
    zip_safe=False,
)
| 23.470588
| 61
| 0.611529
|
4a0c5862fc15f62500118575ba4cfa6a6b8e2b85
| 13,447
|
py
|
Python
|
tests/conftest.py
|
zhonger/aiida-core
|
403f7e7d896f8408d0cacee9fe41c030a1072eaf
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2016-09-12T10:51:00.000Z
|
2016-09-12T10:51:00.000Z
|
tests/conftest.py
|
zhonger/aiida-core
|
403f7e7d896f8408d0cacee9fe41c030a1072eaf
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
zhonger/aiida-core
|
403f7e7d896f8408d0cacee9fe41c030a1072eaf
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Configuration file for pytest tests."""
import os
import pytest
from aiida.manage.configuration import Config, Profile, get_config, load_profile
pytest_plugins = ['aiida.manage.tests.pytest_fixtures', 'sphinx.testing.fixtures'] # pylint: disable=invalid-name
@pytest.fixture()
def non_interactive_editor(request):
    """Fixture to patch click's `Editor.edit_file`.

    In `click==7.1` the `Editor.edit_file` command was changed to escape the `os.environ['EDITOR']` command. Our tests
    are currently abusing this variable to define an automated vim command in order to make an interactive command
    non-interactive, and escaping it makes bash interpret the command and its arguments as a single command instead.
    Here we patch the method to remove the escaping of the editor command.

    :param request: the command to set for the editor that is to be called
    """
    from unittest.mock import patch
    from click._termui_impl import Editor
    os.environ['EDITOR'] = request.param
    os.environ['VISUAL'] = request.param
    def edit_file(self, filename):
        import subprocess
        import click
        editor = self.get_editor()
        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            # BUG FIX: the editor must be invoked on ``filename``; only the
            # upstream ``shlex_quote`` escaping of the editor is removed here.
            # Previously the filename argument was dropped entirely, so the
            # editor was launched without a file to edit.
            process = subprocess.Popen(
                f'{editor} {filename}',
                env=environ,
                shell=True,
            )
            exit_code = process.wait()
            if exit_code != 0:
                raise click.ClickException(f'{editor}: Editing failed!')
        except OSError as exception:
            raise click.ClickException(f'{editor}: Editing failed: {exception}')
    with patch.object(Editor, 'edit_file', edit_file):
        yield
@pytest.fixture(scope='function')
def fixture_sandbox():
    """Yield a fresh ``SandboxFolder`` that is cleaned up after the test."""
    from aiida.common.folders import SandboxFolder
    with SandboxFolder() as sandbox:
        yield sandbox
@pytest.fixture
def generate_calc_job():
    """Fixture to construct a new `CalcJob` instance and call `prepare_for_submission` for testing `CalcJob` classes.

    The fixture will return the `CalcInfo` returned by `prepare_for_submission` and the temporary folder that was passed
    to it, into which the raw input files will have been written.
    """
    def _generate_calc_job(folder, entry_point_name, inputs=None, return_process=False):
        """Fixture to generate a mock `CalcInfo` for testing calculation jobs.

        :param folder: sandbox folder that `prepare_for_submission` writes into.
        :param entry_point_name: entry point name used to load the `CalcJob` class.
        :param inputs: optional inputs passed to process construction.
        :param return_process: when True, return the process instance itself
            instead of calling `prepare_for_submission`.
        """
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import CalculationFactory
        inputs = inputs or {}
        manager = get_manager()
        runner = manager.get_runner()
        process_class = CalculationFactory(entry_point_name)
        process = instantiate_process(runner, process_class, **inputs)
        if return_process:
            return process
        return process.prepare_for_submission(folder)
    return _generate_calc_job
@pytest.fixture
def generate_work_chain():
    """Generate an instance of a `WorkChain`."""
    def _generate_work_chain(entry_point, inputs=None):
        """Instantiate a `WorkChain` for the given entry point and inputs.

        :param entry_point: entry point name of the work chain subclass, or the class itself.
        :param inputs: inputs to be passed to process construction.
        :return: a `WorkChain` instance.
        """
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import WorkflowFactory
        # accept either an entry point string or an already-loaded class
        if isinstance(entry_point, str):
            process_class = WorkflowFactory(entry_point)
        else:
            process_class = entry_point
        runner = get_manager().get_runner()
        return instantiate_process(runner, process_class, **(inputs or {}))
    return _generate_work_chain
@pytest.fixture
def generate_calculation_node():
    """Generate an instance of a `CalculationNode`."""
    from aiida.engine import ProcessState
    def _generate_calculation_node(process_state=ProcessState.FINISHED, exit_status=None, entry_point=None):
        """Build a `CalculationNode` in the requested process state.

        :param process_state: process state to set on the node.
        :param exit_status: optional exit status; defaults to ``0`` for a finished process.
        :param entry_point: optional process type string for the node.
        :return: a `CalculationNode` instance.
        """
        from aiida.orm import CalculationNode
        # a finished process implies a successful (zero) exit status unless told otherwise
        if exit_status is None and process_state is ProcessState.FINISHED:
            exit_status = 0
        node = CalculationNode(process_type=entry_point)
        node.set_process_state(process_state)
        if exit_status is not None:
            node.set_exit_status(exit_status)
        return node
    return _generate_calculation_node
@pytest.fixture
def empty_config(tmp_path) -> Config:
    """Create a temporary configuration instance.

    This creates a temporary directory with a clean `.aiida` folder and basic configuration file. The currently loaded
    configuration and profile are stored in memory and are automatically restored at the end of this context manager.

    :return: a new empty config instance.
    """
    from aiida.common.utils import Capturing
    from aiida.manage import configuration
    from aiida.manage.configuration import settings, reset_profile
    # Store the current configuration instance and config directory path
    current_config = configuration.CONFIG
    current_config_path = current_config.dirpath
    current_profile_name = configuration.PROFILE.name
    # Unload the global profile/config before swapping the config folder
    reset_profile()
    configuration.CONFIG = None
    # Create a temporary folder, set it as the current config directory path and reset the loaded configuration
    settings.AIIDA_CONFIG_FOLDER = str(tmp_path)
    # Create the instance base directory structure, the config file and a dummy profile
    settings.create_instance_directories()
    # The constructor of `Config` called by `load_config` will print warning messages about migrating it
    with Capturing():
        configuration.CONFIG = configuration.load_config(create=True)
    yield get_config()
    # Reset the config folder path and the config instance. Note this will always be executed after the yield no
    # matter what happened in the test that used this fixture.
    reset_profile()
    settings.AIIDA_CONFIG_FOLDER = current_config_path
    configuration.CONFIG = current_config
    load_profile(current_profile_name)
@pytest.fixture
def profile_factory() -> Profile:
    """Create a new profile instance.

    :return: the profile instance.
    """
    def _create_profile(name, **kwargs):
        """Build a `Profile` named ``name``; any keyword overrides the
        corresponding default below, remaining kwargs are ignored."""
        # file repository lives under the config dir unless overridden
        repository_dirpath = kwargs.pop('repository_dirpath', get_config().dirpath)
        profile_dictionary = {
            'default_user': kwargs.pop('default_user', 'dummy@localhost'),
            'database_engine': kwargs.pop('database_engine', 'postgresql_psycopg2'),
            'database_backend': kwargs.pop('database_backend', 'django'),
            'database_hostname': kwargs.pop('database_hostname', 'localhost'),
            'database_port': kwargs.pop('database_port', 5432),
            # database name defaults to the profile name itself
            'database_name': kwargs.pop('database_name', name),
            'database_username': kwargs.pop('database_username', 'user'),
            'database_password': kwargs.pop('database_password', 'pass'),
            'repository_uri': f"file:///{os.path.join(repository_dirpath, f'repository_{name}')}",
        }
        return Profile(name, profile_dictionary)
    return _create_profile
@pytest.fixture
def config_with_profile_factory(empty_config, profile_factory) -> Config:
    """Create a temporary configuration instance with one profile.

    This fixture builds on the `empty_config` fixture, to add a single profile.

    The defaults of the profile can be overridden in the callable, as well as whether it should be set as default.

    Example::

        def test_config_with_profile(config_with_profile_factory):
            config = config_with_profile_factory(set_as_default=True, name='default', database_backend='django')
            assert config.current_profile.name == 'default'

    As with `empty_config`, the currently loaded configuration and profile are stored in memory,
    and are automatically restored at the end of this context manager.

    This fixture should be used by tests that modify aspects of the AiiDA configuration or profile
    and require a preconfigured profile, but do not require an actual configured database.
    """
    def _config_with_profile_factory(set_as_default=True, load=True, name='default', **kwargs):
        """Create a temporary configuration instance with one profile.

        :param set_as_default: whether to set the one profile as the default.
        :param load: whether to load the profile.
        :param name: the profile name
        :param kwargs: parameters that are forwarded to the `Profile` constructor.
        :return: a config instance with a configured profile.
        """
        profile = profile_factory(name=name, **kwargs)
        config = empty_config
        config.add_profile(profile)
        if set_as_default:
            config.set_default_profile(profile.name, overwrite=True)
        # persist the profile to the on-disk config file
        config.store()
        if load:
            load_profile(profile.name)
        return config
    return _config_with_profile_factory
@pytest.fixture
def config_with_profile(config_with_profile_factory):
    """Create a temporary configuration instance with one default, loaded profile."""
    # Delegates to the factory with its defaults (set_as_default=True, load=True).
    yield config_with_profile_factory()
@pytest.fixture
def manager(aiida_profile):  # pylint: disable=unused-argument
    """Get the ``Manager`` instance of the currently loaded profile.
    Depends on ``aiida_profile`` purely to guarantee a profile is loaded first.
    """
    from aiida.manage.manager import get_manager
    return get_manager()
@pytest.fixture
def event_loop(manager):
    """Get the event loop instance of the currently loaded profile.
    This is automatically called as a fixture for any test marked with ``@pytest.mark.asyncio``.
    """
    # Overrides pytest-asyncio's default ``event_loop`` fixture so tests share
    # the loop owned by the profile's runner instead of a fresh one.
    yield manager.get_runner().loop
@pytest.fixture
def backend(manager):
    """Get the ``Backend`` instance of the currently loaded profile."""
    return manager.get_backend()
@pytest.fixture
def communicator(manager):
    """Get the ``Communicator`` instance of the currently loaded profile to communicate with RabbitMQ."""
    return manager.get_communicator()
@pytest.fixture
def skip_if_not_django(backend):
    """Fixture that will skip any test that uses it when a profile is loaded with any other backend then Django."""
    from aiida.orm.implementation.django.backend import DjangoBackend
    if not isinstance(backend, DjangoBackend):
        pytest.skip('this test should only be run for the Django backend.')
@pytest.fixture
def skip_if_not_sqlalchemy(backend):
    """Fixture that will skip any test that uses it when a profile is loaded with any other backend then SqlAlchemy."""
    from aiida.orm.implementation.sqlalchemy.backend import SqlaBackend
    if not isinstance(backend, SqlaBackend):
        pytest.skip('this test should only be run for the SqlAlchemy backend.')
@pytest.fixture(scope='function')
def override_logging():
    """Temporarily set the AiiDA and database log levels to ``DEBUG``.

    The options are unset and logging reconfigured on teardown, even when the
    test fails. (The previous docstring, "Return a `SandboxFolder`", was a
    copy-paste leftover and did not describe this fixture.)
    """
    from aiida.common.log import configure_logging
    config = get_config()
    try:
        config.set_option('logging.aiida_loglevel', 'DEBUG')
        config.set_option('logging.db_loglevel', 'DEBUG')
        configure_logging(with_orm=True)
        yield
    finally:
        # Restore the defaults and re-apply the logging configuration.
        config.unset_option('logging.aiida_loglevel')
        config.unset_option('logging.db_loglevel')
        configure_logging(with_orm=True)
@pytest.fixture
def with_daemon():
    """Start the daemon process for the duration of the test.

    The daemon is launched as a subprocess and is terminated *and reaped*
    during teardown. Wrapping the ``yield`` in ``try``/``finally`` guarantees
    cleanup even if teardown of an earlier fixture raises, and calling
    ``wait()`` prevents the child from lingering as a zombie process.
    """
    import sys
    import subprocess
    from aiida.engine.daemon.client import DaemonClient
    from aiida.cmdline.utils.common import get_env_with_venv_bin
    # Add the current python path to the environment that will be used for the daemon sub process.
    # This is necessary to guarantee the daemon can also import all the classes that are defined
    # in this `tests` module.
    env = get_env_with_venv_bin()
    env['PYTHONPATH'] = ':'.join(sys.path)
    profile = get_config().current_profile
    daemon = subprocess.Popen(
        DaemonClient(profile).cmd_string.split(),
        stderr=sys.stderr,
        stdout=sys.stdout,
        env=env,
    )
    try:
        yield
    finally:
        # Send SIGTERM (terminate), then reap the child. Fall back to SIGKILL
        # if the daemon does not exit within a grace period.
        daemon.terminate()
        try:
            daemon.wait(timeout=30)
        except subprocess.TimeoutExpired:
            daemon.kill()
            daemon.wait()
| 36.640327
| 120
| 0.69086
|
4a0c5a1f9afdded366847cf8fa76fc4040d112a2
| 11,830
|
py
|
Python
|
src/sage/combinat/baxter_permutations.py
|
Findstat/sage
|
d661c2c2bd18676014c151e9eec1e81ed12db9f6
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/baxter_permutations.py
|
Findstat/sage
|
d661c2c2bd18676014c151e9eec1e81ed12db9f6
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/baxter_permutations.py
|
Findstat/sage
|
d661c2c2bd18676014c151e9eec1e81ed12db9f6
|
[
"BSL-1.0"
] | null | null | null |
"""
Baxter permutations
"""
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
from sage.combinat.permutation import Permutation, Permutations
from sage.rings.integer_ring import ZZ
class BaxterPermutations(UniqueRepresentation, Parent):
    r"""
    The combinatorial class of Baxter permutations.
    A Baxter permutation is a permutation avoiding the generalized
    permutation patterns `2-41-3` and `3-14-2`. In other words, a
    permutation `\sigma` is a Baxter permutation if for any subword `u
    := u_1u_2u_3u_4` of `\sigma` such that the letters `u_2` and `u_3`
    are adjacent in `\sigma`, the standardized version of `u` is
    neither `2413` nor `3142`.
    See [Gir12]_ for a study of Baxter permutations.
    INPUT:
    - ``n`` -- (default: ``None``) a nonnegative integer, the size of
      the permutations.
    OUTPUT:
    Return the combinatorial class of the Baxter permutations of size ``n``
    if ``n`` is not ``None``. Otherwise, return the combinatorial class
    of all Baxter permutations.
    EXAMPLES::
        sage: BaxterPermutations(5)
        Baxter permutations of size 5
        sage: BaxterPermutations()
        Baxter permutations
    REFERENCES:
    .. [Gir12] Samuele Giraudo,
       *Algebraic and combinatorial structures on pairs of twin binary trees*,
       :arxiv:`1204.4776v1`.
    """
    @staticmethod
    def __classcall_private__(classe, n=None):
        """
        Dispatch to the size-restricted or the unrestricted class depending
        on whether a size was given (standard UniqueRepresentation idiom).
        EXAMPLES::
            sage: BaxterPermutations(5)
            Baxter permutations of size 5
            sage: BaxterPermutations()
            Baxter permutations
        """
        if n is None:
            return BaxterPermutations_all()
        return BaxterPermutations_size(n)
class BaxterPermutations_size(BaxterPermutations):
    r"""
    The enumerated set of Baxter permutations of a given size.
    See :class:`BaxterPermutations` for the definition of Baxter
    permutations.
    EXAMPLES::
        sage: from sage.combinat.baxter_permutations import BaxterPermutations_size
        sage: BaxterPermutations_size(5)
        Baxter permutations of size 5
    """
    def __init__(self, n):
        """
        Initialize the set of Baxter permutations of size ``n``.
        EXAMPLES::
            sage: from sage.combinat.baxter_permutations import BaxterPermutations_size
            sage: BaxterPermutations_size(5)
            Baxter permutations of size 5
        """
        self.element_class = Permutations(n).element_class
        self._n = ZZ(n)
        from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
        # NOTE(review): ``super(BaxterPermutations, self)`` starts the MRO
        # lookup *after* BaxterPermutations, effectively calling
        # ``Parent.__init__`` — presumably intentional; confirm.
        super(BaxterPermutations, self).__init__(category=FiniteEnumeratedSets())
    def _repr_(self):
        """
        Return a string representation of ``self``
        EXAMPLES::
            sage: from sage.combinat.baxter_permutations import BaxterPermutations_size
            sage: BaxterPermutations_size(5)
            Baxter permutations of size 5
        """
        return "Baxter permutations of size %s" % self._n
    def __contains__(self, x):
        r"""
        Return ``True`` if and only if ``x`` is a Baxter permutation of
        size ``self._n``.
        INPUT:
        - ``x`` -- a permutation.
        EXAMPLES::
            sage: Permutation([2, 1, 4, 3]) in BaxterPermutations(4)
            True
            sage: Permutation([2, 1, 4, 3]) in BaxterPermutations(5)
            False
            sage: Permutation([3, 1, 4, 2]) in BaxterPermutations(4)
            False
            sage: [len([p for p in Permutations(n) if p in BaxterPermutations(n)]) for n in range(7)]
            [1, 1, 2, 6, 22, 92, 422]
            sage: sorted([p for p in Permutations(6) if p in BaxterPermutations(6)]) == sorted(BaxterPermutations(6).list())
            True
        """
        if not x in Permutations(self._n):
            return False
        # Scan every adjacent pair (a, b) = (x[i], x[i+1]); for each pair,
        # look for a forbidden generalized pattern with one letter on each
        # side of the pair.
        for i in range(1, len(x) - 1):
            a = x[i]
            b = x[i + 1]
            if a < b:  # Hunting pattern 3-14-2.
                # Largest value strictly between a and b to the left of a.
                max_l = 0
                for x_j in x[:i]:
                    if x_j > a and x_j < b and x_j > max_l:
                        max_l = x_j
                # Smallest value strictly between a and b to the right of b.
                min_r = len(x) + 1
                for x_j in x[i+2:]:
                    if x_j > a and x_j < b and x_j < min_r:
                        min_r = x_j
                if max_l > min_r:
                    return False
            else:  # Hunting pattern 2-41-3.
                min_l = len(x) + 1
                for x_j in x[:i]:
                    if x_j < a and x_j > b and x_j < min_l:
                        min_l = x_j
                max_r = 0
                for x_j in x[i+2:]:
                    if x_j < a and x_j > b and x_j > max_r:
                        max_r = x_j
                if min_l < max_r:
                    return False
        return True
    def __iter__(self):
        r"""
        Efficient generation of Baxter permutations.
        OUTPUT:
        An iterator over the Baxter permutations of size ``self._n``.
        EXAMPLES::
            sage: BaxterPermutations(4).list()
            [[4, 3, 2, 1], [3, 4, 2, 1], [3, 2, 4, 1], [3, 2, 1, 4], [2, 4, 3, 1],
             [4, 2, 3, 1], [2, 3, 4, 1], [2, 3, 1, 4], [2, 1, 4, 3], [4, 2, 1, 3],
             [2, 1, 3, 4], [1, 4, 3, 2], [4, 1, 3, 2], [1, 3, 4, 2], [1, 3, 2, 4],
             [4, 3, 1, 2], [3, 4, 1, 2], [3, 1, 2, 4], [1, 2, 4, 3], [1, 4, 2, 3],
             [4, 1, 2, 3], [1, 2, 3, 4]]
            sage: [len(BaxterPermutations(n)) for n in xrange(9)]
            [1, 1, 2, 6, 22, 92, 422, 2074, 10754]
        TESTS::
            sage: all(a in BaxterPermutations(n) for n in xrange(7)
            ....:     for a in BaxterPermutations(n))
            True
        ALGORITHM:
        The algorithm using generating trees described in [BBF08]_ is used.
        The idea is that all Baxter permutations of size `n + 1` can be
        obtained by inserting the letter `n + 1` either just before a left
        to right maximum or just after a right to left maximum of a Baxter
        permutation of size `n`.
        REFERENCES:
        .. [BBF08] N. Bonichon, M. Bousquet-Melou, E. Fusy.
           Baxter permutations and plane bipolar orientations.
           Seminaire Lotharingien de combinatoire 61A, article B61Ah, 2008.
        """
        if self._n == 0:
            yield Permutations(0)([])
        elif self._n == 1:
            yield Permutations(1)([1])
        else:
            # Grow each Baxter permutation of size n-1 by inserting the
            # largest letter at every admissible position.
            for b in BaxterPermutations(self._n - 1):
                # Left to right maxima.
                for i in [self._n - 2 - i for i in b.reverse().saliances()]:
                    yield Permutations(self._n)(b[:i] + [self._n] + b[i:])
                # Right to left maxima.
                for i in b.saliances():
                    yield Permutations(self._n)(b[:i + 1] + [self._n] + b[i + 1:])
    def _an_element_(self):
        """
        Return an element of ``self``.
        EXAMPLES::
            sage: BaxterPermutations(4)._an_element_()
            [4, 3, 2, 1]
        """
        return self.first()
    def cardinality(self):
        r"""
        Return the number of Baxter permutations of size ``self._n``.
        For any positive integer `n`, the number of Baxter
        permutations of size `n` equals
        .. MATH::
            \sum_{k=1}^n \dfrac
            {\binom{n+1}{k-1} \binom{n+1}{k} \binom{n+1}{k+1}}
            {\binom{n+1}{1} \binom{n+1}{2}} .
        This is :oeis:`A001181`.
        EXAMPLES::
            sage: [BaxterPermutations(n).cardinality() for n in xrange(13)]
            [1, 1, 2, 6, 22, 92, 422, 2074, 10754, 58202, 326240, 1882960, 11140560]
            sage: BaxterPermutations(3r).cardinality()
            6
            sage: parent(_)
            Integer Ring
        """
        if self._n == 0:
            return 1
        from sage.rings.arith import binomial
        # Closed-form sum over k (index shifted by one versus the formula in
        # the docstring). ``xrange`` marks this as Python-2-era code.
        return sum((binomial(self._n + 1, k) *
                    binomial(self._n + 1, k + 1) *
                    binomial(self._n + 1, k + 2)) //
                   ((self._n + 1) * binomial(self._n + 1, 2))
                   for k in xrange(self._n))
class BaxterPermutations_all(DisjointUnionEnumeratedSets, BaxterPermutations):
    r"""
    The enumerated set of all Baxter permutations.
    See :class:`BaxterPermutations` for the definition of Baxter
    permutations.
    EXAMPLES::
        sage: from sage.combinat.baxter_permutations import BaxterPermutations_all
        sage: BaxterPermutations_all()
        Baxter permutations
    """
    def __init__(self, n=None):
        r"""
        Initialize the disjoint union over all sizes.
        EXAMPLES::
            sage: from sage.combinat.baxter_permutations import BaxterPermutations_all
            sage: BaxterPermutations_all()
            Baxter permutations
        """
        self.element_class = Permutations().element_class
        from sage.categories.examples.infinite_enumerated_sets import NonNegativeIntegers
        from sage.sets.family import Family
        # Union of BaxterPermutations_size(n) for n = 0, 1, 2, ...
        DisjointUnionEnumeratedSets.__init__(self,
                                             Family(NonNegativeIntegers(),
                                                    BaxterPermutations_size),
                                             facade=False, keepkey=False)
    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        EXAMPLES::
            sage: from sage.combinat.baxter_permutations import BaxterPermutations_all
            sage: BaxterPermutations_all()
            Baxter permutations
        """
        return "Baxter permutations"
    def __contains__(self, x):
        r"""
        Return ``True`` if and only if ``x`` is a Baxter permutation.
        INPUT:
        - ``x`` -- any object.
        EXAMPLES::
            sage: Permutation([4, 2, 1, 7, 3, 8, 5, 6]) in BaxterPermutations()
            False
            sage: Permutation([4, 3, 6, 9, 7, 5, 1, 2, 8]) in BaxterPermutations()
            True
        """
        if not x in Permutations():
            return False
        # Delegate the pattern check to the size-restricted class.
        return x in BaxterPermutations(len(x))
    def to_pair_of_twin_binary_trees(self, p):
        r"""
        Apply a bijection between Baxter permutations of size ``self._n``
        and the set of pairs of twin binary trees with ``self._n`` nodes.
        INPUT:
        - ``p`` -- a Baxter permutation.
        OUTPUT:
        The pair of twin binary trees `(T_L, T_R)` where `T_L`
        (resp. `T_R`) is obtained by inserting the letters of ``p`` from
        left to right (resp. right to left) following the binary search
        tree insertion algorithm. This is called the *Baxter P-symbol*
        in [Gir12]_ Definition 4.1.
        .. NOTE::
            This method only works when ``p`` is a permutation. For words
            with repeated letters, it would return two "right binary
            search trees" (in the terminology of [Gir12]_), which conflicts
            with the definition in [Gir12]_.
        EXAMPLES::
            sage: BaxterPermutations().to_pair_of_twin_binary_trees(Permutation([]))
            (., .)
            sage: BaxterPermutations().to_pair_of_twin_binary_trees(Permutation([1, 2, 3]))
            (1[., 2[., 3[., .]]], 3[2[1[., .], .], .])
            sage: BaxterPermutations().to_pair_of_twin_binary_trees(Permutation([3, 4, 1, 2]))
            (3[1[., 2[., .]], 4[., .]], 2[1[., .], 4[3[., .], .]])
        """
        from sage.combinat.binary_tree import LabelledBinaryTree
        left = LabelledBinaryTree(None)
        right = LabelledBinaryTree(None)
        # Left tree: insert letters left-to-right; right tree: right-to-left.
        for a in p:
            left = left.binary_search_insert(a)
        for a in reversed(p):
            right = right.binary_search_insert(a)
        return (left, right)
| 33.512748
| 124
| 0.552578
|
4a0c5ad8f5d940e85ecb45146ec6a49d921a180b
| 2,565
|
py
|
Python
|
skodaconnect/utilities.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | 30
|
2020-11-29T05:27:54.000Z
|
2022-02-01T19:50:32.000Z
|
skodaconnect/utilities.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | 43
|
2020-11-21T16:51:06.000Z
|
2022-01-12T12:55:37.000Z
|
skodaconnect/utilities.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | 22
|
2020-11-20T13:25:40.000Z
|
2022-02-01T19:51:37.000Z
|
from datetime import date, datetime
from base64 import b64encode
from string import ascii_letters as letters, digits
from sys import argv
from os import environ as env
from os.path import join, dirname, expanduser
from itertools import product
import json
import logging
import re
_LOGGER = logging.getLogger(__name__)
def read_config():
    """Read the first skoda config file found and return it as a dict.

    Searches ``skoda.conf`` / ``.skoda.conf`` in the script directory, the
    home directory and ``$XDG_CONFIG_HOME``. Lines are ``key: value`` pairs;
    ``#`` lines are comments. Returns an empty dict when no file is readable.
    """
    for directory, filename in product(
        [
            dirname(argv[0]),
            expanduser("~"),
            env.get("XDG_CONFIG_HOME", join(expanduser("~"), ".config")),
        ],
        ["skoda.conf", ".skoda.conf"],
    ):
        # Use distinct names for the path and the file handle; the original
        # rebound ``config`` from path to handle, which obscured the code.
        config_path = join(directory, filename)
        _LOGGER.debug("checking for config file %s", config_path)
        try:
            with open(config_path) as config_file:
                return dict(
                    # maxsplit=1 so values containing ": " do not break the
                    # two-element unpacking that dict() requires.
                    line.split(": ", 1)
                    for line in config_file.read().strip().splitlines()
                    if not line.startswith("#")
                )
        except OSError:  # IOError is an alias of OSError on Python 3
            continue
    return {}
def json_loads(s):
    """Deserialize JSON text *s*, converting ISO-8601 timestamp strings in
    every object to ``datetime`` values via :func:`obj_parser`."""
    return json.loads(s, object_hook=obj_parser)
def obj_parser(obj):
    """JSON object hook: replace ISO-8601 timestamp strings with datetimes.

    Values that are not strings in ``%Y-%m-%dT%H:%M:%S%z`` form are left
    untouched; the (mutated) mapping is returned.
    """
    for key in obj:
        raw = obj[key]
        try:
            parsed = datetime.strptime(raw, "%Y-%m-%dT%H:%M:%S%z")
        except (TypeError, ValueError):
            continue
        obj[key] = parsed
    return obj
def find_path(src, path):
    """Navigate a hierarchical dict with an XPATH-like dotted path.

    >>> find_path(dict(a=1), 'a')
    1
    >>> find_path(dict(a=1), '')
    {'a': 1}
    >>> find_path(dict(a=dict(b=1)), 'a.b')
    1
    >>> find_path(dict(a=1), 'b')
    Traceback (most recent call last):
    ...
    KeyError: 'b'

    Raises ``KeyError`` for a missing key; an empty/falsy *path* returns
    *src* unchanged. *path* may be a dotted string or a sequence of keys.
    """
    if not path:
        return src
    keys = path.split(".") if isinstance(path, str) else list(path)
    node = src
    for key in keys:
        node = node[key]
    return node
def is_valid_path(src, path):
    """Return whether the dotted *path* resolves inside *src*.

    >>> is_valid_path(dict(a=1), 'a')
    True
    >>> is_valid_path(dict(a=1), '')
    True
    >>> is_valid_path(dict(a=1), None)
    True
    >>> is_valid_path(dict(a=1), 'b')
    False
    """
    # Walk the path inline (same traversal as find_path); only a missing
    # key makes the path invalid — other errors propagate as before.
    if path:
        keys = path.split(".") if isinstance(path, str) else list(path)
        node = src
        for key in keys:
            try:
                node = node[key]
            except KeyError:
                return False
    return True
def camel2slug(s):
    """Convert camelCase to camel_case.

    >>> camel2slug('fooBar')
    'foo_bar'
    """
    underscored = re.sub("([A-Z])", "_\\1", s)
    return underscored.lower().lstrip("_")
| 21.923077
| 82
| 0.548928
|
4a0c5b521f0bd4b3874769a2bd450ce612bd8611
| 1,011
|
py
|
Python
|
setup.py
|
fossabot/django-todopago
|
83a308d05bacf0b7f179812bb01ba30c32cc4653
|
[
"ISC"
] | 3
|
2018-11-29T01:46:55.000Z
|
2019-10-23T02:37:17.000Z
|
setup.py
|
fossabot/django-todopago
|
83a308d05bacf0b7f179812bb01ba30c32cc4653
|
[
"ISC"
] | 2
|
2020-02-07T12:08:03.000Z
|
2021-08-03T18:56:37.000Z
|
setup.py
|
fossabot/django-todopago
|
83a308d05bacf0b7f179812bb01ba30c32cc4653
|
[
"ISC"
] | 3
|
2019-07-20T23:41:32.000Z
|
2021-12-01T21:26:55.000Z
|
#!/usr/bin/env python3
# Packaging script for django-todopago.
from setuptools import setup
setup(
    name='django_todopago',
    description='Library to integrate TodoPago into Django apps',
    author='Hugo Osvaldo Barrera',
    author_email='hugo@barrera.io',
    url='https://github.com/WhyNotHugo/django-todopago',
    license='ISC',
    packages=['django_todopago'],
    include_package_data=True,
    install_requires=[
        'django>=2.0',
        'suds-jurko',
        'requests',
    ],
    long_description=open('README.rst').read(),
    # Version is derived from git tags by setuptools_scm and written to
    # django_todopago/version.py at build time.
    use_scm_version={
        'version_scheme': 'post-release',
        'write_to': 'django_todopago/version.py',
    },
    setup_requires=['setuptools_scm'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ]
)
| 28.885714
| 65
| 0.609298
|
4a0c5bd2295e8d813d0bc9bba858c5e478931fba
| 4,539
|
py
|
Python
|
utils.py
|
applicaai/nlpday-workshop-2019
|
ce2d193082afbd87b704f3188eb537121d229566
|
[
"MIT"
] | 3
|
2019-05-31T14:34:32.000Z
|
2021-01-26T15:57:11.000Z
|
utils.py
|
applicaai/nlpday-workshop-2019
|
ce2d193082afbd87b704f3188eb537121d229566
|
[
"MIT"
] | 1
|
2020-11-18T21:53:27.000Z
|
2020-11-18T21:53:27.000Z
|
utils.py
|
applicaai/nlpday-workshop-2019
|
ce2d193082afbd87b704f3188eb537121d229566
|
[
"MIT"
] | null | null | null |
# lots of import for checking the environment
import numpy as np
import scipy
import sklearn
import modAL
import tqdm
import lime
import random
# actual imports
import matplotlib.pyplot as plt
import warnings
from sklearn.datasets import fetch_20newsgroups
from typing import Tuple, List, Any, Union, Callable
from flair.data import Token, Sentence
import pandas as pd
def load_news20(real: bool = False) -> Tuple[Tuple[list, list], Tuple[list, list], List[str]]:
    """
    Loads the 20 News Group dataset split by train and test as a raw text with class names,
    see: http://qwone.com/~jason/20Newsgroups/ for details.
    :param real: bool, default False
        Whether to use the `real` dataset, with headers, footers and quotes stripped
    :return: tuple (train set, test set, class names)
        Train and test set as tuples (data, target) and the class names is a list
    """
    # The "real" variant strips metadata so classifiers cannot exploit it;
    # passing remove=() is sklearn's default, so one code path suffices.
    remove = ('headers', 'footers', 'quotes') if real else ()
    train_data = fetch_20newsgroups(subset='train', remove=remove)
    test_data = fetch_20newsgroups(subset='test', remove=remove)
    # extract class names
    class_names = train_data['target_names']
    # extract raw data and labels
    X_raw_train, y_train_full = train_data['data'], train_data['target']
    X_raw_test, y_test_full = test_data['data'], test_data['target']
    # reformat class names to more readable
    class_names = [x.split('.')[-1] if 'misc' not in x else '.'.join(x.split('.')[-2:]) for x in class_names]
    class_names[3] = 'pc.hardware'
    class_names[4] = 'mac.hardware'
    class_names[5] = 'ms-windows.x'
    return (X_raw_train, y_train_full), (X_raw_test, y_test_full), class_names
def replace_token(sentence: Sentence, token: Token, new_word: str = ""):
    """Replace *token*'s text with *new_word*, or remove the token from
    *sentence* entirely when *new_word* is the empty string."""
    if new_word != "":
        token.text = new_word
    else:
        sentence.tokens.remove(token)
def most_important_words_black(text, label:int, pipeline: Callable[[List[str]], np.array]) -> List[Tuple[str, float]]:
    """Returns list of words sorted descending by importance in text and loss in true class probability.
    Importance is measured black-box style: the drop in the predicted
    probability of ``label`` when each word is removed in turn.
    """
    sentence = Sentence(text, True)
    tokens = sentence.tokens
    if len(tokens) == 1:
        # NOTE(review): this early exit yields 2-tuples while the normal path
        # yields (idx, text, importance) 3-tuples — confirm callers cope.
        return [(text,1)]
    true_probability = [None]*len(tokens)
    new_texts = [""]*len(tokens)
    # create list of modified sentences with removed words
    for i, token in enumerate(tokens):
        sentence_tmp = Sentence()
        for t in tokens:
            sentence_tmp.add_token(t)
        replace_token(sentence_tmp, token)
        new_texts[i] = sentence_tmp.to_plain_string()
    output = pipeline(new_texts)
    org_prob = pipeline([text])[0][label]
    # probability of of true label with removed word
    for i, o in enumerate(output):
        true_probability[i] = o[label]
    # return sorted ascending by probability is equivalent of sorted descending by importance
    return [(x.idx, x.text, org_prob - p)for p ,x in sorted(zip(true_probability,tokens), key=lambda pair: pair[0])]
def change_most_important_word(text, label:int, pipeline: Callable[[List[str]], np.array], corruptor) -> str:
    """Return *text* with its most important word (w.r.t. *label*) corrupted.

    Importance is the drop in the predicted probability of ``label`` when a
    word is removed.
    :param text: input sentence.
    :param label: index of the true class.
    :param pipeline: callable mapping a list of texts to probability rows.
    :param corruptor: callable mapping a word to its corrupted form.
    """
    sentence = Sentence(text, True)
    tokens = sentence.tokens
    if len(tokens) == 1:
        # Bug fix: previously returned ``[(text, 1)]`` (a list, copy-pasted
        # from most_important_words_black) although this function is
        # annotated and documented to return a string.
        return text
    true_probability = [None]*len(tokens)
    new_texts = [""]*len(tokens)
    # create list of modified sentences with removed words
    for i, token in enumerate(tokens):
        sentence_tmp = Sentence()
        for t in tokens:
            sentence_tmp.add_token(t)
        replace_token(sentence_tmp, token)
        new_texts[i] = sentence_tmp.to_plain_string()
    output = pipeline(new_texts)
    org_prob = pipeline([text])[0][label]
    # probability of the true label with each word removed; the word whose
    # removal hurts the prediction most is the most important one
    for i, o in enumerate(output):
        true_probability[i] = o[label]
    most_imp_index = np.argmin(true_probability)
    tokens[most_imp_index].text = corruptor(tokens[most_imp_index].text)
    return sentence.to_plain_string()
def change_random_word(text, corruptor) -> str:
    """Return *text* with one uniformly chosen word replaced by
    ``corruptor(word)``; used as a baseline against targeted corruption."""
    sentence = Sentence(text, True)
    tokens = sentence.tokens
    random_token = random.choice(tokens)
    random_token.text = corruptor(random_token.text)
    return sentence.to_plain_string()
| 34.648855
| 118
| 0.67592
|
4a0c5c17ddcdfce14caf9e6e3143d74f4d8d57eb
| 2,178
|
py
|
Python
|
nilmtk/stats/tests/test_totalenergy.py
|
chandru99/nilmtk
|
236b16906ec1f6e6ba973e30af11affe5f7e2c9a
|
[
"Apache-2.0"
] | 646
|
2015-01-17T20:21:58.000Z
|
2022-03-30T09:17:07.000Z
|
nilmtk/stats/tests/test_totalenergy.py
|
chandru99/nilmtk
|
236b16906ec1f6e6ba973e30af11affe5f7e2c9a
|
[
"Apache-2.0"
] | 643
|
2015-01-01T18:30:19.000Z
|
2022-03-23T08:34:29.000Z
|
nilmtk/stats/tests/test_totalenergy.py
|
chandru99/nilmtk
|
236b16906ec1f6e6ba973e30af11affe5f7e2c9a
|
[
"Apache-2.0"
] | 484
|
2015-01-03T06:37:19.000Z
|
2022-03-22T15:20:03.000Z
|
import unittest
from ..totalenergy import TotalEnergy, _energy_for_power_series
from ...preprocessing import Clip
from ... import TimeFrame, ElecMeter, HDFDataStore
from ...elecmeter import ElecMeterID
from ...consts import JOULES_PER_KWH
from ...tests.testingtools import data_dir
from os.path import join
import numpy as np
import pandas as pd
from datetime import timedelta
from copy import deepcopy
METER_ID = ElecMeterID(instance=1, building=1, dataset='REDD')
def check_energy_numbers(testcase, energy):
    """Assert that *energy* matches the known ground-truth kWh values.

    In the test fixture, reactive and apparent energy are fixed multiples
    (0.9 and 1.1) of the active energy.
    """
    expected_active_kwh = 0.0163888888889
    for power_type, factor in (('active', 1.0), ('reactive', 0.9), ('apparent', 1.1)):
        testcase.assertAlmostEqual(energy[power_type], expected_active_kwh * factor)
class TestEnergy(unittest.TestCase):
    # Integration tests for the TotalEnergy stat over the bundled energy.h5 fixture.
    @classmethod
    def setUpClass(cls):
        # Open the HDF5 fixture once for the whole class and cache the
        # metadata of the meter under test.
        filename = join(data_dir(), 'energy.h5')
        cls.datastore = HDFDataStore(filename)
        ElecMeter.load_meter_devices(cls.datastore)
        cls.meter_meta = cls.datastore.load_metadata('building1')['elec_meters'][METER_ID.instance]
    @classmethod
    def tearDownClass(cls):
        cls.datastore.close()
    def test_energy_per_power_series(self):
        # Hand-compute kWh for a step-wise power series sampled every 10 s
        # and compare with _energy_for_power_series.
        data = np.array([0, 0, 0, 100, 100, 100, 150, 150, 200, 0, 0, 100, 5000, 0])
        secs = np.arange(start=0, stop=len(data)*10, step=10)
        true_kwh = ((data[:-1] * np.diff(secs)) / JOULES_PER_KWH).sum()
        index = [pd.Timestamp('2010-01-01') + timedelta(seconds=int(sec)) for sec in secs]
        df = pd.Series(data=data, index=index)
        kwh = _energy_for_power_series(df, max_sample_period=15)
        self.assertAlmostEqual(true_kwh, kwh)
    def test_pipeline(self):
        # End-to-end: source -> Clip -> TotalEnergy, then check the combined totals.
        meter = ElecMeter(store=self.datastore,
                          metadata=self.meter_meta,
                          meter_id=METER_ID)
        source_node = meter.get_source_node()
        clipped = Clip(source_node)
        energy = TotalEnergy(clipped)
        energy.run()
        # deepcopy so later pipeline runs cannot mutate the checked results.
        energy_results = deepcopy(energy.results)
        check_energy_numbers(self, energy_results.combined())
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 35.704918
| 99
| 0.68641
|
4a0c5c406c143604bcd0a10b1df3f910edf97c4c
| 3,029
|
py
|
Python
|
clips/common.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
clips/common.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
clips/common.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016-2019, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from enum import IntEnum
from collections import namedtuple
# Define Symbol for the running interpreter: string interning lives in
# ``sys.intern`` on Python 3 and in the ``intern`` builtin on Python 2.
if sys.version_info.major == 3:
    class Symbol(str):
        """Python equivalent of a CLIPS SYMBOL."""
        def __new__(cls, symbol):
            # Interning makes equal symbols share one object, so equality
            # checks and dict lookups hit the identity fast-path.
            return str.__new__(cls, sys.intern(symbol))
elif sys.version_info.major == 2:
    class Symbol(str):
        """Python equivalent of a CLIPS SYMBOL."""
        def __new__(cls, symbol):
            # pylint: disable=E0602
            # ``intern`` is a builtin on Python 2 only.
            return str.__new__(cls, intern(str(symbol)))
class InstanceName(Symbol):
    """Instance names are CLIPS SYMBOLS."""
    # Distinct subclass so instance names can be told apart from plain symbols.
    pass
# CLIPS primitive data-type codes; values mirror the numeric constants the
# rest of this package passes to/receives from the CLIPS C library.
CLIPSType = IntEnum(
    'CLIPSType',
    [
        ('FLOAT', 0),
        ('INTEGER', 1),
        ('SYMBOL', 2),
        ('STRING', 3),
        ('MULTIFIELD', 4),
        ('EXTERNAL_ADDRESS', 5),
        ('FACT_ADDRESS', 6),
        ('INSTANCE_ADDRESS', 7),
        ('INSTANCE_NAME', 8),
    ],
)
class SaveMode(IntEnum):
    # Scope selector used when saving instances (local vs visible).
    LOCAL_SAVE = 0
    VISIBLE_SAVE = 1
class ClassDefaultMode(IntEnum):
    # Default-slot handling mode for defclasses.
    CONVENIENCE_MODE = 0
    CONSERVATION_MODE = 1
class Strategy(IntEnum):
    # Agenda conflict-resolution strategies.
    DEPTH = 0
    BREADTH = 1
    LEX = 2
    MEA = 3
    COMPLEXITY = 4
    SIMPLICITY = 5
    RANDOM = 6
class SalienceEvaluation(IntEnum):
    # When rule salience expressions are (re-)evaluated.
    WHEN_DEFINED = 0
    WHEN_ACTIVATED = 1
    EVERY_CYCLE = 2
class Verbosity(IntEnum):
    # Output verbosity levels.
    VERBOSE = 0
    SUCCINT = 1
    TERSE = 2
class TemplateSlotDefaultType(IntEnum):
    # Kind of default value a deftemplate slot declares.
    NO_DEFAULT = 0
    STATIC_DEFAULT = 1
    DYNAMIC_DEFAULT = 2
# Assign functions and routers per Environment
# Keyed by environment; each entry bundles the user functions and routers
# registered for that environment.
ENVIRONMENT_DATA = {}
EnvData = namedtuple('EnvData', ('user_functions', 'routers'))
| 28.308411
| 80
| 0.7207
|
4a0c5d154881988ef67394721f89ea38e50615de
| 3,471
|
py
|
Python
|
docs/make_docs_test_files.py
|
tltx/iommi
|
a0ca5e261040cc0452d7452e9320a88af5222b30
|
[
"BSD-3-Clause"
] | null | null | null |
docs/make_docs_test_files.py
|
tltx/iommi
|
a0ca5e261040cc0452d7452e9320a88af5222b30
|
[
"BSD-3-Clause"
] | null | null | null |
docs/make_docs_test_files.py
|
tltx/iommi
|
a0ca5e261040cc0452d7452e9320a88af5222b30
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
from glob import glob
from pathlib import Path
base_dir = Path(__file__).parent
# Generated pytest files are written next to the docs sources.
os.makedirs(base_dir / '_generated_tests', exist_ok=True)
# Make the project importable before configuring Django.
sys.path.insert(0, base_dir.parent)
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from django import setup
setup()
from iommi.docs import generate_rst_docs
# Regenerate the .rst API docs in place before converting them to tests.
generate_rst_docs(str(base_dir))
def build_test_file_from_rst(filename):
    """Convert one .rst document into an executable pytest file.

    Sections (delimited by rst underline rows) become test functions;
    ``code:: pycon`` doctest-style blocks are rewritten as assertions, while
    ``code:: python`` / ``test`` / ``imports`` blocks are copied through.
    Blank-line padding keeps the generated line numbers aligned with the
    source .rst, so failures point back at the docs.
    """
    with open(filename) as f:
        lines = list(f.readlines())
    sections = [
        dict(header=None, code=[])
    ]
    current_section = sections[0]
    type_of_block = None
    for i, line in enumerate(lines):
        if line[:4] in ('~~~~', '====', '----', '^^^^'):
            # An underline row: the previous line is the section title.
            # Strip characters that cannot appear in a function name.
            header = lines[i-1].replace(':', '').replace('.', 'dot').replace("'", '').replace('&', '')
            current_section = dict(header=header, code=[])
            sections.append(current_section)
            type_of_block = None
        elif line.startswith('..'):
            # An rst directive introduces the next literal block's type.
            type_of_block = line[2:].strip()
        elif line.startswith(' '):
            # Indented lines belong to the current literal block.
            if type_of_block == 'code:: pycon':
                if line.strip().startswith('>>>'):
                    current_section['code'].append((line.replace('>>>', 'tmp ='), i))
                elif line.strip().startswith('...'):
                    current_section['code'].append((line.replace('...', ''), i))
                else:
                    # A doctest expected-output line becomes an assertion.
                    current_section['code'].append((' assert tmp == ' + line.strip(' '), i))
            elif type_of_block in ('code:: python', 'test'):
                current_section['code'].append((line, i))
            elif type_of_block == 'imports':
                current_section['code'].append((line[4:], i))  # 4: is to dedent one level
    # Translation table mapping characters illegal in identifiers.
    func_trans = str.maketrans({
        '?': None,
        ' ': '_',
        '-': '_',
        '/': '_',
        ',': '_',
        '`': None,
    })
    with open(base_dir / '_generated_tests' / f'test_{filename.partition(os.path.sep)[-1].partition(".")[0]}.py', 'w') as f:
        current_line = 0
        def write(s):
            # Write and track the output line count for rst/py alignment.
            f.write(s)
            nonlocal current_line
            current_line += s.count('\n')
        setup = '''
from iommi import *
from iommi.admin import Admin
from django.urls import (
    include,
    path,
)
from django.db import models
from tests.helpers import req, user_req, staff_req
from docs.models import *
request = req('get')
'''.strip() + '\n'
        write(setup)
        write('\n')
        for section in sections:
            if section['header']:
                func_name = section['header'].strip().translate(func_trans).lower().partition('(')[0]
                def_line = f'\n\ndef test_{func_name}():\n'
                write(def_line)
            else:
                func_name = None
            for line, line_number in section['code']:
                # This stuff is to make the line numbers align between .rst and test_*.py files.
                while line_number > current_line:
                    write('\n')
                if line.strip() == 'return':
                    # A little hack to turn some return statements like "if not form.is_valid(): return" into non-covered
                    write(line.rstrip() + ' # pragma: no cover\n')
                else:
                    write(line)
            if not section['code'] and func_name:
                # Keep empty sections syntactically valid.
                write(' pass\n')
# Convert every .rst document in the docs directory into a pytest file.
for x in glob(str(base_dir / '*.rst')):
    build_test_file_from_rst(x)
| 30.716814
| 124
| 0.536445
|
4a0c5e4285ca3c9aca9fe7bc143d368f599c8e02
| 1,342
|
py
|
Python
|
sec/violent/codes/nmapportscan.py
|
imsilence/notes
|
2e00f65f0d61fd077c589e0e9962b062bf591efc
|
[
"Apache-2.0"
] | null | null | null |
sec/violent/codes/nmapportscan.py
|
imsilence/notes
|
2e00f65f0d61fd077c589e0e9962b062bf591efc
|
[
"Apache-2.0"
] | null | null | null |
sec/violent/codes/nmapportscan.py
|
imsilence/notes
|
2e00f65f0d61fd077c589e0e9962b062bf591efc
|
[
"Apache-2.0"
] | null | null | null |
#encoding: utf-8
import logging
import argparse
import colorama
import nmap
import socket
logger = logging.getLogger(__name__)
def port_scan(host, ports):
    """Run an nmap TCP scan of *ports* on *host* and log each port's state.

    :param host: host name or IP address to scan
    :param ports: iterable of integer TCP port numbers
    """
    try:
        _ip = socket.gethostbyname(host)
    except socket.gaierror:
        # Only catch name-resolution failures here; the previous
        # `except BaseException` also swallowed KeyboardInterrupt/SystemExit.
        logger.error('%s[-] error get host ip addr:%s', colorama.Fore.RED, host)
        return
    _nps = nmap.PortScanner()
    # nmap expects the port list as a comma-separated string, e.g. "80,443".
    _result = _nps.scan(hosts=_ip, ports=','.join(map(str, ports)))
    _tcps = _result.get('scan', {}).get(_ip, {}).get('tcp', {})
    logger.info('[+]cmd: %s', _result.get('nmap', {}).get('command_line', ''))
    for _port in ports:
        _status = _tcps.get(_port, {})
        logger.info('%s[*] %s/tcp %s %s %s', \
                colorama.Fore.GREEN, host, _port, \
                _status.get('state', 'unknown'), \
                _status.get('name', ''))
if __name__ == '__main__':
    # Script entry point: enable colored terminal output, verbose logging,
    # then parse the CLI arguments and run the scan.
    colorama.init(autoreset=True)
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('-T', '--target', type=str, default='localhost', help='scan target')
    parser.add_argument('-P', '--ports', type=int, nargs='+', default=[80, 21, 22, 443, 8080], help='scan ports')
    arguments = parser.parse_args()
    port_scan(arguments.target, arguments.ports)
| 35.315789
| 111
| 0.577496
|
4a0c5e49f002676a9b94721d77b0c15a534c8d55
| 1,172
|
py
|
Python
|
QRSMS/initial/migrations/0049_auto_20200530_1304.py
|
Srishti-Ahuja/QRSMS-V1
|
1f2fa82e8ddaeb62e633fcd6a136696355317bba
|
[
"Apache-2.0"
] | 4
|
2020-06-16T09:42:20.000Z
|
2021-11-24T08:18:16.000Z
|
QRSMS/initial/migrations/0049_auto_20200530_1304.py
|
Srishti-Ahuja/QRSMS-V1
|
1f2fa82e8ddaeb62e633fcd6a136696355317bba
|
[
"Apache-2.0"
] | 7
|
2021-04-08T21:57:34.000Z
|
2022-02-27T06:41:15.000Z
|
QRSMS/initial/migrations/0049_auto_20200530_1304.py
|
Srishti-Ahuja/QRSMS-V1
|
1f2fa82e8ddaeb62e633fcd6a136696355317bba
|
[
"Apache-2.0"
] | 7
|
2020-11-29T09:45:44.000Z
|
2022-03-30T15:27:33.000Z
|
# Generated by Django 2.2 on 2020-05-30 08:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration (2020-05-30). Adds mean /
    # standard-deviation statistics columns to SectionMarks and relaxes
    # MarkSheet.grade to allow blank/null values. Do not hand-edit the
    # operations: Django's migration state must match this file exactly.

    dependencies = [
        ('initial', '0048_auto_20200527_1706'),
    ]

    operations = [
        migrations.AddField(
            model_name='sectionmarks',
            name='marks_mean',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AddField(
            model_name='sectionmarks',
            name='marks_standard_deviation',
            field=models.FloatField(blank=True, default=0, null=True),
        ),
        migrations.AddField(
            model_name='sectionmarks',
            name='weightage_mean',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AddField(
            model_name='sectionmarks',
            name='weightage_standard_deviation',
            field=models.FloatField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='marksheet',
            name='grade',
            field=models.CharField(blank=True, max_length=3, null=True),
        ),
    ]
| 30.051282
| 72
| 0.585324
|
4a0c5f0dc1899ec462680251781cf0785349ace5
| 27,506
|
py
|
Python
|
coloredlogs/tests.py
|
hugovk/python-coloredlogs
|
b63fce3fbf112c568ef89e1670d511a98f64df80
|
[
"MIT"
] | null | null | null |
coloredlogs/tests.py
|
hugovk/python-coloredlogs
|
b63fce3fbf112c568ef89e1670d511a98f64df80
|
[
"MIT"
] | null | null | null |
coloredlogs/tests.py
|
hugovk/python-coloredlogs
|
b63fce3fbf112c568ef89e1670d511a98f64df80
|
[
"MIT"
] | null | null | null |
# Automated tests for the `coloredlogs' package.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: February 14, 2020
# URL: https://coloredlogs.readthedocs.io
"""Automated tests for the `coloredlogs` package."""
# Standard library modules.
import contextlib
import imp
import logging
import logging.handlers
import os
import re
import subprocess
import sys
import tempfile
# External dependencies.
from humanfriendly.compat import StringIO
from humanfriendly.terminal import ANSI_COLOR_CODES, ansi_style, ansi_wrap
from humanfriendly.testing import PatchedItem, TestCase, retry
from humanfriendly.text import format, random_string
from mock import MagicMock
# The module we're testing.
import coloredlogs
import coloredlogs.cli
from coloredlogs import (
CHROOT_FILES,
ColoredFormatter,
NameNormalizer,
decrease_verbosity,
find_defined_levels,
find_handler,
find_hostname,
find_program_name,
get_level,
increase_verbosity,
install,
is_verbose,
level_to_number,
match_stream_handler,
parse_encoded_styles,
set_level,
walk_propagation_tree,
)
from coloredlogs.syslog import SystemLogging, match_syslog_handler
from coloredlogs.converter import (
ColoredCronMailer,
EIGHT_COLOR_PALETTE,
capture,
convert,
)
# External test dependencies.
from capturer import CaptureOutput
from verboselogs import VerboseLogger
# Compiled regular expression that matches a single line of output produced by
# the default log format (does not include matching of ANSI escape sequences).
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
# Compiled regular expression that matches a single line of output produced by
# the default log format with milliseconds=True.
PATTERN_INCLUDING_MILLISECONDS = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2},\d{3} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
def setUpModule():
    """Speed up the tests by disabling the demo's artificial delay."""
    # Set both knobs: the environment variable (presumably read when the demo
    # is launched in a subprocess — confirm) and the in-process module constant.
    os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'
    coloredlogs.demo.DEMO_DELAY = 0
class ColoredLogsTestCase(TestCase):

    """Container for the `coloredlogs` tests."""

    # NOTE: all self.assertEquals() calls were modernized to assertEqual();
    # assertEquals was a deprecated alias that was removed in Python 3.12.

    def find_system_log(self):
        """Find the system log file or skip the current test."""
        filename = ('/var/log/system.log' if sys.platform == 'darwin' else (
            '/var/log/syslog' if 'linux' in sys.platform else None
        ))
        if not filename:
            self.skipTest("Location of system log file unknown!")
        elif not os.path.isfile(filename):
            self.skipTest("System log file not found! (%s)" % filename)
        elif not os.access(filename, os.R_OK):
            self.skipTest("Insufficient permissions to read system log file! (%s)" % filename)
        else:
            return filename

    def test_level_to_number(self):
        """Make sure :func:`level_to_number()` works as intended."""
        # Make sure the default levels are translated as expected.
        assert level_to_number('debug') == logging.DEBUG
        assert level_to_number('info') == logging.INFO
        assert level_to_number('warning') == logging.WARNING
        assert level_to_number('error') == logging.ERROR
        assert level_to_number('fatal') == logging.FATAL
        # Make sure bogus level names don't blow up.
        assert level_to_number('bogus-level') == logging.INFO

    def test_find_hostname(self):
        """Make sure :func:`~find_hostname()` works correctly."""
        assert find_hostname()
        # Create a temporary file as a placeholder for e.g. /etc/debian_chroot.
        fd, temporary_file = tempfile.mkstemp()
        try:
            with open(temporary_file, 'w') as handle:
                handle.write('first line\n')
                handle.write('second line\n')
            CHROOT_FILES.insert(0, temporary_file)
            # Make sure the chroot file is being read.
            assert find_hostname() == 'first line'
        finally:
            # Clean up.
            CHROOT_FILES.pop(0)
            os.unlink(temporary_file)
        # Test that unreadable chroot files don't break coloredlogs.
        try:
            CHROOT_FILES.insert(0, temporary_file)
            # Make sure that a usable value is still produced.
            assert find_hostname()
        finally:
            # Clean up.
            CHROOT_FILES.pop(0)

    def test_host_name_filter(self):
        """Make sure :func:`install()` integrates with :class:`~coloredlogs.HostNameFilter()`."""
        install(fmt='%(hostname)s')
        with CaptureOutput() as capturer:
            logging.info("A truly insignificant message ..")
            output = capturer.get_text()
            assert find_hostname() in output

    def test_program_name_filter(self):
        """Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
        install(fmt='%(programname)s')
        with CaptureOutput() as capturer:
            logging.info("A truly insignificant message ..")
            output = capturer.get_text()
            assert find_program_name() in output

    def test_colorama_enabled(self):
        """Test that colorama is enabled (through mocking)."""
        init_function = MagicMock()
        with mocked_colorama_module(init_function):
            # Configure logging to the terminal.
            coloredlogs.install()
            # Ensure that our mock method was called.
            assert init_function.called

    def test_colorama_missing(self):
        """Test that colorama is missing (through mocking)."""
        def init_function():
            raise ImportError
        with mocked_colorama_module(init_function):
            # Configure logging to the terminal. It is expected that internally
            # an ImportError is raised, but the exception is caught and colored
            # output is disabled.
            coloredlogs.install()
            # Find the handler that was created by coloredlogs.install().
            handler, logger = find_handler(logging.getLogger(), match_stream_handler)
            # Make sure that logging to the terminal was initialized.
            assert isinstance(handler.formatter, logging.Formatter)
            # Make sure colored logging is disabled.
            assert not isinstance(handler.formatter, ColoredFormatter)

    def test_system_logging(self):
        """Make sure the :class:`coloredlogs.syslog.SystemLogging` context manager works."""
        system_log_file = self.find_system_log()
        expected_message = random_string(50)
        with SystemLogging(programname='coloredlogs-test-suite') as syslog:
            if not syslog:
                return self.skipTest("couldn't connect to syslog daemon")
            # When I tried out the system logging support on macOS 10.13.1 on
            # 2018-01-05 I found that while WARNING and ERROR messages show up
            # in the system log DEBUG and INFO messages don't. This explains
            # the importance of the level of the log message below.
            logging.error("%s", expected_message)
        # Retry the following assertion (for up to 60 seconds) to give the
        # logging daemon time to write our log message to disk. This
        # appears to be needed on MacOS workers on Travis CI, see:
        # https://travis-ci.org/xolox/python-coloredlogs/jobs/325245853
        retry(lambda: check_contents(system_log_file, expected_message, True))

    def test_syslog_shortcut_simple(self):
        """Make sure that ``coloredlogs.install(syslog=True)`` works."""
        system_log_file = self.find_system_log()
        expected_message = random_string(50)
        with cleanup_handlers():
            # See test_system_logging() for the importance of this log level.
            coloredlogs.install(syslog=True)
            logging.error("%s", expected_message)
        # See the comments in test_system_logging() on why this is retried.
        retry(lambda: check_contents(system_log_file, expected_message, True))

    def test_syslog_shortcut_enhanced(self):
        """Make sure that ``coloredlogs.install(syslog='warning')`` works."""
        system_log_file = self.find_system_log()
        the_expected_message = random_string(50)
        not_an_expected_message = random_string(50)
        with cleanup_handlers():
            # See test_system_logging() for the importance of these log levels.
            coloredlogs.install(syslog='error')
            logging.warning("%s", not_an_expected_message)
            logging.error("%s", the_expected_message)
        # See the comments in test_system_logging() on why this is retried.
        retry(lambda: check_contents(system_log_file, the_expected_message, True))
        retry(lambda: check_contents(system_log_file, not_an_expected_message, False))

    def test_name_normalization(self):
        """Make sure :class:`~coloredlogs.NameNormalizer` works as intended."""
        nn = NameNormalizer()
        for canonical_name in ['debug', 'info', 'warning', 'error', 'critical']:
            assert nn.normalize_name(canonical_name) == canonical_name
            assert nn.normalize_name(canonical_name.upper()) == canonical_name
        assert nn.normalize_name('warn') == 'warning'
        assert nn.normalize_name('fatal') == 'critical'

    def test_style_parsing(self):
        """Make sure :func:`~coloredlogs.parse_encoded_styles()` works as intended."""
        encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
        decoded_styles = parse_encoded_styles(encoded_styles, normalize_key=lambda k: k.upper())
        assert sorted(decoded_styles.keys()) == sorted(['debug', 'warning', 'error', 'critical'])
        assert decoded_styles['debug']['color'] == 'green'
        assert decoded_styles['warning']['color'] == 'yellow'
        assert decoded_styles['error']['color'] == 'red'
        assert decoded_styles['critical']['color'] == 'red'
        assert decoded_styles['critical']['bold'] is True

    def test_is_verbose(self):
        """Make sure is_verbose() does what it should :-)."""
        set_level(logging.INFO)
        assert not is_verbose()
        set_level(logging.DEBUG)
        assert is_verbose()
        set_level(logging.VERBOSE)
        assert is_verbose()

    def test_increase_verbosity(self):
        """Make sure increase_verbosity() respects default and custom levels."""
        # Start from a known state.
        set_level(logging.INFO)
        assert get_level() == logging.INFO
        # INFO -> VERBOSE.
        increase_verbosity()
        assert get_level() == logging.VERBOSE
        # VERBOSE -> DEBUG.
        increase_verbosity()
        assert get_level() == logging.DEBUG
        # DEBUG -> SPAM.
        increase_verbosity()
        assert get_level() == logging.SPAM
        # SPAM -> NOTSET.
        increase_verbosity()
        assert get_level() == logging.NOTSET
        # NOTSET -> NOTSET.
        increase_verbosity()
        assert get_level() == logging.NOTSET

    def test_decrease_verbosity(self):
        """Make sure decrease_verbosity() respects default and custom levels."""
        # Start from a known state.
        set_level(logging.INFO)
        assert get_level() == logging.INFO
        # INFO -> NOTICE.
        decrease_verbosity()
        assert get_level() == logging.NOTICE
        # NOTICE -> WARNING.
        decrease_verbosity()
        assert get_level() == logging.WARNING
        # WARNING -> SUCCESS.
        decrease_verbosity()
        assert get_level() == logging.SUCCESS
        # SUCCESS -> ERROR.
        decrease_verbosity()
        assert get_level() == logging.ERROR
        # ERROR -> CRITICAL.
        decrease_verbosity()
        assert get_level() == logging.CRITICAL
        # CRITICAL -> CRITICAL.
        decrease_verbosity()
        assert get_level() == logging.CRITICAL

    def test_level_discovery(self):
        """Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
        defined_levels = find_defined_levels()
        level_values = defined_levels.values()
        for number in (0, 10, 20, 30, 40, 50):
            assert number in level_values

    def test_walk_propagation_tree(self):
        """Make sure walk_propagation_tree() properly walks the tree of loggers."""
        root, parent, child, grand_child = self.get_logger_tree()
        # Check the default mode of operation.
        loggers = list(walk_propagation_tree(grand_child))
        assert loggers == [grand_child, child, parent, root]
        # Now change the propagation (non-default mode of operation).
        child.propagate = False
        loggers = list(walk_propagation_tree(grand_child))
        assert loggers == [grand_child, child]

    def test_find_handler(self):
        """Make sure find_handler() works as intended."""
        root, parent, child, grand_child = self.get_logger_tree()
        # Add some handlers to the tree.
        stream_handler = logging.StreamHandler()
        syslog_handler = logging.handlers.SysLogHandler()
        child.addHandler(stream_handler)
        parent.addHandler(syslog_handler)
        # Make sure the first matching handler is returned.
        matched_handler, matched_logger = find_handler(grand_child, lambda h: isinstance(h, logging.Handler))
        assert matched_handler is stream_handler
        # Make sure the first matching handler of the given type is returned.
        matched_handler, matched_logger = find_handler(child, lambda h: isinstance(h, logging.handlers.SysLogHandler))
        assert matched_handler is syslog_handler

    def get_logger_tree(self):
        """Create and return a tree of loggers."""
        # Get the root logger.
        root = logging.getLogger()
        # Create a top level logger for ourselves.
        parent_name = random_string()
        parent = logging.getLogger(parent_name)
        # Create a child logger.
        child_name = '%s.%s' % (parent_name, random_string())
        child = logging.getLogger(child_name)
        # Create a grand child logger.
        grand_child_name = '%s.%s' % (child_name, random_string())
        grand_child = logging.getLogger(grand_child_name)
        return root, parent, child, grand_child

    def test_support_for_milliseconds(self):
        """Make sure milliseconds are hidden by default but can be easily enabled."""
        # Check that the default log format doesn't include milliseconds.
        stream = StringIO()
        install(reconfigure=True, stream=stream)
        logging.info("This should not include milliseconds.")
        assert all(map(PLAIN_TEXT_PATTERN.match, stream.getvalue().splitlines()))
        # Check that milliseconds can be enabled via a shortcut.
        stream = StringIO()
        install(milliseconds=True, reconfigure=True, stream=stream)
        logging.info("This should include milliseconds.")
        assert all(map(PATTERN_INCLUDING_MILLISECONDS.match, stream.getvalue().splitlines()))

    def test_support_for_milliseconds_directive(self):
        """Make sure milliseconds using the ``%f`` directive are supported."""
        stream = StringIO()
        install(reconfigure=True, stream=stream, datefmt='%Y-%m-%dT%H:%M:%S.%f%z')
        logging.info("This should be timestamped according to #45.")
        assert re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}[+-]\d{4}\s', stream.getvalue())

    def test_plain_text_output_format(self):
        """Inspect the plain text output of coloredlogs."""
        logger = VerboseLogger(random_string(25))
        stream = StringIO()
        install(level=logging.NOTSET, logger=logger, stream=stream)
        # Test that filtering on severity works.
        logger.setLevel(logging.INFO)
        logger.debug("No one should see this message.")
        assert len(stream.getvalue().strip()) == 0
        # Test that the default output format looks okay in plain text.
        logger.setLevel(logging.NOTSET)
        for method, severity in ((logger.debug, 'DEBUG'),
                                 (logger.info, 'INFO'),
                                 (logger.verbose, 'VERBOSE'),
                                 (logger.warning, 'WARNING'),
                                 (logger.error, 'ERROR'),
                                 (logger.critical, 'CRITICAL')):
            # XXX Workaround for a regression in Python 3.7 caused by the
            # Logger.isEnabledFor() method using stale cache entries. If we
            # don't clear the cache then logger.isEnabledFor(logging.DEBUG)
            # returns False and no DEBUG message is emitted.
            try:
                logger._cache.clear()
            except AttributeError:
                pass
            # Prepare the text.
            text = "This is a message with severity %r." % severity.lower()
            # Log the message with the given severity.
            method(text)
            # Get the line of output generated by the handler.
            output = stream.getvalue()
            lines = output.splitlines()
            last_line = lines[-1]
            assert text in last_line
            assert severity in last_line
            assert PLAIN_TEXT_PATTERN.match(last_line)

    def test_html_conversion(self):
        """Check the conversion from ANSI escape sequences to HTML."""
        # Check conversion of colored text.
        for color_name, ansi_code in ANSI_COLOR_CODES.items():
            ansi_encoded_text = 'plain text followed by %s text' % ansi_wrap(color_name, color=color_name)
            expected_html = format(
                '<code>plain text followed by <span style="color:{css}">{name}</span> text</code>',
                css=EIGHT_COLOR_PALETTE[ansi_code], name=color_name,
            )
            self.assertEqual(expected_html, convert(ansi_encoded_text))
        # Check conversion of bright colored text.
        expected_html = '<code><span style="color:#FF0">bright yellow</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('bright yellow', color='yellow', bright=True)))
        # Check conversion of text with a background color.
        expected_html = '<code><span style="background-color:#DE382B">red background</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('red background', background='red')))
        # Check conversion of text with a bright background color.
        expected_html = '<code><span style="background-color:#F00">bright red background</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('bright red background', background='red', bright=True)))
        # Check conversion of text that uses the 256 color mode palette as a foreground color.
        expected_html = '<code><span style="color:#FFAF00">256 color mode foreground</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('256 color mode foreground', color=214)))
        # Check conversion of text that uses the 256 color mode palette as a background color.
        expected_html = '<code><span style="background-color:#AF0000">256 color mode background</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('256 color mode background', background=124)))
        # Check that invalid 256 color mode indexes don't raise exceptions.
        expected_html = '<code>plain text expected</code>'
        self.assertEqual(expected_html, convert('\x1b[38;5;256mplain text expected\x1b[0m'))
        # Check conversion of bold text.
        expected_html = '<code><span style="font-weight:bold">bold text</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('bold text', bold=True)))
        # Check conversion of underlined text.
        expected_html = '<code><span style="text-decoration:underline">underlined text</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('underlined text', underline=True)))
        # Check conversion of strike-through text.
        expected_html = '<code><span style="text-decoration:line-through">strike-through text</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('strike-through text', strike_through=True)))
        # Check conversion of inverse text.
        expected_html = '<code><span style="background-color:#FFC706;color:#000">inverse</span></code>'
        self.assertEqual(expected_html, convert(ansi_wrap('inverse', color='yellow', inverse=True)))
        # Check conversion of URLs.
        for sample_text in 'www.python.org', 'http://coloredlogs.rtfd.org', 'https://coloredlogs.rtfd.org':
            sample_url = sample_text if '://' in sample_text else ('http://' + sample_text)
            expected_html = '<code><a href="%s" style="color:inherit">%s</a></code>' % (sample_url, sample_text)
            self.assertEqual(expected_html, convert(sample_text))
        # Check that the capture pattern for URLs doesn't match ANSI escape
        # sequences and also check that the short hand for the 0 reset code is
        # supported. These are tests for regressions of bugs found in
        # coloredlogs <= 8.0.
        reset_short_hand = '\x1b[0m'
        blue_underlined = ansi_style(color='blue', underline=True)
        ansi_encoded_text = '<%shttps://coloredlogs.readthedocs.io%s>' % (blue_underlined, reset_short_hand)
        expected_html = (
            '<code><<span style="color:#006FB8;text-decoration:underline">'
            '<a href="https://coloredlogs.readthedocs.io" style="color:inherit">'
            'https://coloredlogs.readthedocs.io'
            '</a></span>></code>'
        )
        self.assertEqual(expected_html, convert(ansi_encoded_text))

    def test_output_interception(self):
        """Test capturing of output from external commands."""
        expected_output = 'testing, 1, 2, 3 ..'
        actual_output = capture(['echo', expected_output])
        assert actual_output.strip() == expected_output.strip()

    def test_enable_colored_cron_mailer(self):
        """Test that automatic ANSI to HTML conversion when running under ``cron`` can be enabled."""
        with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/html'):
            with ColoredCronMailer() as mailer:
                assert mailer.is_enabled

    def test_disable_colored_cron_mailer(self):
        """Test that automatic ANSI to HTML conversion when running under ``cron`` can be disabled."""
        with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/plain'):
            with ColoredCronMailer() as mailer:
                assert not mailer.is_enabled

    def test_auto_install(self):
        """Test :func:`coloredlogs.auto_install()`."""
        needle = random_string()
        command_line = [sys.executable, '-c', 'import logging; logging.info(%r)' % needle]
        # Sanity check that log messages aren't enabled by default.
        with CaptureOutput() as capturer:
            os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'false'
            subprocess.check_call(command_line)
            output = capturer.get_text()
        assert needle not in output
        # Test that the $COLOREDLOGS_AUTO_INSTALL environment variable can be
        # used to automatically call coloredlogs.install() during initialization.
        with CaptureOutput() as capturer:
            os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'true'
            subprocess.check_call(command_line)
            output = capturer.get_text()
        assert needle in output

    def test_cli_demo(self):
        """Test the command line colored logging demonstration."""
        with CaptureOutput() as capturer:
            main('coloredlogs', '--demo')
            output = capturer.get_text()
        # Make sure the output contains all of the expected logging level names.
        for name in 'debug', 'info', 'warning', 'error', 'critical':
            assert name.upper() in output

    def test_cli_conversion(self):
        """Test the command line HTML conversion."""
        output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)
        # Make sure the output is encoded as HTML.
        assert '<span' in output

    def test_empty_conversion(self):
        """
        Test that conversion of empty output produces no HTML.

        This test was added because I found that ``coloredlogs --convert`` when
        used in a cron job could cause cron to send out what appeared to be
        empty emails. On more careful inspection the body of those emails was
        ``<code></code>``. By not emitting the wrapper element when no other
        HTML is generated, cron will not send out an email.
        """
        output = main('coloredlogs', '--convert', 'true', capture=True)
        assert not output.strip()

    def test_implicit_usage_message(self):
        """Test that the usage message is shown when no actions are given."""
        assert 'Usage:' in main('coloredlogs', capture=True)

    def test_explicit_usage_message(self):
        """Test that the usage message is shown when ``--help`` is given."""
        assert 'Usage:' in main('coloredlogs', '--help', capture=True)
def check_contents(filename, contents, match):
    """Assert that *filename* does (or doesn't, per *match*) contain a line with *contents*."""
    found = False
    with open(filename) as handle:
        for line in handle:
            if contents in line:
                found = True
                break
    assert found == match
def main(*arguments, **options):
    """Invoke the command line interface with *arguments*, optionally capturing stdout.

    When ``capture=True`` is given, returns the text written to standard
    output; otherwise returns None. ``sys.argv`` and ``sys.stdout`` are
    always restored afterwards.
    """
    should_capture = options.get('capture', False)
    original_argv, original_stdout = sys.argv, sys.stdout
    try:
        sys.argv = arguments
        if should_capture:
            sys.stdout = StringIO()
        coloredlogs.cli.main()
        return sys.stdout.getvalue() if should_capture else None
    finally:
        sys.argv, sys.stdout = original_argv, original_stdout
@contextlib.contextmanager
def mocked_colorama_module(init_function):
    """Context manager to ease testing of colorama integration.

    Installs a fake ``colorama`` module whose ``init()`` is *init_function*
    and forces coloredlogs to believe colorama is required; both are restored
    when the ``with`` block ends, even if the block raises.
    """
    import types  # stdlib replacement for `imp`, which was removed in Python 3.12
    module_name = 'colorama'
    # Create a fake module shadowing colorama.
    fake_module = types.ModuleType(module_name)
    setattr(fake_module, 'init', init_function)
    # Temporarily reconfigure coloredlogs to use colorama.
    need_colorama = coloredlogs.NEED_COLORAMA
    coloredlogs.NEED_COLORAMA = True
    # Install the fake colorama module.
    saved_module = sys.modules.get(module_name, None)
    sys.modules[module_name] = fake_module
    try:
        # We've finished setting up, yield control.
        yield
    finally:
        # Restore the original state even when the body raised an exception
        # (previously the cleanup was skipped in that case, leaking the mock).
        coloredlogs.NEED_COLORAMA = need_colorama
        # Clean up the mock module.
        if saved_module is not None:
            sys.modules[module_name] = saved_module
        else:
            sys.modules.pop(module_name, None)
@contextlib.contextmanager
def cleanup_handlers():
    """Context manager to cleanup output handlers."""
    try:
        # There's nothing to set up so we immediately yield control.
        yield
    finally:
        # After the with block ends we cleanup any output handlers. The
        # try/finally guarantees cleanup even when the body raises (e.g. a
        # failing assertion in a test), which the original version skipped.
        for match_func in match_stream_handler, match_syslog_handler:
            handler, logger = find_handler(logging.getLogger(), match_func)
            if handler and logger:
                logger.removeHandler(handler)
| 45.919866
| 118
| 0.653058
|
4a0c5f87edd01ade467c7f9e275ffd6861ee4041
| 852
|
py
|
Python
|
emoji_functions.py
|
TimGoebel/yolox_deepsort
|
284f5d493866d8c636b4ec7994bf5834fab0c219
|
[
"Apache-2.0"
] | null | null | null |
emoji_functions.py
|
TimGoebel/yolox_deepsort
|
284f5d493866d8c636b4ec7994bf5834fab0c219
|
[
"Apache-2.0"
] | null | null | null |
emoji_functions.py
|
TimGoebel/yolox_deepsort
|
284f5d493866d8c636b4ec7994bf5834fab0c219
|
[
"Apache-2.0"
] | null | null | null |
def vid_to_frames(path):
    """Decode the video/GIF at *path* and return all frames as a list.

    Each frame is an image array of shape (H, W, C) as produced by OpenCV.
    Fixes a resource leak: the original never released the VideoCapture.
    """
    frames = []
    cap = cv2.VideoCapture(path)
    try:
        ret = True
        while ret:
            ret, img = cap.read()  # read one frame from the 'capture' object; img is (H, W, C)
            if ret:
                frames.append(img)
    finally:
        cap.release()  # release the decoder / file handle
    return frames
# Overlaying Image and Emoji
def add_image(img, src2, x, y, w=80, h=80):
    """Overlay *src2* (e.g. an emoji frame) onto *img* at top-left corner (x, y).

    Pixels of *src2* whose channel values all fall in [0, 1] (i.e. near-black)
    are treated as transparent so the underlying image shows through.

    :param img: destination image; modified in place and also returned
    :param src2: overlay image; resized to fit the (w, h) region
    :param x: left edge of the overlay region in *img*
    :param y: top edge of the overlay region in *img*
    :param w: overlay width (default 80 preserves the original hard-coded size)
    :param h: overlay height (default 80 preserves the original hard-coded size)
    """
    initial = img[y:y + h, x:x + w]
    src1 = initial
    # cv2.resize takes (width, height); shape[1::-1] reverses (rows, cols).
    src2 = cv2.resize(src2, src1.shape[1::-1])
    # Build a mask of the near-black (transparent) overlay pixels.
    u_green = np.array([1, 1, 1])
    l_green = np.array([0, 0, 0])
    mask = cv2.inRange(src2, l_green, u_green)
    res = cv2.bitwise_and(src2, src2, mask=mask)
    # Zero out masked pixels, then fall back to the background where zero.
    f = src2 - res
    f = np.where(f == 0, src1, f)
    img[y:y + h, x:x + w] = f
    return img
# Pre-decoded emoji animations, keyed by detector class label. Decoding
# happens once at import time; each value is a list of frames.
emojidict = dict(
    car = vid_to_frames('assets/car.gif'),
    truck = vid_to_frames('assets/truck.gif'),
)
| 23.027027
| 90
| 0.557512
|
4a0c61825ea7e5cae3dbfc3d31c3cefede3c7ce5
| 43,689
|
py
|
Python
|
sss/negotiator.py
|
OA-DeepGreen/Simple-Sword-Server
|
fbce8d24e7c7b56a0140fd5cbabce2b58c830ee2
|
[
"BSD-3-Clause"
] | null | null | null |
sss/negotiator.py
|
OA-DeepGreen/Simple-Sword-Server
|
fbce8d24e7c7b56a0140fd5cbabce2b58c830ee2
|
[
"BSD-3-Clause"
] | null | null | null |
sss/negotiator.py
|
OA-DeepGreen/Simple-Sword-Server
|
fbce8d24e7c7b56a0140fd5cbabce2b58c830ee2
|
[
"BSD-3-Clause"
] | 1
|
2020-03-05T15:39:40.000Z
|
2020-03-05T15:39:40.000Z
|
"""
Negotiator
==========
Negotiator offers a framework for making content negotiation decisions
based on the HTTP accept headers.
NOTE it currently only formally supports Accept and Accept-Language,
but it is a short haul to support for Accept-Charset and
Accept-Encoding (TODO)
"""
import logging
log = logging.getLogger(__name__)
__version__ = "1.0.0"
# Objects used to represent aspects of Content Negotiation
###########################################################
class AcceptParameters(object):
"""
AcceptParameters represents all of the possible aspects of Content
Negotiation as a single object. It is used to represent a combination
of content type, language, encoding and charset which is either
explicitly supported by the server or requested by the client.
To create an AcceptParameters object, initialise with any of the
Conneg options:
AcceptParameters(content_type, language, encoding, charset)
(using unnamed parameters if using them in this order)
AcceptParameters(language=language, charset=charset)
(using named parameters if using partial and/or out of order parameters)
The content_type argument must be a ContentType object,
The language argument must be a Language object
The encoding argument must be a string
The charset argument must be a string
For example:
ap = AcceptParameters(ContentType("text/html"), Language("en"))
"""
def __init__(self, content_type=None, language=None, encoding=None, charset=None, packaging=None):
self.content_type = content_type
self.language = language
self.encoding = encoding
self.charset = charset
self.packaging = packaging
def matches(self, other, ignore_language_variants=False, as_client=True, packaging_wildcard=False):
"""
Do this set of AcceptParameters match the other set of AcceptParameters.
This is not the same as equivalence, especially if the ignore_language_variants
and as_client arguments are set.
ignore_language_variants will ensure that en matches en-gb, and so on
as_client will ensure that this object acts as a client parameter, and therefore
will implicitly ignore language variants
packaging_wildcard will allow the packaging parameter to be * in either or both cases and still match
"""
if other is None:
return False
ct_match = self.content_type.matches(other.content_type) if self.content_type is not None else True
e_match = self.encoding == other.encoding
c_match = self.charset == other.charset
p_match = False
if packaging_wildcard:
p_match = self.packaging is None or other.packaging is None or self.packaging == other.packaging
else:
p_match = self.packaging == other.packaging
l_match = self.language.matches(other.language, ignore_language_variants, as_client) if self.language is not None else True
return ct_match and l_match and e_match and c_match and p_match
def media_format(self):
"""
This provides a convenient method to canonically represent the accept
parameters using the language of media formats.
"""
params = ""
if self.content_type is not None:
params += "(type=\"" + str(self.content_type.mimetype()) + "\") "
if self.language is not None:
params += "(lang=\"" + str(self.language) + "\") "
if self.encoding is not None:
params += "(encoding=\"" + str(self.encoding) + "\") "
if self.charset is not None:
params += "(charset=\"" + str(self.charset) + "\") "
if self.packaging is not None:
params += "(packaging=\"" + str(self.packaging) + "\") "
mf = "(& " + params + ")"
return mf
def __eq__(self, other):
return self.media_format() == other.media_format()
def __str__(self):
s = "AcceptParameters:: "
if self.content_type is not None:
s += "Content Type: " + str(self.content_type) + ";"
if self.language is not None:
s += "Language: " + str(self.language) + ";"
if self.encoding is not None:
s += "Encoding: " + str(self.encoding) + ";"
if self.charset is not None:
s += "Charset: " + str(self.charset) + ";"
if self.packaging is not None:
s += "Packaging: " + str(self.packaging) + ";"
return s
def __repr__(self):
    # Reuse the human-readable form for debugging output.
    return str(self)
class Language(object):
    """
    Represents a language tag as used in content negotiation.

    A language has a main language part and an optional variant part,
    for example:
        en    - English
        en-gb - British English
    "*" acts as a wildcard which matches any language.
    """
    def __init__(self, range=None, language=None, variant=None):
        """
        This object can be initialised in 2 ways:

        1/ With a language range, containing the language and optionally
           the variant parts:
               lang = Language("en-us")
               lang = Language("cz")
        2/ With one or both of the language and variant specified
           separately:
               lang = Language(language="en", variant="gb")
               lang = Language(language="de")
        """
        if range is not None:
            self.language, self.variant = self._from_range(range)
        else:
            self.language = language
            self.variant = variant

    def matches(self, other, ignore_language_variants=False, as_client=True):
        """
        Determine whether this language is an acceptable match for *other*.
        This is looser than strict equality:

        - a wildcard ("*") on either side matches anything
        - with as_client=True (the default), a client with no variant
          (e.g. "en") accepts any server variant (e.g. "en-gb")
        - with ignore_language_variants=True, a client variant (e.g.
          "en-gb") also matches a server entry with no variant ("en")
        """
        if other is None:
            return False
        if self.language == "*" or other.language == "*":
            return True
        l_match = self.language == other.language
        v_match = self.variant == other.variant
        if as_client and self.variant is None and other.variant is not None:
            # client asked for the whole language; any server variant will do
            v_match = True
        elif as_client and self.variant is not None and other.variant is None:
            if ignore_language_variants:
                v_match = True
        return l_match and v_match

    def _from_range(self, range):
        """
        Split a language range into its (language, variant) parts.

        Only the first hyphen separates language from variant, so variants
        which themselves contain hyphens (e.g. "zh-min-nan") are preserved
        intact.  (Previously ranges with more than one hyphen fell through
        and implicitly returned None, crashing the tuple-unpack in
        __init__.)  Returns (language, None) when there is no variant.
        """
        parts = range.split("-", 1)
        if len(parts) == 1:
            return parts[0], None
        return parts[0], parts[1]

    def __eq__(self, other):
        # Equality is on the string form, so Language("en-gb") equals
        # Language(language="en", variant="gb").
        return str(self) == str(other)

    def __str__(self):
        s = str(self.language)
        if self.variant is not None:
            s += "-" + str(self.variant)
        return s

    def __repr__(self):
        return str(self)
class ContentType(object):
    """
    Represents a content type (mimetype) requested through content
    negotiation.
    """
    def __init__(self, mimetype=None, type=None, subtype=None, params=None):
        """
        There are 2 ways to instantiate this object:

        1/ With just the mimetype string:
               ct = ContentType("text/html")
               ct = ContentType("application/atom+xml;type=entry")
        2/ With the parts of the content type:
               ct = ContentType(type="application", subtype="atom+xml", params="type=entry")

        Properties:
            type    - main type of the content, e.g. "text" in text/html
            subtype - subtype of the content, e.g. "html" in text/html
            params  - mime parameter extension, e.g. "type=entry" in
                      application/atom+xml;type=entry (None when absent)
        """
        self.type = None
        self.subtype = None
        self.params = None
        if mimetype is not None:
            self.from_mimetype(mimetype)
        else:
            self.type = type
            self.subtype = subtype
            self.params = params

    def from_mimetype(self, mimetype):
        """
        Populate type/subtype/params from a mimetype string of the form
        <type>/<subtype>[;<params>].

        The string is split on the first ";" only, so parameter strings
        which themselves contain ";" survive intact.  (Previously such
        strings left type/subtype as None, crashing mimetype() later.)
        """
        parts = mimetype.split(";", 1)
        self.type, self.subtype = parts[0].split("/", 1)
        if len(parts) == 2:
            self.params = parts[1]

    def mimetype(self):
        """
        Render this content type back into its mimetype string form.
        """
        mt = self.type + "/" + self.subtype
        if self.params is not None:
            mt += ";" + self.params
        return mt

    def matches(self, other):
        """
        Determine whether this ContentType and *other* match.  This is
        full equality, or a wildcard ("*") on type and/or subtype from
        either side, e.g.:

            text/html matches */*
            text/html matches text/*
            text/html does not match image/*
        """
        if other is None:
            return False
        tmatch = self.type == "*" or other.type == "*" or self.type == other.type
        smatch = self.subtype == "*" or other.subtype == "*" or self.subtype == other.subtype
        # FIXME: mime is ambiguous as to whether omitting the params part is
        # the same as a wildcard.  For convenience we assume it is, otherwise
        # a request for */* would not match any content type with parameters.
        pmatch = self.params is None or other.params is None or self.params == other.params
        return tmatch and smatch and pmatch

    def __eq__(self, other):
        # Equality is on the canonical mimetype string form.
        return self.mimetype() == other.mimetype()

    def __str__(self):
        return self.mimetype()

    def __repr__(self):
        return str(self)
# Main Content Negotiation Objects
##################################
class ContentNegotiator(object):
"""
Class to manage content negotiation.
Basic Usage
-----------
# Import all the objects from the negotiator module
>>> from negotiator import ContentNegotiator, AcceptParameters, ContentType, Language
# Specify the default parameters. These are the parameters which will be used in
# place of any HTTP Accept headers which are not present in the negotiation request
# For example, if the Accept-Language header is not passed to the negotiator
# it will assume that the client request is for "en"
>>> default_params = AcceptParameters(ContentType("text/html"), Language("en"))
# Specify the list of acceptable formats that the server supports
>>> acceptable = [AcceptParameters(ContentType("text/html"), Language("en"))]
>>> acceptable.append(AcceptParameters(ContentType("text/json"), Language("en")))
# Create an instance of the negotiator, ready to accept negotiation requests
>>> cn = ContentNegotiator(default_params, acceptable)
# A simple negotiate on the HTTP Accept header "text/json;q=1.0, text/html;q=0.9",
# asking for json, and if not json then html
>>> acceptable = cn.negotiate(accept="text/json;q=1.0, text/html;q=0.9")
# The negotiator indicates that the best match the server can give to the
# client's request is text/json in english
>>> acceptable
AcceptParameters:: Content Type: text/json;Language: en;
Advanced Usage
--------------
# Import all the objects from the negotiator module
>>> from negotiator import ContentNegotiator, AcceptParameters, ContentType, Language
# Specify the default parameters. These are the parameters which will be used in
# place of any HTTP Accept headers which are not present in the negotiation request
# For example, if the Accept-Language header is not passed to the negotiator
# it will assume that the client request is for "en"
>>> default_params = AcceptParameters(ContentType("text/html"), Language("en"))
# Specify the list of acceptable formats that the server supports. For this
# advanced example we specify html, json and pdf in a variety of languages
>>> acceptable = [AcceptParameters(ContentType("text/html"), Language("en"))]
>>> acceptable.append(AcceptParameters(ContentType("text/html"), Language("fr")))
>>> acceptable.append(AcceptParameters(ContentType("text/html"), Language("de")))
>>> acceptable.append(AcceptParameters(ContentType("text/json"), Language("en")))
>>> acceptable.append(AcceptParameters(ContentType("text/json"), Language("cz")))
>>> acceptable.append(AcceptParameters(ContentType("application/pdf"), Language("de")))
# specify the weighting that the negotiator should apply to the different
# Accept headers. A higher weighting towards content type will prefer content
# type variations over language variations (e.g. if there are two formats
# which are equally acceptable to the client, in different languages, a
# content_type weight higher than a language weight will return the parameters
# according to the server's preferred content type.
>>> weights = {"content_type" : 1.0, "language" : 0.5}
# Create an instance of the negotiator, ready to accept negotiation requests
>>> cn = ContentNegotiator(default_params, acceptable, weights)
# set up some more complex accept headers (you can try modifying the order
# of the elements without q values, and the q values themselves, to see
# different results).
>>> accept = "text/html, text/json;q=1.0, application/pdf;q=0.5"
>>> accept_language = "en;q=0.5, de, cz, fr"
# negotiate over both headers, looking for an optimal solution to the client
# request
>>> acceptable = cn.negotiate(accept, accept_language)
# The negotiator indicates the best fit to the client request is text/html
# in german
>>> acceptable
AcceptParameters:: Content Type: text/html;Language: de;
"""
def __init__(self, default_accept_parameters=None, acceptable=[], weights=None, ignore_language_variants=False):
"""
There are 4 parameters which must be set in order to start content negotiation
- default_accept_parameters - the parameters to use when all or part of
the analysed accept headers is not present
- acceptable - What AcceptParameter objects are acceptable to
return (in order of preference)
- weights - the relative weights to apply to the different accept headers
- ignore_language_variants - whether the content negotiator should ignore language
variants overall
"""
self.acceptable = acceptable
self.default_accept_parameters = default_accept_parameters
self.weights = weights if weights is not None else {'content_type' : 1.0, 'language' : 1.0, 'charset' : 1.0, 'encoding' : 1.0, 'packaging' : 1.0}
self.ignore_language_variants = ignore_language_variants
if "content_type" not in self.weights:
self.weights["content_type"] = 1.0
if "language" not in self.weights:
self.weights["language"] = 1.0
if "charset" not in self.weights:
self.weights["charset"] = 1.0
if "encoding" not in self.weights:
self.weights["encoding"] = 1.0
if "packaging" not in self.weights:
self.weights["packaging"] = 1.0
def negotiate(self, accept=None, accept_language=None, accept_encoding=None, accept_charset=None, accept_packaging=None):
    """
    Main method for carrying out content negotiation over the supplied
    HTTP headers.

    Returns the preferred AcceptParameters as per the settings of the
    object, or None if no agreement could be reached.

    The arguments are the raw strings from the relevant HTTP headers:
    - accept - HTTP Header: Accept; e.g. "text/html;q=1.0, text/plain;q=0.4"
    - accept_language - HTTP Header: Accept-Language; e.g. "en, de;q=0.8"
    - accept_encoding - HTTP Header: Accept-Encoding; not currently supported in negotiation
    - accept_charset - HTTP Header: Accept-Charset; not currently supported in negotiation
    - accept_packaging - HTTP Header: Accept-Packaging (from SWORD 2.0); a URI only, no q values
    """
    if accept is None and accept_language is None and accept_encoding is None and accept_charset is None and accept_packaging is None:
        # no headers supplied at all, so just return the defaults
        return self.default_accept_parameters
    log.info("Accept: " + str(accept))
    log.info("Accept-Language: " + str(accept_language))
    log.info("Accept-Packaging: " + str(accept_packaging))
    # analyse each header into a dictionary keyed by q value, recording the
    # order of preference that the client has requested
    accept_analysed = self._analyse_accept(accept)
    lang_analysed = self._analyse_language(accept_language)
    encoding_analysed = self._analyse_encoding(accept_encoding)
    charset_analysed = self._analyse_charset(accept_charset)
    packaging_analysed = self._analyse_packaging(accept_packaging)
    log.info("Accept Analysed: " + str(accept_analysed))
    log.info("Language Analysed: " + str(lang_analysed))
    log.info("Packaging Analysed: " + str(packaging_analysed))
    # combine the per-header results into one weighted list of preferred accepts
    preferences = self._list_acceptable(self.weights, accept_analysed, lang_analysed, encoding_analysed, charset_analysed, packaging_analysed)
    log.info("Preference List: " + str(preferences))
    # cross reference the client preferences with the acceptable server formats
    accept_parameters = self._get_acceptable(preferences, self.acceptable)
    log.info("Acceptable: " + str(accept_parameters))
    # may be None, in which case the caller should respond 415 to the client
    return accept_parameters
def _list_acceptable(self, weights, content_types=None, languages=None, encodings=None, charsets=None, packaging=None):
    """
    Combine the analysed headers into a single dictionary of candidate
    AcceptParameters keyed by combined, weighted q value.

    Each header argument is a dictionary mapping q values to lists of
    analysed values, or None when the header was absent (in which case a
    single None placeholder with q 0.0 is substituted).  Every combination
    of values across the headers is generated; its combined q is the sum
    of the individual q values scaled by the supplied weights.
    """
    log.debug("Relative weights: " + str(weights))
    # substitute a neutral placeholder for any header that was not supplied
    if content_types is None:
        content_types = {0.0 : [None]}
    if languages is None:
        languages = {0.0 : [None]}
    if encodings is None:
        encodings = {0.0 : [None]}
    if charsets is None:
        charsets = {0.0 : [None]}
    if packaging is None:
        packaging = {0.0 : [None]}
    log.debug("Matrix of options:")
    log.debug("Content Types: " + str(content_types))
    log.debug("Languages: " + str(languages))
    log.debug("Encodings: " + str(encodings))
    log.debug("Charsets: " + str(charsets))
    log.debug("Packaging: " + str(packaging))
    unsorted = []
    # create an accept_parameter for every combination of fields.
    # FIXME: this is hideous, but recursive programming is making my head
    # hurt so screw it.
    for q1, vals1 in list(content_types.items()):
        for v1 in vals1:
            for q2, vals2 in list(languages.items()):
                for v2 in vals2:
                    for q3, vals3 in list(encodings.items()):
                        for v3 in vals3:
                            for q4, vals4 in list(charsets.items()):
                                for v4 in vals4:
                                    for q5, vals5 in list(packaging.items()):
                                        # combined q: weighted sum of the per-header q values
                                        wq = ((weights['content_type'] * q1) + (weights['language'] * q2) +
                                              (weights['encoding'] * q3) + (weights['charset'] * q4) +
                                              (weights['packaging'] * q5))
                                        for v5 in vals5:
                                            ap = AcceptParameters(v1, v2, v3, v4, v5)
                                            unsorted.append((ap, wq))
    sorted = self._sort_by_q(unsorted, 0.0)
    return sorted
def _analyse_packaging(self, accept):
if accept is None:
return None
# if the header is not none, then it should be a straightforward uri,
# with no q value, so our return is simple:
return {1.0 : [accept]}
def _analyse_encoding(self, accept):
    # Accept-Encoding negotiation is not currently supported; treat the
    # header as absent regardless of its value.
    return None
def _analyse_charset(self, accept):
    # Accept-Charset negotiation is not currently supported; treat the
    # header as absent regardless of its value.
    return None
def _analyse_language(self, accept):
    """
    Analyse the Accept-Language header and return a dictionary mapping q
    values to lists of Language objects, as produced by _sort_by_q.

    Entries without an explicit q are recorded with a negative position
    marker so that _sort_by_q can slot them in above the highest explicit
    q, preserving their original order.  Returns None when the header is
    absent.
    """
    if accept is None:
        return None
    weighted = []
    top_q = 0.0
    for position, field in enumerate(self._split_accept_header(accept), start=1):
        lang, sublang, q = self._interpret_accept_language_field(field, -1 * position)
        top_q = max(top_q, q)
        weighted.append((Language(language=lang, variant=sublang), q))
    # group the results by q value for the caller
    return self._sort_by_q(weighted, top_q)
def _analyse_accept(self, accept):
    """
    Analyse the Accept header string and return a structured dictionary
    of content types grouped by their common q values, e.g.:

        {
            1.0 : [<ContentType>, <ContentType>],
            0.8 : [<ContentType>],
            0.5 : [<ContentType>, <ContentType>]
        }

    Every content type is guaranteed a q value even if the header did not
    supply one: entries without an explicit q are recorded with a negative
    position marker and later assigned an inferred q above the highest
    explicit one (see _sort_by_q).  Returns None when the header is absent.
    """
    if accept is None:
        return None
    weighted = []
    top_q = 0.0
    # each comma-separated part is one content type with an optional q value
    for position, field in enumerate(self._split_accept_header(accept), start=1):
        # a missing q is recorded as -position so the original ordering of
        # unweighted entries is recoverable during sorting
        mimetype, params, q = self._interpret_accept_field(field, -1 * position)
        supertype, subtype = mimetype.split("/", 1)
        top_q = max(top_q, q)
        weighted.append((ContentType(type=supertype, subtype=subtype, params=params), q))
    # _sort_by_q places the entries without an explicit q into the gap
    # between the highest explicit q and 1.0
    return self._sort_by_q(weighted, top_q)
def _sort_by_q(self, unsorted, q_max):
    """
    Group a list of (value, q) tuples into a dictionary keyed by q value.

    Entries with q > 0 carried an explicit q in the header and are grouped
    as-is.  Entries with q <= 0 instead carry a position marker of
    -position (see the _interpret_* methods); they are assigned an
    inferred q in the gap between q_max (the highest explicit q) and 1.0,
    with earlier header positions landing closer to 1.0.
    """
    # sorted maps each q value to an array of values sharing that q
    # (the values within each array are in no particular order)
    sorted = {}
    for (value, q) in unsorted:
        if q > 0:
            # explicitly assigned in the Accept header - group it directly
            self.insert(sorted, q, value)
        else:
            # no explicit q: compute one in the (q_max, 1.0] range based on
            # the entry's original position in the header
            q_fraction = 1.0 / (-1.0 * q) # the fraction of the remaining spare q range that this entry gets
            qv = q_max + ((1.0 - q_max) * q_fraction) # scale that fraction into the range above q_max (this also handles q_max = 1.0 implicitly)
            self.insert(sorted, qv, value)
    return sorted
def _split_accept_header(self, accept):
return [a.strip() for a in accept.split(",")]
def _interpret_accept_language_field(self, accept, default_q):
components = accept.split(";")
lang = None
sublang = None
q = default_q
# the first part can be a language, or a language-sublanguage pair (like en, or en-gb)
langs = components[0].strip()
lang_parts = langs.split("-")
if len(lang_parts) == 1:
lang = lang_parts[0]
elif len(lang_parts) == 2:
lang = lang_parts[0]
sublang = lang_parts[1]
if len(components) == 2:
q = components[1].strip()[2:] # strip the "q=" from the start of the q value
return (lang, sublang, float(q))
def _interpret_accept_field(self, accept, default_q):
# the components of the part can be "type;params;q" "type;params", "type;q" or just "type"
components = accept.split(";")
# the first part is always the type (see above comment)
type = components[0].strip()
# create some default values for the other parts. If there is no params, we will use None, if there is
# no q we will use a negative number multiplied by the position in the list of this part. This allows us
# to later see the order in which the parts with no q value were listed, which is important
params = None
q = default_q
# There are then 3 possibilities remaining to check for: "type;q", "type;params" and "type;params;q"
# ("type" is already handled by the default cases set up above)
if len(components) == 2:
# "type;q" or "type;params"
if components[1].strip().startswith("q="):
# "type;q"
q = components[1].strip()[2:] # strip the "q=" from the start of the q value
else:
# "type;params"
params = components[1].strip()
elif len(components) == 3:
# "type;params;q"
params = components[1].strip()
q = components[1].strip()[2:] # strip the "q=" from the start of the q value
return (type, params, float(q))
def insert(self, d, q, v):
"""
Utility method: if dict d contains key q, then append value v to the array which is identified by that key
otherwise create a new key with the value of an array with a single value v
"""
if q in d:
d[q].append(v)
else:
d[q] = [v]
def _contains_match(self, source, target):
"""
Does the target list of AcceptParameters objects contain a match for the supplied source
Args:
- source: An AcceptParameters object which we want to see if it matches anything in the target
- target: A list of AcceptParameters objects to try to match the source against
Returns the matching AcceptParameters from the target list, or None if no such match
"""
for ap in target:
if source.matches(ap, ignore_language_variants=self.ignore_language_variants):
# matches are symmetrical, so source.matches(ap) == ap.matches(source) so way round is irrelevant
# we return the target's content type, as this is considered the definitive list of allowed
# content types, while the source may contain wildcards
return ap
return None
def _get_acceptable(self, client, server):
    """
    Take the client's analysed preferences (a dict of q value -> list of
    AcceptParameters) and the server's array of supported types (in order
    of server preference) and determine the most acceptable format.

    The client's preference always wins: the highest-q client option the
    server supports is returned, irrespective of the server's preference.
    Only when the client has no discernible preference between options
    (the same q value) is the server's preference used as a tie-break.

    Returns an AcceptParameters object representing the mutually
    acceptable content type, or None if no agreement could be reached.
    """
    log.info("Client: " + str(client))
    log.info("Server: " + str(server))
    # client preference keys sorted with the highest q first (the server
    # list should already be in order of preference)
    ckeys = list(client.keys())
    ckeys.sort(reverse=True)
    for q in ckeys:
        # for each q in order starting at the highest
        possibilities = client[q]
        allowable = []
        for p in possibilities:
            # find out whether possibility p matches anything the server
            # supports.  AcceptParameters.matches() takes wildcards such as
            # */* into account, and the concrete server-side entry is
            # returned, so the result contains no unintentional wildcards
            match = self._contains_match(p, server)
            if match is not None:
                # there is a match at this q value, register it
                allowable.append(match)
        log.info("Allowable: " + str(q) + ":" + str(allowable))
        # there are now 0, 1 or many allowable content types at this q value
        if len(allowable) == 0:
            # nothing at this q value; keep looking at the next one down
            continue
        elif len(allowable) == 1:
            # exactly one match, so that is the content type to use
            return allowable[0]
        else:
            # multiple supported content types at this q value, so fall back
            # to the server's order of preference
            for i in range(len(server)):
                # iterate through the server explicitly by numerical position
                if server[i] in allowable:
                    # the first server entry found in the allowable list is
                    # the highest-ranked allowable server content type
                    return server[i]
    # the client and server cannot agree on a content type to deliver
    return None
if __name__ == "__main__":
    # Demonstration suite showing the negotiator in action.  Each scenario
    # prints its description followed by the negotiated AcceptParameters
    # (or None when client and server cannot agree).  The scenarios were
    # previously ~40 copy-pasted five-line stanzas; they are now driven
    # through a single helper.

    def _demo(description, server, weights=None, ignore_language_variants=False, **negotiate_args):
        """Run one negotiation scenario against *server* and print the outcome."""
        print("+++ " + description + " +++")
        cn = ContentNegotiator(acceptable=server, weights=weights,
                               ignore_language_variants=ignore_language_variants)
        print("+++ " + str(cn.negotiate(**negotiate_args)) + " +++")

    print("========= CONTENT TYPE ==============")
    _demo("text/plain only",
          [AcceptParameters(ContentType("text/plain"))],
          accept="text/plain")
    _demo("application/atom+xml vs application/rdf+xml without q values",
          [AcceptParameters(ContentType("application/rdf+xml")),
           AcceptParameters(ContentType("application/atom+xml"))],
          accept="application/atom+xml, application/rdf+xml")
    _demo("application/atom+xml vs application/rdf+xml with q values",
          [AcceptParameters(ContentType("application/rdf+xml")),
           AcceptParameters(ContentType("application/atom+xml"))],
          accept="application/atom+xml;q=0.6, application/rdf+xml;q=0.9")
    _demo("application/atom+xml vs application/rdf+xml vs text/html with mixed q values",
          [AcceptParameters(ContentType("application/rdf+xml")),
           AcceptParameters(ContentType("application/atom+xml")),
           AcceptParameters(ContentType("text/html"))],
          accept="application/atom+xml;q=0.6, application/rdf+xml;q=0.9, text/html")
    _demo("text/plain only, unsupported by server",
          [AcceptParameters(ContentType("text/html"))],
          accept="text/plain")
    _demo("application/atom+xml vs application/rdf+xml vs text/html with mixed q values, most preferred unavailable",
          [AcceptParameters(ContentType("application/rdf+xml")),
           AcceptParameters(ContentType("application/atom+xml"))],
          accept="application/atom+xml;q=0.6, application/rdf+xml;q=0.9, text/html")
    _demo("application/atom+xml vs application/rdf+xml vs text/html with mixed q values, most preferred available",
          [AcceptParameters(ContentType("application/rdf+xml")),
           AcceptParameters(ContentType("text/html"))],
          accept="application/atom+xml;q=0.6, application/rdf+xml;q=0.9, text/html")
    _demo("application/atom+xml;type=feed supported by server",
          [AcceptParameters(ContentType("application/atom+xml;type=feed"))],
          accept="application/atom+xml;type=feed")
    _demo("image/* supported by server",
          [AcceptParameters(ContentType("text/plain")),
           AcceptParameters(ContentType("image/png")),
           AcceptParameters(ContentType("image/jpeg"))],
          accept="image/*")
    _demo("*/* supported by server",
          [AcceptParameters(ContentType("text/plain")),
           AcceptParameters(ContentType("image/png")),
           AcceptParameters(ContentType("image/jpeg"))],
          accept="*/*")
    print("====================================")
    print("==============LANGUAGE==============")
    _demo("en only",
          [AcceptParameters(language=Language("en"))],
          accept_language="en")
    _demo("en vs de without q values",
          [AcceptParameters(language=Language("en")),
           AcceptParameters(language=Language("de"))],
          accept_language="en, de")
    _demo("fr vs no with q values",
          [AcceptParameters(language=Language("fr")),
           AcceptParameters(language=Language("no"))],
          accept_language="fr;q=0.7, no;q=0.8")
    _demo("en vs de vs fr with mixed q values",
          [AcceptParameters(language=Language("en")),
           AcceptParameters(language=Language("de")),
           AcceptParameters(language=Language("fr"))],
          accept_language="en;q=0.6, de;q=0.9, fr")
    _demo("en only, unsupported by server",
          [AcceptParameters(language=Language("de"))],
          accept_language="en")
    _demo("en vs no vs de with mixed q values, most preferred unavailable",
          [AcceptParameters(language=Language("en")),
           AcceptParameters(language=Language("no"))],
          accept_language="en;q=0.6, no;q=0.9, de")
    _demo("en vs no vs de with mixed q values, most preferred available",
          [AcceptParameters(language=Language("no")),
           AcceptParameters(language=Language("de"))],
          accept_language="en;q=0.6, no;q=0.9, de")
    _demo("en-gb supported by server",
          [AcceptParameters(language=Language("en-gb"))],
          accept_language="en-gb")
    _demo("en-gb, unsupported by server",
          [AcceptParameters(language=Language("en"))],
          accept_language="en-gb")
    _demo("en-gb, supported by server through language variants",
          [AcceptParameters(language=Language("en"))],
          ignore_language_variants=True,
          accept_language="en-gb")
    _demo("en, partially supported by server",
          [AcceptParameters(language=Language("en-gb"))],
          accept_language="en")
    _demo("* by itself",
          [AcceptParameters(language=Language("no")),
           AcceptParameters(language=Language("de"))],
          accept_language="*")
    _demo("* with other options, primary option unsupported",
          [AcceptParameters(language=Language("no")),
           AcceptParameters(language=Language("de"))],
          accept_language="en, *")
    _demo("* with other options, primary option supported",
          [AcceptParameters(language=Language("en")),
           AcceptParameters(language=Language("de"))],
          accept_language="en, *")
    print("====================================")
    print("======LANGUAGE+CONTENT TYPE=========")
    _demo("content type and language specified",
          [AcceptParameters(ContentType("text/html"), Language("en"))],
          accept="text/html", accept_language="en")
    _demo("2 content types and one language specified",
          [AcceptParameters(ContentType("text/html"), Language("de")),
           AcceptParameters(ContentType("text/plain"), Language("en"))],
          accept="text/html, text/plain", accept_language="en")
    _demo("2 content types and 2 languages specified",
          [AcceptParameters(ContentType("text/html"), Language("de")),
           AcceptParameters(ContentType("text/plain"), Language("en"))],
          accept="text/html, text/plain", accept_language="en, de")
    _demo("2 content types and one language specified, with weights",
          [AcceptParameters(ContentType("text/html"), Language("de")),
           AcceptParameters(ContentType("text/plain"), Language("en"))],
          weights={'content_type': 2.0, 'language': 1.0, 'charset': 1.0, 'encoding': 1.0},
          accept="text/html, text/plain", accept_language="en")
| 45.55683
| 190
| 0.618829
|
4a0c61b189b602c3af1cdc5a56928e1f1125da65
| 335
|
py
|
Python
|
config.py
|
arthurBondarenko/flask_intro
|
8b308945c855fbe746df558db36c4b48e3f0fbff
|
[
"MIT"
] | null | null | null |
config.py
|
arthurBondarenko/flask_intro
|
8b308945c855fbe746df558db36c4b48e3f0fbff
|
[
"MIT"
] | null | null | null |
config.py
|
arthurBondarenko/flask_intro
|
8b308945c855fbe746df558db36c4b48e3f0fbff
|
[
"MIT"
] | null | null | null |
import os
class Config:
    """Flask application settings, read from environment variables.

    Secrets (SECRET_KEY, database URI, mail credentials) come from the
    environment so they are never committed to source control; the mail
    server/port/TLS settings are fixed for Gmail SMTP.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
    # Gmail SMTP submission endpoint (STARTTLS on port 587).
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('EMAIL_USER')
    MAIL_PASSWORD = os.environ.get('EMAIL_PASS')
| 30.454545
| 72
| 0.701493
|
4a0c61e41a0a066a244eac377988c65b148e717f
| 8,466
|
py
|
Python
|
scraper/page_getters.py
|
Edward-TL/vsbuy_backend
|
e6b3e71d6c0e6b253707489d70d951400acac451
|
[
"MIT"
] | null | null | null |
scraper/page_getters.py
|
Edward-TL/vsbuy_backend
|
e6b3e71d6c0e6b253707489d70d951400acac451
|
[
"MIT"
] | 1
|
2020-10-05T01:27:02.000Z
|
2020-10-05T01:27:02.000Z
|
scraper/page_getters.py
|
Edward-TL/vsbuy_backend
|
e6b3e71d6c0e6b253707489d70d951400acac451
|
[
"MIT"
] | 1
|
2020-10-05T01:21:59.000Z
|
2020-10-05T01:21:59.000Z
|
from scrape_funcs import search_boxes
from data import Page
import re
def get_names(boxes_array, Page, position=None):
    """Extract product names from an array of parsed result boxes.

    boxes_array -- parsed HTML boxes, one per product.
    Page -- page descriptor; ``names_get`` is the <img> attribute carrying
            the name and ``name_and_images`` the search specification.
    position -- if given, return only the name at that index (a single
                value); otherwise return a list aligned with boxes_array
                (entries stay None where no match is found).
    """
    names = [None]*len(boxes_array)
    name = Page.names_get
    # If you want the info of one specific product by its position on the
    # page (e.g. you already know the position of the cheapest one).
    if position is not None:
        searcher = search_boxes(boxes_array[position], Page.name_and_images)
        if searcher:
            if Page.name == 'Best Buy':
                # Best Buy embeds "<name> - <detail>" in the attribute.
                # NOTE(review): the batch branch below splits on ' -' — confirm
                # which separator Best Buy actually emits.
                image_name = searcher[0].img.get(name)
                name_split = image_name.split(' - ')
                # Guard names without the separator (previously IndexError).
                if len(name_split) > 1:
                    names = name_split[1]
                else:
                    names = name_split[0].lstrip()
            else:
                names = searcher[0].img.get(name)
    else:
        # Obtain the name for every box.
        for b, box in enumerate(boxes_array):
            searcher = search_boxes(box, Page.name_and_images)
            if searcher:
                if Page.name == 'Best Buy':
                    image_name = searcher[0].img.get(name)
                    name_split = image_name.split(' -')
                    if len(name_split) > 1:
                        names[b] = name_split[1].lstrip()
                    if len(name_split) == 1:
                        names[b] = name_split[0].lstrip()
                else:
                    names[b] = searcher[0].img.get(name)
    return names
def get_images(boxes_array, Page, test_all=False, test_len=False, position=None):
    """Extract product image URLs from an array of parsed result boxes.

    Returns a list aligned with boxes_array, or a single URL when
    ``position`` is given.

    Bug fix: ``position`` is compared against None so that index 0 is a
    valid position (previously ``if position:`` treated 0 as "not given").
    """
    images = [None]*len(boxes_array)
    image = Page.images_get
    # If you want the info of one specific product by its position on the
    # page (e.g. you already know the position of the cheapest one).
    if position is not None:
        searcher = search_boxes(boxes_array[position], Page.name_and_images)
        if searcher:
            images = searcher[0].img.get(image)
    else:
        # Obtain the image URL for every box.
        for b, box in enumerate(boxes_array):
            searcher = search_boxes(box, Page.name_and_images)
            if searcher:
                images[b] = searcher[0].img.get(image)
    return images
def get_products_urls(boxes_array, Page, test_all=False, test_len=False, position=None):
    """Extract product page URLs from an array of parsed result boxes.

    Amazon boxes carry a relative href, so the host is prepended; other
    pages expose an absolute URL on the box's first <a> tag.

    Bug fixes:
    - ``position`` is compared against None so index 0 is usable.
    - the diagnostic message now actually interpolates ``searcher[0]`` and
      its ``a`` tag (the placeholders were previously emitted literally).
    - the bare ``except:`` is narrowed to ``Exception``.
    """
    urls = [None]*len(boxes_array)
    url = Page.url_get
    # If you want the info of one specific product by its position on the
    # page (e.g. you already know the position of the cheapest one).
    if position is not None:
        searcher = search_boxes(boxes_array[position], Page.product_urls)
        if searcher:
            try:
                if Page.name == 'Amazon':
                    source_url = searcher[0].get(url)
                    position_url = 'https://www.amazon.com.mx' + source_url
                else:
                    position_url = searcher[0].a.get(url)
                urls = position_url
            except Exception:
                error_message = f'''Value info:
                Searcher: {searcher}
                searcher[0]: {searcher[0]}
                searcher[0].a: {getattr(searcher[0], 'a', '<missing>')}
                url: {Page.url_get}
                position: {position}
                '''
                raise ValueError(error_message)
    else:
        # Obtain the URL for every box.
        for b, box in enumerate(boxes_array):
            searcher = search_boxes(box, Page.product_urls)
            if searcher:
                if Page.name == 'Amazon':
                    source_url = searcher[0].get(url)
                    urls[b] = 'https://www.amazon.com.mx' + source_url
                else:
                    urls[b] = searcher[0].a.get(url)
    return urls
def get_price(country, boxes_array, Page, test_all=False, test_len=False, position=None):
    """Extract product prices from an array of parsed result boxes.

    Prices are parsed according to the per-country money formatting rules in
    ``Page.money_dict`` (coin symbol, thousands/decimal separators, and the
    separator used when a box shows two prices).

    Bug fixes:
    - ``position`` is compared against None so index 0 is usable.
    - the bare ``except:`` is narrowed to ``Exception``.
    - string-based type checks replaced with ``isinstance``.
    """
    price = [None]*len(boxes_array)
    coin_symbol = Page.money_dict[country]['coin']
    k_sep = Page.money_dict[country]['thousands']
    d_sep = Page.money_dict[country]['decimal']
    tps = Page.money_dict[country]['two_prices_sep']
    price_string = 'start'
    # If you want the info of one specific product by its position on the
    # page (e.g. you already know the position of the cheapest one).
    if position is not None:
        searcher = search_boxes(boxes_array[position], Page.price)
        if searcher:
            try:
                price_string = searcher[0].get_text().split(tps)
                price_string = price_string[0].replace(coin_symbol,'').replace(k_sep,'').replace(d_sep,'.')
                # Special case: keep only the numeric part.
                price_string = re.findall(r'(\d+\.\d+)', price_string)
                # NOTE(review): non-Ebay prices are divided by 22 — presumably a
                # hard-coded USD->MXN conversion rate; confirm and consider
                # parameterizing it.
                if Page.name != 'Ebay':
                    price = round(float(price_string[0])/22, 2)
                else:
                    price = float(price_string[0])
            except Exception:
                error_message = f'''String index out of range.
                Money dictionary: {Page.money_dict}
                Original String: {searcher[0].get_text()}
                Box #{position}'''
                raise ValueError(error_message)
    else:
        # Obtain the price for every box.
        for b, box in enumerate(boxes_array):
            searcher = search_boxes(box, Page.price)
            if searcher:
                # NOTE(review): only 'mx' is handled here; other countries
                # leave the entry as None — confirm this is intentional.
                if country == 'mx':
                    price_string = searcher[0].get_text().split(tps)
                    price_string_bfre = price_string[0].replace(coin_symbol,'').replace(k_sep,'').replace(d_sep,'.')
                    # Ebay's special case: strip non-breaking spaces.
                    price_string_bfre = price_string_bfre.replace(u'\xa0',u'')
                    # Just in case, keep only the numeric part.
                    price_string_check = re.findall(rf"(\d+\.?\d+)", price_string_bfre)
                    if Page.name != 'Ebay':
                        price[b] = round(float(price_string_check[0])/22, 2)
                    else:
                        if isinstance(price_string_check, list) and len(price_string_check) > 0:
                            string_search = price_string_check[0]
                        else:
                            string_search = price_string_check
                        if isinstance(string_search, str):
                            price[b] = float(string_search)
    return price
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Info that can be obtained for some pages, but are not a standard for all
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_stars(boxes_array, Page, country='mx', test_all=False, test_len=False, position=None):
    """Extract star ratings, one per box, aligned with boxes_array.

    The first three characters of the matched text are parsed as a float;
    when the locale's decimal separator is not '.', it is stripped first.
    Unmatched boxes keep None.
    """
    stars = [None] * len(boxes_array)
    decimal_sep = Page.money_dict[country]['decimal']
    for idx, box in enumerate(boxes_array):
        # Each box is itself an array of tags to search through.
        found = search_boxes(box, Page.stars)
        if not found:
            continue
        raw = found[0].get_text()[:3]
        if decimal_sep == '.':
            stars[idx] = float(raw)
        else:
            stars[idx] = float(raw.replace(decimal_sep, ''))
    return stars
def get_reviews(boxes_array, Page, country='mx', test_all=False, test_len=False, position=None):
    """Extract review counts, one per box, aligned with boxes_array.

    Thousands separators are stripped before the int() conversion; entries
    stay None when a box has no review count or the text is not numeric.

    Bug fix: the bare ``except: pass`` (which silently swallowed *every*
    error, including KeyboardInterrupt) is narrowed to ValueError, the only
    failure mode of the int() conversion.
    """
    reviews = [None]*len(boxes_array)
    comma_sep = Page.money_dict[country]['thousands']
    for b, box in enumerate(boxes_array):
        # Each box is itself an array of tags to search through.
        searcher = search_boxes(box, Page.reviews)
        if searcher:
            # Some pages match more than once; keep only the first hit.
            if len(searcher) > 1:
                searcher = [searcher[0]]
            try:
                reviews[b] = int(searcher[0].get_text().replace(comma_sep,''))
            except ValueError:
                # Non-numeric text (e.g. "New") — leave the entry as None.
                pass
    return reviews
# The name saids all
def amazon_products_id(boxes_array, test=False):
    """Build amazon.com.mx product URLs from each box's ``data-asin``.

    Returns a list aligned with boxes_array; entries are None for falsy
    boxes or boxes without a ``data-asin`` value.

    Bug fix: the index previously only advanced for truthy boxes, so a
    falsy box shifted every following URL one slot to the left. The index
    now always tracks the box position. Also guards against a missing
    ``data-asin`` (previously a TypeError on ``str + None``).
    """
    ids = [None] * len(boxes_array)
    for b, box in enumerate(boxes_array):
        if box:
            product_id = box.get('data-asin')
            if product_id:
                ids[b] = 'www.amazon.com.mx/dp/' + product_id
    return ids
| 38.135135
| 117
| 0.515828
|
4a0c624b3c0ea2053448e91a270165c78179fc55
| 9,592
|
py
|
Python
|
engine.py
|
dvd42/detr
|
08b9854d527441164e8ce083f024e86e5bccb008
|
[
"Apache-2.0"
] | null | null | null |
engine.py
|
dvd42/detr
|
08b9854d527441164e8ce083f024e86e5bccb008
|
[
"Apache-2.0"
] | null | null | null |
engine.py
|
dvd42/detr
|
08b9854d527441164e8ce083f024e86e5bccb008
|
[
"Apache-2.0"
] | 2
|
2020-10-20T08:04:44.000Z
|
2021-01-10T09:56:24.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import random
import os
import sys
from typing import Iterable
import wandb
import torch
import util.misc as utils
from util.io import create_wandb_img
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
LOG_IDX = [] # Global list to make image logging consistent over epochs
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Train `model` for one epoch over `data_loader`.

    The criterion returns a dict of named losses; the total loss is their
    sum weighted by `criterion.weight_dict`. Gradients are clipped to
    `max_norm` when it is > 0. Returns a dict of epoch-averaged metrics
    (synchronized across processes in distributed runs).
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total loss for backprop: weighted sum of the criterion's terms.
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort on NaN/inf loss rather than keep training on bad gradients.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir, log_step=0):
    """Evaluate the model over `data_loader` and compute COCO (and, when
    configured, panoptic) metrics.

    On every 50th call (and on the first, as judged from `log_step`) a
    fixed sample of predictions plus encoder/decoder attention maps is
    rendered and logged to Weights & Biases. Returns
    (stats dict, CocoEvaluator).
    """
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )
    dataset = data_loader.dataset
    # Map category id -> human-readable class name for the logged images.
    classes = {cat["id"]: cat["name"] for cat in dataset.coco.dataset["categories"]}
    wandb_imgs = {"images": [], "self_attention": [], "attention": []}
    # Log every 50 steps and in step 0
    log_this = output_dir and utils.is_main_process() and ((log_step + 1) % 50 == 0 or log_step == 0)
    conv_features, enc_attn_weights, dec_attn_weights = [], [], []
    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        log_image = False
        if log_this:
            # LOG_IDX is a module-level cache so the *same* 15 images are
            # logged across epochs; until it fills, images with more than
            # 3 objects are sampled with probability 0.3.
            if len(LOG_IDX) == 15:
                if targets[0]["image_id"] in LOG_IDX:
                    log_image = True
            elif random.random() < 0.3 and len(targets[0]["labels"].tolist()) > 3:
                LOG_IDX.append(targets[0]["image_id"])
                log_image = True
        if log_image:
            # Taken from https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/detr_attention.ipynb
            hooks = [
                model.module.backbone[-2].register_forward_hook(
                    lambda self, input, output: conv_features.append(output)
                ),
                model.module.transformer.encoder.layers[-1].self_attn.register_forward_hook(
                    lambda self, input, output: enc_attn_weights.append(output[1])
                ),
                model.module.transformer.decoder.layers[-1].multihead_attn.register_forward_hook(
                    lambda self, input, output: dec_attn_weights.append(output[1])
                ),
            ]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        # Gather images to log to wandb
        if log_image:
            # get the HxW shape of the feature maps of the CNN
            f_map = conv_features[-1]['0'].tensors.cpu()
            shape = f_map.shape[-2:]
            sattn = enc_attn_weights[-1][0].reshape(shape + shape).cpu()
            dec_att = dec_attn_weights[-1].cpu()
            target = targets[0]
            logits = outputs["pred_logits"][0]
            boxes = outputs["pred_boxes"][0]
            pred = {"pred_logits": logits, "pred_boxes": boxes}
            name = dataset.coco.imgs[target["image_id"].item()]["file_name"]
            path = os.path.join(dataset.root, name)
            img, self_attention, att_map = create_wandb_img(classes, path, target, pred, sattn, f_map, dec_att)
            wandb_imgs["images"].append(img)
            wandb_imgs["self_attention"].append(self_attention)
            wandb_imgs["attention"].append(att_map)
            # Free memory
            del conv_features[-1]
            del enc_attn_weights[-1]
            # Detach the forward hooks so later batches are unaffected.
            for hook in hooks:
                hook.remove()
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
        if panoptic_evaluator is not None:
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name
            panoptic_evaluator.update(res_pano)
    # Log all images to wandb
    if log_this:
        wandb.log({"Images": wandb_imgs["images"]}, step=log_step)
        wandb.log({"Self Attention": wandb_imgs["self_attention"]}, step=log_step)
        wandb.log({"Attention": wandb_imgs["attention"]}, step=log_step)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
| 42.255507
| 133
| 0.635113
|
4a0c6398b24a0691cc03d510952090b45b49adee
| 5,299
|
py
|
Python
|
tests/integ/sagemaker/lineage/test_artifact.py
|
longyuzhao/sagemaker-python-sdk
|
5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f
|
[
"Apache-2.0"
] | 1,690
|
2017-11-29T20:13:37.000Z
|
2022-03-31T12:58:11.000Z
|
tests/integ/sagemaker/lineage/test_artifact.py
|
longyuzhao/sagemaker-python-sdk
|
5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f
|
[
"Apache-2.0"
] | 2,762
|
2017-12-04T05:18:03.000Z
|
2022-03-31T23:40:11.000Z
|
tests/integ/sagemaker/lineage/test_artifact.py
|
longyuzhao/sagemaker-python-sdk
|
5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f
|
[
"Apache-2.0"
] | 961
|
2017-11-30T16:44:03.000Z
|
2022-03-30T23:12:09.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains code to test SageMaker ``Artifacts``"""
from __future__ import absolute_import
import datetime
import logging
import time
import pytest
from sagemaker.lineage import artifact
from tests.integ.sagemaker.lineage.helpers import retry
def test_create_delete(artifact_obj):
    """Smoke test: the fixture creates (and on teardown deletes) an artifact."""
    # fixture does create and then delete, this test ensures it happens at least once
    assert artifact_obj.artifact_arn
def test_create_delete_with_association(artifact_obj_with_association):
    """Smoke test: create/delete works for an artifact with an association."""
    # fixture does create and then delete, this test ensures it happens at least once
    assert artifact_obj_with_association.artifact_arn
def test_save(artifact_obj, sagemaker_session):
    """Saving should persist property additions and removals server-side."""
    artifact_obj.properties = {"k3": "v3"}
    artifact_obj.properties_to_remove = ["k1"]
    artifact_obj.save()
    # Reload from the service to verify the mutation round-tripped.
    loaded = artifact.Artifact.load(
        artifact_arn=artifact_obj.artifact_arn, sagemaker_session=sagemaker_session
    )
    assert {"k3": "v3"} == loaded.properties
def test_load(artifact_obj, sagemaker_session):
    """Loading by ARN should return the same artifact the fixture created."""
    assert artifact_obj.artifact_name
    logging.info(f"loading {artifact_obj.artifact_name}")
    loaded = artifact.Artifact.load(
        artifact_arn=artifact_obj.artifact_arn, sagemaker_session=sagemaker_session
    )
    assert artifact_obj.artifact_arn == loaded.artifact_arn
def test_list(artifact_objs, sagemaker_session):
    """Listing by creation time should return the fixture artifacts in order,
    for both ascending and descending sort orders."""
    # One-minute slack around "now" to absorb clock skew with the service.
    slack = datetime.timedelta(minutes=1)
    now = datetime.datetime.now(datetime.timezone.utc)
    artifact_names = [art.artifact_name for art in artifact_objs]
    for sort_order in ["Ascending", "Descending"]:
        # Keep only our fixtures; other tests may create artifacts too.
        artifact_names_listed = [
            artifact_listed.artifact_name
            for artifact_listed in artifact.Artifact.list(
                created_after=now - slack,
                created_before=now + slack,
                sort_by="CreationTime",
                sort_order=sort_order,
                sagemaker_session=sagemaker_session,
            )
            if artifact_listed.artifact_name in artifact_names
        ]
        # Reverse the descending listing so both orders compare against the
        # same creation-ordered reference list.
        if sort_order == "Descending":
            artifact_names_listed = artifact_names_listed[::-1]
        assert artifact_names == artifact_names_listed
        # sanity check
        assert artifact_names
def test_list_by_type(artifact_objs, sagemaker_session):
    """Filtering the listing by artifact_type should return only the one
    fixture artifact created with that type."""
    slack = datetime.timedelta(minutes=1)
    now = datetime.datetime.now(datetime.timezone.utc)
    # Exactly one fixture is created with type SDKIntegrationTestType2.
    expected_name = list(
        filter(lambda x: x.artifact_type == "SDKIntegrationTestType2", artifact_objs)
    )[0].artifact_name
    artifact_names = [art.artifact_name for art in artifact_objs]
    artifact_names_listed = [
        artifact_listed.artifact_name
        for artifact_listed in artifact.Artifact.list(
            created_after=now - slack,
            artifact_type="SDKIntegrationTestType2",
            sagemaker_session=sagemaker_session,
        )
        if artifact_listed.artifact_name in artifact_names
    ]
    assert len(artifact_names_listed) == 1
    assert artifact_names_listed[0] == expected_name
def test_downstream_trials(trial_associated_artifact, trial_obj, sagemaker_session):
    """The associated trial should appear in the artifact's downstream trials
    once the service has indexed the trial components."""
    # allow trial components to index, 30 seconds max
    def validate():
        # Poll up to 3 times, 10s apart, for eventual consistency.
        for i in range(3):
            time.sleep(10)
            trials = trial_associated_artifact.downstream_trials(
                sagemaker_session=sagemaker_session
            )
            logging.info(f"Found {len(trials)} downstream trials.")
            if len(trials) > 0:
                break
        assert len(trials) == 1
        assert trial_obj.trial_name in trials
    retry(validate, num_attempts=3)
@pytest.mark.timeout(30)
def test_tag(artifact_obj, sagemaker_session):
    """A single tag set on the artifact should become visible via list_tags."""
    tag = {"Key": "foo", "Value": "bar"}
    artifact_obj.set_tag(tag)
    # Poll until the tag propagates; the pytest timeout bounds the loop.
    while True:
        actual_tags = sagemaker_session.sagemaker_client.list_tags(
            ResourceArn=artifact_obj.artifact_arn
        )["Tags"]
        if actual_tags:
            break
        time.sleep(5)
    # When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
    # length of actual tags will be greater than 1
    assert len(actual_tags) > 0
    assert actual_tags[0] == tag
@pytest.mark.timeout(30)
def test_tags(artifact_obj, sagemaker_session):
    """A list of tags set on the artifact should become visible via list_tags."""
    tags = [{"Key": "foo1", "Value": "bar1"}]
    artifact_obj.set_tags(tags)
    # Poll until the tags propagate; the pytest timeout bounds the loop.
    while True:
        actual_tags = sagemaker_session.sagemaker_client.list_tags(
            ResourceArn=artifact_obj.artifact_arn
        )["Tags"]
        if actual_tags:
            break
        time.sleep(5)
    # When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
    # length of actual tags will be greater than 1
    assert len(actual_tags) > 0
    assert [actual_tags[-1]] == tags
| 33.751592
| 92
| 0.701076
|
4a0c641012e864dc72e1e4517bf34d7a7da4d926
| 6,630
|
py
|
Python
|
sounds.py
|
AllanRamsay/ACCENTS
|
4cc2e2fac83c317188b4d6950b449110318f27ac
|
[
"MIT"
] | null | null | null |
sounds.py
|
AllanRamsay/ACCENTS
|
4cc2e2fac83c317188b4d6950b449110318f27ac
|
[
"MIT"
] | null | null | null |
sounds.py
|
AllanRamsay/ACCENTS
|
4cc2e2fac83c317188b4d6950b449110318f27ac
|
[
"MIT"
] | null | null | null |
from useful import *
import wave
import struct
from numpy.fft import rfft, irfft
try:
import pylab
except:
print "You won't be able to plot graphs, but the other stuff will work"
import numpy as pylab
import math
from play import play
"""
Represent the content of a .wav file as an object: the frames is a
string of 8-bit ASCII characters. That's horrible to work with, so we
convert it to an array of int-8s when we read it in, which we keep in
the signal, and then setframes will convert this array of ints back
into an ASCII string. You should do all your work on the signal, and
only convert it back to a string when you're about to save it (or play
it).
"""
class SOUND():
    """A .wav file's contents: `signal` is an array of 16-bit samples and
    `frames` the same data packed as a byte string for the wave module.
    `params` follows wave.setparams: (nchannels, sampwidth, framerate,
    nframes, comptype, compname). Python 2 code."""
    def __init__(self, signal=None, name="sound", frames=None, params=None, start=None, end=None):
        """Build from a sample array; start/end (seconds) crop the frames.

        NOTE(review): if `signal` is None and `frames` is None, setframes
        below reads self.signal before it is ever assigned — confirm callers
        always pass one of the two.
        """
        if not signal is None:
            self.signal = signal
        self.name = name
        if not frames is None:
            self.frames = frames
        else:
            self.setframes(0, len(self.signal))
        if params is None:
            # Defaults: mono, 2-byte samples, 44.1 kHz, uncompressed.
            self.params = [1, 2, 44100, len(self.frames), 'NONE', 'not compressed']
        else:
            self.params = params
        if not start is None and not end is None:
            # Convert seconds to frame indices using the sample rate.
            start = int(start*self.params[2])
            end = int(end*self.params[2])
            self.setframes(start, end)
    def __repr__(self):
        return "SOUND(%s, %s)"%(self.name, self.signal)
    def normalise(self, n=60, dtype="int8"):
        """Return the signal shifted to start at 0 and scaled to max `n`."""
        signal = pylab.array(self.signal)-(pylab.ones(len(self.signal))*min(self.signal))
        mx = max(signal)
        return pylab.array(map(lambda x: n*x/float(mx), signal), dtype=dtype)
    def setframes(self, start, end):
        """Pack signal[start:end] into 16-bit frames for the wave module."""
        self.frames = struct.pack("%sh"%(end-start), *(self.signal[start:end]))
    def save(self, wavfile="temp.wav", start=None, end=None):
        """Write the frames (packing them from the signal if needed) to disk."""
        # self.params[3] = len(self.signal)/2
        w = wave.open(wavfile, "w")
        w.setparams(self.params)
        # w.setnchannels(1)
        if not self.frames:
            if start is None:
                start = 0
            if end is None:
                end = len(self.signal)
            self.setframes(start, min(end, len(self.signal)))
        w.writeframes(self.frames)
        w.close()
    def play(self):
        """Save to <name>.wav and play it via the external play helper."""
        self.save("%s.wav"%(self.name))
        play("%s.wav"%(self.name))
    def plot(self, show=True, save=False, N=False):
        """Plot the (0-255 normalised) signal; optionally only the first N
        samples, optionally saving to <name>.eps."""
        signal = self.normalise(n=255, dtype="float")
        if N:
            signal = signal[:N]
            print signal[:N]
        ymin, ymax = min(signal), max(signal)
        pylab.ylim(min(ymin-1, int(-0.1*ymin)), max(ymax+1, int(1.1*ymax)))
        pylab.plot(pylab.linspace(0, len(signal), len(signal)), signal)
        if save:
            pylab.savefig("%s.eps"%(self.name))
        if show:
            pylab.show()
def readsound(wavfile="sound1.wav", start=None, end=None):
    """Read a .wav file into a SOUND object; start/end (seconds) crop it."""
    w = wave.open(wavfile, "r")
    params = list(w.getparams())
    f = w.readframes(w.getnframes())
    w.close()
    # Each frame is one 16-bit sample ('h'), hence len(f)/2 samples.
    s = SOUND(pylab.array(struct.unpack('h'*(len(f)/2), f)),
              frames=f,
              name=wavfile,
              params=params,
              start=start,
              end=end)
    return s
"""
The signal best matches itself at 400 frames. A frame is 1/44100 seconds, so it matches itself at 400/44100 seconds = 0.009 seconds so pitch is 110Hz.
"""
def localmaximum(l):
    """Return the strongest local peak of `l` as a (value, index) pair.

    Only elements above 500, at offset > 300, and strictly greater than
    their immediate neighbours qualify; returns 0 when no peak qualifies.
    Note the returned index is into l[1:-1], i.e. one less than the
    element's position in `l`.

    Bug fix: the bare ``except:`` (which also hid TypeErrors from bad
    input) is narrowed to the ValueError raised by max() on an empty list.
    """
    try:
        return max([(x, i) for i, x in enumerate(l[1:-1]) if x > 500 and i > 300 and l[i] < x and l[i+2] < x])
    except ValueError:
        return 0
def autocorr(signal):
    """Return the second half of the full autocorrelation of `signal`
    (lags >= 0), with each value divided by 1e6.

    Bug fix: ``result.size/2`` is floor division now (``//``) — identical
    under Python 2 but under Python 3 plain ``/`` yields a float, which is
    not a valid slice index.
    """
    result = pylab.correlate(signal, signal, mode='full')
    return [x/1000000 for x in result[result.size//2:]]
def latexautocorr(l, out=sys.stdout):
    """Write a LaTeX Verbatim illustration of autocorrelation: the sequence
    `l`, each of its shifted copies, and (in light gray) the element-wise
    absolute differences. Python 2 code; `safeout` comes from `useful`."""
    with safeout(out) as write:
        write(r"""
\begin{Verbatim}
""")
        # The original sequence on one line, 4 columns per value.
        for x in l:
            write("%4d"%(x))
        write("\n")
        for i in range(1, len(l)):
            # Copy of l shifted right by i, padded with blanks on the left.
            k1 = [" "]*i+l[:-i]
            for x in k1:
                try:
                    write("%4d"%(x))
                except:
                    write(x)
            write(r"""
\textcolor{lightgray}{""")
            # Differences between l and its shifted copy; blanks where the
            # shifted copy has padding (the subtraction raises there).
            for x, y in zip(l, k1):
                try:
                    write("%4d"%(abs(x-y)))
                except:
                    write("    ")
            write("}\n")
        write(r"""
\end{Verbatim}
""")
def localpitch(i, x):
    """Peak of the autocorrelation of a 1000-sample window of x starting at i;
    returns (value, lag) or 0 when no peak qualifies (see localmaximum)."""
    return localmaximum(autocorr(x[i:i+1000]))
def pitch(x):
    """Pitch track: localpitch over 1000-sample windows stepped by 100."""
    return [localpitch(i, x) for i in range(0, len(x)-1000, 100)]
"""
0.844014 180.680300
0.854014 181.916582
0.864014 180.753369
0.874014 193.796518
0.884014 202.034519
0.894014 201.161372
0.904014 191.750252
0.864127 162.798500
0.874127 172.712761
0.884127 177.480083
0.894127 182.036643
0.904127 180.446923
0.914127 172.204149
"""
def raisepitch(l0, r=10):
    """Raise the pitch by dropping every r-th sample (indices 0, r, 2r, ...)."""
    kept = []
    for index, sample in enumerate(l0):
        if index % r != 0:
            kept.append(sample)
    return kept
def lowerpitch(l0, r=10):
    """Lower the pitch by inserting an interpolated sample after every r-th
    element (the average of that element and its predecessor)."""
    stretched = []
    for i, sample in enumerate(l0):
        stretched.append(sample)
        if i % r == 0:
            # At i == 0 the predecessor wraps to the last element (l0[-1]).
            stretched.append((l0[i - 1] + sample) / 2)
    return stretched
def stretch(l0, r=100, stretching=True):
    """Time-stretch a signal pitch-synchronously.

    Walks through `l0` one pitch period at a time (period length taken from
    localpitch, falling back to 10 samples when no peak is found). With
    stretching=True every r-th period is duplicated (lengthens the sound);
    with stretching=False every r-th period is dropped (shortens it).
    """
    l0 = list(l0)
    l1 = []
    i = 0
    n = 0
    while i < len(l0):
        p = localpitch(i, l0)
        try:
            # p is (value, lag): the lag is the local pitch period.
            j = p[1]
        except:
            # localpitch returned 0 (no peak) — use a default period.
            j = 10
        n += 1
        if stretching:
            l1 += l0[i:i+j]
            if n%r == 0:
                l1 += l0[i:i+j]
        else:
            if n%r > 0:
                l1 += l0[i:i+j]
        i += j
    return l1
def toPicture(fft, out=sys.stdout, maxheight=False):
    """Render a spectrogram-like grayscale image of a 2-D FFT array.

    Magnitudes are taken per cell (currently just |real part| — the complex
    magnitude line is computed and then overwritten), normalised to the
    brightest cell, inverted (255 = quiet), and shown with pylab. Python 2
    code; `out` is unused here.
    """
    I = len(fft)
    J = len(fft[0])
    if maxheight == False:
        maxheight = J
    a = []
    best = 0
    for i in range(I):
        r = []
        for j in range(maxheight):
            try:
                p = fft[i][j]
                v = math.sqrt(p.real**2+p.imag**2)
                # NOTE(review): this overwrites the complex magnitude above —
                # confirm which measure is intended.
                v = abs(p.real)
            except:
                # Missing cell or non-complex value — treat as silence.
                v = 0
            if v > best:
                best = v
            r.append(v)
        a.append(r)
    # Invert so the loudest cell maps to black (0) and silence to white (255).
    for r in a:
        for j in range(len(r)):
            r[j] = 255-int(255*r[j]/best)
    a = pylab.array(a)
    pylab.imshow(a, "gray")
    pylab.show()
def showWav(wav):
    """Plot a waveform scaled so its maximum value becomes 10.

    NOTE(review): an all-non-positive signal leaves best == 0 and the scaling
    divides by zero — confirm inputs are always positive somewhere.
    """
    best = 0
    for x in wav:
        if x > best:
            best = x
    wav = [float(10*x)/float(best) for x in wav]
    pylab.plot(wav)
    pylab.show()
def plotpoints(points):
    """Draw a polyline of `points` (shifted down by 200 px) on a 512x512
    grayscale image and display it.

    NOTE(review): `cv2` is not imported anywhere in this file — this
    function raises NameError unless OpenCV is imported elsewhere; confirm.
    """
    img = pylab.zeros((512, 512, 1), pylab.uint8)
    for i in range(1, len(points)):
        p0 = int(points[i-1])+200
        p1 = int(points[i])+200
        cv2.line(img, (i-1, p0), (i, p1), 255,1)
    cv2.imshow('image', img)
    return img
| 27.857143
| 150
| 0.526998
|
4a0c6436e70b7770638a14abf99df332271f6243
| 240
|
py
|
Python
|
raft/messages/append_entries.py
|
baonguyen2604/raft-consensus
|
83dae8f8919384f6645a7041ea3c5b1239db1a14
|
[
"MIT"
] | 1
|
2020-11-01T17:07:08.000Z
|
2020-11-01T17:07:08.000Z
|
raft/messages/append_entries.py
|
baonguyen2604/raft-consensus
|
83dae8f8919384f6645a7041ea3c5b1239db1a14
|
[
"MIT"
] | null | null | null |
raft/messages/append_entries.py
|
baonguyen2604/raft-consensus
|
83dae8f8919384f6645a7041ea3c5b1239db1a14
|
[
"MIT"
] | 1
|
2021-04-26T13:32:44.000Z
|
2021-04-26T13:32:44.000Z
|
from .base_message import BaseMessage
class AppendEntriesMessage(BaseMessage):
    """AppendEntries RPC message (log replication / heartbeat) in Raft."""

    # Discriminator used by BaseMessage to identify the message kind.
    _type = BaseMessage.AppendEntries

    def __init__(self, sender, receiver, term, data):
        # All field initialisation is delegated to the shared base class.
        super().__init__(sender, receiver, term, data)
| 24
| 64
| 0.75
|
4a0c64859aecb03900c161f7f11c37eff961b721
| 3,511
|
py
|
Python
|
Xana/Xdrop/DropletizeData.py
|
reiserm/Xana
|
056f2bf2da67ba0dade49bb4b56ea2afd42b36bd
|
[
"MIT"
] | 1
|
2021-01-25T08:57:57.000Z
|
2021-01-25T08:57:57.000Z
|
Xana/Xdrop/DropletizeData.py
|
reiserm/Xana
|
056f2bf2da67ba0dade49bb4b56ea2afd42b36bd
|
[
"MIT"
] | 21
|
2020-03-23T12:50:32.000Z
|
2021-05-07T07:54:38.000Z
|
Xana/Xdrop/DropletizeData.py
|
reiserm/Xana
|
056f2bf2da67ba0dade49bb4b56ea2afd42b36bd
|
[
"MIT"
] | 2
|
2020-03-22T10:31:09.000Z
|
2020-07-01T14:00:28.000Z
|
import numpy as np
import numpy.ma as ma
from Xdrop.dropletizem import dropimgood_sel
import time
import pickle
import re
from matplotlib import pyplot as plt
def dropletizedata(data, pars, mask=None, dark=None, savdir="./", savname=None):
    """Convert raw detector images to photon ("droplet") maps.

    data -- image stack; the last two axes are the image shape, leading
            axes are flattened for processing and restored on return.
    pars -- droplet-algorithm parameters (background, thresholds, ADUs per
            photon, pixels per droplet, ...), passed to dropimgood_sel.
    mask -- boolean good-pixel mask (default: all pixels good).
    dark -- dark-current image subtracted per frame (default: zeros).
    savname -- when given, the result stack and the (currently empty) pixel
               list are written under savdir with this prefix.

    Bug fix: ``dtype=np.bool`` was deprecated in NumPy 1.20 and removed in
    1.24; the builtin ``bool`` is equivalent. The pickle file is also now
    written via a context manager so it is closed on error.
    """
    dim = data.shape
    mshape = dim[-2:]
    data = data.reshape(-1, *mshape)
    datdrop = np.zeros(data.shape, dtype=np.uint16)
    if dark is None:
        dark = np.zeros(mshape)
    if mask is None:
        mask = np.ones(mshape, dtype=bool)
    pix = []
    for imgn in range(data.shape[0]):
        img = data[imgn].astype(int)
        # dropimgood_sel expects the mask inverted (True = bad pixel).
        imd = dropimgood_sel(
            img,
            dark,
            ~mask,
            pars["background"],
            pars["lower_threshold"],
            pars["upper_threshold"],
            pars["number_photons"],
            pars["adusPphoton"],
            pars["pixelPdroplet"],
        )
        # Last element of the result is the per-pixel photon map.
        datdrop[imgn] = imd[-1]
        # pix.append(imd[:-1])
    if savname is not None:
        np.save(savdir + savname + "_dropletized.npy", datdrop)
        with open(savdir + savname + "_pix.pkl", "wb") as f:
            pickle.dump({"pix": pix}, f)
    datdrop = np.squeeze(datdrop.reshape(dim))
    return datdrop
def testDropletizing(data, pars, dark=None, mask=None):
    """Visual sanity check of the dropletizing parameters on one image.

    Crops the image to pars['roi'] (when present), subtracts the dark and
    applies the mask, dropletizes the result, and shows the thresholded raw
    image and the photon map side by side with per-pixel values annotated.
    """
    im = data.copy()
    if "roi" in pars.keys():
        # roi = ([x0, x1], [y0, y1]) in pixel coordinates.
        roi = pars["roi"]
        xl = roi[0]
        yl = roi[1]
    else:
        xl = [0, dark.shape[1]]
        yl = [0, dark.shape[0]]
    im = im[yl[0] : yl[1], xl[0] : xl[1]]
    if dark is not None:
        dark = dark[yl[0] : yl[1], xl[0] : xl[1]]
        im = im - 1.0 * dark
    if mask is not None:
        mask = mask[yl[0] : yl[1], xl[0] : xl[1]]
        im *= mask
    nx = im.shape[1]
    ny = im.shape[0]
    pars["nx"] = nx
    pars["ny"] = ny
    imd1 = dropletizedata(im, pars, mask=mask, dark=dark)
    tps = None  # (unused)
    tstr = ["raw image", "parameters 1", "parameters 2"]
    fig, ax = plt.subplots(1, 2, figsize=(9, 6))
    ax = ax.ravel()
    # Suppress sub-background pixels in the raw display.
    im[im < pars["background"]] = 0
    for i, imp in enumerate(
        [
            im,
            imd1,
        ]
    ):
        pl = ax[i].imshow(imp, interpolation="nearest", cmap="Blues")
        # Annotate every non-zero pixel with its integer value.
        ind_x = np.arange(xl[1] - xl[0])
        ind_y = np.arange(yl[1] - yl[0])
        x, y = np.meshgrid(ind_x, ind_y)
        for xi, yi in zip(x.flatten(), y.flatten()):
            if imp[yi, xi] > 0:
                c = "{0}".format(int(imp[yi, xi]))
                ax[i].text(
                    xi,
                    yi,
                    c,
                    va="center",
                    ha="center",
                    fontsize=8,
                    color="w",
                    fontweight="bold",
                )
        # Offset ticks by half a pixel so grid lines sit between pixels.
        ax[i].set_xticks(ind_x - 0.5)
        ax[i].set_yticks(ind_y - 0.5)
        ax[i].set_xticklabels([])
        ax[i].set_yticklabels([])
        ax[i].set_xlim(ind_x[0] - 0.5, ind_x[-1] + 0.5)
        ax[i].set_ylim(ind_y[0] - 0.5, ind_y[-1] + 0.5)
    # Title: total intensity in ADUs (and photons) vs. dropletized photons.
    I = im.sum()
    ph = imd1.sum()
    ax[0].set_title(
        r"$I={:.00f}\,adus\, ({:.00f}ph$)".format(I, I / pars["adusPphoton"])
    )
    ax[1].set_title(r"$I={:.00f}\,ph$".format(ph))
    plt.tight_layout(pad=2)
    """
    number = 0
    if saveimages:
        while os.path.isfile(savdir + savname):
            number = int(re.findall(r'\d+', savname)[0]) + 1
            savname = 'pic_{0:02}.pdf'.format(number)
        plt.savefig(savdir + savname)#, dpi=300)
    """
    plt.show()
| 28.544715
| 80
| 0.494161
|
4a0c64d48423cf48d0f9743ec2acc6dc5130fd2e
| 1,310
|
py
|
Python
|
accounts/models.py
|
Vicky-Rathod/django-blog
|
1c15210376c9e365052dd5c106dbd903a9717bba
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
Vicky-Rathod/django-blog
|
1c15210376c9e365052dd5c106dbd903a9717bba
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
Vicky-Rathod/django-blog
|
1c15210376c9e365052dd5c106dbd903a9717bba
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.urls import reverse
from .managers import AccountManager
# Custom user created.
class Account(AbstractBaseUser):
    #Custom user class inheriting AbstractBaseUser class
    # Users authenticate with their email (USERNAME_FIELD below); the
    # username is still required and unique for display purposes.
    username = models.CharField(max_length=255, unique=True)
    email = models.EmailField(unique=True)
    last_login = models.DateTimeField(verbose_name='last login', auto_now=True)
    date_joined = models.DateTimeField(verbose_name='date join', auto_now_add=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    # Custom manager handling create_user / create_superuser for this model.
    objects = AccountManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        #Simplest possible answer: Yes, always
        return True
    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        #Simplest possible answer: Yes, always
        return True
    def __str__(self):
        return self.username
    def get_absolute_url(self):
        # Canonical URL for this user's profile page.
        return reverse("profile:profile_view", kwargs={"pk": self.pk})
| 36.388889
| 83
| 0.710687
|
4a0c67f862d613b79479ab7316c9b0a9b1a4797d
| 15,266
|
py
|
Python
|
tests/contrib/cassandra/test.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
tests/contrib/cassandra/test.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
tests/contrib/cassandra/test.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
# stdlib
import contextlib
import logging
import unittest
from threading import Event
# 3p
from cassandra.cluster import Cluster, ResultSet
from cassandra.query import BatchStatement, SimpleStatement
# project
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.cassandra.patch import patch, unpatch
from ddtrace.contrib.cassandra.session import get_traced_cassandra, SERVICE
from ddtrace.ext import net, cassandra as cassx, errors
from ddtrace import config, Pin
# testing
from tests.contrib.config import CASSANDRA_CONFIG
from tests.opentracer.utils import init_tracer
from tests.test_tracer import get_dummy_tracer
# Oftentimes our tests fails because Cassandra connection timeouts during keyspace drop. Slowness in keyspace drop
# is known and is due to 'auto_snapshot' configuration. In our test env we should disable it, but the official cassandra
# image that we are using only allows us to configure a few configs:
# https://github.com/docker-library/cassandra/blob/4474c6c5cc2a81ee57c5615aae00555fca7e26a6/3.11/docker-entrypoint.sh#L51
# So for now we just increase the timeout, if this is not enough we may want to extend the official image with our own
# custom image.
# Generous connect timeout: keyspace drops are slow with auto_snapshot enabled
# (see module-level note above).
CONNECTION_TIMEOUT_SECS = 20  # override the default value of 5
# Quiet the driver's verbose logging during test runs.
logging.getLogger('cassandra').setLevel(logging.INFO)
def setUpModule():
    """Create the ``test`` keyspace and seed the fixture rows used by every test."""
    # Skip the whole module when the Cassandra driver is unavailable.
    if not Cluster:
        raise unittest.SkipTest('cassandra.cluster.Cluster is not available.')
    conn = Cluster(port=CASSANDRA_CONFIG['port'], connect_timeout=CONNECTION_TIMEOUT_SECS).connect()
    # Dropping a keyspace can be slow (auto_snapshot) -- allow a longer timeout.
    conn.execute('DROP KEYSPACE IF EXISTS test', timeout=10)
    fixture_statements = (
        "CREATE KEYSPACE if not exists test WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': 1};",
        'CREATE TABLE if not exists test.person (name text PRIMARY KEY, age int, description text)',
        'CREATE TABLE if not exists test.person_write (name text PRIMARY KEY, age int, description text)',
        "INSERT INTO test.person (name, age, description) VALUES ('Cassandra', 100, 'A cruel mistress')",
        "INSERT INTO test.person (name, age, description) VALUES ('Athena', 100, 'Whose shield is thunder')",
        "INSERT INTO test.person (name, age, description) VALUES ('Calypso', 100, 'Softly-braided nymph')",
    )
    for cql in fixture_statements:
        conn.execute(cql)
def tearDownModule():
    """Drop the fixture tables and the ``test`` keyspace once the module finishes."""
    conn = Cluster(port=CASSANDRA_CONFIG['port'], connect_timeout=CONNECTION_TIMEOUT_SECS).connect()
    for cql in ('DROP TABLE IF EXISTS test.person',
                'DROP TABLE IF EXISTS test.person_write'):
        conn.execute(cql)
    # Keyspace drop gets an explicit, longer timeout (see module note).
    conn.execute('DROP KEYSPACE IF EXISTS test', timeout=10)
class CassandraBase(object):
    """Shared test scenarios for the Cassandra integration.

    Needs a running Cassandra (provisioned by the module fixtures).
    Subclasses must implement ``_traced_session`` to return an
    instrumented ``(session, tracer)`` pair.
    """
    TEST_QUERY = "SELECT * from test.person WHERE name = 'Cassandra'"
    TEST_QUERY_PAGINATED = 'SELECT * from test.person'
    TEST_KEYSPACE = 'test'
    TEST_PORT = str(CASSANDRA_CONFIG['port'])
    TEST_SERVICE = 'test-cassandra'
    def _traced_session(self):
        # implement me: return (session, tracer) with tracing enabled
        pass
    @contextlib.contextmanager
    def override_config(self, integration, values):
        """
        Temporarily override an integration configuration value
        >>> with self.override_config('flask', dict(service_name='test-service')):
            # Your test
        """
        options = getattr(config, integration)
        # Snapshot the current values so they can be restored in ``finally``.
        original = dict(
            (key, options.get(key))
            for key in values.keys()
        )
        options.update(values)
        try:
            yield
        finally:
            options.update(original)
    def setUp(self):
        # Plain (not yet traced) cluster/session; subclasses layer patching on top.
        self.cluster = Cluster(port=CASSANDRA_CONFIG['port'])
        self.session = self.cluster.connect()
    def _assert_result_correct(self, result):
        # Validates the single fixture row inserted by setUpModule.
        assert len(result.current_rows) == 1
        for r in result:
            assert r.name == 'Cassandra'
            assert r.age == 100
            assert r.description == 'A cruel mistress'
    def _test_query_base(self, execute_fn):
        """Run TEST_QUERY through ``execute_fn`` and validate the emitted span."""
        session, tracer = self._traced_session()
        writer = tracer.writer
        result = execute_fn(session, self.TEST_QUERY)
        self._assert_result_correct(result)
        spans = writer.pop()
        assert spans, spans
        # exactly one span for the actual query
        assert len(spans) == 1
        query = spans[0]
        assert query.service == self.TEST_SERVICE
        assert query.resource == self.TEST_QUERY
        assert query.span_type == cassx.TYPE
        assert query.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE
        assert query.get_tag(net.TARGET_PORT) == self.TEST_PORT
        assert query.get_tag(cassx.ROW_COUNT) == '1'
        assert query.get_tag(cassx.PAGE_NUMBER) is None
        assert query.get_tag(cassx.PAGINATED) == 'False'
        assert query.get_tag(net.TARGET_HOST) == '127.0.0.1'
        # confirm no analytics sample rate set by default
        assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
    def test_query(self):
        # Plain synchronous execute.
        def execute_fn(session, query):
            return session.execute(query)
        self._test_query_base(execute_fn)
    def test_query_analytics_with_rate(self):
        # An explicit sample rate must be reflected on the span metric.
        with self.override_config(
            'cassandra',
            dict(analytics_enabled=True, analytics_sample_rate=0.5)
        ):
            session, tracer = self._traced_session()
            session.execute(self.TEST_QUERY)
        writer = tracer.writer
        spans = writer.pop()
        assert spans, spans
        # exactly one span for the actual query
        assert len(spans) == 1
        query = spans[0]
        # the configured analytics rate is attached to the span
        assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
    def test_query_analytics_without_rate(self):
        # Analytics enabled with no explicit rate defaults to 1.0.
        with self.override_config(
            'cassandra',
            dict(analytics_enabled=True)
        ):
            session, tracer = self._traced_session()
            session.execute(self.TEST_QUERY)
        writer = tracer.writer
        spans = writer.pop()
        assert spans, spans
        # exactly one span for the actual query
        assert len(spans) == 1
        query = spans[0]
        # default analytics sample rate is 1.0 when only enabled
        assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
    def test_query_ot(self):
        """Ensure that cassandra works with the opentracer."""
        def execute_fn(session, query):
            return session.execute(query)
        session, tracer = self._traced_session()
        ot_tracer = init_tracer('cass_svc', tracer)
        writer = tracer.writer
        with ot_tracer.start_active_span('cass_op'):
            result = execute_fn(session, self.TEST_QUERY)
            self._assert_result_correct(result)
        spans = writer.pop()
        assert spans, spans
        # one opentracing span wrapping one query span
        assert len(spans) == 2
        ot_span, dd_span = spans
        # confirm parenting
        assert ot_span.parent_id is None
        assert dd_span.parent_id == ot_span.span_id
        assert ot_span.name == 'cass_op'
        assert ot_span.service == 'cass_svc'
        assert dd_span.service == self.TEST_SERVICE
        assert dd_span.resource == self.TEST_QUERY
        assert dd_span.span_type == cassx.TYPE
        assert dd_span.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE
        assert dd_span.get_tag(net.TARGET_PORT) == self.TEST_PORT
        assert dd_span.get_tag(cassx.ROW_COUNT) == '1'
        assert dd_span.get_tag(cassx.PAGE_NUMBER) is None
        assert dd_span.get_tag(cassx.PAGINATED) == 'False'
        assert dd_span.get_tag(net.TARGET_HOST) == '127.0.0.1'
    def test_query_async(self):
        # Asynchronous execute; the callback path must still produce the span.
        def execute_fn(session, query):
            event = Event()
            result = []
            future = session.execute_async(query)
            def callback(results):
                result.append(ResultSet(future, results))
                event.set()
            future.add_callback(callback)
            event.wait()
            return result[0]
        self._test_query_base(execute_fn)
    def test_query_async_clearing_callbacks(self):
        # Clearing driver callbacks must not break the instrumentation's own hook.
        def execute_fn(session, query):
            future = session.execute_async(query)
            future.clear_callbacks()
            return future.result()
        self._test_query_base(execute_fn)
    def test_span_is_removed_from_future(self):
        # The span attached to the future must be detached once it resolves.
        session, tracer = self._traced_session()
        future = session.execute_async(self.TEST_QUERY)
        future.result()
        span = getattr(future, '_ddtrace_current_span', None)
        assert span is None
    def test_paginated_query(self):
        session, tracer = self._traced_session()
        writer = tracer.writer
        statement = SimpleStatement(self.TEST_QUERY_PAGINATED, fetch_size=1)
        result = session.execute(statement)
        # iterate over all pages
        results = list(result)
        assert len(results) == 3
        spans = writer.pop()
        assert spans, spans
        # There are 4 spans for 3 results since the driver makes a request with
        # no result to check that it has reached the last page
        assert len(spans) == 4
        for i in range(4):
            query = spans[i]
            assert query.service == self.TEST_SERVICE
            assert query.resource == self.TEST_QUERY_PAGINATED
            assert query.span_type == cassx.TYPE
            assert query.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE
            assert query.get_tag(net.TARGET_PORT) == self.TEST_PORT
            if i == 3:
                # final empty page
                assert query.get_tag(cassx.ROW_COUNT) == '0'
            else:
                assert query.get_tag(cassx.ROW_COUNT) == '1'
            assert query.get_tag(net.TARGET_HOST) == '127.0.0.1'
            assert query.get_tag(cassx.PAGINATED) == 'True'
            assert query.get_tag(cassx.PAGE_NUMBER) == str(i+1)
    def test_trace_with_service(self):
        # The subclass-configured service name must land on the span.
        session, tracer = self._traced_session()
        writer = tracer.writer
        session.execute(self.TEST_QUERY)
        spans = writer.pop()
        assert spans
        assert len(spans) == 1
        query = spans[0]
        assert query.service == self.TEST_SERVICE
    def test_trace_error(self):
        # A failing query is traced with error flag and message/type tags.
        session, tracer = self._traced_session()
        writer = tracer.writer
        try:
            session.execute('select * from test.i_dont_exist limit 1')
        except Exception:
            pass
        else:
            assert 0
        spans = writer.pop()
        assert spans
        query = spans[0]
        assert query.error == 1
        for k in (errors.ERROR_MSG, errors.ERROR_TYPE):
            assert query.get_tag(k)
    def test_bound_statement(self):
        # Prepared/bound statements report the original query as the resource.
        session, tracer = self._traced_session()
        writer = tracer.writer
        query = 'INSERT INTO test.person_write (name, age, description) VALUES (?, ?, ?)'
        prepared = session.prepare(query)
        session.execute(prepared, ('matt', 34, 'can'))
        prepared = session.prepare(query)
        bound_stmt = prepared.bind(('leo', 16, 'fr'))
        session.execute(bound_stmt)
        spans = writer.pop()
        assert len(spans) == 2
        for s in spans:
            assert s.resource == query
    def test_batch_statement(self):
        # Batches produce a single span with batch size and query metadata.
        session, tracer = self._traced_session()
        writer = tracer.writer
        batch = BatchStatement()
        batch.add(
            SimpleStatement('INSERT INTO test.person_write (name, age, description) VALUES (%s, %s, %s)'),
            ('Joe', 1, 'a'),
        )
        batch.add(
            SimpleStatement('INSERT INTO test.person_write (name, age, description) VALUES (%s, %s, %s)'),
            ('Jane', 2, 'b'),
        )
        session.execute(batch)
        spans = writer.pop()
        assert len(spans) == 1
        s = spans[0]
        assert s.resource == 'BatchStatement'
        assert s.get_metric('cassandra.batch_size') == 2
        assert 'test.person' in s.get_tag('cassandra.query')
class TestCassPatchDefault(unittest.TestCase, CassandraBase):
    """Run the shared scenarios through ``patch()`` with the stock service name."""
    TEST_SERVICE = SERVICE
    def setUp(self):
        # Build the plain cluster first, then enable tracing globally.
        CassandraBase.setUp(self)
        patch()
    def tearDown(self):
        # Remove the global patching so later test classes start clean.
        unpatch()
    def _traced_session(self):
        dummy = get_dummy_tracer()
        # Re-pin the existing cluster onto a dummy tracer to capture spans.
        Pin.get_from(self.cluster).clone(tracer=dummy).onto(self.cluster)
        return self.cluster.connect(self.TEST_KEYSPACE), dummy
class TestCassPatchAll(TestCassPatchDefault):
    """Shared scenarios with a custom service pinned on the Cluster *class*."""
    TEST_SERVICE = 'test-cassandra-patch-all'
    def setUp(self):
        CassandraBase.setUp(self)
        patch()
    def tearDown(self):
        unpatch()
    def _traced_session(self):
        dummy = get_dummy_tracer()
        # Pin service/tracer on the global Cluster class so every new cluster
        # inherits it -- this also checks for conflicts with the global pin.
        Pin(service=self.TEST_SERVICE, tracer=dummy).onto(Cluster)
        self.cluster = Cluster(port=CASSANDRA_CONFIG['port'])
        return self.cluster.connect(self.TEST_KEYSPACE), dummy
class TestCassPatchOne(TestCassPatchDefault):
    """Shared scenarios with a custom service pinned on one cluster instance."""
    TEST_SERVICE = 'test-cassandra-patch-one'
    def setUp(self):
        CassandraBase.setUp(self)
        patch()
    def tearDown(self):
        unpatch()
    def _traced_session(self):
        dummy = get_dummy_tracer()
        # Pin a *different* service on the global Cluster class to prove the
        # instance-level pin below takes precedence over it.
        Pin(service='not-%s' % self.TEST_SERVICE).onto(Cluster)
        self.cluster = Cluster(port=CASSANDRA_CONFIG['port'])
        Pin(service=self.TEST_SERVICE, tracer=dummy).onto(self.cluster)
        return self.cluster.connect(self.TEST_KEYSPACE), dummy
    def test_patch_unpatch(self):
        def run_query_and_pop(tracer):
            # Helper: execute the canonical query on a fresh session and
            # return whatever spans were recorded.
            session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
            session.execute(self.TEST_QUERY)
            return tracer.writer.pop()
        # Patching must be idempotent.
        patch()
        patch()
        tracer = get_dummy_tracer()
        Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)
        spans = run_query_and_pop(tracer)
        assert spans, spans
        assert len(spans) == 1
        # After unpatch, no spans may be produced.
        unpatch()
        spans = run_query_and_pop(tracer)
        assert not spans, spans
        # Patching again restores tracing.
        patch()
        Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)
        spans = run_query_and_pop(tracer)
        assert spans, spans
def test_backwards_compat_get_traced_cassandra():
    """The deprecated ``get_traced_cassandra`` helper still yields a usable cluster class."""
    traced_cluster_cls = get_traced_cassandra()
    session = traced_cluster_cls(port=CASSANDRA_CONFIG['port']).connect()
    session.execute("drop table if exists test.person")
| 34.61678
| 121
| 0.646731
|
4a0c67fd52ed9b4c684d17edb56c84ac68b5a4dd
| 4,346
|
py
|
Python
|
finetune/qa/squad_official_eval_v1.py
|
Wangkaixinlove/sentiment
|
a1bd7f099c4885d4e65435e403ff5932ebfab73a
|
[
"Apache-2.0"
] | null | null | null |
finetune/qa/squad_official_eval_v1.py
|
Wangkaixinlove/sentiment
|
a1bd7f099c4885d4e65435e403ff5932ebfab73a
|
[
"Apache-2.0"
] | null | null | null |
finetune/qa/squad_official_eval_v1.py
|
Wangkaixinlove/sentiment
|
a1bd7f099c4885d4e65435e403ff5932ebfab73a
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Official evaluation script for v1.1 of the SQuAD dataset.
Modified slightly for the ELECTRA codebase.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import string
import re
import json
import sys
import os
import collections
import tensorflow.compat.v1 as tf
#import tensorflow as tf
import configure_finetuning
def normalize_answer(s):
    """Lower-case *s*, drop punctuation and articles (a/an/the), squeeze whitespace."""
    text = s.lower()
    # Strip punctuation character by character.
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    # Blank out standalone articles.
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    # Collapse runs of whitespace to single spaces and trim.
    return ' '.join(text.split())
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and one ground-truth answer."""
    pred_tokens = normalize_answer(prediction).split()
    truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(count) times.
    overlap = sum((Counter(pred_tokens) & Counter(truth_tokens)).values())
    if overlap == 0:
        return 0
    precision = 1.0 * overlap / len(pred_tokens)
    recall = 1.0 * overlap / len(truth_tokens)
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
    """True when prediction and ground truth are identical after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best score of ``metric_fn(prediction, gt)`` over all ground-truth answers."""
    # max() raises ValueError on an empty list, matching the original behavior.
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def evaluate(dataset, predictions):
    """Score *predictions* against a SQuAD v1.1 *dataset*.

    Args:
        dataset: the ``data`` list from the SQuAD JSON (articles -> paragraphs -> qas).
        predictions: dict mapping question id -> predicted answer string.

    Returns:
        dict with ``exact_match`` and ``f1`` percentages over all questions.
    """
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                # Missing predictions score 0 but still count toward the total.
                if qa['id'] not in predictions:
                    message = 'Unanswered question ' + qa['id'] + \
                              ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                ground_truths = list(map(lambda x: x['text'], qa['answers']))
                prediction = predictions[qa['id']]
                # Each metric takes the best match over all reference answers.
                exact_match += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths)
                f1 += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths)
    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total
    return {'exact_match': exact_match, 'f1': f1}
def main(config: configure_finetuning.FinetuningConfig, split):
    """Load the SQuAD v1.1 *split* plus model predictions and return EM/F1 metrics."""
    expected_version = '1.1'
    # Original command-line interface, kept for reference:
    # parser = argparse.ArgumentParser(
    #     description='Evaluation for SQuAD ' + expected_version)
    # parser.add_argument('dataset_file', help='Dataset file')
    # parser.add_argument('prediction_file', help='Prediction File')
    # args = parser.parse_args()
    # Mimic the original argparse namespace with a namedtuple so the body
    # below stays close to the official script.
    Args = collections.namedtuple("Args", [
        "dataset_file", "prediction_file"
    ])
    args = Args(dataset_file=os.path.join(
        config.raw_data_dir("squadv1"),
        split + ("-debug" if config.debug else "") + ".json"),
        prediction_file=config.qa_preds_file("squadv1"))
    with tf.io.gfile.GFile(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        # A version mismatch only warns; evaluation proceeds regardless.
        if dataset_json['version'] != expected_version:
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    with tf.io.gfile.GFile(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    return evaluate(dataset, predictions)
| 34.220472
| 74
| 0.71537
|
4a0c6899dc8fdb39f85553a611748b327454c992
| 914
|
py
|
Python
|
0013. Roman to Integer/Roman_to_Integer_sol.py
|
amitdev101/leetcode
|
cd03a088a3f6dd989be91e37420acfd7b29fce6e
|
[
"MIT"
] | 1
|
2021-04-23T13:55:38.000Z
|
2021-04-23T13:55:38.000Z
|
0013. Roman to Integer/Roman_to_Integer_sol.py
|
amitdev101/leetcode
|
cd03a088a3f6dd989be91e37420acfd7b29fce6e
|
[
"MIT"
] | null | null | null |
0013. Roman to Integer/Roman_to_Integer_sol.py
|
amitdev101/leetcode
|
cd03a088a3f6dd989be91e37420acfd7b29fce6e
|
[
"MIT"
] | null | null | null |
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral string to its integer value.

        Uses the subtractive rule: a symbol whose value is smaller than its
        right neighbour is subtracted (e.g. the 'I' in 'IV'), otherwise it is
        added.  This replaces the original two-character lookup table and its
        duplicated index-juggling branches with a single idiomatic scan.

        Args:
            s: a valid Roman numeral (symbols I, V, X, L, C, D, M).

        Returns:
            The integer value of the numeral.
        """
        values = {
            'I': 1,
            'V': 5,
            'X': 10,
            'L': 50,
            'C': 100,
            'D': 500,
            'M': 1000,
        }
        total = 0
        n = len(s)
        for i, ch in enumerate(s):
            # Subtract when a smaller symbol precedes a larger one (IV, IX, ...).
            if i + 1 < n and values[ch] < values[s[i + 1]]:
                total -= values[ch]
            else:
                total += values[ch]
        return total
| 23.435897
| 40
| 0.252735
|
4a0c6996630832c7adef6d87e8fd9b0e09f6c822
| 185
|
py
|
Python
|
examples/harris.py
|
sayefsakin/phylanx_halide
|
760305e0b2c3ed0fdbf81416d000ebca4b034585
|
[
"BSL-1.0"
] | null | null | null |
examples/harris.py
|
sayefsakin/phylanx_halide
|
760305e0b2c3ed0fdbf81416d000ebca4b034585
|
[
"BSL-1.0"
] | 2
|
2021-08-28T21:38:44.000Z
|
2022-03-25T12:41:58.000Z
|
examples/harris.py
|
sayefsakin/phylanx_halide
|
760305e0b2c3ed0fdbf81416d000ebca4b034585
|
[
"BSL-1.0"
] | 2
|
2021-08-28T20:48:41.000Z
|
2022-03-25T12:01:47.000Z
|
from phylanx import Phylanx
import cv2
import numpy
@Phylanx
def add(img):
    # Compiled by the Phylanx decorator; ``harris`` is resolved inside the
    # Phylanx runtime (corner detection), not as a Python name.
    return harris(img)
# Script driver: load an RGBA image and run the kernel once.
img = cv2.imread("rgba.png")
data = numpy.asarray(img)
print(data.shape)
add(data)
| 12.333333
| 28
| 0.724324
|
4a0c6a5d9c5e6fae2876dad38c1cb981f2fa5940
| 2,227
|
py
|
Python
|
ax/plot/marginal_effects.py
|
mwijaya3/Ax
|
ab64ec71067eb1f7c4f6a5d017761121a93e71f3
|
[
"MIT"
] | null | null | null |
ax/plot/marginal_effects.py
|
mwijaya3/Ax
|
ab64ec71067eb1f7c4f6a5d017761121a93e71f3
|
[
"MIT"
] | null | null | null |
ax/plot/marginal_effects.py
|
mwijaya3/Ax
|
ab64ec71067eb1f7c4f6a5d017761121a93e71f3
|
[
"MIT"
] | 1
|
2019-07-02T08:54:44.000Z
|
2019-07-02T08:54:44.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Any, List
import pandas as pd
import plotly.graph_objs as go
from ax.modelbridge.base import ModelBridge
from ax.plot.base import DECIMALS, AxPlotConfig, AxPlotTypes
from ax.plot.helper import get_plot_data
from ax.utils.stats.statstools import marginal_effects
from plotly import tools
def plot_marginal_effects(model: ModelBridge, metric: str) -> AxPlotConfig:
    """
    Calculates and plots the marginal effects -- the effect of changing one
    factor away from the randomized distribution of the experiment and fixing it
    at a particular level.
    Args:
        model: Model to use for estimating effects
        metric: The metric for which to plot marginal effects.
    Returns:
        AxPlotConfig of the marginal effects
    """
    plot_data, _, _ = get_plot_data(model, {}, {metric})
    # One row per arm: parameter values plus predicted mean/sem for `metric`.
    arm_dfs = []
    for arm in plot_data.in_sample.values():
        arm_df = pd.DataFrame(arm.parameters, index=[arm.name])
        arm_df["mean"] = arm.y_hat[metric]
        arm_df["sem"] = arm.se_hat[metric]
        arm_dfs.append(arm_df)
    # Fix: pass `axis` by keyword -- the positional form `pd.concat(objs, 0)`
    # was deprecated in pandas 1.x and removed in pandas 2.0.
    effect_table = marginal_effects(pd.concat(arm_dfs, axis=0))
    varnames = effect_table["Name"].unique()
    data: List[Any] = []
    # One bar trace per factor, with standard errors as error bars.
    for varname in varnames:
        var_df = effect_table[effect_table["Name"] == varname]
        data += [
            # pyre-ignore[16]
            go.Bar(
                x=var_df["Level"],
                y=var_df["Beta"],
                error_y={"type": "data", "array": var_df["SE"]},
                name=varname,
            )
        ]
    # One subplot per factor, sharing the y axis so effects are comparable.
    fig = tools.make_subplots(
        cols=len(varnames),
        rows=1,
        subplot_titles=list(varnames),
        print_grid=False,
        shared_yaxes=True,
    )
    for idx, item in enumerate(data):
        fig.append_trace(item, 1, idx + 1)
    fig.layout.showlegend = False
    fig.layout.title = "Marginal Effects by Factor"
    fig.layout.yaxis = {
        "title": "% better than experiment average",
        "hoverformat": ".{}f".format(DECIMALS),
    }
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
| 32.75
| 80
| 0.638976
|
4a0c6ad2f31830c6d6fbb022054333b82af3110a
| 23,314
|
py
|
Python
|
mim/Fragment.py
|
nbraunsc/MIM
|
587ae79b0ec76c20af6235a68c85ed15f2cc7155
|
[
"BSD-3-Clause"
] | null | null | null |
mim/Fragment.py
|
nbraunsc/MIM
|
587ae79b0ec76c20af6235a68c85ed15f2cc7155
|
[
"BSD-3-Clause"
] | null | null | null |
mim/Fragment.py
|
nbraunsc/MIM
|
587ae79b0ec76c20af6235a68c85ed15f2cc7155
|
[
"BSD-3-Clause"
] | null | null | null |
import string
import time
import numpy as np
#from .Pyscf import *
#from ase import Atoms
#from ase.calculators.vasp import Vasp
#from ase.vibrations import Infrared
from numpy import linalg as LA
from mendeleev import element
#import mim
#from mim import runpie, Molecule, fragmentation, Fragment, Pyscf
class Fragment():
    """
    Class to store a list of primitives corresponding to a molecular fragment
    Parameters
    ----------
    qc_class : object
        Quantum-chemistry backend wrapper providing ``energy_gradient``
    molecule : Molecule
        Parent molecule whose atom indices the primitives refer to
    prims : list
        List of fragments from Fragmentation class with atom indexes in list
    attached : list
        List of attached pairs with the atom that is in the fragment and its corresponding atom pair that was cut
    coeff : int
        Coefficent of fragment. This will either be 1 or -1.
    step_size : float
        Finite-difference displacement used for numerical derivatives
    local_coeff : int
        Additional MIM-level coefficient applied together with ``coeff``
    """
def __init__(self, qc_class, molecule, prims, attached=[], coeff=1, step_size=0.001, local_coeff=1):
self.prims = prims
self.molecule = molecule
self.coeff = coeff
self.attached = attached #[(supporting, host), (supporting, host), ...]
self.inputxyz = []
self.apt = []
self.aptgrad = np.array([])
self.step = step_size
self.energy = 0
self.grad = 0
self.hessian = 0
self.hess = []
self.notes = [] # [index of link atom, factor, supporting atom, host atom]
self.jacobian_grad = [] #array for gradient link atom projections
self.jacobian_hess = [] #ndarray shape of full system*3 x fragment(with LA)*3
self.qc_class = qc_class
self.step_size = step_size
self.local_coeff = local_coeff
self.M = [] #this is the mass matrix for massweighting shape: (3N, 3N)
self.center = []
self.gradlist = []
self.origin_vec = []
self.nuc_deriv = []
def add_linkatoms(self, atom1, attached_atom, molecule):
""" Adds H as a link atom
This link atoms adds at a distance ratio between the supporting and host atom to each fragment where a previous atom was cut
Parameters
----------
atom1 : int
This is the integer corresponding to the supporting atom (real atom)
attached_atom : int
This is the integer corresponiding to the host atom (ghost atom)
molecule : <class> instance
This is the molecule class instance
Returns
-------
new_xyz : list
This is the list of the new link atom with atom label and xyz coords
factor : float
The factor between the supporting and host atom. Used in building Jacobians for link atom projections.
"""
atom1_element = molecule.atomtable[atom1][0]
attached_atom_element = molecule.atomtable[attached_atom][0]
cov_atom1 = molecule.covrad[atom1_element][0]
cov_attached_atom = molecule.covrad[attached_atom_element][0]
self.atom_xyz = np.array(molecule.atomtable[atom1][1:])
attached_atom_xyz = np.array(molecule.atomtable[attached_atom][1:])
vector = attached_atom_xyz - self.atom_xyz
dist = np.linalg.norm(vector)
h = 0.32
factor = (h + cov_atom1)/(cov_atom1 + cov_attached_atom)
new_xyz = list(factor*vector+self.atom_xyz)
coord = []
coord.append('H')
coord.append(new_xyz)
return coord, factor
def build_xyz(self):
""" Builds the xyz input with the atom labels, xyz coords, and link atoms as a string or list
Parameters
----------
none
Returns
-------
inputxyz : str
String with atom label then corresonding xyz coordinates. This input includes the link atoms.
input_list : list of lists
ie [[['H', [0, 0 ,0]], ['O', [x, y, z]], ... ]
self.notes: list of lists
List of lists that is created with len = number of link atoms. Each sub list corresponds to one link atom.
(i.e. [index of link atom, factor, supporting atom number, host atom number])
"""
self.notes = []
input_list = []
coord_matrix = np.empty([len(self.prims)+len(self.attached), 3])
for atom in self.prims:
input_list.append([self.molecule.atomtable[atom][0]])
input_list[-1].append(list(self.molecule.atomtable[atom][1:]))
x = np.array(self.molecule.atomtable[atom][1:])
for pair in range(0, len(self.attached)):
la_input, factor = self.add_linkatoms(self.attached[pair][0], self.attached[pair][1], self.molecule)
input_list.append(la_input)
position = len(self.prims)+pair
self.notes.append([position])
self.notes[-1].append(factor)
self.notes[-1].append(self.attached[pair][0])
self.notes[-1].append(self.attached[pair][1])
#self.input_list = input_list
return input_list
def build_jacobian_Grad(self):
"""Builds Jacobian matrix for gradient link atom projections
Parameters
----------
none
Returns
-------
self.jacobian_grad : ndarray
Array where entries are floats on the diagonal with the corresponding factor.
Array has size (# of atoms in full molecule + all link atoms, # of atoms in primiative)
"""
self.jacobian_grad = 0
array = np.zeros((self.molecule.natoms, len(self.prims)))
linkarray = np.zeros((self.molecule.natoms, len(self.notes)))
for i in range(0, len(self.prims)):
array[self.prims[i]][i] = 1
for j in range(0, len(self.notes)):
factor = 1 - self.notes[j][1]
linkarray[self.notes[j][2]][j] = factor
linkarray[self.notes[j][3]][j] = self.notes[j][1]
self.jacobian_grad = np.concatenate((array, linkarray), axis=1)
jacob = self.jacobian_grad
return jacob
def build_jacobian_Hess(self):
""" Builds Jacobian matrix for hessian link atom projections.
Parameters
----------
Returns
-------
self.jacobian_hess : ndarray (tensor)
Array where the entries are matrices corresponding factor.
"""
zero_list = []
full_array = np.zeros((self.molecule.natoms, len(self.prims)+len(self.notes), 3, 3))
for i in range(0, len(self.prims)):
full_array[self.prims[i], i] = np.identity(3)
for j in range(0, len(self.notes)):
factor_s = 1-self.notes[j][1]
factor_h = self.notes[j][1]
x = np.zeros((3,3))
np.fill_diagonal(x, factor_s)
position = len(self.prims) + j
full_array[self.notes[j][2]][position] = x
np.fill_diagonal(x, factor_h)
full_array[self.notes[j][3]][position] = x
self.jacobian_hess = full_array
return self.jacobian_hess
    def qc_backend(self):
        """
        Runs the quantum chemistry backend.
        This runs an energy and gradient calculation. If hessian is available
        it will also run that.
        Returns
        -------
        self.energy : float
            This is the energy for the fragment*its coeff
        self.gradient : ndarray
            This is the gradient for the fragment*its coeff
        self.hessian : ndarray (4D tensor)
            This is the hessian for the fragement*its coeff
        """
        np.set_printoptions(suppress=True, precision=9, linewidth=200)
        self.energy = 0
        hess_py = 0
        self.grad = 0
        # Rebuild geometry (including link atoms) before every backend call.
        self.inputxyz = self.build_xyz()
        #sets origin of coords to center of mass
        #self.center = self.com()
        #finds inertia vector, R and T modes (only for 3 atom molecules currently)
        #self.inertia()
        # hess_py stays 0 (int) when the backend has no analytical hessian;
        # hess_apt() checks for that and falls back to finite differences.
        energy, grad, hess_py = self.qc_class.energy_gradient(self.inputxyz)
        #self.energy = self.coeff*energy
        self.energy = self.local_coeff*self.coeff*energy
        # Project the fragment gradient (with link atoms) back onto the
        # full molecule's atoms, scaled by the MIM coefficients.
        jacob = self.build_jacobian_Grad()
        self.grad = self.local_coeff*self.coeff*jacob.dot(grad)
        # NOTE(review): mass_matrix() is defined outside this view -- presumably
        # builds the (3N, 3N) mass-weighting matrix; confirm.
        self.M = self.mass_matrix()
        print("Done! \n")
        return self.energy, self.grad, hess_py #, self.hessian#, self.apt
    def hess_apt(self, hess_py):
        """
        Runs only the hessian and atomic polar tensor calculations.

        Parameters
        ----------
        hess_py : ndarray or int
            Analytical hessian from the backend, or the int 0 when the
            backend provides none (triggers the numerical fallback).

        Returns
        -------
        self.hessian : ndarray
        self.apt : ndarray
        """
        #If not analytical hess, do numerical below
        if type(hess_py) is int:
            print("Numerical hessian needed, Theory=", self.qc_class.theory)
            hess_flat = np.zeros(((len(self.inputxyz))*3, (len(self.inputxyz))*3))
            i = -1
            # Central finite difference of the gradient, one row per
            # (atom, coordinate) displacement; geometry is restored after each.
            for atom in range(0, len(self.inputxyz)):
                for xyz in range(0, 3):
                    i = i+1
                    self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]+self.step_size
                    grad1 = self.qc_class.energy_gradient(self.inputxyz)[1].flatten()
                    self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]-2*self.step_size
                    grad2 = self.qc_class.energy_gradient(self.inputxyz)[1].flatten()
                    self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]+self.step_size
                    # NOTE(review): a plain central difference divides by
                    # 2*step; the 4*step here halves the value before writing
                    # it to both row and column -- confirm this symmetrization
                    # convention is intended.
                    vec = (grad1 - grad2)/(4*self.step_size)
                    hess_flat[i] = vec
                    hess_flat[:,i] = vec
        #Analytical hess from qc_backend gets reshaped and flatten to 3Nx3N matrix
        else:
            hess_flat = hess_py
        #start building jacobian and reshaping
        self.jacobian_hess = self.build_jacobian_Hess() #shape: (Full, Sub, 3, 3)
        j_reshape = self.jacobian_hess.transpose(0,2,1,3)
        j_flat = j_reshape.reshape(self.molecule.natoms*3, len(self.inputxyz)*3, order='C') #shape: (Full*3, Sub*3)
        j_flat_tran = j_flat.T #shape: (Sub*3, Full*3)
        # Sandwich product J * H * J^T projects the fragment hessian (link
        # atoms included) onto the full molecule's coordinates.
        first = np.dot(j_flat, hess_flat) # (Full*3, Sub*3) x (Sub*3, Sub*3) -> (Full*3, Sub*3)
        second = np.dot(first, j_flat_tran) # (Full*3, Sub*3) x (Sub*3, Full*3) -> (Full*3, Full*3)
        self.hessian = second*self.coeff*self.local_coeff
        #start building the APT's
        self.apt = self.build_apt()
        #self.aptgrad = self.apt_grad() #one i am trying to get to work
        return self.hessian, self.apt
    def inertia(self):
        """ Finds principal axes and moments of inertia in amu*Bohr^2
        (I did this in a very non-optimized way!)
        """
        # Accumulate the six independent second moments over all atoms.
        xx = 0
        yy = 0
        zz = 0
        xy = 0
        xz = 0
        yz = 0
        for i in range(0, len(self.inputxyz)):
            x = element(self.inputxyz[i][0])
            mass = x.atomic_weight
            xx += (self.inputxyz[i][1][1]**2 + self.inputxyz[i][1][2]**2)*mass
            yy += (self.inputxyz[i][1][0]**2 + self.inputxyz[i][1][2]**2)*mass
            zz += (self.inputxyz[i][1][0]**2 + self.inputxyz[i][1][1]**2)*mass
            xy += self.inputxyz[i][1][0]*self.inputxyz[i][1][1]*mass
            xz += self.inputxyz[i][1][0]*self.inputxyz[i][1][2]*mass
            yz += self.inputxyz[i][1][1]*self.inputxyz[i][1][2]*mass
        print("moment of interia for xx:", xx)
        print("moment of interia for yy:", yy)
        print("moment of interia for zz:", zz)
        print("moment of interia for xy:", xy)
        print("moment of interia for xz:", xz)
        print("moment of interia for yz:", yz)
        # NOTE(review): the textbook inertia tensor carries a minus sign on
        # the off-diagonal products (-sum m*x*y, ...); these are stored
        # positive here -- confirm the sign convention used downstream.
        tensor = np.zeros((3,3))
        tensor[0][0] = xx
        tensor[0][1] = tensor[1][0] = xy
        tensor[1][1] = yy
        tensor[0][2] = tensor[2][0] = xz
        tensor[2][2] = zz
        tensor[1][2] = tensor[2][1] = yz
        print("Inertia tensor:\n", tensor)
        evalues, vec = LA.eig(tensor) ###only for origin in pyscf calc
        #evalues, vec = LA.eigh(tensor)
        print(evalues)
        print(" Principal axes and moments of inertia in amu*Bohr^2:")
        # 1.88973 converts Angstrom -> Bohr; squared for the second moment.
        print("Eigenvalues: \n", evalues*1.88973*1.88973)
        #vec[:, [2, 0]] = vec[:, [0, 2]]
        xyz = np.array(["X", "Y", "Z"])
        print(xyz[0], vec[0])
        print(xyz[1], vec[1])
        print(xyz[2], vec[2])
        #compute rotational constants
        conv = (6.626755E-34/(8*np.pi**2))/1.6605402E-27 #kg -> amu, cancel out all masses
        conv_final = (conv*1E20)/2.99792458E10 #B^2 -> A^2 -> m^2, cancel out all lengths, speed of light cm/s
        self.origin_vec = np.sqrt(conv/evalues) #units of Bohr
        print("Pyscf origin vector:", self.origin_vec)
        rotate_const = conv_final/evalues
        print("Rotational constants (units: cm-1)\n", rotate_const)
        #generating internal coordinates to sep out R and T modes
        #self.int_coords(vec)
def com(self):
    """Compute and return the fragment's center of mass.

    Mass-weights the Cartesian coordinates stored in ``self.inputxyz``.

    Returns
    -------
    ndarray
        Center-of-mass position (same length units as ``self.inputxyz``);
        also stored as ``self.center``.
    """
    weighted_coords = 0
    total_mass = 0
    for atom in self.inputxyz:
        mass = element(atom[0]).atomic_weight
        weighted_coords += mass * np.array(atom[1])
        total_mass += mass
    self.center = weighted_coords / total_mass
    # NOTE: shifting self.inputxyz into the COM frame is intentionally left
    # disabled — the stored coordinates are NOT modified here.
    return self.center
# def int_coords(self, X):
# """" Generate coordinates in teh rotating and translating frame.
#
# This was trying to match Gaussian's way of computing the frequencies, taking out
# the rotational and translational modes, and IR intensities.
# """
# R = np.zeros((len(self.inputxyz), 3)) #Coords in COM
# M = np.zeros((len(self.inputxyz), 3)) #Mass 3x3 matrix with m^1/2
# T = np.zeros((len(self.inputxyz), 3)) #Translation matrix 3x3
# D = np.zeros((len(self.inputxyz)*3, 6))
# D1 = np.array([1, 0, 0, 1, 0, 0, 1, 0, 0]).reshape((3,3))
# D2 = np.array([0, 1, 0, 0, 1, 0, 0, 1, 0]).reshape((3,3))
# D3 = np.array([0, 0, 1, 0, 0, 1, 0, 0, 1]).reshape((3,3))
#
# for i in range(0, R.shape[0]):
# x = element(self.inputxyz[i][0])
# mass = np.sqrt(x.atomic_weight)
# M[i][i] = mass
# D1[i] = D1[i]*mass
# D2[i] = D2[i]*mass
# D3[i] = D3[i]*mass
# R[i] = np.array(self.inputxyz[i][1])
# P = np.dot(R, X.T)
# D1 = D1.flatten()
# D2 = D2.flatten()
# D3 = D3.flatten()
# D4 = np.dot(np.outer(P[:,1], X[2]) - np.outer(P[:,2], X[1]), M).flatten()
# print("D4:\n", np.dot(np.outer(P[:,1], X[2]) - np.outer(P[:,2], X[1]), M))
# print("D5\n", np.dot(np.outer(P[:,2], X[0]) - np.outer(P[:,0], X[2]), M))
# D5 = np.dot(np.outer(P[:,2], X[0]) - np.outer(P[:,0], X[2]), M).flatten()
# print("D6\n", np.dot(np.outer(P[:,0], X[1]) - np.outer(P[:,1], X[0]), M))
# D6 = np.dot(np.outer(P[:,0], X[1]) - np.outer(P[:,1], X[0]), M).flatten()
# #print("D1\n", D1)
# #print("D2\n", D2)
# #print("D3\n", D3)
# #print("D4\n", D4)
# #print("D5\n", D5)
# #print("D6\n", D6)
# #print(D[:,0].shape)
# #print(D1.shape)
# D[:,0] = D1
# D[:,1] = D2
# D[:,2] = D3
# D[:,3] = D4
# D[:,4] = D5
# D[:,5] = D6
# #print(D, D.shape)
#
# #normalize D tensor
# for j in range(0, D.shape[1]):
# norm = 0
# scalar = np.dot(D[:,j].T, D[:,j])
# print(scalar)
# if scalar < 1E-8:
# continue
# else:
# norm = 1/np.sqrt(scalar)
# D[:,j] = D[:,j]*norm
#
# q, r = np.linalg.qr(D)
# print(q, q.shape)
# #exit()
def apt_grad(self):
    """ Working on implementing this.

    Builds the atomic polar tensor (APT) by applying a finite electric field
    along each Cartesian direction and taking a central finite difference of
    the nuclear gradient with respect to the field strength.

    Side effects:
        Prints extensive diagnostics for each field direction.

    Returns
    -------
    apt_grad : ndarray (3N, 3)
        The derivative of the gradient w.r.t. the applied field after the
        link-atom projections are done.

    NOTE(review): ``nuc3`` stays all-zero (never assigned inside the loop)
    yet is printed as the "nuclear dipole moment"; likewise ``gradient``
    (nuclear + electronic) is computed but only ``gradient1`` (electronic)
    is stored in ``apt`` — both look like work-in-progress leftovers.
    """
    # Field-free dipole, kept only for the comparison printout below.
    extra_dip = self.qc_class.get_dipole(self.inputxyz)[0]
    #e_field = 1.889725E-4 #Got this number from Qchem
    e_field = 0.001
    E = [0, 0, 0]
    energy_vec = np.zeros((3))
    apt = np.zeros((3, ((len(self.prims)+len(self.notes))*3)))
    nucapt = np.zeros((3, ((len(self.prims)+len(self.notes))*3)))
    nuc3 = np.zeros((3))
    for i in range(0, 3):
        #no field
        e1, g1, dip, n, g_nuc, g_elec = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #no field
        print("\n############ Field applied in the ", i, "direction ###############\n")
        #positive direction field
        E[i] = e_field
        e2, g2, dipole2, nuc2, g_nuc2, g_elec2 = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #positive direction
        #negative direction field
        E[i] = -1*e_field
        e3, g3, dipole3, nuc, g_nuc3, g_elec3 = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #neg direction
        #setting field back to zero
        E[i] = 0
        print(g1)
        print(g2)
        print(g3)
        #central finite diff of gradient, a.u. -> Debye
        #print("positive grad:\n", g3, "\n Negative grad:\n", g2, "\n")
        gradient1 = ((g3-g2)/(2*e_field))/0.3934303
        print("$$$$$$$$$$$\n", gradient1)
        #add nuclear gradient to electronic
        gradient = g_nuc/0.393430 - gradient1 #for pyscf
        #checking finite diff of E w.r.t field (should be dipole moment)
        energy2 = (e2-e3)/(2*e_field)
        energy_vec[i] = energy2/0.393430 #a.u.(E_field) -> Debye, may need a neg sign
        #Subtracting elec dip from nuclear dip moment
        newvec = energy_vec #for psi4
        #newvec = nuc3 - energy_vec #for pyscf
        print("\nElectronic energy vec (Debye):", energy_vec, np.linalg.norm(energy_vec))
        print("\nNuclear dipole moment energy vec (Debye):", nuc3, np.linalg.norm(nuc3))
        print("\nDipole moment energy vec (Debye):", newvec, np.linalg.norm(newvec))
        print("\nDipole moment from no field (Debye):\n", extra_dip, np.linalg.norm(extra_dip))
        print("\ngradient no field", g1, "\n")
        print("\ngradient elec after finite diff:\n", gradient1)
        print("\ngradient nuc after finite diff:\n", g_nuc/0.393430)
        print("\ng_nuc - g_elec:\n", gradient)
        apt[i] = gradient1.flatten()
        #apt[i] = gradient.flatten() #nuclear and electronic grad
    #mass weight APT
    mass_apt = apt.T
    #Do link atom projection, multiply by local and principle inclusion/exculsion coefficients
    reshape_mass_hess = self.jacobian_hess.transpose(0, 2, 1, 3)
    jac_apt = reshape_mass_hess.reshape(reshape_mass_hess.shape[0]*reshape_mass_hess.shape[1],reshape_mass_hess.shape[2]*reshape_mass_hess.shape[3])
    apt_grad = np.dot(self.M, self.local_coeff*self.coeff*np.dot(jac_apt, mass_apt))
    return apt_grad
def build_apt(self):
    """
    Build the atomic polar tensor (APT) by central finite differences of the
    dipole moment with respect to each atomic Cartesian coordinate.

    The xyz input (including link atoms) is held in ``self.inputxyz`` as an
    ndarray-style structure rather than strings.
    Units of APT: Debye / (Angstrom * sqrt(amu)) after mass weighting.

    Returns
    -------
    oldapt : ndarray (3N, 3)
        Mass-weighted APT for the current fragment after the link-atom
        projection is applied.
    """
    apt = []
    for atom in range(0, len(self.prims) + len(self.notes)):  # atom iteration
        storing_vec = np.zeros((3, 3))
        # (dead code removed: a 1/sqrt(mass) factor was computed per atom but
        # never used — mass weighting happens below through self.M)
        for comp in range(0, 3):  # x, y, z iteration
            # forward displacement (+h)
            self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp] + self.step_size
            dip1, nuc1 = self.qc_class.get_dipole(self.inputxyz)
            # backward displacement (-h, net -2h from the point above)
            self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp] - 2 * self.step_size
            dip2, nuc2 = self.qc_class.get_dipole(self.inputxyz)
            # central difference of the dipole w.r.t. this coordinate
            storing_vec[comp] = (dip1 - dip2) / (2 * self.step_size)
            # restore the original coordinate
            self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp] + self.step_size
        apt.append(storing_vec)
    px = np.vstack(apt)
    # Link-atom projection: contract with the Jacobian, apply the local and
    # principal inclusion/exclusion coefficients, then mass weight via self.M.
    reshape_mass_hess = self.jacobian_hess.transpose(0, 2, 1, 3)
    jac_apt = reshape_mass_hess.reshape(
        reshape_mass_hess.shape[0] * reshape_mass_hess.shape[1],
        reshape_mass_hess.shape[2] * reshape_mass_hess.shape[3])
    oldapt = np.dot(self.M, self.local_coeff * self.coeff * np.dot(jac_apt, px))
    return oldapt
def mass_matrix(self):
    """Build the diagonal 1/sqrt(mass) weighting matrix.

    Returns
    -------
    ndarray (3N, 3N)
        Diagonal matrix with 1/sqrt(atomic weight) repeated three times per
        atom; also stored as ``self.M``.
    """
    natoms = self.molecule.natoms
    M = np.zeros((3 * natoms, 3 * natoms))
    for i in range(natoms):
        inv_sqrt_mass = 1 / np.sqrt(element(self.molecule.atomtable[i][0]).atomic_weight)
        # each atom contributes three identical diagonal entries (x, y, z)
        for j in range(3 * i, 3 * i + 3):
            M[j][j] = inv_sqrt_mass
    self.M = M
    return self.M
def mw_hessian(self, full_hessian):
    """
    Mass-weight the Hessian and return harmonic frequencies and normal
    modes for the full system.

    Parameters
    ----------
    full_hessian : ndarray
        Full (3N, 3N) Hessian for the full molecule (Hartree/Bohr^2).

    Returns
    -------
    freq : ndarray
        1D array of harmonic frequencies in cm^-1.
    modes : ndarray
        2D array holding the normal modes in its columns.
    M : ndarray
        The 1/sqrt(mass) weighting matrix that was applied.
    e_values : ndarray
        Raw eigenvalues of the mass-weighted Hessian.
    """
    np.set_printoptions(suppress=True)
    # Symmetric mass weighting: M H M, keeping the original evaluation order.
    half_weighted = np.dot(full_hessian, self.M)   # (3N,3N) x (3N,3N)
    mw_hess = np.dot(self.M, half_weighted)        # (3N,3N) x (3N,3N)
    e_values, modes = LA.eigh(mw_hess)
    print("\nEvalues of hessian [H/Bohr^2]):\n", e_values)
    # Convert H/(Bohr^2 amu) -> 1/s^2: Bohr->Angstrom, Hartree->J, amu->kg,
    # Angstrom->m; then 1/s -> cm^-1 via the speed of light.
    factor = (1.8897259886**2)*(4.3597482*10**-18)/(1.6603145*10**-27)/(1.0*10**-10)**2
    freq = (np.sqrt(e_values*factor))/(2*np.pi*2.9979*10**10)
    return freq, modes, self.M, e_values
| 41.483986
| 152
| 0.558849
|
4a0c6aee5e511b9324c646f82fbdfb9d541519b3
| 9,025
|
py
|
Python
|
api/rossmann/Rossmann.py
|
moraes-ederson/DataScienceEmProducao
|
de01ab47564c09900ac5580f867073ffd280e663
|
[
"MIT"
] | null | null | null |
api/rossmann/Rossmann.py
|
moraes-ederson/DataScienceEmProducao
|
de01ab47564c09900ac5580f867073ffd280e663
|
[
"MIT"
] | null | null | null |
api/rossmann/Rossmann.py
|
moraes-ederson/DataScienceEmProducao
|
de01ab47564c09900ac5580f867073ffd280e663
|
[
"MIT"
] | null | null | null |
import pickle
import inflection
import pandas as pd
import numpy as np
import math
import datetime
class Rossmann(object):
    """Inference-time pipeline for the Rossmann store-sales model.

    Reproduces the training-time cleaning, feature-engineering and data
    preparation steps, applying the transformers fitted during training
    (loaded from pickle files under ``parameter/``). Intended call order:
    ``data_cleaning`` -> ``feature_engineering`` -> ``data_preparation`` ->
    ``get_prediction``.
    """

    def __init__(self):
        # Root folder holding the pickled, pre-fitted transformers.
        self.home_path = '/media/ederson/Arquivos/MeusArquivos-Ederson/Cursos/Comunidade_DS/DataScienceEmProducao/'
        self.competition_distance_scaler = self._load_parameter('competition_distance_scaler.pkl')
        self.competition_time_month_scaler = self._load_parameter('competition_time_month_scaler.pkl')
        self.promo_time_week_scaler = self._load_parameter('promo_time_week_scaler.pkl')
        self.year_scaler = self._load_parameter('year_scaler.pkl')
        self.store_type_scaler = self._load_parameter('store_type_scaler.pkl')

    def _load_parameter(self, filename):
        """Load one pickled transformer, closing the file handle properly."""
        # Fix: the original left the file objects from open() unclosed.
        with open(self.home_path + 'parameter/' + filename, 'rb') as f:
            return pickle.load(f)

    def data_cleaning(self, df1):
        """Rename columns to snake_case, fix dtypes and fill missing values.

        Mirrors the training notebook's cleaning step ('Sales'/'Customers'
        are absent at inference time). Mutates and returns ``df1``.
        """
        ## 1.1. Rename Columns (without 'Sales', 'Customers')
        cols_old = ['Store', 'DayOfWeek', 'Date', 'Open', 'Promo',
                    'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',
                    'CompetitionDistance', 'CompetitionOpenSinceMonth',
                    'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
                    'Promo2SinceYear', 'PromoInterval']
        snakecase = lambda x: inflection.underscore(x)
        cols_new = map(snakecase, cols_old)
        df1.columns = cols_new

        ## 1.3. Data Types
        df1['date'] = pd.to_datetime(df1['date'])

        ## 1.5. Fillout NA
        # competition_distance: NaN means "no nearby competitor" -> large sentinel
        df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)
        # competition_open_since_month: default to the sale's own month
        df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if
                                                        math.isnan(x['competition_open_since_month']) else
                                                        x['competition_open_since_month'], axis=1)
        # competition_open_since_year: default to the sale's own year
        df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if
                                                       math.isnan(x['competition_open_since_year']) else
                                                       x['competition_open_since_year'], axis=1)
        # promo2_since_week: default to the sale's own ISO week
        df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if
                                             math.isnan(x['promo2_since_week']) else
                                             x['promo2_since_week'], axis=1)
        # promo2_since_year: default to the sale's own year
        df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if
                                             math.isnan(x['promo2_since_year']) else
                                             x['promo2_since_year'], axis=1)
        # promo_interval: 0 marks "store not in the rolling promo"
        month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
                     9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
        # Fix: assign instead of inplace= on a column (chained-assignment safe).
        df1['promo_interval'] = df1['promo_interval'].fillna(0)
        df1['month_map'] = df1['date'].dt.month.map(month_map)
        # is_promo: 1 when the sale's month falls inside the promo interval
        df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else
                                                                     1 if x['month_map'] in
                                                                     x['promo_interval'].split(',') else 0, axis=1)

        ## 1.6. Change Types
        df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
        df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
        df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
        df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
        return df1

    def feature_engineering(self, df2):
        """Derive calendar/competition/promo features and filter rows.

        Mutates ``df2`` and returns it restricted to open-store rows with
        the helper columns ('open', 'promo_interval', 'month_map') dropped.
        """
        # calendar decomposition
        df2['year'] = df2['date'].dt.year
        df2['month'] = df2['date'].dt.month
        df2['day'] = df2['date'].dt.day
        # Fix: Series.dt.weekofyear was removed in pandas 2.0;
        # isocalendar().week is the ISO-week equivalent (cast back to int64).
        df2['week_of_year'] = df2['date'].dt.isocalendar().week.astype(int)
        df2['year_week'] = df2['date'].dt.strftime('%Y-%W')

        # months since the competitor opened
        df2['competition_since'] = df2.apply(lambda x: datetime.datetime(year=x['competition_open_since_year'],
                                                                         month=x['competition_open_since_month'],
                                                                         day=1), axis=1)
        df2['competition_time_month'] = ((df2['date'] - df2['competition_since']) / 30).apply(lambda x: x.days).astype(int)

        # weeks since promo2 started (year-week decoded back to a date)
        df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
        df2['promo_since'] = df2['promo_since'].apply(lambda x:
                                                      datetime.datetime.strptime(x + '-1', '%Y-%W-%w') - datetime.timedelta(days=7))
        df2['promo_time_week'] = ((df2['date'] - df2['promo_since']) / 7).apply(lambda x: x.days).astype(int)

        # decode the single-letter categories into readable labels
        df2['assortment'] = df2['assortment'].apply(lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended')
        df2['state_holiday'] = df2['state_holiday'].apply(lambda x: 'public_holiday' if x == 'a' else
                                                          'easter_holiday' if x == 'b' else
                                                          'christmas' if x == 'c' else 'regular_day')

        # 3.0 STEP 03 - FEATURE FILTERING
        ## 3.1. Rows Filtering (without 'sales'): closed days carry no signal
        df2 = df2[df2['open'] != 0]
        ## 3.2. Columns Selection (without 'customers')
        cols_drop = ['open', 'promo_interval', 'month_map']
        df2 = df2.drop(cols_drop, axis=1)
        return df2

    def data_preparation(self, df5):
        """Rescale, encode and select the model's input features.

        Returns a DataFrame with exactly the columns the trained model
        expects, in training order.
        """
        ## 5.2. Rescaling
        # Fix: use transform (NOT fit_transform) — the scalers were fitted at
        # training time; refitting on the incoming batch would shift the
        # scaling and corrupt the predictions.
        df5['competition_distance'] = self.competition_distance_scaler.transform(df5[['competition_distance']].values)
        df5['competition_time_month'] = self.competition_time_month_scaler.transform(df5[['competition_time_month']].values)
        df5['promo_time_week'] = self.promo_time_week_scaler.transform(df5[['promo_time_week']].values)
        df5['year'] = self.year_scaler.transform(df5[['year']].values)

        ### 5.3.1. Enconding
        # state_holiday - One Hot Encoding
        df5 = pd.get_dummies(df5, prefix=['state_holiday'], columns=['state_holiday'])
        # store_type - Label Encoding (same fix: apply the fitted encoder)
        df5['store_type'] = self.store_type_scaler.transform(df5['store_type'])
        # assortment - Ordinal Encoding
        assortment_dict = {'basic': 1, 'extra': 2, 'extended': 3}
        df5['assortment'] = df5['assortment'].map(assortment_dict)

        ### 5.3.3. Nature Transformation
        # cyclic features encoded as sin/cos pairs so period boundaries join up
        df5['day_of_week_sin'] = df5['day_of_week'].apply(lambda x: np.sin(x * (2 * np.pi/7)))
        df5['day_of_week_cos'] = df5['day_of_week'].apply(lambda x: np.cos(x * (2 * np.pi/7)))
        df5['month_sin'] = df5['month'].apply(lambda x: np.sin(x * (2 * np.pi/12)))
        df5['month_cos'] = df5['month'].apply(lambda x: np.cos(x * (2 * np.pi/12)))
        df5['day_sin'] = df5['day'].apply(lambda x: np.sin(x * (2 * np.pi/30)))
        df5['day_cos'] = df5['day'].apply(lambda x: np.cos(x * (2 * np.pi/30)))
        df5['week_of_year_sin'] = df5['week_of_year'].apply(lambda x: np.sin(x * (2 * np.pi/52)))
        df5['week_of_year_cos'] = df5['week_of_year'].apply(lambda x: np.cos(x * (2 * np.pi/52)))

        # features selected by Boruta during training, in training order
        cols_selected = ['store','promo','store_type','assortment','competition_distance','competition_open_since_month','competition_open_since_year',
                         'promo2','promo2_since_week','promo2_since_year','competition_time_month','promo_time_week','day_of_week_sin','day_of_week_cos',
                         'month_sin','month_cos','day_sin','day_cos','week_of_year_sin','week_of_year_cos']
        return df5[cols_selected]

    def get_prediction(self, model, original_data, test_data):
        """Predict with *model* and attach de-logged sales to the raw rows.

        Returns the original rows plus a 'prediction' column, serialized as
        a JSON records string (the model was trained on log1p(sales), hence
        the expm1 inversion).
        """
        pred = model.predict(test_data)
        original_data['prediction'] = np.expm1(pred)
        return original_data.to_json(orient='records', date_format='iso')
| 46.282051
| 153
| 0.543934
|
4a0c6c0d609a0134177376603320605f1c5c3934
| 13,813
|
py
|
Python
|
src/model_training_ctimp.py
|
jhchung/summary-gwas-imputation
|
f860475fdd714de90fa66a1c6e5e2ff59d631d97
|
[
"MIT"
] | 1
|
2021-09-24T11:27:56.000Z
|
2021-09-24T11:27:56.000Z
|
src/model_training_ctimp.py
|
RajLabMSSM/summary-gwas-imputation
|
f860475fdd714de90fa66a1c6e5e2ff59d631d97
|
[
"MIT"
] | null | null | null |
src/model_training_ctimp.py
|
RajLabMSSM/summary-gwas-imputation
|
f860475fdd714de90fa66a1c6e5e2ff59d631d97
|
[
"MIT"
] | null | null | null |
__author__ = "alvaro barbeira"
import subprocess
import re
import os
import logging
import gzip
import numpy
import pandas
import collections
import shutil
import traceback
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.file_formats import Parquet
from genomic_tools_lib.miscellaneous import matrices, Genomics
# --- Per-gene scratch-layout helpers ----------------------------------------
# Every gene gets its own folder under the intermediate folder, containing
# the dosage matrix (x.txt), the SNP info table (info.txt), one expression
# file per tissue (y/), the CTIMP result folder (out/), and the generated
# bash driver script (ctimp.sh).
def _intermediate_folder(intermediate_folder, gene): return os.path.join(intermediate_folder, gene)
def _y_folder(intermediate_folder, gene): return os.path.join(_intermediate_folder(intermediate_folder, gene), "y")
def _x_path(intermediate_folder, gene): return os.path.join(_intermediate_folder(intermediate_folder, gene), "x.txt")
def _info_path(intermediate_folder, gene): return os.path.join(_intermediate_folder(intermediate_folder, gene), "info.txt")
def _outdir(intermediate_folder, gene): return os.path.join(_intermediate_folder(intermediate_folder, gene), "out")
def _weights(intermediate_folder, gene): return os.path.join(_outdir(intermediate_folder, gene), "result.m.est")
def _summary(intermediate_folder, gene): return os.path.join(_outdir(intermediate_folder, gene), "result.m.stats")
def _execution_script(intermediate_folder, gene): return os.path.join(_intermediate_folder(intermediate_folder, gene), "ctimp.sh")
########################################################################################################################
def prepare_ctimp(script_path, seed, intermediate_folder, data_annotation_, features_, features_data_, d_):
    """Stage all on-disk inputs for one gene's CTIMP run.

    Creates the gene's scratch folder, writes the per-tissue expression
    files and the genotype/info files, and generates the driver script.

    Raises
    ------
    RuntimeError
        If the gene's scratch folder already exists (dirty state).
    """
    gene = data_annotation_.gene_id
    scratch = _intermediate_folder(intermediate_folder, gene)
    if os.path.exists(scratch):
        logging.info("intermediate folder for %s exists, aborting", gene)
        raise RuntimeError("Dirty folder")
    os.makedirs(scratch)
    save_expression(intermediate_folder, gene, d_, features_data_)
    save_x(intermediate_folder, gene, features_, features_data_)
    execution_script(script_path, seed, intermediate_folder, gene)
def save_x(intermediate_folder, gene, features_, features_data_):
    """Write the dosage matrix (x.txt) and the SNP info table (info.txt)."""
    # Dosages: drop the individual-id column, space-separated, no header.
    dosages = features_data_.drop("individual", axis=1)
    Utilities.save_dataframe(dosages, _x_path(intermediate_folder, gene),
                             header=False, sep=" ")
    # SNP info in the column names CTIMP expects.
    info = features_[["id", "allele_0", "allele_1"]].rename(
        columns={"id": "SNP", "allele_0": "REF.0.", "allele_1": "ALT.1."})
    Utilities.save_dataframe(info, _info_path(intermediate_folder, gene))
def save_expression(intermediate_folder, gene, d_, features_data_):
    """Write one expression file per tissue for *gene* under the y/ folder.

    Each file holds headerless rows pairing the numeric individual id with
    the gene's expression; tissues lacking the gene are skipped (logged at
    level 8).
    """
    y_folder = _y_folder(intermediate_folder, gene)
    os.makedirs(y_folder)  # fix: reuse the path computed above instead of recomputing it
    for k, v in d_.items():
        if gene not in v:  # fix: idiomatic membership test
            logging.log(8, "%s not present in %s", gene, k)
            continue
        p = os.path.join(y_folder, k) + ".txt"
        # Map individual ids to the numeric ids used in the dosage file.
        v = v.merge(features_data_[["individual", "id"]], on="individual")[["id", gene]]
        Utilities.save_dataframe(v, p, header=False)
def execution_script(script_path, seed, intermediate_folder, gene):
    """Generate the bash script that launches the CTIMP R training for *gene*.

    The script cd's into the folder containing the R script (so its relative
    paths resolve) and writes results under the gene's out/ folder with the
    fixed prefix "result".
    """
    # Directory holding the R script; the generated script runs from there.
    r_ = os.path.split(script_path)[0]
    script=\
"""#!/bin/bash
cd {run_path}
#mkdir -p {outdir}
Rscript {script_path} \\
-dosage {dosage} \\
-info {info} \\
-expression {expression} \\
-ntune 50 \\
-nfold 5 \\
-outdir {outdir} \\
-seed {seed} \\
-gene_id result #> /dev/null
""".format(run_path=r_, script_path=script_path, seed=seed,
           dosage=_x_path(intermediate_folder, gene), info=_info_path(intermediate_folder, gene),
           expression=_y_folder(intermediate_folder, gene), outdir=_outdir(intermediate_folder, gene))
    with open(_execution_script(intermediate_folder,gene), "w") as s:
        s.write(script)
def setup_output(output_prefix, tissue_names, WEIGHTS_FIELDS, SUMMARY_FIELDS):
    """Open gzipped per-tissue output streams and write their headers.

    Returns
    -------
    (weights, summaries, covariances) : tuple of dict
        Each maps tissue name -> open gzip handle (binary mode).

    Raises
    ------
    RuntimeError
        If a weights file for any tissue already exists.
    """
    weights, summaries, covariances = {}, {}, {}
    for tissue in tissue_names:
        weights_path = "{}_{}_t_weights.txt.gz".format(output_prefix, tissue)
        if os.path.exists(weights_path):
            raise RuntimeError("weights exist! delete them or move them")
        w = gzip.open(weights_path, "w")
        w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
        weights[tissue] = w
        s = gzip.open("{}_{}_t_summary.txt.gz".format(output_prefix, tissue), "w")
        s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
        summaries[tissue] = s
        c = gzip.open("{}_{}_t_covariance.txt.gz".format(output_prefix, tissue), "w")
        c.write("GENE RSID1 RSID2 VALUE\n".encode())
        covariances[tissue] = c
    return weights, summaries, covariances
def set_down(weights, summaries, covariances, tissue_names, failed_run):
    """Close every per-tissue output stream; delete the files on failure.

    When *failed_run* is true the partially-written gzip files are removed
    so a failed run leaves no misleading outputs behind.
    """
    for tissue in tissue_names:
        handles = (weights[tissue], summaries[tissue], covariances[tissue])
        for handle in handles:
            handle.close()
        if failed_run:
            for handle in handles:
                os.remove(os.path.realpath(handle.name))
########################################################################################################################
def run(args):
    """Train CTIMP multi-tissue prediction models gene by gene.

    Loads per-tissue expression parquet files and genotype data, stages a
    scratch folder per gene, shells out to the CTIMP R script, and streams
    the resulting weights/summaries/covariances into per-tissue gzip files.
    On any exception the partially-written outputs are deleted via set_down.
    """
    Utilities.maybe_create_folder(args.intermediate_folder)
    Utilities.ensure_requisite_folders(args.output_prefix)
    logging.info("Opening data")
    # The tissue name is captured by the first group of the filename pattern.
    p_ = re.compile(args.data_name_pattern)
    f = [x for x in sorted(os.listdir(args.data_folder)) if p_.search(x)]
    tissue_names = [p_.search(x).group(1) for x in f]
    data = []
    for i in range(0, len(tissue_names)):
        logging.info("Loading %s", tissue_names[i])
        data.append((tissue_names[i], pq.ParquetFile(os.path.join(args.data_folder, f[i]))))
    data = collections.OrderedDict(data)
    # Genes present in at least one tissue's parquet schema.
    available_data = {x for p in data.values() for x in p.metadata.schema.names}
    logging.info("Preparing output")
    WEIGHTS_FIELDS=["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
    SUMMARY_FIELDS=["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model", "rho_avg", "pred.perf.R2", "pred.perf.pval"]
    Utilities.ensure_requisite_folders(args.output_prefix)
    if args.skip_regression:
        weights, summaries, covariances = None, None, None
    else:
        weights, summaries, covariances = setup_output(args.output_prefix, tissue_names, WEIGHTS_FIELDS, SUMMARY_FIELDS)
    logging.info("Loading data annotation")
    data_annotation = StudyUtilities._load_gene_annotation(args.data_annotation)
    # Only keep genes that actually have expression data somewhere.
    data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
    if args.chromosome or (args.sub_batches and args.sub_batch):
        data_annotation = StudyUtilities._filter_gene_annotation(data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
    logging.info("Kept %i entries", data_annotation.shape[0])
    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        # Row groups are laid out one per chromosome (1-based on the CLI).
        features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome-1).to_pandas()
    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)
    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]
    logging.info("Opening features")
    features = pq.ParquetFile(args.features)
    logging.info("Setting R seed")
    # One seed for the whole run so every gene's CV folds are reproducible.
    seed = numpy.random.randint(1e8)
    if args.run_tag:
        d = pandas.DataFrame({"run":[args.run_tag], "cv_seed":[seed]})[["run", "cv_seed"]]
        for t in tissue_names:
            Utilities.save_dataframe(d, "{}_{}_t_runs.txt.gz".format(args.output_prefix, t))
    failed_run=False
    try:
        for i, data_annotation_ in enumerate(data_annotation.itertuples()):
            logging.log(9, "processing %i/%i:%s", i+1, data_annotation.shape[0], data_annotation_.gene_id)
            logging.log(8, "loading data")
            # Per-tissue expression for this gene only.
            d_ = {}
            for k, v in data.items():
                d_[k] = Parquet._read(v, [data_annotation_.gene_id], to_pandas=True)
            # SNPs falling inside the gene's cis window.
            features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window,
                                                             features_metadata)
            if features_.shape[0] == 0:
                logging.log(9, "No features available")
                continue
            features_data_ = Parquet._read(features, [x for x in features_.id.values], to_pandas=True)
            # 1-based numeric ids stand in for individual ids in the CTIMP inputs.
            features_data_["id"] = range(1, features_data_.shape[0] + 1)
            features_data_ = features_data_[["individual", "id"] + [x for x in features_.id.values]]
            logging.log(8, "training")
            prepare_ctimp(args.script_path, seed, args.intermediate_folder, data_annotation_, features_, features_data_, d_)
            del(features_data_)
            del(d_)
            if args.skip_regression:
                continue
            # Run the generated CTIMP driver script, then harvest its outputs.
            subprocess.call(["bash", _execution_script(args.intermediate_folder, data_annotation_.gene_id)])
            w = pandas.read_table(_weights(args.intermediate_folder, data_annotation_.gene_id), sep="\s+")
            s = pandas.read_table(_summary(args.intermediate_folder, data_annotation_.gene_id), sep="\s+")
            # One summary row per tissue; emit weights/summary/covariance per tissue.
            for e_, entry in enumerate(s.itertuples()):
                entry_weights = w[["SNP", "REF.0.", "ALT.1.", entry.tissue]].rename(columns={
                    "SNP":"varID", "REF.0.":"ref_allele", "ALT.1.":"eff_allele", entry.tissue:"weight"
                })
                entry_weights = entry_weights[entry_weights.weight != 0]
                entry_weights = entry_weights.assign(gene = data_annotation_.gene_id)
                entry_weights = entry_weights.merge(features_, left_on="varID", right_on="id", how="left")
                entry_weights = entry_weights[WEIGHTS_FIELDS]
                if args.output_rsids:
                    # Fall back to the variant id when no rsid is known.
                    entry_weights.loc[entry_weights.rsid == "NA", "rsid"] = entry_weights.loc[entry_weights.rsid == "NA", "varID"]
                weights[entry.tissue].write(entry_weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
                entry_summary = s[s.tissue == entry.tissue].rename(columns={"zscore_pval":"pred.perf.pval", "rho_avg_squared":"pred.perf.R2"})
                entry_summary = entry_summary.assign(gene = data_annotation_.gene_id, alpha=0.5,
                                                     genename = data_annotation_.gene_name, gene_type= data_annotation_.gene_type, n_snps_in_window = features_.shape[0])
                entry_summary["n.snps.in.model"] = entry_weights.shape[0]
                #must repeat strings beause of weird pandas indexing issue
                entry_summary = entry_summary.drop(["R2", "n", "tissue"], axis=1)[["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model", "rho_avg", "pred.perf.R2", "pred.perf.pval"]]
                summaries[entry.tissue].write(entry_summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
                # Covariance of the dosages for the SNPs kept in the model.
                features_data_ = Parquet._read(features, [x for x in entry_weights.varID.values], to_pandas=True)
                var_ids = [x for x in entry_weights.varID.values]
                cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
                ids = [x for x in entry_weights.rsid.values] if args.output_rsids else var_ids
                cov = matrices._flatten_matrix_data([(data_annotation_.gene_id, ids, cov)])
                for cov_ in cov:
                    l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
                    covariances[entry.tissue].write(l)
            if not args.keep_intermediate_folder:
                logging.info("Cleaning up")
                shutil.rmtree(_intermediate_folder(args.intermediate_folder, data_annotation_.gene_id))
            if args.MAX_M and i >= args.MAX_M:
                logging.info("Early abort")
                break
    except Exception as e:
        logging.info("Exception running model training:\n%s", traceback.format_exc())
        failed_run=True
    finally:
        pass
    # if not args.keep_intermediate_folder:
    #     shutil.rmtree(args.intermediate_folder)
    if not args.skip_regression:
        set_down(weights, summaries, covariances, tissue_names, failed_run)
    logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
parser.add_argument("-run_tag")
parser.add_argument("-script_path")
parser.add_argument("-features")
parser.add_argument("-features_annotation")
parser.add_argument("-data_folder")
parser.add_argument("-data_name_pattern")
parser.add_argument("-data_annotation")
parser.add_argument("-window", type = int)
parser.add_argument("-intermediate_folder")
parser.add_argument("--output_rsids", action="store_true")
parser.add_argument("--chromosome", type = int)
parser.add_argument("--sub_batches", type = int)
parser.add_argument("--sub_batch", type =int)
parser.add_argument("--rsid_whitelist")
parser.add_argument("--keep_intermediate_folder", action="store_true")
parser.add_argument("--MAX_M", type=int)
parser.add_argument("--skip_regression", action="store_true")
parser.add_argument("-output_prefix")
parser.add_argument("-parsimony", default=10, type=int)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
| 48.637324
| 209
| 0.671252
|
4a0c6c58619fc5e3fb3f88e91b12cf0b9524c43b
| 27,279
|
py
|
Python
|
xarray/tests/test_sparse.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_sparse.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_sparse.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | null | null | null |
import pickle
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Variable
from xarray.core.npcompat import IS_NEP18_ACTIVE
from xarray.core.pycompat import sparse_array_type
from . import assert_equal, assert_identical
# Shorthands used throughout the parametrized tests below.
param = pytest.param
xfail = pytest.mark.xfail

# Sparse-array support relies on NumPy's __array_function__ protocol
# (NEP 18); without it none of these tests can run.
if not IS_NEP18_ACTIVE:
    pytest.skip(
        "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION is not enabled", allow_module_level=True
    )

# Skip the whole module when the optional `sparse` package is absent.
sparse = pytest.importorskip("sparse")
def assert_sparse_equal(a, b):
    """Assert both operands are sparse arrays with equal dense contents."""
    for operand in (a, b):
        assert isinstance(operand, sparse_array_type)
    np.testing.assert_equal(a.todense(), b.todense())
def make_ndarray(shape):
    """Dense test array: 0 .. prod(shape)-1 reshaped to *shape*."""
    size = np.prod(shape)
    return np.arange(size).reshape(shape)
def make_sparray(shape):
    """Random sparse COO array (10% density, fixed seed for reproducibility)."""
    return sparse.random(shape, density=0.1, random_state=0)
def make_xrvar(dim_lengths):
    """Sparse-backed Variable whose dims and shape come from *dim_lengths*."""
    dims = tuple(dim_lengths.keys())
    shape = tuple(dim_lengths.values())
    return xr.Variable(dims, make_sparray(shape=shape))
def make_xrarray(dim_lengths, coords=None, name="test"):
    """Sparse-backed DataArray; default coords are ranges per dimension."""
    if coords is None:
        coords = {dim: np.arange(length) for dim, length in dim_lengths.items()}
    data = make_sparray(shape=tuple(dim_lengths.values()))
    return xr.DataArray(
        data,
        dims=tuple(coords.keys()),
        coords=coords,
        name=name,
    )
class do:
    """Deferred method call: ``do("meth", *a, **k)(obj)`` -> ``obj.meth(*a, **k)``.

    The repr doubles as a readable pytest parametrize id.
    """

    def __init__(self, meth, *args, **kwargs):
        self.meth = meth
        self.args = args
        self.kwargs = kwargs

    def __call__(self, obj):
        bound = getattr(obj, self.meth)
        return bound(*self.args, **self.kwargs)

    def __repr__(self):
        return "obj.{}(*{}, **{})".format(self.meth, self.args, self.kwargs)
@pytest.mark.parametrize(
    "prop",
    [
        "chunks",
        "data",
        "dims",
        "dtype",
        "encoding",
        "imag",
        "nbytes",
        "ndim",
        # accessing .values would coerce the sparse payload to dense
        param("values", marks=xfail(reason="Coercion to dense")),
    ],
)
def test_variable_property(prop):
    # Smoke test: reading each property on a sparse-backed Variable must not
    # raise (the return value itself is not checked).
    var = make_xrvar({"x": 10, "y": 5})
    getattr(var, prop)
@pytest.mark.parametrize(
"func,sparse_output",
[
(do("all"), False),
(do("any"), False),
(do("astype", dtype=int), True),
(do("clip", min=0, max=1), True),
(do("coarsen", windows={"x": 2}, func=np.sum), True),
(do("compute"), True),
(do("conj"), True),
(do("copy"), True),
(do("count"), False),
(do("get_axis_num", dim="x"), False),
(do("isel", x=slice(2, 4)), True),
(do("isnull"), True),
(do("load"), True),
(do("mean"), False),
(do("notnull"), True),
(do("roll"), True),
(do("round"), True),
(do("set_dims", dims=("x", "y", "z")), True),
(do("stack", dimensions={"flat": ("x", "y")}), True),
(do("to_base_variable"), True),
(do("transpose"), True),
(do("unstack", dimensions={"x": {"x1": 5, "x2": 2}}), True),
(do("broadcast_equals", make_xrvar({"x": 10, "y": 5})), False),
(do("equals", make_xrvar({"x": 10, "y": 5})), False),
(do("identical", make_xrvar({"x": 10, "y": 5})), False),
param(
do("argmax"),
True,
marks=xfail(reason="Missing implementation for np.argmin"),
),
param(
do("argmin"),
True,
marks=xfail(reason="Missing implementation for np.argmax"),
),
param(
do("argsort"),
True,
marks=xfail(reason="'COO' object has no attribute 'argsort'"),
),
param(
do(
"concat",
variables=[
make_xrvar({"x": 10, "y": 5}),
make_xrvar({"x": 10, "y": 5}),
],
),
True,
marks=xfail(reason="Coercion to dense"),
),
param(
do("conjugate"),
True,
marks=xfail(reason="'COO' object has no attribute 'conjugate'"),
),
param(
do("cumprod"),
True,
marks=xfail(reason="Missing implementation for np.nancumprod"),
),
param(
do("cumsum"),
True,
marks=xfail(reason="Missing implementation for np.nancumsum"),
),
(do("fillna", 0), True),
param(
do("item", (1, 1)),
False,
marks=xfail(reason="'COO' object has no attribute 'item'"),
),
param(
do("median"),
False,
marks=xfail(reason="Missing implementation for np.nanmedian"),
),
param(do("max"), False),
param(do("min"), False),
param(
do("no_conflicts", other=make_xrvar({"x": 10, "y": 5})),
True,
marks=xfail(reason="mixed sparse-dense operation"),
),
param(
do("pad_with_fill_value", pad_widths={"x": (1, 1)}, fill_value=5),
True,
marks=xfail(reason="Missing implementation for np.pad"),
),
(do("prod"), False),
param(
do("quantile", q=0.5),
True,
marks=xfail(reason="Missing implementation for np.nanpercentile"),
),
param(
do("rank", dim="x"),
False,
marks=xfail(reason="Only implemented for NumPy arrays (via bottleneck)"),
),
param(
do("reduce", func=np.sum, dim="x"),
True,
marks=xfail(reason="Coercion to dense"),
),
param(
do("rolling_window", dim="x", window=2, window_dim="x_win"),
True,
marks=xfail(reason="Missing implementation for np.pad"),
),
param(
do("shift", x=2), True, marks=xfail(reason="mixed sparse-dense operation")
),
param(
do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd")
),
(do("sum"), False),
param(
do("var"), False, marks=xfail(reason="Missing implementation for np.nanvar")
),
param(do("to_dict"), False, marks=xfail(reason="Coercion to dense")),
(do("where", cond=make_xrvar({"x": 10, "y": 5}) > 0.5), True),
],
ids=repr,
)
def test_variable_method(func, sparse_output):
    """Apply *func* to a sparse-backed Variable and an equivalent dense one.

    When ``sparse_output`` is true the sparse result must still be backed by
    a ``sparse.SparseArray``; either way the values must match the dense run.
    """
    sparse_var = make_xrvar({"x": 10, "y": 5})
    dense_var = xr.Variable(sparse_var.dims, sparse_var.data.todense())
    result_sparse = func(sparse_var)
    result_dense = func(dense_var)
    if not sparse_output:
        assert np.allclose(result_sparse, result_dense, equal_nan=True)
    else:
        assert isinstance(result_sparse.data, sparse.SparseArray)
        assert np.allclose(
            result_sparse.data.todense(), result_dense.data, equal_nan=True
        )
# 1-D-only Variable methods, exercised against sparse- and dense-backed data;
# xfailed entries document operations that still coerce to dense or are
# missing on sparse.COO.
@pytest.mark.parametrize(
    "func,sparse_output",
    [
        (do("squeeze"), True),
        param(do("to_index"), False, marks=xfail(reason="Coercion to dense")),
        param(do("to_index_variable"), False, marks=xfail(reason="Coercion to dense")),
        param(
            do("searchsorted", 0.5),
            True,
            marks=xfail(reason="'COO' object has no attribute 'searchsorted'"),
        ),
    ],
)
def test_1d_variable_method(func, sparse_output):
    """Run *func* on sparse- and dense-backed 1-D Variables and compare."""
    var_s = make_xrvar({"x": 10})
    var_d = xr.Variable(var_s.dims, var_s.data.todense())
    ret_s = func(var_s)
    ret_d = func(var_d)
    if sparse_output:
        # Result must stay sparse and agree with the dense computation.
        assert isinstance(ret_s.data, sparse.SparseArray)
        assert np.allclose(ret_s.data.todense(), ret_d.data)
    else:
        assert np.allclose(ret_s, ret_d)
class TestSparseVariable:
    # Behavioural checks for xr.Variable wrapping a sparse.COO array.
    @pytest.fixture(autouse=True)
    def setUp(self):
        # Deterministic 4x6 sparse fixture (fixed random_state).
        self.data = sparse.random((4, 6), random_state=0, density=0.5)
        self.var = xr.Variable(("x", "y"), self.data)
    def test_unary_op(self):
        # Unary ops on the Variable must match the same op on the raw COO.
        assert_sparse_equal(-self.var.data, -self.data)
        assert_sparse_equal(abs(self.var).data, abs(self.data))
        assert_sparse_equal(self.var.round().data, self.data.round())
    @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
    def test_univariate_ufunc(self):
        # xarray ufunc wrapper must agree with applying numpy to the COO.
        assert_sparse_equal(np.sin(self.data), xu.sin(self.var).data)
    @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
    def test_bivariate_ufunc(self):
        # Both operand orders (Variable-scalar, scalar-Variable) must work.
        assert_sparse_equal(np.maximum(self.data, 0), xu.maximum(self.var, 0).data)
        assert_sparse_equal(np.maximum(self.data, 0), xu.maximum(0, self.var).data)
    def test_repr(self):
        # repr must show the sparse COO summary instead of densifying.
        expected = dedent(
            """\
            <xarray.Variable (x: 4, y: 6)>
            <COO: shape=(4, 6), dtype=float64, nnz=12, fill_value=0.0>"""
        )
        assert expected == repr(self.var)
    def test_pickle(self):
        # Pickle round-trip must preserve the sparse payload.
        v1 = self.var
        v2 = pickle.loads(pickle.dumps(v1))
        assert_sparse_equal(v1.data, v2.data)
    def test_missing_values(self):
        a = np.array([0, 1, np.nan, 3])
        s = sparse.COO.from_numpy(a)
        var_s = Variable("x", s)
        # fillna must replace the NaN; count must skip it.
        assert np.all(var_s.fillna(2).data.todense() == np.arange(4))
        assert np.all(var_s.count() == 3)
# Smoke test: accessing each of these DataArray properties on a
# sparse-backed array must not raise (no assertion on the value).
@pytest.mark.parametrize(
    "prop",
    [
        "attrs",
        "chunks",
        "coords",
        "data",
        "dims",
        "dtype",
        "encoding",
        "imag",
        "indexes",
        "loc",
        "name",
        "nbytes",
        "ndim",
        "plot",
        "real",
        "shape",
        "size",
        "sizes",
        "str",
        "variable",
    ],
)
def test_dataarray_property(prop):
    """Property access on a sparse-backed DataArray must not raise."""
    arr = make_xrarray({"x": 10, "y": 5})
    getattr(arr, prop)
# DataArray method matrix: each entry is (bound method call, expect-sparse-
# output). xfailed entries document operations that still coerce to dense,
# are unimplemented for sparse.COO, or need multi-iterable COO indexing.
@pytest.mark.parametrize(
    "func,sparse_output",
    [
        (do("all"), False),
        (do("any"), False),
        (do("assign_attrs", {"foo": "bar"}), True),
        (do("assign_coords", x=make_xrarray({"x": 10}).x + 1), True),
        (do("astype", int), True),
        (do("clip", min=0, max=1), True),
        (do("compute"), True),
        (do("conj"), True),
        (do("copy"), True),
        (do("count"), False),
        (do("diff", "x"), True),
        (do("drop", "x"), True),
        (do("expand_dims", {"z": 2}, axis=2), True),
        (do("get_axis_num", "x"), False),
        (do("get_index", "x"), False),
        (do("identical", make_xrarray({"x": 5, "y": 5})), False),
        (do("integrate", "x"), True),
        (do("isel", {"x": slice(0, 3), "y": slice(2, 4)}), True),
        (do("isnull"), True),
        (do("load"), True),
        (do("mean"), False),
        (do("persist"), True),
        (do("reindex", {"x": [1, 2, 3]}), True),
        (do("rename", "foo"), True),
        (do("reorder_levels"), True),
        (do("reset_coords", drop=True), True),
        (do("reset_index", "x"), True),
        (do("round"), True),
        (do("sel", x=[0, 1, 2]), True),
        (do("shift"), True),
        (do("sortby", "x", ascending=False), True),
        (do("stack", z={"x", "y"}), True),
        (do("transpose"), True),
        # TODO
        # set_index
        # swap_dims
        (do("broadcast_equals", make_xrvar({"x": 10, "y": 5})), False),
        (do("equals", make_xrvar({"x": 10, "y": 5})), False),
        param(
            do("argmax"),
            True,
            marks=xfail(reason="Missing implementation for np.argmax"),
        ),
        param(
            do("argmin"),
            True,
            marks=xfail(reason="Missing implementation for np.argmin"),
        ),
        param(
            do("argsort"),
            True,
            marks=xfail(reason="'COO' object has no attribute 'argsort'"),
        ),
        param(
            do("bfill", dim="x"),
            False,
            marks=xfail(reason="Missing implementation for np.flip"),
        ),
        (do("combine_first", make_xrarray({"x": 10, "y": 5})), True),
        param(
            do("conjugate"),
            False,
            marks=xfail(reason="'COO' object has no attribute 'conjugate'"),
        ),
        param(
            do("cumprod"),
            True,
            marks=xfail(reason="Missing implementation for np.nancumprod"),
        ),
        param(
            do("cumsum"),
            True,
            marks=xfail(reason="Missing implementation for np.nancumsum"),
        ),
        param(
            do("differentiate", "x"),
            False,
            marks=xfail(reason="Missing implementation for np.gradient"),
        ),
        param(
            do("dot", make_xrarray({"x": 10, "y": 5})),
            True,
            marks=xfail(reason="Missing implementation for np.einsum"),
        ),
        param(do("dropna", "x"), False, marks=xfail(reason="Coercion to dense")),
        param(do("ffill", "x"), False, marks=xfail(reason="Coercion to dense")),
        (do("fillna", 0), True),
        param(
            do("interp", coords={"x": np.arange(10) + 0.5}),
            True,
            marks=xfail(reason="Coercion to dense"),
        ),
        param(
            do(
                "interp_like",
                make_xrarray(
                    {"x": 10, "y": 5},
                    coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5},
                ),
            ),
            True,
            marks=xfail(reason="Indexing COO with more than one iterable index"),
        ),
        param(do("interpolate_na", "x"), True, marks=xfail(reason="Coercion to dense")),
        param(
            do("isin", [1, 2, 3]),
            False,
            marks=xfail(reason="Missing implementation for np.isin"),
        ),
        param(
            do("item", (1, 1)),
            False,
            marks=xfail(reason="'COO' object has no attribute 'item'"),
        ),
        param(do("max"), False),
        param(do("min"), False),
        param(
            do("median"),
            False,
            marks=xfail(reason="Missing implementation for np.nanmedian"),
        ),
        (do("notnull"), True),
        (do("pipe", np.sum, axis=1), True),
        (do("prod"), False),
        param(
            do("quantile", q=0.5),
            False,
            marks=xfail(reason="Missing implementation for np.nanpercentile"),
        ),
        param(
            do("rank", "x"),
            False,
            marks=xfail(reason="Only implemented for NumPy arrays (via bottleneck)"),
        ),
        param(
            do("reduce", np.sum, dim="x"),
            False,
            marks=xfail(reason="Coercion to dense"),
        ),
        param(
            do(
                "reindex_like",
                make_xrarray(
                    {"x": 10, "y": 5},
                    coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5},
                ),
            ),
            True,
            marks=xfail(reason="Indexing COO with more than one iterable index"),
        ),
        (do("roll", x=2, roll_coords=True), True),
        param(
            do("sel", x=[0, 1, 2], y=[2, 3]),
            True,
            marks=xfail(reason="Indexing COO with more than one iterable index"),
        ),
        param(
            do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd")
        ),
        (do("sum"), False),
        param(
            do("var"), False, marks=xfail(reason="Missing implementation for np.nanvar")
        ),
        param(
            do("where", make_xrarray({"x": 10, "y": 5}) > 0.5),
            False,
            marks=xfail(reason="Conversion of dense to sparse when using sparse mask"),
        ),
    ],
    ids=repr,
)
def test_dataarray_method(func, sparse_output):
    """Run *func* on sparse- and dense-backed DataArrays and compare results."""
    arr_s = make_xrarray(
        {"x": 10, "y": 5}, coords={"x": np.arange(10), "y": np.arange(5)}
    )
    arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims)
    ret_s = func(arr_s)
    ret_d = func(arr_d)
    if sparse_output:
        # Result must remain sparse and match the dense computation.
        assert isinstance(ret_s.data, sparse.SparseArray)
        assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
    else:
        assert np.allclose(ret_s, ret_d, equal_nan=True)
# 1-D-only DataArray methods against sparse- and dense-backed arrays.
@pytest.mark.parametrize(
    "func,sparse_output",
    [
        (do("squeeze"), True),
        param(
            do("searchsorted", [1, 2, 3]),
            False,
            marks=xfail(reason="'COO' object has no attribute 'searchsorted'"),
        ),
    ],
)
def test_datarray_1d_method(func, sparse_output):
    """Run *func* on sparse- and dense-backed 1-D DataArrays and compare."""
    arr_s = make_xrarray({"x": 10}, coords={"x": np.arange(10)})
    arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims)
    ret_s = func(arr_s)
    ret_d = func(arr_d)
    if sparse_output:
        assert isinstance(ret_s.data, sparse.SparseArray)
        assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
    else:
        assert np.allclose(ret_s, ret_d, equal_nan=True)
class TestSparseDataArrayAndDataset:
    """Integration tests for DataArray/Dataset operations on sparse-backed data."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Paired sparse/dense fixtures holding identical values.
        self.sp_ar = sparse.random((4, 6), random_state=0, density=0.5)
        self.sp_xr = xr.DataArray(
            self.sp_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo"
        )
        self.ds_ar = self.sp_ar.todense()
        self.ds_xr = xr.DataArray(
            self.ds_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo"
        )

    def test_to_dataset_roundtrip(self):
        """Converting to a Dataset and back must be lossless."""
        x = self.sp_xr
        assert_equal(x, x.to_dataset("x").to_array("x"))

    def test_align(self):
        """Inner-join align must keep data sparse and intersect coords."""
        a1 = xr.DataArray(
            sparse.COO.from_numpy(np.arange(4)),
            dims=["x"],
            coords={"x": ["a", "b", "c", "d"]},
        )
        b1 = xr.DataArray(
            sparse.COO.from_numpy(np.arange(4)),
            dims=["x"],
            coords={"x": ["a", "b", "d", "e"]},
        )
        a2, b2 = xr.align(a1, b1, join="inner")
        assert isinstance(a2.data, sparse.SparseArray)
        assert isinstance(b2.data, sparse.SparseArray)
        assert np.all(a2.coords["x"].data == ["a", "b", "d"])
        assert np.all(b2.coords["x"].data == ["a", "b", "d"])

    @pytest.mark.xfail(
        reason="COO objects currently do not accept more than one "
        "iterable index at a time"
    )
    def test_align_2d(self):
        """2-D inner-join align of offset coordinate grids."""
        A1 = xr.DataArray(
            self.sp_ar,
            dims=["x", "y"],
            coords={
                "x": np.arange(self.sp_ar.shape[0]),
                "y": np.arange(self.sp_ar.shape[1]),
            },
        )
        A2 = xr.DataArray(
            self.sp_ar,
            dims=["x", "y"],
            coords={
                "x": np.arange(1, self.sp_ar.shape[0] + 1),
                "y": np.arange(1, self.sp_ar.shape[1] + 1),
            },
        )
        B1, B2 = xr.align(A1, A2, join="inner")
        assert np.all(B1.coords["x"] == np.arange(1, self.sp_ar.shape[0]))
        # The y intersection runs to shape[1]; the original asserted
        # shape[0] here — a copy-paste slip masked by the xfail above.
        assert np.all(B1.coords["y"] == np.arange(1, self.sp_ar.shape[1]))
        assert np.all(B1.coords["x"] == B2.coords["x"])
        assert np.all(B1.coords["y"] == B2.coords["y"])

    def test_align_outer(self):
        """Outer-join align must keep data sparse and union the coords."""
        a1 = xr.DataArray(
            sparse.COO.from_numpy(np.arange(4)),
            dims=["x"],
            coords={"x": ["a", "b", "c", "d"]},
        )
        b1 = xr.DataArray(
            sparse.COO.from_numpy(np.arange(4)),
            dims=["x"],
            coords={"x": ["a", "b", "d", "e"]},
        )
        a2, b2 = xr.align(a1, b1, join="outer")
        assert isinstance(a2.data, sparse.SparseArray)
        assert isinstance(b2.data, sparse.SparseArray)
        assert np.all(a2.coords["x"].data == ["a", "b", "c", "d", "e"])
        assert np.all(b2.coords["x"].data == ["a", "b", "c", "d", "e"])

    def test_concat(self):
        """Concatenation must map onto sparse.concatenate along the right axis."""
        ds1 = xr.Dataset(data_vars={"d": self.sp_xr})
        ds2 = xr.Dataset(data_vars={"d": self.sp_xr})
        ds3 = xr.Dataset(data_vars={"d": self.sp_xr})
        out = xr.concat([ds1, ds2, ds3], dim="x")
        assert_sparse_equal(
            out["d"].data,
            sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=0),
        )
        out = xr.concat([self.sp_xr, self.sp_xr, self.sp_xr], dim="y")
        assert_sparse_equal(
            out.data, sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=1)
        )

    def test_stack(self):
        """stack/unstack must round-trip sparse data through a MultiIndex."""
        arr = make_xrarray({"w": 2, "x": 3, "y": 4})
        stacked = arr.stack(z=("x", "y"))
        z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
        expected = xr.DataArray(
            arr.data.reshape((2, -1)), {"w": [0, 1], "z": z}, dims=["w", "z"]
        )
        assert_equal(expected, stacked)
        roundtripped = stacked.unstack()
        assert arr.identical(roundtripped)

    @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
    def test_ufuncs(self):
        """xarray ufunc wrapper must agree with numpy on sparse DataArrays."""
        x = self.sp_xr
        assert_equal(np.sin(x), xu.sin(x))

    def test_dataarray_repr(self):
        """DataArray repr must show sparse summaries, not densified values."""
        a = xr.DataArray(
            sparse.COO.from_numpy(np.ones(4)),
            dims=["x"],
            coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))},
        )
        expected = dedent(
            """\
            <xarray.DataArray (x: 4)>
            <COO: shape=(4,), dtype=float64, nnz=4, fill_value=0.0>
            Coordinates:
                y        (x) int64 <COO: nnz=3, fill_value=0>
            Dimensions without coordinates: x"""
        )
        assert expected == repr(a)

    def test_dataset_repr(self):
        """Dataset repr must show sparse summaries for vars and coords."""
        ds = xr.Dataset(
            data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))},
            coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))},
        )
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Coordinates:
                y        (x) int64 <COO: nnz=3, fill_value=0>
            Dimensions without coordinates: x
            Data variables:
                a        (x) float64 <COO: nnz=4, fill_value=0.0>"""
        )
        assert expected == repr(ds)

    def test_sparse_dask_dataset_repr(self):
        """Chunked sparse data must advertise sparse.COO as the dask meta."""
        pytest.importorskip("dask", minversion="2.0")
        ds = xr.Dataset(
            data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))}
        ).chunk()
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Dimensions without coordinates: x
            Data variables:
                a        (x) float64 dask.array<chunksize=(4,), meta=sparse.COO>"""
        )
        assert expected == repr(ds)

    def test_dataarray_pickle(self):
        """Pickle round-trip of a sparse DataArray must be identical."""
        a1 = xr.DataArray(
            sparse.COO.from_numpy(np.ones(4)),
            dims=["x"],
            coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))},
        )
        a2 = pickle.loads(pickle.dumps(a1))
        assert_identical(a1, a2)

    def test_dataset_pickle(self):
        """Pickle round-trip of a sparse Dataset must be identical."""
        ds1 = xr.Dataset(
            data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))},
            coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))},
        )
        ds2 = pickle.loads(pickle.dumps(ds1))
        assert_identical(ds1, ds2)

    def test_coarsen(self):
        """coarsen().mean() must stay sparse and match the dense result."""
        a1 = self.ds_xr
        a2 = self.sp_xr
        m1 = a1.coarsen(x=2, boundary="trim").mean()
        m2 = a2.coarsen(x=2, boundary="trim").mean()
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail(reason="No implementation of np.pad")
    def test_rolling(self):
        a1 = self.ds_xr
        a2 = self.sp_xr
        m1 = a1.rolling(x=2, center=True).mean()
        m2 = a2.rolling(x=2, center=True).mean()
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail(reason="Coercion to dense")
    def test_rolling_exp(self):
        a1 = self.ds_xr
        a2 = self.sp_xr
        m1 = a1.rolling_exp(x=2, center=True).mean()
        m2 = a2.rolling_exp(x=2, center=True).mean()
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail(reason="No implementation of np.einsum")
    def test_dot(self):
        # Fixed typo: the fixture attribute is sp_xr, not xp_xr (the
        # original raised AttributeError instead of hitting the einsum gap).
        a1 = self.sp_xr.dot(self.sp_xr[0])
        a2 = self.sp_ar.dot(self.sp_ar[0])
        assert_equal(a1, a2)

    @pytest.mark.xfail(reason="Groupby reductions produce dense output")
    def test_groupby(self):
        x1 = self.ds_xr
        x2 = self.sp_xr
        m1 = x1.groupby("x").mean(xr.ALL_DIMS)
        m2 = x2.groupby("x").mean(xr.ALL_DIMS)
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail(reason="Groupby reductions produce dense output")
    def test_groupby_first(self):
        x = self.sp_xr.copy()
        x.coords["ab"] = ("x", ["a", "a", "b", "b"])
        x.groupby("ab").first()
        x.groupby("ab").first(skipna=False)

    @pytest.mark.xfail(reason="Groupby reductions produce dense output")
    def test_groupby_bins(self):
        x1 = self.ds_xr
        x2 = self.sp_xr
        m1 = x1.groupby_bins("x", bins=[0, 3, 7, 10]).sum(xr.ALL_DIMS)
        m2 = x2.groupby_bins("x", bins=[0, 3, 7, 10]).sum(xr.ALL_DIMS)
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail(reason="Resample produces dense output")
    def test_resample(self):
        t1 = xr.DataArray(
            np.linspace(0, 11, num=12),
            coords=[
                pd.date_range("15/12/1999", periods=12, freq=pd.DateOffset(months=1))
            ],
            dims="time",
        )
        t2 = t1.copy()
        t2.data = sparse.COO(t2.data)
        m1 = t1.resample(time="QS-DEC").mean()
        m2 = t2.resample(time="QS-DEC").mean()
        assert isinstance(m2.data, sparse.SparseArray)
        assert np.allclose(m1.data, m2.data.todense())

    @pytest.mark.xfail
    def test_reindex(self):
        x1 = self.ds_xr
        x2 = self.sp_xr
        for kwargs in [
            {"x": [2, 3, 4]},
            {"x": [1, 100, 2, 101, 3]},
            {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
        ]:
            m1 = x1.reindex(**kwargs)
            m2 = x2.reindex(**kwargs)
            assert np.allclose(m1, m2, equal_nan=True)

    @pytest.mark.xfail
    def test_merge(self):
        x = self.sp_xr
        y = xr.merge([x, x.rename("bar")]).to_array()
        assert isinstance(y, sparse.SparseArray)

    @pytest.mark.xfail
    def test_where(self):
        a = np.arange(10)
        cond = a > 3
        xr.DataArray(a).where(cond)
        s = sparse.COO.from_numpy(a)
        cond = s > 3
        xr.DataArray(s).where(cond)
        x = xr.DataArray(s)
        cond = x > 3
        x.where(cond)
class TestSparseCoords:
    @pytest.mark.xfail(reason="Coercion of coords to dense")
    def test_sparse_coords(self):
        # Construction with a sparse coordinate should work without
        # densifying the coordinate (currently it does — hence the xfail).
        xr.DataArray(
            sparse.COO.from_numpy(np.arange(4)),
            dims=["x"],
            coords={"x": sparse.COO.from_numpy([1, 2, 3, 4])},
        )
def test_chunk():
    """Chunking sparse-backed objects must keep sparse.COO as the dask meta."""
    coo = sparse.COO.from_numpy(np.array([0, 0, 1, 2]))
    arr = DataArray(coo)
    chunked = arr.chunk(2)
    assert chunked.chunks == ((2, 2),)
    assert isinstance(chunked.data._meta, sparse.COO)
    assert_identical(chunked, arr)

    ds = arr.to_dataset(name="a")
    chunked_ds = ds.chunk(2)
    assert chunked_ds.chunks == {"dim_0": (2, 2)}
    assert_identical(chunked_ds, ds)
| 32.017606
| 88
| 0.521463
|
4a0c6c5d29c9152d6748d728d706ae5b56ad241d
| 2,431
|
py
|
Python
|
deployment/modules/solr.py
|
fizista/Mturk-Tracker
|
9016528a36054112d2dbc63035eff5d4cd82001f
|
[
"MIT"
] | null | null | null |
deployment/modules/solr.py
|
fizista/Mturk-Tracker
|
9016528a36054112d2dbc63035eff5d4cd82001f
|
[
"MIT"
] | null | null | null |
deployment/modules/solr.py
|
fizista/Mturk-Tracker
|
9016528a36054112d2dbc63035eff5d4cd82001f
|
[
"MIT"
] | null | null | null |
from os.path import join as pjoin, isdir
from fabric.api import sudo, settings, env
from fabric.colors import yellow
from modules.utils import (PROPER_SUDO_PREFIX as SUDO_PREFIX, show,
cset, cget, local_files_dir, upload_templated_folder_with_perms,
upload_template_with_perms, create_target_directories)
def provision(update=False):
    """Uploads an install script to /project_name/scripts and runs it.

    The script will not download solr if '/tmp/{project_name}/solr.zip' exists,
    nor it will attempt an install (eg. unpack and copy) if the following file
    exists: '{supervisor_dir}/solr/fabric_solr_install_success' (root of where
    solr is installed).

    Use update=True is as an override.
    """
    # Upload the installer as {script_dir}/setup_solr.sh, templated with the
    # deployment context.
    user = cget("user")
    solr_dir = cset('solr_dir', pjoin(cget("service_dir"), 'solr'))
    script_name = "setup_solr.sh"
    source = pjoin(cget("local_root"), 'deployment', 'scripts', script_name)
    dest_scripts = cget("script_dir")
    create_target_directories([dest_scripts, solr_dir], "700", user)
    context = dict(env['ctx'])
    destination = pjoin(dest_scripts, script_name)
    upload_template_with_perms(source, destination, context, mode="644")
    # Run the installer; the script copies files into MTURK/solr.
    show(yellow("Installing solr with update=%s." % update))
    with settings(sudo_prefix=SUDO_PREFIX, warn_only=True):
        script = destination
        ret = sudo("MTURK={home} && UPDATE={update} && . {script}".format(
            home=cget('service_dir'), script=script,
            update='true' if update else 'false'))
        if ret.return_code != 0:
            # Fixed typo in the error message ("sorl" -> "solr").
            show(yellow("Error while installing solr."))
def configure():
    """Upload solr configuration files/templates to the service directory.

    Each entry in the ``solr_files`` config (or the whole local ``solr``
    directory when unset) is templated with the deployment context and
    uploaded, recursively for directories.
    """
    context = dict(env["ctx"])
    local_dir = local_files_dir("solr")
    dest_dir = pjoin(cget('service_dir'), 'solr')
    confs = cget("solr_files") or [local_dir]
    show(yellow("Uploading solr configuration files: %s." % confs))
    for name in confs:
        source = pjoin(local_dir, name)
        if not isdir(source):
            # Single file: template and upload it in place.
            upload_template_with_perms(
                source, pjoin(dest_dir, name), context, mode="644")
        else:
            # Directory: mirror it recursively with templating.
            upload_templated_folder_with_perms(
                source, local_dir, dest_dir, context,
                mode="644", directories_mode="700")
| 40.516667
| 79
| 0.678733
|
4a0c6e6ec8500279fdc9f8254cf1f6f16533b56d
| 2,482
|
py
|
Python
|
parallel-nn/src_mnist/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 2
|
2021-07-30T10:04:18.000Z
|
2022-01-30T18:29:30.000Z
|
parallel-nn/src_mnist/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 1
|
2021-10-17T20:08:41.000Z
|
2021-10-17T20:08:41.000Z
|
parallel-nn/src_mnist/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 2
|
2021-07-30T10:04:20.000Z
|
2021-09-01T00:07:14.000Z
|
'''
How to Train Neural Networks in Parallel (From Scratch)
===
Author: Nicholas Geneva (MIT Liscense)
url: https://nicholasgeneva.com/blog/
github: https://github.com/NickGeneva/blog-code
===
'''
import argparse
import numpy as np
import torch
import random
from pathlib import Path
class Parser(argparse.ArgumentParser):
    """Command-line argument parser for the MNIST parallel-training demo."""

    def __init__(self):
        super(Parser, self).__init__(description='Read')
        self.add_argument('--comm', type=str, default="serial", choices=['serial', 'mpi', 'nccl', 'ncclp', 'gloo'], help='experiment name')
        # dataset sizes and batching
        self.add_argument('--ntrain', type=int, default=10000, help="number of training data")
        self.add_argument('--ntest', type=int, default=1000, help="number of training data")
        self.add_argument('--train-batch-size', type=int, default=256, help='batch size for training')
        self.add_argument('--test-batch-size', type=int, default=64, help='batch size for testing')
        # optimisation schedule
        self.add_argument('--epoch-start', type=int, default=0, help='epoch to start at, will load pre-trained network')
        self.add_argument('--epochs', type=int, default=300, help='number of epochs to train')
        self.add_argument('--lr', type=float, default=0.001, help='ADAM learning rate')
        self.add_argument('--seed', type=int, default=12345, help='manual seed used in PyTorch and Numpy')
        # logging / checkpoint cadence
        self.add_argument('--plot-freq', type=int, default=25, help='how many epochs to wait before plotting test output')
        self.add_argument('--test-freq', type=int, default=5, help='how many epochs to test the model')
        self.add_argument('--ckpt-freq', type=int, default=25, help='how many epochs to wait before saving the model')

    def parse(self):
        """Parse argv, create the output directory tree and seed all RNGs."""
        parsed = self.parse_args()
        run_root = (
            Path('./mnist_outputs')
            / '{}'.format(parsed.comm)
            / 'ntrain{}_batch{}'.format(parsed.ntrain, parsed.train_batch_size)
        )
        parsed.run_dir = run_root
        parsed.ckpt_dir = run_root / "checkpoints"
        parsed.pred_dir = run_root / "predictions"
        for directory in (parsed.run_dir, parsed.ckpt_dir, parsed.pred_dir):
            directory.mkdir(parents=True, exist_ok=True)
        # Seed every RNG source for reproducibility (same call order as before).
        if parsed.seed is None:
            parsed.seed = random.randint(1, 10000)
        random.seed(parsed.seed)
        torch.manual_seed(parsed.seed)
        np.random.seed(seed=parsed.seed)
        return parsed
| 43.54386
| 139
| 0.651491
|
4a0c6f072f133290d9877a706ab3a723765e085e
| 7,625
|
py
|
Python
|
fpdb/common/table.py
|
xvzezi/filedb-python
|
513de426976e2782aa9aced1a2bf522db7aae51d
|
[
"MIT"
] | null | null | null |
fpdb/common/table.py
|
xvzezi/filedb-python
|
513de426976e2782aa9aced1a2bf522db7aae51d
|
[
"MIT"
] | null | null | null |
fpdb/common/table.py
|
xvzezi/filedb-python
|
513de426976e2782aa9aced1a2bf522db7aae51d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
###########################
# file: table.py
# date: 2021-7-16
# author: Sturmfy
# desc: wrapper of the FileJson
# version:
# 2021-7-16 init design
###########################
from .cache import JsonFile
class FilterType:
    """Integer tags identifying the operation a Filter node performs."""
    EQUAL = 0
    LESS = 1
    LESSEQ = 2
    GRTR = 3
    GRTREQ = 4
    NOT = 5
    OR = 6
    AND = 7


class Filter(object):
    """A composable predicate tree evaluated against a single value.

    Leaf nodes (EQUAL/LESS/LESSEQ/GRTR/GRTREQ) compare the value under
    test against a stored operand; NOT/OR/AND combine child filters.
    Build instances through the classmethod constructors below.
    """

    def __init__(self):
        super(Filter, self).__init__()
        self.__lval = None
        self.__rval = None
        self.__type = None

    def eval(self, val):
        """Return the result of applying this filter node to *val*."""
        left = self.__lval
        if isinstance(left, Filter):
            left = left.eval(val)
        right = self.__rval
        if isinstance(right, Filter):
            right = right.eval(val)
        kind = self.__type
        if kind == FilterType.NOT:
            return not right
        if kind == FilterType.OR:
            return left or right
        if kind == FilterType.AND:
            return left and right
        if kind == FilterType.EQUAL:
            return val == right
        if kind == FilterType.LESS:
            return val < right
        if kind == FilterType.LESSEQ:
            return val <= right
        if kind == FilterType.GRTR:
            return val > right
        if kind == FilterType.GRTREQ:
            return val >= right
        # Unknown/unset node type: match nothing.
        return False

    @classmethod
    def __build(cls, kind, lval, rval):
        # Internal factory shared by all public constructors below.
        node = cls()
        node.__type = kind
        node.__lval = lval
        node.__rval = rval
        return node

    @classmethod
    def NOT(cls, tar):
        return cls.__build(FilterType.NOT, None, tar)

    @classmethod
    def OR(cls, tar1, tar2):
        return cls.__build(FilterType.OR, tar1, tar2)

    @classmethod
    def AND(cls, tar1, tar2):
        return cls.__build(FilterType.AND, tar1, tar2)

    @classmethod
    def EQUAL(cls, tar):
        return cls.__build(FilterType.EQUAL, None, tar)

    @classmethod
    def LESS(cls, tar):
        return cls.__build(FilterType.LESS, None, tar)

    @classmethod
    def LESSEQ(cls, tar):
        return cls.__build(FilterType.LESSEQ, None, tar)

    @classmethod
    def GRTR(cls, tar):
        return cls.__build(FilterType.GRTR, None, tar)

    @classmethod
    def GRTREQ(cls, tar):
        return cls.__build(FilterType.GRTREQ, None, tar)
class Table(object):
def __init__(self, file):
# type: (JsonFile) -> None
super(Table, self).__init__()
self.file = file
if self.file.data.get('data', None) is None:
self.file.data['data'] = []
self.meta = None # type: dict
self.unique = {} # type: dict[str,bool]
self.__meta_set()
def __meta_set(self):
self.file.lock()
self.meta = self.file.data.get('__meta', None)
if self.meta is not None:
ul = self.meta.get('unique', [])
for u in ul:
self.unique[u] = True
self.file.release()
return
def need_init(self):
return self.meta is None
def init_new_table(self, configs = {
'unique':['id', 'name']}):
if self.meta:
return
self.file.lock()
self.meta = configs
self.file.data['__meta'] = self.meta
self.file.data['data'] = []
self.file.release()
self.__meta_set()
self.file.flush()
def find(self, conds):
# type: (dict[str,Filter]) -> any
res = []
self.file.lock()
for o in self.file.data['data']:
is_ok = True
for i in conds:
kv = o.get(i, None)
if kv is None:
is_ok = False
# print 'Key not found', i
break
if not conds[i].eval(kv):
# print 'Values not satisfied', kv
is_ok = False
break
if is_ok:
res.append(o)
self.file.release()
return res
def insert(self, data):
# 1. check meta file
for m in self.unique:
if data.get(m, None) is None:
print 'unique', m, 'not found'
return False
# 2. find if have same meta
query = {}
for m in self.unique:
query[m] = Filter.EQUAL(data[m])
tar = self.find(query)
if len(tar) > 0:
print 'unexpected same unique', tar
return False
# 2. insert
self.file.lock()
self.file.data['data'].append(data)
self.file.release()
self.file.flush()
# print self.file.data
return True
def update(self, data):
# 1. check meta file
for m in self.unique:
if data.get(m, None) is None:
print 'unique', m, 'not found'
return False
# 2. find the target
query = {}
for m in self.unique:
query[m] = Filter.EQUAL(data[m])
self.file.lock()
for k in xrange(len(self.file.data['data'])):
o = self.file.data['data'][k]
is_ok = True
for i in query:
kv = o.get(i, None)
if kv is None:
is_ok = False
break
if not query[i].eval(kv):
is_ok = False
break
if is_ok:
self.file.data['data'][k] = data
break
self.file.release()
self.file.flush()
return True
def remove(self, conds):
# type: (dict[str,Filter]) -> any
res = []
self.file.lock()
for k in xrange(len(self.file.data['data'])):
o = self.file.data['data'][k]
is_ok = True
for i in conds:
kv = o.get(i, None)
if kv is None:
is_ok = False
break
if not conds[i].eval(kv):
is_ok = False
break
if is_ok:
res.append(k)
for i in xrange(len(res)-1, -1, -1):
del self.file.data['data'][res[i]]
self.file.release()
self.file.flush()
return res
def close(self):
self.file.close()
if __name__ == "__main__":
    # Ad-hoc smoke test (Python 2): exercises insert/find/update/remove
    # against a scratch 'data' cache created on disk.
    from cache import DBCache
    cache = DBCache('data')
    cache.create('test')
    f = cache.open('test')
    t = Table(f)
    t.init_new_table()
    # Third insert reuses the unique name 'woaini' -> expected to fail.
    print t.insert({
        'id':123,
        'name':'woaini',
        'value':'k'
    })
    print t.insert({
        'id':456,
        'name':'woaini2',
        'value':'k'
    })
    print t.insert({
        'id':178,
        'name':'woaini',
        'value':'k2'
    })
    print 'find: '
    print t.find({
        'id':Filter.OR(Filter.LESS(156), Filter.GRTR(200)),
    })
    print t.find({
        'id':Filter.OR(Filter.LESS(156), Filter.GRTR(200)),
        'value':Filter.NOT(Filter.EQUAL('k'))
    })
    print t.find({
        'name':Filter.EQUAL('woaini')
    })
    t.update({
        'id':123,
        'name':'woaini',
        'value':'k1',
        'hah':0
    })
    print 'remove'
    t.remove({
        'id':Filter.AND(Filter.GRTR(177), Filter.LESS(179))
    })
    t.close()
    cache.shutdown()
| 25.332226
| 59
| 0.465705
|
4a0c6f9021fe3b2c1d136c69246e23788d2e861f
| 672
|
py
|
Python
|
commands/cat.py
|
DiscordHackers/BetterBot
|
5b21c9b1280d205fa0b81c99f87626c1c8a24b81
|
[
"Apache-2.0"
] | null | null | null |
commands/cat.py
|
DiscordHackers/BetterBot
|
5b21c9b1280d205fa0b81c99f87626c1c8a24b81
|
[
"Apache-2.0"
] | null | null | null |
commands/cat.py
|
DiscordHackers/BetterBot
|
5b21c9b1280d205fa0b81c99f87626c1c8a24b81
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json

import disnake as discord
import requests
from disnake.ext import commands

from api.check import utils, block
from api.server import base, main
class Cat(commands.Cog):
    """Cog exposing the ``cat`` command: replies with a random cat picture."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    @block.block()
    async def cat(self, ctx):
        """Fetch a random cat image and reply with it in an embed."""
        # requests is synchronous; run it in a worker thread so the HTTP
        # round-trip does not block the bot's event loop. The timeout keeps
        # an unresponsive API from hanging the command indefinitely.
        loop = asyncio.get_event_loop()
        response = await loop.run_in_executor(
            None,
            lambda: requests.get('https://some-random-api.ml/img/cat', timeout=10),
        )
        json_data = response.json()
        embed = discord.Embed(color = 0xFFA500, title = main.get_lang(ctx.guild, "CAT_TITLE"))
        embed.set_image(url = json_data['link'])
        await ctx.reply(embed = embed)
def setup(client):
    # Extension entry point invoked by the bot loader: registers the Cat cog.
    client.add_cog(Cat(client))
| 24.888889
| 94
| 0.678571
|
4a0c6fd4d22f33ca615df603929c5052243b64e0
| 4,487
|
py
|
Python
|
aries_cloudagent/protocols/issue_credential/v2_0/messages/cred_format.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 247
|
2019-07-02T21:10:21.000Z
|
2022-03-30T13:55:33.000Z
|
aries_cloudagent/protocols/issue_credential/v2_0/messages/cred_format.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 1,462
|
2019-07-02T20:57:30.000Z
|
2022-03-31T23:13:35.000Z
|
aries_cloudagent/protocols/issue_credential/v2_0/messages/cred_format.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 377
|
2019-06-20T21:01:31.000Z
|
2022-03-30T08:27:53.000Z
|
"""Issue-credential protocol message attachment format."""
from collections import namedtuple
from enum import Enum
from typing import Mapping, Sequence, Type, TYPE_CHECKING, Union
from uuid import uuid4
from marshmallow import EXCLUDE, fields
from .....utils.classloader import DeferLoad
from .....messaging.decorators.attach_decorator import AttachDecorator
from .....messaging.models.base import BaseModel, BaseModelSchema
from .....messaging.valid import UUIDFour
from ..models.detail.indy import V20CredExRecordIndy
from ..models.detail.ld_proof import V20CredExRecordLDProof
if TYPE_CHECKING:
from ..formats.handler import V20CredFormatHandler
# One per attachment format: `aries` is the format-specifier prefix,
# `detail` the credential-exchange detail record class, and `handler`
# a DeferLoad wrapping the format handler class.
FormatSpec = namedtuple("FormatSpec", "aries detail handler")
class V20CredFormat(BaseModel):
"""Issue-credential protocol message attachment format."""
class Meta:
"""Issue-credential protocol message attachment format metadata."""
schema_class = "V20CredFormatSchema"
class Format(Enum):
"""Attachment format."""
INDY = FormatSpec(
"hlindy/",
V20CredExRecordIndy,
DeferLoad(
"aries_cloudagent.protocols.issue_credential.v2_0"
".formats.indy.handler.IndyCredFormatHandler"
),
)
LD_PROOF = FormatSpec(
"aries/",
V20CredExRecordLDProof,
DeferLoad(
"aries_cloudagent.protocols.issue_credential.v2_0"
".formats.ld_proof.handler.LDProofCredFormatHandler"
),
)
@classmethod
def get(cls, label: Union[str, "V20CredFormat.Format"]):
"""Get format enum for label."""
if isinstance(label, str):
for fmt in V20CredFormat.Format:
if label.startswith(fmt.aries) or label == fmt.api:
return fmt
elif isinstance(label, V20CredFormat.Format):
return label
return None
@property
def api(self) -> str:
"""Admin API specifier."""
return self.name.lower()
@property
def aries(self) -> str:
"""Aries specifier prefix."""
return self.value.aries
@property
def detail(self) -> Union[V20CredExRecordIndy, V20CredExRecordLDProof]:
"""Accessor for credential exchange detail class."""
return self.value.detail
@property
def handler(self) -> Type["V20CredFormatHandler"]:
"""Accessor for credential exchange format handler."""
return self.value.handler.resolved
def validate_fields(self, message_type: str, attachment_data: Mapping):
"""Raise ValidationError for invalid attachment formats."""
self.handler.validate_fields(message_type, attachment_data)
def get_attachment_data(
self,
formats: Sequence["V20CredFormat"],
attachments: Sequence[AttachDecorator],
):
"""Find attachment of current format, decode and return its content."""
for fmt in formats:
if V20CredFormat.Format.get(fmt.format) is self:
attach_id = fmt.attach_id
break
else:
return None
for atch in attachments:
if atch.ident == attach_id:
return atch.content
return None
def __init__(
self,
*,
attach_id: str = None,
format_: str = None,
):
"""Initialize issue-credential protocol message attachment format."""
self.attach_id = attach_id or uuid4()
self.format_ = format_
    @property
    def format(self) -> str:
        """Return format specifier (read-only alias for the format_ attribute)."""
        return self.format_
class V20CredFormatSchema(BaseModelSchema):
    """Issue-credential protocol message attachment format schema."""

    class Meta:
        """Issue-credential protocol message attachment format schema metadata."""

        model_class = V20CredFormat
        unknown = EXCLUDE

    # Maps to V20CredFormat.attach_id
    attach_id = fields.Str(
        required=True,
        allow_none=False,
        description="Attachment identifier",
        example=UUIDFour.EXAMPLE,
    )
    # Serialized under the JSON key "format"; the trailing underscore on the
    # attribute avoids shadowing the model's `format` property
    format_ = fields.Str(
        required=True,
        allow_none=False,
        description="Attachment format specifier",
        data_key="format",
        example="aries/ld-proof-vc-detail@v1.0",
    )
| 30.944828
| 83
| 0.609316
|
4a0c701441ca6e50a7ae882104073d87ec1cc055
| 6,985
|
py
|
Python
|
config/settings/production.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | 1
|
2021-01-04T04:30:24.000Z
|
2021-01-04T04:30:24.000Z
|
config/settings/production.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | 6
|
2019-12-21T03:19:17.000Z
|
2020-01-07T07:28:04.000Z
|
config/settings/production.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | null | null | null |
"""Production Django settings.

Layers deployment-specific configuration (secrets, hosts, caches, security
headers, email, logging, Sentry) on top of the base settings. All secrets and
deployment knobs are read from environment variables via `env`.
"""
import logging

import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration

from .base import *  # noqa
from .base import env

# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["vps607308.ovh.net"])

# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405

# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": env("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicing memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}

# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)

# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# MEDIA
# ------------------------------------------------------------------------------

# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cached template loader: templates are compiled once per process.
TEMPLATES[0]["OPTIONS"]["loaders"] = [  # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]

# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL", default="Scrooge <noreply@vps607308.ovh.net>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
    "DJANGO_EMAIL_SUBJECT_PREFIX", default="[Scrooge]"
)

# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")

# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"]  # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
    "MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}

# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL  # noqa F405

# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "formatters": {
        "verbose": {
            "format": "%(levelname)s %(asctime)s %(module)s "
            "%(process)d %(thread)d %(message)s"
        }
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "django.db.backends": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
        # Errors logged by the SDK itself
        "sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
    },
}

# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)

sentry_logging = LoggingIntegration(
    level=SENTRY_LOG_LEVEL,  # Capture info and above as breadcrumbs
    event_level=logging.ERROR,  # Send errors as events
)
sentry_sdk.init(
    dsn=SENTRY_DSN,
    integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)

# Your stuff...
# ------------------------------------------------------------------------------
| 39.022346
| 100
| 0.600286
|
4a0c70bee63196c309eb7cf813a9159fdb1b6acf
| 14,893
|
py
|
Python
|
app/grandchallenge/archives/views.py
|
gcjordi/grand-challenge.org
|
b00e16feb9090d7d938f9934c59cc9c3ade01490
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/archives/views.py
|
gcjordi/grand-challenge.org
|
b00e16feb9090d7d938f9934c59cc9c3ade01490
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/archives/views.py
|
gcjordi/grand-challenge.org
|
b00e16feb9090d7d938f9934c59cc9c3ade01490
|
[
"Apache-2.0"
] | null | null | null |
from dal import autocomplete
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import (
PermissionRequiredMixin,
UserPassesTestMixin,
)
from django.contrib.messages.views import SuccessMessageMixin
from django.core.exceptions import (
NON_FIELD_ERRORS,
PermissionDenied,
ValidationError,
)
from django.forms.utils import ErrorList
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.timezone import now
from django.views.generic import (
CreateView,
DetailView,
FormView,
ListView,
UpdateView,
)
from guardian.mixins import (
LoginRequiredMixin,
PermissionListMixin,
PermissionRequiredMixin as ObjectPermissionRequiredMixin,
)
from guardian.shortcuts import get_objects_for_user
from rest_framework.settings import api_settings
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_guardian.filters import ObjectPermissionsFilter
from grandchallenge.archives.filters import ArchiveFilter
from grandchallenge.archives.forms import (
ArchiveCasesToReaderStudyForm,
ArchiveForm,
ArchivePermissionRequestUpdateForm,
EditorsForm,
UploadersForm,
UsersForm,
)
from grandchallenge.archives.models import Archive, ArchivePermissionRequest
from grandchallenge.archives.serializers import ArchiveSerializer
from grandchallenge.archives.tasks import add_images_to_archive
from grandchallenge.cases.forms import UploadRawImagesForm
from grandchallenge.cases.models import Image, RawImageUploadSession
from grandchallenge.core.filters import FilterMixin
from grandchallenge.core.forms import UserFormKwargsMixin
from grandchallenge.core.permissions.mixins import UserIsNotAnonMixin
from grandchallenge.core.permissions.rest_framework import (
DjangoObjectOnlyPermissions,
)
from grandchallenge.core.renderers import PaginatedCSVRenderer
from grandchallenge.core.templatetags.random_encode import random_encode
from grandchallenge.core.views import PermissionRequestUpdate
from grandchallenge.datatables.views import Column, PaginatedTableListView
from grandchallenge.reader_studies.models import ReaderStudy
from grandchallenge.subdomains.utils import reverse
class ArchiveList(PermissionListMixin, FilterMixin, ListView):
    """Filterable list of archives the requesting user may view."""

    model = Archive
    # Object-level view permission; PermissionListMixin restricts the queryset
    permission_required = (
        f"{model._meta.app_label}.view_{model._meta.model_name}"
    )
    ordering = "-created"
    filter_class = ArchiveFilter

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update(
            {
                "jumbotron_title": "Archives",
                "jumbotron_description": format_html(
                    (
                        "An archive can be used to collect set of medical "
                        "images, which can later be used in a reader study, "
                        "challenge or algorithm. Please <a href='{}'>contact "
                        "us</a> if you would like to set up your own archive."
                    ),
                    # random_encode obfuscates the mailto link (anti-scraping)
                    random_encode("mailto:support@grand-challenge.org"),
                ),
            }
        )
        return context
class ArchiveCreate(
    PermissionRequiredMixin, UserFormKwargsMixin, CreateView,
):
    """Create a new archive; the creator becomes its first editor."""

    model = Archive
    form_class = ArchiveForm
    permission_required = (
        f"{model._meta.app_label}.add_{model._meta.model_name}"
    )

    def form_valid(self, form):
        response = super().form_valid(form=form)
        # Grant the creator editor rights on the freshly saved archive
        self.object.add_editor(self.request.user)
        return response
class ArchiveDetail(
    LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
):
    """Archive detail page for users with the object-level `use` permission.

    Users lacking permission are redirected to the permission-request page
    instead of receiving a 403.
    """

    model = Archive
    permission_required = (
        f"{model._meta.app_label}.use_{model._meta.model_name}"
    )
    raise_exception = True

    def on_permission_check_fail(self, request, response, obj=None):
        # Re-run GET so check_permissions (below) can issue its redirect;
        # the passed-in response is intentionally discarded
        response = self.get(request)
        return response

    def check_permissions(self, request):
        try:
            return super().check_permissions(request)
        except PermissionDenied:
            # Redirect unauthorized users to request access
            return HttpResponseRedirect(
                reverse(
                    "archives:permission-request-create",
                    kwargs={"slug": self.object.slug},
                )
            )

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        # Pre-built removal forms for the users/uploaders/editors panels
        user_remove_form = UsersForm()
        user_remove_form.fields["action"].initial = UsersForm.REMOVE

        uploader_remove_form = UploadersForm()
        uploader_remove_form.fields["action"].initial = UploadersForm.REMOVE

        editor_remove_form = EditorsForm()
        editor_remove_form.fields["action"].initial = EditorsForm.REMOVE

        # Page size for chunked image loading; offsets drive the template's
        # batched requests over the archive's images
        limit = 1000

        context.update(
            {
                "user_remove_form": user_remove_form,
                "uploader_remove_form": uploader_remove_form,
                "editor_remove_form": editor_remove_form,
                "now": now().isoformat(),
                "limit": limit,
                "offsets": range(0, context["object"].images.count(), limit),
            }
        )

        pending_permission_requests = ArchivePermissionRequest.objects.filter(
            archive=context["object"], status=ArchivePermissionRequest.PENDING,
        ).count()
        context.update(
            {"pending_permission_requests": pending_permission_requests}
        )

        return context
class ArchiveUpdate(
    UserFormKwargsMixin,
    LoginRequiredMixin,
    ObjectPermissionRequiredMixin,
    UpdateView,
):
    """Edit an archive; requires the object-level `change` permission."""

    model = Archive
    form_class = ArchiveForm
    permission_required = (
        f"{model._meta.app_label}.change_{model._meta.model_name}"
    )
    raise_exception = True
class ArchiveUsersAutocomplete(
    LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView
):
    """Select2 user autocomplete, restricted to archive editors.

    Only users who hold change_archive on at least one archive may query;
    results exclude the anonymous user and match on username prefix.
    """

    def test_func(self):
        # Gate access: the requester must be able to change some archive
        editable_archives = get_objects_for_user(
            user=self.request.user, perms="change_archive", klass=Archive
        )
        return editable_archives.exists()

    def get_queryset(self):
        users = (
            get_user_model()
            .objects.exclude(username=settings.ANONYMOUS_USER_NAME)
            .order_by("username")
        )

        if self.q:
            users = users.filter(username__istartswith=self.q)

        return users
class ArchiveGroupUpdateMixin(
    LoginRequiredMixin,
    ObjectPermissionRequiredMixin,
    SuccessMessageMixin,
    FormView,
):
    """Shared machinery for adding/removing members of an archive group.

    Subclasses supply form_class (which defines `role` and
    `add_or_remove_user`) and a success_message.
    """

    template_name = "archives/archive_user_groups_form.html"
    permission_required = (
        f"{Archive._meta.app_label}.change_{Archive._meta.model_name}"
    )
    raise_exception = True

    def get_permission_object(self):
        return self.archive

    @cached_property
    def archive(self):
        # Cached so repeated accesses within one request hit the DB once
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"object": self.archive, "role": self.get_form().role})
        return context

    def get_success_url(self):
        return self.archive.get_absolute_url()

    def form_valid(self, form):
        # The form performs the actual group membership mutation
        form.add_or_remove_user(archive=self.archive)
        return super().form_valid(form)
# Concrete group-management views: each binds the mixin to one role's form.
class ArchiveEditorsUpdate(ArchiveGroupUpdateMixin):
    """Add/remove archive editors."""

    form_class = EditorsForm
    success_message = "Editors successfully updated"


class ArchiveUploadersUpdate(ArchiveGroupUpdateMixin):
    """Add/remove archive uploaders."""

    form_class = UploadersForm
    success_message = "Uploaders successfully updated"


class ArchiveUsersUpdate(ArchiveGroupUpdateMixin):
    """Add/remove archive users."""

    form_class = UsersForm
    success_message = "Users successfully updated"
class ArchivePermissionRequestCreate(
    UserIsNotAnonMixin, SuccessMessageMixin, CreateView
):
    """Let a signed-in user request access to an archive."""

    model = ArchivePermissionRequest
    fields = ()

    @cached_property
    def archive(self):
        # cached_property (consistent with ArchiveGroupUpdateMixin and
        # ArchiveUploadSessionCreate) so the several accesses below run
        # get_object_or_404 only once per request instead of once per access.
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_success_url(self):
        return self.archive.get_absolute_url()

    def get_success_message(self, cleaned_data):
        return self.object.status_to_string()

    def form_valid(self, form):
        form.instance.user = self.request.user
        form.instance.archive = self.archive
        try:
            redirect = super().form_valid(form)
            return redirect
        except ValidationError as e:
            # Surface model-level validation (e.g. a duplicate request) as a
            # non-field form error instead of a server error.
            form._errors[NON_FIELD_ERRORS] = ErrorList(e.messages)
            return super().form_invalid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Show the user's existing request (any status), if one exists
        permission_request = ArchivePermissionRequest.objects.filter(
            archive=self.archive, user=self.request.user
        ).first()
        context.update(
            {
                "permission_request": permission_request,
                "archive": self.archive,
            }
        )
        return context
class ArchivePermissionRequestList(ObjectPermissionRequiredMixin, ListView):
    """List unresolved permission requests for one archive (editors only)."""

    model = ArchivePermissionRequest
    permission_required = (
        f"{Archive._meta.app_label}.change_{Archive._meta.model_name}"
    )
    raise_exception = True

    @cached_property
    def archive(self):
        # cached_property (consistent with the other archive views) so
        # get_permission_object, get_queryset and get_context_data share one
        # database lookup per request.
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_permission_object(self):
        return self.archive

    def get_queryset(self):
        queryset = super().get_queryset()
        queryset = (
            queryset.filter(archive=self.archive)
            # Accepted requests are resolved; only show pending/rejected
            .exclude(status=ArchivePermissionRequest.ACCEPTED)
            .select_related("user__user_profile", "user__verification")
        )
        return queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"archive": self.archive})
        return context
class ArchivePermissionRequestUpdate(PermissionRequestUpdate):
    """Accept/reject a pending archive permission request (editors only)."""

    model = ArchivePermissionRequest
    form_class = ArchivePermissionRequestUpdateForm
    base_model = Archive
    redirect_namespace = "archives"
    # Attributes checked on the requesting user by PermissionRequestUpdate
    user_check_attrs = ["is_user", "is_uploader", "is_editor"]
    permission_required = (
        f"{Archive._meta.app_label}.change_{Archive._meta.model_name}"
    )

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"archive": self.base_object})
        return context
class ArchiveUploadSessionCreate(
    UserFormKwargsMixin,
    LoginRequiredMixin,
    ObjectPermissionRequiredMixin,
    CreateView,
):
    """Start a raw-image upload session whose images land in this archive."""

    model = RawImageUploadSession
    form_class = UploadRawImagesForm
    template_name = "archives/archive_upload_session_create.html"
    permission_required = (
        f"{Archive._meta.app_label}.upload_{Archive._meta.model_name}"
    )
    raise_exception = True

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs.update(
            {
                # Celery task chained after import; immutable so upstream
                # results are not injected into its arguments
                "linked_task": add_images_to_archive.signature(
                    kwargs={"archive_pk": self.archive.pk}, immutable=True
                )
            }
        )
        return kwargs

    @cached_property
    def archive(self):
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_permission_object(self):
        return self.archive

    def form_valid(self, form):
        form.instance.creator = self.request.user
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"archive": self.archive})
        return context
class ArchiveCasesList(
    LoginRequiredMixin, ObjectPermissionRequiredMixin, PaginatedTableListView,
):
    """Server-side-paginated datatable of the images in an archive."""

    model = Image
    permission_required = (
        f"{Archive._meta.app_label}.use_{Archive._meta.model_name}"
    )
    raise_exception = True
    template_name = "archives/archive_cases_list.html"
    row_template = "archives/archive_cases_row.html"
    search_fields = [
        "pk",
        "name",
    ]
    # Column order must match the row template's cell order
    columns = [
        Column(title="Name", sort_field="name"),
        Column(title="Created", sort_field="created"),
        Column(title="Creator", sort_field="origin__creator__username"),
        Column(title="View", sort_field="pk"),
        Column(title="Algorithm Results", sort_field="pk"),
        Column(title="Download", sort_field="pk"),
    ]

    @cached_property
    def archive(self):
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_permission_object(self):
        return self.archive

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"archive": self.archive})
        return context

    def get_queryset(self):
        qs = super().get_queryset()
        # prefetch/select_related avoid N+1 queries when the row template
        # renders files, algorithm jobs and creator profile per image
        return (
            qs.filter(archive=self.archive)
            .prefetch_related(
                "files",
                "componentinterfacevalue_set__algorithms_jobs_as_input__algorithm_image__algorithm",
            )
            .select_related(
                "origin__creator__user_profile",
                "origin__creator__verification",
            )
        )
class ArchiveCasesToReaderStudyUpdate(
    LoginRequiredMixin,
    ObjectPermissionRequiredMixin,
    SuccessMessageMixin,
    FormView,
):
    """Copy selected archive images into a reader study the user can edit."""

    form_class = ArchiveCasesToReaderStudyForm
    permission_required = (
        f"{Archive._meta.app_label}.use_{Archive._meta.model_name}"
    )
    raise_exception = True
    template_name = "archives/archive_cases_to_reader_study_form.html"

    @cached_property
    def archive(self):
        return get_object_or_404(Archive, slug=self.kwargs["slug"])

    def get_permission_object(self):
        return self.archive

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({"archive": self.archive})
        return context

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # The form limits image/reader-study choices to this user + archive
        kwargs.update({"user": self.request.user, "archive": self.archive})
        return kwargs

    def form_valid(self, form):
        reader_study: ReaderStudy = form.cleaned_data["reader_study"]
        images = form.cleaned_data["images"]

        reader_study.images.add(*images)

        # Redirect to the reader study that received the cases
        self.success_url = reader_study.get_absolute_url()
        self.success_message = f"Added {len(images)} cases to {reader_study}."

        return super().form_valid(form)
class ArchiveViewSet(ReadOnlyModelViewSet):
    """Read-only REST API for archives, filtered by object permissions."""

    serializer_class = ArchiveSerializer
    queryset = Archive.objects.all()
    permission_classes = (DjangoObjectOnlyPermissions,)
    # Restricts list results to archives the requester has permissions on
    filter_backends = (ObjectPermissionsFilter,)
    renderer_classes = (
        *api_settings.DEFAULT_RENDERER_CLASSES,
        PaginatedCSVRenderer,
    )
| 31.027083
| 100
| 0.680118
|
4a0c725ba03af6b477054e5536c1de88c8da6f3a
| 3,628
|
py
|
Python
|
groupdocs_conversion_cloud/models/j2c_convert_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | 5
|
2019-11-21T04:58:45.000Z
|
2021-02-05T05:22:37.000Z
|
groupdocs_conversion_cloud/models/j2c_convert_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | null | null | null |
groupdocs_conversion_cloud/models/j2c_convert_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="J2cConvertOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import JpgConvertOptions
class J2cConvertOptions(JpgConvertOptions):
    """
    J2c convert options
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, **kwargs):  # noqa: E501
        """Initializes new instance of J2cConvertOptions"""  # noqa: E501
        base = super(J2cConvertOptions, self)
        base.__init__(**kwargs)
        # NOTE(review): instances have no own swagger_types/attribute_map, so
        # these update() calls mutate the *class-level* dicts on every
        # construction -- standard swagger-codegen pattern, but confirm the
        # dicts are per-class (not shared) before changing.
        self.swagger_types.update(base.swagger_types)
        self.attribute_map.update(base.attribute_map)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, J2cConvertOptions):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 34.884615
| 85
| 0.598677
|
4a0c7461b5e0403201c4f5168b296c5ee78ff659
| 7,752
|
py
|
Python
|
trade.py
|
zysilence/gym-anytrading
|
e534c442474a9384219c051567c560ba359be6ba
|
[
"MIT"
] | null | null | null |
trade.py
|
zysilence/gym-anytrading
|
e534c442474a9384219c051567c560ba359be6ba
|
[
"MIT"
] | null | null | null |
trade.py
|
zysilence/gym-anytrading
|
e534c442474a9384219c051567c560ba359be6ba
|
[
"MIT"
] | null | null | null |
""" Main file with entry point.
Author: sfan
"""
import time
import gym
import gym_anytrading
from gym_anytrading.envs import TradingEnv, ForexEnv, StocksEnv, MyStockCnnEnv, MyStockCwtEnv
from gym_anytrading.datasets import FOREX_EURUSD_1H_ASK, STOCKS_GOOGL
from gym_anytrading.datasets import XAUUSD_1H, XAUUSD_4H, XAUUSD_1D
import matplotlib.pyplot as plt
from stable_baselines.common.policies import MlpPolicy, CnnPolicy, FeedForwardPolicy
# from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from stable_baselines import DQN
from stable_baselines.a2c.utils import conv, linear, conv_to_fc, batch_to_seq, seq_to_batch, lstm
import tensorflow as tf
import numpy as np
def nature_cnn(scaled_images, **kwargs):
    """
    CNN from Nature paper.

    Three conv layers (32x8s4, 64x4s2, 64x3s1) with ReLU, flattened into a
    512-unit fully connected layer.

    :param scaled_images: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=8, stride=4, init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=4, stride=2, init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=64, filter_size=3, stride=1, init_scale=np.sqrt(2), **kwargs))
    layer_3 = conv_to_fc(layer_3)
    return activ(linear(layer_3, 'fc1', n_hidden=512, init_scale=np.sqrt(2)))
def custom_cnn(scaled_images, **kwargs):
    """
    Customized CNN.
    Result is good in training but bad in back-test.

    Uses width-1 filters ((5,1), (4,1), (3,1)), i.e. convolving along the
    first spatial axis only, with a smaller 64-unit FC head than nature_cnn.

    :param scaled_images: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(scaled_images, 'c1', n_filters=16, filter_size=(5, 1), stride=4, init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=16, filter_size=(4, 1), stride=2, init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=32, filter_size=(3, 1), stride=1, init_scale=np.sqrt(2), **kwargs))
    layer_3 = conv_to_fc(layer_3)
    return activ(linear(layer_3, 'fc1', n_hidden=64, init_scale=np.sqrt(2)))
def custom_cnn_with_dropout(scaled_images, **kwargs):
    """
    Customized CNN using dropout layers.
    The result is not good in training.

    :param scaled_images: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(scaled_images, 'c1', n_filters=16, filter_size=(5, 1), stride=4, init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=16, filter_size=(4, 1), stride=2, init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=32, filter_size=(3, 1), stride=1, init_scale=np.sqrt(2), **kwargs))
    layer_3 = conv_to_fc(layer_3)
    # NOTE(review): in TF1, tf.nn.dropout's keep_prob is the probability a
    # unit is KEPT, so keep_prob=0.2 discards 80% of the flattened features
    # here -- if a 20% drop rate was intended this should be 0.8, and that
    # may explain the poor training result noted above. Dropout is also
    # applied unconditionally (no train/inference switch).
    layer_3 = tf.nn.dropout(layer_3, keep_prob=0.2)
    layer_4 = activ(linear(layer_3, 'fc1', n_hidden=64, init_scale=np.sqrt(2)))
    layer_5 = tf.nn.dropout(layer_4, keep_prob=0.5)
    output = activ(linear(layer_5, 'fc2', n_hidden=64, init_scale=np.sqrt(2)))
    return output
def custom_cnn_for_cwt(scaled_images, **kwargs):
    """
    Customized CNN for CWT (continuous wavelet transform) observations.
    Result is good in training but bad in back-test.

    Like custom_cnn but with square filters (5, 3, 2), suited to 2-D
    scalogram-style inputs rather than width-1 time series windows.

    :param scaled_images: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(scaled_images, 'c1', n_filters=16, filter_size=5, stride=4, init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=16, filter_size=3, stride=2, init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=32, filter_size=2, stride=1, init_scale=np.sqrt(2), **kwargs))
    layer_3 = conv_to_fc(layer_3)
    return activ(linear(layer_3, 'fc1', n_hidden=64, init_scale=np.sqrt(2)))
class CustomCnnPolicy(FeedForwardPolicy):
    """Stable-baselines policy that plugs custom_cnn in as the feature extractor."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
        super(CustomCnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                              cnn_extractor=custom_cnn,
                                              feature_extraction="cnn", **_kwargs)
def callback(locals_, globals_):
    """Training callback: push per-episode reward/profit to TensorBoard.

    NOTE(review): reads the module-global `env` (set in __main__) rather than
    the env wrapped inside the model -- the two must stay in sync. The env is
    assumed to expose ring buffers _reward_history/_profit_history of length
    _history_len plus _history_idx/_summary_idx cursors; confirm against
    MyStockCnnEnv.
    """
    self_ = locals_['self']
    """
    # Log additional tensor
    if not self_.is_tb_set:
        with self_.graph.as_default():
            tf.summary.scalar('value_target', tf.reduce_mean(self_.value_target))
            self_.summary = tf.summary.merge_all()
        self_.is_tb_set = True
    """
    # Log scalar value (here a random variable)
    summary_idx = env._summary_idx
    history_idx = env._history_idx
    history_len = env._history_len

    # Log when episode ends: flush every episode completed since last call,
    # indexing the histories modulo the ring-buffer length
    if summary_idx < history_idx:
        for i in range(summary_idx + 1, history_idx + 1):
            total_reward = env._reward_history[i % history_len]
            total_profit = env._profit_history[i % history_len]
            summary = tf.Summary(value=[tf.Summary.Value(tag='total reward', simple_value=total_reward),
                                        tf.Summary.Value(tag='total profit', simple_value=total_profit)])
            locals_['writer'].add_summary(summary, self_.num_timesteps)
        env._summary_idx = env._history_idx
    return True
if __name__ == '__main__':
    start = time.time()

    # Run identifier: used for the TensorBoard run name and model file
    tb_log_name = 'PPO2_Cnn_win60_XAUUSD_4H_Nofee'
    model_path = './model/{}'.format(tb_log_name)

    window_size = 60
    train_test_split = 0.8
    df_data = XAUUSD_4H
    # df_data = FOREX_EURUSD_1H_ASK

    # Chronological train/test split; test window starts window_size rows
    # early so the first test observation has a full lookback
    split_idx = int(len(df_data) * train_test_split)
    total_bound = (window_size, len(df_data))
    train_bound = (window_size, split_idx)
    test_bound = (split_idx - window_size, len(df_data))

    # Train
    env = MyStockCnnEnv(df=df_data,
                        frame_bound=train_bound,
                        window_size=window_size)
    # env = gym.make('forex-v0', frame_bound=(10, len(FOREX_EURUSD_1H_ASK)), window_size=10)
    observation = env.reset()

    model = PPO2(CustomCnnPolicy, env, verbose=0, tensorboard_log="./tensorboard_log/")
    # model = DQN(MlpPolicy, env, verbose=1)
    print('=' * 50)
    print('Model trainging: {}'.format(tb_log_name))
    print('=' * 50)
    model.learn(total_timesteps=10000000, tb_log_name=tb_log_name, callback=callback)
    model.save(model_path)

    # Rebind the global env to the test range before back-testing
    env = MyStockCnnEnv(df=df_data,
                        frame_bound=test_bound,
                        window_size=window_size)
    observation = env.reset()
    model = PPO2.load(model_path)

    process_end = time.time()
    print('=' * 50)
    print('Data processing time: {}'.format(process_end - start))
    print('=' * 50)
    print('Model Testing: {}'.format(tb_log_name))
    print('=' * 50)

    # Test: deterministic roll-out over the held-out range
    step = 0
    while True:
        step += 1
        action, _ = model.predict(observation, deterministic=True)
        observation, reward, done, info = env.step(action)
        # env.render()
        # print('Step {}, profit {}'.format(step, info.get('total_profit')))
        if done:
            print("info:", info)
            break

    end = time.time()
    print('Elapsed time: {} s'.format(end - start))

    plt.cla()
    env.render_all(title=tb_log_name)
    plt.show()
| 41.015873
| 123
| 0.676213
|
4a0c7482ea68ad1c82d9744fdb0439fca9994a53
| 5,331
|
py
|
Python
|
app/admin/views.py
|
razage/TTracker3
|
6098c809b01b5fcb3dd3828228fbaa3d7a160360
|
[
"MIT"
] | null | null | null |
app/admin/views.py
|
razage/TTracker3
|
6098c809b01b5fcb3dd3828228fbaa3d7a160360
|
[
"MIT"
] | 1
|
2015-01-27T13:58:58.000Z
|
2015-01-27T13:58:58.000Z
|
app/admin/views.py
|
razage/TTracker3
|
6098c809b01b5fcb3dd3828228fbaa3d7a160360
|
[
"MIT"
] | null | null | null |
from os.path import join
from flask import Blueprint, flash, Markup, redirect, render_template, request, session, url_for
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.security import generate_password_hash
from .decorators import admin_required
from .forms import *
from app import app, db
from app.tickets.models import Os
from app.users.models import Technicians
mod = Blueprint('admin', __name__, url_prefix="/admin")
@mod.route('/edithp/', methods=['GET', 'POST'])
@admin_required
def edithp():
    """Edit the homepage text stored in app/static/home.txt.

    GET renders the form pre-filled with the current file contents;
    POST overwrites the file and redirects to the homepage.
    """
    file = join('app', 'static', 'home.txt')
    form = EditHomepageForm(request.form)
    if form.validate_on_submit():
        # FIX: use a context manager so the handle is closed even on error.
        with open(file, 'w') as f:
            f.write(form.bodytext.data)
        flash(Markup("<b>Success!</b> The homepage has been updated."), app.config['ALERT_CATEGORIES']['SUCCESS'])
        return redirect(url_for('home'))
    try:
        # FIX: the read handle was previously never closed (open(...).read()).
        with open(file, 'r') as f:
            chp = f.read()
    except FileNotFoundError:
        chp = "Home.txt does not exist."
    form.bodytext.data = chp
    return render_template('admin/edithp.html', form=form, title="Edit Homepage")
@mod.route('/addos/', methods=['GET', 'POST'])
@admin_required
def addos():
    """Add an operating system; re-enable it if it already exists disabled."""
    form = AddOSForm(request.form)
    if not form.validate_on_submit():
        return render_template("admin/addos.html", form=form, title="Add an Operating System")
    osname = form.osname.data
    db.session.add(Os(osname))
    try:
        db.session.commit()
    except IntegrityError:
        # The OS row already exists — roll back and re-enable if soft-deleted.
        db.session.rollback()
        existing = db.session.query(Os).filter(Os.osname == osname).one()
        if not existing.enabled:
            existing.enabled = True
            db.session.commit()
    flash(Markup("The <b>%s</b> Operating System has been added to the database." % osname),
          app.config["ALERT_CATEGORIES"]["SUCCESS"])
    return redirect(url_for("home"))
@mod.route('/remos/', methods=['GET', 'POST'])
@admin_required
def removeos():
    """Soft-delete an operating system by clearing its `enabled` flag."""
    form = RemoveOSForm(request.form)
    form.osname.choices = [(o.osname, o.osname) for o in db.session.query(Os).order_by(Os.osname).all()]
    if form.validate_on_submit():
        try:
            os = db.session.query(Os).filter(Os.osname == form.osname.data).one()
        except NoResultFound:
            # BUG FIX: was `except NoResultFound():` — catching an *instance*
            # raises TypeError at runtime instead of handling the miss.
            flash("This OS is not in the database.", app.config["ALERT_CATEGORIES"]["ERROR"])
            return redirect(url_for("home"))
        os.enabled = False
        db.session.commit()
        flash(Markup("Operating system <b>%s</b> has been removed." % form.osname.data),
              app.config["ALERT_CATEGORIES"]["SUCCESS"])
        return redirect(url_for("home"))
    return render_template("admin/removeos.html", form=form, title="Remove an Operating System")
@mod.route('/graduate/', methods=['GET', 'POST'])
@admin_required
def graduate():
    """Mark an enrolled technician (other than the current user) as graduated."""
    form = GraduateForm(request.form)
    form.techname.choices = [(u.full_name, u.full_name) for u in
                             db.session.query(Technicians).filter(
                                 Technicians.enrolled,
                                 Technicians.full_name != session['technician_name']
                             ).order_by(Technicians.full_name)]
    if form.validate_on_submit():
        try:
            tech = db.session.query(Technicians).filter(Technicians.full_name == form.techname.data).one()
        except NoResultFound:
            # BUG FIX: `except NoResultFound():` caught an instance, which
            # raises TypeError when triggered instead of handling the miss.
            flash("This user is not in the database.", app.config["ALERT_CATEGORIES"]["ERROR"])
            return redirect(url_for("home"))
        tech.enrolled = False
        db.session.commit()
        flash(Markup("Technician <b>%s</b> has been graduated." % form.techname.data),
              app.config["ALERT_CATEGORIES"]["SUCCESS"])
        return redirect(url_for("home"))
    return render_template("admin/graduate.html", form=form, title="Graduate a Technician")
@mod.route('/resetpwd/', methods=['GET', 'POST'])
@admin_required
def resetpwd():
    """Set a new (hashed) password for an enrolled technician other than the caller."""
    form = ResetTechPasswordForm(request.form)
    enrolled_others = (db.session.query(Technicians)
                       .filter(Technicians.enrolled,
                               Technicians.full_name != session['technician_name'])
                       .order_by(Technicians.full_name))
    form.techname.choices = [(t.full_name, t.full_name) for t in enrolled_others]
    if form.validate_on_submit():
        technician = db.session.query(Technicians).filter(
            Technicians.full_name == form.techname.data).one()
        technician.password = generate_password_hash(form.password.data)
        db.session.commit()
        flash(Markup("User <b>%s's</b> password has been updated." % form.techname.data),
              app.config["ALERT_CATEGORIES"]["SUCCESS"])
        return redirect(url_for("home"))
    return render_template("admin/reset.html", form=form, title="Reset Technician's Password")
@mod.route('/semesterviews/')
@admin_required
def semesterviewindex():
    """Render the semester index listing the years FIRST_YEAR..CUR_YEAR."""
    years = list(range(app.config["FIRST_YEAR"], app.config["CUR_YEAR"] + 1))
    # BUG FIX(probable): `years` was built with a manual while-loop but never
    # handed to the template, leaving the index page nothing to render.
    # Passing it is backward compatible (Jinja ignores unused context) —
    # confirm semesterindex.html expects a `years` variable.
    return render_template("admin/semesterindex.html", years=years, title="Semester Index")
| 42.309524
| 114
| 0.61602
|
4a0c754cede72da08409ba90033bd6f5b4e0a6e5
| 1,343
|
py
|
Python
|
olds/mc_train_opts.py
|
xdr940/DeepSfMLearner
|
591efe6b2ab74b0c72215fe69d2c38d624547fa3
|
[
"MIT"
] | null | null | null |
olds/mc_train_opts.py
|
xdr940/DeepSfMLearner
|
591efe6b2ab74b0c72215fe69d2c38d624547fa3
|
[
"MIT"
] | null | null | null |
olds/mc_train_opts.py
|
xdr940/DeepSfMLearner
|
591efe6b2ab74b0c72215fe69d2c38d624547fa3
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import os
import argparse
from path import Path
file_dir = os.path.dirname(__file__) # the directory that run_infer_opts.py resides in
class mc_train_opts:
    """Command-line option definitions for training on the MC dataset."""

    def __init__(self):
        parser = argparse.ArgumentParser(description="Monodepthv2 options")

        # TEST MCDataset
        parser.add_argument("--data_path",
                            default="/home/roit/datasets/MC")
        parser.add_argument("--height", default=192)
        parser.add_argument("--width", default=256)
        parser.add_argument("--frame_idxs", default=[-1, 0, 1])
        parser.add_argument("--scales", default=[0, 1, 2, 3])
        parser.add_argument("--batch_size", default=1)
        parser.add_argument("--num_workers", default=1)
        parser.add_argument("--mc",
                            type=str,
                            help="dataset to train on",
                            # default="mc",
                            default='kitti',
                            choices=["kitti", "kitti_odom", "kitti_depth", "kitti_test", "mc"])
        parser.add_argument("--splits", default='mc_lite')
        self.parser = parser

    def args(self):
        """Parse sys.argv and cache the resulting namespace on self.options."""
        self.options = self.parser.parse_args()
        return self.options
| 36.297297
| 100
| 0.584512
|
4a0c756647441d4266c21b5e71d32ca65301a996
| 793
|
py
|
Python
|
algorithms/python/SmallestSubsequenceOfDistinctCharacters/SmallestSubsequenceOfDistinctCharacters.py
|
artekr/LeetCode
|
a072b57423e4ba74cf205eb7a1cf59afde0dcd4f
|
[
"MIT"
] | null | null | null |
algorithms/python/SmallestSubsequenceOfDistinctCharacters/SmallestSubsequenceOfDistinctCharacters.py
|
artekr/LeetCode
|
a072b57423e4ba74cf205eb7a1cf59afde0dcd4f
|
[
"MIT"
] | null | null | null |
algorithms/python/SmallestSubsequenceOfDistinctCharacters/SmallestSubsequenceOfDistinctCharacters.py
|
artekr/LeetCode
|
a072b57423e4ba74cf205eb7a1cf59afde0dcd4f
|
[
"MIT"
] | 2
|
2020-08-07T17:27:18.000Z
|
2022-01-25T20:18:06.000Z
|
##########
# NOTE: "lexicographically smallest subsequence" means: among all subsequences
# of `text` that contain every distinct character exactly once, return the one
# that compares smallest as a string (LeetCode 1081).
##########
class Solution:
    """LeetCode 1081 — smallest subsequence containing all distinct characters."""

    def smallestSubsequence(self, text: str) -> str:
        """Return the lexicographically smallest subsequence of `text` that
        contains every distinct character exactly once.

        Greedy monotonic-stack solution, O(n) time / O(k) space.

        BUG FIX: the previous version returned the distinct characters in
        sorted order, which is not a subsequence of `text` in general and
        failed its own test cases (e.g. "ecbacba" must yield "eacb").
        """
        if not text:
            return ""
        last_index = {c: i for i, c in enumerate(text)}
        stack = []          # current best answer prefix
        on_stack = set()    # characters currently in `stack`
        for i, c in enumerate(text):
            if c in on_stack:
                continue
            # Pop any larger character that still occurs later: dropping it
            # now makes the result smaller and it can be re-appended later.
            while stack and stack[-1] > c and last_index[stack[-1]] > i:
                on_stack.remove(stack.pop())
            stack.append(c)
            on_stack.add(c)
        return "".join(stack)
# Regression checks — these run at import time and raise AssertionError on failure.
# Solution().smallestSubsequence("cdadabcc")
assert Solution().smallestSubsequence("") == ""
assert Solution().smallestSubsequence("cdadabcc") == "adbc"
assert Solution().smallestSubsequence("abcd") == "abcd"
assert Solution().smallestSubsequence("ecbacba") == "eacb"
assert Solution().smallestSubsequence("leetcode") == "letcod"
print("OH YEAH!")
| 29.37037
| 63
| 0.590164
|
4a0c765c65ffab3d28e7703c3cbfebdead621b59
| 3,879
|
py
|
Python
|
common/generator.py
|
baimengwei/Traffic-Flow-Test-Platform
|
71f68a797cd94a71f895ab2f5225c4ef529f35a5
|
[
"MIT"
] | 2
|
2021-11-18T07:56:22.000Z
|
2021-11-18T07:56:56.000Z
|
common/generator.py
|
baimengwei/Traffic-Flow-Test-Platform
|
71f68a797cd94a71f895ab2f5225c4ef529f35a5
|
[
"MIT"
] | null | null | null |
common/generator.py
|
baimengwei/Traffic-Flow-Test-Platform
|
71f68a797cd94a71f895ab2f5225c4ef529f35a5
|
[
"MIT"
] | null | null | null |
from configs.config_phaser import *
from misc.utils import write_summary, downsample
class Generator:
    """Runs episodes of a traffic environment with one agent per intersection.

    The environment class and agent class are resolved dynamically from the
    configuration (``envs.<env>_env.<Env>Env`` / ``algs.<NAME>.<name>_agent``).
    """

    def __init__(self, conf_path, round_number, is_test=False):
        self.conf_exp, self.conf_agent, self.conf_traffic = \
            conf_path.load_conf_file()
        self.conf_path = conf_path
        self.round_number = round_number
        # create env (resolved dynamically from ENV_NAME)
        env_name = self.conf_traffic.ENV_NAME
        env_package = __import__('envs.%s_env' % env_name)
        env_package = getattr(env_package, '%s_env' % env_name)
        env_class = getattr(env_package, '%sEnv' % env_name.title())
        self.env = env_class(self.conf_path, is_test=is_test)
        # update infos
        agents_infos = self.env.get_agents_info()
        self.conf_traffic.set_traffic_infos(agents_infos)
        # create agents (resolved dynamically from MODEL_NAME)
        agent_name = self.conf_exp.MODEL_NAME
        agent_package = __import__('algs.%s.%s_agent'
                                   % (agent_name.upper(),
                                      agent_name.lower()))
        agent_package = getattr(agent_package, '%s' % agent_name.upper())
        agent_package = getattr(agent_package, '%s_agent' % agent_name.lower())
        agent_class = getattr(agent_package, '%sAgent' % agent_name.upper())
        self.list_agent = []
        self.list_inter = list(sorted(list(agents_infos.keys())))
        for inter_name in self.list_inter:
            # store per-intersection config before constructing its agent
            self.conf_traffic.set_intersection(inter_name)
            self.conf_path.dump_conf_file(
                self.conf_exp, self.conf_agent,
                self.conf_traffic, inter_name=inter_name)
            # create agent
            agent = agent_class(self.conf_path, self.round_number, inter_name)
            self.list_agent.append(agent)
        self.list_reward = {k: 0 for k in agents_infos.keys()}

    def generate(self, *, done_enable=False, choice_random=True):
        """Run one episode, accumulating per-intersection rewards.

        done_enable: stop early when the environment reports done.
        choice_random: forwarded to each agent's choose_action.
        """
        state = self.env.reset()
        step_num = 0
        total_step = int(self.conf_traffic.EPISODE_LEN /
                         self.conf_traffic.TIME_MIN_ACTION)
        while step_num < total_step:
            action_list = []
            for one_state, agent in zip(state, self.list_agent):
                action = agent.choose_action(
                    one_state, choice_random=choice_random)
                action_list.append(action)
            next_state, reward, done, _ = self.env.step(action_list)
            state = next_state
            for idx, inter in enumerate(self.list_inter):
                self.list_reward[inter] += reward[idx]
            step_num += 1
            if step_num % 10 == 0:
                print('.', end='')  # lightweight progress indicator
            if done_enable and done:
                break
        print('||final done||')
        self.env.bulk_log(reward=self.list_reward)

    def _log_results(self):
        """Write per-intersection summaries and downsample the test logs.

        Extracted from generate_test/generate_none, which previously carried
        two identical copies of these loops.
        """
        for inter_name in self.conf_traffic.TRAFFIC_INFOS:
            write_summary(self.conf_path, self.round_number, inter_name)
        for inter_name in sorted(self.conf_traffic.TRAFFIC_INFOS.keys()):
            path_to_log_file = os.path.join(
                self.conf_path.WORK_TEST, "%s.pkl" % inter_name)
            downsample(path_to_log_file)

    def generate_test(self):
        """Load each agent's trained network for this round, then run and log."""
        for agent in self.list_agent:
            agent.load_network(self.round_number)
        # NOTE(review): choice_random=True during evaluation looks deliberate
        # here, but confirm agents behave deterministically in test configs.
        self.generate(done_enable=False, choice_random=True)
        self._log_results()

    def generate_none(self):
        """Run one episode without loading networks or exploration, then log."""
        self.generate(done_enable=False, choice_random=False)
        self._log_results()
| 42.163043
| 79
| 0.620521
|
4a0c769e9328fd53fab5bbf133ab667eef8e15dd
| 41,897
|
py
|
Python
|
rift/packet_common.py
|
kennethhuang123/rift-python
|
f4c208fe39cb14535573708637fa2345c919666b
|
[
"Apache-2.0"
] | 43
|
2018-07-19T17:41:35.000Z
|
2022-03-16T04:04:09.000Z
|
rift/packet_common.py
|
kennethhuang123/rift-python
|
f4c208fe39cb14535573708637fa2345c919666b
|
[
"Apache-2.0"
] | 96
|
2018-07-19T11:06:08.000Z
|
2021-07-27T10:52:09.000Z
|
rift/packet_common.py
|
kennethhuang123/rift-python
|
f4c208fe39cb14535573708637fa2345c919666b
|
[
"Apache-2.0"
] | 29
|
2018-07-24T22:01:20.000Z
|
2022-02-13T21:28:18.000Z
|
# pylint:disable=too-many-lines
import copy
import ipaddress
import struct
import sortedcontainers
import thrift.protocol.TBinaryProtocol
import thrift.transport.TTransport
import common.ttypes
import constants
import encoding.ttypes
import encoding.constants
import key
import utils
RIFT_MAGIC = 0xA1F7
class PacketInfo:
    """One RIFT message plus its decode/encode state.

    Holds the source information, the (decoded or to-be-encoded) protocol
    packet, the three wire sections — envelope header, outer security
    envelope, TIE origin security envelope — and any decode or
    authentication error recorded while parsing.
    """

    ERR_MSG_TOO_SHORT = "Message too short"
    ERR_WRONG_MAGIC = "Wrong magic value"
    ERR_WRONG_MAJOR_VERSION = "Wrong major version"
    ERR_TRIFT_DECODE = "Thrift decode error"
    ERR_TRIFT_VALIDATE = "Thrift validate error"
    ERR_MISSING_OUTER_SEC_ENV = "Missing outer security envelope"
    ERR_ZERO_OUTER_KEY_ID_NOT_ACCEPTED = "Zero outer key id not accepted"
    ERR_NON_ZERO_OUTER_KEY_ID_NOT_ACCEPTED = "Non-zero outer key id not accepted"
    ERR_INCORRECT_OUTER_FINGERPRINT = "Incorrect outer fingerprint"
    ERR_MISSING_ORIGIN_SEC_ENV = "Missing TIE origin security envelope"
    ERR_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED = "Zero TIE origin key id not accepted"
    ERR_NON_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED = "Non-zero TIE origin key id not accepted"
    ERR_UNEXPECTED_ORIGIN_SEC_ENV = "Unexpected TIE origin security envelope"
    ERR_INCONSISTENT_ORIGIN_KEY_ID = "Inconsistent TIE origin key id and fingerprint"
    ERR_INCORRECT_ORIGIN_FINGERPRINT = "Incorrect TIE origin fingerprint"
    ERR_REFLECTED_NONCE_OUT_OF_SYNC = "Reflected nonce out of sync"

    # Errors that arise while parsing the raw bytes / Thrift payload.
    DECODE_ERRORS = [
        ERR_MSG_TOO_SHORT,
        ERR_WRONG_MAGIC,
        ERR_WRONG_MAJOR_VERSION,
        ERR_TRIFT_DECODE,
        ERR_TRIFT_VALIDATE]

    # Errors that arise while validating the security envelopes.
    AUTHENTICATION_ERRORS = [
        ERR_MISSING_OUTER_SEC_ENV,
        ERR_ZERO_OUTER_KEY_ID_NOT_ACCEPTED,
        ERR_NON_ZERO_OUTER_KEY_ID_NOT_ACCEPTED,
        ERR_INCORRECT_OUTER_FINGERPRINT,
        ERR_MISSING_ORIGIN_SEC_ENV,
        ERR_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED,
        ERR_NON_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED,
        ERR_UNEXPECTED_ORIGIN_SEC_ENV,
        ERR_INCONSISTENT_ORIGIN_KEY_ID,
        ERR_INCORRECT_ORIGIN_FINGERPRINT,
        ERR_REFLECTED_NONCE_OUT_OF_SYNC]

    def __init__(self):
        # Where was the message received from?
        self.rx_intf = None
        self.address_family = None
        self.from_addr_port_str = None
        # RIFT model object
        self.protocol_packet = None
        self.encoded_protocol_packet = None
        self.packet_type = None
        # Error string (None if decode was successful)
        self.error = None
        self.error_details = None
        # Envelope header (magic and packet number)
        self.env_header = None
        self.packet_nr = None
        # Outer security envelope header
        self.outer_sec_env_header = None
        self.outer_key_id = None
        self.nonce_local = None
        self.nonce_remote = None
        self.remaining_tie_lifetime = None
        self.outer_fingerprint_len = None
        self.outer_fingerprint = None
        # Origin security envelope header
        self.origin_sec_env_header = None
        self.origin_key_id = None
        self.origin_fingerprint_len = None
        self.origin_fingerprint = None

    def __str__(self):
        """Human-readable dump of whatever fields have been filled in."""
        result_str = ""
        if self.packet_nr is not None:
            result_str += "packet-nr={} ".format(self.packet_nr)
        if self.outer_key_id is not None:
            result_str += "outer-key-id={} ".format(self.outer_key_id)
        if self.nonce_local is not None:
            result_str += "nonce-local={} ".format(self.nonce_local)
        if self.nonce_remote is not None:
            result_str += "nonce-remote={} ".format(self.nonce_remote)
        if self.remaining_tie_lifetime is not None:
            if self.remaining_tie_lifetime == 0xffffffff:
                result_str += "remaining-lie-lifetime=all-ones "
            else:
                result_str += "remaining-lie-lifetime={} ".format(self.remaining_tie_lifetime)
        if self.outer_fingerprint_len is not None:
            result_str += "outer-fingerprint-len={} ".format(self.outer_fingerprint_len)
        if self.origin_key_id is not None:
            result_str += "origin-key-id={} ".format(self.origin_key_id)
        if self.origin_fingerprint_len is not None:
            result_str += "origin-fingerprint-len={} ".format(self.origin_fingerprint_len)
        if self.protocol_packet is not None:
            result_str += "protocol-packet={}".format(self.protocol_packet)
        return result_str

    def message_parts(self):
        """Return the byte sections to concatenate when sending this packet."""
        assert self.env_header
        assert self.outer_sec_env_header
        assert self.encoded_protocol_packet
        if self.origin_sec_env_header:
            return [self.env_header,
                    self.outer_sec_env_header,
                    self.origin_sec_env_header,
                    self.encoded_protocol_packet]
        else:
            return [self.env_header,
                    self.outer_sec_env_header,
                    self.encoded_protocol_packet]

    def update_env_header(self, packet_nr):
        """Rebuild the 4-byte envelope header: magic + packet number."""
        self.packet_nr = packet_nr
        self.env_header = struct.pack("!HH", RIFT_MAGIC, packet_nr)

    def update_outer_sec_env_header(self, outer_key, nonce_local, nonce_remote,
                                    remaining_lifetime=None):
        """Rebuild the outer security envelope.

        The fingerprint (if outer_key is given) covers the trailing
        nonces+lifetime bytes, the origin envelope and the payload.
        When no lifetime is given, 0xffffffff ("all ones") is packed.
        """
        if remaining_lifetime:
            remaining_tie_lifetime = remaining_lifetime
        else:
            remaining_tie_lifetime = 0xffffffff
        post = struct.pack("!HHL", nonce_local, nonce_remote, remaining_tie_lifetime)
        if outer_key:
            self.outer_key_id = outer_key.key_id
            self.outer_fingerprint = outer_key.padded_digest(
                [post, self.origin_sec_env_header, self.encoded_protocol_packet])
            # The wire field carries the length in 32-bit words.
            self.outer_fingerprint_len = len(self.outer_fingerprint) // 4
        else:
            self.outer_key_id = 0
            self.outer_fingerprint = b''
            self.outer_fingerprint_len = 0
        self.nonce_local = nonce_local
        self.nonce_remote = nonce_remote
        self.remaining_tie_lifetime = remaining_tie_lifetime
        reserved = 0
        major_version = encoding.constants.protocol_major_version
        pre = struct.pack("!BBBB", reserved, major_version, self.outer_key_id,
                          self.outer_fingerprint_len)
        self.outer_sec_env_header = pre + self.outer_fingerprint + post

    def update_origin_sec_env_header(self, origin_key):
        """Rebuild the TIE origin security envelope (24-bit key id + fingerprint)."""
        if origin_key:
            self.origin_key_id = origin_key.key_id
            self.origin_fingerprint = origin_key.padded_digest([self.encoded_protocol_packet])
            # The wire field carries the length in 32-bit words.
            self.origin_fingerprint_len = len(self.origin_fingerprint) // 4
        else:
            self.origin_key_id = 0
            self.origin_fingerprint = b''
            self.origin_fingerprint_len = 0
        # The origin key id is a 24-bit big-endian field.
        byte1 = (self.origin_key_id >> 16) & 0xff
        byte2 = (self.origin_key_id >> 8) & 0xff
        byte3 = self.origin_key_id & 0xff
        pre = struct.pack("!BBBB", byte1, byte2, byte3, self.origin_fingerprint_len)
        self.origin_sec_env_header = pre + self.origin_fingerprint
def ipv4_prefix_tup(ipv4_prefix):
    """Hash/equality key for an IPv4 prefix: (address, prefixlen)."""
    return ipv4_prefix.address, ipv4_prefix.prefixlen
def ipv6_prefix_tup(ipv6_prefix):
    """Hash/equality key for an IPv6 prefix: (address, prefixlen)."""
    return ipv6_prefix.address, ipv6_prefix.prefixlen
def ip_prefix_tup(ip_prefix):
    """Hash/equality/sort key (family, (address, prefixlen)); exactly one
    of ipv4prefix/ipv6prefix must be set."""
    v4 = ip_prefix.ipv4prefix
    v6 = ip_prefix.ipv6prefix
    # Exactly one address family may be populated.
    assert (v4 is None) != (v6 is None)
    if v4 is not None:
        return (4, (v4.address, v4.prefixlen))
    return (6, (v6.address, v6.prefixlen))
def tie_id_tup(tie_id):
    """Hash/equality/sort key for a TIEID."""
    return tie_id.direction, tie_id.originator, tie_id.tietype, tie_id.tie_nr
def tie_header_tup(tie_header):
    """Hash/equality key for a TIEHeader."""
    return (tie_header.tieid,
            tie_header.seq_nr,
            tie_header.origination_time)
def link_id_pair_tup(link_id_pair):
    """Hash/equality/sort key for a LinkIDPair."""
    return link_id_pair.local_id, link_id_pair.remote_id
def timestamp_tup(timestamp):
    """Hash/equality key for an IEEE 802.1AS timestamp."""
    return timestamp.AS_sec, timestamp.AS_nsec
def add_missing_methods_to_thrift():
    """Monkey-patch __hash__/__eq__/__lt__/__str__ onto the generated Thrift
    types so they can live in sets, dicts and sorted containers.

    See http://bit.ly/thrift-missing-hash for details about why this is needed.
    """
    common.ttypes.IPv4PrefixType.__hash__ = (
        lambda self: hash(ipv4_prefix_tup(self)))
    common.ttypes.IPv4PrefixType.__eq__ = (
        lambda self, other: ipv4_prefix_tup(self) == ipv4_prefix_tup(other))
    common.ttypes.IPv6PrefixType.__hash__ = (
        lambda self: hash(ipv6_prefix_tup(self)))
    common.ttypes.IPv6PrefixType.__eq__ = (
        lambda self, other: ipv6_prefix_tup(self) == ipv6_prefix_tup(other))
    common.ttypes.IPPrefixType.__hash__ = (
        lambda self: hash(ip_prefix_tup(self)))
    common.ttypes.IPPrefixType.__eq__ = (
        lambda self, other: ip_prefix_tup(self) == ip_prefix_tup(other))
    common.ttypes.IPPrefixType.__str__ = ip_prefix_str
    common.ttypes.IPPrefixType.__lt__ = (
        lambda self, other: ip_prefix_tup(self) < ip_prefix_tup(other))
    common.ttypes.IEEE802_1ASTimeStampType.__hash__ = (
        lambda self: hash(timestamp_tup(self)))
    common.ttypes.IEEE802_1ASTimeStampType.__eq__ = (
        lambda self, other: timestamp_tup(self) == timestamp_tup(other))
    encoding.ttypes.TIEID.__hash__ = (
        lambda self: hash(tie_id_tup(self)))
    encoding.ttypes.TIEID.__eq__ = (
        lambda self, other: tie_id_tup(self) == tie_id_tup(other))
    encoding.ttypes.TIEID.__lt__ = (
        lambda self, other: tie_id_tup(self) < tie_id_tup(other))
    encoding.ttypes.TIEHeader.__hash__ = (
        lambda self: hash(tie_header_tup(self)))
    encoding.ttypes.TIEHeader.__eq__ = (
        lambda self, other: tie_header_tup(self) == tie_header_tup(other))
    encoding.ttypes.TIEHeaderWithLifeTime.__hash__ = (
        lambda self: hash((tie_header_tup(self.header), self.remaining_lifetime)))
    encoding.ttypes.TIEHeaderWithLifeTime.__eq__ = (
        lambda self, other: (tie_header_tup(self.header) == tie_header_tup(other.header)) and
        self.remaining_lifetime == other.remaining_lifetime)
    # FIX: LinkIDPair.__hash__ was previously assigned twice with identical
    # lambdas; the redundant duplicate assignment has been removed.
    encoding.ttypes.LinkIDPair.__hash__ = (
        lambda self: hash(link_id_pair_tup(self)))
    encoding.ttypes.LinkIDPair.__eq__ = (
        lambda self, other: link_id_pair_tup(self) == link_id_pair_tup(other))
    encoding.ttypes.LinkIDPair.__lt__ = (
        lambda self, other: link_id_pair_tup(self) < link_id_pair_tup(other))
def encode_protocol_packet(protocol_packet, origin_key):
    """Wrap a ProtocolPacket in a fresh PacketInfo, classify it and
    Thrift-encode it (signing the TIE origin envelope if applicable)."""
    packet_info = PacketInfo()
    packet_info.protocol_packet = protocol_packet
    content = protocol_packet.content
    # Exactly one of the content members is expected to be populated.
    for attr, ptype in (("lie", constants.PACKET_TYPE_LIE),
                        ("tie", constants.PACKET_TYPE_TIE),
                        ("tide", constants.PACKET_TYPE_TIDE),
                        ("tire", constants.PACKET_TYPE_TIRE)):
        if getattr(content, attr):
            packet_info.packet_type = ptype
            break
    reencode_packet_info(packet_info, origin_key)
    return packet_info
def reencode_packet_info(packet_info, origin_key):
    """Thrift-encode packet_info.protocol_packet into
    packet_info.encoded_protocol_packet, signing the TIE origin security
    envelope when the packet is a TIE. Returns packet_info."""
    # Since Thrift does not support unsigned integer, we need to "fix" unsigned integers to be
    # encoded as signed integers.
    # We have to make a deep copy of the non-encoded packet, but this "fixing" involves changing
    # various fields in the non-encoded packet from the range (0...MAX_UNSIGNED_INT) to
    # (MIN_SIGNED_INT...MAX_SIGNED_INT) for various sizes of integers.
    # For the longest time, I tried to avoid making a deep copy of the non-encoded packets, at least
    # for some of the packets. For transient messages (e.g. LIEs) that is easier than for persistent
    # messages (e.g. TIE which are stored in the database, or TIDEs which are encoded once and sent
    # multiple times). However, in the end this turned out to be impossible or at least a
    # bountiful source of bugs, because transient messages contain direct or indirect references
    # to persistent objects. So, I gave up, and now always do a deep copy of the message to be
    # encoded.
    protocol_packet = packet_info.protocol_packet
    fixed_protocol_packet = copy.deepcopy(protocol_packet)
    fix_prot_packet_before_encode(fixed_protocol_packet)
    transport_out = thrift.transport.TTransport.TMemoryBuffer()
    protocol_out = thrift.protocol.TBinaryProtocol.TBinaryProtocol(transport_out)
    fixed_protocol_packet.write(protocol_out)
    packet_info.encoded_protocol_packet = transport_out.getvalue()
    # If it is a TIE, update the origin security header. We do this here since it only needs to be
    # done once when the packet is encoded. However, for the envelope header and for the outer
    # security header it is up to the caller to call the corresponding update function before
    # sending out the encoded message:
    # * The envelope header must be updated each time the packet number changes
    # * The outer security header must be updated each time a nonce or the remaining TIE lifetime
    #   changes.
    if protocol_packet.content.tie:
        packet_info.update_origin_sec_env_header(origin_key)
    return packet_info
def decode_message(rx_intf, from_info, message, active_outer_key, accept_outer_keys,
                   active_origin_key, accept_origin_keys):
    """Decode and authenticate one received message.

    Always returns a PacketInfo; on failure its .error field is set and
    later stages are skipped.
    """
    packet_info = PacketInfo()
    record_source_info(packet_info, rx_intf, from_info)
    offset = decode_envelope_header(packet_info, message)
    if offset == -1:
        return packet_info
    offset = decode_outer_security_header(packet_info, message, offset)
    if offset == -1:
        return packet_info
    # A lifetime of all-ones appears to indicate "no TIE origin envelope
    # follows" — only parse one otherwise.
    if packet_info.remaining_tie_lifetime != 0xffffffff:
        offset = decode_origin_security_header(packet_info, message, offset)
        if offset == -1:
            return packet_info
    if decode_protocol_packet(packet_info, message, offset) == -1:
        return packet_info
    if not check_outer_fingerprint(packet_info, active_outer_key, accept_outer_keys):
        return packet_info
    if not check_origin_fingerprint(packet_info, active_origin_key, accept_origin_keys):
        return packet_info
    return packet_info
def set_lifetime(packet_info, lifetime):
    """Override the remaining TIE lifetime recorded on packet_info."""
    packet_info.remaining_tie_lifetime = lifetime
def record_source_info(packet_info, rx_intf, from_info):
    """Record the receive interface and a printable source address."""
    packet_info.rx_intf = rx_intf
    if not from_info:
        return
    if len(from_info) == 2:
        # (host, port) 2-tuple => IPv4 socket address
        packet_info.address_family = constants.ADDRESS_FAMILY_IPV4
        packet_info.from_addr_port_str = "from {}:{}".format(from_info[0], from_info[1])
    else:
        # 4-tuple => IPv6 socket address (host, port, flowinfo, scopeid)
        assert len(from_info) == 4
        packet_info.address_family = constants.ADDRESS_FAMILY_IPV6
        packet_info.from_addr_port_str = "from [{}]:{}".format(from_info[0], from_info[1])
def decode_envelope_header(packet_info, message):
    """Parse the 4-byte envelope header (magic, packet number).

    Returns the offset of the next section (4), or -1 with
    packet_info.error set.
    """
    if len(message) < 4:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = "Missing magic and packet number"
        return -1
    magic, packet_nr = struct.unpack("!HH", message[0:4])
    if magic != RIFT_MAGIC:
        packet_info.error = packet_info.ERR_WRONG_MAGIC
        packet_info.error_details = "Expected 0x{:x}, got 0x{:x}".format(RIFT_MAGIC, magic)
        return -1
    packet_info.env_header = message[0:4]
    packet_info.packet_nr = packet_nr
    return 4
def decode_outer_security_header(packet_info, message, offset):
    """Parse the outer security envelope starting at `offset`.

    Layout: reserved(1) major_version(1) outer_key_id(1)
    fingerprint_len-in-words(1), then the fingerprint, then
    nonce_local(2) nonce_remote(2) remaining_tie_lifetime(4).
    Returns the offset just past the envelope, or -1 with
    packet_info.error set.
    """
    start_header_offset = offset
    message_len = len(message)
    if offset + 4 > message_len:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = \
            "Missing major version, outer key id and outer fingerprint length"
        return -1
    (_reserved, major_version, outer_key_id, outer_fingerprint_len) = \
        struct.unpack("!BBBB", message[offset:offset+4])
    offset += 4
    expected_major_version = encoding.constants.protocol_major_version
    if major_version != expected_major_version:
        packet_info.error = packet_info.ERR_WRONG_MAJOR_VERSION
        packet_info.error_details = ("Expected {}, got {}"
                                     .format(expected_major_version, major_version))
        return -1
    # The wire field carries the fingerprint length in 32-bit words.
    outer_fingerprint_len *= 4
    if offset + outer_fingerprint_len > message_len:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = "Missing outer fingerprint"
        return -1
    outer_fingerprint = message[offset:offset+outer_fingerprint_len]
    offset += outer_fingerprint_len
    if offset + 8 > message_len:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = \
            "Missing nonce local, nonce remote and remaining tie lifetime"
        return -1
    (nonce_local, nonce_remote, remaining_tie_lifetime) = \
        struct.unpack("!HHL", message[offset:offset+8])
    offset += 8
    # Keep the raw header bytes: the fingerprint check re-digests them.
    packet_info.outer_sec_env_header = message[start_header_offset:offset]
    packet_info.outer_key_id = outer_key_id
    packet_info.nonce_local = nonce_local
    packet_info.nonce_remote = nonce_remote
    packet_info.remaining_tie_lifetime = remaining_tie_lifetime
    packet_info.outer_fingerprint_len = outer_fingerprint_len
    packet_info.outer_fingerprint = outer_fingerprint
    return offset
def decode_origin_security_header(packet_info, message, offset):
    """Parse the TIE origin security envelope starting at `offset`.

    Layout: 24-bit big-endian origin key id, fingerprint_len-in-words(1),
    then the fingerprint. Returns the offset just past the envelope, or -1
    with packet_info.error set.
    """
    start_header_offset = offset
    message_len = len(message)
    if offset + 4 > message_len:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = \
            "Missing TIE origin key id and TIE origin fingerprint length"
        return -1
    (byte1, byte2, byte3, origin_fingerprint_len) = struct.unpack("!BBBB", message[offset:offset+4])
    origin_key_id = (byte1 << 16) | (byte2 << 8) | byte3
    offset += 4
    # Key id and fingerprint must be both present or both absent.
    if ((origin_key_id == 0 and origin_fingerprint_len != 0) or
            (origin_key_id != 0 and origin_fingerprint_len == 0)):
        packet_info.error = packet_info.ERR_INCONSISTENT_ORIGIN_KEY_ID
        return -1
    # The wire field carries the fingerprint length in 32-bit words.
    origin_fingerprint_len *= 4
    if offset + origin_fingerprint_len > message_len:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = "Missing TIE origin fingerprint"
        return -1
    origin_fingerprint = message[offset:offset+origin_fingerprint_len]
    offset += origin_fingerprint_len
    # Keep the raw header bytes: the outer fingerprint check digests them too.
    packet_info.origin_sec_env_header = message[start_header_offset:offset]
    packet_info.origin_key_id = origin_key_id
    packet_info.origin_fingerprint_len = origin_fingerprint_len
    packet_info.origin_fingerprint = origin_fingerprint
    return offset
def decode_protocol_packet(packet_info, message, offset):
    """Thrift-decode the payload starting at `offset` into a ProtocolPacket.

    On success records the packet (and its packet_type) on packet_info and
    returns len(message); returns -1 with packet_info.error set on a
    decode or validation failure.
    """
    encoded_protocol_packet = message[offset:]
    transport_in = thrift.transport.TTransport.TMemoryBuffer(encoded_protocol_packet)
    protocol_in = thrift.protocol.TBinaryProtocol.TBinaryProtocol(transport_in)
    protocol_packet = encoding.ttypes.ProtocolPacket()
    try:
        protocol_packet.read(protocol_in)
    # We don't know what exception Thrift might throw
    # pylint: disable=broad-except
    except Exception as err:
        packet_info.error = packet_info.ERR_TRIFT_DECODE
        packet_info.error_details = str(err)
        return -1
    try:
        protocol_packet.validate()
    except thrift.protocol.TProtocol.TProtocolException as err:
        packet_info.error = packet_info.ERR_TRIFT_VALIDATE
        packet_info.error_details = str(err)
        return -1
    # Map signed Thrift integers back to the unsigned ranges (see the
    # MAX_U*/MAX_S* helpers below).
    fix_prot_packet_after_decode(protocol_packet)
    packet_info.encoded_protocol_packet = encoded_protocol_packet
    packet_info.protocol_packet = protocol_packet
    if protocol_packet.content.lie:
        packet_info.packet_type = constants.PACKET_TYPE_LIE
    elif protocol_packet.content.tie:
        packet_info.packet_type = constants.PACKET_TYPE_TIE
    elif protocol_packet.content.tide:
        packet_info.packet_type = constants.PACKET_TYPE_TIDE
    elif protocol_packet.content.tire:
        packet_info.packet_type = constants.PACKET_TYPE_TIRE
    return len(message)
def check_outer_fingerprint(packet_info, active_outer_key, accept_outer_keys):
    """Verify the outer security envelope fingerprint.

    Returns True when the fingerprint is acceptable, False otherwise
    (with packet_info.error set).
    """
    if not packet_info.outer_sec_env_header:
        packet_info.error = packet_info.ERR_MISSING_OUTER_SEC_ENV
        # BUG FIX: previously `return packet_info` — a truthy object — so a
        # missing outer envelope was not seen as a failure by boolean callers.
        return False
    use_key = find_key_id(packet_info.outer_key_id, active_outer_key, accept_outer_keys)
    if not use_key:
        if packet_info.outer_key_id == 0:
            packet_info.error = packet_info.ERR_ZERO_OUTER_KEY_ID_NOT_ACCEPTED
        else:
            packet_info.error = packet_info.ERR_NON_ZERO_OUTER_KEY_ID_NOT_ACCEPTED
        packet_info.error_details = "Outer key id is " + str(packet_info.outer_key_id)
        return False
    # The digest covers the trailing 8 header bytes (nonces + lifetime),
    # the origin envelope (if any) and the encoded payload.
    post = packet_info.outer_sec_env_header[-8:]
    expected = use_key.padded_digest([post, packet_info.origin_sec_env_header,
                                      packet_info.encoded_protocol_packet])
    if packet_info.outer_fingerprint != expected:
        packet_info.error = packet_info.ERR_INCORRECT_OUTER_FINGERPRINT
        return False
    return True
def check_origin_fingerprint(packet_info, active_origin_key, accept_origin_keys):
    """Verify the TIE origin security envelope fingerprint.

    TIE packets must carry an origin envelope; non-TIE packets must not.
    Returns True on success, False otherwise (with packet_info.error set).
    """
    if packet_info.protocol_packet:
        if packet_info.protocol_packet.content.tie:
            if not packet_info.origin_sec_env_header:
                packet_info.error = packet_info.ERR_MISSING_ORIGIN_SEC_ENV
                # BUG FIX: previously `return packet_info` (truthy) instead of
                # False, masking the failure from boolean callers.
                return False
        else:
            if packet_info.origin_sec_env_header:
                packet_info.error = packet_info.ERR_UNEXPECTED_ORIGIN_SEC_ENV
                # BUG FIX: same truthy-return defect as above.
                return False
    if not packet_info.origin_sec_env_header:
        # No origin envelope and none required: nothing to verify.
        return True
    use_key = find_key_id(packet_info.origin_key_id, active_origin_key, accept_origin_keys)
    if not use_key:
        if packet_info.origin_key_id == 0:
            packet_info.error = packet_info.ERR_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED
        else:
            packet_info.error = packet_info.ERR_NON_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED
        packet_info.error_details = "TIE origin key id is " + str(packet_info.origin_key_id)
        return False
    expected = use_key.padded_digest([packet_info.encoded_protocol_packet])
    if packet_info.origin_fingerprint != expected:
        packet_info.error = packet_info.ERR_INCORRECT_ORIGIN_FINGERPRINT
        return False
    return True
def find_key_id(key_id, active_key, accept_keys):
    """Return the configured key matching key_id.

    Falls back to a null key for id 0 when no keys are configured at all;
    returns None when nothing matches.
    """
    if active_key and active_key.key_id == key_id:
        return active_key
    for candidate in (accept_keys if accept_keys is not None else []):
        if candidate.key_id == key_id:
            return candidate
    if key_id == 0 and active_key is None and not accept_keys:
        return key.Key(0, "null", None)
    return None
# What follows are some horrible hacks to deal with the fact that Thrift only support signed 8, 16,
# 32, and 64 bit numbers and not unsigned 8, 16, 32, and 64 bit numbers. The RIFT specification has
# several fields are intended to contain an unsigned numbers, but that are actually specified in the
# .thrift files as a signed numbers. Just look for the following text in the specification: "MUST be
# interpreted in implementation as unsigned ..." where ... can be 8 bits, or 16 bits, or 32 bits, or
# 64 bits. Keep in mind Python does not have sized integers: values of type int are unbounded (i.e.
# they have no limit on the size and no minimum or maximum value).
# Bit-width limits used by the signed/unsigned reinterpretation helpers below.
MAX_U64 = 0xffffffffffffffff
MAX_S64 = 0x7fffffffffffffff
MAX_U32 = 0xffffffff
MAX_S32 = 0x7fffffff
MAX_U16 = 0xffff
MAX_S16 = 0x7fff
MAX_U8 = 0xff
MAX_S8 = 0x7f
def u64_to_s64(u64):
return u64 if u64 <= MAX_S64 else u64 - MAX_U64 - 1
def u32_to_s32(u32):
return u32 if u32 <= MAX_S32 else u32 - MAX_U32 - 1
def u16_to_s16(u16):
return u16 if u16 <= MAX_S16 else u16 - MAX_U16 - 1
def u8_to_s8(u08):
return u08 if u08 <= MAX_S8 else u08 - MAX_U8 - 1
def s64_to_u64(s64):
return s64 if s64 >= 0 else s64 + MAX_U64 + 1
def s32_to_u32(s32):
return s32 if s32 >= 0 else s32 + MAX_U32 + 1
def s16_to_u16(s16):
return s16 if s16 >= 0 else s16 + MAX_U16 + 1
def s8_to_u8(s08):
return s08 if s08 >= 0 else s08 + MAX_U8 + 1
def fix_int(value, size, encode):
if encode:
# Fix before encode
if size == 8:
return u8_to_s8(value)
if size == 16:
return u16_to_s16(value)
if size == 32:
return u32_to_s32(value)
if size == 64:
return u64_to_s64(value)
assert False
else:
# Fix after decode
if size == 8:
return s8_to_u8(value)
if size == 16:
return s16_to_u16(value)
if size == 32:
return s32_to_u32(value)
if size == 64:
return s64_to_u64(value)
assert False
return value # Unreachable, stop pylint from complaining about inconsistent-return-statements
def fix_dict(old_dict, dict_fixes, encode):
    """Apply (key_fixes, value_fixes) to every entry of a map; returns a new dict."""
    key_fixes, value_fixes = dict_fixes
    return {
        fix_value(the_key, key_fixes, encode): fix_value(the_value, value_fixes, encode)
        for the_key, the_value in old_dict.items()
    }

def fix_struct(fixed_struct, fixes, encode):
    """Apply per-field fixes to a Thrift struct in place; returns the struct."""
    for field_name, field_fix in fixes:
        # Silently skip fields the struct doesn't have, and unset (None) fields.
        if field_name not in vars(fixed_struct):
            continue
        old_value = getattr(fixed_struct, field_name)
        if old_value is None:
            continue
        setattr(fixed_struct, field_name, fix_value(old_value, field_fix, encode))
    return fixed_struct

def fix_set(old_set, fix, encode):
    """Apply the fix to every member of a set; returns a new set."""
    return {fix_value(member, fix, encode) for member in old_set}

def fix_list(old_list, fix, encode):
    """Apply the fix to every member of a list; returns a new list."""
    return [fix_value(member, fix, encode) for member in old_list]

def fix_value(value, fix, encode):
    """Apply a fix specification to one value, dispatching on its shape.

    Containers recurse member-wise; otherwise the fix itself tells the type:
    an int is a bit width, a tuple is a map spec, a list is a struct spec.
    """
    if isinstance(value, set):
        return fix_set(value, fix, encode)
    if isinstance(value, list):
        return fix_list(value, fix, encode)
    if isinstance(fix, int):
        return fix_int(value, fix, encode)
    if isinstance(fix, tuple):
        return fix_dict(value, fix, encode)
    if isinstance(fix, list):
        return fix_struct(value, fix, encode)
    assert False
def fix_packet_before_encode(packet, fixes):
    """Convert unsigned field values to signed ones (in place) before Thrift encoding."""
    fix_struct(packet, fixes, True)
def fix_packet_after_decode(packet, fixes):
    """Convert signed field values back to unsigned ones (in place) after Thrift decoding."""
    fix_struct(packet, fixes, False)
# Fix tables: for each Thrift struct, which fields must be re-interpreted as
# unsigned after decode (and converted back to signed before encode).
# The fix mini-language, interpreted by fix_value():
#   - an int is the bit width (8/16/32/64) of an unsigned integer field;
#   - a list describes a nested struct as (field_name, fix) pairs;
#   - a tuple describes a map as (key_fixes, value_fixes).
TIEID_FIXES = [
    ('originator', 64),
    ('tie_nr', 32)
]
TIMESTAMP_FIXES = [
    ('AS_sec', 64),
    ('AS_nsec', 32)
]
TIE_HEADER_FIXES = [
    ('tieid', TIEID_FIXES),
    ('seq_nr', 64),
    ('origination_time', TIMESTAMP_FIXES),
    ('origination_lifetime', 32)
]
TIE_HEADER_WITH_LIFETIME_FIXES = [
    ('header', TIE_HEADER_FIXES),
    ('remaining_lifetime', 32),
]
LINK_ID_PAIR_FIXES = [
    ('local_id', 32),  # Draft doesn't mention this needs to be treated as unsigned
    ('remote_id', 32)  # Draft doesn't mention this needs to be treated as unsigned
]
NODE_NEIGHBORS_TIE_ELEMENT_FIXES = [
    ('level', 16),
    ('cost', 32),
    ('link_ids', LINK_ID_PAIR_FIXES),
    ('bandwidth', 32)
]
IP_PREFIX_FIXES = [
    ('ipv4prefix', [
        ('address', 32),
        ('prefixlen', 8)  # Draft doesn't mention this needs to be treated as unsigned
    ]),
    ('ipv6prefix', [
        ('prefixlen', 8)  # Draft doesn't mention this needs to be treated as unsigned
    ])
]
PREFIX_ATTRIBUTES_FIXES = [
    ('metric', 32), ('tags', 64),
    ('monotonic_clock', [
        ('timestamp', TIMESTAMP_FIXES),
        ('transactionid', 8)
    ])
]
PREFIX_TIE_ELEMENT_FIXES = [
    ('prefixes', (IP_PREFIX_FIXES, PREFIX_ATTRIBUTES_FIXES))
]
# Top-level fix table covering an entire ProtocolPacket (header plus the
# LIE / TIDE / TIRE / TIE content variants).
PROTOCOL_PACKET_FIXES = [
    ('header', [
        ('major_version', 8),
        ('minor_version', 16),
        ('sender', 64),
        ('level', 16)]),
    ('content', [
        ('lie', [
            ('local_id', 32),  # Draft doesn't mention this needs to be treated as unsigned
            ('flood_port', 16),
            ('link_mtu_size', 32),
            ('link_bandwidth', 32),
            ('neighbor', [
                ('originator', 64),
                ('remote_id', 32)  # Draft doesn't mention this needs to be treated as unsigned
            ]),
            ('pod', 32),
            ('holdtime', 16),  # Draft doesn't mention this needs to be treated as unsigned
            ('label', 32)]),
        ('tide', [
            ('start_range', TIEID_FIXES),
            ('end_range', TIEID_FIXES),
            ('headers', TIE_HEADER_WITH_LIFETIME_FIXES)
        ]),
        ('tire', [
            ('headers', TIE_HEADER_WITH_LIFETIME_FIXES)
        ]),
        ('tie', [
            ('header', TIE_HEADER_FIXES),
            ('element', [
                ('node', [
                    ('level', 16),
                    ('neighbors', (64, NODE_NEIGHBORS_TIE_ELEMENT_FIXES))
                ]),
                ('prefixes', PREFIX_TIE_ELEMENT_FIXES),
                ('positive_disaggregation_prefixes', PREFIX_TIE_ELEMENT_FIXES),
                ('negative_disaggregation_prefixes', PREFIX_TIE_ELEMENT_FIXES),
                ('external_prefixes', PREFIX_TIE_ELEMENT_FIXES),
            ])
        ])
    ])
]
def fix_prot_packet_before_encode(protocol_packet):
    """Sign-fix every unsigned field of a ProtocolPacket (in place) before encoding."""
    fix_packet_before_encode(protocol_packet, PROTOCOL_PACKET_FIXES)
def fix_prot_packet_after_decode(protocol_packet):
    """Restore the unsigned interpretation of every field (in place) after decoding."""
    fix_packet_after_decode(protocol_packet, PROTOCOL_PACKET_FIXES)
def make_tie_id(direction, originator, tie_type, tie_nr):
    """Construct a Thrift TIEID from its four components."""
    return encoding.ttypes.TIEID(direction=direction,
                                 originator=originator,
                                 tietype=tie_type,
                                 tie_nr=tie_nr)
def make_tie_header(direction, originator, tie_type, tie_nr, seq_nr,
                    origination_time=None):
    """Construct a TIEHeader (without a remaining lifetime) for the given TIE."""
    return encoding.ttypes.TIEHeader(
        tieid=make_tie_id(direction, originator, tie_type, tie_nr),
        seq_nr=seq_nr,
        origination_time=origination_time)
def make_tie_header_with_lifetime(direction, originator,
                                  tie_type, tie_nr, seq_nr,
                                  lifetime,
                                  origination_time=None):
    """Construct a TIEHeaderWithLifeTime for the given TIE identity."""
    header = make_tie_header(direction, originator, tie_type, tie_nr, seq_nr,
                             origination_time)
    return expand_tie_header_with_lifetime(header, lifetime)
def expand_tie_header_with_lifetime(tie_header, lifetime):
    """Wrap an existing TIEHeader together with a remaining lifetime."""
    return encoding.ttypes.TIEHeaderWithLifeTime(header=tie_header,
                                                 remaining_lifetime=lifetime)
def make_prefix_tie_packet(direction, originator, tie_nr, seq_nr):
    """Create an empty Prefix TIE packet; prefixes are attached afterwards."""
    header = make_tie_header(direction, originator,
                             common.ttypes.TIETypeType.PrefixTIEType,
                             tie_nr, seq_nr)
    element = encoding.ttypes.TIEElement(
        prefixes=encoding.ttypes.PrefixTIEElement(prefixes={}))
    return encoding.ttypes.TIEPacket(header=header, element=element)
def make_ipv4_address(address_str):
    """Parse a dotted-quad string into an ipaddress.IPv4Address."""
    return ipaddress.IPv4Address(address_str)
def make_ipv6_address(address_str):
    """Parse a colon-separated string into an ipaddress.IPv6Address."""
    return ipaddress.IPv6Address(address_str)
def make_ip_address(address_str):
    """Parse an address of either family; a ':' in the string marks IPv6."""
    maker = make_ipv6_address if ":" in address_str else make_ipv4_address
    return maker(address_str)
def make_ip_prefix(prefix_str):
    """Parse a prefix string of either family into a Thrift IPPrefixType."""
    maker = make_ipv6_prefix if ":" in prefix_str else make_ipv4_prefix
    return maker(prefix_str)
def make_ipv4_prefix(prefix_str):
    """Parse 'a.b.c.d/len' into an IPPrefixType with the ipv4prefix arm set."""
    network = ipaddress.IPv4Network(prefix_str)
    # The Thrift encoding carries the IPv4 address as an integer.
    ipv4_prefix = common.ttypes.IPv4PrefixType(int(network.network_address),
                                               network.prefixlen)
    return common.ttypes.IPPrefixType(ipv4prefix=ipv4_prefix)
def make_ipv6_prefix(prefix_str):
    """Parse 'addr/len' into an IPPrefixType with the ipv6prefix arm set."""
    network = ipaddress.IPv6Network(prefix_str)
    # The Thrift encoding carries the IPv6 address as packed bytes.
    ipv6_prefix = common.ttypes.IPv6PrefixType(network.network_address.packed,
                                               network.prefixlen)
    return common.ttypes.IPPrefixType(ipv6prefix=ipv6_prefix)
def add_ipv4_prefix_to_prefix_tie(prefix_tie_packet, prefix, metric, tags=None,
                                  monotonic_clock=None):
    """Attach an already-constructed IPv4 IPPrefixType to a Prefix TIE packet.

    NOTE(review): unlike add_ipv6_prefix_to_prefix_tie, this takes a
    ready-made prefix object rather than a prefix string -- confirm the
    asymmetry is intentional before unifying the two helpers.
    """
    attributes = encoding.ttypes.PrefixAttributes(metric=metric,
                                                  tags=tags,
                                                  monotonic_clock=monotonic_clock)
    prefix_tie_packet.element.prefixes.prefixes[prefix] = attributes
def add_ipv6_prefix_to_prefix_tie(prefix_tie_packet, ipv6_prefix_string, metric, tags=None,
                                  monotonic_clock=None):
    """Parse an IPv6 prefix string and attach it to a Prefix TIE packet."""
    attributes = encoding.ttypes.PrefixAttributes(metric=metric,
                                                  tags=tags,
                                                  monotonic_clock=monotonic_clock)
    prefix = make_ipv6_prefix(ipv6_prefix_string)
    prefix_tie_packet.element.prefixes.prefixes[prefix] = attributes
def make_node_tie_packet(name, level, direction, originator, tie_nr, seq_nr):
    """Create a Node TIE packet with empty neighbors and default capabilities."""
    header = make_tie_header(direction, originator,
                             common.ttypes.TIETypeType.NodeTIEType,
                             tie_nr, seq_nr)
    capabilities = encoding.ttypes.NodeCapabilities(
        protocol_minor_version=encoding.constants.protocol_minor_version,
        flood_reduction=True)
    node_element = encoding.ttypes.NodeTIEElement(
        level=level,
        neighbors={},
        capabilities=capabilities,
        flags=None,  # TODO: Implement this
        name=name)
    return encoding.ttypes.TIEPacket(
        header=header,
        element=encoding.ttypes.TIEElement(node=node_element))
def make_tide_packet(start_range, end_range):
    """Create a TIDE packet covering [start_range, end_range] with no headers yet."""
    return encoding.ttypes.TIDEPacket(start_range=start_range,
                                      end_range=end_range,
                                      headers=[])
def add_tie_header_to_tide(tide_packet, tie_header):
    """Append a TIE header (with remaining lifetime) to a TIDE packet."""
    # TIDE headers must carry a remaining lifetime; reject the bare header type.
    assert tie_header.__class__ == encoding.ttypes.TIEHeaderWithLifeTime
    tide_packet.headers.append(tie_header)
def make_tire_packet():
    """Create an empty TIRE packet (a set of requested/acknowledged TIE headers)."""
    return encoding.ttypes.TIREPacket(headers=set())
def add_tie_header_to_tire(tire_packet, tie_header):
    """Add a TIE header (with remaining lifetime) to a TIRE packet."""
    assert tie_header.__class__ == encoding.ttypes.TIEHeaderWithLifeTime
    tire_packet.headers.add(tie_header)
# Display names for the two TIE flooding directions.
DIRECTION_TO_STR = {
    common.ttypes.TieDirectionType.South: "South",
    common.ttypes.TieDirectionType.North: "North"
}
def direction_str(direction):
    """Return a human-readable direction name, falling back to str()."""
    return DIRECTION_TO_STR.get(direction, str(direction))
def ipv4_prefix_str(ipv4_prefix):
    """Render a Thrift IPv4 prefix (integer address + length) as 'a.b.c.d/len'."""
    network = ipaddress.IPv4Network((ipv4_prefix.address, ipv4_prefix.prefixlen))
    return str(network)
def ipv6_prefix_str(ipv6_prefix):
    """Render a Thrift IPv6 prefix (packed bytes + length) as 'addr/len'.

    NOTE(review): a short address is left-padded with zero bytes (rjust); if
    the encoder ever truncates *trailing* zero bytes this should arguably be
    right-padding instead -- confirm against the encoding side.
    """
    padded_address = ipv6_prefix.address.rjust(16, b"\x00")
    network = ipaddress.IPv6Network((padded_address, ipv6_prefix.prefixlen))
    return str(network)
def ip_prefix_str(ip_prefix):
    """Render an IPPrefixType that holds exactly one of ipv4prefix / ipv6prefix."""
    # Exactly one of the two arms must be populated.
    assert (ip_prefix.ipv4prefix is None) or (ip_prefix.ipv6prefix is None)
    assert (ip_prefix.ipv4prefix is not None) or (ip_prefix.ipv6prefix is not None)
    if ip_prefix.ipv4prefix:
        return ipv4_prefix_str(ip_prefix.ipv4prefix)
    return ipv6_prefix_str(ip_prefix.ipv6prefix)
# Display names for the known TIE types.
TIETYPE_TO_STR = {
    common.ttypes.TIETypeType.NodeTIEType: "Node",
    common.ttypes.TIETypeType.PrefixTIEType: "Prefix",
    common.ttypes.TIETypeType.PositiveDisaggregationPrefixTIEType: "Pos-Dis-Prefix",
    common.ttypes.TIETypeType.NegativeDisaggregationPrefixTIEType: "Neg-Dis-Prefix",
    common.ttypes.TIETypeType.ExternalPrefixTIEType: "Ext-Prefix",
    common.ttypes.TIETypeType.PGPrefixTIEType: "PG-Prefix",
    common.ttypes.TIETypeType.KeyValueTIEType: "Key-Value"
}
def tietype_str(tietype):
    """Return the display name for a TIE type, falling back to str()."""
    return TIETYPE_TO_STR.get(tietype, str(tietype))
def tie_id_str(tie_id):
    """Render a TIEID as 'direction:originator:tietype:tie_nr'."""
    return ":".join([direction_str(tie_id.direction),
                     str(tie_id.originator),
                     tietype_str(tie_id.tietype),
                     str(tie_id.tie_nr)])
# Display names for the node hierarchy (leaf / top-of-fabric) indications.
HIERARCHY_INDICATIONS_TO_STR = {
    common.ttypes.HierarchyIndications.leaf_only: "LeafOnly",
    common.ttypes.HierarchyIndications.leaf_only_and_leaf_2_leaf_procedures: "LeafToLeaf",
    common.ttypes.HierarchyIndications.top_of_fabric: "TopOfFabric",
}
def hierarchy_indications_str(hierarchy_indications):
    """Return the display name for a hierarchy indication, falling back to str()."""
    return HIERARCHY_INDICATIONS_TO_STR.get(hierarchy_indications,
                                            str(hierarchy_indications))
def bandwidth_str(bandwidth):
    """Render a bandwidth value (megabits per second) for display."""
    return f"{bandwidth} Mbps"
def link_id_pair_str(link_id_pair):
    """Render a local/remote link-id pair as 'local-remote'."""
    return f"{link_id_pair.local_id}-{link_id_pair.remote_id}"
def node_element_str(element):
    """Render a Node TIE element as a list of indented display lines."""
    lines = []
    if element.name is not None:
        lines.append(f"Name: {element.name}")
    lines.append(f"Level: {element.level}")
    flags = element.flags
    if flags is not None:
        lines.append("Flags:")
        if flags.overload is not None:
            lines.append(f"  Overload: {flags.overload}")
    capabilities = element.capabilities
    if capabilities is not None:
        lines.append("Capabilities:")
        if capabilities.flood_reduction is not None:
            lines.append(f"  Flood reduction: {capabilities.flood_reduction}")
        if capabilities.hierarchy_indications is not None:
            indications = hierarchy_indications_str(capabilities.hierarchy_indications)
            lines.append(f"  Leaf indications: {indications}")
    # SortedDict ensures neighbors are listed in increasing system-id order.
    for system_id, neighbor in sortedcontainers.SortedDict(element.neighbors).items():
        lines.append(f"Neighbor: {utils.system_id_str(system_id)}")
        lines.append(f"  Level: {neighbor.level}")
        if neighbor.cost is not None:
            lines.append(f"  Cost: {neighbor.cost}")
        if neighbor.bandwidth is not None:
            lines.append(f"  Bandwidth: {bandwidth_str(neighbor.bandwidth)}")
        if neighbor.link_ids is not None:
            for link_id_pair in sorted(neighbor.link_ids):
                lines.append(f"  Link: {link_id_pair_str(link_id_pair)}")
    return lines
def prefixes_str(label_str, prefixes):
    """Render a prefix TIE element as display lines, one entry per prefix."""
    lines = []
    # SortedDict ensures the prefixes are listed in a deterministic order.
    for prefix, attributes in sortedcontainers.SortedDict(prefixes.prefixes).items():
        lines.append(f"{label_str} {ip_prefix_str(prefix)}")
        if not attributes:
            continue
        if attributes.metric:
            lines.append(f"  Metric: {attributes.metric}")
        if attributes.tags:
            for tag in attributes.tags:
                lines.append(f"  Tag: {tag}")
        if attributes.monotonic_clock:
            lines.append("  Monotonic-clock:")
            clock = attributes.monotonic_clock
            if clock.timestamp:
                line = f"    Timestamp: {clock.timestamp.AS_sec}"
                if clock.timestamp.AS_nsec:
                    # NOTE(review): zero-pads to 6 digits although AS_nsec looks
                    # like nanoseconds (up to 9 digits) -- confirm the intended
                    # display precision.
                    line += ".{:06d}".format(clock.timestamp.AS_nsec)
                lines.append(line)
            if clock.transactionid:
                lines.append(f"    Transaction-ID: {clock.transactionid}")
    return lines
def pg_prefix_element_str(_element):
    # TODO: Implement rendering of policy-guided prefix TIE elements.
    return "TODO"
def key_value_element_str(_element):
    # TODO: Implement rendering of key-value TIE elements.
    return "TODO"
def unknown_element_str(_element):
    # TODO: Implement rendering of unrecognized TIE element types.
    return "TODO"
def element_str(tietype, element):
    """Render a TIE element as display lines, dispatching on the TIE type."""
    tie_types = common.ttypes.TIETypeType
    if tietype == tie_types.NodeTIEType:
        return node_element_str(element.node)
    if tietype == tie_types.PrefixTIEType:
        return prefixes_str("Prefix:", element.prefixes)
    if tietype == tie_types.PositiveDisaggregationPrefixTIEType:
        return prefixes_str("Pos-Dis-Prefix:", element.positive_disaggregation_prefixes)
    if tietype == tie_types.NegativeDisaggregationPrefixTIEType:
        return prefixes_str("Neg-Dis-Prefix:", element.negative_disaggregation_prefixes)
    if tietype == tie_types.ExternalPrefixTIEType:
        return prefixes_str("Ext-Prefix:", element.external_prefixes)
    if tietype == tie_types.KeyValueTIEType:
        return key_value_element_str(element.keyvalues)
    # PGPrefixTIEType rendering is not implemented yet; treat it like unknown.
    return unknown_element_str(element)
def assert_prefix_address_family(prefix, address_family):
    """Sanity-check that the prefix carries exactly the requested address family."""
    assert isinstance(prefix, common.ttypes.IPPrefixType)
    if address_family == constants.ADDRESS_FAMILY_IPV4:
        # IPv4 requested: only the ipv4prefix arm may be populated.
        assert prefix.ipv4prefix is not None
        assert prefix.ipv6prefix is None
    elif address_family == constants.ADDRESS_FAMILY_IPV6:
        # IPv6 requested: only the ipv6prefix arm may be populated.
        assert prefix.ipv4prefix is None
        assert prefix.ipv6prefix is not None
    else:
        assert False
| 41.400198
| 100
| 0.692102
|
4a0c770f0b4fe43f6e40bc5c56070a6012259462
| 2,890
|
py
|
Python
|
benchmark/illgal_recognizer/mosaic/main.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | 24
|
2021-12-02T01:00:54.000Z
|
2022-03-27T00:50:28.000Z
|
benchmark/illgal_recognizer/mosaic/main.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | null | null | null |
benchmark/illgal_recognizer/mosaic/main.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | 3
|
2021-12-02T01:00:47.000Z
|
2022-03-04T07:33:09.000Z
|
import cv2 as cv2
import couchdb
import os,json
# couchdb_address = 'http://openwhisk:openwhisk@10.2.64.8:5984/'
# db = couchdb.Server(couchdb_address)
# def active_storage(avtive_type, user_object,document_id,filename,file_path=None,content_type=None, save_path=None):
# if avtive_type == 'PUT':
# content = open(file_path, 'rb')
# user_object.put_attachment(user_object[document_id], content.read(), filename = filename, content_type = content_type)
# content.close()
# elif avtive_type == 'GET':
# r = user_object.get_attachment(document_id,filename = filename)
# with open(save_path,'wb') as f: f.write(r.read())
def main():
    """Fetch an image from the store, pixelate it, and store the result.

    Reads 'image_name' and 'output_prefix' from the store, writes the image
    bytes under work/, pixelates it in 8x8 blocks, and uploads the result as
    '<output_prefix>_<image_name>'.

    NOTE(review): `store` is not defined in this file; it appears to be a
    global injected by the FaaSFlow serverless runtime -- confirm.
    """
    inputs = store.fetch(['user_name', 'image_name', 'output_prefix'])
    image_name = inputs['image_name']
    mosaic_prefix = inputs['output_prefix']
    image_data = store.fetch([image_name])[image_name]
    input_filepath = os.path.join('work', image_name)
    with open(input_filepath, 'wb') as f:
        f.write(image_data)
    img = cv2.imread(input_filepath, 1)
    height, width, _depth = img.shape
    mosaic_height = 8
    # Pixelate: paint each aligned 8x8 block with the color of its top-left
    # pixel.  The block starts below are exactly the (m, n) positions the
    # original per-pixel loops selected (multiples of 8 strictly less than
    # dimension - 8); the slice assignment replaces 64 Python-level pixel
    # writes per block (which re-read the same source pixel on every inner
    # iteration) with a single vectorized write, producing an identical image.
    for row in range(0, height - mosaic_height, mosaic_height):
        for col in range(0, width - mosaic_height, mosaic_height):
            # Copy the source pixel first: the destination slice contains it,
            # and numpy does not guarantee overlapping read/write assignment.
            pixel = img[row, col].copy()
            img[row:row + mosaic_height, col:col + mosaic_height] = pixel
    mosaic_filename = '{0}_{1}'.format(mosaic_prefix, image_name)
    mosaic_filepath = os.path.join('work', mosaic_filename)
    cv2.imwrite(mosaic_filepath, img)
    with open(mosaic_filepath, 'rb') as f:
        mosaic_img = f.read()
    store.put({'mosaic_name': mosaic_filename, mosaic_filename: mosaic_img},
              {mosaic_filename: 'application/octet'})
# main('{"user_name":"user_2","document_id":"object_id_1","image_name":"test.jpg"}')
| 43.134328
| 128
| 0.674048
|
4a0c77913f79c14a4eccf1be739e670424d0ad05
| 16,878
|
py
|
Python
|
Framework/LanguageSupport/thrift/gen-py/MMIStandard/services/MBlendingService.py
|
FG-92/MOSIM_Core
|
abc32fd0d2213859b11b6d41193d5d7f760e4104
|
[
"MIT"
] | 19
|
2020-11-30T09:29:11.000Z
|
2021-12-10T06:10:11.000Z
|
Framework/LanguageSupport/thrift/gen-py/MMIStandard/services/MBlendingService.py
|
FG-92/MOSIM_Core
|
abc32fd0d2213859b11b6d41193d5d7f760e4104
|
[
"MIT"
] | null | null | null |
Framework/LanguageSupport/thrift/gen-py/MMIStandard/services/MBlendingService.py
|
FG-92/MOSIM_Core
|
abc32fd0d2213859b11b6d41193d5d7f760e4104
|
[
"MIT"
] | 6
|
2021-01-20T01:46:37.000Z
|
2021-09-28T10:22:14.000Z
|
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import MMIStandard.services.MMIServiceBase
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(MMIStandard.services.MMIServiceBase.Iface):
def SetBlendingMask(self, mask, avatarID):
"""
Parameters:
- mask
- avatarID
"""
pass
def Blend(self, startPosture, targetPosture, weight):
"""
Parameters:
- startPosture
- targetPosture
- weight
"""
pass
class Client(MMIStandard.services.MMIServiceBase.Client, Iface):
def __init__(self, iprot, oprot=None):
MMIStandard.services.MMIServiceBase.Client.__init__(self, iprot, oprot)
def SetBlendingMask(self, mask, avatarID):
"""
Parameters:
- mask
- avatarID
"""
self.send_SetBlendingMask(mask, avatarID)
return self.recv_SetBlendingMask()
def send_SetBlendingMask(self, mask, avatarID):
self._oprot.writeMessageBegin('SetBlendingMask', TMessageType.CALL, self._seqid)
args = SetBlendingMask_args()
args.mask = mask
args.avatarID = avatarID
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_SetBlendingMask(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = SetBlendingMask_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "SetBlendingMask failed: unknown result")
def Blend(self, startPosture, targetPosture, weight):
"""
Parameters:
- startPosture
- targetPosture
- weight
"""
self.send_Blend(startPosture, targetPosture, weight)
return self.recv_Blend()
def send_Blend(self, startPosture, targetPosture, weight):
self._oprot.writeMessageBegin('Blend', TMessageType.CALL, self._seqid)
args = Blend_args()
args.startPosture = startPosture
args.targetPosture = targetPosture
args.weight = weight
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_Blend(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = Blend_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "Blend failed: unknown result")
class Processor(MMIStandard.services.MMIServiceBase.Processor, Iface, TProcessor):
def __init__(self, handler):
MMIStandard.services.MMIServiceBase.Processor.__init__(self, handler)
self._processMap["SetBlendingMask"] = Processor.process_SetBlendingMask
self._processMap["Blend"] = Processor.process_Blend
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_SetBlendingMask(self, seqid, iprot, oprot):
args = SetBlendingMask_args()
args.read(iprot)
iprot.readMessageEnd()
result = SetBlendingMask_result()
try:
result.success = self._handler.SetBlendingMask(args.mask, args.avatarID)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("SetBlendingMask", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_Blend(self, seqid, iprot, oprot):
args = Blend_args()
args.read(iprot)
iprot.readMessageEnd()
result = Blend_result()
try:
result.success = self._handler.Blend(args.startPosture, args.targetPosture, args.weight)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("Blend", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class SetBlendingMask_args(object):
"""
Attributes:
- mask
- avatarID
"""
def __init__(self, mask=None, avatarID=None,):
self.mask = mask
self.avatarID = avatarID
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.mask = {}
(_ktype183, _vtype184, _size182) = iprot.readMapBegin()
for _i186 in range(_size182):
_key187 = iprot.readI32()
_val188 = iprot.readDouble()
self.mask[_key187] = _val188
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.avatarID = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('SetBlendingMask_args')
if self.mask is not None:
oprot.writeFieldBegin('mask', TType.MAP, 1)
oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.mask))
for kiter189, viter190 in self.mask.items():
oprot.writeI32(kiter189)
oprot.writeDouble(viter190)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.avatarID is not None:
oprot.writeFieldBegin('avatarID', TType.STRING, 2)
oprot.writeString(self.avatarID.encode('utf-8') if sys.version_info[0] == 2 else self.avatarID)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(SetBlendingMask_args)
SetBlendingMask_args.thrift_spec = (
None, # 0
(1, TType.MAP, 'mask', (TType.I32, None, TType.DOUBLE, None, False), None, ), # 1
(2, TType.STRING, 'avatarID', 'UTF8', None, ), # 2
)
class SetBlendingMask_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = MMIStandard.core.ttypes.MBoolResponse()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('SetBlendingMask_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(SetBlendingMask_result)
SetBlendingMask_result.thrift_spec = (
(0, TType.STRUCT, 'success', [MMIStandard.core.ttypes.MBoolResponse, None], None, ), # 0
)
class Blend_args(object):
"""
Attributes:
- startPosture
- targetPosture
- weight
"""
def __init__(self, startPosture=None, targetPosture=None, weight=None,):
self.startPosture = startPosture
self.targetPosture = targetPosture
self.weight = weight
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.startPosture = MMIStandard.avatar.ttypes.MAvatarPostureValues()
self.startPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.targetPosture = MMIStandard.avatar.ttypes.MAvatarPostureValues()
self.targetPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.DOUBLE:
self.weight = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Blend_args')
if self.startPosture is not None:
oprot.writeFieldBegin('startPosture', TType.STRUCT, 1)
self.startPosture.write(oprot)
oprot.writeFieldEnd()
if self.targetPosture is not None:
oprot.writeFieldBegin('targetPosture', TType.STRUCT, 2)
self.targetPosture.write(oprot)
oprot.writeFieldEnd()
if self.weight is not None:
oprot.writeFieldBegin('weight', TType.DOUBLE, 3)
oprot.writeDouble(self.weight)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Blend_args)
Blend_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'startPosture', [MMIStandard.avatar.ttypes.MAvatarPostureValues, None], None, ), # 1
(2, TType.STRUCT, 'targetPosture', [MMIStandard.avatar.ttypes.MAvatarPostureValues, None], None, ), # 2
(3, TType.DOUBLE, 'weight', None, None, ), # 3
)
class Blend_result(object):
    """
    Thrift-generated result wrapper for the Blend RPC call.

    Attributes:
     - success: the MAvatarPostureValues returned by the service on success
    """
    def __init__(self, success=None,):
        self.success = success
    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: delegate to the C extension when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # Field id 0 is the conventional Thrift id for a return value.
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = MMIStandard.avatar.ttypes.MAvatarPostureValues()
                    self.success.read(iprot)
                else:
                    # Unexpected wire type for this field: skip it for
                    # forward/backward compatibility.
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize this struct to the given output protocol.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('Blend_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields to validate.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the result struct, declare its field spec, then let fix_spec
# resolve forward references before dropping the temporary registry.
all_structs.append(Blend_result)
Blend_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [MMIStandard.avatar.ttypes.MAvatarPostureValues, None], None, ), # 0
)
fix_spec(all_structs)
del all_structs
| 34.374745
| 134
| 0.604692
|
4a0c78176da7bcfe8bbacefa5edb7c9732203574
| 1,764
|
py
|
Python
|
var/spack/repos/builtin/packages/librsvg/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-02-08T15:05:27.000Z
|
2021-02-08T15:05:27.000Z
|
var/spack/repos/builtin/packages/librsvg/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/librsvg/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2022-01-18T23:39:24.000Z
|
2022-01-18T23:39:24.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Librsvg(AutotoolsPackage):
    """Library to render SVG files using Cairo"""
    homepage = "https://wiki.gnome.org/Projects/LibRsvg"
    url = "https://download.gnome.org/sources/librsvg/2.44/librsvg-2.44.14.tar.xz"
    version('2.50.2', sha256='6211f271ce4cd44a7318190d36712e9cea384a933d3e3570004edeb210a056d3')
    version('2.50.0', sha256='b3fadba240f09b9c9898ab20cb7311467243e607cf8f928b7c5f842474ee3df4')
    version('2.44.14', sha256='6a85a7868639cdd4aa064245cc8e9d864dad8b8e9a4a8031bb09a4796bc4e303')
    depends_on("gobject-introspection", type='build')
    depends_on("pkgconfig", type='build')
    depends_on("rust", type='build')
    depends_on("cairo+gobject")
    depends_on("gdk-pixbuf")
    depends_on("glib")
    depends_on("libcroco")
    depends_on("pango")
    depends_on('libffi')
    depends_on('libxml2')
    depends_on('shared-mime-info')
    def url_for_version(self, version):
        """Build the GNOME download URL: the directory is the major.minor series."""
        url = "https://download.gnome.org/sources/librsvg/"
        url += "{0}/librsvg-{1}.tar.xz"
        return url.format(version.up_to(2), version)
    # librsvg installs data under <prefix>/share; prepend it to XDG_DATA_DIRS
    # for builds and runs of this package and of anything depending on it.
    def setup_dependent_build_environment(self, env, dependent_spec):
        env.prepend_path('XDG_DATA_DIRS', self.prefix.share)
    def setup_dependent_run_environment(self, env, dependent_spec):
        env.prepend_path('XDG_DATA_DIRS', self.prefix.share)
    def setup_build_environment(self, env):
        env.prepend_path('XDG_DATA_DIRS', self.prefix.share)
    def setup_run_environment(self, env):
        env.prepend_path('XDG_DATA_DIRS', self.prefix.share)
| 37.531915
| 97
| 0.722222
|
4a0c7919af7ab78a1b246cedc9267eaf932869a5
| 5,314
|
py
|
Python
|
lib/googlecloudsdk/compute/lib/utils.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/lib/utils.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/lib/utils.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Utility functions that don't belong in the other utility modules."""
import cStringIO
import re
import urlparse
from googlecloudapis.compute.v1 import compute_v1_client
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.util import console_io
def ZoneNameToRegionName(zone_name):
  """Returns the region a zone belongs to: 'us-central1-a' -> 'us-central1'."""
  # The region is the zone name minus its final dash-separated component;
  # names without a dash are returned unchanged.
  region, separator, _ = zone_name.rpartition('-')
  return region if separator else zone_name
def CollectionToResourceType(collection):
  """Converts a collection to a resource type: 'compute.disks' -> 'disks'."""
  # Everything after the first dot is the resource type.
  parts = collection.split('.', 1)
  return parts[1]
def CollectionToApi(collection):
  """Converts a collection to an api: 'compute.disks' -> 'compute'."""
  # partition()[0] equals split('.', 1)[0] for every input, including
  # strings with no dot, which are returned whole.
  return collection.partition('.')[0]
def NormalizeGoogleStorageUri(uri):
  """Converts gs:// to http:// if uri begins with gs:// else returns uri."""
  prefix = 'gs://'
  # Falsy values (None, '') and non-gs URIs pass through untouched.
  if not uri or not uri.startswith(prefix):
    return uri
  return 'http://storage.googleapis.com/' + uri[len(prefix):]
def CamelCaseToOutputFriendly(string):
  """Converts camel case text into output friendly text.

  Args:
    string: The string to convert.

  Returns:
    The string converted from CamelCase to output friendly text.

  Examples:
    'camelCase' -> 'camel case'
    'CamelCase' -> 'camel case'
    'camelTLA' -> 'camel tla'
  """
  # Insert a space before every run of capitals, then trim and lower-case.
  spaced = re.sub('([A-Z]+)', r' \1', string)
  return spaced.strip().lower()
def ConstructList(title, items):
  """Returns a string displaying the items and a title.

  Args:
    title: str, heading rendered above the list.
    items: iterable of str; duplicates are dropped and the rest sorted.

  Returns:
    The rendered list as a single string.
  """
  buf = cStringIO.StringIO()
  printer = console_io.ListPrinter(title)
  printer.Print(sorted(set(items)), output_stream=buf)
  return buf.getvalue()
def RaiseToolException(problems, error_message=None):
  """Raises a ToolException with the given list of problems.

  Args:
    problems: iterable of (resource, message) pairs; only the messages are
      included in the exception text.
    error_message: optional str used as the list heading; a generic one is
      used when omitted.

  Raises:
    calliope_exceptions.ToolException: always.
  """
  errors = []
  for _, message in problems:
    errors.append(message)
  raise calliope_exceptions.ToolException(
      ConstructList(
          error_message or 'Some requests did not succeed:',
          errors))
def AddZoneFlag(parser, resource_type, operation_type):
  """Adds a --zone flag to the given parser.

  Args:
    parser: the argparse-style parser to extend.
    resource_type: str, human-readable resource name used in the help text.
    operation_type: str, verb used in the help text (e.g. 'delete').
  """
  short_help = 'The zone of the {0} to {1}.'.format(
      resource_type, operation_type)
  zone = parser.add_argument(
      '--zone',
      help=short_help)
  # detailed_help is picked up by the calliope help generator.
  zone.detailed_help = '{0} {1}'.format(
      short_help, constants.ZONE_PROPERTY_EXPLANATION)
def AddRegionFlag(parser, resource_type, operation_type):
  """Adds a --region flag to the given parser.

  Args:
    parser: the argparse-style parser to extend.
    resource_type: str, human-readable resource name used in the help text.
    operation_type: str, verb used in the help text (e.g. 'delete').
  """
  short_help = 'The region of the {0} to {1}.'.format(
      resource_type, operation_type)
  region = parser.add_argument(
      '--region',
      help=short_help)
  # detailed_help is picked up by the calliope help generator.
  region.detailed_help = '{0} {1}'.format(
      short_help, constants.REGION_PROPERTY_EXPLANATION)
def PromptForDeletion(refs, scope_name=None, prompt_title=None):
  """Prompts the user to confirm deletion of resources.

  Args:
    refs: list of resource references about to be deleted; no-op if empty.
    scope_name: optional attribute name (e.g. 'zone') displayed next to
      each resource name.
    prompt_title: optional custom heading for the confirmation prompt.

  Raises:
    calliope_exceptions.ToolException: if the user declines the prompt.
  """
  if not refs:
    return
  # Derive a human-friendly resource name from the first ref's collection.
  resource_type = CollectionToResourceType(refs[0].Collection())
  resource_name = CamelCaseToOutputFriendly(resource_type)
  prompt_list = []
  for ref in refs:
    if scope_name:
      item = '[{0}] in [{1}]'.format(ref.Name(), getattr(ref, scope_name))
    else:
      item = '[{0}]'.format(ref.Name())
    prompt_list.append(item)
  prompt_title = (prompt_title or
                  'The following {0} will be deleted:'.format(resource_name))
  prompt_message = ConstructList(prompt_title, prompt_list)
  if not console_io.PromptContinue(message=prompt_message):
    raise calliope_exceptions.ToolException('Deletion aborted by user.')
def BytesToGb(size):
  """Converts a disk size in bytes to GB.

  Args:
    size: int size in bytes, or a falsy value.

  Returns:
    The size in whole GB, or None when size is falsy.

  Raises:
    calliope_exceptions.ToolException: if size is not a 1 GB multiple.
  """
  if not size:
    return None
  if size % constants.BYTES_IN_ONE_GB != 0:
    # Python 2 integer division floors, so +1 suggests the next whole GB.
    raise calliope_exceptions.ToolException(
        'Disk size must be a multiple of 1 GB. Did you mean [{0}GB]?'
        .format(size / constants.BYTES_IN_ONE_GB + 1))
  return size / constants.BYTES_IN_ONE_GB
def WarnIfDiskSizeIsTooSmall(size_gb, disk_type):
  """Writes a warning message if the given disk size is too small.

  Args:
    size_gb: int disk size in GB, or a falsy value to skip the check.
    disk_type: str disk type name; 'pd-ssd' uses a lower threshold.
  """
  if not size_gb:
    return
  if disk_type and 'pd-ssd' in disk_type:
    warning_threshold_gb = constants.SSD_DISK_PERFORMANCE_WARNING_GB
  else:
    warning_threshold_gb = constants.STANDARD_DISK_PERFORMANCE_WARNING_GB
  if size_gb < warning_threshold_gb:
    log.warn(
        'You have selected a disk size of under [%sGB]. This may result in '
        'poor I/O performance. For more information, see: '
        'https://developers.google.com/compute/docs/disks#pdperformance.',
        warning_threshold_gb)
def UpdateContextEndpointEntries(context,
                                 ):
  """Updates context to set API enpoints; requires context['http'] be set.

  Populates context['compute'], context['resources'] and context['batch-url']
  from the configured core api_host property.

  Args:
    context: dict, mutated in place; must already contain an 'http' client.
  """
  http = context['http']
  api_host = properties.VALUES.core.api_host.Get()
  version_component = 'compute/v1/'
  client = compute_v1_client.ComputeV1
  compute_url = urlparse.urljoin(api_host, version_component)
  # get_credentials=False: the supplied http object already carries auth.
  compute = client(url=compute_url, get_credentials=False, http=http)
  context['compute'] = compute
  context['resources'] = resources.REGISTRY.CloneAndSwitchAPIs(compute)
  context['batch-url'] = urlparse.urljoin(api_host, 'batch')
| 31.630952
| 77
| 0.708882
|
4a0c79aff2bca394568b5e4212872173d8f2380b
| 6,198
|
py
|
Python
|
home.admin/config.scripts/lnd.initwallet.py
|
softyengineer/raspiblitz
|
04b054e55c21194e27946670b0a5e4896f6805ba
|
[
"MIT"
] | 1
|
2020-01-13T12:36:43.000Z
|
2020-01-13T12:36:43.000Z
|
home.admin/config.scripts/lnd.initwallet.py
|
softyengineer/raspiblitz
|
04b054e55c21194e27946670b0a5e4896f6805ba
|
[
"MIT"
] | null | null | null |
home.admin/config.scripts/lnd.initwallet.py
|
softyengineer/raspiblitz
|
04b054e55c21194e27946670b0a5e4896f6805ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import codecs, os, sys, base64
# display config script info
# Show usage and abort when called with no arguments or with -h/help.
if len(sys.argv) <= 1 or sys.argv[1] == "-h" or sys.argv[1] == "help":
    print("# ! always activate virtual env first: source /home/admin/python-env-lnd/bin/activate")
    print("# ! and run with with: python /home/admin/config.scripts/lnd.initwallet.py")
    print("# creating or recovering the LND wallet")
    print("# lnd.winitwallet.py new [walletpassword] [?seedpassword]")
    print("# lnd.winitwallet.py seed [walletpassword] [\"seeds-words-seperated-spaces\"] [?seedpassword]")
    print("# lnd.winitwallet.py scb [walletpassword] [\"seeds-words-seperated-spaces\"] [filepathSCB] [?seedpassword]")
    print("err='missing parameters'")
    sys.exit(1)
import grpc
from lndlibs import rpc_pb2 as ln
from lndlibs import rpc_pb2_grpc as lnrpc
from pathlib2 import Path
# Parsed CLI parameters; filled in below depending on the chosen mode.
walletpassword=""
seedwords=""
seedpassword=""
filepathSCB=""
# First CLI argument selects the mode: "new", "seed" or "scb".
mode=sys.argv[1]
if mode=="new":
    print("# *** CREATING NEW LND WALLET ***")
    if len(sys.argv)>2:
        walletpassword=sys.argv[2]
        # lnd requires wallet passwords of at least 8 characters
        if len(walletpassword)<8:
            print("err='wallet password is too short'")
            sys.exit(1)
    else:
        print("err='wallet password is too short'")
        sys.exit(1)
    if len(sys.argv)>3:
        seedpassword=sys.argv[3]
elif mode=="seed" or mode=="scb":
    if len(sys.argv)>2:
        walletpassword=sys.argv[2]
        if len(walletpassword)<8:
            print("err='wallet password is too short'")
            sys.exit(1)
    else:
        print("err='not correct amount of parameter - missing wallet password'")
        sys.exit(1)
    if len(sys.argv)>3:
        # Seed words arrive as one quoted, space-separated string.
        seedwordString=sys.argv[3]
        seedwords=seedwordString.split(" ")
        if len(seedwords)<24:
            print("err='not 24 seed words seperated by just spaces (surrounded with \")'")
            sys.exit(1)
    else:
        print("err='not correct amount of parameter - missing seed string'")
        sys.exit(1)
    if mode=="seed":
        if len(sys.argv)>4:
            seedpassword=sys.argv[4]
    elif mode=="scb":
        if len(sys.argv)>4:
            filepathSCB=sys.argv[4]
            scbFile = Path(filepathSCB)
            if scbFile.is_file():
                print("# OK SCB file exists")
            else:
                print("err='the given filepathSCB - file does not exists or no permission'")
                sys.exit(1)
        else:
            print("err='not correct amount of parameter - missing seed filepathSCB'")
            sys.exit(1)
        if len(sys.argv)>5:
            # BUGFIX: in scb mode the optional seed password is argv[5];
            # argv[4] is the SCB file path and was wrongly assigned before.
            seedpassword=sys.argv[5]
else:
    print("err='unkown mode parameter - run without any parameters to see options'")
    sys.exit(1)
# Connect to the local lnd WalletUnlocker gRPC service over TLS.
os.environ['GRPC_SSL_CIPHER_SUITES'] = 'HIGH+ECDSA'
cert = open('/mnt/hdd/lnd/tls.cert', 'rb').read()
ssl_creds = grpc.ssl_channel_credentials(cert)
channel = grpc.secure_channel('localhost:10009', ssl_creds)
stub = lnrpc.WalletUnlockerStub(channel)
if mode=="new":
    # Ask lnd to generate a fresh 24-word cipher seed, then init the wallet.
    request = ln.GenSeedRequest()
    try:
        response = stub.GenSeed(request)
        seedwords = response.cipher_seed_mnemonic
        seedwordsString=', '.join(seedwords)
        print("seedwords='"+seedwordsString+"'")
        # add a 6x4 formatted version to the output
        seedwords6x4=""
        for i in range(0,len(seedwords)):
            if i % 6 == 0 and i != 0:
                seedwords6x4=seedwords6x4+"\n"
            singleWord=str(i+1)+":"+seedwords[i]
            # pad each numbered word to a fixed 12-char column
            while len(singleWord)<12:
                singleWord=singleWord+" "
            seedwords6x4=seedwords6x4+singleWord
        print("seedwords6x4='"+seedwords6x4+"'")
    except grpc.RpcError as rpc_error_call:
        code = rpc_error_call.code()
        print >> sys.stderr, code
        details = rpc_error_call.details()
        print("err='RPCError GenSeedRequest'")
        print("errMore='"+details+"'")
        sys.exit(1)
    except:
        e = sys.exc_info()[0]
        print >> sys.stderr, e
        print("err='GenSeedRequest'")
        sys.exit(1)
    request = ln.InitWalletRequest(
        wallet_password=walletpassword,
        cipher_seed_mnemonic=seedwords
    )
    try:
        response = stub.InitWallet(request)
    except grpc.RpcError as rpc_error_call:
        code = rpc_error_call.code()
        print >> sys.stderr, code
        details = rpc_error_call.details()
        print("err='RPCError InitWallet'")
        print("errMore='"+details+"'")
        sys.exit(1)
    except:
        e = sys.exc_info()[0]
        print >> sys.stderr, e
        print("err='InitWallet'")
        sys.exit(1)
elif mode=="seed":
    # Recover a wallet from the provided seed words; scan back 5000 addresses.
    request = ln.InitWalletRequest(
        wallet_password=walletpassword,
        cipher_seed_mnemonic=seedwords,
        recovery_window=5000,
        aezeed_passphrase=seedpassword
    )
    try:
        response = stub.InitWallet(request)
    except grpc.RpcError as rpc_error_call:
        code = rpc_error_call.code()
        print >> sys.stderr, code
        details = rpc_error_call.details()
        print("err='RPCError InitWallet'")
        print("errMore='"+details+"'")
        sys.exit(1)
    except:
        e = sys.exc_info()[0]
        print >> sys.stderr, e
        print("err='InitWallet'")
        sys.exit(1)
elif mode=="scb":
    # Recover from seed plus a Static Channel Backup file.
    import binascii
    with open(filepathSCB, 'rb') as f:
        content = f.read()
    # NOTE(review): channel_backups is sent as a hex string here; current
    # lnd protos declare it as a ChanBackupSnapshot message — verify that
    # this encoding is accepted by the targeted lnd version.
    scbHexString=binascii.hexlify(content)
    print(scbHexString)
    request = ln.InitWalletRequest(
        wallet_password=walletpassword,
        cipher_seed_mnemonic=seedwords,
        recovery_window=5000,
        aezeed_passphrase=seedpassword,
        channel_backups=scbHexString
    )
    try:
        response = stub.InitWallet(request)
    except grpc.RpcError as rpc_error_call:
        code = rpc_error_call.code()
        print >> sys.stderr, code
        details = rpc_error_call.details()
        print("err='RPCError InitWallet'")
        print("errMore='"+details+"'")
        sys.exit(1)
    except:
        e = sys.exc_info()[0]
        print >> sys.stderr, e
        print("err='InitWallet'")
        sys.exit(1)
# NOTE(review): these two lines run unconditionally, so even a successful
# InitWallet exits with code 1 and an error string — confirm whether this
# trailing TODO block should be removed or guarded.
print("err='TODO: implement creating from seed/scb'")
sys.exit(1)
| 30.382353
| 119
| 0.604227
|
4a0c7bd05897fb638d21fb9cd673fefa3521fc6f
| 2,351
|
py
|
Python
|
examples/example1/backend_b.py
|
FragmentedPacket/diffsync
|
e9bc79cce83277bd3a097b337568705b916b3590
|
[
"Apache-2.0"
] | null | null | null |
examples/example1/backend_b.py
|
FragmentedPacket/diffsync
|
e9bc79cce83277bd3a097b337568705b916b3590
|
[
"Apache-2.0"
] | null | null | null |
examples/example1/backend_b.py
|
FragmentedPacket/diffsync
|
e9bc79cce83277bd3a097b337568705b916b3590
|
[
"Apache-2.0"
] | null | null | null |
"""Example of a DiffSync adapter implementation.
Copyright (c) 2020 Network To Code, LLC <info@networktocode.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=wrong-import-order
from diffsync import DiffSync
from models import Site, Device, Interface
# Static example fixture keyed as:
# {site_name: {device_name: {"role": ..., "interfaces": {intf_name: description}}}}
DATA = {
    "atl": {
        "atl-spine1": {"role": "spine", "interfaces": {"eth0": "Interface 0", "eth1": "Interface 1"}},
        "atl-spine2": {"role": "spine", "interfaces": {"eth0": "Interface 0", "eth1": "Interface 1"}},
    },
    "nyc": {
        "nyc-spine1": {"role": "spine", "interfaces": {"eth0": "Interface 0/0", "eth1": "Interface 1"}},
        "nyc-spine2": {"role": "spine", "interfaces": {"eth0": "Interface 0", "eth1": "Interface 1"}},
    },
    "sfo": {
        "sfo-spine1": {"role": "leaf", "interfaces": {"eth0": "Interface 0", "eth1": "Interface 1"}},
        "sfo-spine2": {"role": "spine", "interfaces": {"eth0": "TBD", "eth1": "ddd"}},
    },
}
class BackendB(DiffSync):
    """Example DiffSync adapter that loads sites, devices and interfaces from DATA."""
    site = Site
    device = Device
    interface = Interface
    top_level = ["site"]
    nb = None
    def load(self):
        """Populate the adapter from the static DATA fixture.

        Builds the site -> device -> interface hierarchy, registering each
        object with the adapter and attaching it as a child of its parent.
        """
        for site_name, devices in DATA.items():
            site_obj = self.site(name=site_name)
            self.add(site_obj)
            for dev_name, dev_info in devices.items():
                dev_obj = self.device(name=dev_name, role=dev_info["role"], site_name=site_name)
                self.add(dev_obj)
                site_obj.add_child(dev_obj)
                for if_name, if_desc in dev_info["interfaces"].items():
                    if_obj = self.interface(name=if_name, device_name=dev_name, description=if_desc)
                    self.add(if_obj)
                    dev_obj.add_child(if_obj)
| 36.734375
| 104
| 0.629094
|
4a0c7c2877ede72a2ebc85f61c3fc2cec52f7861
| 2,053
|
py
|
Python
|
doajtest/unit/test_datasets.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
doajtest/unit/test_datasets.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
doajtest/unit/test_datasets.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
from portality import datasets
from doajtest.helpers import DoajTestCase
class TestDatasets(DoajTestCase):
    """Exercises the country, currency and language lookup helpers in portality.datasets."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_01_countries(self):
        """ Use country information from our datasets """
        assert datasets.get_country_code('united kingdom') == 'GB', 'expected GB, received: {}'.format(datasets.get_country_name('GB'))
        assert datasets.get_country_name('GB') == 'United Kingdom', 'expected United Kingdom, received: {}'.format(datasets.get_country_name('GB'))
        # If the country is unrecognised, we send it back unchanged.
        assert datasets.get_country_code('mordor') == 'mordor'
        assert datasets.get_country_name('mordor') == 'mordor'
        # Unless fail_if_not_found is set in get_country_code()
        assert datasets.get_country_code('united states') == 'US'
        assert datasets.get_country_code('the shire', fail_if_not_found=True) is None
        assert datasets.get_country_code('the shire', fail_if_not_found=False) == 'the shire'
        # When we have more than one option, the first alphabetically is returned
        assert datasets.get_country_name('AE') == 'United Arab Emirates'
    def test_02_currencies(self):
        """ Utilise currency information from the datasets """
        assert datasets.get_currency_code('yen') == 'JPY'
        assert datasets.get_currency_name('JPY') == 'JPY - Yen'
        assert datasets.get_currency_code('pound sterling') == 'GBP'
        assert datasets.get_currency_name('GBP') == 'GBP - Pound Sterling'
        # Ambiguous or unknown currency names resolve to None.
        assert datasets.get_currency_code('pound') is None
        assert datasets.get_currency_code('doubloons') is None
    def test_03_languages(self):
        """ Use language information from our datasets """
        # Both 2-letter and 3-letter codes resolve to the same language name.
        assert datasets.name_for_lang('en') == 'English'
        assert datasets.name_for_lang('eng') == 'English'
        assert datasets.language_for('English').name == 'English'
        assert datasets.language_for('german').bibliographic == 'ger'
| 43.680851
| 147
| 0.684851
|
4a0c7ca88a3ad8fc33884cdecfdf1f259194675f
| 2,963
|
py
|
Python
|
src/sims4communitylib/enums/lot_traits_enum.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
src/sims4communitylib/enums/lot_traits_enum.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
src/sims4communitylib/enums/lot_traits_enum.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonLotTraitId(CommonInt):
    """Identifiers for vanilla lot traits.

    Each value is the numeric identifier the game uses for that lot trait.
    """
    CELEBRITY_HANG_OUT_HIGH_FAME: 'CommonLotTraitId' = 191710
    CELEBRITY_HANG_OUT_LOW_FAME: 'CommonLotTraitId' = 191708
    CELEBRITY_HOME_LOT_TRAIT: 'CommonLotTraitId' = 199661
    CHILDS_PLAY: 'CommonLotTraitId' = 144151
    # CON_*/PRO_* entries are the apartment drawback/benefit traits.
    CON_CURSED: 'CommonLotTraitId' = 137285
    CON_HAUNTED: 'CommonLotTraitId' = 137286
    CON_LIVELY_NEIGHBORS: 'CommonLotTraitId' = 137276
    CON_NEEDS_TLC: 'CommonLotTraitId' = 137283
    CON_PRICEY: 'CommonLotTraitId' = 137272
    CONVIVIAL: 'CommonLotTraitId' = 144156
    CREEPY_CRAWLIES: 'CommonLotTraitId' = 179482
    FILTHY: 'CommonLotTraitId' = 144148
    FRESH_AIR: 'CommonLotTraitId' = 144158
    GNOMES: 'CommonLotTraitId' = 147778
    GREAT_ACOUSTICS: 'CommonLotTraitId' = 144154
    GREAT_SOIL: 'CommonLotTraitId' = 144150
    GREMLINS: 'CommonLotTraitId' = 147847
    GRODY: 'CommonLotTraitId' = 144146
    HAUNTED: 'CommonLotTraitId' = 149429
    HIGH_SPEED_INTERNET: 'CommonLotTraitId' = 144153
    HOMEY: 'CommonLotTraitId' = 144152
    MEAN_VIBE: 'CommonLotTraitId' = 144144
    NATURAL_LIGHT: 'CommonLotTraitId' = 144157
    NO_TRESPASSING: 'CommonLotTraitId' = 144159
    ON_DARK_LEY_LINE: 'CommonLotTraitId' = 154647
    PEACE_AND_QUIET: 'CommonLotTraitId' = 179555
    PENNY_PIXIES: 'CommonLotTraitId' = 144147
    # PET_WORLD_* entries relate to pet-focused lot behavior.
    PET_WORLD_BREEDING_GROUND: 'CommonLotTraitId' = 170199
    PET_WORLD_CAT_FRIENDLY: 'CommonLotTraitId' = 170196
    PET_WORLD_CAT_HANGOUT: 'CommonLotTraitId' = 170190
    PET_WORLD_DOG_FRIENDLY: 'CommonLotTraitId' = 170197
    PET_WORLD_DOG_HANGOUT: 'CommonLotTraitId' = 170191
    PET_WORLD_TRAINING_GROUND: 'CommonLotTraitId' = 170198
    PRO_CHEAP: 'CommonLotTraitId' = 137273
    PRO_CHEFS_KITCHEN: 'CommonLotTraitId' = 137281
    PRO_GREAT_ATMOSPHERE: 'CommonLotTraitId' = 137287
    PRO_GREAT_VIEW: 'CommonLotTraitId' = 137278
    PRO_HISTORICAL: 'CommonLotTraitId' = 137280
    PRO_HOME_STUDIO: 'CommonLotTraitId' = 137284
    PRO_LOW_DEPOSIT: 'CommonLotTraitId' = 137282
    PRO_NEAR_GOOD_SCHOOLS: 'CommonLotTraitId' = 137223
    PRO_ON_LEY_LINE: 'CommonLotTraitId' = 137275
    PRO_QUIET: 'CommonLotTraitId' = 137277
    PRO_ROMANTIC_FIREPLACE: 'CommonLotTraitId' = 137279
    PRO_SERVICED_APARTMENT: 'CommonLotTraitId' = 137274
    QUAKE_ZONE: 'CommonLotTraitId' = 144143
    REGISTERED_VAMPIRE_LAIR: 'CommonLotTraitId' = 155246
    ROMANTIC_ATMOSPHERE: 'CommonLotTraitId' = 144149
    SCIENCE_LAIR: 'CommonLotTraitId' = 144155
    SUNNY_ASPECT: 'CommonLotTraitId' = 144145
    TEEN_HANG_OUT: 'CommonLotTraitId' = 162560
    VAMPIRE_NEXUS: 'CommonLotTraitId' = 154888
| 44.223881
| 125
| 0.757678
|
4a0c7ccc83ff7001df2c492e6f683f90cf178eda
| 2,633
|
py
|
Python
|
userena/contrib/umessages/templatetags/umessages_tags.py
|
jdavidagudelo/django-userena-ce
|
970ca25ca367112625933bd61a0ba745b052692e
|
[
"BSD-3-Clause"
] | 86
|
2018-03-09T22:24:39.000Z
|
2021-12-12T22:30:33.000Z
|
userena/contrib/umessages/templatetags/umessages_tags.py
|
jdavidagudelo/django-userena-ce
|
970ca25ca367112625933bd61a0ba745b052692e
|
[
"BSD-3-Clause"
] | 113
|
2018-02-25T12:24:13.000Z
|
2022-02-22T17:59:51.000Z
|
userena/contrib/umessages/templatetags/umessages_tags.py
|
jdavidagudelo/django-userena-ce
|
970ca25ca367112625933bd61a0ba745b052692e
|
[
"BSD-3-Clause"
] | 19
|
2018-08-16T18:13:48.000Z
|
2021-12-11T18:14:30.000Z
|
from django import template
from userena.contrib.umessages.models import MessageRecipient
import re
register = template.Library()
class MessageCount(template.Node):
    """Template node that stores an unread-message count in the context.

    With only um_from_user set, counts all unread messages for that user;
    with um_to_user also set, counts unread messages between the two users.
    """
    def __init__(self, um_from_user, var_name, um_to_user=None):
        # Arguments arrive as raw token strings; wrap them as template
        # Variables so they resolve lazily at render time.
        self.user = template.Variable(um_from_user)
        self.var_name = var_name
        if um_to_user:
            self.um_to_user = template.Variable(um_to_user)
        else:
            self.um_to_user = um_to_user
    def render(self, context):
        # Renders nothing; the count is written into the context under
        # self.var_name. Unresolvable variables silently render empty.
        try:
            user = self.user.resolve(context)
        except template.VariableDoesNotExist:
            return ""
        if not self.um_to_user:
            message_count = MessageRecipient.objects.count_unread_messages_for(user)
        else:
            try:
                um_to_user = self.um_to_user.resolve(context)
            except template.VariableDoesNotExist:
                return ""
            message_count = MessageRecipient.objects.count_unread_messages_between(
                user, um_to_user
            )
        context[self.var_name] = message_count
        return ""
@register.tag
def get_unread_message_count_for(parser, token):
    """
    Returns the unread message count for a user.
    Syntax::
        {% get_unread_message_count_for [user] as [var_name] %}
    Example usage::
        {% get_unread_message_count_for pero as message_count %}
    """
    contents = token.contents
    try:
        tag_name, arg = contents.split(None, 1)
    except ValueError:
        # No arguments at all after the tag name.
        raise template.TemplateSyntaxError(
            "%s tag requires arguments" % contents.split()[0]
        )
    matched = re.search(r"(.*?) as (\w+)", arg)
    if matched is None:
        raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
    user, var_name = matched.groups()
    return MessageCount(user, var_name)
@register.tag
def get_unread_message_count_between(parser, token):
    """
    Returns the unread message count between two users.
    Syntax::
        {% get_unread_message_count_between [user] and [user] as [var_name] %}
    Example usage::
        {% get_unread_message_count_between funky and wunki as message_count %}
    """
    contents = token.contents
    try:
        tag_name, arg = contents.split(None, 1)
    except ValueError:
        # No arguments at all after the tag name.
        raise template.TemplateSyntaxError(
            "%s tag requires arguments" % contents.split()[0]
        )
    matched = re.search(r"(.*?) and (.*?) as (\w+)", arg)
    if matched is None:
        raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
    um_from_user, um_to_user, var_name = matched.groups()
    return MessageCount(um_from_user, var_name, um_to_user)
| 27.715789
| 85
| 0.639195
|
4a0c7d613121af834b27442414f939c7fda9287a
| 836
|
py
|
Python
|
python/oneflow/compatible/single_client/sysconfig.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | 3,285
|
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/compatible/single_client/sysconfig.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | 2,417
|
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/compatible/single_client/sysconfig.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | 520
|
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible.single_client.framework.sysconfig import (
get_compile_flags,
get_include,
get_lib,
get_link_flags,
has_rpc_backend_grpc,
has_rpc_backend_local,
with_cuda,
get_cuda_version,
with_xla,
)
| 30.962963
| 72
| 0.770335
|
4a0c7db9e942f0558c28cb5689f35f6fc2d5eafd
| 1,120
|
py
|
Python
|
tests/unit/sagemaker/huggingface/huggingface_utils.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 1
|
2021-08-31T09:39:37.000Z
|
2021-08-31T09:39:37.000Z
|
tests/unit/sagemaker/huggingface/huggingface_utils.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/sagemaker/huggingface/huggingface_utils.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from sagemaker import image_uris
# Defaults used by the unit tests in this package.
REGION = "us-east-1"
GPU_INSTANCE_TYPE = "ml.p2.xlarge"
def get_full_gpu_image_uri(
    version,
    base_framework_version,
    region=REGION,
    instance_type=GPU_INSTANCE_TYPE,
):
    """Return the HuggingFace GPU training image URI for the given versions.

    Thin wrapper over sagemaker.image_uris.retrieve pinned to py36 and the
    cu110-ubuntu18.04 container.
    """
    return image_uris.retrieve(
        "huggingface",
        region,
        version=version,
        py_version="py36",
        instance_type=instance_type,
        image_scope="training",
        base_framework_version=base_framework_version,
        container_version="cu110-ubuntu18.04",
    )
| 30.27027
| 72
| 0.724107
|
4a0c7ecef89cee1d225f05c70831ce1b53228bdd
| 17,738
|
py
|
Python
|
fastflix/widgets/panels/cover_panel.py
|
AwesomeGitHubRepos/FastFlix
|
60adf2b68a13907ac17013cb621867b2b302c101
|
[
"MIT"
] | 1
|
2021-06-14T04:35:50.000Z
|
2021-06-14T04:35:50.000Z
|
fastflix/widgets/panels/cover_panel.py
|
AwesomeGitHubRepos/FastFlix
|
60adf2b68a13907ac17013cb621867b2b302c101
|
[
"MIT"
] | 1
|
2020-12-24T13:08:56.000Z
|
2020-12-24T13:08:56.000Z
|
fastflix/widgets/panels/cover_panel.py
|
leonardyan/FastFlix
|
01f19c2de74945a4c60db61711aea9d3fe01b0cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
from pathlib import Path
from typing import List, Tuple, Union
from box import Box
from qtpy import QtCore, QtGui, QtWidgets
from fastflix.language import t
from fastflix.models.encode import AttachmentTrack
from fastflix.models.fastflix_app import FastFlixApp
from fastflix.models.video import VideoSettings
from fastflix.shared import link
logger = logging.getLogger("fastflix")
class CoverPanel(QtWidgets.QWidget):
def __init__(self, parent, app: FastFlixApp):
super().__init__(parent)
self.app = app
self.main = parent.main
self.attachments = Box()
layout = QtWidgets.QGridLayout()
sp = QtWidgets.QSizePolicy()
sp.setVerticalPolicy(QtWidgets.QSizePolicy.Policy.Maximum)
sp.setHorizontalPolicy(QtWidgets.QSizePolicy.Policy.Maximum)
# row, column, row span, column span
layout.addWidget(QtWidgets.QLabel(t("Poster Cover")), 0, 0, 1, 5)
layout.addWidget(QtWidgets.QLabel(t("Landscape Cover")), 0, 6, 1, 4)
info_label = QtWidgets.QLabel(
link("https://codecalamity.com/guides/video-thumbnails/", t("Enabling cover thumbnails on your system"))
)
info_label.setOpenExternalLinks(True)
layout.addWidget(info_label, 10, 0, 1, 9, QtCore.Qt.AlignLeft)
poster_options_layout = QtWidgets.QHBoxLayout()
self.cover_passthrough_checkbox = QtWidgets.QCheckBox(t("Copy Cover"))
self.small_cover_passthrough_checkbox = QtWidgets.QCheckBox(t("Copy Small Cover (no preview)"))
poster_options_layout.addWidget(self.cover_passthrough_checkbox)
poster_options_layout.addWidget(self.small_cover_passthrough_checkbox)
land_options_layout = QtWidgets.QHBoxLayout()
self.cover_land_passthrough_checkbox = QtWidgets.QCheckBox(t("Copy Landscape Cover"))
self.small_cover_land_passthrough_checkbox = QtWidgets.QCheckBox(t("Copy Small Landscape Cover (no preview)"))
land_options_layout.addWidget(self.cover_land_passthrough_checkbox)
land_options_layout.addWidget(self.small_cover_land_passthrough_checkbox)
self.cover_passthrough_checkbox.toggled.connect(lambda: self.cover_passthrough_check())
self.small_cover_passthrough_checkbox.toggled.connect(lambda: self.small_cover_passthrough_check())
self.cover_land_passthrough_checkbox.toggled.connect(lambda: self.cover_land_passthrough_check())
self.small_cover_land_passthrough_checkbox.toggled.connect(lambda: self.small_cover_land_passthrough_check())
self.poster = QtWidgets.QLabel()
self.poster.setSizePolicy(sp)
self.landscape = QtWidgets.QLabel()
self.landscape.setSizePolicy(sp)
layout.addLayout(poster_options_layout, 1, 0, 1, 4)
layout.addLayout(land_options_layout, 1, 6, 1, 4)
layout.addWidget(self.poster, 2, 0, 8, 4)
layout.addWidget(self.landscape, 2, 6, 8, 4)
layout.addLayout(self.init_cover(), 9, 0, 1, 4)
layout.addLayout(self.init_landscape_cover(), 9, 6, 1, 4)
layout.columnStretch(5)
self.setLayout(layout)
    def init_cover(self):
        """Create the poster path line-edit plus browse-button row."""
        layout = QtWidgets.QHBoxLayout()
        self.cover_path = QtWidgets.QLineEdit()
        # Typing or pasting a path refreshes the preview immediately.
        self.cover_path.textChanged.connect(lambda: self.update_cover())
        self.cover_button = QtWidgets.QPushButton(
            icon=self.style().standardIcon(QtWidgets.QStyle.SP_FileDialogContentsView)
        )
        self.cover_button.clicked.connect(lambda: self.select_cover())
        layout.addWidget(self.cover_path)
        layout.addWidget(self.cover_button)
        return layout
def select_cover(self):
    """Open a file dialog to pick a poster cover image and store the choice.

    Starts in the directory of the currently entered cover path (falling
    back to the working directory) and only offers png/jpeg/jpg files.
    Does nothing if the dialog is cancelled.
    """
    dirname = Path(self.cover_path.text()).parent
    if not dirname.exists():
        dirname = Path()
    filename = QtWidgets.QFileDialog.getOpenFileName(
        self,
        caption=t("Cover"),
        directory=str(dirname),
        filter=f"{t('Supported Image Files')} (*.png *.jpeg *.jpg)",
    )
    # getOpenFileName returns (path, selected_filter); empty path == cancelled
    if not filename or not filename[0]:
        return
    self.cover_path.setText(filename[0])
    # setText also fires textChanged -> update_cover; this explicit call keeps
    # the original flow (appears to be a harmless double update)
    self.update_cover()
def update_cover(self, cover_path=None):
    """Load the poster cover into the preview label and refresh settings.

    :param cover_path: optional explicit path; when omitted, the text of
        the cover path line edit is used.
    """
    if cover_path:
        cover = str(cover_path)
    else:
        cover = self.cover_path.text().strip()
    if not cover:
        # Empty path: clear the preview and drop the attachment from settings
        self.poster.setPixmap(QtGui.QPixmap())
        self.update_cover_settings()
        self.main.page_update(build_thumbnail=False)
        return
    if (
        not Path(cover).exists()
        or not Path(cover).is_file()
        or not cover.lower().endswith((".jpg", ".png", ".jpeg"))
    ):
        # Ignore partial/invalid paths (the user may still be typing)
        return
    try:
        pixmap = QtGui.QPixmap(cover)
        # 230x230 bounding box, aspect ratio preserved
        pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
        self.poster.setPixmap(pixmap)
    except Exception:
        logger.exception(t("Bad image"))
        self.cover_path.setText("")
    else:
        self.update_cover_settings()
        self.main.page_update(build_thumbnail=False)
def init_landscape_cover(self):
    """Build the landscape-cover selector row: a path entry plus a browse button."""
    row = QtWidgets.QHBoxLayout()

    self.cover_land_path = QtWidgets.QLineEdit()
    self.cover_land_path.textChanged.connect(lambda: self.update_landscape_cover())

    self.landscape_button = QtWidgets.QPushButton(
        icon=self.style().standardIcon(QtWidgets.QStyle.SP_FileDialogContentsView)
    )
    self.landscape_button.clicked.connect(lambda: self.select_landscape_cover())

    for widget in (self.cover_land_path, self.landscape_button):
        row.addWidget(widget)
    return row
def select_landscape_cover(self):
    """Open a file dialog to pick a landscape cover image and store the choice.

    Mirrors select_cover() but targets the landscape path/preview widgets.
    """
    dirname = Path(self.cover_land_path.text()).parent
    if not dirname.exists():
        dirname = Path()
    filename = QtWidgets.QFileDialog.getOpenFileName(
        self,
        caption=t("Landscape Cover"),
        directory=str(dirname),
        filter=f"{t('Supported Image Files')} (*.png *.jpeg *.jpg)",
    )
    # getOpenFileName returns (path, selected_filter); empty path == cancelled
    if not filename or not filename[0]:
        return
    self.cover_land_path.setText(filename[0])
    self.update_landscape_cover()
def update_landscape_cover(self, cover_path=None):
    """Load the landscape cover into the preview label and refresh settings.

    :param cover_path: optional explicit path; when omitted, the text of
        the landscape cover path line edit is used.
    """
    if cover_path:
        cover = str(cover_path)
    else:
        cover = self.cover_land_path.text().strip()
    if not cover:
        # Empty path: clear the preview and drop the attachment from settings
        self.landscape.setPixmap(QtGui.QPixmap())
        self.update_cover_settings()
        self.main.page_update(build_thumbnail=False)
        return
    if (
        not Path(cover).exists()
        or not Path(cover).is_file()
        or not cover.lower().endswith((".jpg", ".png", ".jpeg"))
    ):
        # Ignore partial/invalid paths (the user may still be typing)
        return
    try:
        pixmap = QtGui.QPixmap(cover)
        pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
        self.landscape.setPixmap(pixmap)
    except Exception:
        logger.exception(t("Bad image"))
        self.cover_land_path.setText("")
    else:
        self.update_cover_settings()
        self.main.page_update(build_thumbnail=False)
def get_attachment(self, filename) -> Tuple[Union[Path, None], Union[int, None]]:
    """Resolve the cover image to attach for *filename*.

    :param filename: one of "cover", "cover_land", "small_cover",
        "small_cover_land" (see update_cover_settings).
    :return: ``(path, stream_index)``.  The path comes either from the
        user-chosen file in the matching ``<filename>_path`` line edit, or —
        when the matching passthrough checkbox is ticked and the source file
        carried such an attachment — from the extracted attachment in the
        work directory, in which case its original stream index is returned
        as well.  ``(None, None)`` means no attachment for this slot.
    """
    # BUG FIX: the attribute names were built from a broken literal
    # placeholder ("(unknown)_path" / "(unknown)_passthrough_checkbox")
    # instead of interpolating the requested filename.
    attr = getattr(self, f"{filename}_path", None)
    cover_image = None
    index = None
    if attr and attr.text().strip():
        cover_image = Path(attr.text().strip())
    # The small covers have no *_path widget; getattr's default handles that.
    passthrough = getattr(self, f"{filename}_passthrough_checkbox", None)
    if (
        self.app.fastflix.current_video
        and passthrough is not None
        and passthrough.isChecked()
        and filename in self.attachments
    ):
        cover_image = self.app.fastflix.current_video.work_path / self.attachments[filename].name
        index = self.attachments[filename].stream
    return cover_image if cover_image else None, index
def update_cover_settings(self):
    """Rebuild the attachment track list on the current video's settings.

    Output stream indexes ("outdex") for attachments start after the single
    video track plus all audio and subtitle tracks.
    """
    if not self.app.fastflix.current_video:
        return
    start_outdex = (
        1  # Video Track
        + len(self.app.fastflix.current_video.video_settings.audio_tracks)
        + len(self.app.fastflix.current_video.video_settings.subtitle_tracks)
    )
    attachments: List[AttachmentTrack] = []
    # Fixed slot order; each present cover gets the next output index.
    for filename in ("cover", "cover_land", "small_cover", "small_cover_land"):
        attachment, index = self.get_attachment(filename)
        if attachment:
            attachments.append(
                AttachmentTrack(
                    index=index,
                    outdex=start_outdex,
                    file_path=attachment,
                    filename=filename,
                    attachment_type="cover",
                )
            )
            start_outdex += 1
    self.app.fastflix.current_video.video_settings.attachment_tracks = attachments
def cover_passthrough_check(self):
    """Handle toggling of the "Copy Cover" passthrough checkbox.

    When enabled (and the source actually has a cover attachment), the
    manual path widgets are disabled and the extracted source cover is
    previewed; when disabled, the manual widgets are re-enabled and the
    preview reflects whatever path the user has entered.
    """
    checked = self.cover_passthrough_checkbox.isChecked()
    if checked and "cover" in self.attachments:
        self.cover_path.setDisabled(True)
        self.cover_button.setDisabled(True)
        # Preview the attachment extracted into the work directory
        pixmap = QtGui.QPixmap(str(self.app.fastflix.current_video.work_path / self.attachments.cover.name))
        pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
        self.poster.setPixmap(pixmap)
    else:
        self.cover_path.setDisabled(False)
        self.cover_button.setDisabled(False)
        if not self.cover_path.text() or not Path(self.cover_path.text()).exists():
            # No usable manual path: blank the preview
            self.poster.setPixmap(QtGui.QPixmap())
        else:
            pixmap = QtGui.QPixmap(self.cover_path.text())
            pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
            self.poster.setPixmap(pixmap)
    self.main.page_update(build_thumbnail=False)
def small_cover_passthrough_check(self):
    """Small covers have no preview widget; just refresh the page state."""
    self.main.page_update(build_thumbnail=False)
def cover_land_passthrough_check(self):
    """Handle toggling of the "Copy Landscape Cover" passthrough checkbox.

    Mirrors cover_passthrough_check() for the landscape cover widgets.
    """
    checked = self.cover_land_passthrough_checkbox.isChecked()
    if checked and "cover_land" in self.attachments:
        self.cover_land_path.setDisabled(True)
        self.landscape_button.setDisabled(True)
        # Preview the attachment extracted into the work directory
        pixmap = QtGui.QPixmap(str(self.app.fastflix.current_video.work_path / self.attachments.cover_land.name))
        pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
        self.landscape.setPixmap(pixmap)
    else:
        self.cover_land_path.setDisabled(False)
        self.landscape_button.setDisabled(False)
        if not self.cover_land_path.text() or not Path(self.cover_land_path.text()).exists():
            # No usable manual path: blank the preview
            self.landscape.setPixmap(QtGui.QPixmap())
        else:
            pixmap = QtGui.QPixmap(self.cover_land_path.text())
            pixmap = pixmap.scaled(230, 230, QtCore.Qt.KeepAspectRatio)
            self.landscape.setPixmap(pixmap)
    self.main.page_update(build_thumbnail=False)
def small_cover_land_passthrough_check(self):
    """Small landscape covers have no preview widget; just refresh page state."""
    self.main.page_update(build_thumbnail=False)
def clear_covers(self, reconnect=True):
    """Reset all cover widgets and passthrough state to their defaults.

    The checkbox signals are disconnected first so that unchecking them does
    not fire the passthrough handlers mid-reset; they are reconnected at the
    end unless the caller (e.g. new_source) wants to reconnect itself later.

    :param reconnect: when False, leave the checkbox signals disconnected.
    """
    self.cover_passthrough_checkbox.toggled.disconnect()
    self.small_cover_passthrough_checkbox.toggled.disconnect()
    self.cover_land_passthrough_checkbox.toggled.disconnect()
    self.small_cover_land_passthrough_checkbox.toggled.disconnect()
    # Uncheck and disable everything; new_source re-enables what applies
    self.cover_passthrough_checkbox.setChecked(False)
    self.small_cover_passthrough_checkbox.setChecked(False)
    self.cover_land_passthrough_checkbox.setChecked(False)
    self.small_cover_land_passthrough_checkbox.setChecked(False)
    self.cover_passthrough_checkbox.setDisabled(True)
    self.small_cover_passthrough_checkbox.setDisabled(True)
    self.cover_land_passthrough_checkbox.setDisabled(True)
    self.small_cover_land_passthrough_checkbox.setDisabled(True)
    # Forget any attachments discovered in the previous source
    self.attachments = Box()
    self.poster.setPixmap(QtGui.QPixmap())
    self.landscape.setPixmap(QtGui.QPixmap())
    self.cover_path.setDisabled(False)
    self.cover_path.setText("")
    self.cover_button.setDisabled(False)
    self.cover_land_path.setDisabled(False)
    self.cover_land_path.setText("")
    self.landscape_button.setDisabled(False)
    if reconnect:
        self.cover_passthrough_checkbox.toggled.connect(lambda: self.cover_passthrough_check())
        self.small_cover_passthrough_checkbox.toggled.connect(lambda: self.small_cover_passthrough_check())
        self.cover_land_passthrough_checkbox.toggled.connect(lambda: self.cover_land_passthrough_check())
        self.small_cover_land_passthrough_checkbox.toggled.connect(
            lambda: self.small_cover_land_passthrough_check()
        )
def new_source(self, attachments):
    """Populate passthrough state from the attachment streams of a new source.

    For each recognised attachment base name ("cover", "cover_land",
    "small_cover", "small_cover_land") whose extracted file exists in the
    work directory, enable and tick the matching passthrough checkbox and
    remember the attachment's name, stream index and tags; the two full-size
    covers are also loaded into their previews.  Checkbox signals are
    reconnected only after all state is in place.

    :param attachments: attachment stream entries from the probed source
        (each has ``index``, ``tags`` with a ``filename``).
    """
    # Reset without reconnecting: we reconnect once setup is done below
    self.clear_covers(reconnect=False)
    for attachment in attachments:
        filename = attachment.get("tags", {}).get("filename", "")
        # Strip the extension to match against the known cover slot names
        base_name = filename.rsplit(".", 1)[0]
        file_path = self.app.fastflix.current_video.work_path / filename
        if base_name == "cover" and file_path.exists():
            self.cover_passthrough_checkbox.setChecked(True)
            self.cover_passthrough_checkbox.setDisabled(False)
            self.update_cover(str(file_path))
            self.cover_path.setDisabled(True)
            self.cover_path.setText("")
            self.cover_button.setDisabled(True)
            self.attachments.cover = {"name": filename, "stream": attachment.index, "tags": attachment.tags}
        if base_name == "cover_land" and file_path.exists():
            self.cover_land_passthrough_checkbox.setChecked(True)
            self.cover_land_passthrough_checkbox.setDisabled(False)
            self.update_landscape_cover(str(file_path))
            self.cover_land_path.setDisabled(True)
            self.cover_land_path.setText("")
            self.landscape_button.setDisabled(True)
            self.attachments.cover_land = {"name": filename, "stream": attachment.index, "tags": attachment.tags}
        if base_name == "small_cover" and file_path.exists():
            self.small_cover_passthrough_checkbox.setChecked(True)
            self.small_cover_passthrough_checkbox.setDisabled(False)
            self.attachments.small_cover = {"name": filename, "stream": attachment.index, "tags": attachment.tags}
        if base_name == "small_cover_land" and file_path.exists():
            self.small_cover_land_passthrough_checkbox.setChecked(True)
            self.small_cover_land_passthrough_checkbox.setDisabled(False)
            self.attachments.small_cover_land = {
                "name": filename,
                "stream": attachment.index,
                "tags": attachment.tags,
            }
    # Re-establish the toggle handlers that clear_covers(reconnect=False) left off
    self.cover_passthrough_checkbox.toggled.connect(lambda: self.cover_passthrough_check())
    self.small_cover_passthrough_checkbox.toggled.connect(lambda: self.small_cover_passthrough_check())
    self.cover_land_passthrough_checkbox.toggled.connect(lambda: self.cover_land_passthrough_check())
    self.small_cover_land_passthrough_checkbox.toggled.connect(lambda: self.small_cover_land_passthrough_check())
def reload_from_queue(self, streams, settings: VideoSettings):
    """Restore cover state for a video reloaded from the encoding queue.

    First rebuilds the attachment discovery state from the probed streams,
    then re-applies the user's saved choices: an attachment with no stream
    index was a user-supplied file (restore its path), while one with an
    index was a passthrough from the source (restore the checkbox).

    :param streams: probed stream info with an ``attachment`` list.
    :param settings: the saved VideoSettings holding attachment_tracks.
    """
    self.new_source(streams.attachment)
    # new_source ticks boxes for everything found in the source; start from
    # all-unchecked and re-apply only what the saved settings actually used
    self.cover_passthrough_checkbox.setChecked(False)
    self.cover_land_passthrough_checkbox.setChecked(False)
    self.small_cover_land_passthrough_checkbox.setChecked(False)
    self.small_cover_passthrough_checkbox.setChecked(False)
    for attachment in settings.attachment_tracks:
        if attachment.filename == "cover":
            if attachment.index is None:
                # User-picked file (not a source passthrough)
                self.cover_path.setText(str(attachment.file_path))
                self.update_cover(attachment.file_path)
            else:
                self.cover_passthrough_checkbox.setChecked(True)
        if attachment.filename == "cover_land":
            if attachment.index is None:
                self.cover_land_path.setText(str(attachment.file_path))
                self.update_landscape_cover(attachment.file_path)
            else:
                self.cover_land_passthrough_checkbox.setChecked(True)
        if attachment.filename == "small_cover_land":
            if attachment.index is not None:
                self.small_cover_land_passthrough_checkbox.setChecked(True)
        if attachment.filename == "small_cover":
            if attachment.index is not None:
                self.small_cover_passthrough_checkbox.setChecked(True)
# def update_cover_settings(self):
# start_outdex = (
# 1 # Video Track
# + len(self.app.fastflix.current_video.video_settings.audio_tracks)
# + len(self.app.fastflix.current_video.video_settings.subtitle_tracks)
# )
# attachments: List[AttachmentTrack] = []
#
# for filename in ("cover", "cover_land", "small_cover", "small_cover_land"):
# attachment = self.get_attachment(filename)
# if attachment:
# attachments.append(
# AttachmentTrack(
# outdex=start_outdex, file_path=attachment, filename=filename, attachment_type="cover"
# )
# )
# start_outdex += 1
# self.app.fastflix.current_video.video_settings.attachment_tracks = attachments
| 44.345
| 119
| 0.648777
|
4a0c7f3680cb371f8eacd8456368aa2b4e1a47b8
| 10,095
|
py
|
Python
|
bitbots_navigation/bitbots_visual_compass/src/visual_compass_setup.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 5
|
2019-10-30T06:32:37.000Z
|
2022-03-11T08:48:43.000Z
|
bitbots_navigation/bitbots_visual_compass/src/visual_compass_setup.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 83
|
2019-03-07T16:34:47.000Z
|
2022-03-23T17:02:06.000Z
|
bitbots_navigation/bitbots_visual_compass/src/visual_compass_setup.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 4
|
2019-07-28T11:24:02.000Z
|
2021-09-10T16:20:20.000Z
|
#! /usr/bin/env python3
from os import path
import socket
import rospy
import rospkg
import math
import cv2
import pickle
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from std_msgs.msg import Header
from dynamic_reconfigure.server import Server
from bitbots_visual_compass.cfg import VisualCompassConfig
from worker import VisualCompass
import tf2_ros as tf2
from tf2_geometry_msgs import PoseStamped
from humanoid_league_msgs.msg import HeadMode
from tf.transformations import euler_from_quaternion
from key_point_converter import KeyPointConverter
from datetime import datetime
# TODO: rosdep, u.a. motion etc...
# TODO: set head mode
# TODO: check published pose
# TODO: fix drop old images (also in startup)
class VisualCompassSetup():
    # type: () -> None
    """
    Interactive setup node that records a visual-compass feature map.

    Subscribes to the raw camera image and switches the head behavior into a
    special mode where it scans for image features above the field boundary.
    The head behavior sends a trigger message whenever it reaches one of its
    predefined points; on each trigger the current image's features are added
    to the feature map.  Once all configured map images are collected, the
    map is pickled to disk on the robot so the robot handler can download it
    and share it with the other robots.
    """

    def __init__(self):
        # type: () -> None
        """
        Initiate VisualCompassHandler

        return: None
        """
        # Init ROS package
        rospack = rospkg.RosPack()
        self.package_path = rospack.get_path('bitbots_visual_compass')

        rospy.init_node('bitbots_visual_compass_setup')
        rospy.loginfo('Initializing visual compass setup')

        self.bridge = CvBridge()

        self.config = {}
        self.image_msg = None
        self.compass = None
        self.hostname = socket.gethostname()

        # Frames used to compute the camera yaw relative to the robot's base.
        self.base_frame = 'base_footprint'
        self.camera_frame = 'camera_optical_frame'

        self.tf_buffer = tf2.Buffer(cache_time=rospy.Duration(50))
        self.listener = tf2.TransformListener(self.tf_buffer)

        self.pub_head_mode = rospy.Publisher(
            'head_mode',
            HeadMode,
            queue_size=1)

        # Register VisualCompassConfig server for dynamic reconfigure and set callback
        Server(VisualCompassConfig, self.dynamic_reconfigure_callback)

        rospy.logwarn("------------------------------------------------")
        rospy.logwarn("||                 WARNING                    ||")
        rospy.logwarn("||Please remove the LAN cable from the Robot, ||")
        rospy.logwarn("||after pressing 'YES' you have 10 Seconds    ||")
        rospy.logwarn("||until the head moves OVER the LAN port!!!   ||")
        rospy.logwarn("------------------------------------------------\n\n")

        # Python 2 compatibility shim: on Python 3 `raw_input` does not exist,
        # so the assignment raises NameError and the builtin `input` is used.
        try:
            input = raw_input
        except NameError:
            pass

        accept = input("Do you REALLY want to start? (YES/n)")

        if accept == "YES":

            rospy.logwarn("REMOVE THE LAN CABLE NOW!!!!!")

            rospy.sleep(10)
            # NOTE(review): head mode value 10 presumably selects the
            # visual-compass record scan — confirm against HeadMode definition.
            head_mode = HeadMode()
            head_mode.headMode = 10
            self.pub_head_mode.publish(head_mode)
            rospy.loginfo("Head mode has been set!")
            rospy.spin()
        else:
            rospy.signal_shutdown("You aborted the process! Shuting down correctly.")

    def dynamic_reconfigure_callback(self, config, level):
        # type: (dict, int) -> dict
        """
        Apply a new dynamic-reconfigure configuration.

        Rebuilds the compass worker, resets the recording progress whenever a
        compass-relevant parameter changed, and (re)subscribes to the image
        and trigger topics when their parameters changed.

        :param config: new parameter dictionary from dynamic_reconfigure.
        :param level: change-level bitmask (unused).
        :return: the accepted configuration.
        """
        self.compass = VisualCompass(config)

        if self.changed_config_param(config, 'compass_type') or \
                self.changed_config_param(config, 'compass_matcher') or \
                self.changed_config_param(config, 'compass_multiple_map_image_count'):

            # Any change to the compass setup invalidates already-recorded images
            self.feature_map_images_count = 0
            self.processed_set_all_feature_map_images = False

            rospy.loginfo('Loaded configuration: compass type: %(type)s | matcher type: %(matcher)s | map images: %(feature_map_count)d' % {
                'type': config['compass_type'],
                'matcher': config['compass_matcher'],
                'feature_map_count': config['compass_multiple_map_image_count']})

        # Subscribe to Image-message
        if self.changed_config_param(config, 'img_msg_topic') or \
                self.changed_config_param(config, 'img_msg_queue_size'):
            if hasattr(self, 'sub_image_msg'):
                self.sub_image_msg.unregister()
            self.sub_image_msg = rospy.Subscriber(
                config['img_msg_topic'],
                Image,
                self.image_callback,
                queue_size=config['img_msg_queue_size'],
                tcp_nodelay=True,
                buff_size=60000000)
            # https://github.com/ros/ros_comm/issues/536

        # Register message server to call set truth callback
        if self.changed_config_param(config, 'feature_map_trigger_topic') or \
                self.changed_config_param(config, 'feature_map_trigger_queue_size'):
            if hasattr(self, 'sub_trigger_set_feature_map'):
                # BUG FIX: the old code unregistered self.sub_image_msg here
                # (copy-paste error) instead of the trigger subscriber.
                self.sub_trigger_set_feature_map.unregister()
            self.sub_trigger_set_feature_map = rospy.Subscriber(
                config['feature_map_trigger_topic'],
                Header,
                self.set_truth_callback,
                queue_size=config['feature_map_trigger_queue_size'])

        self.config = config

        self.check_image_count()

        return self.config

    def set_truth_callback(self, request):
        """
        Record the current image together with its camera yaw into the map.

        Triggered by the head behavior when a predefined orientation is
        reached.  Looks up the camera orientation in tf, converts it to a
        yaw angle in [0, 2*pi) and feeds image + angle to the compass worker.
        """
        if self.image_msg:
            # TODO: check timestamps
            orientation = self.tf_buffer.lookup_transform(self.base_frame, self.camera_frame, self.image_msg.header.stamp, timeout=rospy.Duration(0.5)).transform.rotation
            # Yaw from the quaternion, shifted by 90 degrees and wrapped to [0, 2*pi)
            yaw_angle = (euler_from_quaternion((
                orientation.x,
                orientation.y,
                orientation.z,
                orientation.w))[2] + 0.5 * math.pi) % (2 * math.pi)

            image = self.bridge.imgmsg_to_cv2(self.image_msg, 'bgr8')
            self.compass.set_truth(yaw_angle, image)
            self.feature_map_images_count += 1
            self.check_image_count()
        else:
            rospy.logwarn('No image received yet.')

    def image_callback(self, image_msg):
        # type: (Image) -> None
        """
        Cache the most recent camera image for the next trigger.
        """
        # Drops old images
        # TODO: fix
        # image_age = rospy.get_rostime() - image_msg.header.stamp
        # if image_age.to_sec() > 0.1:
        #     print("Visual Compass: Dropped Image-message")  # TODO debug printer
        #     return

        self.image_msg = image_msg

    def check_image_count(self):
        # type: () -> None
        """
        Report recording progress and save the map once all images are in.
        """
        config_feature_map_images_count = self.config['compass_multiple_map_image_count']
        if self.feature_map_images_count != config_feature_map_images_count:
            rospy.loginfo('Visual compass: %(var)d of %(config)d map images set. More images are needed.' %
                          {'var': self.feature_map_images_count, 'config': config_feature_map_images_count})
            self.processed_set_all_feature_map_images = False
        else:
            # Guard so the map is only saved once
            if not(self.processed_set_all_feature_map_images):
                rospy.loginfo('Visual compass: All map images have been processed.')
                self.save_feature_map(self.config['feature_map_file_path'])
            self.processed_set_all_feature_map_images = True

    def save_feature_map(self, feature_map_file_path):
        # type (str) -> None
        """
        Pickle the recorded feature map (keypoints, descriptors, metadata) to
        disk, print download instructions and shut the node down.

        :param feature_map_file_path: path relative to this ROS package.
        """
        converter = KeyPointConverter()

        # get keypoints and mean feature count per image
        features = self.compass.get_feature_map()
        mean_feature_count = self.compass.get_mean_feature_count()

        # convert keypoints to basic values (cv2.KeyPoint is not picklable)
        keypoints = features[0]
        keypoint_values = [converter.key_point2values(kp) for kp in keypoints]

        descriptors = features[1]

        meta = {
            'field': self.config['feature_map_field'],
            'date': datetime.now(),
            'device': self.hostname,
            'compass_type': self.config['compass_type'],
            'compass_matcher': self.config['compass_matcher'],
            'compass_multiple_map_image_count': self.config['compass_multiple_map_image_count'],
            'keypoint_count': len(keypoint_values),
            'descriptor_count': len(descriptors),
            'mean_feature_count': mean_feature_count,
        }

        dump_features = {
            'keypoint_values': keypoint_values,
            'descriptors': descriptors,
            'meta': meta}

        # generate file path
        file_path = self.package_path + feature_map_file_path

        # warn, if file does exist allready
        if path.isfile(file_path):
            rospy.logwarn('Map file at: %(path)s does ALLREADY EXIST. This will be overwritten.' % {'path': file_path})

        # save keypoints in pickle file
        with open(file_path, 'wb') as f:
            pickle.dump(dump_features, f)

        info_str = "\n\t-----------------------------------------------------------------------------------------------------------------\n" + \
                   "\tSaved map file at: %(path)s\n" % {'path': file_path} + \
                   "\tRUN the following command on your system (NOT THE ROBOT) to save the map file in your current directory:\n" + \
                   "\n\tscp bitbots@%(host)s:%(path)s .\n" % {'path': file_path, 'host': self.hostname} + \
                   "\t-----------------------------------------------------------------------------------------------------------------"

        rospy.loginfo(info_str)

        # shutdown setup process
        rospy.signal_shutdown('Visual compass setup finished cleanly.')

    def changed_config_param(self, config, param_name):
        # type: (dict, str) -> bool
        """
        Return True when *param_name* is new or differs from the stored config.
        """
        return param_name not in self.config or config[param_name] != self.config[param_name]
if __name__ == '__main__':
    # Constructing the node runs the whole interactive setup (blocks in rospy.spin).
    VisualCompassSetup()
| 38.530534
| 170
| 0.604458
|
4a0c7fc2c4aa65904ad21fbd25c46989b1d9cb7e
| 2,217
|
py
|
Python
|
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0024.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0024.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0024.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : 使用ALTER SYSTEM SET修改数据库参数listen_addresses为特殊字符
Description :
1、使用ALTER SYSTEM SET修改数据库参数listen_addresses为+
gsql -d [数据库名] -p [端口号]
alter system set listen_addresses to +;
Expect :
1、设置失败,有合理报错
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import macro
# Shared helpers: module-wide logger and a shell wrapper bound to the primary DB user.
logger = Logger()
primary_sh = CommonSH("PrimaryDbUser")
class GucSetListenAddresses(unittest.TestCase):
    """ALTER SYSTEM SET of listen_addresses to a special character must fail."""

    def setUp(self):
        """Log case start and verify the database cluster is healthy."""
        logger.info(
            "Opengauss_Function_Guc_Connectionauthentication_Case0024开始执行"
        )
        # Check whether the database cluster status is normal
        db_status = primary_sh.get_db_cluster_status("status")
        if not db_status:
            logger.info("The status of db cluster is abnormal. Please check! \
                db_status: {}".format(db_status))
        self.assertTrue(db_status)
        self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
        self.constant = Constant()

    def test_guc_set_listen_addresses(self):
        # Step 1: setting listen_addresses to the special character '+' via
        # ALTER SYSTEM SET must fail with a reasonable (syntax) error.
        logger.info("使用ALTER SYSTEM SET修改数据库参数listen_addresses为特殊字符,报语法错误")
        sql1 = "alter system set listen_addresses to +"
        logger.info(sql1)
        body1 = primary_sh.execut_db_sql(sql1)
        logger.info(body1)
        self.assertIn(self.constant.SQL_WRONG_MSG[1], body1)

    def tearDown(self):
        logger.info("---------------------无需清理环境--------------------------")
        # No environment cleanup required for this case
        logger.info(
            "Opengauss_Function_Guc_Connectionauthentication_Case0024执行结束"
        )
| 33.089552
| 84
| 0.682454
|
4a0c7fed6964fe2bc22e3498da8aba59b223a69e
| 3,119
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/users/models.py
|
zhiliangsu/MeiduoMall
|
a3968c52f6815ccda6513371d331580dc5aa58f3
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/users/models.py
|
zhiliangsu/MeiduoMall
|
a3968c52f6815ccda6513371d331580dc5aa58f3
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/users/models.py
|
zhiliangsu/MeiduoMall
|
a3968c52f6815ccda6513371d331580dc5aa58f3
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractUser
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from django.conf import settings
from meiduo_mall.utils.models import BaseModel
# Create your models here.
class User(AbstractUser):
    """Custom user model: AbstractUser plus mobile number, e-mail activation
    state and a default shipping address."""
    mobile = models.CharField(max_length=11, unique=True, verbose_name='手机号')
    # Whether the user's e-mail address has been verified
    email_active = models.BooleanField(verbose_name='邮箱状态', default=False)
    default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True,
                                        on_delete=models.SET_NULL, verbose_name='默认地址')

    class Meta:
        db_table = 'tb_users'
        verbose_name = '用户'
        verbose_name_plural = verbose_name

    def generate_verify_email_url(self):
        """Build a signed e-mail verification URL for this user."""
        # 1. Create a signing serializer (project secret, 1-hour lifetime)
        serializer = Serializer(settings.SECRET_KEY, 60 * 60)
        # 2. Payload identifying the user and the address being verified
        data = {'user_id': self.id, 'email': self.email}
        # 3. Serialize and sign, decoding the token bytes to str
        token = serializer.dumps(data).decode()
        # 4. Append the token to the front-end verification page URL
        return 'http://www.meiduo.site:8080/success_verify_email.html?token=' + token

    @staticmethod
    def check_verify_email_token(token):
        """Decode a verification token and return the matching user, or None."""
        # Must use the same secret and lifetime as generate_verify_email_url
        serializer = Serializer(settings.SECRET_KEY, 60 * 60)
        # Decode the token; BadData covers expiry and tampering
        try:
            data = serializer.loads(token)
        except BadData:
            return None
        else:
            # 3. Look up the unique user identified by both user_id and email
            user_id = data.get('user_id')
            email = data.get('email')
            try:
                user = User.objects.get(id=user_id, email=email)
            except User.DoesNotExist:
                return None
            else:
                return user
class Address(BaseModel):
    """
    User shipping address (soft-deleted via is_deleted, newest first).
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', verbose_name='用户')
    title = models.CharField(max_length=20, verbose_name='地址名称')
    receiver = models.CharField(max_length=20, verbose_name='收货人')
    # Province / city / district all reference the same areas.Area tree
    province = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='province_addresses',
                                 verbose_name='省')
    city = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='city_addresses', verbose_name='市')
    district = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='district_addresses',
                                 verbose_name='区')
    place = models.CharField(max_length=50, verbose_name='地址')
    mobile = models.CharField(max_length=11, verbose_name='手机')
    tel = models.CharField(max_length=20, null=True, blank=True, default='', verbose_name='固定电话')
    email = models.CharField(max_length=30, null=True, blank=True, default='', verbose_name='电子邮箱')
    # Soft-delete flag; rows are never physically removed
    is_deleted = models.BooleanField(default=False, verbose_name='逻辑删除')

    class Meta:
        db_table = 'tb_address'
        verbose_name = '用户地址'
        verbose_name_plural = verbose_name
        ordering = ['-update_time']
| 38.9875
| 117
| 0.649247
|
4a0c7ffe5a4596f8bb204f44a22706238ef4127a
| 557
|
py
|
Python
|
curso em video - Phython/desafios/desafio 37.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
curso em video - Phython/desafios/desafio 37.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
curso em video - Phython/desafios/desafio 37.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
# Exercise 37: read an integer and convert it to binary, octal or hexadecimal.
num = int(input('Digite um número inteiro: '))
opcao = int(input('Escolha a seguinte opção:\n [ 1 ] Converter para Binário\n [ 2 ] Converter para Octal\n '
                  '[ 3 ] Converter para hexadecimal\n'
                  'Sua opção: '))
# bin()/oct()/hex() return strings prefixed with "0b"/"0o"/"0x"; [2:] strips it
if opcao == 1:
    print(f'O número {num} convertido em Binário é {bin(num)[2:]}')
elif opcao == 2:
    print(f'O número {num} convertido em OCTAL é {oct(num)[2:]}')
elif opcao == 3:
    print(f'O número {num} convertido em Hexadecimal é {hex(num)[2:]}')
else:
    print('Meu filho!!! Opção errada, TCHAU!')
| 46.416667
| 108
| 0.601436
|
4a0c805a0c4c771ee1ef983636a9cf80bb58f2d1
| 4,312
|
py
|
Python
|
zun/common/policy.py
|
cooldharma06/zun_glance_tag
|
555399275afdff748888036a2fca47bbf347956b
|
[
"Apache-2.0"
] | null | null | null |
zun/common/policy.py
|
cooldharma06/zun_glance_tag
|
555399275afdff748888036a2fca47bbf347956b
|
[
"Apache-2.0"
] | null | null | null |
zun/common/policy.py
|
cooldharma06/zun_glance_tag
|
555399275afdff748888036a2fca47bbf347956b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For zun."""
from oslo_policy import policy
from zun.common import exception
import zun.conf
_ENFORCER = None
CONF = zun.conf.CONF
# we can get a policy enforcer by this init.
# oslo policy support change policy rule dynamically.
# at present, policy.enforce will reload the policy rules when it checks
# the policy files have been touched.
def init(policy_file=None, rules=None,
         default_rule=None, use_conf=True, overwrite=True):
    """Return the global Enforcer, creating it on first use.

    :param policy_file: Custom policy file to use, if none is
                        specified, ``conf.policy_file`` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  :meth:`load_rules` with ``force_reload=True``,
                  :meth:`clear` or :meth:`set_rules` with
                  ``overwrite=True`` is called this will be overwritten.
    :param default_rule: Default rule to use, conf.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    :param overwrite: Whether to overwrite existing rules when reload rules
                      from config file.
    """
    global _ENFORCER
    if _ENFORCER:
        return _ENFORCER
    # https://docs.openstack.org/oslo.policy/latest/user/usage.html
    _ENFORCER = policy.Enforcer(
        CONF,
        policy_file=policy_file,
        rules=rules,
        default_rule=default_rule,
        use_conf=use_conf,
        overwrite=overwrite,
    )
    return _ENFORCER
def enforce(context, rule=None, target=None,
            do_raise=True, exc=None, *args, **kwargs):
    """Checks authorization of a rule against the target and credentials.

    :param dict context: As much information about the user performing the
                         action as possible.
    :param rule: The rule to evaluate.
    :param dict target: As much information about the object being operated
                        on as possible.
    :param do_raise: Whether to raise an exception or not if check
                     fails.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining arguments passed to :meth:`enforce` (both
                positional and keyword arguments) will be passed to
                the exception class. If not specified,
                :class:`PolicyNotAuthorized` will be used.

    :return: ``False`` if the policy does not allow the action and `exc` is
             not provided; otherwise, returns a value that evaluates to
             ``True``.  Note: for rules using the "case" expression, this
             ``True`` value will be the specified string from the
             expression.
    """
    # Lazily create (or fetch) the module-global enforcer
    enforcer = init()
    credentials = context.to_dict()
    if not exc:
        exc = exception.PolicyNotAuthorized
    if target is None:
        # Default target: scope the check to the caller's own project/user
        target = {'project_id': context.project_id,
                  'user_id': context.user_id}

    return enforcer.enforce(rule, target, credentials,
                            do_raise=do_raise, exc=exc, *args, **kwargs)
def check_is_admin(context):
    """Whether or not user is admin according to policy setting.

    """
    # Ensure the module-global enforcer exists before using it
    init()
    return _ENFORCER.enforce('context_is_admin', {}, context.to_dict())
| 41.461538
| 79
| 0.61039
|
4a0c808a40118ed6422978baa7ab2e0a21e60134
| 432
|
py
|
Python
|
djinn/math/Vector.py
|
djeof-1/Djinn-IV
|
2b191b68109f46a9fc264b6a823f70beb58ddffd
|
[
"MIT"
] | 3
|
2015-12-26T11:48:34.000Z
|
2016-01-13T14:24:27.000Z
|
build/lib/djinn/math/Vector.py
|
l0ftyWhizZ/DJINN-IV
|
2b191b68109f46a9fc264b6a823f70beb58ddffd
|
[
"MIT"
] | 1
|
2015-12-28T14:15:26.000Z
|
2015-12-28T14:15:26.000Z
|
djinn/math/Vector.py
|
djeof-1/Djinn-IV
|
2b191b68109f46a9fc264b6a823f70beb58ddffd
|
[
"MIT"
] | 2
|
2016-01-05T10:18:13.000Z
|
2016-01-16T09:45:28.000Z
|
import pygame
from pygame.locals import *
import math
class Vector:
    """A simple mutable 3-D vector with x, y and z components."""

    def __init__(self, x, y, z):
        """Store the three components as internal attributes."""
        self._x = x
        self._y = y
        self._z = z

    def addVector(self, x, y, z):
        """Add the components (x, y, z) to this vector in place."""
        self._x += x
        self._y += y
        self._z += z

    def scalarProduct(self, vector):
        """Return the dot product of this vector with *vector*.

        BUG FIX: the original implementation multiplied this vector's own
        components in place before summing them, silently corrupting the
        vector's state as a side effect of a read-only query.  The dot
        product is now computed without mutation; the returned value is
        unchanged.
        """
        return self._x * vector._x + self._y * vector._y + self._z * vector._z
| 38
| 0.523148
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.