code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import srllib.qtgui
from PyQt4.QtCore import QObject, SIGNAL
import functools
def deferred_slot(func, optimize=False):
""" Decorator for turning a method into a deferred slot.
When calling a deferred slot, it is queued with the QApplication (must be
a L{srllib.qtgui.Application} instance). Queued calls are dispatched
periodically, which saves CPU time as opposed to making GUI calls directly
as signals are received.
"""
@functools.wraps(func)
def schedule(*args, **kwds):
srllib.qtgui.get_app().queue_deferred(func, args, kwds, optimize)
return schedule
def deferred_slot_optimize(func):
""" Optimized version of L{deferred_slot}.
Optimization happens by only queueing one call to a slot at a time.
"""
return deferred_slot(func, optimize=True)
class StatefulConnection(QObject):
""" A connection between a Qt signal and a slot, which is capable of
storing an extra set of arguments to the slot.
We subclass QObject and make instances children of the signal emitter,
so that their lifetime is bound to the latter.
"""
def __init__(self, emitter, signal, slot, extra_args=[]):
"""
@param emitter: The signal emitter.
@param signal: Signal signature (PyQt4.QtCore.Signal is invoked on this).
@param slot: The slot to be invoked.
@param extra_args: Extra arguments to pass when invoking the slot.
"""
QObject.__init__(self, emitter)
self.__slot, self.__extra = slot, extra_args
QObject.connect(emitter, SIGNAL(signal), self)
def __call__(self, *args, **kwds):
args = args + tuple(self.__extra)
self.__slot(*args, **kwds)
def connect(emitter, signal, slot):
""" Simplified version of QObject.connect which takes a raw slot signature.
@param emitter: Signal emitter.
@param signal: Signal signature (PyQt4.QtCore.Signal is invoked on this).
@param slot: Signal signature (PyQt4.QtCore.Signal is invoked on this).
"""
QObject.connect(emitter, SIGNAL(signal), slot) | srllib/qtgui/_signal.py | import srllib.qtgui
from PyQt4.QtCore import QObject, SIGNAL
import functools
def deferred_slot(func, optimize=False):
""" Decorator for turning a method into a deferred slot.
When calling a deferred slot, it is queued with the QApplication (must be
a L{srllib.qtgui.Application} instance). Queued calls are dispatched
periodically, which saves CPU time as opposed to making GUI calls directly
as signals are received.
"""
@functools.wraps(func)
def schedule(*args, **kwds):
srllib.qtgui.get_app().queue_deferred(func, args, kwds, optimize)
return schedule
def deferred_slot_optimize(func):
""" Optimized version of L{deferred_slot}.
Optimization happens by only queueing one call to a slot at a time.
"""
return deferred_slot(func, optimize=True)
class StatefulConnection(QObject):
""" A connection between a Qt signal and a slot, which is capable of
storing an extra set of arguments to the slot.
We subclass QObject and make instances children of the signal emitter,
so that their lifetime is bound to the latter.
"""
def __init__(self, emitter, signal, slot, extra_args=[]):
"""
@param emitter: The signal emitter.
@param signal: Signal signature (PyQt4.QtCore.Signal is invoked on this).
@param slot: The slot to be invoked.
@param extra_args: Extra arguments to pass when invoking the slot.
"""
QObject.__init__(self, emitter)
self.__slot, self.__extra = slot, extra_args
QObject.connect(emitter, SIGNAL(signal), self)
def __call__(self, *args, **kwds):
args = args + tuple(self.__extra)
self.__slot(*args, **kwds)
def connect(emitter, signal, slot):
""" Simplified version of QObject.connect which takes a raw slot signature.
@param emitter: Signal emitter.
@param signal: Signal signature (PyQt4.QtCore.Signal is invoked on this).
@param slot: Signal signature (PyQt4.QtCore.Signal is invoked on this).
"""
QObject.connect(emitter, SIGNAL(signal), slot) | 0.735926 | 0.290849 |
import numpy as np
import torch
from torch.nn import Module
from sklearn.metrics import accuracy_score
from torch import nn, optim
from torch.optim.lr_scheduler import MultiplicativeLR
class DeepSeqNet(Module):
def __init__(self):
super(DeepSeqNet, self).__init__()
def _compile(self, optimizer, learning_rate):
self._set_optim(optimizer, learning_rate)
self._set_scheduler()
self._set_criterion()
def _set_optim(self, optimizer, learning_rate):
optimizer = optimizer.lower()
if optimizer == 'adam':
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
elif optimizer == 'rmsprop':
self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate)
else:
self.optimizer = optim.SGD(self.parameters(), lr=learning_rate)
def _set_scheduler(self):
self.scheduler = MultiplicativeLR(self.optimizer, lr_lambda=(lambda x: 0.95))
def _set_criterion(self):
self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
raise NotImplementedError()
def fit(self, x, y):
self.train()
self.optimizer.zero_grad()
y_ = self.forward(x)
loss = self.criterion(y_, y)
loss.backward()
self.optimizer.step()
return loss
def evaluate(self, data_iterator):
self.eval()
labels, preds = [], []
for _, batch in enumerate(data_iterator):
x = batch.text.t()
if torch.cuda.is_available():
x = x.cuda()
y_ = self.forward(x)
pred = torch.argmax(y_, 1)
preds.extend(pred.cpu().numpy())
labels.extend(batch.label.numpy())
score = accuracy_score(labels, np.array(preds).flatten())
return score
def run_epoch(self, train_iterator, val_iterator):
train_losses = []
val_accuracies = []
losses = []
for i, batch in enumerate(train_iterator):
x = batch.text.t()
y = batch.label.type(torch.LongTensor)
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
loss = self.fit(x, y)
losses.append(loss.item())
if i % 100 == 0 and i != 0:
avg_train_loss = float(np.mean(losses))
train_losses.append(avg_train_loss)
losses = []
val_accuracy = self.evaluate(val_iterator)
print("Iteration: %4d | train loss: %3.2f | val acc.: %.2f" % ((i + 1), avg_train_loss * 100, val_accuracy * 100))
# Run the scheduler to reduce the learning rate
self.scheduler.step(epoch=None)
return train_losses, val_accuracies | models/deep_seq_net.py | import numpy as np
import torch
from torch.nn import Module
from sklearn.metrics import accuracy_score
from torch import nn, optim
from torch.optim.lr_scheduler import MultiplicativeLR
class DeepSeqNet(Module):
def __init__(self):
super(DeepSeqNet, self).__init__()
def _compile(self, optimizer, learning_rate):
self._set_optim(optimizer, learning_rate)
self._set_scheduler()
self._set_criterion()
def _set_optim(self, optimizer, learning_rate):
optimizer = optimizer.lower()
if optimizer == 'adam':
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
elif optimizer == 'rmsprop':
self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate)
else:
self.optimizer = optim.SGD(self.parameters(), lr=learning_rate)
def _set_scheduler(self):
self.scheduler = MultiplicativeLR(self.optimizer, lr_lambda=(lambda x: 0.95))
def _set_criterion(self):
self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
raise NotImplementedError()
def fit(self, x, y):
self.train()
self.optimizer.zero_grad()
y_ = self.forward(x)
loss = self.criterion(y_, y)
loss.backward()
self.optimizer.step()
return loss
def evaluate(self, data_iterator):
self.eval()
labels, preds = [], []
for _, batch in enumerate(data_iterator):
x = batch.text.t()
if torch.cuda.is_available():
x = x.cuda()
y_ = self.forward(x)
pred = torch.argmax(y_, 1)
preds.extend(pred.cpu().numpy())
labels.extend(batch.label.numpy())
score = accuracy_score(labels, np.array(preds).flatten())
return score
def run_epoch(self, train_iterator, val_iterator):
train_losses = []
val_accuracies = []
losses = []
for i, batch in enumerate(train_iterator):
x = batch.text.t()
y = batch.label.type(torch.LongTensor)
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
loss = self.fit(x, y)
losses.append(loss.item())
if i % 100 == 0 and i != 0:
avg_train_loss = float(np.mean(losses))
train_losses.append(avg_train_loss)
losses = []
val_accuracy = self.evaluate(val_iterator)
print("Iteration: %4d | train loss: %3.2f | val acc.: %.2f" % ((i + 1), avg_train_loss * 100, val_accuracy * 100))
# Run the scheduler to reduce the learning rate
self.scheduler.step(epoch=None)
return train_losses, val_accuracies | 0.939241 | 0.419291 |
import sample_utils, config, parse_midas_data, sfs_utils, diversity_utils, gene_diversity_utils, core_gene_utils
import os, os.path, sys, gzip
import numpy
temporal_change_directory = '%s/temporal_changes/' % (config.data_directory)
intermediate_filename_template = '%s/%s.txt.gz'
min_coverage = config.min_median_coverage
min_sample_size = 2
def load_temporal_change_map(species_name, prev_cohort='all', min_coverage = 0):
dir = "%s/cov%i_prev_%s" % (temporal_change_directory, min_coverage, prev_cohort)
intermediate_filename = intermediate_filename_template % (dir, species_name)
temporal_change_map = {}
if not os.path.isfile(intermediate_filename):
return temporal_change_map
file = gzip.open(intermediate_filename,"r")
file.readline() # header
for line in file:
items = line.split(",")
if items[0].strip()!=species_name:
continue
sample_1 = items[1].strip()
sample_2 = items[2].strip()
type = items[3].strip()
num_opportunities = float(items[4])
perr = float(items[5])
sample_pair = (sample_1, sample_2)
if sample_pair not in temporal_change_map:
temporal_change_map[sample_pair] = {}
changes = []
if len(items)<7:
pass
else:
change_strs = items[6:]
for change_str in change_strs:
subitems = change_str.split(";")
# switch on type of change
if type=='snps':
gene_name = subitems[0].strip()
contig = subitems[1].strip()
position = long(subitems[2])
variant_type = subitems[3].strip()
A1 = float(subitems[4])
D1 = float(subitems[5])
A2 = float(subitems[6])
D2 = float(subitems[7])
changes.append( (gene_name, contig, position, variant_type, A1, D1, A2, D2) )
elif type=='genes':
gene_name = subitems[0].strip()
D1 = float(subitems[1])
Dm1 = float(subitems[2])
D2 = float(subitems[3])
Dm2 = float(subitems[4])
changes.append( (gene_name, D1, Dm1, D2, Dm2) )
elif type=='private_snps':
gene_name = subitems[0].strip()
contig = subitems[1].strip()
position = long(subitems[2])
variant_type = subitems[3].strip()
A1 = float(subitems[4])
D1 = float(subitems[5])
A2 = float(subitems[6])
D2 = float(subitems[7])
changes.append( (gene_name, contig, position, variant_type, A1, D1, A2, D2) )
temporal_change_map[sample_pair][type] = num_opportunities, perr, changes
return temporal_change_map
def calculate_private_reversions_from_temporal_change_map(temporal_change_map, sample_1, sample_2, lower_threshold=config.consensus_lower_threshold,
upper_threshold=config.consensus_upper_threshold):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, None, None
if 'private_snps' not in temporal_change_map[sample_pair]:
return -1, None, None
# otherwise, some hope!
private_snp_opportunities, private_snp_perr, private_snps = temporal_change_map[sample_pair]['private_snps']
mutations = []
private_snp_reversions = []
for snp_change in private_snps:
a,b,c,d,A1,D1,A2,D2 = snp_change
if D1==0 or D2==0:
private_snp_opportunities-=1
continue
f1 = A1*1.0/D1
f2 = A2*1.0/D2
if f1>=upper_threshold and f2<=lower_threshold:
private_snp_reversions.append(snp_change)
if f1<=upper_threshold and f2>=upper_threshold:
mutations.append(snp_change)
return private_snp_opportunities, private_snp_perr, private_snp_reversions
def calculate_mutations_reversions_from_temporal_change_map(temporal_change_map, sample_1, sample_2, lower_threshold=config.consensus_lower_threshold,
upper_threshold=config.consensus_upper_threshold):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, -1, [], []
if 'snps' not in temporal_change_map[sample_pair]:
return -1, -1, [], []
# otherwise, some hope!
snp_opportunities, snp_perr, snp_changes = temporal_change_map[sample_pair]['snps']
mutations = []
reversions = []
for snp_change in snp_changes:
a,b,c,d,A1,D1,A2,D2 = snp_change
f1 = A1*1.0/D1
f2 = A2*1.0/D2
if (f1<=lower_threshold) and (f2>=upper_threshold):
mutations.append(snp_change)
elif (f1>=upper_threshold) and (f2<=lower_threshold):
reversions.append(snp_change)
return snp_opportunities, snp_perr, mutations, reversions
def calculate_gains_losses_from_temporal_change_map(temporal_change_map, sample_1, sample_2, max_absent_copynum=config.gainloss_max_absent_copynum, min_normal_copynum=config.gainloss_min_normal_copynum, max_normal_copynum=config.gainloss_max_normal_copynum):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, -1, [], []
if 'genes' not in temporal_change_map[sample_pair]:
return -1, -1, [], []
# otherwise, some hope!
gene_opportunities, gene_perr, gene_changes = temporal_change_map[sample_pair]['genes']
gains = []
losses = []
for gene_change in gene_changes:
gene_name, D1, Dm1, D2, Dm2 = gene_change
copynum_1 = D1/Dm1
copynum_2 = D2/Dm2
if (copynum_1<=max_absent_copynum) and (copynum_2>=min_normal_copynum) and (copynum_2<=max_normal_copynum):
gains.append(gene_change)
elif (copynum_2<=max_absent_copynum) and (copynum_1>=min_normal_copynum) and (copynum_1<=max_normal_copynum):
losses.append(gene_change)
return gene_opportunities, gene_perr, gains, losses | utils/temporal_changes_utils.py | import sample_utils, config, parse_midas_data, sfs_utils, diversity_utils, gene_diversity_utils, core_gene_utils
import os, os.path, sys, gzip
import numpy
temporal_change_directory = '%s/temporal_changes/' % (config.data_directory)
intermediate_filename_template = '%s/%s.txt.gz'
min_coverage = config.min_median_coverage
min_sample_size = 2
def load_temporal_change_map(species_name, prev_cohort='all', min_coverage = 0):
dir = "%s/cov%i_prev_%s" % (temporal_change_directory, min_coverage, prev_cohort)
intermediate_filename = intermediate_filename_template % (dir, species_name)
temporal_change_map = {}
if not os.path.isfile(intermediate_filename):
return temporal_change_map
file = gzip.open(intermediate_filename,"r")
file.readline() # header
for line in file:
items = line.split(",")
if items[0].strip()!=species_name:
continue
sample_1 = items[1].strip()
sample_2 = items[2].strip()
type = items[3].strip()
num_opportunities = float(items[4])
perr = float(items[5])
sample_pair = (sample_1, sample_2)
if sample_pair not in temporal_change_map:
temporal_change_map[sample_pair] = {}
changes = []
if len(items)<7:
pass
else:
change_strs = items[6:]
for change_str in change_strs:
subitems = change_str.split(";")
# switch on type of change
if type=='snps':
gene_name = subitems[0].strip()
contig = subitems[1].strip()
position = long(subitems[2])
variant_type = subitems[3].strip()
A1 = float(subitems[4])
D1 = float(subitems[5])
A2 = float(subitems[6])
D2 = float(subitems[7])
changes.append( (gene_name, contig, position, variant_type, A1, D1, A2, D2) )
elif type=='genes':
gene_name = subitems[0].strip()
D1 = float(subitems[1])
Dm1 = float(subitems[2])
D2 = float(subitems[3])
Dm2 = float(subitems[4])
changes.append( (gene_name, D1, Dm1, D2, Dm2) )
elif type=='private_snps':
gene_name = subitems[0].strip()
contig = subitems[1].strip()
position = long(subitems[2])
variant_type = subitems[3].strip()
A1 = float(subitems[4])
D1 = float(subitems[5])
A2 = float(subitems[6])
D2 = float(subitems[7])
changes.append( (gene_name, contig, position, variant_type, A1, D1, A2, D2) )
temporal_change_map[sample_pair][type] = num_opportunities, perr, changes
return temporal_change_map
def calculate_private_reversions_from_temporal_change_map(temporal_change_map, sample_1, sample_2, lower_threshold=config.consensus_lower_threshold,
upper_threshold=config.consensus_upper_threshold):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, None, None
if 'private_snps' not in temporal_change_map[sample_pair]:
return -1, None, None
# otherwise, some hope!
private_snp_opportunities, private_snp_perr, private_snps = temporal_change_map[sample_pair]['private_snps']
mutations = []
private_snp_reversions = []
for snp_change in private_snps:
a,b,c,d,A1,D1,A2,D2 = snp_change
if D1==0 or D2==0:
private_snp_opportunities-=1
continue
f1 = A1*1.0/D1
f2 = A2*1.0/D2
if f1>=upper_threshold and f2<=lower_threshold:
private_snp_reversions.append(snp_change)
if f1<=upper_threshold and f2>=upper_threshold:
mutations.append(snp_change)
return private_snp_opportunities, private_snp_perr, private_snp_reversions
def calculate_mutations_reversions_from_temporal_change_map(temporal_change_map, sample_1, sample_2, lower_threshold=config.consensus_lower_threshold,
upper_threshold=config.consensus_upper_threshold):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, -1, [], []
if 'snps' not in temporal_change_map[sample_pair]:
return -1, -1, [], []
# otherwise, some hope!
snp_opportunities, snp_perr, snp_changes = temporal_change_map[sample_pair]['snps']
mutations = []
reversions = []
for snp_change in snp_changes:
a,b,c,d,A1,D1,A2,D2 = snp_change
f1 = A1*1.0/D1
f2 = A2*1.0/D2
if (f1<=lower_threshold) and (f2>=upper_threshold):
mutations.append(snp_change)
elif (f1>=upper_threshold) and (f2<=lower_threshold):
reversions.append(snp_change)
return snp_opportunities, snp_perr, mutations, reversions
def calculate_gains_losses_from_temporal_change_map(temporal_change_map, sample_1, sample_2, max_absent_copynum=config.gainloss_max_absent_copynum, min_normal_copynum=config.gainloss_min_normal_copynum, max_normal_copynum=config.gainloss_max_normal_copynum):
sample_pair = sample_1, sample_2
if sample_pair not in temporal_change_map:
return -1, -1, [], []
if 'genes' not in temporal_change_map[sample_pair]:
return -1, -1, [], []
# otherwise, some hope!
gene_opportunities, gene_perr, gene_changes = temporal_change_map[sample_pair]['genes']
gains = []
losses = []
for gene_change in gene_changes:
gene_name, D1, Dm1, D2, Dm2 = gene_change
copynum_1 = D1/Dm1
copynum_2 = D2/Dm2
if (copynum_1<=max_absent_copynum) and (copynum_2>=min_normal_copynum) and (copynum_2<=max_normal_copynum):
gains.append(gene_change)
elif (copynum_2<=max_absent_copynum) and (copynum_1>=min_normal_copynum) and (copynum_1<=max_normal_copynum):
losses.append(gene_change)
return gene_opportunities, gene_perr, gains, losses | 0.133415 | 0.126704 |
def check_winner(input_list, size):
"""
Check the winner number in row, column, or diagonal direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
# Check row
winner = check_row_winner(input_list, size)
if winner == None:
# Transpose matrix
input_list = transpose(input_list)
# Check column
winner = check_row_winner(input_list, size)
if winner == None:
# Check diagnal
winner = check_diagonal_winner(input_list, size)
if winner == None:
winner = check_diagonal_winner(list(zip(*reversed(input_list))), size)
return winner
def transpose(input_list):
"""
Transpose a two dimensinal list.
Arguments:
input_list -- a two dimensional list for transposing.
Returns:
result -- transposed two dimensinal list.
"""
result = []
for i in range(len(input_list[0])):
new_line = [new_list[i] for new_list in input_list]
result.append(new_line)
return result
def check_row_winner(input_list, size):
"""
Check the winner number in row direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
for line in input_list:
count = 1
for idx, value in enumerate(line):
if line[idx] == line[idx+1]:
count += 1
else:
count = 1
if count == size and value != ' ':
return value
if idx == len(line)-size+1:
break
def check_diagonal_winner(input_list, size):
"""
Check the winner number in diagonal direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
for row_idx, line in enumerate(input_list):
winner = ' '
try:
list_for_check = []
for i in range(size):
list_for_check.append(input_list[row_idx+i][i])
if list_for_check.count(list_for_check[0]) == size:
if list_for_check[0] != ' ':
return list_for_check[0]
except IndexError:
winner = ' '
def draw_board_v2(input_list):
"""
Draw game boards.
Arguments:
input_list -- a two dimensional list for game board.
"""
h_element = ' ---'
for v_element in input_list:
print(h_element * len(input_list))
row = [('| ' + j + ' ') for j in v_element]
row = ''.join(map(str,row))
print(row + '|')
print(h_element * len(input_list))
def draw_turn(row, column, input_list, user):
"""
Draw the game board after user typing a choice.
Arguments:
row -- the row index.
column -- the column index.
input_list -- a two dimensional list for game board.
user -- the user who type the choice
Returns:
input_list -- a two dimensional list for game board after changed. If the position has been change perviously, return False.
"""
mark_dict = {'player1':'X', 'player2':'O'}
if input_list[row-1][column-1] == ' ':
input_list[row-1][column-1] = mark_dict[user]
else:
print('That position has been taken, please input a new place:')
return input_list
return input_list
def require_input(user, input_list, info_dict):
"""
Get the user's input position.
Arguments:
input_list -- a two dimensional list for game board.
user -- the user who type the choice
info_dict -- the information database.
Returns:
input_list -- a two dimensional list for game board after changed.
"""
import copy
input_list_before = copy.deepcopy(input_list)
while True:
row, column = input("Round {}, {}'s turn:".format(info_dict['round'], user)).split()
input_list = draw_turn(int(row), int(column), input_list, info_dict[user])
if input_list != input_list_before:
break
draw_board_v2(input_list)
return input_list
def initiation():
'''Ask user how large the game board you want'''
pass
def to_the_end():
'''
Check is there position available.
'''
pass
def main():
print('Welcome to the game!')
user1 = input("Player 1's name:")
user2 = input("Player 2's name:")
info_dict = {user1:'player1', user2:'player2', 'round':1}
input_list = [[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' ']]
draw_board_v2(input_list)
while True:
# The code is redundant, improvement needed.
input_list = require_input(user1, input_list, info_dict)
if check_winner(input_list, 4) not in [None, ' ']:
print('{} win!'.format(winner))
break
input_list = require_input(user2, input_list, info_dict)
if check_winner(input_list, 4) not in [None, ' ']:
print('{} win!'.format(winner))
break
info_dict['round'] += 1
if __name__ == "__main__":
main()
# >>> %Run test.py
# Welcome to the game!
# Player 1's name:Soi
# Player 2's name:Peruru
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 1, Soi's turn:2 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 1, Peruru's turn:3 3
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | O | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 2, Soi's turn:3 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 2, Peruru's turn:3 4
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 3, Soi's turn:4 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 3, Peruru's turn:6 6
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | O |
# --- --- --- --- --- ---
# Round 4, Soi's turn:5 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | O |
# --- --- --- --- --- ---
# X win! | Exercise-29-Tic-Tac-Toe-Game.py | def check_winner(input_list, size):
"""
Check the winner number in row, column, or diagonal direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
# Check row
winner = check_row_winner(input_list, size)
if winner == None:
# Transpose matrix
input_list = transpose(input_list)
# Check column
winner = check_row_winner(input_list, size)
if winner == None:
# Check diagnal
winner = check_diagonal_winner(input_list, size)
if winner == None:
winner = check_diagonal_winner(list(zip(*reversed(input_list))), size)
return winner
def transpose(input_list):
"""
Transpose a two dimensinal list.
Arguments:
input_list -- a two dimensional list for transposing.
Returns:
result -- transposed two dimensinal list.
"""
result = []
for i in range(len(input_list[0])):
new_line = [new_list[i] for new_list in input_list]
result.append(new_line)
return result
def check_row_winner(input_list, size):
"""
Check the winner number in row direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
for line in input_list:
count = 1
for idx, value in enumerate(line):
if line[idx] == line[idx+1]:
count += 1
else:
count = 1
if count == size and value != ' ':
return value
if idx == len(line)-size+1:
break
def check_diagonal_winner(input_list, size):
"""
Check the winner number in diagonal direction.
Arguments:
input_list -- a two dimensional list for checking.
size -- the length for winning.
Returns:
winner -- the winner player number, if no winner return None.
"""
for row_idx, line in enumerate(input_list):
winner = ' '
try:
list_for_check = []
for i in range(size):
list_for_check.append(input_list[row_idx+i][i])
if list_for_check.count(list_for_check[0]) == size:
if list_for_check[0] != ' ':
return list_for_check[0]
except IndexError:
winner = ' '
def draw_board_v2(input_list):
"""
Draw game boards.
Arguments:
input_list -- a two dimensional list for game board.
"""
h_element = ' ---'
for v_element in input_list:
print(h_element * len(input_list))
row = [('| ' + j + ' ') for j in v_element]
row = ''.join(map(str,row))
print(row + '|')
print(h_element * len(input_list))
def draw_turn(row, column, input_list, user):
"""
Draw the game board after user typing a choice.
Arguments:
row -- the row index.
column -- the column index.
input_list -- a two dimensional list for game board.
user -- the user who type the choice
Returns:
input_list -- a two dimensional list for game board after changed. If the position has been change perviously, return False.
"""
mark_dict = {'player1':'X', 'player2':'O'}
if input_list[row-1][column-1] == ' ':
input_list[row-1][column-1] = mark_dict[user]
else:
print('That position has been taken, please input a new place:')
return input_list
return input_list
def require_input(user, input_list, info_dict):
"""
Get the user's input position.
Arguments:
input_list -- a two dimensional list for game board.
user -- the user who type the choice
info_dict -- the information database.
Returns:
input_list -- a two dimensional list for game board after changed.
"""
import copy
input_list_before = copy.deepcopy(input_list)
while True:
row, column = input("Round {}, {}'s turn:".format(info_dict['round'], user)).split()
input_list = draw_turn(int(row), int(column), input_list, info_dict[user])
if input_list != input_list_before:
break
draw_board_v2(input_list)
return input_list
def initiation():
'''Ask user how large the game board you want'''
pass
def to_the_end():
'''
Check is there position available.
'''
pass
def main():
print('Welcome to the game!')
user1 = input("Player 1's name:")
user2 = input("Player 2's name:")
info_dict = {user1:'player1', user2:'player2', 'round':1}
input_list = [[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' ']]
draw_board_v2(input_list)
while True:
# The code is redundant, improvement needed.
input_list = require_input(user1, input_list, info_dict)
if check_winner(input_list, 4) not in [None, ' ']:
print('{} win!'.format(winner))
break
input_list = require_input(user2, input_list, info_dict)
if check_winner(input_list, 4) not in [None, ' ']:
print('{} win!'.format(winner))
break
info_dict['round'] += 1
if __name__ == "__main__":
main()
# >>> %Run test.py
# Welcome to the game!
# Player 1's name:Soi
# Player 2's name:Peruru
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 1, Soi's turn:2 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 1, Peruru's turn:3 3
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | O | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 2, Soi's turn:3 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 2, Peruru's turn:3 4
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 3, Soi's turn:4 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# Round 3, Peruru's turn:6 6
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | | | | | O |
# --- --- --- --- --- ---
# Round 4, Soi's turn:5 2
# --- --- --- --- --- ---
# | | | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | O | O | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | X | | | | |
# --- --- --- --- --- ---
# | | | | | | O |
# --- --- --- --- --- ---
# X win! | 0.708515 | 0.491212 |
import argparse
import getpass
import json
import datetime
import chimp
import time
import logging
import os
import threading
import sys
try:
    # The Mailchimp list id is read once, at import time, from the environment.
    LIST_ID = os.environ['MAILCHIMP_LIST_ID']
except KeyError:
    # Narrowed from a bare `except:` -- only a missing env var is expected here.
    # NOTE(review): logging.fatal only logs, it does not exit; any later use of
    # LIST_ID will raise NameError.  Consider sys.exit(1) here.
    logging.fatal('Error please give a list')
def load_mailchimp():
    """Load the cached Mailchimp member mapping from members.json.

    Returns the decoded JSON object, or None (after logging a fatal
    message) when the cache file does not exist.
    """
    if not os.path.isfile('members.json'):
        logging.fatal('Failure to open members.json')
        return None
    with open('members.json') as cache:
        return json.load(cache)
def update_members():
    """Force a full (raw) refresh of the Mailchimp list identified by LIST_ID."""
    chimp_requester = chimp.ChimpRequester()
    chimp_requester.raw_update(LIST_ID)
def update_list(l, go=True):
    """Background worker: poll Mailchimp every 10 s and merge changes into l.

    l (a dict of members) is mutated in place via dict.update.
    NOTE(review): `go` is a plain bool captured at call time -- the caller
    flipping its own `go` variable later cannot stop this loop.  A
    threading.Event would make it stoppable.
    """
    c = chimp.ChimpRequester()
    while go:
        # Timestamp of this poll, passed to the API as the "since" marker.
        t = str(datetime.datetime.utcnow())
        time.sleep(10)
        logging.debug('Updating list')
        updated = c.update_list(LIST_ID, t)
        transform = chimp.transform_mailchimp_response(updated)
        if transform:
            l.update(transform)
def get_acsii(filename, default_text):
    """Return the contents of filename, or default_text if it is absent.

    (Name kept as-is -- misspelling and all -- for existing callers.)
    """
    if not os.path.isfile(filename):
        return default_text
    with open(filename) as art_file:
        return art_file.read()
def parse_input(input, invalid_text):
    """Extract the 10-digit student id from a raw card swipe.

    Two known swipe prefixes are stripped; any other input is returned
    unchanged.  (invalid_text is accepted but currently unused.)
    """
    for prefix in (';601744', '%E?;601744'):
        plen = len(prefix)
        if input[:plen] == prefix and len(input) > plen + 9:
            return input[plen:plen + 10]
    return input
def main():
    """Interactive check-in loop: accept id swipes until Ctrl-C, then persist."""
    id = load_mailchimp()
    go = True
    # Background refresh of the member dict while we accept swipes.
    d = threading.Thread(name='update', target=update_list, kwargs={'l':id,'go':go})
    d.daemon = True
    d.start()
    checkin = []
    print(chr(27) + "[2J")  # ANSI clear-screen
    soda = get_acsii('soda.txt', 'Welcome to SoDA!')
    print '\n\n\n\n\n'
    enter_id_text = get_acsii('enter_id.txt', 'Enter your student ID: ')
    success_id_text = get_acsii('success_id.txt', 'Success, you are checked in!')
    mailchimp_text = get_acsii('mailchimp_text.txt','Please enter your information into Mailchimp')
    invalid_text = get_acsii('invalid.txt', 'Invalid card swipe: Please try again!:)')
    while True:
        try:
            print soda
            print '\n\n\n\n\n'
            # getpass hides the swipe data as it is scanned/typed.
            input = getpass.getpass(enter_id_text)
            parsed_input = unicode(parse_input(input, invalid_text))
            # NOTE(review): parse_input never returns None and unicode()
            # cannot produce None -- this guard looks like dead code.
            if parsed_input is None:
                continue
            if parsed_input in id:
                print(chr(27) + "[2J")
                print success_id_text
                print '\n\n'
                checkin.append(id[parsed_input])
                time.sleep(2)
                print(chr(27) + "[2J")
            else:
                print(chr(27) + "[2J")
                # Unknown id: record an empty stub keyed by the raw input so
                # the sign-in sheet still shows the attempt.
                checkin.append({
                    parsed_input: {
                    }
                })
                print mailchimp_text
                time.sleep(2)
                print(chr(27) + "[2J")
        except KeyboardInterrupt:
            # Ctrl-C ends the session: dump this session's check-ins plus the
            # refreshed member cache, then exit.
            logging.debug('Writing information to file')
            if not os.path.isdir('./sign-ins'):
                os.mkdir('./sign-ins')
            file_name = './sign-ins/check_in_{}.json'.format(str(datetime.datetime.utcnow()))
            with open(file_name, 'w+') as f:
                members = {}
                members['members'] = checkin
                json.dump(members, f)
            logging.debug('Updating Members.json')
            with open('members.json', 'w') as f:
                json.dump(id, f)
            # NOTE(review): flipping go here does not stop the worker thread
            # (the bool was passed by value); the daemon flag is what actually
            # lets the process exit.
            go = False
            sys.exit()
if __name__ == '__main__':
    # Command-line entry point:
    #   -u refresh members.json from Mailchimp before starting
    #   -d enable debug logging
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--update', dest='update', action='store_true',
                        default=False, help='Raw update members.json')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        default=False, help='Set logging level to debug')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    if args.update:
        update_members()
    main()
import getpass
import json
import datetime
import chimp
import time
import logging
import os
import threading
import sys
try:
LIST_ID = os.environ['MAILCHIMP_LIST_ID']
except:
logging.fatal('Error please give a list')
def load_mailchimp():
if os.path.isfile('members.json'):
with open('members.json') as f:
l = json.load(f)
return l
logging.fatal('Failure to open members.json')
def update_members():
chimp_requester = chimp.ChimpRequester()
chimp_requester.raw_update(LIST_ID)
def update_list(l, go=True):
c = chimp.ChimpRequester()
while go:
t = str(datetime.datetime.utcnow())
time.sleep(10)
logging.debug('Updating list')
updated = c.update_list(LIST_ID, t)
transform = chimp.transform_mailchimp_response(updated)
if transform:
l.update(transform)
def get_acsii(filename, default_text):
if os.path.isfile(filename):
with open(filename) as f:
ascii_art = f.read()
return ascii_art
return default_text
def parse_input(input, invalid_text):
if input[:7] == ';601744' and len(input) > 16:
return input[7:17]
elif input[:10] == '%E?;601744' and len(input) > 19:
return input[10:20]
else:
return input
def main():
id = load_mailchimp()
go = True
d = threading.Thread(name='update', target=update_list, kwargs={'l':id,'go':go})
d.daemon = True
d.start()
checkin = []
print(chr(27) + "[2J")
soda = get_acsii('soda.txt', 'Welcome to SoDA!')
print '\n\n\n\n\n'
enter_id_text = get_acsii('enter_id.txt', 'Enter your student ID: ')
success_id_text = get_acsii('success_id.txt', 'Success, you are checked in!')
mailchimp_text = get_acsii('mailchimp_text.txt','Please enter your information into Mailchimp')
invalid_text = get_acsii('invalid.txt', 'Invalid card swipe: Please try again!:)')
while True:
try:
print soda
print '\n\n\n\n\n'
input = getpass.getpass(enter_id_text)
parsed_input = unicode(parse_input(input, invalid_text))
if parsed_input is None:
continue
if parsed_input in id:
print(chr(27) + "[2J")
print success_id_text
print '\n\n'
checkin.append(id[parsed_input])
time.sleep(2)
print(chr(27) + "[2J")
else:
print(chr(27) + "[2J")
checkin.append({
parsed_input: {
}
})
print mailchimp_text
time.sleep(2)
print(chr(27) + "[2J")
except KeyboardInterrupt:
logging.debug('Writing information to file')
if not os.path.isdir('./sign-ins'):
os.mkdir('./sign-ins')
file_name = './sign-ins/check_in_{}.json'.format(str(datetime.datetime.utcnow()))
with open(file_name, 'w+') as f:
members = {}
members['members'] = checkin
json.dump(members, f)
logging.debug('Updating Members.json')
with open('members.json', 'w') as f:
json.dump(id, f)
go = False
sys.exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-u','--update', help='Raw update members.json', action='store_true',
default=False, dest='update')
parser.add_argument('-d', '--debug', help='Set logging level to debug', action='store_true',
default=False, dest='debug')
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.update:
update_members()
main() | 0.098096 | 0.05962 |
import zarr
from pathlib import Path
from cached_property import cached_property
import gcsfs
GCP_PROJECT = 'malariagen-jupyterhub'
AG1000G_RELEASE_DIR = Path("ag1000g-release")
PHASE1_AR3_DIR = AG1000G_RELEASE_DIR / 'phase1.AR3'
PHASE1_AR31_DIR = AG1000G_RELEASE_DIR / 'phase1.AR3.1'
PHASE2_AR1_DIR = AG1000G_RELEASE_DIR / 'phase2.AR1'
class Phase1AR3(object):
    """Read-only accessors for the Ag1000G phase 1 AR3 release on GCS."""

    def __init__(self):
        # Anonymous read-only access is enough for the public release bucket.
        self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
                                      access='read_only')

    def _open(self, relpath):
        """Open the consolidated zarr hierarchy at relpath within the release."""
        path = PHASE1_AR3_DIR / relpath
        store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
        return zarr.open_consolidated(store)

    @cached_property
    def variation_main(self):
        return self._open('variation/main/zarr/ag1000g.phase1.ar3')

    @cached_property
    def variation_main_pass(self):
        return self._open('variation/main/zarr/ag1000g.phase1.ar3.pass')

    @cached_property
    def variation_main_pass_biallelic(self):
        return self._open('variation/main/zarr/ag1000g.phase1.ar3.pass.biallelic')
class Phase1AR31(object):
    """Read-only accessors for the Ag1000G phase 1 AR3.1 release on GCS."""

    def __init__(self):
        # Anonymous read-only access is enough for the public release bucket.
        self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
                                      access='read_only')

    def _open(self, relpath):
        """Open the consolidated zarr hierarchy at relpath within the release."""
        path = PHASE1_AR31_DIR / relpath
        store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
        return zarr.open_consolidated(store)

    @cached_property
    def haplotypes_main(self):
        return self._open('haplotypes/main/zarr/ag1000g.phase1.ar3.1.haplotypes')
class Phase2AR1(object):
    """Read-only accessors for the Ag1000G phase 2 AR1 release on GCS."""

    def __init__(self):
        # Anonymous read-only access is enough for the public release bucket.
        self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
                                      access='read_only')

    def _open(self, relpath):
        """Open the consolidated zarr hierarchy at relpath within the release."""
        path = PHASE2_AR1_DIR / relpath
        store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
        return zarr.open_consolidated(store)

    @cached_property
    def variation_main(self):
        return self._open('variation/main/zarr/all/ag1000g.phase2.ar1')

    @cached_property
    def variation_main_pass(self):
        return self._open('variation/main/zarr/pass/ag1000g.phase2.ar1.pass')

    @cached_property
    def variation_main_pass_biallelic(self):
        return self._open(
            'variation/main/zarr/biallelic/ag1000g.phase2.ar1.pass.biallelic')

    @cached_property
    def haplotypes_main(self):
        return self._open('haplotypes/main/zarr/ag1000g.phase2.ar1.haplotypes')
from pathlib import Path
from cached_property import cached_property
import gcsfs
GCP_PROJECT = 'malariagen-jupyterhub'
AG1000G_RELEASE_DIR = Path("ag1000g-release")
PHASE1_AR3_DIR = AG1000G_RELEASE_DIR / 'phase1.AR3'
PHASE1_AR31_DIR = AG1000G_RELEASE_DIR / 'phase1.AR3.1'
PHASE2_AR1_DIR = AG1000G_RELEASE_DIR / 'phase2.AR1'
class Phase1AR3(object):
def __init__(self):
self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
access='read_only')
@cached_property
def variation_main(self):
path = PHASE1_AR3_DIR / 'variation/main/zarr/ag1000g.phase1.ar3'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
@cached_property
def variation_main_pass(self):
path = PHASE1_AR3_DIR / 'variation/main/zarr/ag1000g.phase1.ar3.pass'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
@cached_property
def variation_main_pass_biallelic(self):
path = PHASE1_AR3_DIR / 'variation/main/zarr/ag1000g.phase1.ar3.pass.biallelic'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
class Phase1AR31(object):
def __init__(self):
self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
access='read_only')
@cached_property
def haplotypes_main(self):
path = PHASE1_AR31_DIR / 'haplotypes/main/zarr/ag1000g.phase1.ar3.1.haplotypes'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
class Phase2AR1(object):
def __init__(self):
self.fs = gcsfs.GCSFileSystem(project=GCP_PROJECT, token='anon',
access='read_only')
@cached_property
def variation_main(self):
path = PHASE2_AR1_DIR / 'variation/main/zarr/all/ag1000g.phase2.ar1'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
@cached_property
def variation_main_pass(self):
path = PHASE2_AR1_DIR / 'variation/main/zarr/pass/ag1000g.phase2.ar1.pass'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
@cached_property
def variation_main_pass_biallelic(self):
path = PHASE2_AR1_DIR / 'variation/main/zarr/biallelic/ag1000g.phase2.ar1.pass.biallelic'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store)
@cached_property
def haplotypes_main(self):
path = PHASE2_AR1_DIR / 'haplotypes/main/zarr/ag1000g.phase2.ar1.haplotypes'
store = gcsfs.GCSMap(str(path), gcs=self.fs, check=False, create=False)
return zarr.open_consolidated(store) | 0.522689 | 0.112503 |
import requests
ROOT_URL = "http://www.bom.gov.au/fwo/"
WEATHER_TEXT = (
"{name} -- Location {username}'s Place --Time {local_date_time} -- The "
"Wind is from the {wind_dir} -- Wind speed {wind_spd_kt} KPH -- Wind "
"gusts {gust_kmh} KPH -- Air temps is {air_temp}{degree}C -- {temp_f}"
"{degree}F -- Relative Humidity is {rel_hum}% -- Air Pressure is "
"{press}kPa -- Rain {rain_trace} -- co-ord's Lon/Lat {lon}/{lat}"
)
FIELDS = {
"rain_trace",
"degree",
"temp_f",
"rel_hum",
"local_date_time",
"press",
"wind_dir",
"air_temp",
"name",
"gust_kmh",
"wind_spd_kt",
"username",
"lat",
"lon",
"sea_state",
}
USER_LOOKUP = {
"sveta": "IDN60901/IDN60901.94767.json",
"oksana": "IDN60901/IDN60901.94767.json",
"berg": "IDN60801/IDN60801.94785.json",
"bluemaxima": "IDN60801/IDN60801.94733.json",
"dodobrain": "IDQ60901/IDQ60901.94575.json",
"thearm": "IDN60801/IDN60801.94592.json",
"ukn0me": "IDW60801/IDW60801.95610.json",
"dooblynoobly": "IDQ60901/IDQ60901.94576.json",
"doobz": "IDQ60901/IDQ60901.94576.json",
"oobz": "IDQ60901/IDQ60901.94576.json",
"sydneyi": "IDN60901/IDN60901.94768.json",
"duoi": "IDN60801/IDN60801.95704.json",
"mwsb": "IDN60801/IDN60801.94926.json",
"dudz": "IDN60801/IDN60801.95757.json",
"chris": "IDN60901/IDN60901.94768.json",
"macspud": "IDV60901/IDV60901.95936.json",
"mcspud": "IDV60901/IDV60901.95936.json",
"veritay": "IDV60901/IDV60901.95936.json",
"wyoung": "IDN60801/IDN60801.94749.json",
"win32user": "IDN60901/IDN60901.94765.json",
"orlock": "IDV60801/IDV60801.94864.json",
"pebbles": "IDV60901/IDV60901.94872.json",
}
def _stiv_bullshit():
"""define stiv's weather"""
url = "https://api.weather.gov/stations/KIGQ/observations/current"
return url
def _get(weather_data, item):
"""get the data from url"""
return weather_data.get(item, "")
def _format_output(**values):
    """Render the module-level WEATHER_TEXT template with the collected fields."""
    return WEATHER_TEXT.format(**values)
def _calculate_temp_in_c(temp):
"""return the calculated celcius to farenheit"""
return str((temp * 9 / 5.0 + 32) if temp else "")
def weather(user):
    """Return a formatted current-weather line for a known user.

    Looks the lower-cased nick up in USER_LOOKUP, fetches the BOM observation
    feed for that station and fills the WEATHER_TEXT template.  Unknown users
    get a canned taunt instead.
    """
    user = user.lower()
    if user == "stiv":
        # stiv is in the US and is served by a different (NWS) endpoint.
        return _stiv_bullshit()
    location = USER_LOOKUP.get(user)
    if not location:
        return "Berg was too busy sucking dongs to add your location."
    url = ROOT_URL + location
    resp = requests.get(url).json()
    # data[0] is the newest observation in the BOM feed.
    weather_data = resp.get("observations", {}).get("data")[0]
    temp_f = _get(weather_data, "air_temp")
    # NOTE(review): v is unused in this comprehension; and if air_temp is
    # missing, temp_f is '' and the arithmetic below raises TypeError.
    output = {k: _get(weather_data, k) for k, v in weather_data.items() if k in FIELDS}
    output["degree"] = "\N{DEGREE SIGN}"
    output["temp_f"] = "%.2f" % (temp_f * 9 / 5 + 32)
    output["username"] = user if not user == 'mcspud' else 'macspud'
    return _format_output(**output)
def handler(connection, event):
    """IRC pubmsg hook: reply with the sender's weather when a message starts with 'my place'."""
    if event.arguments and event.arguments[0].startswith("my place"):
        connection.privmsg(event.target, weather(event.source.nick))
def get_handlers():
    """Expose this module's (event, callback) registrations to the bot core."""
    return (("pubmsg", handler),)
ROOT_URL = "http://www.bom.gov.au/fwo/"
WEATHER_TEXT = (
"{name} -- Location {username}'s Place --Time {local_date_time} -- The "
"Wind is from the {wind_dir} -- Wind speed {wind_spd_kt} KPH -- Wind "
"gusts {gust_kmh} KPH -- Air temps is {air_temp}{degree}C -- {temp_f}"
"{degree}F -- Relative Humidity is {rel_hum}% -- Air Pressure is "
"{press}kPa -- Rain {rain_trace} -- co-ord's Lon/Lat {lon}/{lat}"
)
FIELDS = {
"rain_trace",
"degree",
"temp_f",
"rel_hum",
"local_date_time",
"press",
"wind_dir",
"air_temp",
"name",
"gust_kmh",
"wind_spd_kt",
"username",
"lat",
"lon",
"sea_state",
}
USER_LOOKUP = {
"sveta": "IDN60901/IDN60901.94767.json",
"oksana": "IDN60901/IDN60901.94767.json",
"berg": "IDN60801/IDN60801.94785.json",
"bluemaxima": "IDN60801/IDN60801.94733.json",
"dodobrain": "IDQ60901/IDQ60901.94575.json",
"thearm": "IDN60801/IDN60801.94592.json",
"ukn0me": "IDW60801/IDW60801.95610.json",
"dooblynoobly": "IDQ60901/IDQ60901.94576.json",
"doobz": "IDQ60901/IDQ60901.94576.json",
"oobz": "IDQ60901/IDQ60901.94576.json",
"sydneyi": "IDN60901/IDN60901.94768.json",
"duoi": "IDN60801/IDN60801.95704.json",
"mwsb": "IDN60801/IDN60801.94926.json",
"dudz": "IDN60801/IDN60801.95757.json",
"chris": "IDN60901/IDN60901.94768.json",
"macspud": "IDV60901/IDV60901.95936.json",
"mcspud": "IDV60901/IDV60901.95936.json",
"veritay": "IDV60901/IDV60901.95936.json",
"wyoung": "IDN60801/IDN60801.94749.json",
"win32user": "IDN60901/IDN60901.94765.json",
"orlock": "IDV60801/IDV60801.94864.json",
"pebbles": "IDV60901/IDV60901.94872.json",
}
def _stiv_bullshit():
"""define stiv's weather"""
url = "https://api.weather.gov/stations/KIGQ/observations/current"
return url
def _get(weather_data, item):
"""get the data from url"""
return weather_data.get(item, "")
def _format_output(**values):
"""set the format up for the output"""
return WEATHER_TEXT.format(**values)
def _calculate_temp_in_c(temp):
"""return the calculated celcius to farenheit"""
return str((temp * 9 / 5.0 + 32) if temp else "")
def weather(user):
"""get the weather per pre defined uer url"""
user = user.lower()
if user == "stiv":
return _stiv_bullshit()
location = USER_LOOKUP.get(user)
if not location:
return "Berg was too busy sucking dongs to add your location."
url = ROOT_URL + location
resp = requests.get(url).json()
weather_data = resp.get("observations", {}).get("data")[0]
temp_f = _get(weather_data, "air_temp")
output = {k: _get(weather_data, k) for k, v in weather_data.items() if k in FIELDS}
output["degree"] = "\N{DEGREE SIGN}"
output["temp_f"] = "%.2f" % (temp_f * 9 / 5 + 32)
output["username"] = user if not user == 'mcspud' else 'macspud'
return _format_output(**output)
def handler(connection, event):
if event.arguments and event.arguments[0].startswith("my place"):
connection.privmsg(event.target, weather(event.source.nick))
def get_handlers():
return (("pubmsg", handler),) | 0.296756 | 0.281906 |
import pandas as pd
import numpy as np
import os, csv
from collections import defaultdict
import logging
class CityInfo:
    """Lookup of city name -> (latitude, longitude) built from worldcities.csv."""

    # Common alternate spellings mapped to the canonical name used in the CSV.
    _ALIASES = {
        'Cracow': 'Kraków',
        'Krakow': 'Kraków',
        'Warszawa': 'Warsaw',
        'Wroclaw': 'Wrocław',
        'Helsingfors': 'Helsinki',
    }

    def __init__(self):
        # Index the CSV by both the native name (column 0) and the ASCII
        # transliteration (column 1); the remaining columns (lat, lng, ...)
        # become the value.
        self.cities_data = {}
        self.cities_data_ascii_names = {}
        with open('worldcities.csv', encoding='utf-8') as csvDataFile:
            for row in csv.reader(csvDataFile):
                self.cities_data[row[0]] = row[2:]
                self.cities_data_ascii_names[row[1]] = row[2:]

    def get_city_coord(self, city: str):
        """Return (lat, lng) strings for city; raises KeyError if unknown.

        The name is Title-cased, anything after a comma dropped, and known
        aliases rewritten before the lookup.
        """
        city = city.title().split(',')[0]
        city = self._ALIASES.get(city, city)
        try:
            city_data = self.cities_data[city]
        except KeyError:
            # Narrowed from a bare `except:`.  Fall back to the ASCII-name
            # index; a second KeyError propagates to the caller.
            city_data = self.cities_data_ascii_names[city]
        return city_data[0], city_data[1]
def to_eur(money, currency):
    """Convert money given in currency (ISO code or symbol) to EUR.

    Returns None for unknown currencies.  Bug fix: the original tested
    `currency == 'EUR' and currency == '€'` (and likewise for USD/$ and
    A$/AUD) -- conditions that can never be true, so those currencies
    always fell through to None.  The intended `or` semantics are
    restored via an alias table.
    """
    if currency in ('EUR', '€'):
        return money
    # Units of `currency` per 1 EUR (rates carried over from the original).
    # NOTE(review): '₹' (the INR symbol) shared JPY's rate in the original;
    # kept as-is pending confirmation.
    rates = {
        'USD': 1.08, '$': 1.08,
        'AUD': 1.80, 'A$': 1.80,
        'PLN': 4.58,
        'kr': 11.00,
        'GBP': 0.88, '£': 0.88,
        'CHF': 1.06,
        'CAD': 1.53, 'C$': 1.53,
        'HUF': 367.93,
        'CZK': 27.78,
        'JPY': 117.25, '₹': 117.25,
    }
    rate = rates.get(currency)
    if rate is None:
        return None
    return money / rate
if __name__ == "__main__":
    # Build per-city min(salary_low) / max(salary_high) tables (EUR/year)
    # from DATABASE.csv and dump them as kepler.gl-ready CSVs.
    ci = CityInfo()
    min_low_salaries = {}
    max_high_salaries = {}
    with open('DATABASE.csv', encoding='utf-8') as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        next(csvReader)  # skip header row
        for row in csvReader:
            currency = row[-2].strip()
            salary_high = int(float(row[-4]))
            salary_low = int(float(row[-3]))
            if salary_high == 0 or salary_low == 0:
                continue
            if currency == 'PLN':
                # Heuristics: <=500 looks like an hourly rate, 500..2000 a
                # daily rate; normalise everything to a yearly salary.
                if salary_low <= 500:
                    salary_low *= 160
                if salary_high <= 500:
                    # Bug fix: the original multiplied the unrelated
                    # `salary` string here instead of salary_high.
                    salary_high *= 160
                # NOTE(review): an hourly value scaled into the 500..2000
                # range also gets the daily multiplier (same as the
                # original's sequential checks); confirm that is intended.
                if 500 < salary_low <= 2000:
                    salary_low *= 20
                if 500 < salary_high <= 2000:
                    salary_high *= 20
                salary_high *= 12
                salary_low *= 12
            if currency == '$':
                # Values under 1000 look like hourly rates; make them yearly.
                if salary_high < 1000:
                    salary_high *= 160 * 12
                if salary_low < 1000:
                    salary_low *= 160 * 12
            salary_high = to_eur(salary_high, currency)
            salary_low = to_eur(salary_low, currency)
            if salary_high is None or salary_low is None:
                continue
            for c in row[-6].split(','):
                try:
                    latitude, longitude = ci.get_city_coord(c.strip())
                except Exception:
                    # Unknown or garbled city names are simply skipped.
                    continue
                key = (latitude, longitude)
                if key not in min_low_salaries or min_low_salaries[key] > salary_low:
                    min_low_salaries[key] = salary_low
                if key not in max_high_salaries or max_high_salaries[key] < salary_high:
                    max_high_salaries[key] = salary_high
    # Emit one CSV per extreme, in kepler.gl's latitude/longitude/value shape.
    for fname, column, table in (
            ('kepler_low.csv', 'salary_low', min_low_salaries),
            ('kepler_high.csv', 'salary_high', max_high_salaries)):
        db = defaultdict(list)
        for (lat, lon), value in table.items():
            db['latitude'].append(lat)
            db['longitude'].append(lon)
            db[column].append(value)
        pd.DataFrame.from_dict(db).to_csv(fname, index=False)
import numpy as np
import os, csv
from collections import defaultdict
import logging
class CityInfo:
def __init__(self):
# Make dict
self.cities_data = {}
self.cities_data_ascii_names = {}
with open('worldcities.csv', encoding='utf-8') as csvDataFile:
csvReader = csv.reader(csvDataFile)
for row in csvReader:
self.cities_data[row[0]] = row[2:]
self.cities_data_ascii_names[row[1]] = row[2:]
def get_city_coord(self, city: str):
city = city.title()
city = city.split(',')[0]
if city == "Cracow" or city == "Krakow":
city = "Kraków"
elif city == "Warszawa":
city = "Warsaw"
elif city == "Wroclaw":
city = "Wrocław"
elif city == "Helsingfors":
city = "Helsinki"
try:
city_data = self.cities_data[city]
return city_data[0], city_data[1]
except:
city_data = self.cities_data_ascii_names[city]
return city_data[0], city_data[1]
def to_eur(money, currency):
if currency == 'EUR' and currency == '€':
return money
elif currency == 'USD' and currency == '$':
return money / 1.08
elif currency == 'A$' and currency == 'AUD':
return money / 1.80
elif currency == 'PLN':
return money / 4.58
elif currency == 'kr':
return money / 11.00
elif currency == 'GBP' or currency == '£':
return money / 0.88
elif currency == 'CHF':
return money / 1.06
elif currency == 'CAD' or currency == 'C$':
return money / 1.53
elif currency == 'HUF':
return money / 367.93
elif currency == 'CZK':
return money / 27.78
elif currency == '₹' or currency == 'JPY':
return money / 117.25
else:
None
if __name__ == "__main__":
ci = CityInfo()
min_low_salaries = {}
max_high_salaries = {}
with open('DATABASE.csv', encoding='utf-8') as csvDataFile:
csvReader = csv.reader(csvDataFile)
next(csvReader)
for row in csvReader:
salary = row[-2].strip()
cities = row[-1]
salary_high = row[-4]
salary_low = row[-3]
salary_high = int(float(salary_high))
salary_low = int(float(salary_low))
if salary_high == 0 or salary_low == 0:
continue
if row[-2] == 'PLN':
# Per hour
if salary_low <= 500:
salary_low *= 160
if salary_high <= 500:
salary *= 160
# Per day
if salary_low > 500 and salary_low <= 2000:
salary_low *= 20
if salary_high > 500 and salary_high <= 2000:
salary_high *= 20
# To year
salary_high *= 12
salary_low *= 12
if row[-2] == '$':
# To year salary
if salary_high < 1000:
salary_high *= 160 * 12
if salary_low < 1000:
salary_low *= 160 * 12
salary_high = to_eur(salary_high, row[-2])
salary_low = to_eur(salary_low, row[-2])
if salary_high == None or salary_low == None:
continue
for c in row[-6].split(','):
c = c.strip()
try:
latitude, longitude = ci.get_city_coord(c)
try:
if min_low_salaries[(latitude, longitude)] > salary_low:
min_low_salaries[(latitude, longitude)] = salary_low
except:
min_low_salaries[(latitude, longitude)] = salary_low
try:
if max_high_salaries[(latitude, longitude)] < salary_high:
max_high_salaries[(latitude, longitude)] = salary_high
except:
max_high_salaries[(latitude, longitude)] = salary_high
except KeyError as ex:
pass
except Exception as ex:
#logging.exception("Something awful happened!")
pass
db = defaultdict(list)
for k in min_low_salaries.keys():
db['latitude'].append(k[0])
db['longitude'].append(k[1])
db['salary_low'].append(min_low_salaries[k])
df = pd.DataFrame.from_dict(db)
df.to_csv(f'kepler_low.csv', index=False)
db = defaultdict(list)
for k in max_high_salaries.keys():
db['latitude'].append(k[0])
db['longitude'].append(k[1])
db['salary_high'].append(max_high_salaries[k])
df = pd.DataFrame.from_dict(db)
df.to_csv(f'kepler_high.csv', index=False) | 0.286568 | 0.234133 |
""" Userbot module which contains afk-related commands """
from random import choice, randint
from asyncio import sleep
from datetime import datetime
from telethon.events import StopPropagation
from userbot import (AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG,
BOTLOG_CHATID, USERS, PM_AUTO_BAN)
from userbot.events import register
# ========================= CONSTANTS ============================
AFKSTR = [
"Saya sedang sibuk sekarang. jika sangat penting anda bisa kirim nomor whatsapp pacarmu!\n#Bot",
"Saya sedang tidak online sekarang. Jika memang penting, Tinggalkan pesan setelah bunyi beep:\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!\n#Bot",
"Mungkin belum saatnya kita bertemu.\n#Bot",
"Aku Akan Balik Sebentar Lagi dan Jika tidak...,\ntunggulah lebih lama :v.\n#Bot",
"Aku sedang tidak disini. \nYang pasti Aku sedang berada di suatu tempat.\n#Bot",
"Aku bukan orang yang spesial tapi aku selalu ada bersamamu,Kecuali sekarang aja sih.\n#Bot",
"Ada 3 hal di duinia ini yang tidak bisa kuhitung, jumlah bintang di langit, ikan di laut dan cintaku padamu.\n#Bot",
"Rasa sayangku ke kamu kaya pas powerangers waktu gak ada monster nggak berubah.\n#Bot",
"Coba cari aku kearah ini\n---->\n#Bot",
"Coba cari aku kearah ini\n<----\n#Bot",
"Mohon Tinggalkan Pesan Yang penting kepadaku, Jika Tak Penting Ya udah.\n#Bot",
"Sudah! Jangan ada hubungan lagi, Aku tau kau selingkuh!.\n#Bot",
"Jika Aku Onlen,Aku bakal memberitahumu dimana aku.\nTapi aku tidak, \nJadi tanyakan aku saat aku kembali...\n#Bot",
"Aku Pergi!\nAku tidak tahu kapan aku kembali!\nKuharap Beberapa menit setelah pesan ini!\n#Bot",
"Ane lagi Gak Ada Sekarang :(, \nJadi Harap lampirkan Nama, alamat, nomer wa pacarmu, dan sertakan fotonya ya!\n#Bot",
"Maap Yak, Ane Lagi kagak Disini,\nJadi Rasakan Kebebasan Mengobrol Dengan Userbot Ku ini.\nDan Aku akan kembali sebentar lagi.\n#Bot",
"Aku Yakin Kamu Menunggu pesan balasan dariku!\n#Bot",
"Hidup sangatlah singkat,\nPerbanyak lah hidup ini dengan ibadah..\nJangan nonton JAV mulu!\n#Bot",
"Aku tidak disini sekarang..\nTetapi Jika Aku disini...\nMemang kamu mau menjalin hubungan kembali denganku?\n#Bot",
]
# =================================================================
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """Auto-reply to group messages that mention us while we are AFK.

    Per-sender reply counts live in USERS so repeat pings only get a reply
    every 2-4 messages; COUNT_MSG counts every message received while AFK.
    """
    from datetime import timedelta  # the file only imports the datetime class
    global COUNT_MSG
    global USERS
    global ISAFK
    global afk_time
    global afk_start
    global afk_end
    afk_end = datetime.now().replace(microsecond=0)
    if mention.message.mentioned and not (await mention.get_sender()).bot:
        if ISAFK:
            now = datetime.now()
            afk_since = now - afk_time
            # Bug fixes vs the original: use timedelta.days (".seconds" is
            # only the sub-day remainder, so `day` was always 0), call
            # timedelta directly ("datetime.timedelta" raised AttributeError
            # because only the datetime class is imported), use %M instead
            # of %I (12-hour hour) in the timestamp, and report a 1-hour gap
            # as hours rather than falling through to minutes.
            day = afk_since.days
            hours, rem = divmod(afk_since.seconds, 3600)
            minutes, seconds = divmod(rem, 60)
            if day == 1:
                afk_str = "Yesterday"
            elif day > 1:
                if day > 6:
                    date = now - timedelta(days=day, hours=hours, minutes=minutes)
                    afk_str = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    afk_str = (now - timedelta(days=day)).strftime('%A')
            elif hours >= 1:
                afk_str = f"`{int(hours)} Jam, {int(minutes)}menit` Lalu"
            elif minutes > 0:
                afk_str = f"`{int(minutes)} Menit, {int(seconds)}detik` Lalu"
            else:
                afk_str = f"`{int(seconds)} Detik` Lalu"
            if mention.sender_id not in USERS:
                if AFKREASON:
                    await mention.reply("[Offline]"
                                        f"\nKarena : `{AFKREASON}`."
                                        f"\nOffline Sejak: {afk_str}")
                else:
                    await mention.reply(str(choice(AFKSTR)))
                USERS.update({mention.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            else:
                # Repeat sender: throttle replies to every 2-4 mentions.
                if USERS[mention.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await mention.reply("[Offline]"
                                            f"\nKarena: `{AFKREASON}`."
                                            f"\nOffline Sejak: {afk_str}")
                    else:
                        await mention.reply(str(choice(AFKSTR)))
                USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                COUNT_MSG = COUNT_MSG + 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """Auto-reply to private messages received while we are AFK.

    Honors the PM-permit module when PM_AUTO_BAN is enabled: only approved
    senders are answered.
    """
    from datetime import timedelta  # the file only imports the datetime class
    global ISAFK
    global USERS
    global COUNT_MSG
    global afk_time
    global afk_start
    global afk_end
    afk_end = datetime.now().replace(microsecond=0)
    afk_str = "a while ago"
    if sender.is_private and sender.sender_id != 777000 and not (
            await sender.get_sender()).bot:
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                # SQL helpers unavailable (e.g. no DB configured): allow all.
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            afk_since = now - afk_time
            # Bug fixes vs the original: use timedelta.days instead of the
            # always-zero ".seconds // 86400", call timedelta directly
            # ("datetime.timedelta" raised AttributeError), assign afk_str
            # (not afk_since) in the >6-day branch, use %M instead of %I,
            # and report a 1-hour gap as hours.  The repeat-sender branch
            # was also chained to the outer condition, making it
            # unreachable while actually AFK; it is now the else of the
            # first-contact check, matching mention_afk.
            day = afk_since.days
            hours, rem = divmod(afk_since.seconds, 3600)
            minutes, seconds = divmod(rem, 60)
            if day == 1:
                afk_str = "Yesterday"
            elif day > 1:
                if day > 6:
                    date = now - timedelta(days=day, hours=hours, minutes=minutes)
                    afk_str = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    afk_str = (now - timedelta(days=day)).strftime('%A')
            elif hours >= 1:
                afk_str = f"`{int(hours)} Jam, {int(minutes)} Menit` Lalu"
            elif minutes > 0:
                afk_str = f"`{int(minutes)} Menit, {int(seconds)}Detik` Lalu"
            else:
                afk_str = f"`{int(seconds)} Detik` Lalu"
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply("[Offline]"
                                       f"\nKarena: `{AFKREASON}`."
                                       f"\nOffline Sejak: {afk_str}")
                else:
                    await sender.reply(str(choice(AFKSTR)))
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            else:
                # Repeat sender: throttle replies to every 2-4 messages.
                if USERS[sender.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await sender.reply("[Offline]"
                                           f"\nKarena: `{AFKREASON}`."
                                           f"\nOffline Sejak: {afk_str}")
                    else:
                        await sender.reply(str(choice(AFKSTR)))
                USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                COUNT_MSG = COUNT_MSG + 1
@register(outgoing=True, pattern="^.afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
    """Handle the outgoing .afk command: mark us AFK, optionally with a reason."""
    message = afk_e.text
    string = afk_e.pattern_match.group(1)
    global ISAFK
    global AFKREASON
    global afk_time
    global afk_start
    global afk_end
    # NOTE(review): afk_time is overwritten with datetime.now() just below,
    # and afk_end is reset by the reply handlers -- these two
    # initialisations look redundant.
    afk_time = None
    afk_end = {}
    start1 = datetime.now()
    afk_start = start1.replace(microsecond=0)
    if string:
        AFKREASON = string
        await afk_e.edit(f"[Offline]\
    \nKarena: `{string}`")
    else:
        await afk_e.edit("[Offline]")
    if BOTLOG:
        await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nAnda Offline")
    ISAFK = True
    afk_time = datetime.now()
    # StopPropagation keeps other outgoing handlers from also seeing .afk.
    raise StopPropagation
@register(outgoing=True)
async def type_afk_is_not_true(notafk):
    """Clear AFK status as soon as we send any outgoing message."""
    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global afk_time
    global afk_start
    global afk_end
    not_afk = datetime.now()
    afk_end = not_afk.replace(microsecond=0)
    if ISAFK:
        # Leaving AFK: flash a short "[Online]" notice, then summarise the
        # missed messages per sender to the log group.
        ISAFK = False
        msg = await notafk.respond("[Online]")
        await sleep(1)
        await msg.delete()
        if BOTLOG:
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "Anda menerima " + str(COUNT_MSG) + " Pesan Dari " +
                str(len(USERS)),
            )
            for i in USERS:
                name = await notafk.client.get_entity(i)
                name0 = str(name.first_name)
                await notafk.client.send_message(
                    BOTLOG_CHATID,
                    "[" + name0 + "](tg://user?id=" + str(i) + ")" +
                    " Mengirimkan" + "`" + str(USERS[i]) + " Pesan`",
                )
        # Reset the per-AFK counters for the next session.
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
AFKREASON = None
CMD_HELP.update({
"afk":
".afk [Optional Reason]\
\nPenggunaan: Anda tau AFK kan, Ga perlu dijelasin‚.\
"
}) | userbot/modules/afk.py | """ Userbot module which contains afk-related commands """
from random import choice, randint
from asyncio import sleep
from datetime import datetime
from telethon.events import StopPropagation
from userbot import (AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG,
BOTLOG_CHATID, USERS, PM_AUTO_BAN)
from userbot.events import register
# ========================= CONSTANTS ============================
AFKSTR = [
"Saya sedang sibuk sekarang. jika sangat penting anda bisa kirim nomor whatsapp pacarmu!\n#Bot",
"Saya sedang tidak online sekarang. Jika memang penting, Tinggalkan pesan setelah bunyi beep:\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!\n#Bot",
"Mungkin belum saatnya kita bertemu.\n#Bot",
"Aku Akan Balik Sebentar Lagi dan Jika tidak...,\ntunggulah lebih lama :v.\n#Bot",
"Aku sedang tidak disini. \nYang pasti Aku sedang berada di suatu tempat.\n#Bot",
"Aku bukan orang yang spesial tapi aku selalu ada bersamamu,Kecuali sekarang aja sih.\n#Bot",
"Ada 3 hal di duinia ini yang tidak bisa kuhitung, jumlah bintang di langit, ikan di laut dan cintaku padamu.\n#Bot",
"Rasa sayangku ke kamu kaya pas powerangers waktu gak ada monster nggak berubah.\n#Bot",
"Coba cari aku kearah ini\n---->\n#Bot",
"Coba cari aku kearah ini\n<----\n#Bot",
"Mohon Tinggalkan Pesan Yang penting kepadaku, Jika Tak Penting Ya udah.\n#Bot",
"Sudah! Jangan ada hubungan lagi, Aku tau kau selingkuh!.\n#Bot",
"Jika Aku Onlen,Aku bakal memberitahumu dimana aku.\nTapi aku tidak, \nJadi tanyakan aku saat aku kembali...\n#Bot",
"Aku Pergi!\nAku tidak tahu kapan aku kembali!\nKuharap Beberapa menit setelah pesan ini!\n#Bot",
"Ane lagi Gak Ada Sekarang :(, \nJadi Harap lampirkan Nama, alamat, nomer wa pacarmu, dan sertakan fotonya ya!\n#Bot",
"Maap Yak, Ane Lagi kagak Disini,\nJadi Rasakan Kebebasan Mengobrol Dengan Userbot Ku ini.\nDan Aku akan kembali sebentar lagi.\n#Bot",
"Aku Yakin Kamu Menunggu pesan balasan dariku!\n#Bot",
"Hidup sangatlah singkat,\nPerbanyak lah hidup ini dengan ibadah..\nJangan nonton JAV mulu!\n#Bot",
"Aku tidak disini sekarang..\nTetapi Jika Aku disini...\nMemang kamu mau menjalin hubungan kembali denganku?\n#Bot",
]
# =================================================================
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """Reply to mentions received while AFK.

    Ignores bots. Every mentioning user gets an AFK notice on first
    contact and then again at a random 2-4 message interval; all
    mentions are tallied in USERS/COUNT_MSG for the return summary.
    """
    global COUNT_MSG
    global USERS
    global ISAFK
    global afk_time
    global afk_start
    global afk_end
    afk_end = datetime.now().replace(microsecond=0)
    if mention.message.mentioned and not (await mention.get_sender()).bot:
        if ISAFK:
            now = datetime.now()
            afk_since = now - afk_time
            # Fixed: use total_seconds() instead of .seconds (which wraps
            # every 24h, so `day` was always 0 and the Yesterday/weekday
            # branches were unreachable).
            total = afk_since.total_seconds()
            day = total // (24 * 3600)
            rem = total % (24 * 3600)
            hours = rem // 3600
            rem %= 3600
            minutes = rem // 60
            seconds = rem % 60
            if day == 1:
                afk_str = "Yesterday"
            elif day > 1:
                if day > 6:
                    # Fixed: `datetime.timedelta` raised AttributeError
                    # (only the datetime class is imported); %H:%I printed
                    # the hour twice, %H:%M was clearly intended.
                    date = now - timedelta(days=day, hours=hours, minutes=minutes)
                    afk_str = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    wday = now - timedelta(days=day)
                    afk_str = wday.strftime('%A')
            elif hours > 1:
                afk_str = f"`{int(hours)} Jam, {int(minutes)}menit` Lalu"
            elif minutes > 0:
                afk_str = f"`{int(minutes)} Menit, {int(seconds)}detik` Lalu"
            else:
                afk_str = f"`{int(seconds)} Detik` Lalu"
            if mention.sender_id not in USERS:
                if AFKREASON:
                    await mention.reply("[Offline]"
                                        f"\nKarena : `{AFKREASON}`."
                                        f"\nOffline Sejak: {afk_str}")
                else:
                    await mention.reply(str(choice(AFKSTR)))
                USERS.update({mention.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif mention.sender_id in USERS:
                # Reply again only every 2-4 messages to avoid spamming.
                if USERS[mention.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await mention.reply("[Offline]"
                                            f"\nKarena: `{AFKREASON}`."
                                            f"\nOffline Sejak: {afk_str}")
                    else:
                        await mention.reply(str(choice(AFKSTR)))
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """Send AFK notices in private chats while the user is away.

    Skips bots and the Telegram service account (id 777000). When
    PM_AUTO_BAN is enabled, only PM-approved senders get a reply.
    Replies on first message and then at a random 2-4 message interval;
    every PM is tallied in USERS/COUNT_MSG for the return summary.
    """
    global ISAFK
    global USERS
    global COUNT_MSG
    global afk_time
    global afk_start
    global afk_end
    afk_end = datetime.now().replace(microsecond=0)
    afk_str = "a while ago"  # fallback if no branch below assigns it
    if sender.is_private and sender.sender_id != 777000 and not (
            await sender.get_sender()).bot:
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                # SQL backend unavailable: treat everyone as approved.
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            afk_since = now - afk_time
            # Fixed: use total_seconds() instead of .seconds (which wraps
            # every 24h, so `day` was always 0).
            total = afk_since.total_seconds()
            day = total // (24 * 3600)
            rem = total % (24 * 3600)
            hours = rem // 3600
            rem %= 3600
            minutes = rem // 60
            seconds = rem % 60
            if day == 1:
                afk_str = "Yesterday"
            elif day > 1:
                if day > 6:
                    # Fixed: `datetime.timedelta` raised AttributeError;
                    # the result was assigned to afk_since instead of
                    # afk_str, leaving the fallback text in the reply;
                    # %H:%I printed the hour twice (%H:%M intended).
                    date = now - timedelta(days=day, hours=hours, minutes=minutes)
                    afk_str = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    wday = now - timedelta(days=day)
                    afk_str = wday.strftime('%A')
            elif hours > 1:
                afk_str = f"`{int(hours)} Jam, {int(minutes)} Menit` Lalu"
            elif minutes > 0:
                afk_str = f"`{int(minutes)} Menit, {int(seconds)}Detik` Lalu"
            else:
                afk_str = f"`{int(seconds)} Detik` Lalu"
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply("[Offline]"
                                       f"\nKarena: `{AFKREASON}`."
                                       f"\nOffline Sejak: {afk_str}")
                else:
                    await sender.reply(str(choice(AFKSTR)))
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif apprv and sender.sender_id in USERS:
                # Reply again only every 2-4 messages to avoid spamming.
                if USERS[sender.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await sender.reply("[Offline]"
                                           f"\nKarena: `{AFKREASON}`."
                                           f"\nOffline Sejak: {afk_str}")
                    else:
                        await sender.reply(str(choice(AFKSTR)))
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
@register(outgoing=True, pattern="^.afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
    """Handle the .afk command: mark this account as AFK.

    Stores the optional reason given after the command, records the AFK
    start time, edits the triggering message to an "[Offline]" notice,
    and logs the state change to the bot-log chat when enabled.

    Raises:
        StopPropagation: always, so no later handler processes this message.
    """
    global ISAFK
    global AFKREASON
    global afk_time
    global afk_start
    global afk_end
    # Fixed: dropped the unused `message` local and renamed `string`
    # (which shadowed the stdlib module name) to `reason`.
    reason = afk_e.pattern_match.group(1)
    afk_time = None
    afk_end = {}  # cleared; repopulated when the user returns
    # Start timestamp without sub-second noise, used for duration display.
    afk_start = datetime.now().replace(microsecond=0)
    if reason:
        AFKREASON = reason
        await afk_e.edit(f"[Offline]\nKarena: `{reason}`")
    else:
        await afk_e.edit("[Offline]")
    if BOTLOG:
        await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nAnda Offline")
    ISAFK = True
    afk_time = datetime.now()
    raise StopPropagation
@register(outgoing=True)
async def type_afk_is_not_true(notafk):
    """Clear AFK state on the first outgoing message.

    Briefly announces "[Online]", reports how many messages arrived from
    how many senders to the bot-log chat, and resets the AFK counters.
    """
    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global afk_time
    global afk_start
    global afk_end
    not_afk = datetime.now()
    # End timestamp without sub-second noise.
    afk_end = not_afk.replace(microsecond=0)
    if ISAFK:
        ISAFK = False
        # Announce the return, then delete the notice after one second.
        msg = await notafk.respond("[Online]")
        await sleep(1)
        await msg.delete()
        if BOTLOG:
            # Summary line: total messages and number of distinct senders.
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "Anda menerima " + str(COUNT_MSG) + " Pesan Dari " +
                str(len(USERS)),
            )
            # Per-sender breakdown with a clickable mention link.
            for i in USERS:
                name = await notafk.client.get_entity(i)
                name0 = str(name.first_name)
                await notafk.client.send_message(
                    BOTLOG_CHATID,
                    "[" + name0 + "](tg://user?id=" + str(i) + ")" +
                    " Mengirimkan" + "`" + str(USERS[i]) + " Pesan`",
                )
        # Reset counters for the next AFK session.
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
# Register the help text for the .afk command (shown by the help module).
# The entry uses in-string line continuations; string kept byte-identical.
CMD_HELP.update({
    "afk":
    ".afk [Optional Reason]\
    \nPenggunaan: Anda tau AFK kan, Ga perlu dijelasin‚.\
    "
})
import typing as t
from typing import TYPE_CHECKING
import numpy as np
import joblib
import pytest
from sklearn.ensemble import RandomForestClassifier
import bentoml
import bentoml.models
from bentoml.exceptions import BentoMLException
from tests.utils.helpers import assert_have_file_extension
from tests.utils.frameworks.sklearn_utils import sklearn_model_data
# fmt: off
res_arr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
# fmt: on
if TYPE_CHECKING:
from bentoml import Tag
def save_procedure(
    metadata: t.Dict[str, t.Any],
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
) -> "Tag":
    """Train the fixture RandomForest and save it to the BentoML store.

    Returns the tag under which "test_sklearn_model" was stored.
    """
    model, _ = sklearn_model_data(clf=RandomForestClassifier)
    tag_info = bentoml.sklearn.save(
        "test_sklearn_model",
        model,
        metadata=metadata,
        labels=labels,
        custom_objects=custom_objects,
    )
    return tag_info
def forbidden_procedure() -> "Tag":
    """Store a model under this test module instead of a framework module.

    Loading the returned tag through bentoml.sklearn is expected to fail,
    which the exception test below relies on.
    """
    model, _ = sklearn_model_data(clf=RandomForestClassifier)
    with bentoml.models.create(
        "invalid_module",
        module=__name__,
        labels=None,
        options=None,
        context=None,
        metadata=None,
    ) as ctx:
        joblib.dump(model, ctx.path_of("saved_model.pkl"))
        return ctx.tag
@pytest.mark.parametrize(
    "metadata",
    [
        ({"model": "Sklearn", "test": True}),
        ({"acc": 0.876}),
    ],
)
def test_sklearn_save_load(metadata: t.Dict[str, t.Any]) -> None:
    """Save a model with metadata/labels/custom objects, then load and predict."""
    labels = {"stage": "dev"}
    def custom_f(x: int) -> int:
        return x + 1
    _, data = sklearn_model_data(clf=RandomForestClassifier)
    tag = save_procedure(metadata, labels=labels, custom_objects={"func": custom_f})
    bentomodel = bentoml.models.get(tag)
    assert bentomodel.info.metadata is not None
    assert_have_file_extension(bentomodel.path, ".pkl")
    # Labels and custom objects must round-trip through the model store.
    for k in labels.keys():
        assert labels[k] == bentomodel.info.labels[k]
    assert bentomodel.custom_objects["func"](3) == custom_f(3)
    loaded = bentoml.sklearn.load(bentomodel.tag)
    assert isinstance(loaded, RandomForestClassifier)
    np.testing.assert_array_equal(loaded.predict(data), res_arr)
def test_get_model_info_exc() -> None:
    """Loading a model saved under a non-framework module must raise."""
    tag = forbidden_procedure()
    with pytest.raises(BentoMLException):
        _ = bentoml.sklearn.load(tag)
def test_sklearn_runner_setup_run_batch() -> None:
_, data = sklearn_model_data(clf=RandomForestClassifier)
tag = save_procedure({})
runner = bentoml.sklearn.load_runner(tag)
assert tag in runner.required_models
assert runner.num_replica == 1
res = runner.run_batch(data)
assert (res == res_arr).all() | tests/integration/frameworks/test_sklearn_impl.py | import typing as t
from typing import TYPE_CHECKING
import numpy as np
import joblib
import pytest
from sklearn.ensemble import RandomForestClassifier
import bentoml
import bentoml.models
from bentoml.exceptions import BentoMLException
from tests.utils.helpers import assert_have_file_extension
from tests.utils.frameworks.sklearn_utils import sklearn_model_data
# fmt: off
res_arr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
# fmt: on
if TYPE_CHECKING:
from bentoml import Tag
def save_procedure(
    metadata: t.Dict[str, t.Any],
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
) -> "Tag":
    """Train the fixture RandomForest and save it to the BentoML store.

    Returns the tag under which "test_sklearn_model" was stored.
    """
    clf, _ = sklearn_model_data(clf=RandomForestClassifier)
    return bentoml.sklearn.save(
        "test_sklearn_model",
        clf,
        metadata=metadata,
        labels=labels,
        custom_objects=custom_objects,
    )
def forbidden_procedure() -> "Tag":
    """Store a model under this test module instead of a framework module.

    Loading the returned tag through bentoml.sklearn is expected to fail,
    which the exception test below relies on.
    """
    clf, _ = sklearn_model_data(clf=RandomForestClassifier)
    with bentoml.models.create(
        "invalid_module",
        module=__name__,
        labels=None,
        options=None,
        context=None,
        metadata=None,
    ) as ctx:
        joblib.dump(clf, ctx.path_of("saved_model.pkl"))
        return ctx.tag
@pytest.mark.parametrize(
    "metadata",
    [
        ({"model": "Sklearn", "test": True}),
        ({"acc": 0.876}),
    ],
)
def test_sklearn_save_load(metadata: t.Dict[str, t.Any]) -> None:
    """Save a model with metadata/labels/custom objects, then load and predict."""
    labels = {"stage": "dev"}
    def custom_f(x: int) -> int:
        return x + 1
    _, data = sklearn_model_data(clf=RandomForestClassifier)
    tag = save_procedure(metadata, labels=labels, custom_objects={"func": custom_f})
    bentomodel = bentoml.models.get(tag)
    assert bentomodel.info.metadata is not None
    assert_have_file_extension(bentomodel.path, ".pkl")
    # Labels and custom objects must round-trip through the model store.
    for k in labels.keys():
        assert labels[k] == bentomodel.info.labels[k]
    assert bentomodel.custom_objects["func"](3) == custom_f(3)
    loaded = bentoml.sklearn.load(bentomodel.tag)
    assert isinstance(loaded, RandomForestClassifier)
    np.testing.assert_array_equal(loaded.predict(data), res_arr)
def test_get_model_info_exc() -> None:
    """Loading a model saved under a non-framework module must raise."""
    bad_tag = forbidden_procedure()
    with pytest.raises(BentoMLException):
        bentoml.sklearn.load(bad_tag)
def test_sklearn_runner_setup_run_batch() -> None:
    """Round-trip: save the model, build a runner, and batch-predict."""
    _, data = sklearn_model_data(clf=RandomForestClassifier)
    tag = save_procedure({})
    runner = bentoml.sklearn.load_runner(tag)
    assert tag in runner.required_models
    assert runner.num_replica == 1
    res = runner.run_batch(data)
    # Predictions must match the precomputed iris labels (res_arr above).
    assert (res == res_arr).all()
from collections import defaultdict
import matplotlib
import matplotlib.cm as cmx
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.manifold import TSNE
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.family'] = ['sans-serif']
plt.rcParams['font.sans-serif'] = ['SimHei']
def plot_embeddings(nodes, embeddings, labels, n_class=10, node_text=False, save_path=None):
    """
    Scatter-plot 2-D node embeddings, coloured and marked per class.

    :param nodes: node identifiers, aligned with the rows of ``embeddings``
    :param embeddings: 2-dimensional vectors (indexed as ``embeddings[idx, 0/1]``)
    :param labels: per-node class labels, castable to ``int``
    :param n_class: number of classes, used to scale the colour map
    :param node_text: if True, draw each node id next to its point
    :param save_path: optional path; when given, the figure is saved there
    :return: None
    """
    matplotlib.use("TkAgg")
    # One marker per class, cycled when there are more classes than markers.
    markers = ['o', '*', 'x', '<', '1', 'D', '>', '^', "v", 'p', '2', '3', '4', 'X', '.']
    cm = plt.get_cmap("nipy_spectral")
    cNorm = colors.Normalize(vmin=0, vmax=n_class-1)
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
    # Group node indices by class label so each class is one scatter call.
    class_dict = defaultdict(list)
    for idx, node in enumerate(nodes):
        class_dict[int(labels[idx])].append(idx)
    info = sorted(class_dict.items(), key=lambda item:item[0])
    for _class, _indices in info:
        plt.scatter(embeddings[_indices, 0], embeddings[_indices, 1], s=100,
                    marker=markers[_class % len(markers)],
                    c=[scalarMap.to_rgba(_class)], label=_class)
    if node_text:
        for idx, (x, y) in enumerate(embeddings):
            plt.text(x, y, nodes[idx])
    #plt.legend()
    # Hide ticks; embedding axes carry no intrinsic meaning.
    plt.xticks([])
    plt.yticks([])
    if save_path:
        plt.savefig(save_path)
        print("Save TSNE result figure.")
    #plt.show()
def plot_embedding2D(node_pos, node_colors=None, di_graph=None, labels=None):
    """Plot node positions in 2-D, reducing with t-SNE when needed.

    :param node_pos: (num_nodes, dim) array; reduced to 2-D if dim > 2
    :param node_colors: optional per-node colours
    :param di_graph: optional networkx graph; when given, drawn with networkx
    :param labels: node labels forwarded to the networkx drawing call
    """
    node_num, embedding_dimension = node_pos.shape
    if embedding_dimension > 2:
        print("Embedding dimension greater than 2, use tSNE to reduce it to 2")
        model = TSNE(n_components=2)
        node_pos = model.fit_transform(node_pos)
    if di_graph is None:
        # plot using plt scatter
        plt.scatter(node_pos[:, 0], node_pos[:, 1], c=node_colors)
    else:
        # plot using networkx with edge structure
        pos = {}
        for i in range(node_num):
            pos[i] = node_pos[i, :]
        if node_colors is not None:
            # NOTE(review): width/arrows/font_size/labels are not
            # draw_networkx_nodes() parameters — confirm against the
            # installed networkx version (newer versions reject them).
            nx.draw_networkx_nodes(di_graph, pos,
                                   node_color=node_colors,
                                   width=0.1, node_size=100,
                                   arrows=False, alpha=0.8,
                                   font_size=5, labels=labels)
        else:
            # node_colors is None on this branch, so node_color=None
            # falls back to the networkx default colour.
            nx.draw_networkx(di_graph, pos, node_color=node_colors,
                             width=0.1, node_size=300, arrows=False,
                             alpha=0.8, font_size=12, labels=labels)
"""
def robustness_vis():
db = Database()
filters = {"evaluate": "LR", "metric": "l1", "ge_name": "HSELE", "data": "europe"}
cursor = db.find("scores", filters=filters)
LE_records = []
for record in cursor:
LE_records.append(record)
filters['ge_name'] = 'HSELLE'
cursor = db.find("scores", filters=filters)
LLE_records = []
for record in cursor:
LLE_records.append(record)
print(LE_records)
ratio1, ratio2 = [], []
LE_scores, LLE_scores = [], []
for doc1, doc2 in zip(LE_records, LLE_records):
print(doc1)
_scores = doc1['scores']
LE_scores.extend(_scores)
ratio1 += [1.0 - doc1['prob']] * len(_scores)
print(doc2)
_scores = doc2['scores']
LLE_scores.extend(_scores)
ratio2 += [1.0 - doc2['prob']] * len(_scores)
#scores = scores[::-1]
evaluate = ["HSELE"] * len(LE_scores) + ["HSELLE"] * len(LLE_scores)
LE_scores.extend(LLE_scores)
ratio1.extend(ratio2)
print(LE_scores)
data = pd.DataFrame(data={"Accuracy": LE_scores, "Deletion Ratio": ratio1, "method": evaluate})
sns.set(style="ticks")
sns.relplot(x="Deletion Ratio", y="Accuracy", hue="method", data=data, kind="line")
plt.ylim((0.6, 1))
plt.show()
def robustness_from_excel():
import seaborn as sns
HSDLE=[0.738888863, 0.751388817, 0.746428551, 0.757142813, 0.787037011, 0.803703607,
0.820370354, 0.834259237, 0.851851839, 0.870833308, 0.870238073]
HSDLLE=[0.70208315, 0.724999867, 0.743749975, 0.774999971, 0.790476166, 0.813541638,
0.824999978, 0.868055543, 0.881249961, 0.89999996, 0.925]
graphwave=[0.74833333, 0.73666664, 0.748333326, 0.768333312, 0.7883333, 0.754999972,
0.76833318, 0.79166662, 0.7933333, 0.80666664, 0.825]
struc2vec=[0.744999852, 0.733333324, 0.746666652, 0.748333306, 0.7533333, 0.754999997,
0.776666626, 0.789999966, 0.80999998, 0.80833332, 0.814999966]
node2vec=[0.443333312, 0.403333318, 0.4283333, 0.451666658, 0.473333324, 0.511666652,
0.486666646, 0.513333318, 0.489999972, 0.544999986, 0.544999972]
delete_ratio=[0.5, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.0]
data = pd.DataFrame(data={"Accuracy": HSDLE + HSDLLE + graphwave + struc2vec + node2vec,
"Deletion Ratio": delete_ratio * 5,
"method": ['HSDLE']*len(HSDLE) + ['HSDLLE']*len(HSDLLE) +
['GraphWave']*len(graphwave) + ['Struc2vec']*len(struc2vec) +
['Node2vec']*len(node2vec)
})
sns.set(style="ticks")
sns.relplot(x="Deletion Ratio", y="Accuracy", hue="method", data=data, kind="line")
plt.ylim((0.0, 1))
plt.show()
if __name__ == '__main__':
robustness_from_excel()
#time_vs()
""" | tools/visualize.py | from collections import defaultdict
import matplotlib
import matplotlib.cm as cmx
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.manifold import TSNE
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.family'] = ['sans-serif']
plt.rcParams['font.sans-serif'] = ['SimHei']
def plot_embeddings(nodes, embeddings, labels, n_class=10, node_text=False, save_path=None):
    """
    Scatter-plot 2-D node embeddings, coloured and marked per class.

    :param nodes: node identifiers, aligned with the rows of ``embeddings``
    :param embeddings: 2-dimensional vectors (indexed as ``embeddings[idx, 0/1]``)
    :param labels: per-node class labels, castable to ``int``
    :param n_class: number of classes, used to scale the colour map
    :param node_text: if True, draw each node id next to its point
    :param save_path: optional path; when given, the figure is saved there
    :return: None
    """
    matplotlib.use("TkAgg")
    # One marker per class, cycled when there are more classes than markers.
    markers = ['o', '*', 'x', '<', '1', 'D', '>', '^', "v", 'p', '2', '3', '4', 'X', '.']
    cm = plt.get_cmap("nipy_spectral")
    cNorm = colors.Normalize(vmin=0, vmax=n_class-1)
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
    # Group node indices by class label so each class is one scatter call.
    class_dict = defaultdict(list)
    for idx, node in enumerate(nodes):
        class_dict[int(labels[idx])].append(idx)
    info = sorted(class_dict.items(), key=lambda item:item[0])
    for _class, _indices in info:
        plt.scatter(embeddings[_indices, 0], embeddings[_indices, 1], s=100,
                    marker=markers[_class % len(markers)],
                    c=[scalarMap.to_rgba(_class)], label=_class)
    if node_text:
        for idx, (x, y) in enumerate(embeddings):
            plt.text(x, y, nodes[idx])
    #plt.legend()
    # Hide ticks; embedding axes carry no intrinsic meaning.
    plt.xticks([])
    plt.yticks([])
    if save_path:
        plt.savefig(save_path)
        print("Save TSNE result figure.")
    #plt.show()
def plot_embedding2D(node_pos, node_colors=None, di_graph=None, labels=None):
    """Plot node positions in 2-D, reducing with t-SNE when needed.

    :param node_pos: (num_nodes, dim) array; reduced to 2-D if dim > 2
    :param node_colors: optional per-node colours
    :param di_graph: optional networkx graph; when given, drawn with networkx
    :param labels: node labels forwarded to the networkx drawing call
    """
    node_num, embedding_dimension = node_pos.shape
    if embedding_dimension > 2:
        print("Embedding dimension greater than 2, use tSNE to reduce it to 2")
        model = TSNE(n_components=2)
        node_pos = model.fit_transform(node_pos)
    if di_graph is None:
        # plot using plt scatter
        plt.scatter(node_pos[:, 0], node_pos[:, 1], c=node_colors)
    else:
        # plot using networkx with edge structure
        pos = {}
        for i in range(node_num):
            pos[i] = node_pos[i, :]
        if node_colors is not None:
            # NOTE(review): width/arrows/font_size/labels are not
            # draw_networkx_nodes() parameters — confirm against the
            # installed networkx version (newer versions reject them).
            nx.draw_networkx_nodes(di_graph, pos,
                                   node_color=node_colors,
                                   width=0.1, node_size=100,
                                   arrows=False, alpha=0.8,
                                   font_size=5, labels=labels)
        else:
            # node_colors is None on this branch, so node_color=None
            # falls back to the networkx default colour.
            nx.draw_networkx(di_graph, pos, node_color=node_colors,
                             width=0.1, node_size=300, arrows=False,
                             alpha=0.8, font_size=12, labels=labels)
"""
def robustness_vis():
db = Database()
filters = {"evaluate": "LR", "metric": "l1", "ge_name": "HSELE", "data": "europe"}
cursor = db.find("scores", filters=filters)
LE_records = []
for record in cursor:
LE_records.append(record)
filters['ge_name'] = 'HSELLE'
cursor = db.find("scores", filters=filters)
LLE_records = []
for record in cursor:
LLE_records.append(record)
print(LE_records)
ratio1, ratio2 = [], []
LE_scores, LLE_scores = [], []
for doc1, doc2 in zip(LE_records, LLE_records):
print(doc1)
_scores = doc1['scores']
LE_scores.extend(_scores)
ratio1 += [1.0 - doc1['prob']] * len(_scores)
print(doc2)
_scores = doc2['scores']
LLE_scores.extend(_scores)
ratio2 += [1.0 - doc2['prob']] * len(_scores)
#scores = scores[::-1]
evaluate = ["HSELE"] * len(LE_scores) + ["HSELLE"] * len(LLE_scores)
LE_scores.extend(LLE_scores)
ratio1.extend(ratio2)
print(LE_scores)
data = pd.DataFrame(data={"Accuracy": LE_scores, "Deletion Ratio": ratio1, "method": evaluate})
sns.set(style="ticks")
sns.relplot(x="Deletion Ratio", y="Accuracy", hue="method", data=data, kind="line")
plt.ylim((0.6, 1))
plt.show()
def robustness_from_excel():
import seaborn as sns
HSDLE=[0.738888863, 0.751388817, 0.746428551, 0.757142813, 0.787037011, 0.803703607,
0.820370354, 0.834259237, 0.851851839, 0.870833308, 0.870238073]
HSDLLE=[0.70208315, 0.724999867, 0.743749975, 0.774999971, 0.790476166, 0.813541638,
0.824999978, 0.868055543, 0.881249961, 0.89999996, 0.925]
graphwave=[0.74833333, 0.73666664, 0.748333326, 0.768333312, 0.7883333, 0.754999972,
0.76833318, 0.79166662, 0.7933333, 0.80666664, 0.825]
struc2vec=[0.744999852, 0.733333324, 0.746666652, 0.748333306, 0.7533333, 0.754999997,
0.776666626, 0.789999966, 0.80999998, 0.80833332, 0.814999966]
node2vec=[0.443333312, 0.403333318, 0.4283333, 0.451666658, 0.473333324, 0.511666652,
0.486666646, 0.513333318, 0.489999972, 0.544999986, 0.544999972]
delete_ratio=[0.5, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.0]
data = pd.DataFrame(data={"Accuracy": HSDLE + HSDLLE + graphwave + struc2vec + node2vec,
"Deletion Ratio": delete_ratio * 5,
"method": ['HSDLE']*len(HSDLE) + ['HSDLLE']*len(HSDLLE) +
['GraphWave']*len(graphwave) + ['Struc2vec']*len(struc2vec) +
['Node2vec']*len(node2vec)
})
sns.set(style="ticks")
sns.relplot(x="Deletion Ratio", y="Accuracy", hue="method", data=data, kind="line")
plt.ylim((0.0, 1))
plt.show()
if __name__ == '__main__':
robustness_from_excel()
#time_vs()
""" | 0.718496 | 0.435241 |
__author__ = '<NAME>'
__copyright__ = '2018 Sourcerer, Inc'
import os
import shutil
from datetime import datetime
from .storage_base import StorageBase
class LocalStorage(StorageBase):
    """StorageBase backend that keeps everything on the local filesystem.

    Paths passed to the public methods are resolved against ``work_dir``.
    """
    def __init__(self, work_dir):
        # Root directory under which all managed files live.
        self.work_dir = work_dir
    def make_dirs(self, path):
        """Create directory *path* (and parents); no error if it exists."""
        full_path = os.path.join(self.work_dir, path)
        os.makedirs(full_path, exist_ok=True)
    def move_file(self, from_path, to_path):
        """Rename *from_path* to *to_path*; return True on success."""
        try:
            os.rename(os.path.join(self.work_dir, from_path),
                      os.path.join(self.work_dir, to_path))
            return True
        except OSError:
            return False
    def remove_file(self, path):
        """Delete *path*; return True on success, False on OSError.

        NOTE(review): unlike every sibling method, *path* is NOT joined
        with work_dir here — likely a bug; verify intended contract.
        """
        try:
            os.remove(path)
            return True
        except OSError:
            return False
    def remove_subtree(self, path):
        """Recursively delete directory *path*; errors are ignored."""
        full_path = os.path.join(self.work_dir, path)
        shutil.rmtree(full_path, ignore_errors=True)
    def list_dir(self, dir_path, include_files=True, include_subdirs=True):
        """Return entry names in *dir_path*, optionally filtering by kind."""
        full_path = os.path.join(self.work_dir, dir_path)
        result = []
        for entry in os.listdir(full_path):
            entry_path = os.path.join(full_path, entry)
            if not include_files and os.path.isfile(entry_path):
                continue
            if not include_subdirs and os.path.isdir(entry_path):
                continue
            result.append(entry)
        return result
    def file_exists(self, file_path):
        """Return True if *file_path* is an existing regular file."""
        full_path = os.path.join(self.work_dir, file_path)
        return os.path.isfile(full_path)
    def dir_exists(self, dir_path):
        """Return True if *dir_path* is an existing directory."""
        full_path = os.path.join(self.work_dir, dir_path)
        return os.path.isdir(full_path)
    def last_modified(self, path):
        """Return the UTC mtime of *path* as a naive datetime."""
        full_path = os.path.join(self.work_dir, path)
        return datetime.utcfromtimestamp(os.path.getmtime(full_path))
    def save_file(self, path, data, content_type='text/plain'):
        """Write text *data* to *path* (content_type kept for interface parity)."""
        full_path = os.path.join(self.work_dir, path)
        with open(full_path, 'w') as f:
            f.write(data)
def load_file(self, path):
full_path = os.path.join(self.work_dir, path)
with open(full_path, 'r') as f:
return f.read() | fame/storage/local_storage.py |
__author__ = '<NAME>'
__copyright__ = '2018 Sourcerer, Inc'
import os
import shutil
from datetime import datetime
from .storage_base import StorageBase
class LocalStorage(StorageBase):
    """StorageBase backend that keeps everything on the local filesystem.

    All paths passed to the public methods are resolved against
    ``work_dir``.
    """

    def __init__(self, work_dir):
        # Root directory under which all managed files live.
        self.work_dir = work_dir

    def make_dirs(self, path):
        """Create directory *path* (and parents); no error if it exists."""
        os.makedirs(os.path.join(self.work_dir, path), exist_ok=True)

    def move_file(self, from_path, to_path):
        """Rename *from_path* to *to_path*; return True on success."""
        try:
            os.rename(os.path.join(self.work_dir, from_path),
                      os.path.join(self.work_dir, to_path))
            return True
        except OSError:
            return False

    def remove_file(self, path):
        """Delete file *path*; return True on success, False on OSError.

        Fixed to resolve *path* against work_dir like every other
        method (it was previously used verbatim).
        """
        try:
            os.remove(os.path.join(self.work_dir, path))
            return True
        except OSError:
            return False

    def remove_subtree(self, path):
        """Recursively delete directory *path*; errors are ignored."""
        shutil.rmtree(os.path.join(self.work_dir, path), ignore_errors=True)

    def list_dir(self, dir_path, include_files=True, include_subdirs=True):
        """Return entry names in *dir_path*, optionally filtering by kind."""
        full_path = os.path.join(self.work_dir, dir_path)
        result = []
        for entry in os.listdir(full_path):
            entry_path = os.path.join(full_path, entry)
            if not include_files and os.path.isfile(entry_path):
                continue
            if not include_subdirs and os.path.isdir(entry_path):
                continue
            result.append(entry)
        return result

    def file_exists(self, file_path):
        """Return True if *file_path* is an existing regular file."""
        return os.path.isfile(os.path.join(self.work_dir, file_path))

    def dir_exists(self, dir_path):
        """Return True if *dir_path* is an existing directory."""
        return os.path.isdir(os.path.join(self.work_dir, dir_path))

    def last_modified(self, path):
        """Return the UTC mtime of *path* as a naive datetime."""
        full_path = os.path.join(self.work_dir, path)
        return datetime.utcfromtimestamp(os.path.getmtime(full_path))

    def save_file(self, path, data, content_type='text/plain'):
        """Write text *data* to *path* (content_type kept for interface parity)."""
        with open(os.path.join(self.work_dir, path), 'w') as f:
            f.write(data)

    def load_file(self, path):
        """Return the text contents of *path*."""
        with open(os.path.join(self.work_dir, path), 'r') as f:
            return f.read()
from mshr import *
from dolfin import *
from nodes import *
from scipy.sparse import dok_matrix
from ddm import *
from matplotlib import pyplot as plt
import matplotlib.tri as tri
from scipy.sparse.linalg import spsolve, gmres, splu, LinearOperator, inv
from scipy.linalg import expm_cond
parameters['reorder_dofs_serial'] = False
geo = 'geo12'
mesh = Mesh('Geometria/geo12.xml');
mt = mesh.num_vertices()
coord = mesh.coordinates()
nei = connect(mesh)
dmed = (mesh.hmax() + mesh.hmin())/2
Xn = nodes(coord)
Xn.set_neighborhood(nei)
# Number of subdomains
nd = 10
K = dok_matrix((Xn.size, Xn.size), dtype=np.complex)
k = 2*np.pi
f = np.zeros(Xn.size,dtype=np.complex)
A, f = assemble(Xn,k)
u_d = spsolve(A,f)
triang = mesh2triang(mesh)
plt.gca().set_aspect('equal')
plt.tripcolor(triang, np.real(u_d),shading='gouraud')
plt.show()
dx = nd
ovl = 0.1
list_mesh = submeshes(mesh,nd,dx,ovl,Verbose=False)
r,rd = indices(list_mesh, mesh)
R = [];D = [];K = []; Kinv = []
for j in range(nd):
Ri, Di = restriction(len(r[j]),mt,r[j],rd)
submesh = list_mesh[j]
nei = connect(submesh)
Xj = nodes(submesh.coordinates())
Xj.set_neighborhood(nei)
Kj = dok_matrix((Xj.size, Xj.size), dtype=np.complex)
# Assemble submatrix
for I in range(Xj.size):
support = Xj.coord[Xj.get_support(I),:]
Phi, dPhix, dPhiy, dPhix2, dPhiy2 = shape_function(Xj.coord[I],support)
if Xj.type[I] == 1: # Left Boundary
n = 1
if j >= 1:
Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k*Phi - 1j/(2*k)*dPhiy2
else:
Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi
elif Xj.type[I] == 2: # Right Boundary
n = -1
if j < nd:
Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi - 1j/(2*k)*dPhiy2
else:
Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi
elif Xj.type[I] == 3: # Bottom Boundary
n = 1
Kj[I,Xj.get_support(I)] = n*dPhiy + 1j*k*Phi
elif Xj.type[I] == 4: # Top Boundary
n = -1
Kj[I,Xj.get_support(I)] = n*dPhiy + 1j*k*Phi
else: # Internal Nodes
Kj[I,Xj.get_support(I)] = dPhix2 + dPhiy2 +(k**2)*Phi
R.append(Ri)
D.append(Di)
K.append(Kj)
Kinv.append(splu(Kj.tocsc()))
# GMRES stopping criteria for the preconditioned solves below.
maxiter = 1000
tol = 1e-5
# Removed dead `b = rhs`: `rhs` was never defined (NameError at runtime)
# and `b` was never used; the right-hand side used below is `f`.
def ddm_operator(r1):
    """Apply one restricted additive Schwarz preconditioner step.

    Solves each local subdomain problem on the restricted input vector
    and sums the partition-of-unity-weighted prolongations. The original
    computed `r1 - A*u` with `u` identically zero, which equals `r1`
    exactly; the dead matrix-vector product is removed.
    """
    u = 1j*np.zeros(A.shape[0])
    for i in range(nd):
        local = Kinv[i].solve(R[i]*r1)
        u = u + R[i].transpose()*(D[i]*local)
    return u
M_x = ddm_operator
M1 = LinearOperator(A.shape, M_x)
counter_ddm = Counter_Iter()
M_oras = sum(R[i].transpose()*D[i]*inv(K[i])*R[i] for i in range(nd))
usol_gmres, info = gmres(A,f,M = M_oras,restart = 2000, maxiter=maxiter, \
callback=counter_ddm, tol=tol)
rn = np.array(counter_ddm.rk)
np.save(geo, rn)
plt.semilogy(rn/max(rn))
counter_ddm = Counter_Iter()
usol_gmres, info = gmres(A,f,restart = 100, maxiter=maxiter, \
callback=counter_ddm, tol=tol) | OLD/main.py | from mshr import *
from dolfin import *
from nodes import *
from scipy.sparse import dok_matrix
from ddm import *
from matplotlib import pyplot as plt
import matplotlib.tri as tri
from scipy.sparse.linalg import spsolve, gmres, splu, LinearOperator, inv
from scipy.linalg import expm_cond
# Keep FEniCS dof ordering aligned with mesh vertex ordering.
parameters['reorder_dofs_serial'] = False
geo = 'geo12'
mesh = Mesh('Geometria/geo12.xml');
mt = mesh.num_vertices()
coord = mesh.coordinates()
nei = connect(mesh)
# Mean element size (unused below; kept for reference).
dmed = (mesh.hmax() + mesh.hmin())/2
Xn = nodes(coord)
Xn.set_neighborhood(nei)
# Number of subdomains
nd = 10
# NOTE(review): np.complex is a deprecated alias removed in NumPy 1.24 —
# verify the pinned NumPy version or switch to `complex`.
K = dok_matrix((Xn.size, Xn.size), dtype=np.complex)
# Helmholtz wavenumber.
k = 2*np.pi
f = np.zeros(Xn.size,dtype=np.complex)
A, f = assemble(Xn,k)
# Direct sparse solve: reference solution for the plots below.
u_d = spsolve(A,f)
triang = mesh2triang(mesh)
plt.gca().set_aspect('equal')
plt.tripcolor(triang, np.real(u_d),shading='gouraud')
plt.show()
# Build the overlapping decomposition: nd strips with relative overlap ovl.
dx = nd
ovl = 0.1
list_mesh = submeshes(mesh,nd,dx,ovl,Verbose=False)
r,rd = indices(list_mesh, mesh)
# Per-subdomain restriction (R), partition of unity (D), local matrices
# (K) and their LU factorizations (Kinv).
R = [];D = [];K = []; Kinv = []
for j in range(nd):
    Ri, Di = restriction(len(r[j]),mt,r[j],rd)
    submesh = list_mesh[j]
    nei = connect(submesh)
    Xj = nodes(submesh.coordinates())
    Xj.set_neighborhood(nei)
    Kj = dok_matrix((Xj.size, Xj.size), dtype=np.complex)
    # Assemble submatrix
    for I in range(Xj.size):
        support = Xj.coord[Xj.get_support(I),:]
        Phi, dPhix, dPhiy, dPhix2, dPhiy2 = shape_function(Xj.coord[I],support)
        if Xj.type[I] == 1: # Left Boundary
            n = 1
            # Interior interfaces get a second-order absorbing condition;
            # the physical left boundary (j == 0) gets first-order Robin.
            if j >= 1:
                Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k*Phi - 1j/(2*k)*dPhiy2
            else:
                Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi
        elif Xj.type[I] == 2: # Right Boundary
            n = -1
            # NOTE(review): `j < nd` is always true for j in range(nd), so
            # the else-branch is unreachable — probably meant `j < nd - 1`.
            if j < nd:
                Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi - 1j/(2*k)*dPhiy2
            else:
                Kj[I,Xj.get_support(I)] = n*dPhix + 1j*k* Phi
        elif Xj.type[I] == 3: # Bottom Boundary
            n = 1
            Kj[I,Xj.get_support(I)] = n*dPhiy + 1j*k*Phi
        elif Xj.type[I] == 4: # Top Boundary
            n = -1
            Kj[I,Xj.get_support(I)] = n*dPhiy + 1j*k*Phi
        else: # Internal Nodes
            # Interior Helmholtz operator: Laplacian + k^2.
            Kj[I,Xj.get_support(I)] = dPhix2 + dPhiy2 +(k**2)*Phi
    R.append(Ri)
    D.append(Di)
    K.append(Kj)
    Kinv.append(splu(Kj.tocsc()))
# GMRES stopping criteria for the preconditioned solves below.
maxiter = 1000
tol = 1e-5
# Removed dead `b = rhs`: `rhs` was never defined (NameError at runtime)
# and `b` was never used; the right-hand side used below is `f`.
def ddm_operator(r1):
    """Apply one restricted additive Schwarz preconditioner step.

    Solves each local subdomain problem on the restricted input vector
    and sums the partition-of-unity-weighted prolongations. The original
    computed `r1 - A*u` with `u` identically zero, which equals `r1`
    exactly; the dead matrix-vector product is removed.
    """
    u = 1j*np.zeros(A.shape[0])
    for i in range(nd):
        local = Kinv[i].solve(R[i]*r1)
        u = u + R[i].transpose()*(D[i]*local)
    return u
# Matrix-free preconditioner wrapper (built but the explicit M_oras
# matrix below is what is actually passed to gmres).
M_x = ddm_operator
M1 = LinearOperator(A.shape, M_x)
counter_ddm = Counter_Iter()
# Explicit (restricted) additive Schwarz preconditioner matrix.
M_oras = sum(R[i].transpose()*D[i]*inv(K[i])*R[i] for i in range(nd))
# Preconditioned solve; the callback records the residual history.
usol_gmres, info = gmres(A,f,M = M_oras,restart = 2000, maxiter=maxiter, \
                         callback=counter_ddm, tol=tol)
rn = np.array(counter_ddm.rk)
np.save(geo, rn)
# Normalized convergence curve of the preconditioned run.
plt.semilogy(rn/max(rn))
# Unpreconditioned baseline for comparison.
counter_ddm = Counter_Iter()
usol_gmres, info = gmres(A,f,restart = 100, maxiter=maxiter, \
                         callback=counter_ddm, tol=tol)
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.GetRosParam import GetRosParam
from sara_flexbe_states.Get_Entity_By_ID import GetEntityByID
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_behaviors.action_move_sm import Action_MoveSM
from sara_flexbe_states.get_reachable_waypoint import Get_Reacheable_Waypoint
from sara_flexbe_states.SetRosParam import SetRosParam
from sara_flexbe_states.get_speech import GetSpeech
from flexbe_states.check_condition_state import CheckConditionState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed May 09 2018
@author: <NAME>
'''
class Get_operatorSM(Behavior):
'''
Find an person and ask them to become operator
'''
def __init__(self):
super(Get_operatorSM, self).__init__()
self.name = 'Get_operator'
# parameters of this behavior
# references to used behaviors
self.add_behavior(Action_MoveSM, 'Move to person/Action_Move')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:814 y:45, x:514 y:274
_state_machine = OperatableStateMachine(outcomes=['Found', 'NotFound'], output_keys=['Operator'])
_state_machine.userdata.Operator = None
_state_machine.userdata.Name = "person"
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:506 y:393, x:515 y:462
_sm_move_to_person_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['Operator'])
with _sm_move_to_person_0:
# x:30 y:83
OperatableStateMachine.add('Getpos',
CalculationState(calculation=lambda x: x.position),
transitions={'done': 'setDistance'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Operator', 'output_value': 'pose_in'})
# x:35 y:450
OperatableStateMachine.add('Action_Move',
self.use_behavior(Action_MoveSM, 'Move to person/Action_Move'),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'pose': 'Pose'})
# x:47 y:368
OperatableStateMachine.add('set not rel',
SetKey(Value=False),
transitions={'done': 'Action_Move'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'relative'})
# x:41 y:179
OperatableStateMachine.add('setDistance',
SetKey(Value=1.5),
transitions={'done': 'Close position'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'distance'})
# x:27 y:280
OperatableStateMachine.add('Close position',
Get_Reacheable_Waypoint(),
transitions={'done': 'set not rel'},
autonomy={'done': Autonomy.Off},
remapping={'pose_in': 'pose_in', 'distance': 'distance', 'pose_out': 'Pose'})
with _state_machine:
# x:64 y:35
OperatableStateMachine.add('Get previous ID',
GetRosParam(ParamName="behavior/Operator/Id"),
transitions={'done': 'Get Operator', 'failed': 'for 3'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'Value': 'ID'})
# x:271 y:37
OperatableStateMachine.add('Get Operator',
GetEntityByID(),
transitions={'found': 'Found', 'not_found': 'Say lost operator'},
autonomy={'found': Autonomy.Off, 'not_found': Autonomy.Off},
remapping={'ID': 'ID', 'Entity': 'Operator'})
# x:263 y:155
OperatableStateMachine.add('Say lost operator',
SaraSay(sentence="I lost my operator", input_keys=[], emotion=1, block=True),
transitions={'done': 'for 3'},
autonomy={'done': Autonomy.Off})
# x:780 y:517
OperatableStateMachine.add('ask if operator',
SaraSay(sentence="Are you my operator?", input_keys=[], emotion=1, block=True),
transitions={'done': 'get speech'},
autonomy={'done': Autonomy.Off})
# x:70 y:273
OperatableStateMachine.add('for 3',
ForLoop(repeat=3),
transitions={'do': 'for 3_2', 'end': 'set None'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:249 y:357
OperatableStateMachine.add('say where are you',
SaraSay(sentence="Operator. Where are you?", input_keys=[], emotion=1, block=True),
transitions={'done': 'for 3'},
autonomy={'done': Autonomy.Off})
# x:281 y:265
OperatableStateMachine.add('set None',
SetKey(Value=None),
transitions={'done': 'NotFound'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Operator'})
# x:49 y:511
OperatableStateMachine.add('Get persons',
list_entities_by_name(frontality_level=0.5, distance_max=10),
transitions={'found': 'get next closest', 'none_found': 'say where are you'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'Name', 'entity_list': 'entity_list', 'number': 'number'})
# x:461 y:475
OperatableStateMachine.add('Move to person',
_sm_move_to_person_0,
transitions={'finished': 'ask if operator', 'failed': 'NotFound'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'Operator': 'Operator'})
# x:783 y:161
OperatableStateMachine.add('set new ID',
SetRosParam(ParamName="behavior/Operator/Id"),
transitions={'done': 'Found'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'ID'})
# x:775 y:269
OperatableStateMachine.add('get ID',
CalculationState(calculation=lambda x: x.ID),
transitions={'done': 'set new ID'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Operator', 'output_value': 'ID'})
# x:784 y:433
OperatableStateMachine.add('get speech',
GetSpeech(watchdog=5),
transitions={'done': 'Yes ?', 'nothing': 'for 3_2', 'fail': 'NotFound'},
autonomy={'done': Autonomy.Off, 'nothing': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'words': 'words'})
# x:69 y:402
OperatableStateMachine.add('for 3_2',
ForLoop(repeat=3),
transitions={'do': 'Get persons', 'end': 'set None'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index2'})
# x:744 y:332
OperatableStateMachine.add('Yes ?',
CheckConditionState(predicate=lambda x: "yes" in x),
transitions={'true': 'get ID', 'false': 'for 3_2'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'words'})
# x:263 y:535
OperatableStateMachine.add('get next closest',
FlexibleCalculationState(calculation=lambda x: x[0][x[1]], input_keys=["entity_list", "index"]),
transitions={'done': 'ask if operator'},
autonomy={'done': Autonomy.Off},
remapping={'entity_list': 'entity_list', 'index': 'index', 'output_value': 'Operator'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | sara_flexbe_behaviors/src/sara_flexbe_behaviors/get_operator_sm.py |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.GetRosParam import GetRosParam
from sara_flexbe_states.Get_Entity_By_ID import GetEntityByID
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_behaviors.action_move_sm import Action_MoveSM
from sara_flexbe_states.get_reachable_waypoint import Get_Reacheable_Waypoint
from sara_flexbe_states.SetRosParam import SetRosParam
from sara_flexbe_states.get_speech import GetSpeech
from flexbe_states.check_condition_state import CheckConditionState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed May 09 2018
@author: <NAME>
'''
class Get_operatorSM(Behavior):
'''
Find an person and ask them to become operator
'''
def __init__(self):
super(Get_operatorSM, self).__init__()
self.name = 'Get_operator'
# parameters of this behavior
# references to used behaviors
self.add_behavior(Action_MoveSM, 'Move to person/Action_Move')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:814 y:45, x:514 y:274
_state_machine = OperatableStateMachine(outcomes=['Found', 'NotFound'], output_keys=['Operator'])
_state_machine.userdata.Operator = None
_state_machine.userdata.Name = "person"
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:506 y:393, x:515 y:462
_sm_move_to_person_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['Operator'])
with _sm_move_to_person_0:
# x:30 y:83
OperatableStateMachine.add('Getpos',
CalculationState(calculation=lambda x: x.position),
transitions={'done': 'setDistance'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Operator', 'output_value': 'pose_in'})
# x:35 y:450
OperatableStateMachine.add('Action_Move',
self.use_behavior(Action_MoveSM, 'Move to person/Action_Move'),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'pose': 'Pose'})
# x:47 y:368
OperatableStateMachine.add('set not rel',
SetKey(Value=False),
transitions={'done': 'Action_Move'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'relative'})
# x:41 y:179
OperatableStateMachine.add('setDistance',
SetKey(Value=1.5),
transitions={'done': 'Close position'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'distance'})
# x:27 y:280
OperatableStateMachine.add('Close position',
Get_Reacheable_Waypoint(),
transitions={'done': 'set not rel'},
autonomy={'done': Autonomy.Off},
remapping={'pose_in': 'pose_in', 'distance': 'distance', 'pose_out': 'Pose'})
with _state_machine:
# x:64 y:35
OperatableStateMachine.add('Get previous ID',
GetRosParam(ParamName="behavior/Operator/Id"),
transitions={'done': 'Get Operator', 'failed': 'for 3'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'Value': 'ID'})
# x:271 y:37
OperatableStateMachine.add('Get Operator',
GetEntityByID(),
transitions={'found': 'Found', 'not_found': 'Say lost operator'},
autonomy={'found': Autonomy.Off, 'not_found': Autonomy.Off},
remapping={'ID': 'ID', 'Entity': 'Operator'})
# x:263 y:155
OperatableStateMachine.add('Say lost operator',
SaraSay(sentence="I lost my operator", input_keys=[], emotion=1, block=True),
transitions={'done': 'for 3'},
autonomy={'done': Autonomy.Off})
# x:780 y:517
OperatableStateMachine.add('ask if operator',
SaraSay(sentence="Are you my operator?", input_keys=[], emotion=1, block=True),
transitions={'done': 'get speech'},
autonomy={'done': Autonomy.Off})
# x:70 y:273
OperatableStateMachine.add('for 3',
ForLoop(repeat=3),
transitions={'do': 'for 3_2', 'end': 'set None'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:249 y:357
OperatableStateMachine.add('say where are you',
SaraSay(sentence="Operator. Where are you?", input_keys=[], emotion=1, block=True),
transitions={'done': 'for 3'},
autonomy={'done': Autonomy.Off})
# x:281 y:265
OperatableStateMachine.add('set None',
SetKey(Value=None),
transitions={'done': 'NotFound'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Operator'})
# x:49 y:511
OperatableStateMachine.add('Get persons',
list_entities_by_name(frontality_level=0.5, distance_max=10),
transitions={'found': 'get next closest', 'none_found': 'say where are you'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'Name', 'entity_list': 'entity_list', 'number': 'number'})
# x:461 y:475
OperatableStateMachine.add('Move to person',
_sm_move_to_person_0,
transitions={'finished': 'ask if operator', 'failed': 'NotFound'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'Operator': 'Operator'})
# x:783 y:161
OperatableStateMachine.add('set new ID',
SetRosParam(ParamName="behavior/Operator/Id"),
transitions={'done': 'Found'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'ID'})
# x:775 y:269
OperatableStateMachine.add('get ID',
CalculationState(calculation=lambda x: x.ID),
transitions={'done': 'set new ID'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Operator', 'output_value': 'ID'})
# x:784 y:433
OperatableStateMachine.add('get speech',
GetSpeech(watchdog=5),
transitions={'done': 'Yes ?', 'nothing': 'for 3_2', 'fail': 'NotFound'},
autonomy={'done': Autonomy.Off, 'nothing': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'words': 'words'})
# x:69 y:402
OperatableStateMachine.add('for 3_2',
ForLoop(repeat=3),
transitions={'do': 'Get persons', 'end': 'set None'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index2'})
# x:744 y:332
OperatableStateMachine.add('Yes ?',
CheckConditionState(predicate=lambda x: "yes" in x),
transitions={'true': 'get ID', 'false': 'for 3_2'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'words'})
# x:263 y:535
OperatableStateMachine.add('get next closest',
FlexibleCalculationState(calculation=lambda x: x[0][x[1]], input_keys=["entity_list", "index"]),
transitions={'done': 'ask if operator'},
autonomy={'done': Autonomy.Off},
remapping={'entity_list': 'entity_list', 'index': 'index', 'output_value': 'Operator'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | 0.343342 | 0.214527 |
import logging
import re
from collections import OrderedDict
from logging import getLogger
from pathlib import Path
from typing import List, Optional, Set, Tuple
from urllib.request import urlopen
from ordered_set import OrderedSet
from pronunciation_dict_parser.core.types import (Pronunciation,
PronunciationDict, Symbol,
Word)
from tqdm import tqdm
alternative_pronunciation_indicator_pattern = re.compile(r"\([0-9]+\)")
word_pronunciation_pattern = re.compile(r"([^\s]+)\s+(.+)")
symbol_separator_pattern = re.compile(r"\s+")
def parse_url(url: str, encoding: str) -> PronunciationDict:
logger = getLogger(__name__)
logger.info("Downloading dictionary content...")
lines = _read_url_lines(url, encoding)
logger.info("Parsing content...")
resulting_dict = parse_lines(lines)
logger.info("Done.")
logger.info(f"Dictionary entries: {len(resulting_dict)}")
return resulting_dict
def parse_dictionary_from_txt(path: Path, encoding: str, pronunciation_sep: str = None, symbol_sep: str = None, have_counter: bool = None, empty_symbol: Symbol = None) -> PronunciationDict:
logger = getLogger(__name__)
if path is None or not path.exists():
raise Exception()
logger.info("Loading dictionary file...")
lines = _read_lines(path, encoding)
logger.info("Parsing file...")
resulting_dict = parse_lines(lines)
logger.info("Done.")
logger.info(f"# Dictionary entries: {len(resulting_dict)}")
return resulting_dict
def get_occurring_symbols(dictionary: PronunciationDict) -> OrderedSet[Symbol]:
assert isinstance(dictionary, dict)
all_symbols: Set[Symbol] = OrderedSet(sorted({
symbol
for pronunciations in dictionary.values()
for pronunciation in pronunciations
for symbol in pronunciation
}))
return all_symbols
def _read_url_lines(url: str, encoding: str) -> List[str]:
with urlopen(url) as url_content:
result = [line.decode(encoding) for line in url_content]
return result
def _read_lines(file: Path, encoding: Optional[str]) -> List[str]:
assert isinstance(file, Path)
with file.open(encoding=encoding, mode="r") as f:
return f.readlines()
def parse_lines(lines: List[str]) -> PronunciationDict:
result: PronunciationDict = OrderedDict()
logger = getLogger(__name__)
use_tqdm = logger.level <= logging.INFO
data = tqdm(lines) if use_tqdm else lines
for line_nr, line in enumerate(data, start=1):
line_should_be_processed = __should_line_be_processed(line, line_nr)
if line_should_be_processed:
_process_line(line, result, line_nr)
return result
def sort_after_words(dictionary: PronunciationDict) -> PronunciationDict:
result = OrderedDict({k: dictionary[k] for k in sorted(dictionary.keys())})
return result
def _process_line(line: str, dictionary: PronunciationDict, line_nr: int) -> None:
logger = getLogger(__name__)
splitting_result = __try_get_word_and_pronunciation(line)
if splitting_result is None:
logger = getLogger(__name__)
logger.warning(f"Line {line_nr}: Couldn't parse \"{line}\".")
return None
word, pronunciation_arpa = splitting_result
word_upper = word.upper()
if word_upper not in dictionary:
dictionary[word_upper] = OrderedSet()
already_contained = pronunciation_arpa in dictionary[word_upper]
if already_contained:
logger.warning(
f"Line {line_nr}: For word \"{word}\" the same pronunciation \"{' '.join(list(pronunciation_arpa))}\" exists multiple times!")
else:
dictionary[word_upper].add(pronunciation_arpa)
return None
def __try_get_word_and_pronunciation(line: str) -> Optional[Tuple[Word, Pronunciation]]:
line = line.strip()
splitting_result = __try_split_word_pronunciation(line)
if splitting_result is None:
return None
word_str, pronunciation_str = splitting_result
word_str = __remove_double_indicators(word_str)
pronunciation: Pronunciation = tuple(re.split(symbol_separator_pattern, pronunciation_str))
return word_str, pronunciation
def __try_split_word_pronunciation(line: str) -> Optional[Tuple[Word, str]]:
res = re.match(word_pronunciation_pattern, line)
if res is None:
return None
word = res.group(1)
pronunciation_str = res.group(2)
return word, pronunciation_str
def __remove_double_indicators(word: Word) -> Word:
''' example: ABBE(1) => ABBE '''
result = re.sub(alternative_pronunciation_indicator_pattern, '', word)
return result
def __should_line_be_processed(line: str, line_nr: int) -> bool:
logger = getLogger(__name__)
is_empty = len(line) == 0
if is_empty:
logger.info(f"Line {line_nr}: Ignoring empty line.")
return False
is_comment = line.startswith(";;;")
if is_comment:
stripped_line = line.strip("\n")
logger.info(f"Line {line_nr}: Ignoring comment -> \"{stripped_line}\".")
return False
return True | src/pronunciation_dict_parser/core/parser.py | import logging
import re
from collections import OrderedDict
from logging import getLogger
from pathlib import Path
from typing import List, Optional, Set, Tuple
from urllib.request import urlopen
from ordered_set import OrderedSet
from pronunciation_dict_parser.core.types import (Pronunciation,
PronunciationDict, Symbol,
Word)
from tqdm import tqdm
alternative_pronunciation_indicator_pattern = re.compile(r"\([0-9]+\)")
word_pronunciation_pattern = re.compile(r"([^\s]+)\s+(.+)")
symbol_separator_pattern = re.compile(r"\s+")
def parse_url(url: str, encoding: str) -> PronunciationDict:
logger = getLogger(__name__)
logger.info("Downloading dictionary content...")
lines = _read_url_lines(url, encoding)
logger.info("Parsing content...")
resulting_dict = parse_lines(lines)
logger.info("Done.")
logger.info(f"Dictionary entries: {len(resulting_dict)}")
return resulting_dict
def parse_dictionary_from_txt(path: Path, encoding: str, pronunciation_sep: str = None, symbol_sep: str = None, have_counter: bool = None, empty_symbol: Symbol = None) -> PronunciationDict:
logger = getLogger(__name__)
if path is None or not path.exists():
raise Exception()
logger.info("Loading dictionary file...")
lines = _read_lines(path, encoding)
logger.info("Parsing file...")
resulting_dict = parse_lines(lines)
logger.info("Done.")
logger.info(f"# Dictionary entries: {len(resulting_dict)}")
return resulting_dict
def get_occurring_symbols(dictionary: PronunciationDict) -> OrderedSet[Symbol]:
assert isinstance(dictionary, dict)
all_symbols: Set[Symbol] = OrderedSet(sorted({
symbol
for pronunciations in dictionary.values()
for pronunciation in pronunciations
for symbol in pronunciation
}))
return all_symbols
def _read_url_lines(url: str, encoding: str) -> List[str]:
with urlopen(url) as url_content:
result = [line.decode(encoding) for line in url_content]
return result
def _read_lines(file: Path, encoding: Optional[str]) -> List[str]:
assert isinstance(file, Path)
with file.open(encoding=encoding, mode="r") as f:
return f.readlines()
def parse_lines(lines: List[str]) -> PronunciationDict:
result: PronunciationDict = OrderedDict()
logger = getLogger(__name__)
use_tqdm = logger.level <= logging.INFO
data = tqdm(lines) if use_tqdm else lines
for line_nr, line in enumerate(data, start=1):
line_should_be_processed = __should_line_be_processed(line, line_nr)
if line_should_be_processed:
_process_line(line, result, line_nr)
return result
def sort_after_words(dictionary: PronunciationDict) -> PronunciationDict:
result = OrderedDict({k: dictionary[k] for k in sorted(dictionary.keys())})
return result
def _process_line(line: str, dictionary: PronunciationDict, line_nr: int) -> None:
logger = getLogger(__name__)
splitting_result = __try_get_word_and_pronunciation(line)
if splitting_result is None:
logger = getLogger(__name__)
logger.warning(f"Line {line_nr}: Couldn't parse \"{line}\".")
return None
word, pronunciation_arpa = splitting_result
word_upper = word.upper()
if word_upper not in dictionary:
dictionary[word_upper] = OrderedSet()
already_contained = pronunciation_arpa in dictionary[word_upper]
if already_contained:
logger.warning(
f"Line {line_nr}: For word \"{word}\" the same pronunciation \"{' '.join(list(pronunciation_arpa))}\" exists multiple times!")
else:
dictionary[word_upper].add(pronunciation_arpa)
return None
def __try_get_word_and_pronunciation(line: str) -> Optional[Tuple[Word, Pronunciation]]:
line = line.strip()
splitting_result = __try_split_word_pronunciation(line)
if splitting_result is None:
return None
word_str, pronunciation_str = splitting_result
word_str = __remove_double_indicators(word_str)
pronunciation: Pronunciation = tuple(re.split(symbol_separator_pattern, pronunciation_str))
return word_str, pronunciation
def __try_split_word_pronunciation(line: str) -> Optional[Tuple[Word, str]]:
res = re.match(word_pronunciation_pattern, line)
if res is None:
return None
word = res.group(1)
pronunciation_str = res.group(2)
return word, pronunciation_str
def __remove_double_indicators(word: Word) -> Word:
''' example: ABBE(1) => ABBE '''
result = re.sub(alternative_pronunciation_indicator_pattern, '', word)
return result
def __should_line_be_processed(line: str, line_nr: int) -> bool:
logger = getLogger(__name__)
is_empty = len(line) == 0
if is_empty:
logger.info(f"Line {line_nr}: Ignoring empty line.")
return False
is_comment = line.startswith(";;;")
if is_comment:
stripped_line = line.strip("\n")
logger.info(f"Line {line_nr}: Ignoring comment -> \"{stripped_line}\".")
return False
return True | 0.763572 | 0.166947 |
# Import statements
import copy
import sys
import time
import collections
import RUN_stimpy as stimpy
from RUN_stimpy import Premise, Query
from rule_reading_system import Rule, MP_Rule
import rule_settings
import wordnet_relations
import spacy
from anytree import Node, RenderTree
from anytree.exporter import DotExporter
# Settings
import warnings
warnings.filterwarnings("ignore")
# ----------------------------------------
### PREPROCESSING: Load spacy, rules and samples
# ----------------------------------------
# Get spacy nlp for English
nlp = spacy.load('en')
# Get single-premise (sp) and multi-premise (mp) rules
sp_rules = [Rule(r) for r in rule_settings.rule_set]
mp_rules = [MP_Rule(r) for r in rule_settings.mp_rule_set]
rules = sp_rules + mp_rules
# Load rules
for rule in rules:
rule.load_rule(verbose=False)
# Load samples
with open("../Evaluation/Test Samples/test_samples.txt") as infile:
samples_raw = infile.read()
samples = samples_raw.split('\n\n')
# ----------------------------------------
### FUNCTIONS: Printing, parsing and pipeline
# ----------------------------------------
def print_rules():
"""Print all rules in use."""
print('Rules:\n***********')
for rule in rules:
print('- ',rule.data)
def rule_frequencies(all_applied_rules, n=5):
rule_counter=collections.Counter(all_applied_rules)
print()
for rule, frequency in rule_counter.most_common(n):
print(rule)
print(frequency,'x')
print()
def parse_example(sample, verbose=False):
"""Parse samples into comment, premises, hypothesis, relation and validity."""
# Split sample into lines
lines = sample.split('\n')
# Discard empty lines
lines = [line.strip() for line in lines if line.strip() != '']
# Get sample comment
sample_comment = lines[0]
# Get premises (starting with -)
premises = [line.split('-')[1].strip() for line in lines
if line.startswith('-')]
# Get hypothesis
hypothesis = lines[-3]
hypothesis = hypothesis.split(':')[1].strip()
# Get relation
relation = lines[-2]
relation = relation.split(':')[1].strip()
# Get validity
validity = lines[-1]
validity = validity.split(':')[1].strip()
# Print sample
if verbose:
print('\nExample', sample_comment)
print('Premises:')
for prem in premises:
print('-', prem)
print('Hypothesis:',hypothesis)
print('Relation:',relation)
print('Validity:',validity)
# Return segmented and clean parts of sample
return sample_comment, premises, hypothesis, relation, validity
def evaluation_pipeline(samples, full_tree=False):
"""Pipeline for processing and testing single/multi-premise samples."""
# Initial settings
correct_validity = 0
correct_relation = 0
incorrectly_solved = []
# Parse samples and filter out samples that are to be ignored
samples = [parse_example(sample) for sample in samples]
samples = [(sample_comment, premises, hypothesis, relation, validity)
for (sample_comment, premises, hypothesis, relation, validity)
in samples if 'ignore' not in sample_comment.lower()]
# If full tree is created, prepare outfile
if full_tree:
with open('results/trees.txt','w') as outfile:
outfile.write('ALL TRANSITION TREES\n--------------------\n\n')
# Final number of samples
n_examples = len(samples)
# If there are no samples, exit
if not samples:
print('No samples in this set.')
sys.exit
# Lists for collecting applied rules and transitions
all_applied_rules = []
n_transitions = []
# For each sample...
for samp_nr, sample in enumerate(samples):
print('---------------')
print(' Sample #', samp_nr+1)
print('---------------')
# Get sample infos
sample_comment, premises, hypothesis, relation, validity = sample
# Parse query with spacy and save as Token and Sent instances
query_parse = nlp(hypothesis)
query_tokens, query_sent = stimpy.get_tokens_and_sent(query_parse)
query = Query(query_tokens, query_sent)
# Validity and relation in case no better solution is found
fallback_validity = None
fallback_relation = None
# List for premises
all_parsed_premises = []
# Parse and save all premises for further processing
for i,prem in enumerate(premises):
# Parse and save all premises
parsed_premise = nlp(prem)
prem_tokens, prem_sent = stimpy.get_tokens_and_sent(parsed_premise)
premise_instance = Premise(prem_tokens, prem_sent)
all_parsed_premises.append((i,premise_instance))
# Print sample number
if full_tree:
with open('results/trees.txt','a') as outfile:
outfile.write('\n---------------------------------------\n\n'
+str(samp_nr)+'\n')
# For each premise
for i, premise in enumerate(premises):
# Print
print('Processing Premise', str(i+1),'...')
print(premise)
print()
# Save premise as string
string_premise = premise
# Parse premise
premise_parse = nlp(premise)
premise_tokens, premise_sent = stimpy.get_tokens_and_sent(premise_parse)
# Get the other premises
other_premises = [p for (j,p) in all_parsed_premises if j!=i]
# Save original premise
original_premise = Premise(premise_tokens, premise_sent)
premise = copy.deepcopy(original_premise)
# Save original and other premises as attributes to premise
premise.original_premise = original_premise
premise.other_premises = other_premises
# Set polarity scop for premise
premise.set_polarity_scope()
# Wordnet settings
wordnet_sent_to_words_premise = [(t.lemma,t.u_pos) for t
in premise_tokens if t.u_pos
in ['NOUN','ADJ','ADV','VERB']]
wordnet_relations.get_all_wordnet_connections(
wordnet_sent_to_words_premise)
# Process premise in inference pipeline
root_node, PREMISE = stimpy.start_transformation_pipeline(rules, premise, query,
verbose=False,
full_tree=full_tree)
# Print inference tree
stimpy.print_inference_tree(root_node)
# Save inference tree as picture
DotExporter(root_node).to_picture("results/transformation_tree.png")
# Save tree for each hypothesis-premise pair
if full_tree:
with open('results/trees.txt','a') as outfile:
outfile.write('Premise: '+string_premise+'\n')
outfile.write('Hypothesis: '+hypothesis+'\n\n')
outfile.write('Number transitions: '
+str(len(PREMISE.all_branches)))
for pre, fill, node in RenderTree(root_node):
out = "%s%s" % (pre, node.name)+'\n'
outfile.write(out)
outfile.write('\n\n')
# Print statements
print('\n***** RESULTS *****')
print('Relation:', PREMISE.final_relation)
print('Inference is', PREMISE.final_validity)
print()
print('# Total transitions: '+
str(len(set(PREMISE.all_branches))))
print()
# Save number of transitions for this hypothesis-premise pair
n_transitions.append(len(set(PREMISE.all_branches)))
# Save all applied rules
all_applied_rules += PREMISE.all_applied_rules
# Computed validity and relation
computed_validity = PREMISE.final_validity
computed_relation = PREMISE.final_relation
# If computed relation not unknown, a solution was found
if computed_relation not in ['UNKNOWN','unknown']:
break
# If computed relation is unknown or not found
else:
# If available, use fallback solution (usually 'unknown')
try:
computed_validity = PREMISE.fallback_validity
computed_relation = PREMISE.fallback_relation
fallback_validity = computed_validity
fallback_relation = computed_relation
break
# Otherwise, assign "unknown"
except AttributeError:
computed_validity = 'unknown'
computed_relation = 'unknown'
# If try using fallback solution if nothing else is found
if computed_relation in ['UNKNOWN', 'unknown']:
if fallback_validity != None:
computed_validity = fallback_validity
computed_relation = fallback_relation
else:
computed_validity = 'unknown'
computed_relation = 'unknown'
# Print solutions
print('Correct answer: ', validity)
print('Computed answer:', computed_validity)
print('Correct relation: ', relation)
print('Computed relation:', computed_relation)
# Determine whether computed validity and relation are correct
if validity == computed_validity:
print('Correct!')
correct_validity +=1
else:
print('Wrong...')
# Save incorrect samples for later inspection
incorrectly_solved.append((samp_nr, sample_comment))
if relation == computed_relation:
correct_relation += 1
else:
# Save incorrect samples for later inspection
incorrectly_solved.append((samp_nr, sample_comment))
print('\n********************************')
# Print final results
print(' PERFORMANCE OVERVIEW ')
print('********************************')
print('Validity Accuracy:')
print(str(correct_validity)+'/'+str(n_examples))
print(round(correct_validity/n_examples*100.00,2))
print()
print('Relation Accuracy:')
print(str(correct_relation)+'/'+str(n_examples))
print(round(correct_relation/n_examples*100.00,2))
print()
print('Transitions')
print('Total:', sum(n_transitions))
print('Avg: ', round(sum(n_transitions)/len(n_transitions),2))
print('Min: ', min(n_transitions))
print('Max: ', max(n_transitions))
if get_most_frequent_rules:
print('Most frequently used rules:\n')
rule_frequencies(all_applied_rules)
print()
# Print incorrectly solved samples by number and comment
if incorrectly_solved:
print('\nProblematic samples:')
for samp_nr, comment in set(incorrectly_solved):
print(comment)
# ----------------------------------------
### RUN: call pipeline for evaluation
# ----------------------------------------
# Print most freuquently used rules
get_most_frequent_rules = False
start = time.time()
evaluation_pipeline(samples)
end = time.time()
# Proessing time
print('\nProcessing time:')
print(round((end - start)/60,2), 'minutes')
### RESULTS:
# Problematic sampels:
# 19/20
# 33/34 | Scripts/EVALUATE_dev_samples.py | # Import statements
import copy
import sys
import time
import collections
import RUN_stimpy as stimpy
from RUN_stimpy import Premise, Query
from rule_reading_system import Rule, MP_Rule
import rule_settings
import wordnet_relations
import spacy
from anytree import Node, RenderTree
from anytree.exporter import DotExporter
# Settings
import warnings
warnings.filterwarnings("ignore")
# ----------------------------------------
### PREPROCESSING: Load spacy, rules and samples
# ----------------------------------------
# Get spacy nlp for English
nlp = spacy.load('en')
# Get single-premise (sp) and multi-premise (mp) rules
sp_rules = [Rule(r) for r in rule_settings.rule_set]
mp_rules = [MP_Rule(r) for r in rule_settings.mp_rule_set]
rules = sp_rules + mp_rules
# Load rules
for rule in rules:
rule.load_rule(verbose=False)
# Load samples
with open("../Evaluation/Test Samples/test_samples.txt") as infile:
samples_raw = infile.read()
samples = samples_raw.split('\n\n')
# ----------------------------------------
### FUNCTIONS: Printing, parsing and pipeline
# ----------------------------------------
def print_rules():
    """Print a header followed by the data of every rule in use."""
    print('Rules:\n***********')
    for rule_obj in rules:
        print('- ', rule_obj.data)
def rule_frequencies(all_applied_rules, n=5):
    """Print the *n* most frequently applied rules with their usage counts."""
    top_rules = collections.Counter(all_applied_rules).most_common(n)
    print()
    for applied_rule, times_used in top_rules:
        print(applied_rule)
        print(times_used, 'x')
    print()
def parse_example(sample, verbose=False):
    """Parse a raw sample into (comment, premises, hypothesis, relation, validity).

    A sample is a block of lines: a free-form comment line first, one line per
    premise prefixed with '-', then 'Hypothesis:', 'Relation:' and 'Validity:'
    lines (in that order) at the end.

    @param sample: The raw multi-line sample string.
    @param verbose: If True, pretty-print the parsed parts.
    @return: Tuple (sample_comment, premises, hypothesis, relation, validity).
    """
    # Split sample into lines and discard empty ones
    lines = [line.strip() for line in sample.split('\n') if line.strip() != '']
    # Get sample comment (always the first line)
    sample_comment = lines[0]
    # Premises are the lines starting with '-'. Split only on the FIRST '-'
    # so premises that themselves contain hyphens stay intact (a plain
    # split('-') truncated e.g. 'well-known' at the inner hyphen).
    premises = [line.split('-', 1)[1].strip() for line in lines
                if line.startswith('-')]
    # The last three lines carry hypothesis, relation and validity; split on
    # the first ':' only so values containing colons are not truncated.
    hypothesis = lines[-3].split(':', 1)[1].strip()
    relation = lines[-2].split(':', 1)[1].strip()
    validity = lines[-1].split(':', 1)[1].strip()
    # Print sample
    if verbose:
        print('\nExample', sample_comment)
        print('Premises:')
        for prem in premises:
            print('-', prem)
        print('Hypothesis:', hypothesis)
        print('Relation:', relation)
        print('Validity:', validity)
    # Return segmented and clean parts of sample
    return sample_comment, premises, hypothesis, relation, validity
def evaluation_pipeline(samples, full_tree=False):
    """Pipeline for processing and testing single/multi-premise samples.

    Parses each sample, runs every premise through the transformation
    pipeline against the hypothesis, and prints per-sample results plus a
    final accuracy overview.

    @param samples: Raw sample strings (see parse_example for the format).
    @param full_tree: If True, additionally write every transition tree to
        results/trees.txt.
    """
    # Counters for correct answers and a list of problematic samples
    correct_validity = 0
    correct_relation = 0
    incorrectly_solved = []
    # Parse samples and filter out samples that are to be ignored
    samples = [parse_example(sample) for sample in samples]
    samples = [(sample_comment, premises, hypothesis, relation, validity)
               for (sample_comment, premises, hypothesis, relation, validity)
               in samples if 'ignore' not in sample_comment.lower()]
    # If full tree is created, prepare outfile
    if full_tree:
        with open('results/trees.txt', 'w') as outfile:
            outfile.write('ALL TRANSITION TREES\n--------------------\n\n')
    # Final number of samples
    n_examples = len(samples)
    # If there are no samples, stop early. NOTE: this used to be a bare
    # `sys.exit` (without parentheses), which is a no-op attribute access;
    # execution then continued into a ZeroDivisionError in the accuracy
    # computation below. An explicit return fixes both problems.
    if not samples:
        print('No samples in this set.')
        return
    # Lists for collecting applied rules and transitions
    all_applied_rules = []
    n_transitions = []
    # For each sample...
    for samp_nr, sample in enumerate(samples):
        print('---------------')
        print(' Sample #', samp_nr+1)
        print('---------------')
        # Get sample infos
        sample_comment, premises, hypothesis, relation, validity = sample
        # Parse query with spacy and save as Token and Sent instances
        query_parse = nlp(hypothesis)
        query_tokens, query_sent = stimpy.get_tokens_and_sent(query_parse)
        query = Query(query_tokens, query_sent)
        # Validity and relation in case no better solution is found
        fallback_validity = None
        fallback_relation = None
        # Parse and save all premises for further processing
        all_parsed_premises = []
        for i, prem in enumerate(premises):
            parsed_premise = nlp(prem)
            prem_tokens, prem_sent = stimpy.get_tokens_and_sent(parsed_premise)
            premise_instance = Premise(prem_tokens, prem_sent)
            all_parsed_premises.append((i, premise_instance))
        # Print sample number to the tree file
        if full_tree:
            with open('results/trees.txt', 'a') as outfile:
                outfile.write('\n---------------------------------------\n\n'
                              + str(samp_nr) + '\n')
        # For each premise
        for i, premise in enumerate(premises):
            print('Processing Premise', str(i+1), '...')
            print(premise)
            print()
            # Save premise as string (needed for the tree file below)
            string_premise = premise
            # Parse premise
            premise_parse = nlp(premise)
            premise_tokens, premise_sent = stimpy.get_tokens_and_sent(premise_parse)
            # Get the other premises
            other_premises = [p for (j, p) in all_parsed_premises if j != i]
            # Keep the original premise; work on a deep copy so the pipeline
            # can mutate it freely
            original_premise = Premise(premise_tokens, premise_sent)
            premise = copy.deepcopy(original_premise)
            # Save original and other premises as attributes to premise
            premise.original_premise = original_premise
            premise.other_premises = other_premises
            # Set polarity scope for premise
            premise.set_polarity_scope()
            # Wordnet settings: collect content-word lemmas for lookup
            wordnet_sent_to_words_premise = [(t.lemma, t.u_pos) for t
                                             in premise_tokens if t.u_pos
                                             in ['NOUN', 'ADJ', 'ADV', 'VERB']]
            wordnet_relations.get_all_wordnet_connections(
                wordnet_sent_to_words_premise)
            # Process premise in inference pipeline
            root_node, PREMISE = stimpy.start_transformation_pipeline(rules, premise, query,
                                                                      verbose=False,
                                                                      full_tree=full_tree)
            # Print inference tree
            stimpy.print_inference_tree(root_node)
            # Save inference tree as picture
            DotExporter(root_node).to_picture("results/transformation_tree.png")
            # Save tree for each hypothesis-premise pair
            if full_tree:
                with open('results/trees.txt', 'a') as outfile:
                    outfile.write('Premise: ' + string_premise + '\n')
                    outfile.write('Hypothesis: ' + hypothesis + '\n\n')
                    outfile.write('Number transitions: '
                                  + str(len(PREMISE.all_branches)))
                    for pre, fill, node in RenderTree(root_node):
                        out = "%s%s" % (pre, node.name) + '\n'
                        outfile.write(out)
                    outfile.write('\n\n')
            # Print per-premise results
            print('\n***** RESULTS *****')
            print('Relation:', PREMISE.final_relation)
            print('Inference is', PREMISE.final_validity)
            print()
            print('# Total transitions: ' +
                  str(len(set(PREMISE.all_branches))))
            print()
            # Save number of transitions for this hypothesis-premise pair
            n_transitions.append(len(set(PREMISE.all_branches)))
            # Save all applied rules
            all_applied_rules += PREMISE.all_applied_rules
            # Computed validity and relation
            computed_validity = PREMISE.final_validity
            computed_relation = PREMISE.final_relation
            # If computed relation not unknown, a solution was found
            if computed_relation not in ['UNKNOWN', 'unknown']:
                break
            # If computed relation is unknown or not found
            else:
                # If available, use fallback solution (usually 'unknown')
                try:
                    computed_validity = PREMISE.fallback_validity
                    computed_relation = PREMISE.fallback_relation
                    fallback_validity = computed_validity
                    fallback_relation = computed_relation
                    break
                # Otherwise, assign "unknown"
                except AttributeError:
                    computed_validity = 'unknown'
                    computed_relation = 'unknown'
        # Use the fallback solution if nothing better was found
        if computed_relation in ['UNKNOWN', 'unknown']:
            if fallback_validity is not None:
                computed_validity = fallback_validity
                computed_relation = fallback_relation
            else:
                computed_validity = 'unknown'
                computed_relation = 'unknown'
        # Print solutions
        print('Correct answer: ', validity)
        print('Computed answer:', computed_validity)
        print('Correct relation: ', relation)
        print('Computed relation:', computed_relation)
        # Determine whether computed validity and relation are correct
        if validity == computed_validity:
            print('Correct!')
            correct_validity += 1
        else:
            print('Wrong...')
            # Save incorrect samples for later inspection
            incorrectly_solved.append((samp_nr, sample_comment))
        if relation == computed_relation:
            correct_relation += 1
        else:
            # Save incorrect samples for later inspection
            incorrectly_solved.append((samp_nr, sample_comment))
    # Print final results
    print('\n********************************')
    print(' PERFORMANCE OVERVIEW ')
    print('********************************')
    print('Validity Accuracy:')
    print(str(correct_validity) + '/' + str(n_examples))
    print(round(correct_validity/n_examples*100.00, 2))
    print()
    print('Relation Accuracy:')
    print(str(correct_relation) + '/' + str(n_examples))
    print(round(correct_relation/n_examples*100.00, 2))
    print()
    print('Transitions')
    print('Total:', sum(n_transitions))
    print('Avg: ', round(sum(n_transitions)/len(n_transitions), 2))
    print('Min: ', min(n_transitions))
    print('Max: ', max(n_transitions))
    # Relies on the module-level flag set in the RUN section below
    if get_most_frequent_rules:
        print('Most frequently used rules:\n')
        rule_frequencies(all_applied_rules)
        print()
    # Print incorrectly solved samples by number and comment
    if incorrectly_solved:
        print('\nProblematic samples:')
        for samp_nr, comment in set(incorrectly_solved):
            print(comment)
# ----------------------------------------
### RUN: call the evaluation pipeline on the loaded samples
# ----------------------------------------
# Toggle to print the most frequently used rules at the end of the run
get_most_frequent_rules = False
start = time.time()
evaluation_pipeline(samples)
end = time.time()
# Report wall-clock processing time in minutes
print('\nProcessing time:')
print(round((end - start)/60,2), 'minutes')
### RESULTS (manual notes from a previous run):
# Problematic samples:
# 19/20
# 33/34 | 0.472197 | 0.222447 |
import requests
import re
from bs4 import BeautifulSoup
import json
import time
import unicodedata
def get_web(currenturl):
    """Return the raw response body of *currenturl*, or None on any request error."""
    try:
        response = requests.get(currenturl)
        response.raise_for_status()
    except requests.RequestException as exc:
        # Best-effort scraping: report the failure and let the caller handle None.
        print(exc)
        return None
    return response.content
# Translation table mapping "fancy" punctuation to plain equivalents.
# Replaces the original chain of 14 re.sub calls (none of which used a real
# regex). Identity replacements from the original (bullet, currency signs)
# are dropped as no-ops, as is the dead second pass over \u2009/\xa0.
# BUG FIXED: the original re.sub("\2010", '-', ...) used an octal escape
# (chr(0o201) + '0'), not the intended Unicode hyphen U+2010.
_PUNCT_MAP = str.maketrans({
    '\u2019': "'",    # right single quote
    '\u2018': "'",    # left single quote
    '\u2014': '-',    # em dash
    '\u2010': '-',    # hyphen
    '\u201c': '"',    # left double quote
    '\u201d': '"',    # right double quote
    '\u2026': '...',  # ellipsis
    '\u00a0': ' ',    # non-breaking space
    '\u2009': None,   # thin space (removed)
})

def get_para(output, currenturl, num, dom, domain, website, text_start, text_end):
    """Scrape the paragraphs of *currenturl*, clean them and append one JSON
    record (article ID, url, domain, website, full text) to *output*.

    @param output: Path of the JSON-lines file to append to.
    @param currenturl: Article URL to fetch.
    @param num: Running article number used to build the article ID.
    @param dom: One-letter domain code prefixed to the article ID.
    @param domain: Domain label copied into the record.
    @param website: Website label copied into the record.
    @param text_start: Start index of the <p> tag slice to keep.
    @param text_end: End index of the <p> tag slice to keep.
    """
    time.sleep(1)  # be polite to the server
    text = get_web(currenturl)
    if text is None:
        # Download failed (get_web already reported it): skip this article
        # instead of crashing BeautifulSoup with a None argument.
        return
    soup = BeautifulSoup(text, 'html.parser')
    para_list = soup.find_all('p')[text_start:text_end]  # all paragraphs
    article = []
    for para in para_list:
        # Strip tags, normalize whitespace runs as the original did,
        # translate punctuation, then collapse remaining whitespace.
        cleaned = re.sub('<[^<]+?>', '', str(para))
        cleaned = re.sub(' +\t*\n*', ' ', cleaned)
        cleaned = re.sub('\t*\n*', '', cleaned)
        cleaned = cleaned.translate(_PUNCT_MAP)
        cleaned = unicodedata.normalize("NFKD", cleaned)
        article.append(' '.join(cleaned.split()))
    record = {
        'article ID': dom + str(num).zfill(6),
        'url': currenturl,
        'domain': domain,
        'website': website,
        'full text': article,
    }
    # One JSON object per line; the context manager guarantees the file is
    # closed even if json.dumps raises (original left it open on error).
    with open(output, "a", encoding='utf-8') as f:
        f.write(json.dumps(record))
        f.write('\n')
def main():
    """Read article URLs from url-md.txt and scrape each unique one.

    Appends one JSON record per article to amd_articles.txt via get_para.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    input_path = "url-md.txt"
    output = "amd_articles.txt"
    dom = 'H'
    domain = 'Health'
    website = "medical daily"
    num = 1  # starting ID
    text_start, text_end = 0, -1
    # Track already-scraped URLs; a set gives O(1) membership tests
    # (the original used a list, O(n) per lookup).
    seen = set()
    with open(input_path) as f:
        for line in f:
            currenturl = line.strip('\n')
            if currenturl not in seen:
                seen.add(currenturl)
                get_para(output, currenturl, num, dom, domain, website, text_start, text_end)
                print('finished URL' + str(num))
                num += 1
    return
main() | scrape_article.py | import requests
import re
from bs4 import BeautifulSoup
import json
import time
import unicodedata
def get_web(currenturl):
    """Return the raw response body of *currenturl*, or None on any request error."""
    try:
        response = requests.get(currenturl)
        response.raise_for_status()
    except requests.RequestException as exc:
        # Best-effort scraping: report the failure and let the caller handle None.
        print(exc)
        return None
    return response.content
# Translation table mapping "fancy" punctuation to plain equivalents.
# Replaces the original chain of 14 re.sub calls (none of which used a real
# regex). Identity replacements from the original (bullet, currency signs)
# are dropped as no-ops, as is the dead second pass over \u2009/\xa0.
# BUG FIXED: the original re.sub("\2010", '-', ...) used an octal escape
# (chr(0o201) + '0'), not the intended Unicode hyphen U+2010.
_PUNCT_MAP = str.maketrans({
    '\u2019': "'",    # right single quote
    '\u2018': "'",    # left single quote
    '\u2014': '-',    # em dash
    '\u2010': '-',    # hyphen
    '\u201c': '"',    # left double quote
    '\u201d': '"',    # right double quote
    '\u2026': '...',  # ellipsis
    '\u00a0': ' ',    # non-breaking space
    '\u2009': None,   # thin space (removed)
})

def get_para(output, currenturl, num, dom, domain, website, text_start, text_end):
    """Scrape the paragraphs of *currenturl*, clean them and append one JSON
    record (article ID, url, domain, website, full text) to *output*.

    @param output: Path of the JSON-lines file to append to.
    @param currenturl: Article URL to fetch.
    @param num: Running article number used to build the article ID.
    @param dom: One-letter domain code prefixed to the article ID.
    @param domain: Domain label copied into the record.
    @param website: Website label copied into the record.
    @param text_start: Start index of the <p> tag slice to keep.
    @param text_end: End index of the <p> tag slice to keep.
    """
    time.sleep(1)  # be polite to the server
    text = get_web(currenturl)
    if text is None:
        # Download failed (get_web already reported it): skip this article
        # instead of crashing BeautifulSoup with a None argument.
        return
    soup = BeautifulSoup(text, 'html.parser')
    para_list = soup.find_all('p')[text_start:text_end]  # all paragraphs
    article = []
    for para in para_list:
        # Strip tags, normalize whitespace runs as the original did,
        # translate punctuation, then collapse remaining whitespace.
        cleaned = re.sub('<[^<]+?>', '', str(para))
        cleaned = re.sub(' +\t*\n*', ' ', cleaned)
        cleaned = re.sub('\t*\n*', '', cleaned)
        cleaned = cleaned.translate(_PUNCT_MAP)
        cleaned = unicodedata.normalize("NFKD", cleaned)
        article.append(' '.join(cleaned.split()))
    record = {
        'article ID': dom + str(num).zfill(6),
        'url': currenturl,
        'domain': domain,
        'website': website,
        'full text': article,
    }
    # One JSON object per line; the context manager guarantees the file is
    # closed even if json.dumps raises (original left it open on error).
    with open(output, "a", encoding='utf-8') as f:
        f.write(json.dumps(record))
        f.write('\n')
def main():
    """Read article URLs from url-md.txt and scrape each unique one.

    Appends one JSON record per article to amd_articles.txt via get_para.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    input_path = "url-md.txt"
    output = "amd_articles.txt"
    dom = 'H'
    domain = 'Health'
    website = "medical daily"
    num = 1  # starting ID
    text_start, text_end = 0, -1
    # Track already-scraped URLs; a set gives O(1) membership tests
    # (the original used a list, O(n) per lookup).
    seen = set()
    with open(input_path) as f:
        for line in f:
            currenturl = line.strip('\n')
            if currenturl not in seen:
                seen.add(currenturl)
                get_para(output, currenturl, num, dom, domain, website, text_start, text_end)
                print('finished URL' + str(num))
                num += 1
    return
main() | 0.075649 | 0.06134 |
import os
import subprocess
import click
import dotenv
import locale
import requests
from dialog import Dialog
class Jump:
    """Interactive dialog-based SSH jump menu.

    Fetches an application/server list as JSON from a configured HTTP
    endpoint, lets the user pick an application and a server via dialog
    menus, then execs an ssh command to the chosen server.
    """

    # Class-level defaults; both are rebound to fresh lists per call below.
    items: list = []
    formatted_menu_items: list = []

    def __init__(self) -> None:
        self.d: Dialog = Dialog()
        self.get_item_list()
        self.run()

    def get_item_list(self) -> None:
        """Download the application list from the ENDPOINT env var.

        If AUTH_KEY is set, it is sent in the header named by AUTH_HEADER.
        """
        secret_key: str = os.environ.get('AUTH_KEY')
        extra_headers: dict = {}
        if secret_key is not None:
            extra_headers[os.environ.get('AUTH_HEADER')] = secret_key
        self.items: list = requests.get(os.environ.get('ENDPOINT'), headers=extra_headers).json()

    def format_items(self, items: list, servers_list: bool = False) -> None:
        """Rebuild self.formatted_menu_items as (label, '') menu tuples.

        For application lists, entries whose 'in_jumpgate' flag is False are
        skipped; for server lists every entry's 'display_name' is used.
        """
        self.formatted_menu_items: list = []
        for item in items:
            if servers_list is False and item['in_jumpgate'] is False:
                pass  # hide applications that are not in the jumpgate
            else:
                self.formatted_menu_items.append((item['name'] if not servers_list else item['display_name'], ''))

    def create_menu(self, title: str, items: list, cancel_label: str = 'Back') -> tuple:
        """Show a dialog menu and return its (exit_code, chosen_tag) tuple."""
        return self.d.menu(
            text=title,
            choices=items,
            menu_height=15,
            cancel_label=cancel_label,
        )

    def get_server_info(self, app: str) -> dict:
        """Return the application dict named *app* (implicitly None if absent)."""
        for item in self.items:
            if item['name'] == app:
                return item

    def get_server_items(self, app: str, server_name: str) -> dict:
        """Return *app*'s server dict with display name *server_name*
        (implicitly None if absent)."""
        app_object: dict = self.get_server_info(app)
        for server in app_object['servers']:
            if server['display_name'] == server_name:
                return server

    def run(self):
        """Main menu loop: pick an application, pick a server, run ssh.

        NOTE(review): the loop is implemented via recursion (self.run() at
        the end of each branch), so a very long session could in principle
        hit the recursion limit.
        """
        self.format_items(self.items)
        code, app = self.create_menu('Choose an application', self.formatted_menu_items, 'Exit')
        if code == self.d.OK:
            self.format_items(self.get_server_info(app)['servers'], True)
            code, server = self.create_menu('Choose a server', sorted(self.formatted_menu_items, key=self.sort_servers))
            if code == self.d.CANCEL:
                self.run()
            else:
                server_info: dict = self.get_server_items(app, server)
                # ServerPilot hosts get a cd into the app directory first.
                if server_info['is_serverpilot']:
                    command: str = 'ssh -p{} {}@{} -t "cd /srv/users/serverpilot/apps/{}; exec /bin/bash -l"'
                else:
                    command: str = 'ssh -p{} {}@{}'
                subprocess.call(
                    command.format(
                        server_info['port'],
                        server_info['user'],
                        server_info['ip'],
                        app
                    ),
                    shell=True,
                )
                self.run()

    def sort_servers(self, server: tuple) -> int:
        """Sort key for server menu tuples: Staging < Acceptance < Production.

        Unknown names sort with Staging (weight -1)."""
        weights = {
            'Staging': -1,
            'Acceptance': 0,
            'Production': 1,
        }
        return weights[server[0]] if server[0] in weights else -1
@click.command()
@click.option('--env-file')
def main(env_file):
    """Entry point: load the .env configuration and start the jump menu.

    --env-file optionally points at the env file; defaults to ~/.jump.env.
    Exits with status 1 if the file does not exist.
    """
    if env_file is None:
        env_file: str = '{}/.jump.env'.format(os.environ.get('HOME'))
    if os.path.exists(env_file) is False:
        click.secho('Can not find .env file in {}'.format(env_file), fg='red')
        exit(1)
    dotenv.load_dotenv(env_file)
    locale.setlocale(locale.LC_ALL, '')
    try:
        Jump()
    except KeyboardInterrupt:
        # Ctrl-C simply quits the menu without a traceback.
        pass
import os
import subprocess
import click
import dotenv
import locale
import requests
from dialog import Dialog
class Jump:
    """Interactive dialog-based SSH jump menu.

    Fetches an application/server list as JSON from a configured HTTP
    endpoint, lets the user pick an application and a server via dialog
    menus, then execs an ssh command to the chosen server.
    """

    # Class-level defaults; both are rebound to fresh lists per call below.
    items: list = []
    formatted_menu_items: list = []

    def __init__(self) -> None:
        self.d: Dialog = Dialog()
        self.get_item_list()
        self.run()

    def get_item_list(self) -> None:
        """Download the application list from the ENDPOINT env var.

        If AUTH_KEY is set, it is sent in the header named by AUTH_HEADER.
        """
        secret_key: str = os.environ.get('AUTH_KEY')
        extra_headers: dict = {}
        if secret_key is not None:
            extra_headers[os.environ.get('AUTH_HEADER')] = secret_key
        self.items: list = requests.get(os.environ.get('ENDPOINT'), headers=extra_headers).json()

    def format_items(self, items: list, servers_list: bool = False) -> None:
        """Rebuild self.formatted_menu_items as (label, '') menu tuples.

        For application lists, entries whose 'in_jumpgate' flag is False are
        skipped; for server lists every entry's 'display_name' is used.
        """
        self.formatted_menu_items: list = []
        for item in items:
            if servers_list is False and item['in_jumpgate'] is False:
                pass  # hide applications that are not in the jumpgate
            else:
                self.formatted_menu_items.append((item['name'] if not servers_list else item['display_name'], ''))

    def create_menu(self, title: str, items: list, cancel_label: str = 'Back') -> tuple:
        """Show a dialog menu and return its (exit_code, chosen_tag) tuple."""
        return self.d.menu(
            text=title,
            choices=items,
            menu_height=15,
            cancel_label=cancel_label,
        )

    def get_server_info(self, app: str) -> dict:
        """Return the application dict named *app* (implicitly None if absent)."""
        for item in self.items:
            if item['name'] == app:
                return item

    def get_server_items(self, app: str, server_name: str) -> dict:
        """Return *app*'s server dict with display name *server_name*
        (implicitly None if absent)."""
        app_object: dict = self.get_server_info(app)
        for server in app_object['servers']:
            if server['display_name'] == server_name:
                return server

    def run(self):
        """Main menu loop: pick an application, pick a server, run ssh.

        NOTE(review): the loop is implemented via recursion (self.run() at
        the end of each branch), so a very long session could in principle
        hit the recursion limit.
        """
        self.format_items(self.items)
        code, app = self.create_menu('Choose an application', self.formatted_menu_items, 'Exit')
        if code == self.d.OK:
            self.format_items(self.get_server_info(app)['servers'], True)
            code, server = self.create_menu('Choose a server', sorted(self.formatted_menu_items, key=self.sort_servers))
            if code == self.d.CANCEL:
                self.run()
            else:
                server_info: dict = self.get_server_items(app, server)
                # ServerPilot hosts get a cd into the app directory first.
                if server_info['is_serverpilot']:
                    command: str = 'ssh -p{} {}@{} -t "cd /srv/users/serverpilot/apps/{}; exec /bin/bash -l"'
                else:
                    command: str = 'ssh -p{} {}@{}'
                subprocess.call(
                    command.format(
                        server_info['port'],
                        server_info['user'],
                        server_info['ip'],
                        app
                    ),
                    shell=True,
                )
                self.run()

    def sort_servers(self, server: tuple) -> int:
        """Sort key for server menu tuples: Staging < Acceptance < Production.

        Unknown names sort with Staging (weight -1)."""
        weights = {
            'Staging': -1,
            'Acceptance': 0,
            'Production': 1,
        }
        return weights[server[0]] if server[0] in weights else -1
@click.command()
@click.option('--env-file')
def main(env_file):
    """Entry point: load the .env configuration and start the jump menu.

    --env-file optionally points at the env file; defaults to ~/.jump.env.
    Exits with status 1 if the file does not exist.
    """
    if env_file is None:
        env_file: str = '{}/.jump.env'.format(os.environ.get('HOME'))
    if os.path.exists(env_file) is False:
        click.secho('Can not find .env file in {}'.format(env_file), fg='red')
        exit(1)
    dotenv.load_dotenv(env_file)
    locale.setlocale(locale.LC_ALL, '')
    try:
        Jump()
    except KeyboardInterrupt:
        # Ctrl-C simply quits the menu without a traceback.
        pass
import unittest
import copy
from unittest.mock import Mock
from algorithms.configuration.entities.agent import Agent
from algorithms.configuration.entities.goal import Goal
from algorithms.configuration.entities.obstacle import Obstacle
from algorithms.configuration.entities.trace import Trace
from algorithms.configuration.maps.dense_map import DenseMap
from algorithms.configuration.maps.sparse_map import SparseMap
from maps.maps import Maps
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from structures import Size, Point
class TestSparseMap(unittest.TestCase):
    """Unit tests for SparseMap: copying, equality, conversion to/from
    DenseMap, agent movement, goal checks, position validity and __str__.
    """

    def test_copy(self) -> None:
        """A shallow copy compares equal to the original map."""
        map1: SparseMap = Maps.pixel_map_one_obstacle
        map2: SparseMap = copy.copy(map1)
        self.assertEqual(map1, map2)

    def test_deep_copy(self) -> None:
        """A deep copy compares equal to the original map."""
        map1: SparseMap = Maps.pixel_map_one_obstacle
        map2: SparseMap = copy.deepcopy(map1)
        self.assertEqual(map1, map2)

    def test_eq(self) -> None:
        """Two maps built from identical components compare equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        self.assertEqual(map1, map2)

    def test_ne_size(self) -> None:
        """Maps differing only in size are not equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(400, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        self.assertNotEqual(map1, map2)

    def test_ne_agent(self) -> None:
        """Maps differing only in agent radius are not equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 15),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        self.assertNotEqual(map1, map2)

    def test_ne_goal(self) -> None:
        """Maps differing only in goal position are not equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(100, 160), 10))
        self.assertNotEqual(map1, map2)

    def test_ne_obstacle(self) -> None:
        """Maps differing only in one obstacle are not equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(90, 100), 40)],
                                    Goal(Point(180, 160), 10))
        self.assertNotEqual(map1, map2)

    def test_ne_all(self) -> None:
        """Maps differing in every component are not equal."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: SparseMap = SparseMap(Size(100, 200),
                                    Agent(Point(10, 20), 10),
                                    [Obstacle(Point(100, 100), 35)],
                                    Goal(Point(180, 10), 10))
        self.assertNotEqual(map1, map2)

    def test_ne_dense(self) -> None:
        """A SparseMap is not equal to its DenseMap conversion (this direction)."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: DenseMap = SparseMap(Size(200, 200),
                                   Agent(Point(20, 20), 10),
                                   [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                   Goal(Point(180, 160), 10)).convert_to_dense_map()
        self.assertNotEqual(map1, map2)

    def test_ne_instance(self) -> None:
        """A SparseMap is never equal to an unrelated type."""
        map1: SparseMap = SparseMap(Size(200, 200),
                                    Agent(Point(20, 20), 10),
                                    [Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
                                    Goal(Point(180, 160), 10))
        map2: int = 2
        self.assertNotEqual(map1, map2)

    def test_eq_dense_map(self) -> None:
        """A SparseMap compares equal to an equivalent DenseMap (this direction)."""
        map1: DenseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ])
        map2: SparseMap = SparseMap(
            Size(4, 3),
            Agent(Point(0, 1)),
            [Obstacle(Point(0, 0)), Obstacle(Point(1, 0)), Obstacle(Point(2, 0)), Obstacle(Point(3, 0))],
            Goal(Point(3, 2))
        )
        self.assertEqual(map2, map1)

    def test_convert_to_dense_map(self) -> None:
        """convert_to_dense_map yields a map equal to the source."""
        map1: SparseMap = SparseMap(
            Size(4, 3),
            Agent(Point(0, 1)),
            [Obstacle(Point(0, 0)), Obstacle(Point(1, 0)), Obstacle(Point(2, 0)), Obstacle(Point(3, 0))],
            Goal(Point(3, 2))
        )
        map2: DenseMap = map1.convert_to_dense_map()
        self.assertEqual(map1, map2)

    def test_move_agent_normal(self) -> None:
        """Moving to a free cell updates position and records a trace entry."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        map1.move_agent(Point(1, 1))
        self.assertEqual(Point(1, 1), map1.agent.position)
        # BUG FIXED: this was assertTrue(list, map1.trace), where the truthy
        # list made the assertion always pass (the map was just the msg arg).
        # assertEqual matches the sibling test_move_agent_out_of_bounds.
        self.assertEqual([Trace(Point(1, 1))], map1.trace)

    def test_move_agent_no_trace(self) -> None:
        """Moving with follow=True leaves the trace empty."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        map1.move_agent(Point(1, 1), True)
        self.assertEqual(Point(1, 1), map1.agent.position)
        self.assertEqual([], map1.trace)

    def test_move_agent_out_of_bounds(self) -> None:
        """Moving out of bounds keeps the agent in place."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        map1.move_agent(Point(-1, 0))
        self.assertEqual(Point(0, 1), map1.agent.position)
        self.assertEqual([Trace(Point(0, 1))], map1.trace)

    def test_is_goal_reached_normal(self) -> None:
        """The goal cell itself counts as reached."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertTrue(map1.is_goal_reached(Point(3, 2)))

    def test_is_goal_reached_false(self) -> None:
        """A non-goal cell does not count as reached."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertFalse(map1.is_goal_reached(Point(2, 2)))

    def test_is_goal_reached_out_of_bounds(self) -> None:
        """An out-of-bounds point does not count as reached."""
        map1: SparseMap = DenseMap([
            [DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertFalse(map1.is_goal_reached(Point(-1, -1)))

    def test_is_valid_position_normal(self) -> None:
        """Clear, goal and extended-wall cells are valid agent positions."""
        map1: SparseMap = DenseMap([
            [DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertTrue(map1.is_agent_valid_pos(Point(1, 1)))
        self.assertTrue(map1.is_agent_valid_pos(Point(0, 1)))
        self.assertTrue(map1.is_agent_valid_pos(Point(3, 2)))
        self.assertTrue(map1.is_agent_valid_pos(Point(0, 0)))

    def test_is_valid_position_invalid(self) -> None:
        """Wall cells and out-of-bounds points are invalid agent positions."""
        map1: SparseMap = DenseMap([
            [DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.WALL_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertFalse(map1.is_agent_valid_pos(Point(1, 0)))
        self.assertFalse(map1.is_agent_valid_pos(Point(-1, -1)))

    def test_str(self) -> None:
        """__str__ lists size, agent, obstacles and goal.

        NOTE(review): the expected literal is whitespace-sensitive; it is
        reproduced exactly as found in the source.
        """
        map1: SparseMap = DenseMap([
            [DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
            [DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.WALL_ID],
            [DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
        ]).convert_to_sparse_map()
        self.assertEqual("""SparseMap: {
size: Size(4, 3),
agent: Agent: {position: Point(0, 1), radius: 0},
obstacles: {
size: 4,
entities: [
Obstacle: {position: Point(1, 0), radius: 0},
Obstacle: {position: Point(2, 0), radius: 0},
Obstacle: {position: Point(3, 0), radius: 0},
Obstacle: {position: Point(3, 1), radius: 0},
]
},
goal: Goal: {position: Point(3, 2), radius: 0}
}""", str(map1))

    def test_str_debug_level_3(self) -> None:
        """With HIGH debug level every obstacle entity is listed in __str__."""
        services: Services = Mock()
        services.settings.simulator_write_debug_level = DebugLevel.HIGH
        map1: SparseMap = DenseMap([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [2, 3, 0, 0, 0, 0, 0, 0, 0, 0]
        ], services=services).convert_to_sparse_map()
        self.assertEqual("""SparseMap: {
size: Size(10, 3),
agent: Agent: {position: Point(0, 2), radius: 0},
obstacles: {
size: 20,
entities: [
Obstacle: {position: Point(0, 0), radius: 0},
Obstacle: {position: Point(1, 0), radius: 0},
Obstacle: {position: Point(2, 0), radius: 0},
Obstacle: {position: Point(3, 0), radius: 0},
Obstacle: {position: Point(4, 0), radius: 0},
Obstacle: {position: Point(5, 0), radius: 0},
Obstacle: {position: Point(6, 0), radius: 0},
Obstacle: {position: Point(7, 0), radius: 0},
Obstacle: {position: Point(8, 0), radius: 0},
Obstacle: {position: Point(9, 0), radius: 0},
Obstacle: {position: Point(0, 1), radius: 0},
Obstacle: {position: Point(1, 1), radius: 0},
Obstacle: {position: Point(2, 1), radius: 0},
Obstacle: {position: Point(3, 1), radius: 0},
Obstacle: {position: Point(4, 1), radius: 0},
Obstacle: {position: Point(5, 1), radius: 0},
Obstacle: {position: Point(6, 1), radius: 0},
Obstacle: {position: Point(7, 1), radius: 0},
Obstacle: {position: Point(8, 1), radius: 0},
Obstacle: {position: Point(9, 1), radius: 0},
]
},
goal: Goal: {position: Point(1, 2), radius: 0}
}""", str(map1))
import copy
from unittest.mock import Mock
from algorithms.configuration.entities.agent import Agent
from algorithms.configuration.entities.goal import Goal
from algorithms.configuration.entities.obstacle import Obstacle
from algorithms.configuration.entities.trace import Trace
from algorithms.configuration.maps.dense_map import DenseMap
from algorithms.configuration.maps.sparse_map import SparseMap
from maps.maps import Maps
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from structures import Size, Point
class TestSparseMap(unittest.TestCase):
def test_copy(self) -> None:
map1: SparseMap = Maps.pixel_map_one_obstacle
map2: SparseMap = copy.copy(map1)
self.assertEqual(map1, map2)
def test_deep_copy(self) -> None:
map1: SparseMap = Maps.pixel_map_one_obstacle
map2: SparseMap = copy.deepcopy(map1)
self.assertEqual(map1, map2)
def test_eq(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
self.assertEqual(map1, map2)
def test_ne_size(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(400, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
self.assertNotEqual(map1, map2)
def test_ne_agent(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 15),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
self.assertNotEqual(map1, map2)
def test_ne_goal(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(100, 160), 10))
self.assertNotEqual(map1, map2)
def test_ne_obstacle(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(90, 100), 40)],
Goal(Point(180, 160), 10))
self.assertNotEqual(map1, map2)
def test_ne_all(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: SparseMap = SparseMap(Size(100, 200),
Agent(Point(10, 20), 10),
[Obstacle(Point(100, 100), 35)],
Goal(Point(180, 10), 10))
self.assertNotEqual(map1, map2)
def test_ne_dense(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: DenseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10)).convert_to_dense_map()
self.assertNotEqual(map1, map2)
def test_ne_instance(self) -> None:
map1: SparseMap = SparseMap(Size(200, 200),
Agent(Point(20, 20), 10),
[Obstacle(Point(40, 40), 10), Obstacle(Point(100, 100), 40)],
Goal(Point(180, 160), 10))
map2: int = 2
self.assertNotEqual(map1, map2)
def test_eq_dense_map(self) -> None:
map1: DenseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
])
map2: SparseMap = SparseMap(
Size(4, 3),
Agent(Point(0, 1)),
[Obstacle(Point(0, 0)), Obstacle(Point(1, 0)), Obstacle(Point(2, 0)), Obstacle(Point(3, 0))],
Goal(Point(3, 2))
)
self.assertEqual(map2, map1)
def test_convert_to_dense_map(self) -> None:
map1: SparseMap = SparseMap(
Size(4, 3),
Agent(Point(0, 1)),
[Obstacle(Point(0, 0)), Obstacle(Point(1, 0)), Obstacle(Point(2, 0)), Obstacle(Point(3, 0))],
Goal(Point(3, 2))
)
map2: DenseMap = map1.convert_to_dense_map()
self.assertEqual(map1, map2)
def test_move_agent_normal(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
map1.move_agent(Point(1, 1))
self.assertEqual(Point(1, 1), map1.agent.position)
self.assertTrue([Trace(Point(1, 1))], map1.trace)
def test_move_agent_no_trace(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
map1.move_agent(Point(1, 1), True)
self.assertEqual(Point(1, 1), map1.agent.position)
self.assertEqual([], map1.trace)
def test_move_agent_out_of_bounds(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
map1.move_agent(Point(-1, 0))
self.assertEqual(Point(0, 1), map1.agent.position)
self.assertEqual([Trace(Point(0, 1))], map1.trace)
def test_is_goal_reached_normal(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertTrue(map1.is_goal_reached(Point(3, 2)))
def test_is_goal_reached_false(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertFalse(map1.is_goal_reached(Point(2, 2)))
def test_is_goal_reached_out_of_bounds(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertFalse(map1.is_goal_reached(Point(-1, -1)))
def test_is_valid_position_normal(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertTrue(map1.is_agent_valid_pos(Point(1, 1)))
self.assertTrue(map1.is_agent_valid_pos(Point(0, 1)))
self.assertTrue(map1.is_agent_valid_pos(Point(3, 2)))
self.assertTrue(map1.is_agent_valid_pos(Point(0, 0)))
def test_is_valid_position_invalid(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.WALL_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertFalse(map1.is_agent_valid_pos(Point(1, 0)))
self.assertFalse(map1.is_agent_valid_pos(Point(-1, -1)))
def test_str(self) -> None:
map1: SparseMap = DenseMap([
[DenseMap.EXTENDED_WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID, DenseMap.WALL_ID],
[DenseMap.AGENT_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.WALL_ID],
[DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.CLEAR_ID, DenseMap.GOAL_ID],
]).convert_to_sparse_map()
self.assertEqual("""SparseMap: {
size: Size(4, 3),
agent: Agent: {position: Point(0, 1), radius: 0},
obstacles: {
size: 4,
entities: [
Obstacle: {position: Point(1, 0), radius: 0},
Obstacle: {position: Point(2, 0), radius: 0},
Obstacle: {position: Point(3, 0), radius: 0},
Obstacle: {position: Point(3, 1), radius: 0},
]
},
goal: Goal: {position: Point(3, 2), radius: 0}
}""", str(map1))
def test_str_debug_level_3(self) -> None:
services: Services = Mock()
services.settings.simulator_write_debug_level = DebugLevel.HIGH
map1: SparseMap = DenseMap([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 3, 0, 0, 0, 0, 0, 0, 0, 0]
], services=services).convert_to_sparse_map()
self.assertEqual("""SparseMap: {
size: Size(10, 3),
agent: Agent: {position: Point(0, 2), radius: 0},
obstacles: {
size: 20,
entities: [
Obstacle: {position: Point(0, 0), radius: 0},
Obstacle: {position: Point(1, 0), radius: 0},
Obstacle: {position: Point(2, 0), radius: 0},
Obstacle: {position: Point(3, 0), radius: 0},
Obstacle: {position: Point(4, 0), radius: 0},
Obstacle: {position: Point(5, 0), radius: 0},
Obstacle: {position: Point(6, 0), radius: 0},
Obstacle: {position: Point(7, 0), radius: 0},
Obstacle: {position: Point(8, 0), radius: 0},
Obstacle: {position: Point(9, 0), radius: 0},
Obstacle: {position: Point(0, 1), radius: 0},
Obstacle: {position: Point(1, 1), radius: 0},
Obstacle: {position: Point(2, 1), radius: 0},
Obstacle: {position: Point(3, 1), radius: 0},
Obstacle: {position: Point(4, 1), radius: 0},
Obstacle: {position: Point(5, 1), radius: 0},
Obstacle: {position: Point(6, 1), radius: 0},
Obstacle: {position: Point(7, 1), radius: 0},
Obstacle: {position: Point(8, 1), radius: 0},
Obstacle: {position: Point(9, 1), radius: 0},
]
},
goal: Goal: {position: Point(1, 2), radius: 0}
}""", str(map1)) | 0.622574 | 0.706034 |
import pytest
from dataclasses import dataclass
import critbit
@dataclass
class Ingredient:
name: str
enabled: bool = None
@dataclass
class Recipe:
name: str
ingredients: set
def test_no_matches():
"""Test when a match is successful"""
in_kitchen = (
Ingredient(name='steak'),
Ingredient(name='butter'),
Ingredient(name='salt'),
Ingredient(name='pepper')
)
recipes = [
Recipe(
name='pancakes',
ingredients=(
Ingredient(name='milk'),
Ingredient(name='eggs'),
Ingredient(name='flower'),
Ingredient(name='oil')
)
)
]
criteria = critbit.create_criteria(in_kitchen, 'name')
applicants = critbit.create_applicants(recipes, 'ingredients.name', criteria)
assert not critbit.closest(applicants, criteria)
def test_criteria_key_not_found():
"""Test when specified attribute key doesnt exist for criteria."""
with pytest.raises(critbit.KeyNotFound):
fridge = (
Ingredient(name='milk'),
)
critbit.create_criteria(fridge, 'namezzz')
def test_closest():
""""""
@dataclass
class Feature:
feature_id: str
name: str
@dataclass
class Vehicle:
vehicle_id: str
make: str
features: list
features = [
Feature(feature_id=1, name='satnav'),
Feature(feature_id=2, name='leather seats'),
Feature(feature_id=3, name='heated seats'),
Feature(feature_id=4, name='reverse camera'),
Feature(feature_id=5, name='bluetooth'),
Feature(feature_id=6, name='remote start'),
Feature(feature_id=7, name='parking sensors'),
Feature(feature_id=8, name='apple carplay/android auto'),
Feature(feature_id=9, name='sun roof'),
Feature(feature_id=10, name='cruise control'),
]
vehicles = [
Vehicle(
vehicle_id=1,
make='Ford',
features=[
Feature(feature_id=4, name='reverse camera')
]
),
Vehicle(
vehicle_id=2,
make='BMW',
features=[
Feature(feature_id=1, name='satnav'),
]
),
Vehicle(
vehicle_id=3,
make='Mercedes',
features=[
Feature(feature_id=1, name='satnav'),
Feature(feature_id=4, name='reverse camera'),
Feature(feature_id=9, name='sun roof'),
]
)
]
criteria = critbit.create_criteria(features, 'feature_id')
applicants = critbit.create_applicants(vehicles, 'features.feature_id', criteria)
closest = critbit.closest(applicants, criteria)
assert closest.object.vehicle_id == 3 | tests/test_critbit.py | import pytest
from dataclasses import dataclass
import critbit
@dataclass
class Ingredient:
name: str
enabled: bool = None
@dataclass
class Recipe:
name: str
ingredients: set
def test_no_matches():
"""Test when a match is successful"""
in_kitchen = (
Ingredient(name='steak'),
Ingredient(name='butter'),
Ingredient(name='salt'),
Ingredient(name='pepper')
)
recipes = [
Recipe(
name='pancakes',
ingredients=(
Ingredient(name='milk'),
Ingredient(name='eggs'),
Ingredient(name='flower'),
Ingredient(name='oil')
)
)
]
criteria = critbit.create_criteria(in_kitchen, 'name')
applicants = critbit.create_applicants(recipes, 'ingredients.name', criteria)
assert not critbit.closest(applicants, criteria)
def test_criteria_key_not_found():
"""Test when specified attribute key doesnt exist for criteria."""
with pytest.raises(critbit.KeyNotFound):
fridge = (
Ingredient(name='milk'),
)
critbit.create_criteria(fridge, 'namezzz')
def test_closest():
""""""
@dataclass
class Feature:
feature_id: str
name: str
@dataclass
class Vehicle:
vehicle_id: str
make: str
features: list
features = [
Feature(feature_id=1, name='satnav'),
Feature(feature_id=2, name='leather seats'),
Feature(feature_id=3, name='heated seats'),
Feature(feature_id=4, name='reverse camera'),
Feature(feature_id=5, name='bluetooth'),
Feature(feature_id=6, name='remote start'),
Feature(feature_id=7, name='parking sensors'),
Feature(feature_id=8, name='apple carplay/android auto'),
Feature(feature_id=9, name='sun roof'),
Feature(feature_id=10, name='cruise control'),
]
vehicles = [
Vehicle(
vehicle_id=1,
make='Ford',
features=[
Feature(feature_id=4, name='reverse camera')
]
),
Vehicle(
vehicle_id=2,
make='BMW',
features=[
Feature(feature_id=1, name='satnav'),
]
),
Vehicle(
vehicle_id=3,
make='Mercedes',
features=[
Feature(feature_id=1, name='satnav'),
Feature(feature_id=4, name='reverse camera'),
Feature(feature_id=9, name='sun roof'),
]
)
]
criteria = critbit.create_criteria(features, 'feature_id')
applicants = critbit.create_applicants(vehicles, 'features.feature_id', criteria)
closest = critbit.closest(applicants, criteria)
assert closest.object.vehicle_id == 3 | 0.69368 | 0.476945 |
import clr
import sys
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import xml.etree.ElementTree as ET
hardcodedshortcuts = {
"ID_APP_EXIT": ["Alt+Fn4"],
"ID_BUTTON_DELETE": ["Delete"],
"ID_BUTTON_REDO": ["Ctrl+Y","Ctrl+Shift+Z"],
"ID_BUTTON_UNDO": ["Ctrl+Z","Alt+Backspace"],
"ID_CHECK_SPELLING": ["Fn7"],
"ID_EDIT_COPY": ["Ctrl+C","Ctrl+Insert"],
"ID_EDIT_CUT": ["Ctrl+X","Ctrl+Delete"],
"ID_EDIT_PASTE": ["Ctrl+V"],
"ID_FILE_NEW_CHOOSE_TEMPLATE": ["Ctrl+N"],
"ID_REVIT_FILE_CLOSE": ["Ctrl+W"],
"ID_REVIT_FILE_OPEN": ["Ctrl+O"],
"ID_REVIT_FILE_PRINT": ["Ctrl+P"],
"ID_REVIT_FILE_SAVE": ["Ctrl+S"],
"ID_SCHEDULE_VIEW_ZOOM_IN": ["Ctrl++"],
"ID_SCHEDULE_VIEW_ZOOM_OUT": ["Ctrl+-"],
"ID_SCHEDULE_VIEW_ZOOM_RESTORE": ["Ctrl+0"]
}
class KeyboardShortcuts:
def __init__(self, commands, commandcount, commandcountwithshortcuts):
self.Commands = commands
self.CommandCount = commandcount
self.CommandCountWithShortcuts = commandcountwithshortcuts
def __repr__(self):
return 'KeyboardShortcuts'
def GetCommandById(self, id):
found = [x for x in self.Commands if x.ID == id]
if len(found) > 0: return found[0]
else: return None
def GetCommandsWithShortcuts(self):
return [x for x in self.Commands if x.HasShortcuts]
class KeyboardShortcutCommand:
def __init__(self, name, id, shortcuts, paths):
self.Name = name.decode("utf-8")
self.ID = id
self.Shortcuts = shortcuts
self.Paths = paths
self.HasShortcuts = len(shortcuts) > 0
self.HasPaths = len(paths) > 0
def __repr__(self):
return 'KeyboardShortcutCommand'
def KSFromPath(path):
try:
Commands = []
CommandCount = 0
CommandCountWithShortcuts = 0
root = e = ET.parse(path).getroot()
for child in root:
if child.tag == "ShortcutItem":
CommandCount += 1
CommandId = child.get("CommandId")
shortcuts = child.get("Shortcuts")
if shortcuts == None:
if CommandId in hardcodedshortcuts:
CommandShortcuts = hardcodedshortcuts[CommandId]
CommandCountWithShortcuts += 1
else: CommandShortcuts = []
else:
CommandShortcuts = shortcuts.split("#")
if CommandId in hardcodedshortcuts: CommandShortcuts = CommandShortcuts + hardcodedshortcuts[CommandId]
CommandCountWithShortcuts += 1
paths = child.get("Paths")
if paths == None: CommandPaths = []
else: CommandPaths = paths.split("; ")
Commands.append(KeyboardShortcutCommand(child.get("CommandName"), CommandId, CommandShortcuts, CommandPaths))
return KeyboardShortcuts(Commands, CommandCount, CommandCountWithShortcuts)
except:
import traceback
return traceback.format_exc()
if isinstance(IN[0], list): OUT = [KSFromPath(x) for x in IN[0]]
else: OUT = KSFromPath(IN[0]) | nodes/2.x/python/KeyboardShortcuts.ByPath.py | import clr
import sys
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import xml.etree.ElementTree as ET
hardcodedshortcuts = {
"ID_APP_EXIT": ["Alt+Fn4"],
"ID_BUTTON_DELETE": ["Delete"],
"ID_BUTTON_REDO": ["Ctrl+Y","Ctrl+Shift+Z"],
"ID_BUTTON_UNDO": ["Ctrl+Z","Alt+Backspace"],
"ID_CHECK_SPELLING": ["Fn7"],
"ID_EDIT_COPY": ["Ctrl+C","Ctrl+Insert"],
"ID_EDIT_CUT": ["Ctrl+X","Ctrl+Delete"],
"ID_EDIT_PASTE": ["Ctrl+V"],
"ID_FILE_NEW_CHOOSE_TEMPLATE": ["Ctrl+N"],
"ID_REVIT_FILE_CLOSE": ["Ctrl+W"],
"ID_REVIT_FILE_OPEN": ["Ctrl+O"],
"ID_REVIT_FILE_PRINT": ["Ctrl+P"],
"ID_REVIT_FILE_SAVE": ["Ctrl+S"],
"ID_SCHEDULE_VIEW_ZOOM_IN": ["Ctrl++"],
"ID_SCHEDULE_VIEW_ZOOM_OUT": ["Ctrl+-"],
"ID_SCHEDULE_VIEW_ZOOM_RESTORE": ["Ctrl+0"]
}
class KeyboardShortcuts:
def __init__(self, commands, commandcount, commandcountwithshortcuts):
self.Commands = commands
self.CommandCount = commandcount
self.CommandCountWithShortcuts = commandcountwithshortcuts
def __repr__(self):
return 'KeyboardShortcuts'
def GetCommandById(self, id):
found = [x for x in self.Commands if x.ID == id]
if len(found) > 0: return found[0]
else: return None
def GetCommandsWithShortcuts(self):
return [x for x in self.Commands if x.HasShortcuts]
class KeyboardShortcutCommand:
def __init__(self, name, id, shortcuts, paths):
self.Name = name.decode("utf-8")
self.ID = id
self.Shortcuts = shortcuts
self.Paths = paths
self.HasShortcuts = len(shortcuts) > 0
self.HasPaths = len(paths) > 0
def __repr__(self):
return 'KeyboardShortcutCommand'
def KSFromPath(path):
try:
Commands = []
CommandCount = 0
CommandCountWithShortcuts = 0
root = e = ET.parse(path).getroot()
for child in root:
if child.tag == "ShortcutItem":
CommandCount += 1
CommandId = child.get("CommandId")
shortcuts = child.get("Shortcuts")
if shortcuts == None:
if CommandId in hardcodedshortcuts:
CommandShortcuts = hardcodedshortcuts[CommandId]
CommandCountWithShortcuts += 1
else: CommandShortcuts = []
else:
CommandShortcuts = shortcuts.split("#")
if CommandId in hardcodedshortcuts: CommandShortcuts = CommandShortcuts + hardcodedshortcuts[CommandId]
CommandCountWithShortcuts += 1
paths = child.get("Paths")
if paths == None: CommandPaths = []
else: CommandPaths = paths.split("; ")
Commands.append(KeyboardShortcutCommand(child.get("CommandName"), CommandId, CommandShortcuts, CommandPaths))
return KeyboardShortcuts(Commands, CommandCount, CommandCountWithShortcuts)
except:
import traceback
return traceback.format_exc()
if isinstance(IN[0], list): OUT = [KSFromPath(x) for x in IN[0]]
else: OUT = KSFromPath(IN[0]) | 0.081771 | 0.201794 |
import pytest
from utils import Signer
U64 = 2**64-1
STATE = (0, 0)
@pytest.mark.asyncio
async def test_next(x128_ss):
s0 = splitmix64(42)
s1 = splitmix64(s0)
global STATE
STATE = (s0, s1)
def rotl(x, k):
return (x << k) | (x >> (64 - k))
def next():
global STATE
s0, s1 = STATE
result = (rotl(s0 * 5, 7) * 9) & U64
s1 ^= s0
new_s0 = (rotl(s0, 24) ^ s1 ^ (s1 << 16)) & U64
new_s1 = (rotl(s1, 37)) & U64
STATE = (new_s0, new_s1)
return result
for r in range(1000):
tx = await x128_ss.next().invoke()
r = next()
assert tx.result.rnd == r
@pytest.mark.asyncio
async def test_rotl(x128_ss_test):
for (x, k) in [(1, 0), (1, 1), (2**64, 63), (2**64, 64), (2**123, 64)]:
tx = await x128_ss_test.test_rotl(x, k).call()
r = (x << k) | (x >> (64 - k))
assert tx.result.out == r
# https://xoshiro.di.unimi.it/splitmix64.c
def splitmix64(x):
U64 = 2**64-1
z = x + 0x9e3779b97f4a7c15
z &= U64
z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9
z &= U64
z = (z ^ (z >> 27)) * 0x94d049bb133111eb
z &= U64
return (z ^ (z >> 31)) & U64
@pytest.mark.asyncio
async def test_splitmix64(x128_ss_test):
for x in (0, 1, 2**64-1):
tx = await x128_ss_test.test_splitmix64(x).call()
assert tx.result.out == splitmix64(x)
@pytest.mark.asyncio
async def test_rshift(x128_ss_test):
test_cases = [
(1, 0),
(1, 1),
(2**127, 20),
(2**128+1, 31),
(2**128+1, 32),
(2**192+2**30, 45),
(2**250+2**20, 123)
]
for (v, b) in test_cases:
tx = await x128_ss_test.test_rshift(v, b).call()
assert tx.result.out == v >> b
@pytest.mark.asyncio
async def test_with_account(x128_ss, account_factory):
signer = Signer(0xc0ffee)
account = await account_factory(signer)
prngs = set()
size = 100
for _ in range(size):
tx = await signer.send_transaction(account, x128_ss.contract_address, "next", [])
prngs.add(tx.result.response[0])
assert len(prngs) == size | tests/test_xoroshiro128_starstar.py | import pytest
from utils import Signer
U64 = 2**64-1
STATE = (0, 0)
@pytest.mark.asyncio
async def test_next(x128_ss):
s0 = splitmix64(42)
s1 = splitmix64(s0)
global STATE
STATE = (s0, s1)
def rotl(x, k):
return (x << k) | (x >> (64 - k))
def next():
global STATE
s0, s1 = STATE
result = (rotl(s0 * 5, 7) * 9) & U64
s1 ^= s0
new_s0 = (rotl(s0, 24) ^ s1 ^ (s1 << 16)) & U64
new_s1 = (rotl(s1, 37)) & U64
STATE = (new_s0, new_s1)
return result
for r in range(1000):
tx = await x128_ss.next().invoke()
r = next()
assert tx.result.rnd == r
@pytest.mark.asyncio
async def test_rotl(x128_ss_test):
for (x, k) in [(1, 0), (1, 1), (2**64, 63), (2**64, 64), (2**123, 64)]:
tx = await x128_ss_test.test_rotl(x, k).call()
r = (x << k) | (x >> (64 - k))
assert tx.result.out == r
# https://xoshiro.di.unimi.it/splitmix64.c
def splitmix64(x):
U64 = 2**64-1
z = x + 0x9e3779b97f4a7c15
z &= U64
z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9
z &= U64
z = (z ^ (z >> 27)) * 0x94d049bb133111eb
z &= U64
return (z ^ (z >> 31)) & U64
@pytest.mark.asyncio
async def test_splitmix64(x128_ss_test):
for x in (0, 1, 2**64-1):
tx = await x128_ss_test.test_splitmix64(x).call()
assert tx.result.out == splitmix64(x)
@pytest.mark.asyncio
async def test_rshift(x128_ss_test):
test_cases = [
(1, 0),
(1, 1),
(2**127, 20),
(2**128+1, 31),
(2**128+1, 32),
(2**192+2**30, 45),
(2**250+2**20, 123)
]
for (v, b) in test_cases:
tx = await x128_ss_test.test_rshift(v, b).call()
assert tx.result.out == v >> b
@pytest.mark.asyncio
async def test_with_account(x128_ss, account_factory):
signer = Signer(0xc0ffee)
account = await account_factory(signer)
prngs = set()
size = 100
for _ in range(size):
tx = await signer.send_transaction(account, x128_ss.contract_address, "next", [])
prngs.add(tx.result.response[0])
assert len(prngs) == size | 0.502686 | 0.526708 |
import logging
import uuid
import datetime
from six.moves import http_client
from flask import request, g, abort, url_for, jsonify
from flask.views import MethodView
import marshmallow as ma
from flask_restx import reqparse
from flask_smorest import Blueprint
from drift.core.extensions.urlregistry import Endpoints
from drift.core.extensions.jwt import current_user
from drift.core.extensions.schemachecker import simple_schema_request
from driftbase.models.db import Friendship, FriendInvite, CorePlayer
DEFAULT_INVITE_EXPIRATION_TIME_SECONDS = 60 * 60 * 1
log = logging.getLogger(__name__)
bp = Blueprint("friendships", __name__, url_prefix="/friendships", description="Player to player relationships")
endpoints = Endpoints()
def on_message(queue_name, message):
if queue_name == 'clients' and message['event'] == 'created':
log.info("Friendship is forevur! This one just connected: %s", message['payload'])
def drift_init_extension(app, api, **kwargs):
api.register_blueprint(bp)
endpoints.init_app(app)
app.messagebus.register_consumer(on_message, 'clients')
def get_player(player_id):
player = g.db.query(CorePlayer).get(player_id)
return player
@bp.route('/players/<int:player_id>', endpoint='list')
class FriendshipsAPI(MethodView):
def get(self, player_id):
"""
List my friends
"""
if player_id != current_user["player_id"]:
abort(http_client.FORBIDDEN, description="That is not your player!")
left = g.db.query(Friendship.id, Friendship.player1_id, Friendship.player2_id).filter_by(player1_id=player_id, status="active")
right = g.db.query(Friendship.id, Friendship.player2_id, Friendship.player1_id).filter_by(player2_id=player_id, status="active")
friend_rows = left.union_all(right)
friends = []
for row in friend_rows:
friendship_id = row[0]
friend_id = row[2]
friend = {
"friend_id": friend_id,
"player_url": url_for("players.entry", player_id=friend_id, _external=True),
"friendship_url": url_for("friendships.entry", friendship_id=friendship_id, _external=True)
}
friends.append(friend)
ret = friends
return jsonify(ret)
@simple_schema_request({
"token": {"type": "string", },
}, required=["token"])
def post(self, player_id):
"""
New friend
"""
if player_id != current_user["player_id"]:
abort(http_client.FORBIDDEN, description="That is not your player!")
args = request.json
invite_token = args.get("token")
invite = g.db.query(FriendInvite).filter_by(token=invite_token).first()
if invite is None:
abort(http_client.NOT_FOUND, description="The invite was not found!")
if invite.expiry_date < datetime.datetime.utcnow():
abort(http_client.FORBIDDEN, description="The invite has expired!")
if invite.deleted:
abort(http_client.FORBIDDEN, description="The invite has been deleted!")
friend_id = invite.issued_by_player_id
left_id = player_id
right_id = friend_id
if left_id == right_id:
abort(http_client.FORBIDDEN, description="You cannot befriend yourself!")
if left_id > right_id:
left_id, right_id = right_id, left_id
existing_friendship = g.db.query(Friendship).filter(
Friendship.player1_id == left_id,
Friendship.player2_id == right_id
).first()
if existing_friendship is not None:
friendship = existing_friendship
if friendship.status == "deleted":
friendship.status = "active"
else:
return "{}", http_client.OK
else:
friendship = Friendship(player1_id=left_id, player2_id=right_id)
g.db.add(friendship)
g.db.commit()
ret = {
"friend_id": friend_id,
"url": url_for("friendships.entry", friendship_id=friendship.id, _external=True),
"messagequeue_url": url_for("messages.exchange", exchange="players", exchange_id=friend_id,
_external=True) + "/{queue}",
}
return jsonify(ret), http_client.CREATED
@bp.route('/<int:friendship_id>', endpoint='entry')
class FriendshipAPI(MethodView):
def delete(self, friendship_id):
"""
Remove a friend
"""
player_id = current_user["player_id"]
friendship = g.db.query(Friendship).filter_by(id=friendship_id).first()
if friendship is None:
abort(http_client.NOT_FOUND)
elif friendship.player1_id != player_id and friendship.player2_id != player_id:
abort(http_client.FORBIDDEN)
elif friendship.status == "deleted":
return "{}", http_client.GONE
if friendship:
friendship.status = "deleted"
g.db.commit()
return "{}", http_client.NO_CONTENT
@bp.route('/invites', endpoint='invites')
class FriendInvitesAPI(MethodView):
def post(self):
"""
New Friend token
"""
player_id = current_user["player_id"]
token = str(<KEY>())
expires_seconds = DEFAULT_INVITE_EXPIRATION_TIME_SECONDS
config = g.conf.tenant.get('friends')
if config:
expires_seconds = config['invite_expiration_seconds']
expires_seconds = expires_seconds
expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=expires_seconds)
invite = FriendInvite(
token=token,
issued_by_player_id=player_id,
expiry_date=expires
)
g.db.add(invite)
g.db.commit()
ret = jsonify({
"token": token,
"expires": expires,
"url": url_for("friendships.invite", invite_id=invite.id, _external=True)
}), http_client.CREATED
return ret
@bp.route('/invites/<int:invite_id>', endpoint='invite')
class FriendInviteAPI(MethodView):
def delete(self, invite_id):
"""
Delete a friend token
"""
player_id = current_user["player_id"]
invite = g.db.query(FriendInvite).filter_by(id=invite_id).first()
if not invite:
abort(http_client.NOT_FOUND)
elif invite.issued_by_player_id != player_id:
abort(http_client.FORBIDDEN)
elif invite.deleted:
return "{}", http_client.GONE
invite.deleted = True
g.db.commit()
return "{}", http_client.NO_CONTENT
@endpoints.register
def endpoint_info(*args):
ret = {}
ret["my_friends"] = None
ret["friend_invites"] = url_for("friendships.invites", _external=True)
if current_user:
ret["my_friends"] = url_for("friendships.list", player_id=current_user["player_id"], _external=True)
return ret | driftbase/api/friendships.py | import logging
import uuid
import datetime
from six.moves import http_client
from flask import request, g, abort, url_for, jsonify
from flask.views import MethodView
import marshmallow as ma
from flask_restx import reqparse
from flask_smorest import Blueprint
from drift.core.extensions.urlregistry import Endpoints
from drift.core.extensions.jwt import current_user
from drift.core.extensions.schemachecker import simple_schema_request
from driftbase.models.db import Friendship, FriendInvite, CorePlayer
DEFAULT_INVITE_EXPIRATION_TIME_SECONDS = 60 * 60 * 1
log = logging.getLogger(__name__)
bp = Blueprint("friendships", __name__, url_prefix="/friendships", description="Player to player relationships")
endpoints = Endpoints()
def on_message(queue_name, message):
if queue_name == 'clients' and message['event'] == 'created':
log.info("Friendship is forevur! This one just connected: %s", message['payload'])
def drift_init_extension(app, api, **kwargs):
api.register_blueprint(bp)
endpoints.init_app(app)
app.messagebus.register_consumer(on_message, 'clients')
def get_player(player_id):
player = g.db.query(CorePlayer).get(player_id)
return player
@bp.route('/players/<int:player_id>', endpoint='list')
class FriendshipsAPI(MethodView):
def get(self, player_id):
"""
List my friends
"""
if player_id != current_user["player_id"]:
abort(http_client.FORBIDDEN, description="That is not your player!")
left = g.db.query(Friendship.id, Friendship.player1_id, Friendship.player2_id).filter_by(player1_id=player_id, status="active")
right = g.db.query(Friendship.id, Friendship.player2_id, Friendship.player1_id).filter_by(player2_id=player_id, status="active")
friend_rows = left.union_all(right)
friends = []
for row in friend_rows:
friendship_id = row[0]
friend_id = row[2]
friend = {
"friend_id": friend_id,
"player_url": url_for("players.entry", player_id=friend_id, _external=True),
"friendship_url": url_for("friendships.entry", friendship_id=friendship_id, _external=True)
}
friends.append(friend)
ret = friends
return jsonify(ret)
@simple_schema_request({
"token": {"type": "string", },
}, required=["token"])
def post(self, player_id):
"""
New friend
"""
if player_id != current_user["player_id"]:
abort(http_client.FORBIDDEN, description="That is not your player!")
args = request.json
invite_token = args.get("token")
invite = g.db.query(FriendInvite).filter_by(token=invite_token).first()
if invite is None:
abort(http_client.NOT_FOUND, description="The invite was not found!")
if invite.expiry_date < datetime.datetime.utcnow():
abort(http_client.FORBIDDEN, description="The invite has expired!")
if invite.deleted:
abort(http_client.FORBIDDEN, description="The invite has been deleted!")
friend_id = invite.issued_by_player_id
left_id = player_id
right_id = friend_id
if left_id == right_id:
abort(http_client.FORBIDDEN, description="You cannot befriend yourself!")
if left_id > right_id:
left_id, right_id = right_id, left_id
existing_friendship = g.db.query(Friendship).filter(
Friendship.player1_id == left_id,
Friendship.player2_id == right_id
).first()
if existing_friendship is not None:
friendship = existing_friendship
if friendship.status == "deleted":
friendship.status = "active"
else:
return "{}", http_client.OK
else:
friendship = Friendship(player1_id=left_id, player2_id=right_id)
g.db.add(friendship)
g.db.commit()
ret = {
"friend_id": friend_id,
"url": url_for("friendships.entry", friendship_id=friendship.id, _external=True),
"messagequeue_url": url_for("messages.exchange", exchange="players", exchange_id=friend_id,
_external=True) + "/{queue}",
}
return jsonify(ret), http_client.CREATED
@bp.route('/<int:friendship_id>', endpoint='entry')
class FriendshipAPI(MethodView):
def delete(self, friendship_id):
"""
Remove a friend
"""
player_id = current_user["player_id"]
friendship = g.db.query(Friendship).filter_by(id=friendship_id).first()
if friendship is None:
abort(http_client.NOT_FOUND)
elif friendship.player1_id != player_id and friendship.player2_id != player_id:
abort(http_client.FORBIDDEN)
elif friendship.status == "deleted":
return "{}", http_client.GONE
if friendship:
friendship.status = "deleted"
g.db.commit()
return "{}", http_client.NO_CONTENT
@bp.route('/invites', endpoint='invites')
class FriendInvitesAPI(MethodView):
def post(self):
"""
New Friend token
"""
player_id = current_user["player_id"]
token = str(<KEY>())
expires_seconds = DEFAULT_INVITE_EXPIRATION_TIME_SECONDS
config = g.conf.tenant.get('friends')
if config:
expires_seconds = config['invite_expiration_seconds']
expires_seconds = expires_seconds
expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=expires_seconds)
invite = FriendInvite(
token=token,
issued_by_player_id=player_id,
expiry_date=expires
)
g.db.add(invite)
g.db.commit()
ret = jsonify({
"token": token,
"expires": expires,
"url": url_for("friendships.invite", invite_id=invite.id, _external=True)
}), http_client.CREATED
return ret
@bp.route('/invites/<int:invite_id>', endpoint='invite')
class FriendInviteAPI(MethodView):
def delete(self, invite_id):
"""
Delete a friend token
"""
player_id = current_user["player_id"]
invite = g.db.query(FriendInvite).filter_by(id=invite_id).first()
if not invite:
abort(http_client.NOT_FOUND)
elif invite.issued_by_player_id != player_id:
abort(http_client.FORBIDDEN)
elif invite.deleted:
return "{}", http_client.GONE
invite.deleted = True
g.db.commit()
return "{}", http_client.NO_CONTENT
@endpoints.register
def endpoint_info(*args):
ret = {}
ret["my_friends"] = None
ret["friend_invites"] = url_for("friendships.invites", _external=True)
if current_user:
ret["my_friends"] = url_for("friendships.list", player_id=current_user["player_id"], _external=True)
return ret | 0.430866 | 0.071332 |
import base64
import logging
import requests
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
PLATFORM_SCHEMA, ImageProcessingFaceEntity, ATTR_CONFIDENCE, CONF_SOURCE,
CONF_ENTITY_ID, CONF_NAME, DOMAIN)
from homeassistant.const import (CONF_IP_ADDRESS, CONF_PORT)
_LOGGER = logging.getLogger(__name__)
ATTR_BOUNDING_BOX = 'bounding_box'
ATTR_CLASSIFIER = 'classifier'
ATTR_IMAGE_ID = 'image_id'
ATTR_MATCHED = 'matched'
CLASSIFIER = 'facebox'
DATA_FACEBOX = 'facebox_classifiers'
EVENT_CLASSIFIER_TEACH = 'image_processing.teach_classifier'
FILE_PATH = 'file_path'
SERVICE_TEACH_FACE = 'facebox_teach_face'
TIMEOUT = 9
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
})
SERVICE_TEACH_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
})
def encode_image(image):
"""base64 encode an image stream."""
base64_img = base64.b64encode(image).decode('ascii')
return base64_img
def get_matched_faces(faces):
"""Return the name and rounded confidence of matched faces."""
return {face['name']: round(face['confidence'], 2)
for face in faces if face['matched']}
def parse_faces(api_faces):
"""Parse the API face data into the format required."""
known_faces = []
for entry in api_faces:
face = {}
if entry['matched']: # This data is only in matched faces.
face[ATTR_NAME] = entry['name']
face[ATTR_IMAGE_ID] = entry['id']
else: # Lets be explicit.
face[ATTR_NAME] = None
face[ATTR_IMAGE_ID] = None
face[ATTR_CONFIDENCE] = round(100.0*entry['confidence'], 2)
face[ATTR_MATCHED] = entry['matched']
face[ATTR_BOUNDING_BOX] = entry['rect']
known_faces.append(face)
return known_faces
def post_image(url, image):
"""Post an image to the classifier."""
try:
response = requests.post(
url,
json={"base64": encode_image(image)},
timeout=TIMEOUT
)
return response
except requests.exceptions.ConnectionError:
_LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
def valid_file_path(file_path):
"""Check that a file_path points to a valid file."""
try:
cv.isfile(file_path)
return True
except vol.Invalid:
_LOGGER.error(
"%s error: Invalid file path: %s", CLASSIFIER, file_path)
return False
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
if DATA_FACEBOX not in hass.data:
hass.data[DATA_FACEBOX] = []
entities = []
for camera in config[CONF_SOURCE]:
facebox = FaceClassifyEntity(
config[CONF_IP_ADDRESS],
config[CONF_PORT],
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME))
entities.append(facebox)
hass.data[DATA_FACEBOX].append(facebox)
add_devices(entities)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get('entity_id')
classifiers = hass.data[DATA_FACEBOX]
if entity_ids:
classifiers = [c for c in classifiers if c.entity_id in entity_ids]
for classifier in classifiers:
name = service.data.get(ATTR_NAME)
file_path = service.data.get(FILE_PATH)
classifier.teach(name, file_path)
hass.services.register(
DOMAIN,
SERVICE_TEACH_FACE,
service_handle,
schema=SERVICE_TEACH_SCHEMA)
class FaceClassifyEntity(ImageProcessingFaceEntity):
"""Perform a face classification."""
def __init__(self, ip, port, camera_entity, name=None):
"""Init with the API key and model id."""
super().__init__()
self._url_check = "http://{}:{}/{}/check".format(ip, port, CLASSIFIER)
self._url_teach = "http://{}:{}/{}/teach".format(ip, port, CLASSIFIER)
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = "{} {}".format(
CLASSIFIER, camera_name)
self._matched = {}
def process_image(self, image):
"""Process an image."""
response = post_image(self._url_check, image)
if response is not None:
response_json = response.json()
if response_json['success']:
total_faces = response_json['facesCount']
faces = parse_faces(response_json['faces'])
self._matched = get_matched_faces(faces)
self.process_faces(faces, total_faces)
else:
self.total_faces = None
self.faces = []
self._matched = {}
def teach(self, name, file_path):
"""Teach classifier a face name."""
if (not self.hass.config.is_allowed_path(file_path)
or not valid_file_path(file_path)):
return
with open(file_path, 'rb') as open_file:
response = requests.post(
self._url_teach,
data={ATTR_NAME: name, 'id': file_path},
files={'file': open_file})
if response.status_code == 200:
self.hass.bus.fire(
EVENT_CLASSIFIER_TEACH, {
ATTR_CLASSIFIER: CLASSIFIER,
ATTR_NAME: name,
FILE_PATH: file_path,
'success': True,
'message': None
})
elif response.status_code == 400:
_LOGGER.warning(
"%s teaching of file %s failed with message:%s",
CLASSIFIER, file_path, response.text)
self.hass.bus.fire(
EVENT_CLASSIFIER_TEACH, {
ATTR_CLASSIFIER: CLASSIFIER,
ATTR_NAME: name,
FILE_PATH: file_path,
'success': False,
'message': response.text
})
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the classifier attributes."""
return {
'matched_faces': self._matched,
'total_matched_faces': len(self._matched),
} | homeassistant/components/image_processing/facebox.py | import base64
import logging
import requests
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
PLATFORM_SCHEMA, ImageProcessingFaceEntity, ATTR_CONFIDENCE, CONF_SOURCE,
CONF_ENTITY_ID, CONF_NAME, DOMAIN)
from homeassistant.const import (CONF_IP_ADDRESS, CONF_PORT)
_LOGGER = logging.getLogger(__name__)
ATTR_BOUNDING_BOX = 'bounding_box'
ATTR_CLASSIFIER = 'classifier'
ATTR_IMAGE_ID = 'image_id'
ATTR_MATCHED = 'matched'
CLASSIFIER = 'facebox'
DATA_FACEBOX = 'facebox_classifiers'
EVENT_CLASSIFIER_TEACH = 'image_processing.teach_classifier'
FILE_PATH = 'file_path'
SERVICE_TEACH_FACE = 'facebox_teach_face'
TIMEOUT = 9
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
})
SERVICE_TEACH_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
})
def encode_image(image):
"""base64 encode an image stream."""
base64_img = base64.b64encode(image).decode('ascii')
return base64_img
def get_matched_faces(faces):
"""Return the name and rounded confidence of matched faces."""
return {face['name']: round(face['confidence'], 2)
for face in faces if face['matched']}
def parse_faces(api_faces):
"""Parse the API face data into the format required."""
known_faces = []
for entry in api_faces:
face = {}
if entry['matched']: # This data is only in matched faces.
face[ATTR_NAME] = entry['name']
face[ATTR_IMAGE_ID] = entry['id']
else: # Lets be explicit.
face[ATTR_NAME] = None
face[ATTR_IMAGE_ID] = None
face[ATTR_CONFIDENCE] = round(100.0*entry['confidence'], 2)
face[ATTR_MATCHED] = entry['matched']
face[ATTR_BOUNDING_BOX] = entry['rect']
known_faces.append(face)
return known_faces
def post_image(url, image):
"""Post an image to the classifier."""
try:
response = requests.post(
url,
json={"base64": encode_image(image)},
timeout=TIMEOUT
)
return response
except requests.exceptions.ConnectionError:
_LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
def valid_file_path(file_path):
"""Check that a file_path points to a valid file."""
try:
cv.isfile(file_path)
return True
except vol.Invalid:
_LOGGER.error(
"%s error: Invalid file path: %s", CLASSIFIER, file_path)
return False
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
if DATA_FACEBOX not in hass.data:
hass.data[DATA_FACEBOX] = []
entities = []
for camera in config[CONF_SOURCE]:
facebox = FaceClassifyEntity(
config[CONF_IP_ADDRESS],
config[CONF_PORT],
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME))
entities.append(facebox)
hass.data[DATA_FACEBOX].append(facebox)
add_devices(entities)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get('entity_id')
classifiers = hass.data[DATA_FACEBOX]
if entity_ids:
classifiers = [c for c in classifiers if c.entity_id in entity_ids]
for classifier in classifiers:
name = service.data.get(ATTR_NAME)
file_path = service.data.get(FILE_PATH)
classifier.teach(name, file_path)
hass.services.register(
DOMAIN,
SERVICE_TEACH_FACE,
service_handle,
schema=SERVICE_TEACH_SCHEMA)
class FaceClassifyEntity(ImageProcessingFaceEntity):
"""Perform a face classification."""
def __init__(self, ip, port, camera_entity, name=None):
"""Init with the API key and model id."""
super().__init__()
self._url_check = "http://{}:{}/{}/check".format(ip, port, CLASSIFIER)
self._url_teach = "http://{}:{}/{}/teach".format(ip, port, CLASSIFIER)
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = "{} {}".format(
CLASSIFIER, camera_name)
self._matched = {}
def process_image(self, image):
"""Process an image."""
response = post_image(self._url_check, image)
if response is not None:
response_json = response.json()
if response_json['success']:
total_faces = response_json['facesCount']
faces = parse_faces(response_json['faces'])
self._matched = get_matched_faces(faces)
self.process_faces(faces, total_faces)
else:
self.total_faces = None
self.faces = []
self._matched = {}
def teach(self, name, file_path):
"""Teach classifier a face name."""
if (not self.hass.config.is_allowed_path(file_path)
or not valid_file_path(file_path)):
return
with open(file_path, 'rb') as open_file:
response = requests.post(
self._url_teach,
data={ATTR_NAME: name, 'id': file_path},
files={'file': open_file})
if response.status_code == 200:
self.hass.bus.fire(
EVENT_CLASSIFIER_TEACH, {
ATTR_CLASSIFIER: CLASSIFIER,
ATTR_NAME: name,
FILE_PATH: file_path,
'success': True,
'message': None
})
elif response.status_code == 400:
_LOGGER.warning(
"%s teaching of file %s failed with message:%s",
CLASSIFIER, file_path, response.text)
self.hass.bus.fire(
EVENT_CLASSIFIER_TEACH, {
ATTR_CLASSIFIER: CLASSIFIER,
ATTR_NAME: name,
FILE_PATH: file_path,
'success': False,
'message': response.text
})
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the classifier attributes."""
return {
'matched_faces': self._matched,
'total_matched_faces': len(self._matched),
} | 0.639849 | 0.121399 |
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import font_manager
zhfont1=font_manager.FontProperties(fname='SimHei.ttf',size=20)
# %%
root='all_counts'
dir_list=os.listdir(root)
# %%
file=pd.read_csv(os.path.join(root,dir_list[0],'1.csv'),index_col=0)
print(file)
print(np.shape(file))
# %%
driver=list()
for h in range(3):
driver_range=list()
for idx in range(10):
file=pd.read_csv(os.path.join(root,dir_list[idx],str(h)+'.csv'),index_col=0)
driver_range.append(np.array(file))
driver.append(driver_range)
#%%
print(file)
print(np.shape(driver))
# %%
differ_all=list()
fig=plt.figure(figsize=(8,8))
axes=[]
titles=['远距离','中距离','短距离']
img=[]
for h in range(3):
differ=np.zeros((10,10))
for idx in range(10):
for jdx in range(10):
for i in range(5):
for j in range(5):
differ[idx][jdx] = differ[idx][jdx]+driver[h][idx][i][j]*\
np.log(driver[h][idx][i][j]/driver[h][jdx][i][j])
differ_all.append(differ)
axes.append(fig.add_subplot(2,2,h+1))
axes[-1].set_title(titles[h],FontProperties=zhfont1)
axes[-1].set_xlabel('驾驶员(模仿者)',FontProperties=zhfont1)
axes[-1].set_ylabel('驾驶员',FontProperties=zhfont1)
axes[-1].set_xticks(np.linspace(0.5,9.5,4,endpoint=True))
axes[-1].set_xticklabels(['#0','#3','#6','#9'])
axes[-1].set_yticks(np.linspace(0.5,9.5,4,endpoint=True))
axes[-1].set_yticklabels(['#0','#3','#6','#9'])
plt.tick_params(labelsize=15)
img.append(axes[-1].pcolormesh(differ,cmap=mpl.cm.jet))
divider = make_axes_locatable(axes[-1])
cax=divider.append_axes("right",size='5%',pad=0.05)
norm = mpl.colors.Normalize(vmin=0,vmax=1.0)
cmap = mpl.cm.jet
cb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm,cmap=cmap),cax=cax)
plt.subplots_adjust(hspace=0.5,wspace=0.5)
plt.show()
# %% | Python_Code/Processing_multi_differ/multi_difference.py | from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import font_manager
zhfont1=font_manager.FontProperties(fname='SimHei.ttf',size=20)
# %%
root='all_counts'
dir_list=os.listdir(root)
# %%
file=pd.read_csv(os.path.join(root,dir_list[0],'1.csv'),index_col=0)
print(file)
print(np.shape(file))
# %%
driver=list()
for h in range(3):
driver_range=list()
for idx in range(10):
file=pd.read_csv(os.path.join(root,dir_list[idx],str(h)+'.csv'),index_col=0)
driver_range.append(np.array(file))
driver.append(driver_range)
#%%
print(file)
print(np.shape(driver))
# %%
differ_all=list()
fig=plt.figure(figsize=(8,8))
axes=[]
titles=['远距离','中距离','短距离']
img=[]
for h in range(3):
differ=np.zeros((10,10))
for idx in range(10):
for jdx in range(10):
for i in range(5):
for j in range(5):
differ[idx][jdx] = differ[idx][jdx]+driver[h][idx][i][j]*\
np.log(driver[h][idx][i][j]/driver[h][jdx][i][j])
differ_all.append(differ)
axes.append(fig.add_subplot(2,2,h+1))
axes[-1].set_title(titles[h],FontProperties=zhfont1)
axes[-1].set_xlabel('驾驶员(模仿者)',FontProperties=zhfont1)
axes[-1].set_ylabel('驾驶员',FontProperties=zhfont1)
axes[-1].set_xticks(np.linspace(0.5,9.5,4,endpoint=True))
axes[-1].set_xticklabels(['#0','#3','#6','#9'])
axes[-1].set_yticks(np.linspace(0.5,9.5,4,endpoint=True))
axes[-1].set_yticklabels(['#0','#3','#6','#9'])
plt.tick_params(labelsize=15)
img.append(axes[-1].pcolormesh(differ,cmap=mpl.cm.jet))
divider = make_axes_locatable(axes[-1])
cax=divider.append_axes("right",size='5%',pad=0.05)
norm = mpl.colors.Normalize(vmin=0,vmax=1.0)
cmap = mpl.cm.jet
cb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm,cmap=cmap),cax=cax)
plt.subplots_adjust(hspace=0.5,wspace=0.5)
plt.show()
# %% | 0.111072 | 0.179764 |
# In[1]:
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
from time import sleep
# In[29]:
path="C:\Program Files\chromedriver.exe"
browser=webdriver.Chrome(path) # creating the object
browser.get("https://www.facebook.com/") #Opening the site
wait=WebDriverWait(browser,600) # site waits for 600 sec
# ### Login Page
# In[30]:
#Typing the email address or phone number
sleep(5)
email_phone = browser.find_element_by_id('email')
email_address = input("Enter the email address or phone number: ")
email_phone.send_keys(email_address)
#Typing the password for the account
pwd = browser.find_element_by_id('pass')
password = input("Enter the password: ")
pwd.send_keys(password)
#Clicking the login button
sleep(2)
log = browser.find_element_by_id('u_0_b')
log.click()
# ### Enabling/Disabling Dark Mode
# In[6]:
#Clicking on the drop-down button
sleep(3)
theme = browser.find_element_by_xpath("//div[@aria-label='Account']")
theme.click()
#Selecting the dark mode
sleep(3)
dark = browser.find_element_by_xpath("//input[@aria-label='Enabled']")
dark.click()
#Unclicking the drop-down button
sleep(2)
unclick = browser.find_element_by_xpath("//div[@aria-label='Account']")
unclick.click()
# ### Posting Story
# In[7]:
sleep(3)
browser.maximize_window() #Maximize the browser window
#Create the story
sleep(3)
story_login = browser.find_element_by_link_text('Create a story').click()
sleep(3)
#Selecting the Text Story
text_story = browser.find_element_by_xpath("//div[@class='i1fnvgqd j83agx80']//div[@class='oajrlxb2 gs1a9yip g5ia77u1 mtkw9kbi tlpljxtp qensuy8j ppp5ayq2 goun2846 ccm00jje s44p3ltw mk2mc5f4 rt8b4zig n8ej3o3l agehan2d sk4xxmp2 rq0escxv nhd2j8a9 pq6dq46d mg4g778l btwxx1t3 pfnyh3mw p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x tgvbjcpo hpfvmrgz jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso l9j0dhe7 i1ao9s8h esuyzwwr f1sip0of du4w35lb lzcic4wl abiwlrkh p8dawk7l']")
text_story.click()
#Adding the text
sleep(3)
text = browser.find_element_by_tag_name("textarea")
sleep(1)
text.send_keys("This story was created using Selenium")
#Confirming the story
sleep(2)
send = browser.find_element_by_class_name("s1i5eluu")
send.click()
# ### Add a new post
# In[7]:
#Selecting the create post
sleep(3)
create = browser.find_element_by_xpath("//div[@class='oajrlxb2 b3i9ofy5 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 j83agx80 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x cxgpxx05 d1544ag0 sj5x9vvc tw6a2znq i1ao9s8h esuyzwwr f1sip0of lzcic4wl l9j0dhe7 abiwlrkh p8dawk7l bp9cbjyn orhb3f3m czkt41v7 fmqxjp7s emzo65vh btwxx1t3 buofh1pr idiwt2bm jifvfom9 ni8dbmo4 stjgntxs kbf60n1y']")
create.click()
sleep(2)
#Typing the message
text = browser.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div[2]/div[2]/div[1]/div[1]/div[1]/div/div/div/div/div[2]/div")
text.send_keys("This post was created using Selenium")
#Sending the post
sleep(3)
browser.find_element_by_class_name('s1i5eluu').click()
# ### Adding Bio for first time
# In[11]:
#Going to the profile page
sleep(3)
browser.maximize_window()
sleep(3)
profile = browser.find_element_by_xpath("//a[@class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 j83agx80 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys d1544ag0 qt6c0cv9 tw6a2znq i1ao9s8h esuyzwwr f1sip0of lzcic4wl l9j0dhe7 abiwlrkh p8dawk7l bp9cbjyn e72ty7fz qlfml3jp inkptoze qmr60zad btwxx1t3 tv7at329 taijpn5t']")
profile.click()
sleep(3)
#Clicking the bio button
button = browser.find_element_by_css_selector("#mount_0_0 > div > div > div.rq0escxv.l9j0dhe7.du4w35lb > div.rq0escxv.l9j0dhe7.du4w35lb > div > div > div.j83agx80.cbu4d94t.d6urw2fd.dp1hu0rb.l9j0dhe7.du4w35lb > div.dp1hu0rb.cbu4d94t.j83agx80 > div > div > div:nth-child(1) > div.rq0escxv.l9j0dhe7.du4w35lb.j83agx80.taijpn5t.gs1a9yip.owycx6da.btwxx1t3.ihqw7lf3.cddn0xzi > div > div > div.rq0escxv.l9j0dhe7.du4w35lb.j83agx80.taijpn5t.gs1a9yip.owycx6da.btwxx1t3.d1544ag0.tw6a2znq.discj3wi.b5q2rw42.lq239pai.mysgfdmx.hddg9phg > div > div > div.j83agx80.cbu4d94t.obtkqiv7.sv5sfqaa > div > span > span > div")
button.click()
#Typing the text in the textbox
sleep(3)
textbox = browser.find_element_by_xpath("//textarea[@placeholder='Describe who you are']")
text = input("Enter the bio: ")
textbox.clear
if len(text)<=101:
textbox.send_keys(text)
else:
textbox.quit()
#Saving the bio
sleep(2)
send = browser.find_element_by_xpath("//div[@aria-label='Save']")
send.click()
# ### Sending a message via Messenger
# In[14]:
#Clicking on the Messenger icon
sleep(3)
msg_icon = browser.find_element_by_xpath("//div[@aria-label='Messenger']")
msg_icon.click()
#Searching for the person
sleep(3)
search = browser.find_element_by_xpath("//input[@placeholder='Search Messenger']")
search.click()
#Enter the person name
name = input("Enter the name: ")
search_bar = browser.find_element_by_xpath("//input[@placeholder='Search Messenger']")
search_bar.send_keys(name)
#Opening the chatbox
sleep(3)
tab = browser.find_element_by_xpath("//div[@class='j83agx80 oo9gr5id buofh1pr ni8dbmo4 stjgntxs cxgpxx05 dflh9lhu sj5x9vvc scb9dxdr']")
tab.click()
sleep(2)
#Sending the message
message = browser.find_element_by_xpath("//div[@class='notranslate _5rpu']")
message.send_keys("This message was sent using Selenium")
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Press Enter to send']").click()
#Closing the Chatbox
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Close tab']").click()
# ### Logout of the account
# In[28]:
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Account']").click()
sleep(2)
log_out = browser.find_element_by_xpath("//div[@class='knvmm38d']//div[5]//div[1]//div[1]//div[2]")
log_out.click()
#Closing the browser
sleep(3)
browser.quit() | automation.py |
# In[1]:
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
from time import sleep
# In[29]:
path="C:\Program Files\chromedriver.exe"
browser=webdriver.Chrome(path) # creating the object
browser.get("https://www.facebook.com/") #Opening the site
wait=WebDriverWait(browser,600) # site waits for 600 sec
# ### Login Page
# In[30]:
#Typing the email address or phone number
sleep(5)
email_phone = browser.find_element_by_id('email')
email_address = input("Enter the email address or phone number: ")
email_phone.send_keys(email_address)
#Typing the password for the account
pwd = browser.find_element_by_id('pass')
password = input("Enter the password: ")
pwd.send_keys(password)
#Clicking the login button
sleep(2)
log = browser.find_element_by_id('u_0_b')
log.click()
# ### Enabling/Disabling Dark Mode
# In[6]:
#Clicking on the drop-down button
sleep(3)
theme = browser.find_element_by_xpath("//div[@aria-label='Account']")
theme.click()
#Selecting the dark mode
sleep(3)
dark = browser.find_element_by_xpath("//input[@aria-label='Enabled']")
dark.click()
#Unclicking the drop-down button
sleep(2)
unclick = browser.find_element_by_xpath("//div[@aria-label='Account']")
unclick.click()
# ### Posting Story
# In[7]:
sleep(3)
browser.maximize_window() #Maximize the browser window
#Create the story
sleep(3)
story_login = browser.find_element_by_link_text('Create a story').click()
sleep(3)
#Selecting the Text Story
text_story = browser.find_element_by_xpath("//div[@class='i1fnvgqd j83agx80']//div[@class='oajrlxb2 gs1a9yip g5ia77u1 mtkw9kbi tlpljxtp qensuy8j ppp5ayq2 goun2846 ccm00jje s44p3ltw mk2mc5f4 rt8b4zig n8ej3o3l agehan2d sk4xxmp2 rq0escxv nhd2j8a9 pq6dq46d mg4g778l btwxx1t3 pfnyh3mw p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x tgvbjcpo hpfvmrgz jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso l9j0dhe7 i1ao9s8h esuyzwwr f1sip0of du4w35lb lzcic4wl abiwlrkh p8dawk7l']")
text_story.click()
#Adding the text
sleep(3)
text = browser.find_element_by_tag_name("textarea")
sleep(1)
text.send_keys("This story was created using Selenium")
#Confirming the story
sleep(2)
send = browser.find_element_by_class_name("s1i5eluu")
send.click()
# ### Add a new post
# In[7]:
#Selecting the create post
sleep(3)
create = browser.find_element_by_xpath("//div[@class='oajrlxb2 b3i9ofy5 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 j83agx80 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x cxgpxx05 d1544ag0 sj5x9vvc tw6a2znq i1ao9s8h esuyzwwr f1sip0of lzcic4wl l9j0dhe7 abiwlrkh p8dawk7l bp9cbjyn orhb3f3m czkt41v7 fmqxjp7s emzo65vh btwxx1t3 buofh1pr idiwt2bm jifvfom9 ni8dbmo4 stjgntxs kbf60n1y']")
create.click()
sleep(2)
#Typing the message
text = browser.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div[2]/div[2]/div[1]/div[1]/div[1]/div/div/div/div/div[2]/div")
text.send_keys("This post was created using Selenium")
#Sending the post
sleep(3)
browser.find_element_by_class_name('s1i5eluu').click()
# ### Adding Bio for first time
# In[11]:
#Going to the profile page
sleep(3)
browser.maximize_window()
sleep(3)
profile = browser.find_element_by_xpath("//a[@class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 j83agx80 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys d1544ag0 qt6c0cv9 tw6a2znq i1ao9s8h esuyzwwr f1sip0of lzcic4wl l9j0dhe7 abiwlrkh p8dawk7l bp9cbjyn e72ty7fz qlfml3jp inkptoze qmr60zad btwxx1t3 tv7at329 taijpn5t']")
profile.click()
sleep(3)
#Clicking the bio button
button = browser.find_element_by_css_selector("#mount_0_0 > div > div > div.rq0escxv.l9j0dhe7.du4w35lb > div.rq0escxv.l9j0dhe7.du4w35lb > div > div > div.j83agx80.cbu4d94t.d6urw2fd.dp1hu0rb.l9j0dhe7.du4w35lb > div.dp1hu0rb.cbu4d94t.j83agx80 > div > div > div:nth-child(1) > div.rq0escxv.l9j0dhe7.du4w35lb.j83agx80.taijpn5t.gs1a9yip.owycx6da.btwxx1t3.ihqw7lf3.cddn0xzi > div > div > div.rq0escxv.l9j0dhe7.du4w35lb.j83agx80.taijpn5t.gs1a9yip.owycx6da.btwxx1t3.d1544ag0.tw6a2znq.discj3wi.b5q2rw42.lq239pai.mysgfdmx.hddg9phg > div > div > div.j83agx80.cbu4d94t.obtkqiv7.sv5sfqaa > div > span > span > div")
button.click()
#Typing the text in the textbox
sleep(3)
textbox = browser.find_element_by_xpath("//textarea[@placeholder='Describe who you are']")
text = input("Enter the bio: ")
textbox.clear
if len(text)<=101:
textbox.send_keys(text)
else:
textbox.quit()
#Saving the bio
sleep(2)
send = browser.find_element_by_xpath("//div[@aria-label='Save']")
send.click()
# ### Sending a message via Messenger
# In[14]:
#Clicking on the Messenger icon
sleep(3)
msg_icon = browser.find_element_by_xpath("//div[@aria-label='Messenger']")
msg_icon.click()
#Searching for the person
sleep(3)
search = browser.find_element_by_xpath("//input[@placeholder='Search Messenger']")
search.click()
#Enter the person name
name = input("Enter the name: ")
search_bar = browser.find_element_by_xpath("//input[@placeholder='Search Messenger']")
search_bar.send_keys(name)
#Opening the chatbox
sleep(3)
tab = browser.find_element_by_xpath("//div[@class='j83agx80 oo9gr5id buofh1pr ni8dbmo4 stjgntxs cxgpxx05 dflh9lhu sj5x9vvc scb9dxdr']")
tab.click()
sleep(2)
#Sending the message
message = browser.find_element_by_xpath("//div[@class='notranslate _5rpu']")
message.send_keys("This message was sent using Selenium")
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Press Enter to send']").click()
#Closing the Chatbox
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Close tab']").click()
# ### Logout of the account
# In[28]:
sleep(3)
browser.find_element_by_xpath("//div[@aria-label='Account']").click()
sleep(2)
log_out = browser.find_element_by_xpath("//div[@class='knvmm38d']//div[5]//div[1]//div[1]//div[2]")
log_out.click()
#Closing the browser
sleep(3)
browser.quit() | 0.252292 | 0.068506 |
import cv2
import mediapipe as mp
# FOR CHECKING THE FRAME RATE
import time
# CREATE A VIDEOCAPTURE OBJECT
cap = cv2.VideoCapture(0);
# TO DETECT HAND
mpHands = mp.solutions.hands
# WE HAVE CREATED A MEDIAPIPE 'HANDS' OBJECT, THUS DETECTING HAND WITH HELP OF THE 21 GIVEN POINTS)
# PARAMS :-
# static_image_mode = false means DETECTION + TRACKING (if tracking confidence is above some threshold)
# SINCE DEFAULT PARAMS USED, WE HAVE NOT PASSED ANYTHING TO Hands
hands = mpHands.Hands();
mpDraw = mp.solutions.drawing_utils
# NOW WE WILL CHECK FRAME RATE SO FOR THAT WE WILL DEFINE PTIME , CTIME
ptime = 0
ctime = 0
if not cap.isOpened():
print("Camera is not started yet")
while True:
# CAPTURE IMAGE FRAME BY FRAME
# RETURNS BOOL AND FRAME , TRUE IF FRAME IS READ CORRECTLY IN BGR FORMAT
success,img = cap.read();
# CONVERT IMAGE TO RGB
imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB);
# THIS METHOD PERFORMS HAND LANDMARK ESTIMATION AND THIS METHOD EXPECTS RGB FORMAT IMAGE
results = hands.process(imgRGB)
# IF WE WANT TO GET THE LANDMARK OF OUR HANDS
print(results.multi_hand_landmarks);
# CHECK IF MULTIPLE HANDS ARE THERE ,AND IF YES, EXTRACT THEM
if results.multi_hand_landmarks:
for handlms in results.multi_hand_landmarks:
# HERE WE ARE LOCATING THE 21(0-20) POINTS OF OUR HAND WITH X AND Y COORDINATES FOR EACH HAND FRAME
for id, lm in enumerate(handlms.landmark):
# print(id,lm)
# we are taking height, width and channel
h, w, c= img.shape
# Convert the different parameters into pixels
cx , cy = int(lm.x* w), int(lm.y* h)
# identify id with locations in pixels
#print(id, cx, cy)
# now we will draw a circle for id 0
if id==8:
cv2.circle(img, (cx , cy), 20, (255,0,255), cv2.FILLED)
# now we will draw a circle for id 4
if id ==12:
cv2.circle(img, (cx, cy), 20, (255, 255, 0), cv2.FILLED)
# FOR DRAWING LANDMARKS (HAND_CONNECTIONS HELP TO JOIN THE 21 POINTS TO THE RESPECTIVE POINTS)
mpDraw.draw_landmarks(img,handlms,mpHands.HAND_CONNECTIONS);
ctime = time.time()
fps= 1/(ctime-ptime)
ptime = ctime
# HERE WE ARE DISPLAYING THE FPS ALONG WITH THE VIDEO
cv2.putText(img, str(int(fps)), (10,70), cv2.FONT_HERSHEY_PLAIN,3,(255,0,255),3)
# TO DISPLAY THE FRAME
cv2.imshow("Hand Detector WebCam",img);
if not success:
break;
# IF USER PRESS Q THEN WE HAVE TO QUIT
if cv2.waitKey(1) & 0xFF==ord("q"):
break;
# When Everything Done Release the capture
cap.release()
# Destroy All the windows
cv2.destroyAllWindows() | Virtual Mouse/handtracking.py | import cv2
import mediapipe as mp
# FOR CHECKING THE FRAME RATE
import time
# CREATE A VIDEOCAPTURE OBJECT
cap = cv2.VideoCapture(0);
# TO DETECT HAND
mpHands = mp.solutions.hands
# WE HAVE CREATED A MEDIAPIPE 'HANDS' OBJECT, THUS DETECTING HAND WITH HELP OF THE 21 GIVEN POINTS)
# PARAMS :-
# static_image_mode = false means DETECTION + TRACKING (if tracking confidence is above some threshold)
# SINCE DEFAULT PARAMS USED, WE HAVE NOT PASSED ANYTHING TO Hands
hands = mpHands.Hands();
mpDraw = mp.solutions.drawing_utils
# NOW WE WILL CHECK FRAME RATE SO FOR THAT WE WILL DEFINE PTIME , CTIME
ptime = 0
ctime = 0
if not cap.isOpened():
print("Camera is not started yet")
while True:
# CAPTURE IMAGE FRAME BY FRAME
# RETURNS BOOL AND FRAME , TRUE IF FRAME IS READ CORRECTLY IN BGR FORMAT
success,img = cap.read();
# CONVERT IMAGE TO RGB
imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB);
# THIS METHOD PERFORMS HAND LANDMARK ESTIMATION AND THIS METHOD EXPECTS RGB FORMAT IMAGE
results = hands.process(imgRGB)
# IF WE WANT TO GET THE LANDMARK OF OUR HANDS
print(results.multi_hand_landmarks);
# CHECK IF MULTIPLE HANDS ARE THERE ,AND IF YES, EXTRACT THEM
if results.multi_hand_landmarks:
for handlms in results.multi_hand_landmarks:
# HERE WE ARE LOCATING THE 21(0-20) POINTS OF OUR HAND WITH X AND Y COORDINATES FOR EACH HAND FRAME
for id, lm in enumerate(handlms.landmark):
# print(id,lm)
# we are taking height, width and channel
h, w, c= img.shape
# Convert the different parameters into pixels
cx , cy = int(lm.x* w), int(lm.y* h)
# identify id with locations in pixels
#print(id, cx, cy)
# now we will draw a circle for id 0
if id==8:
cv2.circle(img, (cx , cy), 20, (255,0,255), cv2.FILLED)
# now we will draw a circle for id 4
if id ==12:
cv2.circle(img, (cx, cy), 20, (255, 255, 0), cv2.FILLED)
# FOR DRAWING LANDMARKS (HAND_CONNECTIONS HELP TO JOIN THE 21 POINTS TO THE RESPECTIVE POINTS)
mpDraw.draw_landmarks(img,handlms,mpHands.HAND_CONNECTIONS);
ctime = time.time()
fps= 1/(ctime-ptime)
ptime = ctime
# HERE WE ARE DISPLAYING THE FPS ALONG WITH THE VIDEO
cv2.putText(img, str(int(fps)), (10,70), cv2.FONT_HERSHEY_PLAIN,3,(255,0,255),3)
# TO DISPLAY THE FRAME
cv2.imshow("Hand Detector WebCam",img);
if not success:
break;
# IF USER PRESS Q THEN WE HAVE TO QUIT
if cv2.waitKey(1) & 0xFF==ord("q"):
break;
# When Everything Done Release the capture
cap.release()
# Destroy All the windows
cv2.destroyAllWindows() | 0.167627 | 0.055056 |
try:
from django.conf.urls import url
except ImportError:
from django.urls import re_path as url
from django.conf import settings
# Decide at import time whether URL names/patterns should be translatable.
# When POSTMAN_I18N_URLS is enabled in settings, use Django's lazy
# contextual translation; otherwise substitute a no-op with the same
# call signature so the rest of the module can call it unconditionally.
if getattr(settings, 'POSTMAN_I18N_URLS', False):
    from django.utils.translation import pgettext_lazy
else:
    def pgettext_lazy(c, m):
        """No-op stand-in for Django's ``pgettext_lazy``.

        Ignores the context *c* and returns the message *m* unchanged.
        """
        return m
from django.urls import path
# NOTE: `url` is already bound by the try/except compatibility shim at the
# top of this file.  Re-importing it unconditionally from django.conf.urls
# would raise ImportError on Django >= 4.0 (where that alias was removed)
# and defeat the shim entirely, so the legacy import is guarded the same way.
try:
    from django.conf.urls import url  # Django < 4.0
except ImportError:
    from django.urls import re_path as url  # Django >= 4.0
from django.urls import reverse_lazy
from .views import visualizations, SchoolRegistration, WeeklyActivityClassroom, delete_activity, TeacherScheduleView, delete_schedule, add_students_new_classroom, Student_Profile, SingleClassroom, CreateEvent, add_students_classroom, Import_Data, School_Register, CreateUpdate, School_Profile, Student_Profiles, Profile, Admin_Register, Teacher_Register, Student_Register, Parent_Register, create_grade, create_classroom, Create_School_Lesson, CreateAssessment, CreateActivity, UserList, WeeklyActivity, SingleActivity, CreateWeeklyActivity, login_user, logout_user, RoleRegistrations, AddStudentAssessment, delete_update
from quiz.views import landing, blog
from django.contrib.auth import views as auth_views
# HACK: star imports pull the messaging and calendar views (InboxView,
# MessageCreateView, ThreadView, ...) into this namespace for urlpatterns
# below; kept as-is because the exact exported names are not visible here.
from pinax.messages.views import *
from cal.views import *
from .views import dash, dash_ajax
urlpatterns = [
url(r"^inbox/$",
view=InboxView.as_view(),
name="inbox"),
url(r"^create/$",
view=MessageCreateView.as_view(),
name="message_create"),
url(r"^create/(?P<user_id>\d+)/$",
view=MessageCreateView.as_view(),
name="message_user_create"),
url(r"^thread/(?P<pk>\d+)/$",
view=ThreadView.as_view(),
name="thread_detail"),
url(r"^thread/(?P<pk>\d+)/delete/$",
view=ThreadDeleteView.as_view(),
name="thread_delete"),
url(r'^$',
view=landing.as_view(),
name='landing'),
url(r'home/(?P<school_url>[\w-]+)/create-update/$',
view=CreateUpdate,
name='create_update'),
url(r'home/(?P<school_url>[\w-]+)/create-event/$',
view=CreateEvent,
name='create_event'),
url(r'home/(?P<school_url>[\w-]+)/school-registration/$',
view=SchoolRegistration.as_view(),
name='school_registration'),
url(r'registrations/(?P<school_url>[\w-]+)/',
view=RoleRegistrations.as_view(),
name='role_registration'),
url(r'import/import/',
view=Import_Data,
name='import'),
url('login/(?P<school_url>[\w-]+)',
view=login_user,
name='login'),
url('logout/(?P<school_url>[\w-]+)',
view=logout_user,
name='logout'),
url('update-delete/(?P<school_url>[\w-]+)/(?P<update_id>[\w-]+)',
view=delete_update,
name='delete_update'),
url('activity-delete/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<activity_id>[\w-]+)',
view=delete_activity,
name='delete_activity'),
url(r'^school-register/',
view=School_Register,
name='school_register'),
url(r'^grade/(?P<school_url>[\w-]+)/create_grade/',
view=create_grade,
name='create_grade'),
url(r'^class/(?P<school_url>[\w-]+)/create_classroom/',
view=create_classroom,
name='create_classroom'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<classroom_id>[\w-]+)/classroom/',
view=SingleClassroom,
name='single_classroom'),
url(r'^visualizations/',
view=visualizations,
name='visualizations'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/teacher_schedule/',
view=TeacherScheduleView,
name='teacher_scheduleview'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<schedule_id>[\w-]+)/schedule_delete/',
view=delete_schedule,
name='teacher_scheduledelete'),
url(r'add-students-new/(?P<school_url>[\w-]+)/(?P<classroom_url>[\w-]+)/(?P<grade_level>[\w-]+)/create-new-classroom/',
view=add_students_new_classroom,
name='add_students_new'),
url(r'add-students/(?P<school_url>[\w-]+)/(?P<grade_level>[\w-]+)/(?P<classroom_id>[\w-]+)/create-classroom/',
view=add_students_classroom,
name='add_students'),
url(r'^(?P<school_url>[\w-]+)/admin-register/',
view=Admin_Register,
name='admin_register'),
url(r'^(?P<school_url>[\w-]+)/parent-register/',
view=Parent_Register,
name='parent_register'),
url(r'^(?P<school_url>[\w-]+)/student-register/',
view=Student_Register,
name='student_register'),
url(r'^(?P<school_url>[\w-]+)/teacher-register/',
view=Teacher_Register,
name='teacher_register'),
url(r'^(?P<school_url>[\w-]+)/parent-register/',
view=Parent_Register,
name='parent_register'),
url(r'^(?P<school_url>[\w-]+)/student_profiles/',
view=Student_Profiles.as_view(),
name='Student_Profiles'),
url(r'student/(?P<school_url>[\w-]+)/(?P<student_id>[\w-]+)/',
view=Student_Profile,
name='Student'),
url(r'user-profile/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/',
view=Profile.as_view(),
name='profile'),
url(r'^school_lesson/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<schedule_id>[\w-]+)/',
view=Create_School_Lesson,
name='school_lesson'),
url(r'^school_lesson/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/',
view=Create_School_Lesson,
name='school_lesson_week'),
url(r'^assessment/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/',
view=CreateAssessment,
name='assessment'),
url(r'^student-assessment/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<assessment_id>[\w-]+)/',
view=AddStudentAssessment,
name='addstudentassessment'),
url(r'^l/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/activity/',
view=CreateWeeklyActivity,
name='weeklyactivitycreate'),
url(r'^create/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<username>[\w-]+)/activity/',
view=CreateActivity,
name='activity'),
url(r'^weekly/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/activity/',
view=WeeklyActivity,
name='weekly_activity'),
url(r'weekly-classroom/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/(?P<classroom_id>[\w-]+)/activity/',
view=WeeklyActivityClassroom,
name='weekly_activity_classroom'),
url(r'^l/(?P<school_url>[\w-]+)/activity/(?P<activity_id>[\w-]+)/',
view=SingleActivity,
name='single_activity'),
url(r'^users/(?P<school_url>[\w-]+)/',
view=UserList,
name='user_list'),
url(r'^blog/$',
view=blog.as_view(),
name='blog'),
url(r'^cal_index/$',
view=cal_index, name='cal_index'),
url(r'^cal_search/$',
view=EventSearchListView.as_view(),
name='event_search_list_view'),
url(r'^calendar/$',
view=CalendarView.as_view(),
name='calendar'),
path('dash-', dash),
path('_dash-', dash_ajax),
url(r'(?P<school_url>[\w-]+)',
view=School_Profile.as_view(),
name='school_profile'),
] | schoolio/urls.py | try:
from django.conf.urls import url
except ImportError:
from django.urls import re_path as url
from django.conf import settings
if getattr(settings, 'POSTMAN_I18N_URLS', False):
from django.utils.translation import pgettext_lazy
else:
def pgettext_lazy(c, m): return m
from django.urls import path
from django.conf.urls import url
from django.urls import reverse_lazy
from .views import visualizations, SchoolRegistration, WeeklyActivityClassroom, delete_activity, TeacherScheduleView, delete_schedule, add_students_new_classroom, Student_Profile, SingleClassroom, CreateEvent, add_students_classroom, Import_Data, School_Register, CreateUpdate, School_Profile, Student_Profiles, Profile, Admin_Register, Teacher_Register, Student_Register, Parent_Register, create_grade, create_classroom, Create_School_Lesson, CreateAssessment, CreateActivity, UserList, WeeklyActivity, SingleActivity, CreateWeeklyActivity, login_user, logout_user, RoleRegistrations, AddStudentAssessment, delete_update
from quiz.views import landing, blog
from django.contrib.auth import views as auth_views
from pinax.messages.views import *
from cal.views import *
from .views import dash, dash_ajax
urlpatterns = [
url(r"^inbox/$",
view=InboxView.as_view(),
name="inbox"),
url(r"^create/$",
view=MessageCreateView.as_view(),
name="message_create"),
url(r"^create/(?P<user_id>\d+)/$",
view=MessageCreateView.as_view(),
name="message_user_create"),
url(r"^thread/(?P<pk>\d+)/$",
view=ThreadView.as_view(),
name="thread_detail"),
url(r"^thread/(?P<pk>\d+)/delete/$",
view=ThreadDeleteView.as_view(),
name="thread_delete"),
url(r'^$',
view=landing.as_view(),
name='landing'),
url(r'home/(?P<school_url>[\w-]+)/create-update/$',
view=CreateUpdate,
name='create_update'),
url(r'home/(?P<school_url>[\w-]+)/create-event/$',
view=CreateEvent,
name='create_event'),
url(r'home/(?P<school_url>[\w-]+)/school-registration/$',
view=SchoolRegistration.as_view(),
name='school_registration'),
url(r'registrations/(?P<school_url>[\w-]+)/',
view=RoleRegistrations.as_view(),
name='role_registration'),
url(r'import/import/',
view=Import_Data,
name='import'),
url('login/(?P<school_url>[\w-]+)',
view=login_user,
name='login'),
url('logout/(?P<school_url>[\w-]+)',
view=logout_user,
name='logout'),
url('update-delete/(?P<school_url>[\w-]+)/(?P<update_id>[\w-]+)',
view=delete_update,
name='delete_update'),
url('activity-delete/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<activity_id>[\w-]+)',
view=delete_activity,
name='delete_activity'),
url(r'^school-register/',
view=School_Register,
name='school_register'),
url(r'^grade/(?P<school_url>[\w-]+)/create_grade/',
view=create_grade,
name='create_grade'),
url(r'^class/(?P<school_url>[\w-]+)/create_classroom/',
view=create_classroom,
name='create_classroom'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<classroom_id>[\w-]+)/classroom/',
view=SingleClassroom,
name='single_classroom'),
url(r'^visualizations/',
view=visualizations,
name='visualizations'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/teacher_schedule/',
view=TeacherScheduleView,
name='teacher_scheduleview'),
url(r'^class/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<schedule_id>[\w-]+)/schedule_delete/',
view=delete_schedule,
name='teacher_scheduledelete'),
url(r'add-students-new/(?P<school_url>[\w-]+)/(?P<classroom_url>[\w-]+)/(?P<grade_level>[\w-]+)/create-new-classroom/',
view=add_students_new_classroom,
name='add_students_new'),
url(r'add-students/(?P<school_url>[\w-]+)/(?P<grade_level>[\w-]+)/(?P<classroom_id>[\w-]+)/create-classroom/',
view=add_students_classroom,
name='add_students'),
url(r'^(?P<school_url>[\w-]+)/admin-register/',
view=Admin_Register,
name='admin_register'),
url(r'^(?P<school_url>[\w-]+)/parent-register/',
view=Parent_Register,
name='parent_register'),
url(r'^(?P<school_url>[\w-]+)/student-register/',
view=Student_Register,
name='student_register'),
url(r'^(?P<school_url>[\w-]+)/teacher-register/',
view=Teacher_Register,
name='teacher_register'),
url(r'^(?P<school_url>[\w-]+)/parent-register/',
view=Parent_Register,
name='parent_register'),
url(r'^(?P<school_url>[\w-]+)/student_profiles/',
view=Student_Profiles.as_view(),
name='Student_Profiles'),
url(r'student/(?P<school_url>[\w-]+)/(?P<student_id>[\w-]+)/',
view=Student_Profile,
name='Student'),
url(r'user-profile/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/',
view=Profile.as_view(),
name='profile'),
url(r'^school_lesson/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<schedule_id>[\w-]+)/',
view=Create_School_Lesson,
name='school_lesson'),
url(r'^school_lesson/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/',
view=Create_School_Lesson,
name='school_lesson_week'),
url(r'^assessment/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/',
view=CreateAssessment,
name='assessment'),
url(r'^student-assessment/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<assessment_id>[\w-]+)/',
view=AddStudentAssessment,
name='addstudentassessment'),
url(r'^l/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/activity/',
view=CreateWeeklyActivity,
name='weeklyactivitycreate'),
url(r'^create/(?P<school_url>[\w-]+)/(?P<planning_id>[\w-]+)/(?P<username>[\w-]+)/activity/',
view=CreateActivity,
name='activity'),
url(r'^weekly/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/activity/',
view=WeeklyActivity,
name='weekly_activity'),
url(r'weekly-classroom/(?P<school_url>[\w-]+)/(?P<username>[\w-]+)/(?P<week_of>[\w-]+)/(?P<classroom_id>[\w-]+)/activity/',
view=WeeklyActivityClassroom,
name='weekly_activity_classroom'),
url(r'^l/(?P<school_url>[\w-]+)/activity/(?P<activity_id>[\w-]+)/',
view=SingleActivity,
name='single_activity'),
url(r'^users/(?P<school_url>[\w-]+)/',
view=UserList,
name='user_list'),
url(r'^blog/$',
view=blog.as_view(),
name='blog'),
url(r'^cal_index/$',
view=cal_index, name='cal_index'),
url(r'^cal_search/$',
view=EventSearchListView.as_view(),
name='event_search_list_view'),
url(r'^calendar/$',
view=CalendarView.as_view(),
name='calendar'),
path('dash-', dash),
path('_dash-', dash_ajax),
url(r'(?P<school_url>[\w-]+)',
view=School_Profile.as_view(),
name='school_profile'),
] | 0.121973 | 0.076857 |
from rest_framework import generics, authentication
from rest_framework import permissions, status, viewsets
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.generics import get_object_or_404
from rest_framework.authentication import TokenAuthentication
from user.serializers import UserSerializer, AuthTokenSerializer
from user.serializers import UserSerializerRetrieve
from user.serializers import ActivationAccountSerializer
from user.serializers import PasswordRecoverySerializer
from user.serializers import PasswordRecoveryConfirmSerializer
from core.models import CodeActivation, User, Biography
from core.tokens import decode_user_id
class CreateUserView(generics.CreateAPIView):
"""create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""create a new auth token for user"""
serializer_class = AuthTokenSerializer
render_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(viewsets.ModelViewSet):
"""manage the authenticated user"""
serializer_class = UserSerializer
serializer_class_re = UserSerializerRetrieve
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
queryset = User.objects.all()
def retrieve(self, request, pk=None):
queryset = User.objects.get(id=request.user.id)
serializer = self.serializer_class_re(queryset)
return Response(serializer.data)
def partial_update(self, request, *args, **kwargs):
try:
serializer = self.serializer_class_re(
request.user,
data=request.data,
partial=True
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(
{'data': 'current user updated'},
status=status.HTTP_200_OK
)
except User.DoesNotExist as err:
return Response(
{'error': f"{err}"},
status=status.HTTP_404_NOT_FOUND
)
class ActivationAccount(generics.UpdateAPIView):
"""
update:
update a current token to activate to current user
and create profile current user.
"""
serializer_class = ActivationAccountSerializer
queryset = ''
def update(self, request, *args, **kwargs):
"""
update token
"""
uid = self.kwargs.get('uid')
token = self.kwargs.get('token')
url_token = uid+'_'+token
try:
token = CodeActivation.objects.get(code_token=url_token)
except CodeActivation.DoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
if token.is_expired:
return Response(
data={'detail': 'Expired Token'},
status=status.HTTP_400_BAD_REQUEST,
)
decode_url_id = decode_user_id(uid)
user = get_object_or_404(User, id=decode_url_id)
user.is_active = True
user.save()
token.is_expired = True
token.save()
self.create_biography_user(user)
return Response(status=status.HTTP_200_OK)
def create_biography_user(self, user):
"""create biography(profile user)"""
return Biography.objects.create(user=user)
class PasswordRecovery(generics.CreateAPIView):
"""create and confirm password recovery"""
serializer_class = PasswordRecoverySerializer
render_classes = api_settings.DEFAULT_RENDERER_CLASSES
class PasswordRecoveryConfirm(generics.UpdateAPIView):
serializer_class = PasswordRecoveryConfirmSerializer
queryset = ''
def put(self, request, *args):
"""recovery password done"""
serializer = PasswordRecoveryConfirmSerializer(
data=request.data,
partial=True
)
if serializer.is_valid():
user_id_uid = decode_user_id(request.data.get('uid'))
current_user = get_object_or_404(User, id=user_id_uid)
current_user.set_password(request.data.get('password'))
current_user.save()
return Response(
{'successfuly': 'Password recovery successfuly'}
)
return Response({'error': serializer.errors})
class PasswordUpdate(viewsets.ModelViewSet):
serializer_class = PasswordRecoveryConfirmSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
queryset = ''
def update(self, request, pk=None):
password = request.data.get('password')
password_confirm = request.data.get('password_confirm')
if password != password_confirm:
return Response(
{'error': "Those passwords don't match."},
status=status.HTTP_400_BAD_REQUEST
)
current_user = User.objects.get(id=request.user.id)
current_user.set_password(request.data.get('password'))
current_user.save()
return Response(
{'data': 'password updated.'},
status=status.HTTP_200_OK
) | apiuser/user/views.py | from rest_framework import generics, authentication
from rest_framework import permissions, status, viewsets
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.generics import get_object_or_404
from rest_framework.authentication import TokenAuthentication
from user.serializers import UserSerializer, AuthTokenSerializer
from user.serializers import UserSerializerRetrieve
from user.serializers import ActivationAccountSerializer
from user.serializers import PasswordRecoverySerializer
from user.serializers import PasswordRecoveryConfirmSerializer
from core.models import CodeActivation, User, Biography
from core.tokens import decode_user_id
class CreateUserView(generics.CreateAPIView):
"""create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""create a new auth token for user"""
serializer_class = AuthTokenSerializer
render_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(viewsets.ModelViewSet):
"""manage the authenticated user"""
serializer_class = UserSerializer
serializer_class_re = UserSerializerRetrieve
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
queryset = User.objects.all()
def retrieve(self, request, pk=None):
queryset = User.objects.get(id=request.user.id)
serializer = self.serializer_class_re(queryset)
return Response(serializer.data)
def partial_update(self, request, *args, **kwargs):
try:
serializer = self.serializer_class_re(
request.user,
data=request.data,
partial=True
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(
{'data': 'current user updated'},
status=status.HTTP_200_OK
)
except User.DoesNotExist as err:
return Response(
{'error': f"{err}"},
status=status.HTTP_404_NOT_FOUND
)
class ActivationAccount(generics.UpdateAPIView):
"""
update:
update a current token to activate to current user
and create profile current user.
"""
serializer_class = ActivationAccountSerializer
queryset = ''
def update(self, request, *args, **kwargs):
"""
update token
"""
uid = self.kwargs.get('uid')
token = self.kwargs.get('token')
url_token = uid+'_'+token
try:
token = CodeActivation.objects.get(code_token=url_token)
except CodeActivation.DoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
if token.is_expired:
return Response(
data={'detail': 'Expired Token'},
status=status.HTTP_400_BAD_REQUEST,
)
decode_url_id = decode_user_id(uid)
user = get_object_or_404(User, id=decode_url_id)
user.is_active = True
user.save()
token.is_expired = True
token.save()
self.create_biography_user(user)
return Response(status=status.HTTP_200_OK)
def create_biography_user(self, user):
"""create biography(profile user)"""
return Biography.objects.create(user=user)
class PasswordRecovery(generics.CreateAPIView):
"""create and confirm password recovery"""
serializer_class = PasswordRecoverySerializer
render_classes = api_settings.DEFAULT_RENDERER_CLASSES
class PasswordRecoveryConfirm(generics.UpdateAPIView):
serializer_class = PasswordRecoveryConfirmSerializer
queryset = ''
def put(self, request, *args):
"""recovery password done"""
serializer = PasswordRecoveryConfirmSerializer(
data=request.data,
partial=True
)
if serializer.is_valid():
user_id_uid = decode_user_id(request.data.get('uid'))
current_user = get_object_or_404(User, id=user_id_uid)
current_user.set_password(request.data.get('password'))
current_user.save()
return Response(
{'successfuly': 'Password recovery successfuly'}
)
return Response({'error': serializer.errors})
class PasswordUpdate(viewsets.ModelViewSet):
serializer_class = PasswordRecoveryConfirmSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
queryset = ''
def update(self, request, pk=None):
password = request.data.get('password')
password_confirm = request.data.get('password_confirm')
if password != password_confirm:
return Response(
{'error': "Those passwords don't match."},
status=status.HTTP_400_BAD_REQUEST
)
current_user = User.objects.get(id=request.user.id)
current_user.set_password(request.data.get('password'))
current_user.save()
return Response(
{'data': 'password updated.'},
status=status.HTTP_200_OK
) | 0.683208 | 0.091099 |
import wave
import winsound
import struct
import sys
import os
os.chdir(os.path.dirname(os.path.abspath( __file__ )))
# Yields lines narrower than 80 chars
def frmt16(st):
line = ''
while st:
value = (int(struct.unpack("<h", st[0:2])[0]))
value = (value%2**16) if value>=0 else (value%2**16-2**15)
s_value = '0x%04X' % (value >> 4)
st = st[2:]
if len(line)+len(s_value)+2+4 > 80:
yield (line + '\n')
line = ''
line += s_value + ', '
yield line
# Yields lines narrower than 80 chars.
def frmt8(st):
line = ''
while st:
value = ('0x%02X' % (int(struct.unpack("B", st[0:1])[0])) )
st = st[1:]
if len(line)+len(value)+2+4 > 80:
yield (line + '\n')
line = ''
line += value + ', '
yield line
def main():
u = file("out.txt", "w")
for v in sys.argv:
u.write(v + "\n")
u.close()
if (len(sys.argv) == 1) or (sys.argv[1] == '-h') or (sys.argv[1] == '--help'):
print "Usage " + os.path.basename(sys.argv[0]) + " <inwave.wav> [outfile.c]"
print "Note that existing c files will be overwritten mercilessly.\n"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "Could not find file " + sys.argv[1]
sys.exit(1)
w = wave.open(sys.argv[1], 'r')
frames = w.readframes(w.getnframes())
frate = w.getframerate()
width = w.getsampwidth()
ch = w.getnchannels()
comp = w.getcomptype()
if comp != "NONE":
raw_input("Cannot process compressed audio. Press enter to exit")
sys.exit(1)
if ch > 1:
print "This script works only for mono."
sys.exit(1)
if len(sys.argv) == 3:
filename = sys.argv[2]
else:
filename = sys.argv[1]
filename_pretty = os.path.splitext(os.path.basename(filename))[0]
f = file(filename_pretty + '.c', 'w')
f.write("// %d frames, %d samples/sec, %d bit/sample \n" %(w.getnframes(), frate, width*8))
f.write("uint"+width*8+"_t " + filename_pretty + "["+str(len(frames)//width)+"] PROGMEM = {\n")
frmt = frmt8 if width == 1 else frmt16
out = ""
for k in frmt(frames):
out += "\t"+k
f.write(out[0:-2]+"\n};\n")
f.close()
main() | xdk-asf-3.51.0/xmega/applications/xmega_a1_xplained_demo/utils/wav2array.py | import wave
import winsound
import struct
import sys
import os
os.chdir(os.path.dirname(os.path.abspath( __file__ )))
# Yields lines narrower than 80 chars
def frmt16(st):
line = ''
while st:
value = (int(struct.unpack("<h", st[0:2])[0]))
value = (value%2**16) if value>=0 else (value%2**16-2**15)
s_value = '0x%04X' % (value >> 4)
st = st[2:]
if len(line)+len(s_value)+2+4 > 80:
yield (line + '\n')
line = ''
line += s_value + ', '
yield line
# Yields lines narrower than 80 chars.
def frmt8(st):
line = ''
while st:
value = ('0x%02X' % (int(struct.unpack("B", st[0:1])[0])) )
st = st[1:]
if len(line)+len(value)+2+4 > 80:
yield (line + '\n')
line = ''
line += value + ', '
yield line
def main():
u = file("out.txt", "w")
for v in sys.argv:
u.write(v + "\n")
u.close()
if (len(sys.argv) == 1) or (sys.argv[1] == '-h') or (sys.argv[1] == '--help'):
print "Usage " + os.path.basename(sys.argv[0]) + " <inwave.wav> [outfile.c]"
print "Note that existing c files will be overwritten mercilessly.\n"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "Could not find file " + sys.argv[1]
sys.exit(1)
w = wave.open(sys.argv[1], 'r')
frames = w.readframes(w.getnframes())
frate = w.getframerate()
width = w.getsampwidth()
ch = w.getnchannels()
comp = w.getcomptype()
if comp != "NONE":
raw_input("Cannot process compressed audio. Press enter to exit")
sys.exit(1)
if ch > 1:
print "This script works only for mono."
sys.exit(1)
if len(sys.argv) == 3:
filename = sys.argv[2]
else:
filename = sys.argv[1]
filename_pretty = os.path.splitext(os.path.basename(filename))[0]
f = file(filename_pretty + '.c', 'w')
f.write("// %d frames, %d samples/sec, %d bit/sample \n" %(w.getnframes(), frate, width*8))
f.write("uint"+width*8+"_t " + filename_pretty + "["+str(len(frames)//width)+"] PROGMEM = {\n")
frmt = frmt8 if width == 1 else frmt16
out = ""
for k in frmt(frames):
out += "\t"+k
f.write(out[0:-2]+"\n};\n")
f.close()
main() | 0.041491 | 0.088505 |
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>, <NAME> '
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>'
import os
import argparse
import xml.etree.ElementTree as ET
from xml.dom import minidom
import copy
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('out_dir', help='system output directory')
parser.add_argument('model_dir', help='human summaries directory')
parser.add_argument('rouge_config_file', help='ROUGE configuration file')
return parser.parse_args()
###### Template is of this format ######
# <EVAL ID="D1001-A.M.100.A">
# <PEER-ROOT>/dropbox/18-19/573/Data/mydata</PEER-ROOT>
# <MODEL-ROOT>/dropbox/18-19/573/Data/models/devtest/</MODEL-ROOT>
# <INPUT-FORMAT TYPE="SPL"/>
# <PEERS>
# <P ID="1">D1001-A.M.100.A.1</P>
# </PEERS>
# <MODELS>
# <M ID="A">D1001-A.M.100.A.A</M>
# <M ID="B">D1001-A.M.100.A.B</M>
# <M ID="F">D1001-A.M.100.A.F</M>
# <M ID="H">D1001-A.M.100.A.H</M>
# </MODELS>
# </EVAL>
def create_elem_template(out_dir, model_dir):
template = ET.Element('EVAL')
peer_root = ET.Element('PEER-ROOT')
peer_root.text = out_dir
model_root = ET.Element('MODEL-ROOT')
model_root.text = model_dir
input_format = ET.Element('INPUT-FORMAT', {'TYPE': 'SPL'})
peers = ET.Element('PEERS')
models = ET.Element('MODELS')
template.append(peer_root)
template.append(model_root)
template.append(input_format)
template.append(peers)
template.append(models)
return template
def create_xml_tree(out_dir, model_dir):
template = create_elem_template(out_dir, model_dir)
out_dir_list = sorted(os.listdir(out_dir))
model_dir_dict = {}
for model_sum_name in os.listdir(model_dir):
eval_id, p_id = model_sum_name.rsplit('.', 1)
if eval_id not in model_dir_dict:
model_dir_dict[eval_id] = []
model_dir_dict[eval_id].append(model_sum_name)
# build tree
root = ET.Element('ROUGE_EVAL', {'version': '1.5.5'})
for sys_sum_name in out_dir_list:
eval_elem = copy.deepcopy(template)
eval_id, p_id = sys_sum_name.rsplit('.', 1)
eval_elem.set('ID', eval_id)
peers = eval_elem.find('PEERS')
models = eval_elem.find('MODELS')
p = ET.Element('P', {'ID': p_id})
p.text = sys_sum_name
peers.append(p)
if eval_id in model_dir_dict:
for model_sum_name in sorted(model_dir_dict[eval_id]):
m_id = model_sum_name.rsplit('.', 1)[1]
m = ET.Element('M', {'ID': m_id})
m.text = model_sum_name
models.append(m)
if len(models) > 0: #we have gold examples to compare against!
root.append(eval_elem)
return root
def create_config_file(out_dir, model_dir, config_file):
root = create_xml_tree(out_dir, model_dir)
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml()
with open(config_file, 'w') as f:
f.write(xmlstr[23:])
f.write('\n')
def main():
args = parse_args()
create_config_file(args.out_dir, args.model_dir, args.rouge_config_file)
if __name__ == '__main__':
main() | src/ROUGE/create_config.py | __author__ = '<NAME>, <NAME>, <NAME>, <NAME>, <NAME> '
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>'
import os
import argparse
import xml.etree.ElementTree as ET
from xml.dom import minidom
import copy
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('out_dir', help='system output directory')
parser.add_argument('model_dir', help='human summaries directory')
parser.add_argument('rouge_config_file', help='ROUGE configuration file')
return parser.parse_args()
###### Template is of this format ######
# <EVAL ID="D1001-A.M.100.A">
# <PEER-ROOT>/dropbox/18-19/573/Data/mydata</PEER-ROOT>
# <MODEL-ROOT>/dropbox/18-19/573/Data/models/devtest/</MODEL-ROOT>
# <INPUT-FORMAT TYPE="SPL"/>
# <PEERS>
# <P ID="1">D1001-A.M.100.A.1</P>
# </PEERS>
# <MODELS>
# <M ID="A">D1001-A.M.100.A.A</M>
# <M ID="B">D1001-A.M.100.A.B</M>
# <M ID="F">D1001-A.M.100.A.F</M>
# <M ID="H">D1001-A.M.100.A.H</M>
# </MODELS>
# </EVAL>
def create_elem_template(out_dir, model_dir):
template = ET.Element('EVAL')
peer_root = ET.Element('PEER-ROOT')
peer_root.text = out_dir
model_root = ET.Element('MODEL-ROOT')
model_root.text = model_dir
input_format = ET.Element('INPUT-FORMAT', {'TYPE': 'SPL'})
peers = ET.Element('PEERS')
models = ET.Element('MODELS')
template.append(peer_root)
template.append(model_root)
template.append(input_format)
template.append(peers)
template.append(models)
return template
def create_xml_tree(out_dir, model_dir):
template = create_elem_template(out_dir, model_dir)
out_dir_list = sorted(os.listdir(out_dir))
model_dir_dict = {}
for model_sum_name in os.listdir(model_dir):
eval_id, p_id = model_sum_name.rsplit('.', 1)
if eval_id not in model_dir_dict:
model_dir_dict[eval_id] = []
model_dir_dict[eval_id].append(model_sum_name)
# build tree
root = ET.Element('ROUGE_EVAL', {'version': '1.5.5'})
for sys_sum_name in out_dir_list:
eval_elem = copy.deepcopy(template)
eval_id, p_id = sys_sum_name.rsplit('.', 1)
eval_elem.set('ID', eval_id)
peers = eval_elem.find('PEERS')
models = eval_elem.find('MODELS')
p = ET.Element('P', {'ID': p_id})
p.text = sys_sum_name
peers.append(p)
if eval_id in model_dir_dict:
for model_sum_name in sorted(model_dir_dict[eval_id]):
m_id = model_sum_name.rsplit('.', 1)[1]
m = ET.Element('M', {'ID': m_id})
m.text = model_sum_name
models.append(m)
if len(models) > 0: #we have gold examples to compare against!
root.append(eval_elem)
return root
def create_config_file(out_dir, model_dir, config_file):
root = create_xml_tree(out_dir, model_dir)
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml()
with open(config_file, 'w') as f:
f.write(xmlstr[23:])
f.write('\n')
def main():
args = parse_args()
create_config_file(args.out_dir, args.model_dir, args.rouge_config_file)
if __name__ == '__main__':
main() | 0.17971 | 0.066025 |
import os
import numpy as np
from pynwb import register_class, docval, get_class
from pynwb.core import VectorIndex, VectorData, DynamicTable, ElementIdentifiers
from hdmf.utils import call_docval_func
from pynwb import load_namespaces
name = 'ndx-simulation-output'
here = os.path.abspath(os.path.dirname(__file__))
ns_path = os.path.join(here, 'spec', name + '.namespace.yaml')
load_namespaces(ns_path)
def create_ragged_array(name, values):
"""
:param values: list of lists
:return:
"""
vector_data = VectorData(
name, 'indicates which compartments the data refers to',
[item for sublist in values for item in sublist])
vector_index = VectorIndex(
name + '_index', np.cumsum([len(x) for x in values]), target=vector_data)
return vector_data, vector_index
@register_class('Compartments', name)
class Compartments(DynamicTable):
__columns__ = (
{'name': 'number', 'index': True,
'description': 'cell compartment ids corresponding to a each column in the data'},
{'name': 'position', 'index': True,
'description': 'the observation intervals for each unit'},
{'name': 'label', 'description': 'the electrodes that each spike unit came from',
'index': True, 'table': True}
)
@docval({'name': 'name', 'type': str, 'doc': 'Name of this Compartments object',
'default': 'compartments'},
{'name': 'id', 'type': ('array_data', ElementIdentifiers),
'doc': 'the identifiers for the units stored in this interface', 'default': None},
{'name': 'columns', 'type': (tuple, list), 'doc': 'the columns in this table', 'default': None},
{'name': 'colnames', 'type': 'array_data', 'doc': 'the names of the columns in this table',
'default': None},
{'name': 'description', 'type': str, 'doc': 'a description of what is in this table', 'default': None},
)
def __init__(self, **kwargs):
if kwargs.get('description', None) is None:
kwargs['description'] = "data on spiking units"
call_docval_func(super(Compartments, self).__init__, kwargs)
@staticmethod
def _compartment_finder(cell_compartments, cond, dtype, start_ind):
cell_compartments = np.array(cell_compartments)
if isinstance(cond, dtype):
return start_ind + np.where(cell_compartments == cond)[0]
else:
return np.array([start_ind + np.where(cell_compartments == x)[0] for x in cond]).ravel()
def find_compartments(self, cell, compartment_numbers=None, compartment_labels=None):
"""
Parameters
----------
cell: int
find indices of compartments of this cell
compartment_numbers: int | Iterable(int) (optional)
where these are (this is) the compartment(s)
compartment_labels: str | Iterable(str) (optional)
or where these are (this is) the label(s)
Returns
-------
np.array(dtype=int)
"""
if compartment_numbers is not None and compartment_labels is not None:
raise ValueError('you cannot specify both compartments and compartment_labels')
if cell == 0:
start_ind = 0
else:
start_ind = self.compartments['number_index'].data[cell-1]
cell_compartments = self.compartments['number'][cell]
if compartment_numbers is not None:
return self._compartment_finder(cell_compartments, compartment_numbers, int, start_ind)
elif compartment_labels is not None:
return self._compartment_finder(cell_compartments, compartment_labels, str, start_ind)
else:
return np.arange(start_ind, start_ind + len(cell_compartments), dtype=int)
CompartmentSeries = get_class('CompartmentSeries', name)
CompartmentSeries._compartment_finder = _compartment_finder
CompartmentSeries.find_compartments = find_compartments
SimulationMetaData = get_class('SimulationMetaData', name) | src/pynwb/ndx_simulation_output/simulation_output.py | import os
import numpy as np
from pynwb import register_class, docval, get_class
from pynwb.core import VectorIndex, VectorData, DynamicTable, ElementIdentifiers
from hdmf.utils import call_docval_func
from pynwb import load_namespaces
name = 'ndx-simulation-output'
here = os.path.abspath(os.path.dirname(__file__))
ns_path = os.path.join(here, 'spec', name + '.namespace.yaml')
load_namespaces(ns_path)
def create_ragged_array(name, values):
"""
:param values: list of lists
:return:
"""
vector_data = VectorData(
name, 'indicates which compartments the data refers to',
[item for sublist in values for item in sublist])
vector_index = VectorIndex(
name + '_index', np.cumsum([len(x) for x in values]), target=vector_data)
return vector_data, vector_index
@register_class('Compartments', name)
class Compartments(DynamicTable):
__columns__ = (
{'name': 'number', 'index': True,
'description': 'cell compartment ids corresponding to a each column in the data'},
{'name': 'position', 'index': True,
'description': 'the observation intervals for each unit'},
{'name': 'label', 'description': 'the electrodes that each spike unit came from',
'index': True, 'table': True}
)
@docval({'name': 'name', 'type': str, 'doc': 'Name of this Compartments object',
'default': 'compartments'},
{'name': 'id', 'type': ('array_data', ElementIdentifiers),
'doc': 'the identifiers for the units stored in this interface', 'default': None},
{'name': 'columns', 'type': (tuple, list), 'doc': 'the columns in this table', 'default': None},
{'name': 'colnames', 'type': 'array_data', 'doc': 'the names of the columns in this table',
'default': None},
{'name': 'description', 'type': str, 'doc': 'a description of what is in this table', 'default': None},
)
def __init__(self, **kwargs):
if kwargs.get('description', None) is None:
kwargs['description'] = "data on spiking units"
call_docval_func(super(Compartments, self).__init__, kwargs)
@staticmethod
def _compartment_finder(cell_compartments, cond, dtype, start_ind):
cell_compartments = np.array(cell_compartments)
if isinstance(cond, dtype):
return start_ind + np.where(cell_compartments == cond)[0]
else:
return np.array([start_ind + np.where(cell_compartments == x)[0] for x in cond]).ravel()
def find_compartments(self, cell, compartment_numbers=None, compartment_labels=None):
"""
Parameters
----------
cell: int
find indices of compartments of this cell
compartment_numbers: int | Iterable(int) (optional)
where these are (this is) the compartment(s)
compartment_labels: str | Iterable(str) (optional)
or where these are (this is) the label(s)
Returns
-------
np.array(dtype=int)
"""
if compartment_numbers is not None and compartment_labels is not None:
raise ValueError('you cannot specify both compartments and compartment_labels')
if cell == 0:
start_ind = 0
else:
start_ind = self.compartments['number_index'].data[cell-1]
cell_compartments = self.compartments['number'][cell]
if compartment_numbers is not None:
return self._compartment_finder(cell_compartments, compartment_numbers, int, start_ind)
elif compartment_labels is not None:
return self._compartment_finder(cell_compartments, compartment_labels, str, start_ind)
else:
return np.arange(start_ind, start_ind + len(cell_compartments), dtype=int)
CompartmentSeries = get_class('CompartmentSeries', name)
CompartmentSeries._compartment_finder = _compartment_finder
CompartmentSeries.find_compartments = find_compartments
SimulationMetaData = get_class('SimulationMetaData', name) | 0.705379 | 0.409929 |
import pyarabic.araby as araby
import unicodedata as ud
import os
import nltk
import gensim
from gensim import corpora,models,similarities
import re
from sklearn.decomposition import PCA
from matplotlib import pyplot
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def punctuation_ar(txt): # this function for token all of the text with deleting punctuation ''.join(c for c in s if not ud.category(c).startswith('P'))
return ''.join(c for c in txt if not ud.category(c).startswith('P'))
def read_txt(name):
f=open(name,"r")
t=f.read()
t=t.decode("utf8")
t=araby.strip_tashkeel(t)
#print t
return t.split()
#creation du model
def create_model(lst):
if not(os.path.isfile("/media/rmimez/8A1CED061CECEE5F/etude/soutenance_M2/word2vec/programs/essayer/model.bin")) :
corpus=lst
print type(corpus)
print corpus
tok_corp=[nltk.word_tokenize(sent )for sent in corpus]
#(1->Skip-gram 0->CBOW)
model=gensim.models.Word2Vec(tok_corp,min_count=1,size=32,sg=0,iter=1000)
print model
model.save('model.bin')
def similar_word(model,word):
# find and print the most similar terms to a word
try:
most_similar = model.wv.most_similar( word )
for term, score in most_similar:
print (term,score)
return most_similar
except Exception as e:
print "this word not in vocabulary"
return None
def vocabular_word():
try:
word_vector = new_model.wv[ word ]
print word_vector
except Exception as e:
print "this word not in vocabulary"
raise e
# get a word vector
def graphic(model):
#representation
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
def tsne_plot(model):
"Creates and TSNE model and plots it"
labels = []
tokens = []
for word in model.wv.vocab:
tokens.append(model[word])
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i],y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.show()
path="/media/rmimez/8A1CED061CECEE5F/etude/soutenance_M2/word2vec/datasets/Farasa-master/WikiNewsTruth.txt"
create_model(read_txt(path))
# load model
new_model = models.Word2Vec.load('model.bin')
print(new_model)
word = "ﺪﻋﻭ".decode('utf8', errors='ignore')
#print similar_word(new_model,word)
#print list(new_model.wv.vocab)
for x in list(new_model.wv.vocab):
print x
"""
w1="ﺾﻔﺧ"
w2="ﻝﻭﺮﺘﺑ"
w3="ﺔﻳﻭدﻷ"
print new_model.wv.most_similar(positive=[w1,w2], negative=[w3])
"""
#graphic(new_model)
tsne_plot(new_model) | word2vec_Q_A.py |
import pyarabic.araby as araby
import unicodedata as ud
import os
import nltk
import gensim
from gensim import corpora,models,similarities
import re
from sklearn.decomposition import PCA
from matplotlib import pyplot
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def punctuation_ar(txt): # this function for token all of the text with deleting punctuation ''.join(c for c in s if not ud.category(c).startswith('P'))
return ''.join(c for c in txt if not ud.category(c).startswith('P'))
def read_txt(name):
f=open(name,"r")
t=f.read()
t=t.decode("utf8")
t=araby.strip_tashkeel(t)
#print t
return t.split()
#creation du model
def create_model(lst):
if not(os.path.isfile("/media/rmimez/8A1CED061CECEE5F/etude/soutenance_M2/word2vec/programs/essayer/model.bin")) :
corpus=lst
print type(corpus)
print corpus
tok_corp=[nltk.word_tokenize(sent )for sent in corpus]
#(1->Skip-gram 0->CBOW)
model=gensim.models.Word2Vec(tok_corp,min_count=1,size=32,sg=0,iter=1000)
print model
model.save('model.bin')
def similar_word(model,word):
# find and print the most similar terms to a word
try:
most_similar = model.wv.most_similar( word )
for term, score in most_similar:
print (term,score)
return most_similar
except Exception as e:
print "this word not in vocabulary"
return None
def vocabular_word():
try:
word_vector = new_model.wv[ word ]
print word_vector
except Exception as e:
print "this word not in vocabulary"
raise e
# get a word vector
def graphic(model):
#representation
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
def tsne_plot(model):
"Creates and TSNE model and plots it"
labels = []
tokens = []
for word in model.wv.vocab:
tokens.append(model[word])
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i],y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.show()
path="/media/rmimez/8A1CED061CECEE5F/etude/soutenance_M2/word2vec/datasets/Farasa-master/WikiNewsTruth.txt"
create_model(read_txt(path))
# load model
new_model = models.Word2Vec.load('model.bin')
print(new_model)
word = "ﺪﻋﻭ".decode('utf8', errors='ignore')
#print similar_word(new_model,word)
#print list(new_model.wv.vocab)
for x in list(new_model.wv.vocab):
print x
"""
w1="ﺾﻔﺧ"
w2="ﻝﻭﺮﺘﺑ"
w3="ﺔﻳﻭدﻷ"
print new_model.wv.most_similar(positive=[w1,w2], negative=[w3])
"""
#graphic(new_model)
tsne_plot(new_model) | 0.202719 | 0.245582 |
from conans.errors import ConanException
def _get_gnu_triplet(os_, arch, compiler=None):
"""
Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
:param os_: os to be used to create the triplet
:param arch: arch to be used to create the triplet
:param compiler: compiler used to create the triplet (only needed fo windows)
"""
if os_ == "Windows" and compiler is None:
raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
"needed for os=Windows")
# Calculate the arch
machine = {"x86": "i686" if os_ != "Linux" else "x86",
"x86_64": "x86_64",
"armv8": "aarch64",
"armv8_32": "aarch64", # https://wiki.linaro.org/Platform/arm64-ilp32
"armv8.3": "aarch64",
"asm.js": "asmjs",
"wasm": "wasm32",
}.get(arch, None)
if not machine:
# https://wiki.debian.org/Multiarch/Tuples
if os_ == "AIX":
if "ppc32" in arch:
machine = "rs6000"
elif "ppc64" in arch:
machine = "powerpc"
elif "arm" in arch:
machine = "arm"
elif "ppc32be" in arch:
machine = "powerpcbe"
elif "ppc64le" in arch:
machine = "powerpc64le"
elif "ppc64" in arch:
machine = "powerpc64"
elif "ppc32" in arch:
machine = "powerpc"
elif "mips64" in arch:
machine = "mips64"
elif "mips" in arch:
machine = "mips"
elif "sparcv9" in arch:
machine = "sparc64"
elif "sparc" in arch:
machine = "sparc"
elif "s390x" in arch:
machine = "s390x-ibm"
elif "s390" in arch:
machine = "s390-ibm"
elif "sh4" in arch:
machine = "sh4"
elif "e2k" in arch:
# https://lists.gnu.org/archive/html/config-patches/2015-03/msg00000.html
machine = "e2k-unknown"
if machine is None:
raise ConanException("Unknown '%s' machine, Conan doesn't know how to "
"translate it to the GNU triplet, please report at "
" https://github.com/conan-io/conan/issues" % arch)
# Calculate the OS
if compiler == "gcc":
windows_op = "w64-mingw32"
elif compiler == "Visual Studio":
windows_op = "windows-msvc"
else:
windows_op = "windows"
op_system = {"Windows": windows_op,
"Linux": "linux-gnu",
"Darwin": "apple-darwin",
"Android": "linux-android",
"Macos": "apple-darwin",
"iOS": "apple-ios",
"watchOS": "apple-watchos",
"tvOS": "apple-tvos",
# NOTE: it technically must be "asmjs-unknown-emscripten" or
# "wasm32-unknown-emscripten", but it's not recognized by old config.sub versions
"Emscripten": "local-emscripten",
"AIX": "ibm-aix",
"Neutrino": "nto-qnx"}.get(os_, os_.lower())
if os_ in ("Linux", "Android"):
if "arm" in arch and "armv8" not in arch:
op_system += "eabi"
if (arch == "armv5hf" or arch == "armv7hf") and os_ == "Linux":
op_system += "hf"
if arch == "armv8_32" and os_ == "Linux":
op_system += "_ilp32" # https://wiki.linaro.org/Platform/arm64-ilp32
return "%s-%s" % (machine, op_system) | conan/tools/gnu/get_gnu_triplet.py | from conans.errors import ConanException
def _get_gnu_triplet(os_, arch, compiler=None):
"""
Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
:param os_: os to be used to create the triplet
:param arch: arch to be used to create the triplet
:param compiler: compiler used to create the triplet (only needed fo windows)
"""
if os_ == "Windows" and compiler is None:
raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
"needed for os=Windows")
# Calculate the arch
machine = {"x86": "i686" if os_ != "Linux" else "x86",
"x86_64": "x86_64",
"armv8": "aarch64",
"armv8_32": "aarch64", # https://wiki.linaro.org/Platform/arm64-ilp32
"armv8.3": "aarch64",
"asm.js": "asmjs",
"wasm": "wasm32",
}.get(arch, None)
if not machine:
# https://wiki.debian.org/Multiarch/Tuples
if os_ == "AIX":
if "ppc32" in arch:
machine = "rs6000"
elif "ppc64" in arch:
machine = "powerpc"
elif "arm" in arch:
machine = "arm"
elif "ppc32be" in arch:
machine = "powerpcbe"
elif "ppc64le" in arch:
machine = "powerpc64le"
elif "ppc64" in arch:
machine = "powerpc64"
elif "ppc32" in arch:
machine = "powerpc"
elif "mips64" in arch:
machine = "mips64"
elif "mips" in arch:
machine = "mips"
elif "sparcv9" in arch:
machine = "sparc64"
elif "sparc" in arch:
machine = "sparc"
elif "s390x" in arch:
machine = "s390x-ibm"
elif "s390" in arch:
machine = "s390-ibm"
elif "sh4" in arch:
machine = "sh4"
elif "e2k" in arch:
# https://lists.gnu.org/archive/html/config-patches/2015-03/msg00000.html
machine = "e2k-unknown"
if machine is None:
raise ConanException("Unknown '%s' machine, Conan doesn't know how to "
"translate it to the GNU triplet, please report at "
" https://github.com/conan-io/conan/issues" % arch)
# Calculate the OS
if compiler == "gcc":
windows_op = "w64-mingw32"
elif compiler == "Visual Studio":
windows_op = "windows-msvc"
else:
windows_op = "windows"
op_system = {"Windows": windows_op,
"Linux": "linux-gnu",
"Darwin": "apple-darwin",
"Android": "linux-android",
"Macos": "apple-darwin",
"iOS": "apple-ios",
"watchOS": "apple-watchos",
"tvOS": "apple-tvos",
# NOTE: it technically must be "asmjs-unknown-emscripten" or
# "wasm32-unknown-emscripten", but it's not recognized by old config.sub versions
"Emscripten": "local-emscripten",
"AIX": "ibm-aix",
"Neutrino": "nto-qnx"}.get(os_, os_.lower())
if os_ in ("Linux", "Android"):
if "arm" in arch and "armv8" not in arch:
op_system += "eabi"
if (arch == "armv5hf" or arch == "armv7hf") and os_ == "Linux":
op_system += "hf"
if arch == "armv8_32" and os_ == "Linux":
op_system += "_ilp32" # https://wiki.linaro.org/Platform/arm64-ilp32
return "%s-%s" % (machine, op_system) | 0.665737 | 0.233542 |
import os
import fnmatch
from .models import *
def add_clients(session, protodir, verbose):
    """Add clients to the replay attack database.

    Reads ``clients.txt`` from ``protodir`` (one "<numeric id> <set name>"
    entry per line) and adds one ``Client`` row per entry to ``session``.

    :param session: SQLAlchemy session the new ``Client`` objects are added to
    :param protodir: directory containing ``clients.txt``
    :param verbose: if truthy, print each client as it is added
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(os.path.join(protodir, 'clients.txt'), 'rt') as flist:
        for line in flist:
            line = line.strip()
            if not line:
                continue  # empty line (''.split() would yield [''] and break int())
            tokens = line.split(' ', 2)
            # Renamed from 'id'/'set' to avoid shadowing the builtins.
            client_id = int(tokens[0])
            group = tokens[1]
            if verbose:
                print("Adding client %d on '%s' set..." % (client_id, group))
            session.add(Client(client_id, group))
def add_real_lists(session, protodir, verbose):
    """Adds all RCD (real-client-data) filelists to the database.

    :param session: SQLAlchemy session receiving ``File``/``RealAccess`` rows
    :param protodir: directory containing the ``real-*`` / ``recognition-*`` lists
    :param verbose: unused here; kept for signature parity with the other loaders
    """

    def add_real_list(session, filename):
        """Adds an RCD filelist and materializes RealAccess'es."""

        def parse_real_filename(f):
            """Parses the RCD filename and break it in the relevant chunks."""
            v = os.path.splitext(os.path.basename(f))[0].split('_')
            client_id = int(v[0].replace('client', ''))
            path = os.path.splitext(f)[0]  # keep only the filename stem
            purpose = v[3]
            light = v[4]
            # 6 underscore-separated fields => authentication session with an
            # explicit take number; otherwise an enrollment session (take = 1).
            take = int(v[5]) if len(v) == 6 else 1
            return [client_id, path, light], [purpose, take]

        # Context manager fixes the file-handle leak of the original version.
        with open(filename, 'rt') as flist:
            for fname in flist:
                s = fname.strip()
                if not s:
                    continue  # empty line
                filefields, realfields = parse_real_filename(s)
                # Replace the numeric id with the actual Client row.
                filefields[0] = session.query(Client).filter(Client.id == filefields[0]).one()
                file = File(*filefields)
                session.add(file)
                realfields.insert(0, file)
                session.add(RealAccess(*realfields))

    # Same six lists as before, expressed once.
    for stem in ('real-train', 'real-devel', 'real-test',
                 'recognition-train', 'recognition-devel', 'recognition-test'):
        add_real_list(session, os.path.join(protodir, stem + '.txt'))
def add_attack_lists(session, protodir, verbose):
    """Adds all RAD (replay-attack-data) filelists to the database.

    :param session: SQLAlchemy session receiving ``File``/``Attack`` rows
    :param protodir: directory containing the ``attack-grandtest-*`` lists
    :param verbose: unused here; kept for signature parity with the other loaders
    """

    def add_attack_list(session, filename):
        """Adds an RAD filelist and materializes Attacks."""

        def parse_attack_filename(f):
            """Parses the RAD filename and break it in the relevant chunks."""
            v = os.path.splitext(os.path.basename(f))[0].split('_')
            attack_device = v[1]  # print, mobile or highdef
            client_id = int(v[2].replace('client', ''))
            path = os.path.splitext(f)[0]  # keep only the filename stem
            sample_device = v[4]  # highdef or mobile
            sample_type = v[5]  # photo or video
            light = v[6]
            # second-to-last path component of the list entry
            attack_support = f.split('/')[-2]
            return [client_id, path, light], [attack_support, attack_device, sample_type, sample_device]

        # Context manager fixes the file-handle leak of the original version.
        with open(filename, 'rt') as flist:
            for fname in flist:
                s = fname.strip()
                if not s:
                    continue  # empty line
                filefields, attackfields = parse_attack_filename(s)
                # Replace the numeric id with the actual Client row.
                filefields[0] = session.query(Client).filter(Client.id == filefields[0]).one()
                file = File(*filefields)
                session.add(file)
                attackfields.insert(0, file)
                session.add(Attack(*attackfields))

    # Same three lists as before, expressed once.
    for grp in ('train', 'devel', 'test'):
        add_attack_list(session, os.path.join(protodir, 'attack-grandtest-allsupports-%s.txt' % grp))
def define_protocols(session, protodir, verbose):
    """Defines all available protocols.

    Scans ``protodir`` for ``attack-<name>-allsupports-train.txt`` lists,
    keeps the protocols for which attack and real lists exist for all of
    train/devel/test, then creates a ``Protocol`` row per surviving protocol
    and links every listed ``Attack``/``RealAccess`` to it.

    :param session: SQLAlchemy session receiving the ``Protocol`` rows
    :param protodir: directory containing the protocol definition lists
    :param verbose: if truthy, report skipped protocols and per-group counts
    """
    # figure out which protocols are complete enough to use
    valid = {}
    for fname in fnmatch.filter(os.listdir(protodir), 'attack-*-allsupports-train.txt'):
        protocol = fname.split('-', 4)[1]
        consider = True
        files = {}
        for grp in ('train', 'devel', 'test'):
            # check attack file
            attack = os.path.join(protodir, 'attack-%s-allsupports-%s.txt' % (protocol, grp))
            if not os.path.exists(attack):
                if verbose:
                    print("Not considering protocol %s as attack list '%s' was not found" % (protocol, attack))
                consider = False
            # check real file; fall back to the plain per-group real list
            real = os.path.join(protodir, 'real-%s-allsupports-%s.txt' % (protocol, grp))
            if not os.path.exists(real):
                alt_real = os.path.join(protodir, 'real-%s.txt' % (grp,))
                if not os.path.exists(alt_real):
                    if verbose:
                        print("Not considering protocol %s as real list '%s' or '%s' were not found" % (protocol, real, alt_real))
                    consider = False
                else:
                    real = alt_real
            if consider:
                files[grp] = (attack, real)
        if consider:
            valid[protocol] = files

    def _attach(list_file, cls, obj):
        """Append ``obj`` to the protocols of every ``cls`` row listed in ``list_file``; return the count."""
        counter = 0
        # Context manager fixes the file-handle leak of the original version.
        with open(list_file, 'rt') as flist:
            for fname in flist:
                stem = os.path.splitext(fname.strip())[0]
                if not stem:
                    continue  # skip blank lines -- .one() would raise on ''
                q = session.query(cls).join(File).filter(File.path == stem).one()
                q.protocols.append(obj)
                counter += 1
        return counter

    for protocol, groups in valid.items():
        if verbose:
            print("Creating protocol '%s'..." % protocol)
        # create protocol on the protocol table
        obj = Protocol(name=protocol)
        for grp, flist in groups.items():
            counter = _attach(flist[0], Attack, obj)
            if verbose:
                print(" -> %5s/%-6s: %d files" % (grp, "attack", counter))
            counter = _attach(flist[1], RealAccess, obj)
            if verbose:
                print(" -> %5s/%-6s: %d files" % (grp, "real", counter))
        session.add(obj)
def create_tables(args):
    """Creates all necessary tables (only to be used at the first time).

    :param args: parsed command-line namespace providing ``type``, ``files``
        and ``verbose`` (SQL echo is enabled at verbosity >= 2)
    """
    from bob.db.base.utils import create_engine_try_nolock
    engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2))
    # All models share the same metadata machinery; create each in turn.
    for model in (Client, RealAccess, Attack, Protocol):
        model.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
    """Creates or re-creates this database.

    :param args: parsed command-line namespace providing ``recreate``,
        ``verbose``, ``type``, ``files`` (target db path first) and ``protodir``
    :return: 0 on success (process exit code)
    """
    from bob.db.base.utils import session_try_nolock

    dbfile = args.files[0]

    # Merge the two redundant existence checks of the original version.
    if args.recreate and os.path.exists(dbfile):
        if args.verbose:
            print(('unlinking %s...' % dbfile))
        os.unlink(dbfile)

    # dirname may be '' when dbfile is a bare filename; os.makedirs('')
    # would raise, so guard on a non-empty directory component.
    dbdir = os.path.dirname(dbfile)
    if dbdir and not os.path.exists(dbdir):
        os.makedirs(dbdir)

    # the real work...
    create_tables(args)
    s = session_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2))
    add_clients(s, args.protodir, args.verbose)
    add_real_lists(s, args.protodir, args.verbose)
    add_attack_lists(s, args.protodir, args.verbose)
    define_protocols(s, args.protodir, args.verbose)
    s.commit()
    s.close()

    return 0
def add_command(subparsers):
    """Add specific subcommands that the action "create" can use"""
    # 'create' sub-command: rebuilds the database; dispatches to create() below
    parser = subparsers.add_parser('create', help=create.__doc__)
    parser.add_argument('-R', '--recreate', action='store_true', default=False,
                        help="If set, I'll first erase the current database")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Do SQL operations in a verbose way")
    # directory holding the protocol definition text files (site-specific default)
    parser.add_argument('-D', '--protodir', action='store',
                        default='/idiap/group/replay/database/protocols/replayattack-database/protocols',
                        metavar='DIR',
                        help="Change the relative path to the directory containing the protocol definitions for replay attacks (defaults to %(default)s)")
    parser.set_defaults(func=create) # action | bob/db/replay/create.py | import os
import fnmatch
from .models import *
def add_clients(session, protodir, verbose):
"""Add clients to the replay attack database."""
for client in open(os.path.join(protodir, 'clients.txt'), 'rt'):
s = client.strip().split(' ', 2)
if not s:
continue # empty line
id = int(s[0])
set = s[1]
if verbose:
print("Adding client %d on '%s' set..." % (id, set))
session.add(Client(id, set))
def add_real_lists(session, protodir, verbose):
"""Adds all RCD filelists"""
def add_real_list(session, filename):
"""Adds an RCD filelist and materializes RealAccess'es."""
def parse_real_filename(f):
"""Parses the RCD filename and break it in the relevant chunks."""
v = os.path.splitext(os.path.basename(f))[0].split('_')
client_id = int(v[0].replace('client', ''))
path = os.path.splitext(f)[0] # keep only the filename stem
purpose = v[3]
light = v[4]
if len(v) == 6:
take = int(v[5]) # authentication session
else:
take = 1 # enrollment session
return [client_id, path, light], [purpose, take]
for fname in open(filename, 'rt'):
s = fname.strip()
if not s:
continue # emtpy line
filefields, realfields = parse_real_filename(s)
filefields[0] = session.query(Client).filter(Client.id == filefields[0]).one()
file = File(*filefields)
session.add(file)
realfields.insert(0, file)
session.add(RealAccess(*realfields))
add_real_list(session, os.path.join(protodir, 'real-train.txt'))
add_real_list(session, os.path.join(protodir, 'real-devel.txt'))
add_real_list(session, os.path.join(protodir, 'real-test.txt'))
add_real_list(session, os.path.join(protodir, 'recognition-train.txt'))
add_real_list(session, os.path.join(protodir, 'recognition-devel.txt'))
add_real_list(session, os.path.join(protodir, 'recognition-test.txt'))
def add_attack_lists(session, protodir, verbose):
"""Adds all RAD filelists"""
def add_attack_list(session, filename):
"""Adds an RAD filelist and materializes Attacks."""
def parse_attack_filename(f):
"""Parses the RAD filename and break it in the relevant chunks."""
v = os.path.splitext(os.path.basename(f))[0].split('_')
attack_device = v[1] # print, mobile or highdef
client_id = int(v[2].replace('client', ''))
path = os.path.splitext(f)[0] # keep only the filename stem
sample_device = v[4] # highdef or mobile
sample_type = v[5] # photo or video
light = v[6]
attack_support = f.split('/')[-2]
return [client_id, path, light], [attack_support, attack_device, sample_type, sample_device]
for fname in open(filename, 'rt'):
s = fname.strip()
if not s:
continue # emtpy line
filefields, attackfields = parse_attack_filename(s)
filefields[0] = session.query(Client).filter(Client.id == filefields[0]).one()
file = File(*filefields)
session.add(file)
attackfields.insert(0, file)
session.add(Attack(*attackfields))
add_attack_list(session, os.path.join(protodir, 'attack-grandtest-allsupports-train.txt'))
add_attack_list(session, os.path.join(protodir, 'attack-grandtest-allsupports-devel.txt'))
add_attack_list(session, os.path.join(protodir, 'attack-grandtest-allsupports-test.txt'))
def define_protocols(session, protodir, verbose):
"""Defines all available protocols"""
# figures out which protocols to use
valid = {}
for fname in fnmatch.filter(os.listdir(protodir), 'attack-*-allsupports-train.txt'):
s = fname.split('-', 4)
consider = True
files = {}
for grp in ('train', 'devel', 'test'):
# check attack file
attack = os.path.join(protodir, 'attack-%s-allsupports-%s.txt' % (s[1], grp))
if not os.path.exists(attack):
if verbose:
print("Not considering protocol %s as attack list '%s' was not found" % (s[1], attack))
consider = False
# check real file
real = os.path.join(protodir, 'real-%s-allsupports-%s.txt' % (s[1], grp))
if not os.path.exists(real):
alt_real = os.path.join(protodir, 'real-%s.txt' % (grp,))
if not os.path.exists(alt_real):
if verbose:
print("Not considering protocol %s as real list '%s' or '%s' were not found" % (s[1], real, alt_real))
consider = False
else:
real = alt_real
if consider:
files[grp] = (attack, real)
if consider:
valid[s[1]] = files
for protocol, groups in valid.items():
if verbose:
print("Creating protocol '%s'..." % protocol)
# create protocol on the protocol table
obj = Protocol(name=protocol)
for grp, flist in groups.items():
counter = 0
for fname in open(flist[0], 'rt'):
s = os.path.splitext(fname.strip())[0]
q = session.query(Attack).join(File).filter(File.path == s).one()
q.protocols.append(obj)
counter += 1
if verbose:
print(" -> %5s/%-6s: %d files" % (grp, "attack", counter))
counter = 0
for fname in open(flist[1], 'rt'):
s = os.path.splitext(fname.strip())[0]
q = session.query(RealAccess).join(File).filter(File.path == s).one()
q.protocols.append(obj)
counter += 1
if verbose:
print(" -> %5s/%-6s: %d files" % (grp, "real", counter))
session.add(obj)
def create_tables(args):
"""Creates all necessary tables (only to be used at the first time)"""
from bob.db.base.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2))
Client.metadata.create_all(engine)
RealAccess.metadata.create_all(engine)
Attack.metadata.create_all(engine)
Protocol.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
"""Creates or re-creates this database"""
from bob.db.base.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print(('unlinking %s...' % dbfile))
if os.path.exists(dbfile):
os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2))
add_clients(s, args.protodir, args.verbose)
add_real_lists(s, args.protodir, args.verbose)
add_attack_lists(s, args.protodir, args.verbose)
define_protocols(s, args.protodir, args.verbose)
s.commit()
s.close()
return 0
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', default=False,
help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Do SQL operations in a verbose way")
parser.add_argument('-D', '--protodir', action='store',
default='/idiap/group/replay/database/protocols/replayattack-database/protocols',
metavar='DIR',
help="Change the relative path to the directory containing the protocol definitions for replay attacks (defaults to %(default)s)")
parser.set_defaults(func=create) # action | 0.297674 | 0.089614 |
import argparse
import numpy as np
import mdtraj as md
import matplotlib.pyplot as plt
from LLC_Membranes.llclib import physical
import pickle
def initialize():
    """Build and return the argparse parser for the water-partition analysis.

    Options, defaults and help text are unchanged; only the construction
    style differs from the previous revision.
    """
    p = argparse.ArgumentParser(description='Figure out the weight percent of water in the pores and tails')

    # --- trajectory control -------------------------------------------------
    p.add_argument('-t', '--traj', default=False, help='Name of GROMACS trajectory file. This file should be'
                   'preprocessed so everything is the box. For example, use gmx trjconv -ur tric -pbc atom. In the'
                   'event that a box vector crosses through a pore, use shift_box.py first to fix that. Specify'
                   'False (default) if you are only looking at a single frame')
    p.add_argument('-g', '--gro', default='PR.gro', help='Name of GROMACS coordinate file')
    p.add_argument('-r', '--residue', default='SOL', help='Name of residue whose partition we wish to quantify')
    p.add_argument('-begin', default=0, type=int, help='First frame to read')
    p.add_argument('-end', default=-1, type=int, help='Last frame to read')
    p.add_argument('-skip', default=1, type=int, help='Skip every n frames')

    # --- system definition --------------------------------------------------
    p.add_argument('-p', '--pore_atoms', nargs='+', default=['C', 'C1', 'C2', 'C3', 'C4', 'C5'], help='Atoms that'
                   'will be used to define the pore region')
    p.add_argument('-ox', '--tail_oxygen', nargs='+', default=['O5', 'O6', 'O7', 'O8', 'O9', 'O10'], help='Oxygen'
                   'atoms that will be used to define the tail region')
    p.add_argument('-tr', '--tail_radius', default=0.5, type=float, help='Max distance from tail oxygens a water '
                   'molecule can exist in order to be counted as inside the pore')
    p.add_argument('-pr', '--pore_radius', default=0.5, type=float, help='Max distance from pore center a water '
                   'molecule can exist in order to be counted as inside the pore')
    p.add_argument('-b', '--bounds', default=5, type=float, help='Distance from z-center up until which all atoms '
                   'will be included in calculation (nm)')
    p.add_argument('-natoms', default=137, type=int, help='Number of atoms in monomer residue (not including ions '
                   ' if they are separate residues!')

    # --- save/load and statistics options ------------------------------------
    p.add_argument('--savename', default='pore_spline.pl', help='Name of file in which to save System object')
    p.add_argument('--load', action="store_true")
    p.add_argument('-boot', '--nboot', default=200, type=int, help='Number of bootstrap trials')
    p.add_argument('--single_frame', action='store_true', help='Specify this flag in order to analyze a single'
                   '.gro file. No statistics will be generated')

    return p
class System(object):
def __init__(self, gro, pore_atoms, residue, traj=False, begin=0, end=-1, skip=1, npores=4):
""" Define the system and boundaries for pore and tail region
:param gro: coordinate file
:param pore_atoms: atoms used to define the pore locations
:param traj: trajectory file
:param begin: first frame to include
:param end: last frame to include
:param skip: skip every n frames
:param npores: number of pores. Assumes that atoms are number sequentially by pore
"""
print('Loading trajectory...', flush=True, end='')
if traj:
self.t = md.load(traj, top=args.gro)[begin:end:skip]
else:
self.t = md.load(gro)
print('Done')
# coordinates and unit cell dimensions
self.pos = self.t.xyz
box = self.t.unitcell_vectors
self.box = [box[0, 0, 0], box[0, 1, 1], box[0, 2, 2], box[0, 0, 1], box[0, 2, 0],
box[0, 1, 0], box[0, 0, 2], box[0, 1, 2], box[0, 2, 0]] # gromacs format
self.res = np.array([a.residue.name for a in self.t.topology.atoms]) # all of the residues
self.ids = np.array([a.name for a in self.t.topology.atoms]) # all of the atom names
# find pore centers
print('Creating pore splines')
pore_atoms = [a.index for a in self.t.topology.atoms if a.name in pore_atoms]
self.pore_spline, self.bin_centers = physical.trace_pores(self.pos[:, pore_atoms, :], self.t.unitcell_vectors,
20)
def plot(self, frame):
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
for i in range(4):
ax1 = ax[i // 2, i % 2]
ax2 = ax1.twinx()
spline = self.pore_spline[frame, i, ...]
bins = self.bin_centers[frame, i, :]
xrange = (np.amax(spline[:, 0]) - np.amin(spline[:, 0])) / 2
yrange = (np.amax(spline[:, 1]) - np.amin(spline[:, 1])) / 2
ax1.plot(bins, spline[:, 0], color='xkcd:blue', linewidth=2)
ax2.plot(bins, spline[:, 1], color='xkcd:orange', linewidth=2)
if i % 2 == 0:
ax1.set_ylabel('$x$-coordinate', fontsize=14, color='xkcd:blue')
if i % 2 == 1:
ax2.set_ylabel('$y$-coordinate', fontsize=14, color='xkcd:orange')
if i // 2 == 1:
ax1.set_xlabel('$z$-coordinate', fontsize=14)
# set limits -- give a little white space above and below
ax1.set_ylim(spline[:, 0].mean() - xrange*2, spline[:, 0].mean() + xrange*2)
ax2.set_ylim(spline[:, 1].mean() - yrange * 2, spline[:, 1].mean() + yrange * 2)
# format tick size
plt.gcf().get_axes()[i].tick_params(labelsize=14)
ax2.yaxis.set_tick_params(labelsize=14)
plt.tight_layout()
plt.show()
if __name__ == "__main__":
args = initialize().parse_args()
if not args.load:
sys = System(args.gro, args.pore_atoms, args.residue, traj=args.traj, begin=args.begin, end=args.end,
skip=args.skip)
with open(args.savename, "wb") as f:
pickle.dump(sys, f)
else:
with open(args.savename, "rb") as f:
sys = pickle.load(f)
sys.plot(-1) | LLC_Membranes/analysis/pore_wall.py |
import argparse
import numpy as np
import mdtraj as md
import matplotlib.pyplot as plt
from LLC_Membranes.llclib import physical
import pickle
def initialize():
parser = argparse.ArgumentParser(description='Figure out the weight percent of water in the pores and tails')
# trajectory control
parser.add_argument('-t', '--traj', default=False, help='Name of GROMACS trajectory file. This file should be'
'preprocessed so everything is the box. For example, use gmx trjconv -ur tric -pbc atom. In the'
'event that a box vector crosses through a pore, use shift_box.py first to fix that. Specify'
'False (default) if you are only looking at a single frame')
parser.add_argument('-g', '--gro', default='PR.gro', help='Name of GROMACS coordinate file')
parser.add_argument('-r', '--residue', default='SOL', help='Name of residue whose partition we wish to quantify')
parser.add_argument('-begin', default=0, type=int, help='First frame to read')
parser.add_argument('-end', default=-1, type=int, help='Last frame to read')
parser.add_argument('-skip', default=1, type=int, help='Skip every n frames')
# define system
parser.add_argument('-p', '--pore_atoms', nargs='+', default=['C', 'C1', 'C2', 'C3', 'C4', 'C5'], help='Atoms that'
'will be used to define the pore region')
parser.add_argument('-ox', '--tail_oxygen', nargs='+', default=['O5', 'O6', 'O7', 'O8', 'O9', 'O10'], help='Oxygen'
'atoms that will be used to define the tail region')
parser.add_argument('-tr', '--tail_radius', default=0.5, type=float, help='Max distance from tail oxygens a water '
'molecule can exist in order to be counted as inside the pore')
parser.add_argument('-pr', '--pore_radius', default=0.5, type=float, help='Max distance from pore center a water '
'molecule can exist in order to be counted as inside the pore')
parser.add_argument('-b', '--bounds', default=5, type=float, help='Distance from z-center up until which all atoms '
'will be included in calculation (nm)')
parser.add_argument('-natoms', default=137, type=int, help='Number of atoms in monomer residue (not including ions '
' if they are separate residues!')
# save/load options
parser.add_argument('--savename', default='pore_spline.pl', help='Name of file in which to save System object')
parser.add_argument('--load', action="store_true")
parser.add_argument('-boot', '--nboot', default=200, type=int, help='Number of bootstrap trials')
parser.add_argument('--single_frame', action='store_true', help='Specify this flag in order to analyze a single'
'.gro file. No statistics will be generated')
return parser
class System(object):
def __init__(self, gro, pore_atoms, residue, traj=False, begin=0, end=-1, skip=1, npores=4):
""" Define the system and boundaries for pore and tail region
:param gro: coordinate file
:param pore_atoms: atoms used to define the pore locations
:param traj: trajectory file
:param begin: first frame to include
:param end: last frame to include
:param skip: skip every n frames
:param npores: number of pores. Assumes that atoms are number sequentially by pore
"""
print('Loading trajectory...', flush=True, end='')
if traj:
self.t = md.load(traj, top=args.gro)[begin:end:skip]
else:
self.t = md.load(gro)
print('Done')
# coordinates and unit cell dimensions
self.pos = self.t.xyz
box = self.t.unitcell_vectors
self.box = [box[0, 0, 0], box[0, 1, 1], box[0, 2, 2], box[0, 0, 1], box[0, 2, 0],
box[0, 1, 0], box[0, 0, 2], box[0, 1, 2], box[0, 2, 0]] # gromacs format
self.res = np.array([a.residue.name for a in self.t.topology.atoms]) # all of the residues
self.ids = np.array([a.name for a in self.t.topology.atoms]) # all of the atom names
# find pore centers
print('Creating pore splines')
pore_atoms = [a.index for a in self.t.topology.atoms if a.name in pore_atoms]
self.pore_spline, self.bin_centers = physical.trace_pores(self.pos[:, pore_atoms, :], self.t.unitcell_vectors,
20)
def plot(self, frame):
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
for i in range(4):
ax1 = ax[i // 2, i % 2]
ax2 = ax1.twinx()
spline = self.pore_spline[frame, i, ...]
bins = self.bin_centers[frame, i, :]
xrange = (np.amax(spline[:, 0]) - np.amin(spline[:, 0])) / 2
yrange = (np.amax(spline[:, 1]) - np.amin(spline[:, 1])) / 2
ax1.plot(bins, spline[:, 0], color='xkcd:blue', linewidth=2)
ax2.plot(bins, spline[:, 1], color='xkcd:orange', linewidth=2)
if i % 2 == 0:
ax1.set_ylabel('$x$-coordinate', fontsize=14, color='xkcd:blue')
if i % 2 == 1:
ax2.set_ylabel('$y$-coordinate', fontsize=14, color='xkcd:orange')
if i // 2 == 1:
ax1.set_xlabel('$z$-coordinate', fontsize=14)
# set limits -- give a little white space above and below
ax1.set_ylim(spline[:, 0].mean() - xrange*2, spline[:, 0].mean() + xrange*2)
ax2.set_ylim(spline[:, 1].mean() - yrange * 2, spline[:, 1].mean() + yrange * 2)
# format tick size
plt.gcf().get_axes()[i].tick_params(labelsize=14)
ax2.yaxis.set_tick_params(labelsize=14)
plt.tight_layout()
plt.show()
if __name__ == "__main__":
args = initialize().parse_args()
if not args.load:
sys = System(args.gro, args.pore_atoms, args.residue, traj=args.traj, begin=args.begin, end=args.end,
skip=args.skip)
with open(args.savename, "wb") as f:
pickle.dump(sys, f)
else:
with open(args.savename, "rb") as f:
sys = pickle.load(f)
sys.plot(-1) | 0.697815 | 0.404684 |
from deepobs import analyzer
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
import tikzplotlib
import codecs
# get the plot
fig, axess = analyzer.plot_testset_performances('./results/', mode = 'final')
axess[0][0].set_title("DeepOBS init")
axess[0][1].set_title("PyTorch default init")
axess[0][0].set_ylabel("test loss")
axess[1][0].set_ylabel("train loss")
axess[2][0].set_ylabel("test acc")
axess[3][0].set_ylabel("train acc")
axess[0][0].get_legend().remove()
axess[3][1].legend(["Batch Size = 32",
"Batch Size = 64",
"Batch Size = 128"])
#Change line styles
for axes in axess:
lines = axes[0].get_lines()
for line in lines[1:]:
line.set_linewidth(3)
line.set_linestyle("--")
line.set_alpha(0.8)
lines[0].set_linewidth(4)
lines = axes[1].get_lines()
for line in lines[1:]:
line.set_linewidth(3)
line.set_linestyle("--")
line.set_alpha(0.8)
lines[0].set_linewidth(4)
#Change plot y scales
axess[0][0].set_ylim(1, 4)
axess[1][0].set_ylim(1, 4)
axess[0][1].set_ylim(1, 4)
axess[1][1].set_ylim(1, 4)
axess[2][0].set_ylim(0.1, 0.75)
axess[3][0].set_ylim(0.1, 0.75)
axess[2][1].set_ylim(0.1, 0.75)
axess[3][1].set_ylim(0.1, 0.75)
# modify the plot
fig.canvas.draw()
# General settings
code = tikzplotlib.get_tikz_code(figure = fig,
figurewidth = "\\figurewidth",
figureheight = "5cm",
extra_axis_parameters = ["tick pos=left",
"legend style={font=\\footnotesize, at={(0 ,0)},xshift = -0.4cm, yshift=-1.5cm,anchor=north,nodes=right}",],
extra_tikzpicture_parameters = ["every axis plot post./append style={line width = 1pt}"],
)#strict = True)
#catch missed underscores & save
code = code.replace("\_", "_").replace("_", "\_")
file = codecs.open("../../thesis/images/exp_init.pgf", "w", 'utf-8')
file.write(code)
file.close() | code/exp_init/analyze.py | from deepobs import analyzer
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
import tikzplotlib
import codecs
# get the plot
fig, axess = analyzer.plot_testset_performances('./results/', mode = 'final')
axess[0][0].set_title("DeepOBS init")
axess[0][1].set_title("PyTorch default init")
axess[0][0].set_ylabel("test loss")
axess[1][0].set_ylabel("train loss")
axess[2][0].set_ylabel("test acc")
axess[3][0].set_ylabel("train acc")
axess[0][0].get_legend().remove()
axess[3][1].legend(["Batch Size = 32",
"Batch Size = 64",
"Batch Size = 128"])
#Change line styles
for axes in axess:
lines = axes[0].get_lines()
for line in lines[1:]:
line.set_linewidth(3)
line.set_linestyle("--")
line.set_alpha(0.8)
lines[0].set_linewidth(4)
lines = axes[1].get_lines()
for line in lines[1:]:
line.set_linewidth(3)
line.set_linestyle("--")
line.set_alpha(0.8)
lines[0].set_linewidth(4)
#Change plot y scales
axess[0][0].set_ylim(1, 4)
axess[1][0].set_ylim(1, 4)
axess[0][1].set_ylim(1, 4)
axess[1][1].set_ylim(1, 4)
axess[2][0].set_ylim(0.1, 0.75)
axess[3][0].set_ylim(0.1, 0.75)
axess[2][1].set_ylim(0.1, 0.75)
axess[3][1].set_ylim(0.1, 0.75)
# modify the plot
fig.canvas.draw()
# General settings
code = tikzplotlib.get_tikz_code(figure = fig,
figurewidth = "\\figurewidth",
figureheight = "5cm",
extra_axis_parameters = ["tick pos=left",
"legend style={font=\\footnotesize, at={(0 ,0)},xshift = -0.4cm, yshift=-1.5cm,anchor=north,nodes=right}",],
extra_tikzpicture_parameters = ["every axis plot post./append style={line width = 1pt}"],
)#strict = True)
#catch missed underscores & save
code = code.replace("\_", "_").replace("_", "\_")
file = codecs.open("../../thesis/images/exp_init.pgf", "w", 'utf-8')
file.write(code)
file.close() | 0.248079 | 0.3122 |
import datetime
import jwt
from flasgger import swag_from
from flask import Blueprint, request
from flask.views import MethodView
from flask_restful import Api
from werkzeug.security import check_password_hash
from models import User
from config.environment_tools import get_secret_key
from controllers.request_model import get_credentials_fields
from config.flask_config import AuthenticationFailed
from utils.basic import is_dict_structure_equal
from utils.http import get_token_response, token_required
from config.logger import logging, get_logger_name
logger = logging.getLogger(get_logger_name(__name__))
API_PREFIX = 'auth'
AUTHENTICATION_BP = Blueprint('{0}_api'.format(API_PREFIX), __name__)
api = Api(AUTHENTICATION_BP)
class LoginAPI(MethodView):
@swag_from('/resources/authentication/description/login.yml')
# Causes token=null ?
# @marshal_with(get_token_fields())
def post(self):
data = request.get_json()
if not is_dict_structure_equal(get_credentials_fields(), data):
logger.warning('Request body has an unknown structure.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
username = data['username']
password = data['password']
if not username or not password:
logger.warning('Password is missing.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
user = User.query.filter_by(name=username).first()
if not user:
logger.warning('User could not be found in database.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
if check_password_hash(user.password, password):
logger.info('Log in successful: {}'.format(user.public_id))
token = _generate_token(user)
return get_token_response(dict(
token=token.decode('UTF-8')
))
logger.warning('Password is wrong.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
class RefreshAPI(MethodView):
@token_required()
@swag_from('/resources/authentication/description/refresh.yml')
def post(self, current_user: User):
token = _generate_token(current_user)
return get_token_response(dict(
token=token.decode('UTF-8')
))
def _generate_token(user: User):
public_id = user.public_id
now = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=14)
expires = now + timedelta
secret_key = get_secret_key()
algorithm = 'HS256'
token = jwt.encode({'public_id':public_id, 'iat':now, 'exp':expires}, secret_key, algorithm=algorithm)
return token
api.add_resource(LoginAPI, '/public/{rsc}/login'.format(rsc=API_PREFIX))
api.add_resource(RefreshAPI, '/{rsc}/refresh'.format(rsc=API_PREFIX)) | src/resources/authentication/__init__.py | import datetime
import jwt
from flasgger import swag_from
from flask import Blueprint, request
from flask.views import MethodView
from flask_restful import Api
from werkzeug.security import check_password_hash
from models import User
from config.environment_tools import get_secret_key
from controllers.request_model import get_credentials_fields
from config.flask_config import AuthenticationFailed
from utils.basic import is_dict_structure_equal
from utils.http import get_token_response, token_required
from config.logger import logging, get_logger_name
logger = logging.getLogger(get_logger_name(__name__))
API_PREFIX = 'auth'
AUTHENTICATION_BP = Blueprint('{0}_api'.format(API_PREFIX), __name__)
api = Api(AUTHENTICATION_BP)
class LoginAPI(MethodView):
@swag_from('/resources/authentication/description/login.yml')
# Causes token=null ?
# @marshal_with(get_token_fields())
def post(self):
data = request.get_json()
if not is_dict_structure_equal(get_credentials_fields(), data):
logger.warning('Request body has an unknown structure.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
username = data['username']
password = data['password']
if not username or not password:
logger.warning('Password is missing.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
user = User.query.filter_by(name=username).first()
if not user:
logger.warning('User could not be found in database.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
if check_password_hash(user.password, password):
logger.info('Log in successful: {}'.format(user.public_id))
token = _generate_token(user)
return get_token_response(dict(
token=token.decode('UTF-8')
))
logger.warning('Password is wrong.')
raise AuthenticationFailed('Verifizierung nicht möglich.')
class RefreshAPI(MethodView):
@token_required()
@swag_from('/resources/authentication/description/refresh.yml')
def post(self, current_user: User):
token = _generate_token(current_user)
return get_token_response(dict(
token=token.decode('UTF-8')
))
def _generate_token(user: User):
public_id = user.public_id
now = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=14)
expires = now + timedelta
secret_key = get_secret_key()
algorithm = 'HS256'
token = jwt.encode({'public_id':public_id, 'iat':now, 'exp':expires}, secret_key, algorithm=algorithm)
return token
api.add_resource(LoginAPI, '/public/{rsc}/login'.format(rsc=API_PREFIX))
api.add_resource(RefreshAPI, '/{rsc}/refresh'.format(rsc=API_PREFIX)) | 0.374676 | 0.044183 |
import zmq
import json
from time import sleep
"""
sanity checks copied from armctl provided by Torobo.
"""
def isInvalidJointId(id):
list = ["all", "1", "2", "3", "4", "5", "6", "7", "8"]
sp = id.split("/")
for s in sp:
if s not in list:
return True
return False
def isInvalidServoState(state):
list = ["on", "ON", "off", "OFF"]
if state in list:
return False
else:
return True
def isFloat(str):
try:
float(str)
return True
except ValueError:
return False
def isInvalidCommand(command):
isInvalidCommand = False
if "joint_id" in command.keys():
if isInvalidJointId(command["joint_id"]):
print "Invalid Joint ID"
isInvalidCommand = True
if "value" in command.keys():
if not (command["value"].isdigit() or isFloat(command["value"])):
print "Invalid Value"
isInvalidCommand = True
if "pos" in command.keys():
if not (command["pos"].isdigit() or isFloat(command["pos"])):
print "Invalid Position"
isInvalidCommand = True
if "time" in command.keys() and command["time"] is not None:
if not (command["time"].isdigit() or isFloat(command["time"])):
print "Invalid Time"
isInvalidCommand = True
if "servo_state" in command.keys():
if isInvalidServoState(command["servo_state"]):
print "Invalid Servo State"
isInvalidCommand = True
if command == {}:
isInvalidCommand = True
return isInvalidCommand
def parse_joint_id(joint_id):
if joint_id == -1:
return 'all'
else:
return str(joint_id+1)
def move_to_home(joint_id, home_pos):
commands = [
{
"command": "--mode",
"mode_id": "20",
"joint_id": parse_joint_id(joint_id)
},
# default gain for J7 is not enough :(
{
"command": "--kp",
"joint_id": "7",
"value": "80.0"
},
{
"command": "--ki",
"joint_id": "7",
"value": "2.00"
},
{
"command": "--servo",
"servo_state": "on",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--tc",
"joint_id": parse_joint_id(joint_id)
},
]
if joint_id == -1:
commands += [
{
"command": "--tpts",
"joint_id": parse_joint_id(joint_id_),
"pos": str(pos),
"time": "5"
} for joint_id_, pos in enumerate(home_pos)
]
else:
commands += [
{
"command": "--tpts",
"joint_id": parse_joint_id(joint_id),
"pos": str(home_pos[joint_id]),
"time": "5"
}
]
commands += [
{
"command": "--ts",
"joint_id": parse_joint_id(joint_id)
}
]
print("moving to home...")
for command in commands:
send_command(command)
sleep(0.1)
TRAJ_STATUS = [1, 2, 3]
while True:
rs = json.loads(request_state())
ts = rs['jointState'][joint_id]['trjStatus']
if ts == 4:
print("move to home successfully finished")
break
elif ts not in TRAJ_STATUS:
raise Exception
else:
sleep(0.1)
for command in commands:
send_command({
"command": "--brake",
"brake_state": "on",
"joint_id": "all"
})
def initialize(joint_id, home_pos):
move_to_home(-1, home_pos)
commands = [
{
"command": "--mode",
"mode_id": "2",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--servo",
"servo_state": "off",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--servo",
"servo_state": "on",
"joint_id": parse_joint_id(joint_id)
},
]
for command in commands:
ret = send_command(command)
if not check_error(ret):
raise Exception
def finalize(joint_id):
commands = [
{
"command": "--servo",
"servo_state": "off",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--brake",
"brake_state": "on",
"joint_id": parse_joint_id(joint_id)
},
]
for command in commands:
send_command(command)
def set_torque(torque, joint_id):
command = {
"command": "--tor",
"value": str(torque),
"joint_id": parse_joint_id(joint_id)
}
return send_command(command)
def set_current(current, joint_id):
command = {
"command": "--cur",
"value": str(current),
"joint_id": parse_joint_id(joint_id)
}
return send_command(command)
def set_position(position, joint_id):
command = {
"command": "--pos",
"value": str(position),
"joint_id": parse_joint_id(joint_id),
}
return send_command(command)
def request_state():
command = {
"command": "--state"
}
return send_command(command)
def send_command(command):
assert not isInvalidCommand(command)
ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://localhost:5555')
sock.send(json.dumps(command))
curState = sock.recv()
return curState
def check_error(state):
js = json.loads(state)['jointState']
error = [ss['ewStatus'] for ss in js]
if all([ee == 0 for ee in error]):
return True
slave_error = [ee / 65536 for ee in error]
master_error = [ee % 65536 for ee in error]
print('slave')
for joint_id, se in enumerate(slave_error):
print(str(joint_id+1) + ':' + format(se, '#016b'))
print('master')
for joint_id, me in enumerate(master_error):
print(str(joint_id+1) + ':' + format(me, '#016b'))
return False | ToroboTakahashi/.ipynb_checkpoints/torobo_communicator-checkpoint.py | import zmq
import json
from time import sleep
"""
sanity checks copied from armctl provided by Torobo.
"""
def isInvalidJointId(id):
list = ["all", "1", "2", "3", "4", "5", "6", "7", "8"]
sp = id.split("/")
for s in sp:
if s not in list:
return True
return False
def isInvalidServoState(state):
list = ["on", "ON", "off", "OFF"]
if state in list:
return False
else:
return True
def isFloat(str):
try:
float(str)
return True
except ValueError:
return False
def isInvalidCommand(command):
isInvalidCommand = False
if "joint_id" in command.keys():
if isInvalidJointId(command["joint_id"]):
print "Invalid Joint ID"
isInvalidCommand = True
if "value" in command.keys():
if not (command["value"].isdigit() or isFloat(command["value"])):
print "Invalid Value"
isInvalidCommand = True
if "pos" in command.keys():
if not (command["pos"].isdigit() or isFloat(command["pos"])):
print "Invalid Position"
isInvalidCommand = True
if "time" in command.keys() and command["time"] is not None:
if not (command["time"].isdigit() or isFloat(command["time"])):
print "Invalid Time"
isInvalidCommand = True
if "servo_state" in command.keys():
if isInvalidServoState(command["servo_state"]):
print "Invalid Servo State"
isInvalidCommand = True
if command == {}:
isInvalidCommand = True
return isInvalidCommand
def parse_joint_id(joint_id):
if joint_id == -1:
return 'all'
else:
return str(joint_id+1)
def move_to_home(joint_id, home_pos):
commands = [
{
"command": "--mode",
"mode_id": "20",
"joint_id": parse_joint_id(joint_id)
},
# default gain for J7 is not enough :(
{
"command": "--kp",
"joint_id": "7",
"value": "80.0"
},
{
"command": "--ki",
"joint_id": "7",
"value": "2.00"
},
{
"command": "--servo",
"servo_state": "on",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--tc",
"joint_id": parse_joint_id(joint_id)
},
]
if joint_id == -1:
commands += [
{
"command": "--tpts",
"joint_id": parse_joint_id(joint_id_),
"pos": str(pos),
"time": "5"
} for joint_id_, pos in enumerate(home_pos)
]
else:
commands += [
{
"command": "--tpts",
"joint_id": parse_joint_id(joint_id),
"pos": str(home_pos[joint_id]),
"time": "5"
}
]
commands += [
{
"command": "--ts",
"joint_id": parse_joint_id(joint_id)
}
]
print("moving to home...")
for command in commands:
send_command(command)
sleep(0.1)
TRAJ_STATUS = [1, 2, 3]
while True:
rs = json.loads(request_state())
ts = rs['jointState'][joint_id]['trjStatus']
if ts == 4:
print("move to home successfully finished")
break
elif ts not in TRAJ_STATUS:
raise Exception
else:
sleep(0.1)
for command in commands:
send_command({
"command": "--brake",
"brake_state": "on",
"joint_id": "all"
})
def initialize(joint_id, home_pos):
move_to_home(-1, home_pos)
commands = [
{
"command": "--mode",
"mode_id": "2",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--servo",
"servo_state": "off",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--servo",
"servo_state": "on",
"joint_id": parse_joint_id(joint_id)
},
]
for command in commands:
ret = send_command(command)
if not check_error(ret):
raise Exception
def finalize(joint_id):
commands = [
{
"command": "--servo",
"servo_state": "off",
"joint_id": parse_joint_id(joint_id)
},
{
"command": "--brake",
"brake_state": "on",
"joint_id": parse_joint_id(joint_id)
},
]
for command in commands:
send_command(command)
def set_torque(torque, joint_id):
command = {
"command": "--tor",
"value": str(torque),
"joint_id": parse_joint_id(joint_id)
}
return send_command(command)
def set_current(current, joint_id):
command = {
"command": "--cur",
"value": str(current),
"joint_id": parse_joint_id(joint_id)
}
return send_command(command)
def set_position(position, joint_id):
command = {
"command": "--pos",
"value": str(position),
"joint_id": parse_joint_id(joint_id),
}
return send_command(command)
def request_state():
command = {
"command": "--state"
}
return send_command(command)
def send_command(command):
assert not isInvalidCommand(command)
ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://localhost:5555')
sock.send(json.dumps(command))
curState = sock.recv()
return curState
def check_error(state):
js = json.loads(state)['jointState']
error = [ss['ewStatus'] for ss in js]
if all([ee == 0 for ee in error]):
return True
slave_error = [ee / 65536 for ee in error]
master_error = [ee % 65536 for ee in error]
print('slave')
for joint_id, se in enumerate(slave_error):
print(str(joint_id+1) + ':' + format(se, '#016b'))
print('master')
for joint_id, me in enumerate(master_error):
print(str(joint_id+1) + ':' + format(me, '#016b'))
return False | 0.286169 | 0.227995 |
import argparse
import ijson
import multiprocessing
import json
from os import linesep
from bisect import bisect_left
STOP_TOKEN = "<PASSWORD>!!!"
def file_writer(dest_filename, some_queue, some_stop_token):
"""Write JSON strings to a JSON list from a multiprocessing
queue to a file until the stop token is sent"""
is_start_of_json = True
with open(dest_filename, 'w') as dest_file:
dest_file.write("[")
while True:
line = some_queue.get()
if line == some_stop_token:
dest_file.write(linesep)
dest_file.write("]")
return
if is_start_of_json:
is_start_of_json = False
else:
dest_file.write(",")
dest_file.write(linesep)
dest_file.write(line)
def remap_genome_coordinate(coord, align_tuples, startpoints):
"""Given a tuple of chromosome alignment remappings,
remap a single coordinate"""
original_chromosome = coord["chromosome"]
# The bisect left function gives the nearest item in the array
# If the items are equal, in this case we want them to be part of
# The same mapping so we add 1
ind = bisect_left(startpoints, (coord["position"] + 1)) -1
if ind == -1:
#The coordinate is before the first chromosome
return None
chromosome_mapping = align_tuples[ind]
(source_start_point,
source_chromosome,
length,
new_start_point,
new_chromosome) = chromosome_mapping
if original_chromosome == source_chromosome:
bases_from_start = coord["position"] - source_start_point
# length of chromosome counts from 0 to (length -1)
within_range = bases_from_start < length
if bases_from_start >= 0 and within_range:
# The base from the coordinate is within range
coord["chromosome"] = new_chromosome
coord["position"] = new_start_point + bases_from_start
return coord
return None
def remap_reference_genome(alignment_file_path,
coordinate_file_path,
writer_queue):
"""Given the file path to an alignment file and the
file path to a coordinate file
write an output file which maps
the source genome coordinates to a new reference genome"""
with open(alignment_file_path, 'r') as align:
alignments = ijson.items(align, 'item')
align_tuples = [(item["source"]["start"],
item["source"]["chromosome"],
item["length"],
item["target"]["start"],
item["target"]["chromosome"])
for item in alignments]
align_tuples.sort(key=lambda tup: tup[0])
startpoints = [tup[0] for tup in align_tuples]
with open(coordinate_file_path, 'r') as coordfile:
coords = ijson.items(coordfile, 'item')
for index, coord in enumerate(coords):
data_dict = remap_genome_coordinate(coord, align_tuples, startpoints)
if data_dict is not None:
writer_queue.put(json.dumps(data_dict))
def get_writer_process_and_queue(output):
"""Returns a multiprocessing process to write to a
file and a queue to do the writing"""
queue = multiprocessing.Queue()
return (
multiprocessing.Process(
target=file_writer,
args=(
output,
queue,
STOP_TOKEN)),
queue)
def handle_command(alignfile, coordsfile, output):
"""Given alignfile, coordsfile and output file paths, remap a genome"""
writer_process, writer_queue = get_writer_process_and_queue(output)
writer_process.start()
remap_reference_genome(alignfile, coordsfile, writer_queue)
writer_queue.put(STOP_TOKEN)
writer_process.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("alignfile", help="Path to the alignment JSON file")
parser.add_argument("coordsfile", help="Path to the coordinates JSON file")
parser.add_argument("output", help="Path to the desired output file")
args = parser.parse_args()
handle_command(args.alignfile, args.coordsfile, args.output) | grapper/grapper.py | import argparse
import ijson
import multiprocessing
import json
from os import linesep
from bisect import bisect_left
STOP_TOKEN = "<PASSWORD>!!!"
def file_writer(dest_filename, some_queue, some_stop_token):
"""Write JSON strings to a JSON list from a multiprocessing
queue to a file until the stop token is sent"""
is_start_of_json = True
with open(dest_filename, 'w') as dest_file:
dest_file.write("[")
while True:
line = some_queue.get()
if line == some_stop_token:
dest_file.write(linesep)
dest_file.write("]")
return
if is_start_of_json:
is_start_of_json = False
else:
dest_file.write(",")
dest_file.write(linesep)
dest_file.write(line)
def remap_genome_coordinate(coord, align_tuples, startpoints):
"""Given a tuple of chromosome alignment remappings,
remap a single coordinate"""
original_chromosome = coord["chromosome"]
# The bisect left function gives the nearest item in the array
# If the items are equal, in this case we want them to be part of
# The same mapping so we add 1
ind = bisect_left(startpoints, (coord["position"] + 1)) -1
if ind == -1:
#The coordinate is before the first chromosome
return None
chromosome_mapping = align_tuples[ind]
(source_start_point,
source_chromosome,
length,
new_start_point,
new_chromosome) = chromosome_mapping
if original_chromosome == source_chromosome:
bases_from_start = coord["position"] - source_start_point
# length of chromosome counts from 0 to (length -1)
within_range = bases_from_start < length
if bases_from_start >= 0 and within_range:
# The base from the coordinate is within range
coord["chromosome"] = new_chromosome
coord["position"] = new_start_point + bases_from_start
return coord
return None
def remap_reference_genome(alignment_file_path,
coordinate_file_path,
writer_queue):
"""Given the file path to an alignment file and the
file path to a coordinate file
write an output file which maps
the source genome coordinates to a new reference genome"""
with open(alignment_file_path, 'r') as align:
alignments = ijson.items(align, 'item')
align_tuples = [(item["source"]["start"],
item["source"]["chromosome"],
item["length"],
item["target"]["start"],
item["target"]["chromosome"])
for item in alignments]
align_tuples.sort(key=lambda tup: tup[0])
startpoints = [tup[0] for tup in align_tuples]
with open(coordinate_file_path, 'r') as coordfile:
coords = ijson.items(coordfile, 'item')
for index, coord in enumerate(coords):
data_dict = remap_genome_coordinate(coord, align_tuples, startpoints)
if data_dict is not None:
writer_queue.put(json.dumps(data_dict))
def get_writer_process_and_queue(output):
"""Returns a multiprocessing process to write to a
file and a queue to do the writing"""
queue = multiprocessing.Queue()
return (
multiprocessing.Process(
target=file_writer,
args=(
output,
queue,
STOP_TOKEN)),
queue)
def handle_command(alignfile, coordsfile, output):
    """Run a full remap: start the writer process, feed it remapped
    coordinates, then signal completion and wait for it to finish.

    :param alignfile: path to the alignment JSON file
    :param coordsfile: path to the coordinates JSON file
    :param output: path of the output file to produce
    """
    process, queue = get_writer_process_and_queue(output)
    process.start()
    remap_reference_genome(alignfile, coordsfile, queue)
    # Sentinel tells the writer there is no more data coming.
    queue.put(STOP_TOKEN)
    process.join()
if __name__ == '__main__':
    # Command-line entry point: parse the three file paths and run the remap.
    # (Dataset-table residue fused onto the final line has been removed --
    # it made this block a syntax error.)
    parser = argparse.ArgumentParser()
    parser.add_argument("alignfile", help="Path to the alignment JSON file")
    parser.add_argument("coordsfile", help="Path to the coordinates JSON file")
    parser.add_argument("output", help="Path to the desired output file")
    args = parser.parse_args()
    handle_command(args.alignfile, args.coordsfile, args.output)
import torch
def make_d2_symm(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: d2 symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Symmetrize the on-site tensor with respect to the left-right
    reflection, i.e. average ``A`` with its l<->r permuted counterpart.
    """
    # swap index 2 (l) with index 4 (r) and average with the original
    reflected = A.permute(0, 1, 4, 3, 2)
    return 0.5 * (A + reflected)
def make_d2_antisymm(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: d2 anti-symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Anti-symmetrize the on-site tensor with respect to the left-right
    reflection, i.e. keep only the component odd under l<->r.
    """
    # swap index 2 (l) with index 4 (r); subtract to keep the odd part
    reflected = A.permute(0, 1, 4, 3, 2)
    return 0.5 * (A - reflected)
def make_c4v_symm(A, irreps=("A1",)):
    r"""
    :param A: on-site tensor
    :param irreps: choice of irreps from A1, A2, B1, or B2
    :type A: torch.tensor
    :type irreps: iterable(str)
    :return: C4v symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Project and sum any combination of projections on real C4v irreps A1, A2,
    B1, and B2. The irreps are converted to a set (no repeated elements) and
    the projections are then summed up.
    """
    # The default is now a tuple, not a list: immutable default argument.
    # (The previous list default was never mutated, but this removes the trap.)
    projections = {"A1": make_c4v_symm_A1, "A2": make_c4v_symm_A2,
                   "B1": make_c4v_symm_B1, "B2": make_c4v_symm_B2}
    irreps = set(irreps)
    assert irreps.issubset(set(projections.keys())), "Unknown C4v irrep"
    # Accumulate on a zero tensor of matching dtype/device so the sum
    # stays on the same device as A.
    A_symm = torch.zeros(A.size(), device=A.device, dtype=A.dtype)
    for irrep in irreps:
        A_symm = A_symm + projections[irrep](A)
    return A_symm
def make_c4v_symm_A1(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: c4v symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Project on-site tensor ``A`` on the A1 irrep of the C4v group by
    sequentially averaging with its reflected/rotated images.
    """
    # The composition of these four averaging steps realizes the full
    # (1/8) sum over the eight C4v group elements, i.e. the A1 projector.
    for axes in ((0, 1, 4, 3, 2),    # left-right reflection
                 (0, 3, 2, 1, 4),    # up-down reflection
                 (0, 4, 1, 2, 3),    # pi/2 anti-clockwise
                 (0, 2, 3, 4, 1)):   # pi/2 clockwise
        A = 0.5 * (A + A.permute(*axes))
    return A
def make_c4v_symm_A2(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: c4v symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Project on-site tensor ``A`` on the A2 irrep of the C4v group
    (+1 under rotations, -1 under reflections).
    """
    # Sequential (anti-)symmetrization steps; signs follow the A2 characters.
    for sign, axes in ((-1.0, (0, 1, 4, 3, 2)),   # left-right reflection (\sigma)
                       (-1.0, (0, 4, 3, 2, 1)),   # skew reflection (\sigma R^-1)
                       (+1.0, (0, 4, 1, 2, 3)),   # pi/2 anti-clockwise (R)
                       (+1.0, (0, 3, 4, 1, 2))):  # pi anti-clockwise (R^2)
        A = 0.5 * (A + sign * A.permute(*axes))
    return A
def make_c4v_symm_B1(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: c4v symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Project on-site tensor ``A`` on the B1 irrep of the C4v group
    (+1 under axis reflections and R^2, -1 under R and diagonal reflections).
    """
    # Sequential (anti-)symmetrization steps; signs follow the B1 characters.
    for sign, axes in ((+1.0, (0, 1, 4, 3, 2)),   # left-right reflection (\sigma)
                       (-1.0, (0, 4, 3, 2, 1)),   # skew reflection (\sigma R^-1)
                       (-1.0, (0, 4, 1, 2, 3)),   # pi/2 anti-clockwise (R)
                       (+1.0, (0, 3, 4, 1, 2))):  # pi anti-clockwise (R^2)
        A = 0.5 * (A + sign * A.permute(*axes))
    return A
def make_c4v_symm_B2(A):
    r"""
    :param A: on-site tensor
    :type A: torch.tensor
    :return: C4v symmetrized tensor ``A``
    :rtype: torch.tensor
    ::
           u  s
           |/
        l--A--r  <=> A[s,u,l,d,r]
           |
           d

    Project on-site tensor ``A`` on the B2 irrep of the C4v group
    (-1 under R and axis reflections, +1 under R^2 and diagonal reflections).

    NOTE(fix): the previous sign pattern (-\sigma, +\sigma R^-1, +R, -R^2)
    composes to (1 - R^2)(1 - \sigma)/8, which is not idempotent and projects
    into the two-dimensional E sector rather than B2. Mirroring the B1
    sequence with the reflection signs flipped (-\sigma, +\sigma R^-1,
    -R, +R^2) composes to the genuine B2 projector
    (1/8)(1 - R + R^2 - R^3 - \sigma + R\sigma - R^2\sigma + R^3\sigma).
    """
    A = 0.5 * (A - A.permute(0, 1, 4, 3, 2))  # left-right reflection (\sigma)
    A = 0.5 * (A + A.permute(0, 4, 3, 2, 1))  # skew reflection (\sigma R^-1)
    A = 0.5 * (A - A.permute(0, 4, 1, 2, 3))  # pi/2 anti-clockwise (R)
    A = 0.5 * (A + A.permute(0, 3, 4, 1, 2))  # pi anti-clockwise (R^2)
    return A
def verify_c4v_symm_A1(A, tol=1.0e-8):
    r"""
    :param A: on-site tensor
    :param tol: distance tolerance below which ``A`` counts as invariant
    :type A: torch.tensor
    :type tol: float
    :return: (symm, max_d, d_list) where ``symm`` is True if ``A`` is
        invariant (within ``tol``) under all four generators used by
        ``make_c4v_symm_A1``, ``max_d`` is the largest observed distance,
        and ``d_list`` holds (permutation, distance) pairs
    :rtype: tuple(bool, float, list)

    Fixes: ``tol`` was previously undefined (NameError on every call);
    distances are now plain floats so ``symm``/``max_d`` are not a mix of
    Python scalars and 0-dim tensors; dataset-table residue fused onto the
    final source line has been removed.
    """
    with torch.no_grad():
        symm = True
        max_d = 0.
        d_list = []
        for p in [(0, 1, 4, 3, 2), (0, 3, 2, 1, 4), (0, 4, 1, 2, 3), (0, 2, 3, 4, 1)]:
            # distance between A and its image under the symmetry operation
            d = torch.dist(A, A.permute(p)).item()
            d_list.append((p, d))
            symm = symm and (d < tol)
            max_d = max(max_d, d)
        return symm, max_d, d_list
def make_d2_symm(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: d2 symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Perform left-right symmetrization
"""
A= 0.5*(A + A.permute(0,1,4,3,2)) # left-right symmetry
return A
def make_d2_antisymm(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: d2 anti-symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Perform left-right symmetrization
"""
A= 0.5*(A - A.permute(0,1,4,3,2)) # left-right symmetry
return A
def make_c4v_symm(A, irreps=["A1"]):
r"""
:param A: on-site tensor
:param irreps: choice of irreps from A1, A2, B1, or B2
:type A: torch.tensor
:type irreps: list(str)
:return: C4v symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Project and sum any combination of projections on real C4v irreps A1, A2, B1,
and B2. List of irreps is converted to a set (no repeated elements) and the
projections are then summed up.
"""
projections=dict({"A1": make_c4v_symm_A1, "A2": make_c4v_symm_A2, \
"B1": make_c4v_symm_B1, "B2": make_c4v_symm_B2})
irreps=set(irreps)
assert irreps.issubset(set(projections.keys())), "Unknown C4v irrep"
A_symm= torch.zeros(A.size(),device=A.device,dtype=A.dtype)
for irrep in irreps:
A_symm= A_symm + projections[irrep](A)
return A_symm
def make_c4v_symm_A1(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: c4v symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Project on-site tensor ``A`` on A1 irrep of C4v group.
"""
A= 0.5*(A + A.permute(0,1,4,3,2)) # left-right reflection
A= 0.5*(A + A.permute(0,3,2,1,4)) # up-down reflection
A= 0.5*(A + A.permute(0,4,1,2,3)) # pi/2 anti-clockwise
A= 0.5*(A + A.permute(0,2,3,4,1)) # pi/2 clockwise
return A
def make_c4v_symm_A2(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: c4v symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Project on-site tensor ``A`` on A2 irrep of C4v group.
"""
A= 0.5*(A - A.permute(0,1,4,3,2)) # left-right reflection (\sigma)
A= 0.5*(A - A.permute(0,4,3,2,1)) # skew reflection (\sigma R^-1)
A= 0.5*(A + A.permute(0,4,1,2,3)) # pi/2 anti-clockwise (R)
A= 0.5*(A + A.permute(0,3,4,1,2)) # pi anti-clockwise (R^2)
return A
def make_c4v_symm_B1(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: c4v symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Project on-site tensor ``A`` on B1 irrep of C4v group.
"""
A= 0.5*(A + A.permute(0,1,4,3,2)) # left-right reflection (\sigma)
A= 0.5*(A - A.permute(0,4,3,2,1)) # skew reflection (\sigma R^-1)
A= 0.5*(A - A.permute(0,4,1,2,3)) # pi/2 anti-clockwise (R)
A= 0.5*(A + A.permute(0,3,4,1,2)) # pi anti-clockwise (R^2)
return A
def make_c4v_symm_B2(A):
r"""
:param A: on-site tensor
:type A: torch.tensor
:return: C4v symmetrized tensor ``A``
:rtype: torch.tensor
::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
Project on-site tensor ``A`` on B2 irrep of C4v group.
"""
A= 0.5*(A - A.permute(0,1,4,3,2)) # left-right reflection (\sigma)
A= 0.5*(A + A.permute(0,4,3,2,1)) # skew reflection (\sigma R^-1)
A= 0.5*(A + A.permute(0,4,1,2,3)) # pi/2 anti-clockwise (R)
A= 0.5*(A - A.permute(0,3,4,1,2)) # pi anti-clockwise (R^2)
return A
def verify_c4v_symm_A1(A):
with torch.no_grad():
symm= True
max_d=0.
d_list=[]
for p in [(0,1,4,3,2), (0,3,2,1,4), (0,4,1,2,3), (0,2,3,4,1)]:
d= torch.dist(A,A.permute(p))
d_list.append((p,d))
symm= symm * (d<tol)
max_d= max(max_d,d)
return symm, max_d, d_list | 0.797872 | 0.838878 |
from troveclient import base
from troveclient import common
REBOOT_SOFT = 'SOFT'
REBOOT_HARD = 'HARD'
class Instance(base.Resource):
    """A database instance resource returned by the Trove API."""

    def __repr__(self):
        return "<Instance: %s>" % self.name

    def list_databases(self):
        """List the databases hosted on this instance."""
        return self.manager.databases.list(self)

    def delete(self):
        """Delete this instance."""
        self.manager.delete(self)

    def restart(self):
        """Restart this database instance."""
        self.manager.restart(self.id)

    def detach_replica(self):
        """Stop replicating to this instance (detach it from its source)."""
        self.manager.edit(self.id, detach_replica_source=True)
class Instances(base.ManagerWithFind):
    """Manage :class:`Instance` resources."""
    resource_class = Instance

    # TODO(SlickNik): Remove slave_of param after updating tests to replica_of
    def create(self, name, flavor_id, volume=None, databases=None, users=None,
               restorePoint=None, availability_zone=None, datastore=None,
               datastore_version=None, nics=None, configuration=None,
               replica_of=None, slave_of=None, replica_count=None):
        """Create (boot) a new instance.

        Only options actually supplied are included in the request body,
        so the API's own defaults apply to everything omitted.
        """
        body = {"instance": {
            "name": name,
            "flavorRef": flavor_id
        }}
        datastore_obj = {}
        if volume:
            body["instance"]["volume"] = volume
        if databases:
            body["instance"]["databases"] = databases
        if users:
            body["instance"]["users"] = users
        if restorePoint:
            body["instance"]["restorePoint"] = restorePoint
        if availability_zone:
            body["instance"]["availability_zone"] = availability_zone
        if datastore:
            datastore_obj["type"] = datastore
        if datastore_version:
            datastore_obj["version"] = datastore_version
        if datastore_obj:
            body["instance"]["datastore"] = datastore_obj
        if nics:
            body["instance"]["nics"] = nics
        if configuration:
            body["instance"]["configuration"] = configuration
        if replica_of or slave_of:
            # replica_of takes precedence; slave_of kept for backward compat
            body["instance"]["replica_of"] = base.getid(replica_of) or slave_of
        if replica_count:
            body["instance"]["replica_count"] = replica_count
        return self._create("/instances", body, "instance")

    def modify(self, instance, configuration=None):
        """Update (PUT) the instance, optionally attaching a configuration."""
        body = {
            "instance": {
            }
        }
        if configuration is not None:
            body["instance"]["configuration"] = configuration
        url = "/instances/%s" % base.getid(instance)
        resp, body = self.api.client.put(url, body=body)
        common.check_for_exceptions(resp, body, url)

    def edit(self, instance, configuration=None, name=None,
             detach_replica_source=False, remove_configuration=False):
        """Patch selected properties of an instance.

        :raises ValueError: if ``configuration`` and ``remove_configuration``
            are both given.
        """
        body = {
            "instance": {
            }
        }
        if configuration and remove_configuration:
            # ValueError instead of bare Exception: still an Exception
            # subclass, so existing broad handlers keep working.
            raise ValueError("Cannot attach and detach configuration "
                             "simultaneously.")
        if remove_configuration:
            body["instance"]["configuration"] = None
        if configuration is not None:
            body["instance"]["configuration"] = configuration
        if name is not None:
            body["instance"]["name"] = name
        if detach_replica_source:
            # TODO(glucas): Remove slave_of after updating trove
            # (see trove.instance.service.InstanceController#edit)
            body["instance"]["slave_of"] = None
            body["instance"]["replica_of"] = None
        url = "/instances/%s" % base.getid(instance)
        resp, body = self.api.client.patch(url, body=body)
        common.check_for_exceptions(resp, body, url)

    def list(self, limit=None, marker=None, include_clustered=False):
        """Get a paginated list of all instances.

        :rtype: list of :class:`Instance`.
        """
        return self._paginated("/instances", "instances", limit, marker,
                               {"include_clustered": include_clustered})

    def get(self, instance):
        """Get a specific instance.

        :rtype: :class:`Instance`
        """
        return self._get("/instances/%s" % base.getid(instance),
                         "instance")

    def backups(self, instance, limit=None, marker=None):
        """Get the list of backups for a specific instance.

        :rtype: list of :class:`Backups`.
        """
        url = "/instances/%s/backups" % base.getid(instance)
        return self._paginated(url, "backups", limit, marker)

    def delete(self, instance):
        """Delete the specified instance.

        :param instance: A reference to the instance to delete
        """
        url = "/instances/%s" % base.getid(instance)
        resp, body = self.api.client.delete(url)
        common.check_for_exceptions(resp, body, url)

    def _action(self, instance, body):
        """Perform a server "action" -- reboot/rebuild/resize/etc."""
        url = "/instances/%s/action" % base.getid(instance)
        resp, body = self.api.client.post(url, body=body)
        common.check_for_exceptions(resp, body, url)
        if body:
            return self.resource_class(self, body, loaded=True)
        return body

    def resize_volume(self, instance, volume_size):
        """Resize the volume on an existing instance."""
        body = {"resize": {"volume": {"size": volume_size}}}
        self._action(instance, body)

    def resize_instance(self, instance, flavor_id):
        """Resize an instance to a new flavor."""
        body = {"resize": {"flavorRef": flavor_id}}
        self._action(instance, body)

    def restart(self, instance):
        """Restart the database instance.

        :param instance: The :class:`Instance` (or its ID) of the database
            instance to restart.
        """
        body = {'restart': {}}
        self._action(instance, body)

    def configuration(self, instance):
        """Get the configuration of an instance.

        :rtype: :class:`Instance`
        """
        return self._get("/instances/%s/configuration" % base.getid(instance),
                         "instance")

    def promote_to_replica_source(self, instance):
        """Promote a replica to be the new replica_source of its set.

        :param instance: The :class:`Instance` (or its ID) of the database
            instance to promote.
        """
        body = {'promote_to_replica_source': {}}
        self._action(instance, body)

    def eject_replica_source(self, instance):
        """Eject a replica source from its set.

        :param instance: The :class:`Instance` (or its ID) of the database
            instance to eject.
        """
        body = {'eject_replica_source': {}}
        self._action(instance, body)
class InstanceStatus(object):
    """Symbolic names for the states a database instance can report.

    (Dataset-table residue fused onto the final source line has been
    removed -- it made the class definition a syntax error.)
    """
    ACTIVE = "ACTIVE"
    BLOCKED = "BLOCKED"
    BUILD = "BUILD"
    FAILED = "FAILED"
    REBOOT = "REBOOT"
    RESIZE = "RESIZE"
    SHUTDOWN = "SHUTDOWN"
    RESTART_REQUIRED = "RESTART_REQUIRED"
    PROMOTING = "PROMOTING"
    EJECTING = "EJECTING"
from troveclient import base
from troveclient import common
REBOOT_SOFT = 'SOFT'
REBOOT_HARD = 'HARD'
class Instance(base.Resource):
"""An Instance is an opaque instance used to store Database instances."""
def __repr__(self):
return "<Instance: %s>" % self.name
def list_databases(self):
return self.manager.databases.list(self)
def delete(self):
"""Delete the instance."""
self.manager.delete(self)
def restart(self):
"""Restart the database instance."""
self.manager.restart(self.id)
def detach_replica(self):
"""Stops the replica database from being replicated to."""
self.manager.edit(self.id, detach_replica_source=True)
class Instances(base.ManagerWithFind):
"""Manage :class:`Instance` resources."""
resource_class = Instance
# TODO(SlickNik): Remove slave_of param after updating tests to replica_of
def create(self, name, flavor_id, volume=None, databases=None, users=None,
restorePoint=None, availability_zone=None, datastore=None,
datastore_version=None, nics=None, configuration=None,
replica_of=None, slave_of=None, replica_count=None):
"""Create (boot) a new instance."""
body = {"instance": {
"name": name,
"flavorRef": flavor_id
}}
datastore_obj = {}
if volume:
body["instance"]["volume"] = volume
if databases:
body["instance"]["databases"] = databases
if users:
body["instance"]["users"] = users
if restorePoint:
body["instance"]["restorePoint"] = restorePoint
if availability_zone:
body["instance"]["availability_zone"] = availability_zone
if datastore:
datastore_obj["type"] = datastore
if datastore_version:
datastore_obj["version"] = datastore_version
if datastore_obj:
body["instance"]["datastore"] = datastore_obj
if nics:
body["instance"]["nics"] = nics
if configuration:
body["instance"]["configuration"] = configuration
if replica_of or slave_of:
body["instance"]["replica_of"] = base.getid(replica_of) or slave_of
if replica_count:
body["instance"]["replica_count"] = replica_count
return self._create("/instances", body, "instance")
def modify(self, instance, configuration=None):
body = {
"instance": {
}
}
if configuration is not None:
body["instance"]["configuration"] = configuration
url = "/instances/%s" % base.getid(instance)
resp, body = self.api.client.put(url, body=body)
common.check_for_exceptions(resp, body, url)
def edit(self, instance, configuration=None, name=None,
detach_replica_source=False, remove_configuration=False):
body = {
"instance": {
}
}
if configuration and remove_configuration:
raise Exception("Cannot attach and detach configuration "
"simultaneously.")
if remove_configuration:
body["instance"]["configuration"] = None
if configuration is not None:
body["instance"]["configuration"] = configuration
if name is not None:
body["instance"]["name"] = name
if detach_replica_source:
# TODO(glucas): Remove slave_of after updating trove
# (see trove.instance.service.InstanceController#edit)
body["instance"]["slave_of"] = None
body["instance"]["replica_of"] = None
url = "/instances/%s" % base.getid(instance)
resp, body = self.api.client.patch(url, body=body)
common.check_for_exceptions(resp, body, url)
def list(self, limit=None, marker=None, include_clustered=False):
"""Get a list of all instances.
:rtype: list of :class:`Instance`.
"""
return self._paginated("/instances", "instances", limit, marker,
{"include_clustered": include_clustered})
def get(self, instance):
"""Get a specific instances.
:rtype: :class:`Instance`
"""
return self._get("/instances/%s" % base.getid(instance),
"instance")
def backups(self, instance, limit=None, marker=None):
"""Get the list of backups for a specific instance.
:rtype: list of :class:`Backups`.
"""
url = "/instances/%s/backups" % base.getid(instance)
return self._paginated(url, "backups", limit, marker)
def delete(self, instance):
"""Delete the specified instance.
:param instance: A reference to the instance to delete
"""
url = "/instances/%s" % base.getid(instance)
resp, body = self.api.client.delete(url)
common.check_for_exceptions(resp, body, url)
def _action(self, instance, body):
"""Perform a server "action" -- reboot/rebuild/resize/etc."""
url = "/instances/%s/action" % base.getid(instance)
resp, body = self.api.client.post(url, body=body)
common.check_for_exceptions(resp, body, url)
if body:
return self.resource_class(self, body, loaded=True)
return body
def resize_volume(self, instance, volume_size):
"""Resize the volume on an existing instances."""
body = {"resize": {"volume": {"size": volume_size}}}
self._action(instance, body)
def resize_instance(self, instance, flavor_id):
"""Resizes an instance with a new flavor."""
body = {"resize": {"flavorRef": flavor_id}}
self._action(instance, body)
def restart(self, instance):
"""Restart the database instance.
:param instance: The :class:`Instance` (or its ID) of the database
instance to restart.
"""
body = {'restart': {}}
self._action(instance, body)
def configuration(self, instance):
"""Get a configuration on instances.
:rtype: :class:`Instance`
"""
return self._get("/instances/%s/configuration" % base.getid(instance),
"instance")
def promote_to_replica_source(self, instance):
"""Promote a replica to be the new replica_source of its set
:param instance: The :class:`Instance` (or its ID) of the database
instance to promote.
"""
body = {'promote_to_replica_source': {}}
self._action(instance, body)
def eject_replica_source(self, instance):
"""Eject a replica source from its set
:param instance: The :class:`Instance` (or its ID) of the database
instance to eject.
"""
body = {'eject_replica_source': {}}
self._action(instance, body)
class InstanceStatus(object):
ACTIVE = "ACTIVE"
BLOCKED = "BLOCKED"
BUILD = "BUILD"
FAILED = "FAILED"
REBOOT = "REBOOT"
RESIZE = "RESIZE"
SHUTDOWN = "SHUTDOWN"
RESTART_REQUIRED = "RESTART_REQUIRED"
PROMOTING = "PROMOTING"
EJECTING = "EJECTING" | 0.624294 | 0.098425 |
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.ShapeOptimizationApplication import *
import structural_response_function_factory
# ==============================================================================
def CreateListOfResponseFunctions( optimization_settings, model ):
    """Build and return a dict mapping response identifiers to the Kratos
    response functions requested in the optimization settings."""
    responses = {}
    creator = ResponseFunctionCreator( optimization_settings, model )
    creator.AddSpecifiedKratosResponseFunctionsToList( responses )
    return responses
# ==============================================================================
class ResponseFunctionCreator:
    """Collects the Kratos-based response functions (objectives and
    constraints) requested by the optimization settings."""

    def __init__( self, optimization_settings, model ):
        self.optimization_settings = optimization_settings
        self.model = model

    def AddSpecifiedKratosResponseFunctionsToList( self, list_of_response_functions ):
        """Populate the given dict with every response flagged 'use_kratos'."""
        self.list_of_response_functions = list_of_response_functions
        self.__AddObjectivesToListOfResponseFunctions()
        self.__AddConstraintsToListOfResponseFunctions()

    def __AddObjectivesToListOfResponseFunctions( self ):
        objectives = self.optimization_settings["objectives"]
        for index in range(objectives.size()):
            entry = objectives[index]
            identifier = entry["identifier"].GetString()
            if entry["use_kratos"].GetBool():
                self.__CheckIfGivenResponseFunctionIsAlreadyDefined( identifier )
                self.__CreateAndAddGivenResponse( identifier, entry["kratos_response_settings"] )
        if not self.list_of_response_functions:
            raise ValueError("No objective function specified!")

    def __AddConstraintsToListOfResponseFunctions( self ):
        constraints = self.optimization_settings["constraints"]
        for index in range(constraints.size()):
            entry = constraints[index]
            identifier = entry["identifier"].GetString()
            if entry["use_kratos"].GetBool():
                self.__CheckIfGivenResponseFunctionIsAlreadyDefined( identifier )
                self.__CreateAndAddGivenResponse( identifier, entry["kratos_response_settings"] )

    def __CheckIfGivenResponseFunctionIsAlreadyDefined( self, response_id ):
        # Guard against two responses sharing one identifier.
        if response_id in self.list_of_response_functions:
            raise NameError("There are multiple response functions with the following identifier: " + response_id)

    def __CreateAndAddGivenResponse( self, response_id, response_settings ):
        # Only the structural response types are currently supported.
        response_type = response_settings["response_type"].GetString()
        if response_type in ["strain_energy", "mass", "eigenfrequency"]:
            self.list_of_response_functions[response_id] = \
                structural_response_function_factory.CreateResponseFunction(response_id, response_settings, self.model)
        else:
            raise NameError("The following response function is not available for optimization: " + response_id)
# ============================================================================== | applications/ShapeOptimizationApplication/python_scripts/response_function_factory.py |
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.ShapeOptimizationApplication import *
import structural_response_function_factory
# ==============================================================================
def CreateListOfResponseFunctions( optimization_settings, model ):
list_of_response_functions = {}
response_creator = ResponseFunctionCreator( optimization_settings, model )
response_creator.AddSpecifiedKratosResponseFunctionsToList( list_of_response_functions )
return list_of_response_functions
# ==============================================================================
class ResponseFunctionCreator:
# --------------------------------------------------------------------------
def __init__( self, optimization_settings, model ):
self.optimization_settings = optimization_settings
self.model = model
# --------------------------------------------------------------------------
def AddSpecifiedKratosResponseFunctionsToList( self, list_of_response_functions ):
self.list_of_response_functions = list_of_response_functions
self.__AddObjectivesToListOfResponseFunctions()
self.__AddConstraintsToListOfResponseFunctions()
# --------------------------------------------------------------------------
def __AddObjectivesToListOfResponseFunctions( self ):
for objective_number in range(self.optimization_settings["objectives"].size()):
objective = self.optimization_settings["objectives"][objective_number]
objective_id = objective["identifier"].GetString()
if objective["use_kratos"].GetBool():
self.__CheckIfGivenResponseFunctionIsAlreadyDefined( objective_id )
self.__CreateAndAddGivenResponse( objective_id, objective["kratos_response_settings"] )
if not self.list_of_response_functions:
raise ValueError("No objective function specified!")
# --------------------------------------------------------------------------
def __AddConstraintsToListOfResponseFunctions( self ):
for constraint_number in range(self.optimization_settings["constraints"].size()):
constraint = self.optimization_settings["constraints"][constraint_number]
constraint_id = constraint["identifier"].GetString()
if constraint["use_kratos"].GetBool():
self.__CheckIfGivenResponseFunctionIsAlreadyDefined( constraint_id )
self.__CreateAndAddGivenResponse( constraint_id, constraint["kratos_response_settings"] )
# --------------------------------------------------------------------------
def __CheckIfGivenResponseFunctionIsAlreadyDefined( self, response_id ):
if response_id in self.list_of_response_functions.keys():
raise NameError("There are multiple response functions with the following identifier: " + response_id)
# --------------------------------------------------------------------------
def __CreateAndAddGivenResponse( self, response_id, response_settings ):
response_type = response_settings["response_type"].GetString()
if response_type in ["strain_energy", "mass", "eigenfrequency"]:
self.list_of_response_functions[response_id] = structural_response_function_factory.CreateResponseFunction(response_id, response_settings, self.model)
else:
raise NameError("The following response function is not available for optimization: " + response_id)
# ============================================================================== | 0.77768 | 0.199133 |
import os
import json
import yaml
from pytezos import pytezos
from prompt_toolkit import HTML
from chinstrap.chinstrapCore import Helpers
from prompt_toolkit import print_formatted_text
class ChinstrapConfig:
    """Project configuration loaded from ./chinstrap_config.yaml.

    Selects the requested network, connects to it via pytezos, and loads
    every account (private key file) listed for that network.
    """

    # Network names accepted by __init__; each must exist as an attribute
    # under chinstrap.networks in the YAML config.
    SUPPORTED_NETWORKS = ('development', 'florencenet', 'granada', 'mainnet', 'edo2')

    def __init__(self, network='development', compile=False) -> None:
        if not os.path.exists('./chinstrap_config.yaml'):
            Helpers.fatal('Could not find chinstrap_config.yaml!')
        with open('./chinstrap_config.yaml', 'r') as f:
            confData = yaml.safe_load(f)
        self.config = Helpers.convertYamlToObject(confData).chinstrap
        self.compiler = self.config.compiler
        if not compile:
            # Fail fast with a clear message on an unsupported network name.
            # Previously an unknown name fell through the if/elif chain and
            # crashed later with a confusing AttributeError on self.network.
            if network not in self.SUPPORTED_NETWORKS:
                Helpers.fatal(f'Unknown network: {network}!')
            self.network = getattr(self.config.networks, network)
            self.network.name = network
            msg = HTML(f'Using network: <b>{self.network.host}:{self.network.port}</b>')
            print_formatted_text(msg)
            self.loadAccounts()

    def loadAccounts(self):
        """Load every account configured for the selected network.

        The first account's key becomes the default wallet (self.wallet).
        """
        self.accounts = []
        try:
            keyFile = self.network.accounts[0].privateKeyFile
            with open(keyFile, 'r') as f:
                self.key = f.read().rstrip("\n")
            self.wallet = pytezos.using(shell=f"{self.network.host}:{self.network.port}", key=self.key)
            for account in self.network.accounts:
                self.loadPrivateKeyFromFile(account.privateKeyFile)
        except Exception as e:
            print(e)
            Helpers.fatal(f'Exception occured while loading accounts! {e}')

    def loadPrivateKeyFromFile(self, keyFile):
        """Read a private key from ``keyFile`` and register its wallet."""
        with open(keyFile, 'r') as f:
            key = f.read().rstrip("\n")
        self.loadPrivateKey(key)

    def loadPrivateKey(self, key):
        """Create a pytezos wallet for ``key``, log it and store it."""
        try:
            wallet = pytezos.using(shell=f"{self.network.host}:{self.network.port}", key=key)
        except pytezos.rpc.node.RpcError:
            Helpers.fatal(f"Failed to connect to {self.network.host}:{self.network.port}. Try again in sometime!")
        msg = HTML(f"Loaded wallet <b>{wallet.key.public_key_hash()}</b>. Balance: <b>{wallet.balance()}</b>\n")
        print_formatted_text(msg)
        self.accounts.append(wallet)

    def save(self):
        """Serialize the in-memory configuration back to chinstrap_config.yaml.

        Attributes whose names start with '_' are treated as private and
        skipped, mirroring how Helpers.convertYamlToObject stores data.
        """
        config = {'chinstrap': {'networks': {}, 'compiler': {}}}
        for name, net in self.config.__dict__['networks'].__dict__.items():
            if name[0] != "_":
                network = {'host': net.__dict__['host'], 'port': net.__dict__['port'], 'accounts': []}
                accounts = []
                if 'accounts' in net.__dict__.keys():
                    for entry in net.__dict__['accounts']:
                        for key, value in entry.__dict__.items():
                            if key[0] != "_":
                                accounts.append({key: value})
                network['accounts'] = accounts
                config['chinstrap']['networks'][name] = network
        with open('./chinstrap_config.yaml', 'w') as f:
            f.write(yaml.dump(config))
class ChinstrapConfigHandler:
    """Interactive configuration handler (placeholder).

    TODO: turn this into a REPL, per the original author's note.
    (Dataset-table residue fused onto the final source line has been
    removed -- it made the class definition a syntax error.)
    """

    def __init__(self) -> None:
        pass
import json
import yaml
from pytezos import pytezos
from prompt_toolkit import HTML
from chinstrap.chinstrapCore import Helpers
from prompt_toolkit import print_formatted_text
class ChinstrapConfig:
def __init__(self, network='development', compile=False) -> None:
if not os.path.exists('./chinstrap_config.yaml'):
Helpers.fatal('Could not find chinstrap_config.yaml!')
with open('./chinstrap_config.yaml', 'r') as f:
confData = yaml.safe_load(f)
self.config = Helpers.convertYamlToObject(confData).chinstrap
self.compiler = self.config.compiler
if not compile:
if network=='development':
self.network = self.config.networks.development
self.network.name = 'development'
elif network=='florencenet':
self.network = self.config.networks.florencenet
self.network.name = 'florencenet'
elif network=='granada':
self.network = self.config.networks.granada
self.network.name = 'granada'
elif network=='mainnet':
self.network = self.config.networks.mainnet
self.network.name = 'mainnet'
elif network=='edo2':
self.network = self.config.networks.edo2
self.network.name = 'edo2'
msg = HTML(f'Using network: <b>{self.network.host}:{self.network.port}</b>')
print_formatted_text(msg)
self.loadAccounts()
def loadAccounts(self):
self.accounts = []
try:
keyFile = self.network.accounts[0].privateKeyFile
with open(keyFile, 'r') as f:
self.key = f.read().rstrip("\n")
self.wallet = pytezos.using(shell=f"{self.network.host}:{self.network.port}", key=self.key)
for i in self.network.accounts:
self.loadPrivateKeyFromFile(i.privateKeyFile)
except Exception as e:
print(e)
Helpers.fatal(f'Exception occured while loading accounts! {e}')
def loadPrivateKeyFromFile(self, keyFile):
with open(keyFile, 'r') as f:
key = f.read().rstrip("\n")
self.loadPrivateKey(key)
def loadPrivateKey(self, key):
try:
wallet = pytezos.using(shell=f"{self.network.host}:{self.network.port}", key=key)
except pytezos.rpc.node.RpcError:
Helpers.fatal(f"Failed to connect to {self.network.host}:{self.network.port}. Try again in sometime!")
msg = HTML(f"Loaded wallet <b>{wallet.key.public_key_hash()}</b>. Balance: <b>{wallet.balance()}</b>\n")
print_formatted_text(msg)
self.accounts.append(wallet)
def save(self):
config = {'chinstrap':{'networks':{},'compiler':{}}}
for i,v in self.config.__dict__['networks'].__dict__.items():
if i[0] != "_":
network = {'host':v.__dict__['host'], 'port':v.__dict__['port'], 'accounts':[]}
accounts = []
if 'accounts' in v.__dict__.keys():
for d in v.__dict__['accounts']:
for j, k in d.__dict__.items():
if j[0]!="_":
accounts.append({j:k})
network['accounts'] = accounts
config['chinstrap']['networks'][i] = network
with open('./chinstrap_config.yaml', 'w') as f:
f.write(yaml.dump(config))
class ChinstrapConfigHandler:
# make this a repl
def __init__(self) -> None:
pass | 0.05398 | 0.090013 |
from .lib import utils
from .lib.gravity import Dependency
from .lib.utils import make_block
from .modules import vcs
from .modules.error_state import HasErrorState
from .modules.output import HasOutput
from .modules.structure_handler import HasStructure
__all__ = ["Submit"]
class Submit(HasOutput, HasStructure, HasErrorState):
description = "Submitting module of Universum"
vcs_factory = Dependency(vcs.SubmitVcs)
    @staticmethod
    def define_arguments(parser):
        """Register Submit's command-line options on *parser*."""
        parser.add_argument("--create-review", action="store_true", dest="review",
                            help="create deletable review (shelve for P4, temp branch for Git) "
                                 "instead of actual submitting to repo")
        parser.add_argument("--edit-only", action="store_true", dest="edit_only",
                            help="Only submit existing vcs modifications, no adding or deleting")
        parser.add_argument('--commit-message', '-cm', dest='commit_message', metavar="COMMIT_MESSAGE",
                            help='Commit message to add')
        parser.add_argument("--reconcile-list", "-rl", action="append", nargs='+', dest="reconcile_list",
                            metavar="RECONCILE_LIST",
                            help="List of vcs or directories to be reconciled for commit. "
                                 "Relative paths starting at client root are supported")
    def __init__(self, *args, **kwargs) -> None:
        """Validate required settings and construct the submitting VCS driver."""
        super().__init__(*args, **kwargs)
        # Fail early with a helpful message when no commit message is supplied.
        self.check_required_option("commit_message", """
            Commit message is not specified.
            Please use '--commit-message' option or COMMIT_MESSAGE environment variable.
            """)
        self.vcs = self.vcs_factory()
        self.client = None
@make_block("Executing")
def execute(self):
path_list = utils.unify_argument_list(self.settings.reconcile_list)
change = self.vcs.driver.submit_new_change(self.settings.commit_message,
path_list,
review=self.settings.review,
edit_only=self.settings.edit_only)
if change == 0:
self.out.log("Nothing to submit")
elif self.settings.review:
self.out.log("Review commit " + change + " created")
else:
self.out.log("Change " + change + " submitted")
@make_block("Finalizing", pass_errors=False)
def finalize(self):
self.vcs.finalize() | universum/submit.py | from .lib import utils
from .lib.gravity import Dependency
from .lib.utils import make_block
from .modules import vcs
from .modules.error_state import HasErrorState
from .modules.output import HasOutput
from .modules.structure_handler import HasStructure
__all__ = ["Submit"]
class Submit(HasOutput, HasStructure, HasErrorState):
description = "Submitting module of Universum"
vcs_factory = Dependency(vcs.SubmitVcs)
@staticmethod
def define_arguments(parser):
parser.add_argument("--create-review", action="store_true", dest="review",
help="create deletable review (shelve for P4, temp branch for Git) "
"instead of actual submitting to repo")
parser.add_argument("--edit-only", action="store_true", dest="edit_only",
help="Only submit existing vcs modifications, no adding or deleting")
parser.add_argument('--commit-message', '-cm', dest='commit_message', metavar="COMMIT_MESSAGE",
help='Commit message to add')
parser.add_argument("--reconcile-list", "-rl", action="append", nargs='+', dest="reconcile_list",
metavar="RECONCILE_LIST",
help="List of vcs or directories to be reconciled for commit. "
"Relative paths starting at client root are supported")
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.check_required_option("commit_message", """
Commit message is not specified.
Please use '--commit-message' option or COMMIT_MESSAGE environment variable.
""")
self.vcs = self.vcs_factory()
self.client = None
@make_block("Executing")
def execute(self):
path_list = utils.unify_argument_list(self.settings.reconcile_list)
change = self.vcs.driver.submit_new_change(self.settings.commit_message,
path_list,
review=self.settings.review,
edit_only=self.settings.edit_only)
if change == 0:
self.out.log("Nothing to submit")
elif self.settings.review:
self.out.log("Review commit " + change + " created")
else:
self.out.log("Change " + change + " submitted")
@make_block("Finalizing", pass_errors=False)
def finalize(self):
self.vcs.finalize() | 0.483405 | 0.05848 |
"""Testing for custom_predict.py"""
import csv
import os
import unittest
from unittest import TestCase
from keyword_clustering import cluster_keywords
TEMP_TESTING_FILE = "./temp_file_for_testing_keyword_clustering"
class KeywordClusteringTest(TestCase):
    """Tests for cluster_keywords() over a tab-separated clustering file."""

    def tearDown(self):
        # Each test writes its own fixture; remove it afterwards.
        os.remove(TEMP_TESTING_FILE)

    def test_keyword_clustering_with_nonempty_summary(self):
        """Every object has a cluster in every model column."""
        with open(TEMP_TESTING_FILE, "wt") as f:
            tsv_writer = csv.writer(f, delimiter='\t')
            tsv_writer.writerow(["Original", "Model1", "Model2"])
            tsv_writer.writerow(["Object1", "Cluster1", "Cluster1"])
            tsv_writer.writerow(["Object2", "Cluster1", "Cluster1"])
            tsv_writer.writerow(["Object3", "Cluster1", "Cluster2"])
            tsv_writer.writerow(["Object4", "Cluster2", "Cluster2"])
        shortened_keywords_list, total_keyword_counts, model_name_list = cluster_keywords(
            TEMP_TESTING_FILE)
        # One dict per model, mapping lower-cased cluster name -> objects.
        self.assertEqual(shortened_keywords_list, [{
            'cluster1': ['Object1', 'Object2', 'Object3'],
            'cluster2': ['Object4']
        }, {
            'cluster1': ['Object1', 'Object2'],
            'cluster2': ['Object3', 'Object4']
        }])
        self.assertEqual(total_keyword_counts, 4)
        self.assertEqual(model_name_list, ["Model1", "Model2"])

    def test_keyword_clustering_with_empty_summary(self):
        """Empty cluster cells are dropped from the per-model mappings."""
        with open(TEMP_TESTING_FILE, "wt") as f:
            tsv_writer = csv.writer(f, delimiter='\t')
            tsv_writer.writerow(["Original", "Model1", "Model2"])
            tsv_writer.writerow(["Object1", "", "Cluster1"])
            tsv_writer.writerow(["Object2", "Cluster1", "Cluster1"])
            tsv_writer.writerow(["Object3", "Cluster1", ""])
            tsv_writer.writerow(["Object4", "Cluster2", ""])
        shortened_keywords_list, total_keyword_counts, model_name_list = cluster_keywords(
            TEMP_TESTING_FILE)
        self.assertEqual(shortened_keywords_list, [{
            'cluster1': ['Object2', 'Object3'],
            'cluster2': ['Object4']
        }, {
            'cluster1': ['Object1', 'Object2']
        }])
        # The total still counts all four input keywords.
        self.assertEqual(total_keyword_counts, 4)
        self.assertEqual(model_name_list, ["Model1", "Model2"])
if __name__ == '__main__':
unittest.main() | keyword_clustering_test.py | """Testing for custom_predict.py"""
import csv
import os
import unittest
from unittest import TestCase
from keyword_clustering import cluster_keywords
TEMP_TESTING_FILE = "./temp_file_for_testing_keyword_clustering"
class KeywordClusteringTest(TestCase):
def tearDown(self):
os.remove(TEMP_TESTING_FILE)
def test_keyword_clustering_with_nonempty_summary(self):
with open(TEMP_TESTING_FILE, "wt") as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(["Original", "Model1", "Model2"])
tsv_writer.writerow(["Object1", "Cluster1", "Cluster1"])
tsv_writer.writerow(["Object2", "Cluster1", "Cluster1"])
tsv_writer.writerow(["Object3", "Cluster1", "Cluster2"])
tsv_writer.writerow(["Object4", "Cluster2", "Cluster2"])
shortened_keywords_list, total_keyword_counts, model_name_list = cluster_keywords(
TEMP_TESTING_FILE)
self.assertEqual(shortened_keywords_list, [{
'cluster1': ['Object1', 'Object2', 'Object3'],
'cluster2': ['Object4']
}, {
'cluster1': ['Object1', 'Object2'],
'cluster2': ['Object3', 'Object4']
}])
self.assertEqual(total_keyword_counts, 4)
self.assertEqual(model_name_list, ["Model1", "Model2"])
def test_keyword_clustering_with_empty_summary(self):
with open(TEMP_TESTING_FILE, "wt") as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(["Original", "Model1", "Model2"])
tsv_writer.writerow(["Object1", "", "Cluster1"])
tsv_writer.writerow(["Object2", "Cluster1", "Cluster1"])
tsv_writer.writerow(["Object3", "Cluster1", ""])
tsv_writer.writerow(["Object4", "Cluster2", ""])
shortened_keywords_list, total_keyword_counts, model_name_list = cluster_keywords(
TEMP_TESTING_FILE)
self.assertEqual(shortened_keywords_list, [{
'cluster1': ['Object2', 'Object3'],
'cluster2': ['Object4']
}, {
'cluster1': ['Object1', 'Object2']
}])
self.assertEqual(total_keyword_counts, 4)
self.assertEqual(model_name_list, ["Model1", "Model2"])
if __name__ == '__main__':
unittest.main() | 0.454714 | 0.350394 |
from titlesearch.bakaupdates import BakaUpdates
from titlesearch.mal import MyAnimeList
from titlesearch.vndb import VisualNovelDatabase
def get_similar_titles(title: str) -> list:
    """Search all three title sources and merge similar-title results.

    Queries BakaUpdates (light novels), VNDB (visual novels) and MyAnimeList
    (anime).  Results sharing the same title are merged into one entry whose
    ``links`` list holds every source link; the similarity of the first
    occurrence is kept.

    :param title: title to search for
    :type title: str
    :return: list of ``{'title', 'links', 'similarity'}`` dicts sorted by
        descending similarity
    """
    light_novel_results = BakaUpdates.get_similar_titles(title)
    visual_novel_results = VisualNovelDatabase.get_similar_titles(title)
    anime_results = MyAnimeList.get_similar_titles(title)

    results = []
    # Map title -> its entry in `results` for O(1) de-duplication instead of
    # the original O(n) list.index scan per result.
    entries_by_title = {}
    for result_list in (light_novel_results, visual_novel_results, anime_results):
        for result in result_list:
            entry = entries_by_title.get(result['title'])
            if entry is not None:
                entry['links'].append(result['link'])
            else:
                entry = {
                    'title': result['title'],
                    'links': [result['link']],
                    'similarity': result['similarity']
                }
                results.append(entry)
                entries_by_title[result['title']] = entry

    # list.sort is stable, so equally similar titles keep source order.
    results.sort(key=lambda item: item['similarity'], reverse=True)
    return results
def get_alternative_titles(title: str = '') -> dict:
    """Collect alternative titles for *title* from all three sources.

    Merges the per-language alternative-title mappings returned by
    BakaUpdates, VNDB and MyAnimeList into one dictionary, dropping
    duplicates while preserving first-seen order.

    :param title: title to look up
    :type title: str
    :return: mapping of language -> list of unique alternative titles
    """
    light_novel_results = BakaUpdates.get_alternative_titles(title=title)
    visual_novel_results = VisualNovelDatabase.get_alternative_titles(title=title)
    anime_results = MyAnimeList.get_alternative_titles(title=title)

    alternative_titles = {}
    for result_list in (light_novel_results, visual_novel_results, anime_results):
        for language, titles in result_list.items():
            if not titles:
                continue
            merged = alternative_titles.setdefault(language, [])
            # Fix: the original loop variable shadowed the `title` parameter,
            # silently clobbering it for the rest of the function.
            for alt_title in titles:
                if alt_title not in merged:
                    merged.append(alt_title)
    return alternative_titles
from titlesearch.bakaupdates import BakaUpdates
from titlesearch.mal import MyAnimeList
from titlesearch.vndb import VisualNovelDatabase
def get_similar_titles(title: str) -> list:
"""search the 3 different modules for a similar title and return a list sorted by similarity
:type title: str
:return:
"""
light_novel_results = BakaUpdates.get_similar_titles(title)
visual_novel_results = VisualNovelDatabase.get_similar_titles(title)
anime_results = MyAnimeList.get_similar_titles(title)
results = []
passed_titles = []
for result_list in (light_novel_results, visual_novel_results, anime_results):
for result in result_list:
if result['title'] in passed_titles:
results[passed_titles.index(result['title'])]['links'].append(result['link'])
else:
results.append({
'title': result['title'],
'links': [result['link']],
'similarity': result['similarity']
})
passed_titles.append(result['title'])
results.sort(key=lambda item: item['similarity'], reverse=True)
return results
def get_alternative_titles(title: str = '') -> dict:
"""Search the 3 different modules for an alternative title of the given title and return a
dictionary split into the different languages
:type title: str
:return:
"""
light_novel_results = BakaUpdates.get_alternative_titles(title=title)
visual_novel_results = VisualNovelDatabase.get_alternative_titles(title=title)
anime_results = MyAnimeList.get_alternative_titles(title=title)
alternative_titles = {}
for result_list in (light_novel_results, visual_novel_results, anime_results):
for language in result_list:
if not result_list[language]:
continue
for title in result_list[language]:
if language not in alternative_titles:
alternative_titles[language] = [title]
continue
if title not in alternative_titles[language]:
alternative_titles[language].append(title)
return alternative_titles | 0.696165 | 0.304468 |
import matplotlib.pyplot as plt
import numpy as np
import os
from plotdata import PlotData
from graph import Graph as gh
import pandas as pd
from plotdata import PlotData
class Graphics():
    """Generate training/validation accuracy and loss graphs from CSV logs."""

    def generate_graph(self, csv_file, save_path):
        """Generate accuracy and loss graphs from the data in *csv_file*.

        Each graph holds two lines (PlotData): the validation metric in red
        and the training metric in blue.  The Graph object (gh) receives the
        two plots, a title, the axis labels, a boolean flag controlling how
        the y-ticks are generated (True for the loss graph, False for the
        accuracy graph), and the directory to save the image in.

        :param csv_file: location of the CSV file the data is read from
        :param save_path: directory the graph image is saved to
        """
        df = pd.read_csv(csv_file)

        # One x-value per training epoch.
        epochs = list(range(len(df['loss'])))

        # Accuracy graph
        plot1 = PlotData(epochs, df['val_accuracy'], "red", "Validation accuracy")
        plot2 = PlotData(epochs, df['accuracy'], "blue", "Training accuracy")
        graph = gh(plot1, plot2, "TITLE", "Training Epoch", "Training Accuracy", False, save_path)
        graph.generate_graph()
        del plot1, plot2, graph

        # Loss graph
        plot1 = PlotData(epochs, df['val_loss'], "red", "Validation loss")
        plot2 = PlotData(epochs, df['loss'], "blue", "Training loss")
        graph = gh(plot1, plot2, "TITLE", "Training Epoch", "Training Loss", True, save_path)
        graph.generate_graph()
        del plot1, plot2, graph

    def generate_mass_graphs(self, custom_logs_folder_path):
        """Generate graphs for every model sub-folder under Custom_logs.

        Scans each model folder for the plain metrics CSV (skipping the
        "_confusion_matrix", "_hyper" and "_prediction" variants), creates a
        "graphs" sub-folder when missing, and renders graphs from each
        matching file.

        :param custom_logs_folder_path: location of the Custom_logs folder
        """
        model_folders = os.listdir(custom_logs_folder_path)
        for folder in model_folders:
            # Fix: use os.path.join instead of hard-coded "\\" separators so
            # the code also works on non-Windows platforms.
            path = os.path.join(custom_logs_folder_path, folder)
            for file in os.listdir(path):
                if not file.endswith(".csv"):
                    continue
                if ("_confusion_matrix" in file or "_hyper" in file
                        or "_prediction" in file):
                    continue
                # Create a folder that stores graphs.  (The original had a
                # stray line-continuation backslash before this comment,
                # which was a syntax error.)
                try:
                    os.mkdir(os.path.join(path, "graphs"))
                except FileExistsError:
                    pass
                csv_file = os.path.join(path, file)
                save_path = os.path.join(path, "graphs")
                # Fix: the original called gh.generate_graph (the Graph
                # class) instead of this instance's own method.
                self.generate_graph(csv_file, save_path)
import numpy as np
import os
from plotdata import PlotData
from graph import Graph as gh
import pandas as pd
from plotdata import PlotData
class Graphics():
def generate_graph(self, csv_file, save_path):
''' Generate a graph
This function generates a graph with data from csv_file.
Each line (PlotData) in the graph have some settings (parameters). For example:
- x values - [List] X values on the graph
- y values - [List] Y values on the graph
- color - [String] What color the line is going to have
- label - [String] Label/name of the line. Used for "plt.legend"
Even the graph (gh) have some settings (parameters). For example:
- gh(line (plot) 1, line (plot) 2, Title on graph, x label, y label, ticks settings (more information
about this further down), save path
plot1 - [PlotData obj] Obj containing line data
plot2 - [PlotData obj] Obj containing line data
title,- [String] Title on the graph
xlabel - [String] X-label
ylabel - [String] Y-label
validation - [Boolean] A flag that determines how the y-values are going to be generated (plt.yticks). So right
now there are two different settings (true or false). (If you need more settings this could be changed to a
string or something.)
save_path - [String] Where the graph is going to be saved.
:args:
:param csv_file: The location of the csv file where the data are collected
:param save_path: The location where the graph is going to be saved at
'''
# Read the csv file
df = pd.read_csv(csv_file)
# Epochs
epochs = []
for x in range(len(df['loss'])):
epochs.append(x)
# Accuracy
plot1 = PlotData(epochs, df['val_accuracy'], "red", "Validation accuracy")
plot2 = PlotData(epochs, df['accuracy'], "blue", "Training accuracy")
graph = gh(plot1, plot2, "TITLE", "Training Epoch", "Training Accuracy", False, save_path)
graph.generate_graph()
del plot1, plot2, graph
# Loss
plot1 = PlotData(epochs, df['val_loss'], "red", "Validation loss")
plot2 = PlotData(epochs, df['loss'], "blue", "Training loss")
graph = gh(plot1, plot2, "TITLE", "Training Epoch", "Training Loss", True, save_path)
graph.generate_graph()
del plot1, plot2, graph
def generate_mass_graphs(self, custom_logs_folder_path):
''' Find folder containing data
This function find all models sub folders in Custom_logs and fetches the right .csv file containing the
data we want to generate graph form. This function do also create a folder called "graphs" if the folder
doesn't exists.
There are three different types of .csv in Custom_logs folder. This script in the current form is only
interested in the .csv file not containing "_confusion_matrix", "_hyper" or "_prediction".
:args:
:param custom_logs_folder_path: The location of Custom_logs folder
'''
model_folders = os.listdir(custom_logs_folder_path)
for folder in model_folders:
path = custom_logs_folder_path + "\\" + folder
for file in os.listdir(path):
if file.endswith(".csv"):
if "_confusion_matrix" not in file and \
"_hyper" not in file and \
"_prediction" not in file: \
# Create a folder that stores graphs
try:
os.mkdir(path + '\\' + "graphs")
except FileExistsError:
pass
csv_file = path + '\\' + file
save_path = path + '\\' + "graphs"
gh.generate_graph(csv_file, save_path) | 0.744285 | 0.602296 |
import itertools
import numpy as np
import pytest
from chunkblocks.global_offset_array import GlobalOffsetArray
from chunkblocks.iterators import Iterator
from chunkblocks.models import Block, Chunk
class IdentityIterator(Iterator):
    """Minimal Iterator stub used by the block/chunk tests below."""
    def get_all_neighbors(self, index, max=None):
        # An index's only "neighbor" is the index itself.
        return index
    def get(self, start, dimensions):
        # Yield only the starting index; *dimensions* is ignored.
        yield start
class TestChunk:
    """Tests for Chunk border/core slice computation."""

    def test_get_border_slices_2d(self):
        """Border + core slices of a 2-D chunk must tile it exactly once."""
        bounds = (slice(0, 50), slice(0, 50))
        chunk_shape = (30, 30)
        overlap = (10, 10)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        chunk = Chunk(block, (0, 0))
        borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
        fake_data = np.zeros(chunk.shape)
        for slices in chunk.border_slices(borders):
            fake_data[slices] += 1
        fake_data[chunk.core_slices(borders)] += 1
        # Fix: np.prod replaces np.product, which was removed in NumPy 2.0.
        assert fake_data.sum() == np.prod(fake_data.shape)

    def test_get_border_slices_3d(self):
        """Same exact-tiling property in 3-D."""
        bounds = (slice(0, 70), slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30, 30)
        overlap = (10, 10, 10)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        chunk = Chunk(block, (0, 0, 0))
        borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
        fake_data = np.zeros(chunk.shape)
        for slices in chunk.border_slices(borders):
            fake_data[slices] += 1
        fake_data[chunk.core_slices(borders)] += 1
        # Fix: np.prod replaces np.product, which was removed in NumPy 2.0.
        assert fake_data.sum() == np.prod(fake_data.shape)

    def test_get_border_slices_3d_overlapping(self):
        """With nonintersecting=False the border slices overlap at corners."""
        bounds = (slice(0, 7), slice(0, 7), slice(0, 7))
        chunk_shape = (3, 3, 3)
        overlap = (1, 1, 1)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        chunk = Chunk(block, (0, 0, 0))
        borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
        fake_data = np.zeros(chunk.shape)
        for slices in chunk.border_slices(borders, nonintersecting=False):
            fake_data[slices] += 1
        fake_data[chunk.core_slices(borders)] += 1
        # Corner voxels are covered by three faces, edges by two, the
        # center by the core pass only.
        assert np.array_equal(fake_data, [[[3, 2, 3],
                                           [2, 1, 2],
                                           [3, 2, 3]],
                                          [[2, 1, 2],
                                           [1, 1, 1],
                                           [2, 1, 2]],
                                          [[3, 2, 3],
                                           [2, 1, 2],
                                           [3, 2, 3]]])
class TestBlock:
    """Tests for Block construction, index/slice conversion, iteration,
    overlap handling, checkpointing and cross-block slice mapping."""

    def test_init_wrong_size_no_overlap(self):
        """Bounds not divisible into chunks without overlap must raise."""
        bounds = (slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30)
        with pytest.raises(ValueError):
            Block(bounds=bounds, chunk_shape=chunk_shape)

    def test_init(self):
        """Valid and invalid combinations of bounds/offset/num_chunks."""
        bounds = (slice(0, 70), slice(0, 70))
        offset = (0, 0)
        num_chunks = (3, 3)
        overlap = (10, 10)
        chunk_shape = (30, 30)
        # test with bounds
        Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        # test with offset/num_chunks
        Block(offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
        # test with both offset/num_chunks
        Block(bounds=bounds, offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
        # test fail with neither block and offset offset/num_chunks
        with pytest.raises(ValueError):
            Block(chunk_shape=chunk_shape, overlap=overlap)
        # test fail with only offset no num_chunks
        with pytest.raises(ValueError):
            Block(offset=offset, chunk_shape=chunk_shape, overlap=overlap)
        # test fail with only num_chuks no offset
        with pytest.raises(ValueError):
            Block(num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
        # test incorrect matching bounds with offset/num_chunks
        with pytest.raises(Exception):
            Block(bounds=(slice(b.start, b.stop + 1) for b in bounds),
                  offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)

    def test_init_wrong_size_overlap(self):
        """Mis-sized bounds still raise when overlap is omitted."""
        bounds = (slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30)
        with pytest.raises(ValueError):
            Block(bounds=bounds, chunk_shape=chunk_shape)

    def test_index_to_slices(self):
        """Unit indices map to overlapping chunk slices."""
        bounds = (slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30)
        overlap = (10, 10)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        assert block.unit_index_to_slices((0, 0)) == (slice(0, 30), slice(0, 30))
        assert block.unit_index_to_slices((0, 1)) == (slice(0, 30), slice(20, 50))
        assert block.unit_index_to_slices((1, 0)) == (slice(20, 50), slice(0, 30))

    def test_slices_to_index(self):
        """Chunk slices map back to their unit indices (inverse of above)."""
        bounds = (slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30)
        overlap = (10, 10)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        assert block.chunk_slices_to_unit_index((slice(0, 30), slice(0, 30))) == (0, 0)
        assert block.chunk_slices_to_unit_index((slice(0, 30), slice(20, 50))) == (0, 1)
        assert block.chunk_slices_to_unit_index((slice(20, 50), slice(0, 30))) == (1, 0)
        assert block.chunk_slices_to_unit_index((slice(20, 50), slice(20, 50))) == (1, 1)

    def test_iterator(self):
        """A custom base iterator controls which chunks are produced."""
        bounds = (slice(0, 70), slice(0, 70))
        chunk_shape = (30, 30)
        overlap = (10, 10)
        start = (0, 0)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap, base_iterator=IdentityIterator())
        chunks = list(block.chunk_iterator(start))
        assert len(chunks) == 1
        assert chunks[0].unit_index == start

    def test_get_slices_2d(self):
        """Overlap + core slices across all chunks tile the 2-D block once."""
        bounds = (slice(0, 7), slice(0, 7))
        chunk_shape = (3, 3)
        overlap = (1, 1)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        fake_data = GlobalOffsetArray(np.zeros(block.shape), global_offset=(0, 0))
        assert block.num_chunks == (3, 3)
        for chunk in block.chunk_iterator((0, 0)):
            for edge_slice in block.overlap_slices(chunk):
                fake_data[edge_slice] += 1
            fake_data[block.core_slices(chunk)] += 1
        # Fix: np.prod replaces np.product, which was removed in NumPy 2.0.
        assert fake_data.sum() == np.prod(fake_data.shape)

    def test_overlap_slices_3d(self):
        """Same tiling property in 3-D, starting from a non-origin chunk."""
        bounds = (slice(0, 7), slice(0, 7), slice(0, 7))
        chunk_shape = (3, 3, 3)
        overlap = (1, 1, 1)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        assert block.num_chunks == (3, 3, 3)
        fake_data = GlobalOffsetArray(np.zeros(block.shape), global_offset=(0, 0, 0))
        for chunk in block.chunk_iterator((1, 0, 1)):
            for edge_slice in block.overlap_slices(chunk):
                fake_data[edge_slice] += 1
            fake_data[block.core_slices(chunk)] += 1
        # Fix: np.prod replaces np.product, which was removed in NumPy 2.0.
        assert fake_data.sum() == np.prod(fake_data.shape)

    def test_checkpoints(self):
        """Per-stage checkpoints and neighbor-completion queries."""
        bounds = (slice(0, 7), slice(0, 7), slice(0, 7))
        chunk_shape = (3, 3, 3)
        overlap = (1, 1, 1)
        block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
        for chunk in block.chunk_iterator((1, 0, 1)):
            block.checkpoint(chunk)
            assert block.is_checkpointed(chunk)
            assert block.is_checkpointed(chunk, stage=0)
        for chunk in block.chunk_iterator((1, 0, 1)):
            assert not block.is_checkpointed(chunk, stage=1)
            assert not block.checkpoint(chunk, stage=1)
            assert block.all_neighbors_checkpointed(chunk, stage=0)
            block.checkpoint(chunk, stage=1)
        stage = 0
        for chunk in block.chunk_iterator((1, 0, 1)):
            print(block.checkpoints[stage][chunk.unit_index])
            for c in block.get_all_neighbors(chunk):
                print(c.unit_index, block.checkpoints[stage][c.unit_index])
            assert block.all_neighbors_checkpointed(chunk, stage=0)

    def test_slices_to_indices(self):
        """Chunk slices of one block map onto covering indices of another.

        For each chunk of block_2, the union of the mapped block_1 chunks
        must cover every in-bounds coordinate of the block_2 chunk (checked
        along axis 1), in both directions and with open-ended (None) slices.
        """
        bounds_1 = (slice(0, 16), slice(0, 16), slice(0, 16))
        chunk_shape_1 = (4, 4, 4)
        overlap_1 = (1, 1, 1)
        block_1 = Block(bounds=bounds_1, chunk_shape=chunk_shape_1, overlap=overlap_1)
        bounds_2 = (slice(-1, 25), slice(-1, 25), slice(-1, 25))
        chunk_shape_2 = (6, 6, 6)
        overlap_2 = (1, 1, 1)
        block_2 = Block(bounds=bounds_2, chunk_shape=chunk_shape_2, overlap=overlap_2)
        index = 1
        for unit_index in range(0, block_2.num_chunks[index]):
            chunk_2 = Chunk(block_2, (0, unit_index))
            chunk_2_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                        range(chunk_2.slices[index].start, chunk_2.slices[index].stop)))
            print('expect:', chunk_2.slices, chunk_2_coords)
            for unit_index in block_1.slices_to_unit_indices(chunk_2.slices):
                chunk_1 = Chunk(block_1, unit_index)
                chunk_1_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                            range(chunk_1.slices[index].start, chunk_1.slices[index].stop)))
                print(chunk_1.slices, chunk_1_coords)
                chunk_2_coords.difference_update(chunk_1_coords)
                assert all(tuple(u >= 0 and u <= n for u, n in zip(unit_index, block_1.num_chunks)))
            print('left', chunk_2_coords)
            assert len(chunk_2_coords) == 0

        # Test reverse direction
        block_2_temp = block_2
        block_2 = block_1
        block_1 = block_2_temp
        index = 1
        for unit_index in range(0, block_2.num_chunks[index]):
            chunk_2 = Chunk(block_2, (0, unit_index))
            chunk_2_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                        range(chunk_2.slices[index].start, chunk_2.slices[index].stop)))
            print('expect:', chunk_2.slices, chunk_2_coords)
            for unit_index in block_1.slices_to_unit_indices(chunk_2.slices):
                chunk_1 = Chunk(block_1, unit_index)
                chunk_1_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                            range(chunk_1.slices[index].start, chunk_1.slices[index].stop)))
                print(chunk_1.slices, chunk_1_coords)
                chunk_2_coords.difference_update(chunk_1_coords)
                assert all(tuple(u >= 0 and u <= n for u, n in zip(unit_index, block_1.num_chunks)))
            print('left', chunk_2_coords)
            assert len(chunk_2_coords) == 0

        # Test None
        index = 1
        for unit_index in range(0, block_2.num_chunks[index]):
            chunk_2 = Chunk(block_2, (0, unit_index))
            # use fake slices with None here!
            chunk_2_slices = (slice(None, None),) + chunk_2.slices[1:]
            chunk_2_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                        range(chunk_2_slices[index].start, chunk_2_slices[index].stop)))
            print('expect:', chunk_2_slices, chunk_2_coords)
            for unit_index in block_1.slices_to_unit_indices(chunk_2_slices):
                chunk_1 = Chunk(block_1, unit_index)
                chunk_1_coords = set(filter(lambda x: x >= block_1.bounds[index].start and x < block_1.bounds[index].stop,
                                            range(chunk_1.slices[index].start, chunk_1.slices[index].stop)))
                print(chunk_1.slices, chunk_1_coords)
                chunk_2_coords.difference_update(chunk_1_coords)
                assert all(tuple(u >= 0 and u <= n for u, n in zip(unit_index, block_1.num_chunks)))
            print('left', chunk_2_coords)
            assert len(chunk_2_coords) == 0
import numpy as np
import pytest
from chunkblocks.global_offset_array import GlobalOffsetArray
from chunkblocks.iterators import Iterator
from chunkblocks.models import Block, Chunk
class IdentityIterator(Iterator):
def get_all_neighbors(self, index, max=None):
return index
def get(self, start, dimensions):
yield start
class TestChunk:
def test_get_border_slices_2d(self):
bounds = (slice(0, 50), slice(0, 50))
chunk_shape = (30, 30)
overlap = (10, 10)
block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
chunk = Chunk(block, (0, 0))
borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
fake_data = np.zeros(chunk.shape)
for slices in chunk.border_slices(borders):
fake_data[slices] += 1
fake_data[chunk.core_slices(borders)] += 1
assert fake_data.sum() == np.product(fake_data.shape)
def test_get_border_slices_3d(self):
bounds = (slice(0, 70), slice(0, 70), slice(0, 70))
chunk_shape = (30, 30, 30)
overlap = (10, 10, 10)
block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
chunk = Chunk(block, (0, 0, 0))
borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
fake_data = np.zeros(chunk.shape)
for slices in chunk.border_slices(borders):
fake_data[slices] += 1
fake_data[chunk.core_slices(borders)] += 1
assert fake_data.sum() == np.product(fake_data.shape)
def test_get_border_slices_3d_overlapping(self):
bounds = (slice(0, 7), slice(0, 7), slice(0, 7))
chunk_shape = (3, 3, 3)
overlap = (1, 1, 1)
block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
chunk = Chunk(block, (0, 0, 0))
borders = list(itertools.product(range(0, len(bounds)), [-1, 1]))
fake_data = np.zeros(chunk.shape)
for slices in chunk.border_slices(borders, nonintersecting=False):
fake_data[slices] += 1
fake_data[chunk.core_slices(borders)] += 1
assert np.array_equal(fake_data, [[[3, 2, 3],
[2, 1, 2],
[3, 2, 3]],
[[2, 1, 2],
[1, 1, 1],
[2, 1, 2]],
[[3, 2, 3],
[2, 1, 2],
[3, 2, 3]]])
class TestBlock:
def test_init_wrong_size_no_overlap(self):
bounds = (slice(0, 70), slice(0, 70))
chunk_shape = (30, 30)
with pytest.raises(ValueError):
Block(bounds=bounds, chunk_shape=chunk_shape)
def test_init(self):
bounds = (slice(0, 70), slice(0, 70))
offset = (0, 0)
num_chunks = (3, 3)
overlap = (10, 10)
chunk_shape = (30, 30)
# test with bounds
Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
# test with offset/num_chunks
Block(offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
# test with both offset/num_chunks
Block(bounds=bounds, offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
# test fail with neither block and offset offset/num_chunks
with pytest.raises(ValueError):
Block(chunk_shape=chunk_shape, overlap=overlap)
# test fail with only offset no num_chunks
with pytest.raises(ValueError):
Block(offset=offset, chunk_shape=chunk_shape, overlap=overlap)
# test fail with only num_chuks no offset
with pytest.raises(ValueError):
Block(num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
# test incorrect matching bounds with offset/num_chunks
with pytest.raises(Exception):
Block(bounds=(slice(b.start, b.stop + 1) for b in bounds),
offset=offset, num_chunks=num_chunks, chunk_shape=chunk_shape, overlap=overlap)
def test_init_wrong_size_overlap(self):
bounds = (slice(0, 70), slice(0, 70))
chunk_shape = (30, 30)
with pytest.raises(ValueError):
Block(bounds=bounds, chunk_shape=chunk_shape)
def test_index_to_slices(self):
bounds = (slice(0, 70), slice(0, 70))
chunk_shape = (30, 30)
overlap = (10, 10)
block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
assert block.unit_index_to_slices((0, 0)) == (slice(0, 30), slice(0, 30))
assert block.unit_index_to_slices((0, 1)) == (slice(0, 30), slice(20, 50))
assert block.unit_index_to_slices((1, 0)) == (slice(20, 50), slice(0, 30))
def test_slices_to_index(self):
bounds = (slice(0, 70), slice(0, 70))
chunk_shape = (30, 30)
overlap = (10, 10)
block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
assert block.chunk_slices_to_unit_index((slice(0, 30), slice(0, 30))) == (0, 0)
assert block.chunk_slices_to_unit_index((slice(0, 30), slice(20, 50))) == (0, 1)
assert block.chunk_slices_to_unit_index((slice(20, 50), slice(0, 30))) == (1, 0)
assert block.chunk_slices_to_unit_index((slice(20, 50), slice(20, 50))) == (1, 1)
def test_iterator(self):
    """With an IdentityIterator, chunk_iterator yields only the start chunk."""
    origin = (0, 0)
    grid = Block(bounds=(slice(0, 70), slice(0, 70)), chunk_shape=(30, 30),
                 overlap=(10, 10), base_iterator=IdentityIterator())
    produced = list(grid.chunk_iterator(origin))
    assert len(produced) == 1
    assert produced[0].unit_index == origin
def test_get_slices_2d(self):
    """Core + overlap slices of every chunk must tile the 2-D block exactly once.

    Each voxel is incremented exactly one time across all chunks, so the grand
    total equals the number of elements in the block.
    """
    bounds = (slice(0, 7), slice(0, 7))
    chunk_shape = (3, 3)
    overlap = (1, 1)
    block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
    fake_data = GlobalOffsetArray(np.zeros(block.shape), global_offset=(0, 0))
    assert block.num_chunks == (3, 3)
    for chunk in block.chunk_iterator((0, 0)):
        for edge_slice in block.overlap_slices(chunk):
            fake_data[edge_slice] += 1
        fake_data[block.core_slices(chunk)] += 1
    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported spelling and computes the same value here.
    assert fake_data.sum() == np.prod(fake_data.shape)
def test_overlap_slices_3d(self):
    """Core + overlap slices of every chunk must tile the 3-D block exactly once."""
    bounds = (slice(0, 7), slice(0, 7), slice(0, 7))
    chunk_shape = (3, 3, 3)
    overlap = (1, 1, 1)
    block = Block(bounds=bounds, chunk_shape=chunk_shape, overlap=overlap)
    assert block.num_chunks == (3, 3, 3)
    fake_data = GlobalOffsetArray(np.zeros(block.shape), global_offset=(0, 0, 0))
    for chunk in block.chunk_iterator((1, 0, 1)):
        for edge_slice in block.overlap_slices(chunk):
            fake_data[edge_slice] += 1
        fake_data[block.core_slices(chunk)] += 1
    # np.product was deprecated and removed in NumPy 2.0; use np.prod.
    assert fake_data.sum() == np.prod(fake_data.shape)
def test_checkpoints(self):
    """Checkpoints are tracked per stage; neighbour queries respect stages."""
    block = Block(bounds=(slice(0, 7), slice(0, 7), slice(0, 7)),
                  chunk_shape=(3, 3, 3), overlap=(1, 1, 1))
    origin = (1, 0, 1)
    # stage 0: mark every chunk and confirm it reads back
    for chunk in block.chunk_iterator(origin):
        block.checkpoint(chunk)
        assert block.is_checkpointed(chunk)
        assert block.is_checkpointed(chunk, stage=0)
    # stage 1: starts clear; all stage-0 neighbours are complete; then mark
    for chunk in block.chunk_iterator(origin):
        assert not block.is_checkpointed(chunk, stage=1)
        assert not block.checkpoint(chunk, stage=1)
        assert block.all_neighbors_checkpointed(chunk, stage=0)
        block.checkpoint(chunk, stage=1)
    stage = 0
    for chunk in block.chunk_iterator(origin):
        print(block.checkpoints[stage][chunk.unit_index])
        for neighbor in block.get_all_neighbors(chunk):
            print(neighbor.unit_index, block.checkpoints[stage][neighbor.unit_index])
        assert block.all_neighbors_checkpointed(chunk, stage=0)
def test_slices_to_indices(self):
    """slices_to_unit_indices must return every chunk overlapping a slice range.

    Two differently laid-out blocks are checked against each other in both
    directions, plus a variant where axis 0 is given as ``slice(None, None)``.
    The union of coordinates covered by the returned chunks (clipped to the
    target block's bounds) must cover every in-bounds coordinate of the query.

    The original test duplicated the same verification loop three times; it is
    factored into a local helper with identical behaviour.
    """
    index = 1  # the axis being exercised

    def assert_covered(target, query_slices):
        """Every in-bounds coordinate of query_slices[index] is covered by
        some chunk of ``target`` returned for ``query_slices``."""
        lo = target.bounds[index].start
        hi = target.bounds[index].stop
        remaining = set(x for x in range(query_slices[index].start, query_slices[index].stop)
                        if lo <= x < hi)
        print('expect:', query_slices, remaining)
        for unit_index in target.slices_to_unit_indices(query_slices):
            covering = Chunk(target, unit_index)
            covered = set(x for x in range(covering.slices[index].start, covering.slices[index].stop)
                          if lo <= x < hi)
            print(covering.slices, covered)
            remaining.difference_update(covered)
            assert all(tuple(u >= 0 and u <= n for u, n in zip(unit_index, target.num_chunks)))
        print('left', remaining)
        assert len(remaining) == 0

    block_1 = Block(bounds=(slice(0, 16), slice(0, 16), slice(0, 16)),
                    chunk_shape=(4, 4, 4), overlap=(1, 1, 1))
    block_2 = Block(bounds=(slice(-1, 25), slice(-1, 25), slice(-1, 25)),
                    chunk_shape=(6, 6, 6), overlap=(1, 1, 1))

    # forward direction
    for i in range(0, block_2.num_chunks[index]):
        chunk_2 = Chunk(block_2, (0, i))
        assert_covered(block_1, chunk_2.slices)
    # Test reverse direction
    block_1, block_2 = block_2, block_1
    for i in range(0, block_2.num_chunks[index]):
        chunk_2 = Chunk(block_2, (0, i))
        assert_covered(block_1, chunk_2.slices)
    # Test None: axis 0 given as slice(None, None) must still resolve
    for i in range(0, block_2.num_chunks[index]):
        chunk_2 = Chunk(block_2, (0, i))
        assert_covered(block_1, (slice(None, None),) + chunk_2.slices[1:])
import numpy as np
import sys
import xml.etree.ElementTree as ET
from transforms3d.euler import euler2quat
import os
import time
# The path below comes from a docker
sys.path.append("/workspace/sapien/build")
import rospkg
import rospy
import pysapien_ros1.core as sapien
import pysapien_ros1.ros1 as sr
RENDER_HZ = 8
def _parse_sdf_mesh(sdf_geom, model_path):
    """Return (filename, scale) for an SDF ``<geometry><mesh>`` element.

    ``model://`` URIs are resolved against ``model_path``; the referenced file
    must exist and the scale must be a 3-vector.
    """
    sdf_mesh = sdf_geom.find('mesh')
    sdf_uri = sdf_mesh.find('uri')
    sdf_scale = sdf_mesh.find('scale')
    assert sdf_uri is not None and sdf_scale is not None
    filename = sdf_uri.text.replace('model://', model_path)
    scale = [float(x) for x in sdf_scale.text.strip().split()]
    assert len(scale) == 3
    assert os.path.isfile(filename), filename
    return filename, scale


def load_sapien_sdf(sdf_file, scene, table_height):
    """Populate ``scene`` with the lights and rigid bodies described by an SDF file.

    :param sdf_file: path of the SDF world file to parse
    :param scene: sapien scene that lights/actors are created in
    :param table_height: z offset added to every light position and model pose
    :return: list of the actors that were built
    """
    model_path = os.getenv('SAPIEN_MODEL_PATH')
    assert model_path, 'SAPIEN_MODEL_PATH environment variable is required'
    if model_path[-1] != '/':
        model_path += '/'
    world = ET.parse(sdf_file).getroot().find('world')
    actors = []
    # point lights; SDF stores diffuse power, divided by pi here — presumably a
    # radiometric conversion for sapien, TODO confirm
    for light in world.findall('light'):
        assert light.attrib['type'] == 'point'
        color = [float(x) / 3.14 for x in light.find('diffuse').text.split()]
        position = np.array([float(x) for x in light.find('pose').text.split()][:3])
        position[2] += table_height
        scene.add_point_light(position, color)
    for sdf_model in world.findall('model'):
        builder = scene.create_actor_builder()
        sdf_link = sdf_model.find('link')
        sdf_inertial = sdf_link.find('inertial')
        assert sdf_inertial is not None
        # collision geometry (mesh parsing shared with visuals; the original
        # duplicated it inline and also left a dead, shadowed `sdf_pose` local)
        for collision in sdf_link.findall('collision'):
            filename, scale = _parse_sdf_mesh(collision.find('geometry'), model_path)
            friction = float(collision.find('surface').find('friction').find('ode').find('mu').text)
            assert friction == 0.5  # will all be 0.5
            builder.add_multiple_convex_shapes_from_file(filename, scale=scale)
        # visual geometry
        for visual in sdf_link.findall('visual'):
            filename, scale = _parse_sdf_mesh(visual.find('geometry'), model_path)
            builder.add_visual_from_file(filename, scale=scale)
        # inertial properties: mass, CoM pose, diagonal inertia only
        sdf_mass = sdf_inertial.find('mass')
        sdf_pose = sdf_inertial.find('pose')
        sdf_inertia = sdf_inertial.find('inertia')
        assert sdf_mass is not None and sdf_pose is not None and sdf_inertia is not None
        mass = float(sdf_mass.text)
        xyzrpy = [float(x) for x in sdf_pose.text.strip().split()]
        assert len(xyzrpy) == 6
        ixx = float(sdf_inertia.find('ixx').text)
        iyy = float(sdf_inertia.find('iyy').text)
        izz = float(sdf_inertia.find('izz').text)
        ixy = float(sdf_inertia.find('ixy').text)
        ixz = float(sdf_inertia.find('ixz').text)
        iyz = float(sdf_inertia.find('iyz').text)
        assert ixy == ixz == iyz == 0
        builder.set_mass_and_inertia(mass, sapien.Pose(xyzrpy[:3], euler2quat(*xyzrpy[3:])),
                                     [ixx, iyy, izz])
        model = builder.build(name=sdf_model.attrib['name'])
        xyzrpy = np.array([float(x) for x in sdf_model.find('pose').text.strip().split()])
        xyzrpy[2] += table_height
        model.set_pose(sapien.Pose(xyzrpy[:3], euler2quat(*xyzrpy[3:])))
        model.set_velocity([0, 0, 0])
        model.set_damping(1, 1)
        actors.append(model)
    return actors
def setup_table(scene: sapien.Scene, height, table_physical_material):
    """Build a static table top plus four legs and add both to the scene.

    Returns the two static actors as ``[table, legs]``.
    """
    half_size = np.array([1, 0.8, 0.01]) / 2
    top_offset = np.array([0, 0, height - 0.01])
    visual_material = sapien.PxrMaterial()
    visual_material.roughness = 0.025
    visual_material.specular = 0.95
    visual_material.metallic = 0.6
    visual_material.set_base_color(np.array([171, 171, 171, 255]) / 255)
    builder = scene.create_actor_builder()
    builder.add_box_visual_complex(sapien.Pose(top_offset), half_size, visual_material)
    builder.add_box_shape(sapien.Pose(top_offset), half_size, table_physical_material)
    table = builder.build_static("table")
    table.set_pose(sapien.Pose([0, 0, 0], [-0.7071, 0, 0, 0.7071]))
    leg_half_size = np.array([0.025, 0.025, height / 2 - 0.01])
    builder = scene.create_actor_builder()
    # four legs at the corners (same order as the original enumeration)
    for sign_x, sign_y in ((1, 1), (-1, -1), (-1, 1), (1, -1)):
        builder.add_box_visual_complex(
            sapien.Pose([0.45 * sign_x, 0.35 * sign_y, height / 2]), leg_half_size)
    legs = builder.build_static("table_leg")
    legs.set_pose(table.get_pose())
    return [table, legs]
def main():
    """Entry point: build the sapien scene, load the SDF world and the robot,
    then run the stepping loop until quit/interrupt."""
    # Parse ROS path and args
    materials_path = rospy.get_param('~materials_dir', '/root/ocrtoc_materials')
    args = parse_arg()
    print(args)
    if args.paused and not args.gui:
        raise RuntimeError(
            "Argument paused is only useful when GUI is activated. It is only for debug purpose. "
            "Your program will directly end when using paused:=true with gui:=false")
    package_path = rospkg.RosPack().get_path('sapien_simulator')

    # Renderer and engine
    engine = sapien.Engine()
    optifuser_config = sapien.OptifuserConfig()
    optifuser_config.use_shadow = False
    renderer = sapien.OptifuserRenderer(glsl_dir=os.path.join(package_path, "./glsl_shader/130"),
                                        glsl_version="130",
                                        config=optifuser_config)
    engine.set_renderer(renderer)
    controller = sapien.OptifuserController(renderer)

    # Load scene and ground
    scene_config = sapien.SceneConfig()
    scene_config.solver_iterations = 25
    scene_config.solver_velocity_iterations = 2
    scene_config.enable_pcm = False
    scene_config.default_restitution = 0
    scene_config.default_dynamic_friction = 0.5
    scene_config.default_static_friction = 0.5
    scene = engine.create_scene(scene_config)
    scene.set_timestep(1 / 250)
    ground_material = sapien.PxrMaterial()
    ground_material.set_base_color(np.array([202, 164, 114, 256]) / 256)
    ground_material.specular = 0.5
    scene.add_ground(-0.8, render_material=ground_material)
    if args.gui:
        controller.set_current_scene(scene)
        controller.set_camera_position(2.5, 0, 3)
        controller.set_camera_rotation(3.14, -0.7)
        controller.show_window()

    # World objects from the SDF (table plane is at z == 0 here)
    table_height = 0.0
    os.environ.update({"SAPIEN_MODEL_PATH": os.path.join(materials_path, "models")})
    sdf_objects = load_sapien_sdf(args.world_name, scene, table_height)
    # scene.set_shadow_light([0, -1, -1], [1, 1, 1])
    scene.set_ambient_light((0.5, 0.5, 0.5))

    # ROS bridge, robot loading and drive configuration
    sr.init_spd_logger()
    scene_manager = sr.SceneManager(scene, "")
    loader = scene_manager.create_robot_loader()
    loader.fix_root_link = True
    gripper_material = engine.create_physical_material(1.2, 0.8, 0.01)
    urdf_config = {
        "link": {
            "robotiq_2f_85_left_pad": {"material": gripper_material, "patch_radius": 0.02,
                                       "min_patch_radius": 0.005},
            "robotiq_2f_85_right_pad": {"material": gripper_material, "patch_radius": 0.02,
                                        "min_patch_radius": 0.005}}}
    robot, manager = loader.load_from_parameter_server("", urdf_config, 125)
    init_qpos = np.array([-1.57, -1.57, 1.57, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    robot.set_qpos(init_qpos)
    robot.set_drive_target(init_qpos)
    manager.set_drive_property(3000, 500, 1000, [0, 1, 2, 3, 4, 5])
    manager.set_drive_property(200, 50, 300, [6, 7])
    manager.set_drive_property(100, 40, 300, [8, 9, 10, 11])
    scene_manager.start_all_ros_camera(30)
    scene_manager.start_get_model_service("/sapien/get_model_state", sdf_objects)
    scene.step()

    # Simulation loop
    start_time = time.time()
    scene_manager.start()
    step = 0
    timestep = scene.get_timestep()
    next_step_time = time.time() + timestep
    mimic_joints = robot.get_active_joints()[20:24]
    if args.gui:
        if args.paused:
            # idle (render only) until the window is closed, then simulate forever
            while not controller.should_quit:
                scene.update_render()
                controller.render()
            while True:
                step_and_render(manager, scene, controller, step, next_step_time)
                mimic_joint(robot, mimic_joints)
                next_step_time += timestep
                step += 1
        else:
            while not controller.should_quit:
                step_and_render(manager, scene, controller, step, next_step_time)
                mimic_joint(robot, mimic_joints)
                next_step_time += timestep
                step += 1
    else:
        try:
            while True:
                step_only(manager, scene, next_step_time)
                mimic_joint(robot, mimic_joints)
                next_step_time += timestep
                step += 1
        except KeyboardInterrupt:
            print("Simulation stopped by user")
    scene = None
def step_and_render(manager, scene, controller, step, next_step_time):
    """Advance the simulation one step, pacing to ``next_step_time`` on the
    wall clock, and render every RENDER_HZ-th step."""
    manager.balance_passive_force()
    # short-sleep spin until the scheduled wall-clock time is reached
    while time.time() < next_step_time:
        time.sleep(1e-4)
    scene.step()
    scene.update_render()
    if step % RENDER_HZ == 0:
        controller.render()
def step_only(manager, scene, next_step_time):
    """Advance the simulation one step, pacing to ``next_step_time`` on the
    wall clock (headless variant of step_and_render)."""
    manager.balance_passive_force()
    # short-sleep spin until the scheduled wall-clock time is reached
    while time.time() < next_step_time:
        time.sleep(1e-4)
    scene.step()
    scene.update_render()
def mimic_joint(robot, mimic_joints):
    """Mirror the two actuated gripper joints onto the four passive mimic
    joints (17.86 is presumably the Robotiq 2F-85 transmission ratio — TODO
    confirm against the gripper URDF)."""
    qpos = robot.get_qpos()
    left_target = qpos[7] * 17.86
    right_target = qpos[6] * 17.86
    targets = (right_target, right_target, left_target, left_target)
    for joint, target in zip(mimic_joints, targets):
        joint.set_drive_target(target)
def parse_arg():
    """Parse command-line options, dropping the two trailing arguments
    (presumably the bookkeeping args roslaunch appends — TODO confirm)."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--gui", action="store_true", help="show gui for visualization")
    parser.add_argument("--paused", action="store_true", help="start simulator in a paused mode")
    parser.add_argument("--world_name", type=str, help="scene name for loading")
    argv = sys.argv[1:-2]
    print(argv)
    return parser.parse_args(argv)
if __name__ == '__main__':
import sys
sr.ros_init("iros_pipeline", sys.argv)
main() | sapien_simulator/scripts/sapien_env.py | import numpy as np
import sys
import xml.etree.ElementTree as ET
from transforms3d.euler import euler2quat
import os
import time
# The path below comes from a docker
sys.path.append("/workspace/sapien/build")
import rospkg
import rospy
import pysapien_ros1.core as sapien
import pysapien_ros1.ros1 as sr
RENDER_HZ = 8
def load_sapien_sdf(sdf_file, scene, table_height):
model_path = os.getenv('SAPIEN_MODEL_PATH')
assert model_path, 'SAPIEN_MODEL_PATH environment variable is required'
if model_path[-1] != '/':
model_path += '/'
sdf = ET.parse(sdf_file).getroot()
world = sdf.find('world')
actors = []
for l in world.findall('light'):
assert l.attrib['type'] == 'point'
color = [float(x) / 3.14 for x in l.find('diffuse').text.split()]
position = np.array([float(x) for x in l.find('pose').text.split()][:3])
position[2] += table_height
scene.add_point_light(position, color)
for sdf_model in world.findall('model'):
builder = scene.create_actor_builder()
sdf_link = sdf_model.find('link')
sdf_pose = sdf_model.find('pose')
sdf_inertial = sdf_link.find('inertial')
assert sdf_inertial is not None
cs = sdf_link.findall('collision')
vs = sdf_link.findall('visual')
for col in cs:
sdf_geom = col.find('geometry')
sdf_mesh = sdf_geom.find('mesh')
sdf_uri = sdf_mesh.find('uri')
sdf_scale = sdf_mesh.find('scale')
assert sdf_uri is not None and sdf_scale is not None
filename = sdf_uri.text.replace('model://', model_path)
scale = [float(x) for x in sdf_scale.text.strip().split()]
assert len(scale) == 3
assert os.path.isfile(filename), filename
friction = float(col.find('surface').find('friction').find('ode').find('mu').text)
assert friction == 0.5 # will all be 0.5
builder.add_multiple_convex_shapes_from_file(filename, scale=scale)
for v in vs:
sdf_geom = v.find('geometry')
sdf_mesh = sdf_geom.find('mesh')
sdf_uri = sdf_mesh.find('uri')
sdf_scale = sdf_mesh.find('scale')
assert sdf_uri is not None and sdf_scale is not None
filename = sdf_uri.text.replace('model://', model_path)
scale = [float(x) for x in sdf_scale.text.strip().split()]
assert len(scale) == 3
assert os.path.isfile(filename), filename
builder.add_visual_from_file(filename, scale=scale)
sdf_mass = sdf_inertial.find('mass')
sdf_pose = sdf_inertial.find('pose')
sdf_inertia = sdf_inertial.find('inertia')
assert sdf_mass is not None and sdf_pose is not None and sdf_inertia is not None
mass = float(sdf_mass.text)
xyzrpy = [float(x) for x in sdf_pose.text.strip().split()]
assert len(xyzrpy) == 6
ixx = float(sdf_inertia.find('ixx').text)
iyy = float(sdf_inertia.find('iyy').text)
izz = float(sdf_inertia.find('izz').text)
ixy = float(sdf_inertia.find('ixy').text)
ixz = float(sdf_inertia.find('ixz').text)
iyz = float(sdf_inertia.find('iyz').text)
assert ixy == ixz == iyz == 0
builder.set_mass_and_inertia(mass, sapien.Pose(xyzrpy[:3], euler2quat(*xyzrpy[3:])), [ixx, iyy, izz])
model_pose = sdf_model.find('pose')
model = builder.build(name=sdf_model.attrib['name'])
xyzrpy = np.array([float(x) for x in model_pose.text.strip().split()])
xyzrpy[2] += table_height
model.set_pose(sapien.Pose(xyzrpy[:3], euler2quat(*xyzrpy[3:])))
model.set_velocity([0, 0, 0])
model.set_damping(1, 1)
actors.append(model)
return actors
def setup_table(scene: sapien.Scene, height, table_physical_material):
table_size = np.array([1, 0.8, 0.01]) / 2
table_pose = np.array([0, 0, height - 0.01])
table_vis_material = sapien.PxrMaterial()
table_vis_material.roughness = 0.025
table_vis_material.specular = 0.95
table_vis_material.metallic = 0.6
rgbd = np.array([171, 171, 171, 255])
table_vis_material.set_base_color(rgbd / 255)
builder = scene.create_actor_builder()
builder.add_box_visual_complex(sapien.Pose(table_pose), table_size, table_vis_material)
builder.add_box_shape(sapien.Pose(table_pose), table_size, table_physical_material)
table = builder.build_static("table")
table.set_pose(sapien.Pose([0, 0, 0], [-0.7071, 0, 0, 0.7071]))
table_leg_position1 = [0.45, 0.35, height / 2]
table_leg_position2 = [-0.45, -0.35, height / 2]
table_leg_position3 = [-0.45, 0.35, height / 2]
table_leg_position4 = [0.45, -0.35, height / 2]
table_leg_size = np.array([0.025, 0.025, height / 2 - 0.01])
builder = scene.create_actor_builder()
builder.add_box_visual_complex(sapien.Pose(table_leg_position1), table_leg_size)
builder.add_box_visual_complex(sapien.Pose(table_leg_position2), table_leg_size)
builder.add_box_visual_complex(sapien.Pose(table_leg_position3), table_leg_size)
builder.add_box_visual_complex(sapien.Pose(table_leg_position4), table_leg_size)
legs = builder.build_static("table_leg")
legs.set_pose(table.get_pose())
return [table, legs]
def main():
# Parse ROS path and args
materials_path = rospy.get_param('~materials_dir',
'/root/ocrtoc_materials')
args = parse_arg()
print(args)
if args.paused and not args.gui:
raise RuntimeError(
"Argument paused is only useful when GUI is activated. It is only for debug purpose. "
"Your program will directly end when using paused:=true with gui:=false")
current_path = rospkg.RosPack().get_path('sapien_simulator')
engine = sapien.Engine()
optifuser_config = sapien.OptifuserConfig()
optifuser_config.use_shadow = False
renderer = sapien.OptifuserRenderer(glsl_dir=os.path.join(current_path, "./glsl_shader/130"),
glsl_version="130",
config=optifuser_config)
engine.set_renderer(renderer)
controller = sapien.OptifuserController(renderer)
# Load scene and ground
scene_config = sapien.SceneConfig()
scene_config.solver_iterations = 25
scene_config.solver_velocity_iterations = 2
scene_config.enable_pcm = False
scene_config.default_restitution = 0
scene_config.default_dynamic_friction = 0.5
scene_config.default_static_friction = 0.5
scene = engine.create_scene(scene_config)
scene.set_timestep(1 / 250)
ground_material = sapien.PxrMaterial()
ground_color = np.array([202, 164, 114, 256]) / 256
ground_material.set_base_color(ground_color)
ground_material.specular = 0.5
scene.add_ground(-0.8, render_material=ground_material)
if args.gui:
controller.set_current_scene(scene)
controller.set_camera_position(2.5, 0, 3)
controller.set_camera_rotation(3.14, -0.7)
controller.show_window()
# Load table
table_height = 0.0
# Load sdf
os.environ.update({
"SAPIEN_MODEL_PATH": os.path.join(materials_path, "models")})
sdf_objects = load_sapien_sdf(args.world_name, scene, table_height)
# scene.set_shadow_light([0, -1, -1], [1, 1, 1])
scene.set_ambient_light((0.5, 0.5, 0.5))
sr.init_spd_logger()
scene_manager = sr.SceneManager(scene, "")
loader = scene_manager.create_robot_loader()
loader.fix_root_link = True
gripper_material = engine.create_physical_material(1.2, 0.8, 0.01)
urdf_config = {
"link": {
"robotiq_2f_85_left_pad": {"material": gripper_material, "patch_radius": 0.02, "min_patch_radius": 0.005},
"robotiq_2f_85_right_pad": {"material": gripper_material, "patch_radius": 0.02,
"min_patch_radius": 0.005}}}
# Load robot
robot, manager = loader.load_from_parameter_server("", urdf_config, 125)
init_qpos = np.array([-1.57, -1.57, 1.57, 0, 0, 0, 0, 0, 0, 0, 0, 0])
robot.set_qpos(init_qpos)
robot.set_drive_target(init_qpos)
manager.set_drive_property(3000, 500, 1000, [0, 1, 2, 3, 4, 5])
manager.set_drive_property(200, 50, 300, [6, 7])
manager.set_drive_property(100, 40, 300, [8, 9, 10, 11])
scene_manager.start_all_ros_camera(30)
scene_manager.start_get_model_service("/sapien/get_model_state", sdf_objects)
scene.step()
# Start
start_time = time.time()
scene_manager.start()
step = 0
timestep = scene.get_timestep()
next_step_time = time.time() + timestep
mimic_joints = robot.get_active_joints()[20:24]
if args.gui:
if args.paused:
while not controller.should_quit:
scene.update_render()
controller.render()
while True:
step_and_render(manager, scene, controller, step, next_step_time)
mimic_joint(robot, mimic_joints)
next_step_time += timestep
step += 1
else:
while not controller.should_quit:
step_and_render(manager, scene, controller, step, next_step_time)
mimic_joint(robot, mimic_joints)
next_step_time += timestep
step += 1
else:
try:
while True:
step_only(manager, scene, next_step_time)
mimic_joint(robot, mimic_joints)
next_step_time += timestep
step += 1
except KeyboardInterrupt:
print("Simulation stopped by user")
scene = None
def step_and_render(manager, scene, controller, step, next_step_time):
manager.balance_passive_force()
now = time.time()
while now < next_step_time:
time.sleep(1e-4)
now = time.time()
scene.step()
scene.update_render()
if step % RENDER_HZ == 0:
controller.render()
def step_only(manager, scene, next_step_time):
manager.balance_passive_force()
now = time.time()
while now < next_step_time:
time.sleep(1e-4)
now = time.time()
scene.step()
scene.update_render()
def mimic_joint(robot, mimic_joints):
left_target = robot.get_qpos()[7] * 17.86
right_target = robot.get_qpos()[6] * 17.86
mimic_joints[0].set_drive_target(right_target)
mimic_joints[1].set_drive_target(right_target)
mimic_joints[2].set_drive_target(left_target)
mimic_joints[3].set_drive_target(left_target)
def parse_arg():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gui", action="store_true", help="show gui for visualization")
parser.add_argument("--paused", action="store_true", help="start simulator in a paused mode")
parser.add_argument("--world_name", type=str, help="scene name for loading")
print(sys.argv[1:-2])
return parser.parse_args(sys.argv[1:-2])
if __name__ == '__main__':
import sys
sr.ros_init("iros_pipeline", sys.argv)
main() | 0.416085 | 0.261461 |
import copy
import threading
from collections import defaultdict
from projex.lazymodule import lazy_import
from projex.locks import ReadWriteLock, WriteLocker, ReadLocker
orb = lazy_import('orb')
class Context(object):
"""
Defines a unique instance of information that will be bundled when
calling different methods within the connections class.
The Context class will accept a set of keyword arguments to
control how the action on the database will be affected. The options are:
"""
# Default value for every supported context option.  This dict is also the
# whitelist of accepted option names (see __setattr__ / __getattr__).
Defaults = {
'autoIncrementEnabled': True,
'columns': None,
'db': None,
'database': None,
'distinct': False,
'dryRun': False,
'expand': None,
'format': 'json',
'force': False,
'inflated': None,
'limit': None,
'locale': None,
'namespace': '',
'forceNamespace': False,
'order': None,
'page': None,
'pageSize': None,
'scope': None,
'returning': 'records',
'start': None,
'timezone': None,
'where': None,
'useBaseQuery': True
}
# Options that describe the query itself, as opposed to connection or
# behavioral flags.
QueryFields = {
'columns',
'expand',
'limit',
'order',
'page',
'pageSize',
'start',
'where'
}
# Options excluded from __hash__/__eq__: their values are live objects
# (connections) or dicts that are not meaningful for equality.
UnhashableOptions = {
'db',
'scope'
}
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
def __hash__(self):
keys = sorted(self.Defaults.keys())
hash_keys = []
for key in keys:
if key in self.__class__.UnhashableOptions:
continue
value = self.raw_values.get(key, self.__class__.Defaults[key])
if isinstance(value, (list, set)):
value = tuple(value)
try:
hash_value = hash(value)
except TypeError:
hash_value = unicode(value)
hash_keys.append(hash_value)
return hash(tuple(hash_keys))
def __enter__(self):
    """
    Push this context as the thread's default, so all calls made while it
    is in scope begin with this context's information.

    :usage |import orb
           |with orb.Context(database=db):
           |    user = models.User()
           |    group = models.Group()

    :return: <orb.Context>
    """
    type(self).pushDefaultContext(self)
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """
    Pop this context off the thread's default stack.

    Always returns a falsey value so any exception raised inside the
    ``with`` block propagates naturally.  The previous implementation used
    a bare ``raise``, which is redundant during ``with``-statement
    unwinding and raises RuntimeError when called outside an active
    exception handler.
    """
    self.popDefaultContext()
    return False
def __init__(self, **kwds):
    """Create a context, seeding raw_values and merging the given options."""
    # bypass the whitelisting __setattr__ when creating the storage dict
    object.__setattr__(self, 'raw_values', {})
    self.update(kwds)
def __getattr__(self, key):
try:
return self.raw_values.get(key, self.Defaults[key])
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
if not key in self.Defaults:
raise AttributeError(key)
else:
self.raw_values[key] = value
def __iter__(self):
for k in self.Defaults:
yield k, getattr(self, k)
def copy(self):
    """
    Return a copy of this context.  Unhashable options (db, scope) are
    shared by reference; every other value is shallow-copied.

    :return <orb.Context>
    """
    duplicated = {}
    for key, value in self.raw_values.items():
        duplicated[key] = value if key in self.UnhashableOptions else copy.copy(value)
    return Context(**duplicated)
@property
def db(self):
try:
return self.raw_values['db']
except KeyError:
db = orb.system.database(self.database)
if not db:
raise orb.errors.DatabaseNotFound()
return db
@property
def expand(self):
out = self.raw_values.get('expand')
if isinstance(out, set):
return list(out)
elif isinstance(out, (str, unicode)):
return out.split(',')
elif isinstance(out, dict):
def expand_string(key, children):
return [key] + [key + '.' + child
for value in [expand_string(k_, v_) for k_, v_ in children.items()]
for child in value]
return [entry for item in [expand_string(k, v) for k, v in out.items()] for entry in item]
else:
return out
def expandtree(self, model=None):
"""
Goes through the expand options associated with this context and
returns a trie of data.
:param model: subclass of <orb.Model> || None
:return: <dict>
"""
if model and not self.columns:
schema = model.schema()
defaults = schema.columns(flags=orb.Column.Flags.AutoExpand).keys()
defaults += schema.collectors(flags=orb.Collector.Flags.AutoExpand).keys()
else:
defaults = []
expand = self.expand or defaults
if not expand:
return {}
def build_tree(parts, tree):
tree.setdefault(parts[0], {})
if len(parts) > 1:
build_tree(parts[1:], tree[parts[0]])
tree = {}
for branch in expand:
build_tree(branch.split('.'), tree)
return tree
def isNull(self):
"""
Returns whether or not this option set has been modified.
:return <bool>
"""
check = self.raw_values.copy()
scope = check.pop('scope', {})
return len(check) == 0 and len(scope) == 0
def items(self):
return [(k, getattr(self, k)) for k in self.Defaults]
@property
def locale(self):
return self.raw_values.get('locale') or orb.system.settings().default_locale
@property
def order(self):
out = self.raw_values.get('order')
if isinstance(out, set):
return list(out)
elif isinstance(out, (str, unicode)):
return [(x.strip('+-'), 'desc' if x.startswith('-') else 'asc') for x in out.split(',') if x]
else:
return out
def schemaColumns(self, schema):
return [schema.column(col) for col in self.columns or []]
@property
def limit(self):
return self.raw_values.get('pageSize') or self.raw_values.get('limit')
@property
def scope(self):
out = self.raw_values.get('scope')
return out if out is not None else {}
@property
def start(self):
if self.raw_values.get('page') is not None:
return (self.raw_values.get('page') - 1) * (self.limit or 0)
else:
return self.raw_values.get('start')
@property
def timezone(self):
return self.raw_values.get('timezone') or orb.system.settings().server_timezone
def update(self, other_context):
    """
    Update this context from a dict of options or another context.

    Options, where-queries, columns and scope are first inherited from an
    explicit 'context' entry or, failing that, from the thread's stack of
    default contexts; incoming values are then normalized and validated
    before being merged into raw_values.

    :param other_context: <dict> || <orb.Context>
    :raises orb.errors.ContextError: when start/page/limit/pageSize are
        not integers in their valid range
    """
    # convert a context instance into a dictionary
    if isinstance(other_context, orb.Context):
        other_context = copy.copy(other_context.raw_values)

    ignore = ('where', 'columns', 'scope')
    inherit_kwds = {}
    inherit_scope = {}
    inherit_columns = []
    inherit_where = orb.Query()

    # update from the base context
    base_context = other_context.pop('context', None)
    if base_context is not None:
        inherit_kwds = base_context.raw_values
    # use the default contexts
    else:
        for default in self.defaultContexts():
            if default is not None:
                # extract expandable information
                for k, v in default.raw_values.items():
                    if k not in ignore:
                        inherit_kwds[k] = copy.copy(v)
                # merge where queries
                where = default.where
                if where is not None:
                    inherit_where &= where
                # merge column queries
                columns = default.columns
                if columns is not None:
                    inherit_columns += list(columns)
                # merge scope
                scope = default.scope
                if scope:
                    inherit_scope.update(scope)

    # update the inherited kwds (incoming values win over inherited ones)
    for k, v in inherit_kwds.items():
        other_context.setdefault(k, v)

    # update the inherited query
    if inherit_where:
        other_context.setdefault('where', orb.Query())
        other_context['where'] &= inherit_where

    # update the inherited columns
    if inherit_columns:
        other_context['columns'] = inherit_columns + (other_context.get('columns') or [])

    # update the inherited scope
    if inherit_scope:
        new_scope = {}
        new_scope.update(inherit_scope)
        new_scope.update(other_context.get('scope') or {})
        other_context['scope'] = new_scope

    # convert the columns to a list
    if 'columns' in other_context and isinstance(other_context['columns'], (str, unicode)):
        other_context['columns'] = other_context['columns'].split(',')

    # convert where to query
    where = other_context.get('where')
    if isinstance(where, dict):
        other_context['where'] = orb.Query.fromJSON(where)
    if isinstance(where, (orb.Query, orb.QueryCompound)):
        other_context['where'] &= self.where

    # validate values.  Bug fix: the error message for 'start' previously
    # looked up the misspelled key 'start)' and always formatted None
    # instead of the offending value.
    validations = (
        ('start', 0, 'Start needs to be a positive number, got {0} instead'),
        ('page', 1, 'Page needs to be a number equal to or greater than 1, got {0} instead'),
        ('limit', 1, 'Limit needs to be a number equal to or greater than 1, got {0} instead'),
        ('pageSize', 1, 'Page size needs to be a number equal to or greater than 1, got {0} instead'),
    )
    for key, minimum, msg in validations:
        value = other_context.get(key)
        # type(...) != int matches the original strict check (rejects bool)
        if value is not None and (type(value) != int or value < minimum):
            raise orb.errors.ContextError(msg.format(value))

    # update the raw values
    self.raw_values.update({k: v for k, v in other_context.items() if k in self.Defaults})
    @classmethod
    def defaultContexts(cls):
        """Return the stack of default contexts for the current thread.

        The per-thread stacks and their guarding read/write lock are
        stored lazily as name-mangled class attributes so each Context
        subclass gets its own storage.

        NOTE(review): the first-time initialization below is not itself
        locked, so two threads racing through it could each create a
        separate lock -- confirm whether startup is single-threaded.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        # keyed by thread id so scopes on different threads don't interact
        tid = threading.currentThread().ident
        with ReadLocker(lock):
            return defaults.get(tid) or []

    @classmethod
    def popDefaultContext(cls):
        """Pop the most recently pushed default context for this thread.

        Raises IndexError if the thread's stack is empty.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        tid = threading.currentThread().ident
        with WriteLocker(lock):
            defaults[tid].pop()

    @classmethod
    def pushDefaultContext(cls, context):
        """Push *context* as the default for the current thread.

        Used by Context.__enter__ to make a context the implicit base for
        every Context created while the `with` block is active.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        tid = threading.currentThread().ident
        with WriteLocker(lock):
defaults[tid].append(context) | orb/core/context.py | import copy
import threading
from collections import defaultdict
from projex.lazymodule import lazy_import
from projex.locks import ReadWriteLock, WriteLocker, ReadLocker
orb = lazy_import('orb')
class Context(object):
    """
    Defines a unique instance of information that will be bundled when
    calling different methods within the connections class.

    The Context class will accept a set of keyword arguments to control
    how the action on the database will be affected.  The recognized
    options and their default values are listed in ``Context.Defaults``.
    """
    # Every option a Context may carry, with its default value.  Only
    # keys present here can be set on a context (enforced by __setattr__
    # and filtered in update()).
    Defaults = {
        'autoIncrementEnabled': True,
        'columns': None,
        'db': None,
        'database': None,
        'distinct': False,
        'dryRun': False,
        'expand': None,
        'format': 'json',
        'force': False,
        'inflated': None,
        'limit': None,
        'locale': None,
        'namespace': '',
        'forceNamespace': False,
        'order': None,
        'page': None,
        'pageSize': None,
        'scope': None,
        'returning': 'records',
        'start': None,
        'timezone': None,
        'where': None,
        'useBaseQuery': True
    }

    # Options that shape the records a query returns (as opposed to
    # connection/behavior options).
    QueryFields = {
        'columns',
        'expand',
        'limit',
        'order',
        'page',
        'pageSize',
        'start',
        'where'
    }

    # Options excluded from __hash__ -- their values are unhashable or
    # identity-like (see __hash__).
    UnhashableOptions = {
        'db',
        'scope'
    }
    def __eq__(self, other):
        # Equality is defined via __hash__, which covers every option
        # except those in UnhashableOptions -- so hash collisions would
        # compare equal.  NOTE(review): approximate equality by design?
        return hash(self) == hash(other)

    def __ne__(self, other):
        return hash(self) != hash(other)

    def __hash__(self):
        """Hash the context from its effective option values.

        Options in ``UnhashableOptions`` are skipped; list/set values are
        converted to tuples first; any value that still cannot be hashed
        falls back to its unicode representation.
        """
        keys = sorted(self.Defaults.keys())
        hash_keys = []
        for key in keys:
            if key in self.__class__.UnhashableOptions:
                continue

            value = self.raw_values.get(key, self.__class__.Defaults[key])
            if isinstance(value, (list, set)):
                value = tuple(value)

            try:
                hash_value = hash(value)
            except TypeError:
                hash_value = unicode(value)

            hash_keys.append(hash_value)
        return hash(tuple(hash_keys))
    def __enter__(self):
        """
        Creates a scope where this context is default, so all calls made
        while it is in scope will begin with this context's information.

        :usage      |import orb
                    |with orb.Context(database=db):
                    |   user = models.User()
                    |   group = models.Group()

        :return:    <orb.Context>
        """
        self.pushDefaultContext(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always pop this context off the thread's default stack; a bare
        # `raise` re-raises the exception active inside the `with` block.
        self.popDefaultContext()
        if exc_type:
            raise
        else:
            return self
    def __init__(self, **kwds):
        # raw_values stores only explicitly-set options; reads fall back
        # to Defaults (see __getattr__).  Assigned through __dict__ to
        # bypass the restrictive custom __setattr__.
        self.__dict__['raw_values'] = {}
        self.update(kwds)

    def __getattr__(self, key):
        # Known option names fall back to their declared default; unknown
        # names raise AttributeError as usual.
        try:
            return self.raw_values.get(key, self.Defaults[key])
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        # Only option names declared in Defaults may be assigned.
        if not key in self.Defaults:
            raise AttributeError(key)
        else:
            self.raw_values[key] = value

    def __iter__(self):
        # Yield (option, effective value) pairs for every declared option.
        for k in self.Defaults:
            yield k, getattr(self, k)
def copy(self):
"""
Returns a copy of this database option set.
:return <orb.Context>
"""
properties = {}
for key, value in self.raw_values.items():
if key in self.UnhashableOptions:
properties[key] = value
else:
properties[key] = copy.copy(value)
return Context(**properties)
    @property
    def db(self):
        # Return the explicitly-assigned connection, else look one up by
        # name on the global orb system; DatabaseNotFound if neither.
        try:
            return self.raw_values['db']
        except KeyError:
            db = orb.system.database(self.database)
            if not db:
                raise orb.errors.DatabaseNotFound()
            return db

    @property
    def expand(self):
        """Return the expand option normalized to a list of dotted paths.

        Accepts a set, a comma-separated string, or a nested dict tree
        (flattened to 'parent.child' style paths).
        """
        out = self.raw_values.get('expand')
        if isinstance(out, set):
            return list(out)
        elif isinstance(out, (str, unicode)):
            return out.split(',')
        elif isinstance(out, dict):
            # Recursively flatten {'a': {'b': {}}} into ['a', 'a.b'].
            def expand_string(key, children):
                return [key] + [key + '.' + child
                                for value in [expand_string(k_, v_) for k_, v_ in children.items()]
                                for child in value]

            return [entry for item in [expand_string(k, v) for k, v in out.items()] for entry in item]
        else:
            return out
    def expandtree(self, model=None):
        """
        Goes through the expand options associated with this context and
        returns a trie of data.

        :param model: subclass of <orb.Model> || None

        :return: <dict>
        """
        # With no explicit column selection, auto-expand any columns and
        # collectors flagged AutoExpand on the model's schema.
        if model and not self.columns:
            schema = model.schema()
            defaults = schema.columns(flags=orb.Column.Flags.AutoExpand).keys()
            defaults += schema.collectors(flags=orb.Collector.Flags.AutoExpand).keys()
        else:
            defaults = []

        expand = self.expand or defaults
        if not expand:
            return {}

        def build_tree(parts, tree):
            # Insert one dotted path (already split into parts) into the trie.
            tree.setdefault(parts[0], {})
            if len(parts) > 1:
                build_tree(parts[1:], tree[parts[0]])

        tree = {}
        for branch in expand:
            build_tree(branch.split('.'), tree)

        return tree
    def isNull(self):
        """
        Returns whether or not this option set has been modified.

        :return     <bool>
        """
        check = self.raw_values.copy()
        scope = check.pop('scope', {})
        return len(check) == 0 and len(scope) == 0

    def items(self):
        # (option, effective value) pairs for every declared option.
        return [(k, getattr(self, k)) for k in self.Defaults]

    @property
    def locale(self):
        # Explicit locale, else the system-wide default.
        return self.raw_values.get('locale') or orb.system.settings().default_locale

    @property
    def order(self):
        """Return the order option normalized to (column, 'asc'|'desc') pairs."""
        out = self.raw_values.get('order')
        if isinstance(out, set):
            return list(out)
        elif isinstance(out, (str, unicode)):
            # '-name,+id' style: a leading '-' means descending order.
            return [(x.strip('+-'), 'desc' if x.startswith('-') else 'asc') for x in out.split(',') if x]
        else:
            return out

    def schemaColumns(self, schema):
        # Resolve this context's column names against the given schema.
        return [schema.column(col) for col in self.columns or []]

    @property
    def limit(self):
        # A pageSize takes precedence over an explicit limit.
        return self.raw_values.get('pageSize') or self.raw_values.get('limit')

    @property
    def scope(self):
        out = self.raw_values.get('scope')
        return out if out is not None else {}

    @property
    def start(self):
        # Paged contexts derive their record offset from the page number.
        if self.raw_values.get('page') is not None:
            return (self.raw_values.get('page') - 1) * (self.limit or 0)
        else:
            return self.raw_values.get('start')

    @property
    def timezone(self):
        # Explicit timezone, else the server default.
        return self.raw_values.get('timezone') or orb.system.settings().server_timezone
def update(self, other_context):
"""
Updates this lookup set with the inputted options.
:param other_context | <dict> || <orb.Context>
"""
# convert a context instance into a dictionary
if isinstance(other_context, orb.Context):
other_context = copy.copy(other_context.raw_values)
ignore = ('where', 'columns', 'scope')
inherit_kwds = {}
inherit_scope = {}
inherit_columns = []
inherit_where = orb.Query()
# update from the base context
base_context = other_context.pop('context', None)
if base_context is not None:
inherit_kwds = base_context.raw_values
# use the default contexts
else:
for default in self.defaultContexts():
if default is not None:
# extract expandable information
for k, v in default.raw_values.items():
if k not in ignore:
inherit_kwds[k] = copy.copy(v)
# merge where queries
where = default.where
if where is not None:
inherit_where &= where
# merge column queries
columns = default.columns
if columns is not None:
inherit_columns += list(columns)
# merge scope
scope = default.scope
if scope:
inherit_scope.update(scope)
# update the inherited kwds
for k, v in inherit_kwds.items():
other_context.setdefault(k, v)
# update the inherited query
if inherit_where:
other_context.setdefault('where', orb.Query())
other_context['where'] &= inherit_where
# update the inherited columns
if inherit_columns:
other_context['columns'] = inherit_columns + (other_context.get('columns') or [])
# update the inherited scope
if inherit_scope:
new_scope = {}
new_scope.update(inherit_scope)
new_scope.update(other_context.get('scope') or {})
other_context['scope'] = new_scope
# convert the columns to a list
if 'columns' in other_context and isinstance(other_context['columns'], (str, unicode)):
other_context['columns'] = other_context['columns'].split(',')
# convert where to query
where = other_context.get('where')
if isinstance(where, dict):
other_context['where'] = orb.Query.fromJSON(where)
if isinstance(where, (orb.Query, orb.QueryCompound)):
other_context['where'] &= self.where
# validate values
if other_context.get('start') is not None and (type(other_context['start']) != int or other_context['start'] < 0):
msg = 'Start needs to be a positive number, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('start)')))
if other_context.get('page') is not None and (type(other_context['page']) != int or other_context['page'] < 1):
msg = 'Page needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('page')))
if other_context.get('limit') is not None and (type(other_context['limit']) != int or other_context['limit'] < 1):
msg = 'Limit needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('limit')))
if other_context.get('pageSize') is not None and (type(other_context['pageSize']) != int or other_context['pageSize'] < 1):
msg = 'Page size needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('pageSize')))
# update the raw values
self.raw_values.update({k: v for k, v in other_context.items() if k in self.Defaults})
    @classmethod
    def defaultContexts(cls):
        """Return the stack of default contexts for the current thread.

        The per-thread stacks and their guarding read/write lock are
        stored lazily as name-mangled class attributes so each Context
        subclass gets its own storage.

        NOTE(review): the first-time initialization below is not itself
        locked, so two threads racing through it could each create a
        separate lock -- confirm whether startup is single-threaded.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        # keyed by thread id so scopes on different threads don't interact
        tid = threading.currentThread().ident
        with ReadLocker(lock):
            return defaults.get(tid) or []

    @classmethod
    def popDefaultContext(cls):
        """Pop the most recently pushed default context for this thread.

        Raises IndexError if the thread's stack is empty.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        tid = threading.currentThread().ident
        with WriteLocker(lock):
            defaults[tid].pop()

    @classmethod
    def pushDefaultContext(cls, context):
        """Push *context* as the default for the current thread.

        Used by Context.__enter__ to make a context the implicit base for
        every Context created while the `with` block is active.
        """
        defaults = getattr(cls, '_{0}__defaults'.format(cls.__name__), None)
        if defaults is None:
            defaults = defaultdict(list)
            lock = ReadWriteLock()
            setattr(cls, '_{0}__defaults'.format(cls.__name__), defaults)
            setattr(cls, '_{0}__defaultsLock'.format(cls.__name__), lock)
        else:
            lock = getattr(cls, '_{0}__defaultsLock'.format(cls.__name__))

        tid = threading.currentThread().ident
        with WriteLocker(lock):
defaults[tid].append(context) | 0.518059 | 0.196942 |
import unittest
import zserio
from testutils import getZserioApi
from TestPubsub import TestPubsub, TestPubsubContext
class SimplePubsubTest(unittest.TestCase):
    """Exercises the generated simple_pubsub API -- the split
    provider/client facades and the combined SimplePubsub object --
    against the in-process TestPubsub bus."""

    @classmethod
    def setUpClass(cls):
        cls.api = getZserioApi(__file__, "pubsub_types.zs").simple_pubsub

    def setUp(self):
        # All three facades share a single bus, so messages published
        # through one facade reach subscribers registered on another.
        pubsub = TestPubsub()
        self.simplePubsubProvider = self.api.SimplePubsubProvider(pubsub)
        self.simplePubsubClient = self.api.SimplePubsubClient(pubsub)
        self.simplePubsub = self.api.SimplePubsub(pubsub)

    def testPowerOfTwoClientAndProvider(self):
        # Provider answers each request with the square of the value.
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsubProvider.publishPowerOfTwo(result)

        self.simplePubsubProvider.subscribeRequest(requestCallback)

        # Mutable cell so the closure can record the provider's answer.
        result = {"value": 0}
        def powerOfTwoCallback(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value"] = value.getValue()

        self.simplePubsubClient.subscribePowerOfTwo(powerOfTwoCallback)

        request = self.api.Int32Value.fromFields(13)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(169, result["value"])

        # Squaring discards the sign.
        request.setValue(-13)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(2)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(4, result["value"])

        request.setValue(-2)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(4, result["value"])

    def testPowerOfTwoSimplePubsub(self):
        # Same round-trip as above, but the combined SimplePubsub object
        # plays both the provider and the client role.
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsub.publishPowerOfTwo(result)

        self.simplePubsub.subscribeRequest(requestCallback)

        result = {"value": 0}
        def powerOfTwoCallback(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value"] = value.getValue()

        self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback)

        request = self.api.Int32Value.fromFields(13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(-13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(4, result["value"])

        request.setValue(-2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(4, result["value"])

    def testPublishRequestWithContext(self):
        # The optional context argument must be forwarded to the bus.
        context = TestPubsubContext()
        self.assertFalse(context.seenByPubsub)
        self.simplePubsub.publishRequest(self.api.Int32Value.fromFields(42), context)
        self.assertTrue(context.seenByPubsub)

    def testSubscribeRequestWithContext(self):
        context = TestPubsubContext()
        self.assertFalse(context.seenByPubsub)
        self.simplePubsub.subscribeRequest(lambda topic, value: None, context)
        self.assertTrue(context.seenByPubsub)

    def testUnsubscribe(self):
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsub.publishPowerOfTwo(result)

        id0 = self.simplePubsub.subscribeRequest(requestCallback)

        result = {"value1": 0, "value2": 0}
        def powerOfTwoCallback1(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value1"] = value.getValue()

        id1 = self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback1)

        def powerOfTwoCallback2(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value2"] = value.getValue()

        id2 = self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback2)

        # Both subscribers initially see the published answer.
        request = self.api.Int32Value.fromFields(13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"])
        self.assertEqual(169, result["value2"])

        # After unsubscribing callback 1, only callback 2 sees updates.
        self.simplePubsub.unsubscribe(id1)
        request.setValue(2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"]) # shall not be changed!
        self.assertEqual(4, result["value2"])

        self.simplePubsub.unsubscribe(id0) # unsubscribe publisher
        request.setValue(3)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"]) # shall not be changed!
        self.assertEqual(4, result["value2"]) # shall not be changed!

        self.simplePubsub.unsubscribe(id2)

    def testUnsubscribeInvalid(self):
        # An id that was never issued must raise.
        with self.assertRaises(zserio.PubsubException):
self.simplePubsub.unsubscribe(0) | test/language/pubsub_types/python/SimplePubsubTest.py | import unittest
import zserio
from testutils import getZserioApi
from TestPubsub import TestPubsub, TestPubsubContext
class SimplePubsubTest(unittest.TestCase):
    """Exercises the generated simple_pubsub API -- the split
    provider/client facades and the combined SimplePubsub object --
    against the in-process TestPubsub bus."""

    @classmethod
    def setUpClass(cls):
        cls.api = getZserioApi(__file__, "pubsub_types.zs").simple_pubsub

    def setUp(self):
        # All three facades share a single bus, so messages published
        # through one facade reach subscribers registered on another.
        pubsub = TestPubsub()
        self.simplePubsubProvider = self.api.SimplePubsubProvider(pubsub)
        self.simplePubsubClient = self.api.SimplePubsubClient(pubsub)
        self.simplePubsub = self.api.SimplePubsub(pubsub)

    def testPowerOfTwoClientAndProvider(self):
        # Provider answers each request with the square of the value.
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsubProvider.publishPowerOfTwo(result)

        self.simplePubsubProvider.subscribeRequest(requestCallback)

        # Mutable cell so the closure can record the provider's answer.
        result = {"value": 0}
        def powerOfTwoCallback(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value"] = value.getValue()

        self.simplePubsubClient.subscribePowerOfTwo(powerOfTwoCallback)

        request = self.api.Int32Value.fromFields(13)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(169, result["value"])

        # Squaring discards the sign.
        request.setValue(-13)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(2)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(4, result["value"])

        request.setValue(-2)
        self.simplePubsubClient.publishRequest(request)
        self.assertEqual(4, result["value"])

    def testPowerOfTwoSimplePubsub(self):
        # Same round-trip as above, but the combined SimplePubsub object
        # plays both the provider and the client role.
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsub.publishPowerOfTwo(result)

        self.simplePubsub.subscribeRequest(requestCallback)

        result = {"value": 0}
        def powerOfTwoCallback(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value"] = value.getValue()

        self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback)

        request = self.api.Int32Value.fromFields(13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(-13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value"])

        request.setValue(2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(4, result["value"])

        request.setValue(-2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(4, result["value"])

    def testPublishRequestWithContext(self):
        # The optional context argument must be forwarded to the bus.
        context = TestPubsubContext()
        self.assertFalse(context.seenByPubsub)
        self.simplePubsub.publishRequest(self.api.Int32Value.fromFields(42), context)
        self.assertTrue(context.seenByPubsub)

    def testSubscribeRequestWithContext(self):
        context = TestPubsubContext()
        self.assertFalse(context.seenByPubsub)
        self.simplePubsub.subscribeRequest(lambda topic, value: None, context)
        self.assertTrue(context.seenByPubsub)

    def testUnsubscribe(self):
        def requestCallback(topic, value):
            self.assertEqual("simple_pubsub/request", topic)
            result = self.api.UInt64Value.fromFields(value.getValue() * value.getValue())
            self.simplePubsub.publishPowerOfTwo(result)

        id0 = self.simplePubsub.subscribeRequest(requestCallback)

        result = {"value1": 0, "value2": 0}
        def powerOfTwoCallback1(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value1"] = value.getValue()

        id1 = self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback1)

        def powerOfTwoCallback2(topic, value):
            self.assertEqual("simple_pubsub/power_of_two", topic)
            result["value2"] = value.getValue()

        id2 = self.simplePubsub.subscribePowerOfTwo(powerOfTwoCallback2)

        # Both subscribers initially see the published answer.
        request = self.api.Int32Value.fromFields(13)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"])
        self.assertEqual(169, result["value2"])

        # After unsubscribing callback 1, only callback 2 sees updates.
        self.simplePubsub.unsubscribe(id1)
        request.setValue(2)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"]) # shall not be changed!
        self.assertEqual(4, result["value2"])

        self.simplePubsub.unsubscribe(id0) # unsubscribe publisher
        request.setValue(3)
        self.simplePubsub.publishRequest(request)
        self.assertEqual(169, result["value1"]) # shall not be changed!
        self.assertEqual(4, result["value2"]) # shall not be changed!

        self.simplePubsub.unsubscribe(id2)

    def testUnsubscribeInvalid(self):
        # An id that was never issued must raise.
        with self.assertRaises(zserio.PubsubException):
self.simplePubsub.unsubscribe(0) | 0.653127 | 0.32154 |
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import (
pauli_error,
depolarizing_error,
)
from statistics import stdev
from math import sqrt
from datetime import datetime
SPACE = ' '
def string_reverse(input_string):
    """Reverses a string.

    Parameters
    ----------
    input_string : str
        Holds the string to be reversed

    Returns
    ----------
    reversed_string : str
        The reversed string
    """
    return ''.join(reversed(input_string))
def find_parity(counts):
    """Finds the parity of the output bit string held in the counts dictionary.

    Parameters
    ----------
    counts : dictionary
        Holds the observed output bit strings (keys are space-separated
        register strings; the second field is the data register)

    Returns
    ----------
    parity_count : dict
        A dictionary mapping parity ('0' or '1') to the total counts
        observed with that parity.
    """
    parity_count = {str(i): 0 for i in range(2)}
    for bit_string, occurrences in counts.items():
        # the data register is the second space-separated field of the key
        data_section = bit_string.split()[1]
        parity_count[str(calculate_parity(data_section))] += occurrences
    return parity_count
def calculate_parity(bit_string):
    """ Calculates the parity of a bit string

    Parameters
    ----------
    bit_string : str
        bit string on which parity is to be calculated (any iterable of
        characters works; non-'1' characters are ignored)

    Returns
    -------
    parity : int
        0 if even parity
        1 if odd parity
    """
    parity = 0
    for bit in bit_string:
        if bit == '1':
            # each '1' toggles the running parity
            parity ^= 1
    return parity
def count_valid_output_strings(counts, codewords, data_location = 0,
                               post_selection = False, simple = False,
                               single = False, single_bit = 0):
    """Finds the number of valid and invalid output bit strings
    in a given location in a dictionary representing
    the counts for each output bit string.

    Several validity algorithms are supported: post selection (only
    strings in the codespace are valid, strings outside it are counted
    separately), simple decoding based on the parity of selected bits,
    and checking a single bit only.

    Parameters
    ----------
    counts : dictionary
        holds the observed populations for each combination of qubit
    codewords : list
        holds allowed codewords
    data_location : int
        location of the data string (0 means the whole key)
    post_selection : bool
        if true then only strings in logical zero are valid
    simple : bool
        looks only at the parity of bits with exactly two non-zero
        columns in the parity matrix
    single : bool
        look at single bit only
    single_bit : int
        single bit to validate against

    Returns
    -------
    count_valid : int
        Number of valid bit strings
    count_invalid : int
        Number of invalid bit strings
    count_outside_codeword : int
        Number of strings outside codespace.
    """
    # reject incompatible flag combinations up front
    if single and len(codewords) != 1:
        raise ValueError('Only send a one bit codeword with calculation using a single bit')
    if single and simple:
        raise ValueError('Validity calculation not designed for both simple algorithm and single_bit')
    if single and post_selection:
        raise ValueError('Validity calculation not designed for both post_selection and single_bit')
    if simple and post_selection:
        raise ValueError('Validity calculation not designed for both post_selection and simple')
    if simple and len(codewords) != 1:
        raise ValueError('Only send a one bit codeword with simple calculation')

    tally_valid = 0
    tally_invalid = 0
    tally_outside = 0
    for bit_string, occurrences in counts.items():
        data = bit_string if data_location == 0 else bit_string.split()[data_location]
        # codeword tables use the opposite bit order to Qiskit output
        # strings, so reverse before comparing
        flipped = string_reverse(data)
        valid, invalid, outside = compute_string_validity(
            value = occurrences, codewords = codewords,
            reversed_data_string = flipped,
            post_selection = post_selection,
            simple = simple,
            single = single,
            single_bit = single_bit)
        tally_valid += valid
        tally_invalid += invalid
        tally_outside += outside
    return (tally_valid, tally_invalid, tally_outside)
def compute_string_validity(value, codewords, reversed_data_string, post_selection = False,
                            simple = False, single = False, single_bit = 0):
    """Categorises a string as valid, invalid or outside the codespace
    and assigns the string's count to the matching returned slot.

    Validity algorithms: default (membership in the codewords), post
    selection (logical zero is valid, bit-flipped logical one is invalid,
    anything else is outside the codespace), simple (parity of the bits
    with exactly two non-zero rows in the parity matrix) and single
    (one designated bit only).

    Parameters
    ----------
    value : int
        number of observed shots for this data string
    codewords : list
        holds allowed codewords (a single one-character codeword for the
        simple and single algorithms)
    reversed_data_string : str
        data string, already reversed into codeword bit order
    post_selection : bool
        if true then only strings in logical zero are valid
    simple : bool
        use the parity of the simple-decoding bit subset
    single : bool
        look at single bit only
    single_bit : int
        index of the single bit to validate against

    Returns
    -------
    valid : int
        value if the bit string is valid, else 0
    invalid : int
        value if the bit string is invalid, else 0
    outside_codeword : int
        value if the bit string is outside the codespace, else 0

    Raises
    ------
    ValueError
        if both simple and post_selection are requested (previously a
        bare Exception; ValueError matches the caller's validation and
        remains catchable as Exception).
    """
    if simple and post_selection:
        raise ValueError('simple and post selection algorithm are exclusive')

    valid = 0
    invalid = 0
    outside_codeword = 0
    if post_selection:
        logical_zero = codewords
        logical_one = flip_code_words(codewords)
        if reversed_data_string in logical_zero:
            valid = value
        elif reversed_data_string in logical_one:
            invalid = value
        else:
            outside_codeword = value
    elif simple:
        # parity over the subset of bits relevant to simple decoding
        # (previously collected via a list seeded with an empty string;
        # a joined string gives the same parity)
        relevant_bits = ''.join(reversed_data_string[location]
                                for location in calculate_simple_parity_bits())
        parity = str(calculate_parity(relevant_bits))
        if parity in codewords:
            valid = value
        else:
            invalid = value
    elif single:
        if reversed_data_string[single_bit] in codewords:
            valid = value
        else:
            invalid = value
    else:
        if reversed_data_string in codewords:
            valid = value
        else:
            invalid = value
    return (valid, invalid, outside_codeword)
def calculate_simple_parity_bits():
    """Returns a list of qubits with exactly two non zero rows in the parity matrix

    Returns
    -------
    simple_parity_bits : list
        A list of all qubit indices whose parity-matrix column total is
        exactly two
    """
    parity_matrix_totals = calculate_parity_matrix_totals()
    return [index for index, total in enumerate(parity_matrix_totals) if total == 2]
def find_individual_ancilla_values(ancilla_values, data_qubits,
                                   ancilla_qubits, label_string = ''):
    """Returns the count of individual ancilla bit strings as a dictionary.

    Parameters
    ----------
    ancilla_values : dict
        holds the counts for each combination of ancilla bit strings.
    data_qubits : int
        number of data qubits used as an offset to calculate
        the ancilla number
    ancilla_qubits : int
        number of ancilla qubits
    label_string : str
        first part of each result label

    Returns
    -------
    individual_ancilla_values : dict
        dictionary containing the count of each individual ancilla qubit
    """
    # one zeroed entry per ancilla qubit number
    individual_ancilla_values = {}
    for qubit_number in range(data_qubits + 1, data_qubits + 1 + ancilla_qubits):
        individual_ancilla_values[label_string + str(qubit_number)] = 0
    for ancilla_string, occurrences in ancilla_values.items():
        for position in range(ancilla_qubits):
            if ancilla_string[position] == '1':
                # Qiskit bit order is reversed relative to the paper's
                # qubit numbering, hence the subtraction
                label = label_string + str(data_qubits + ancilla_qubits - position)
                individual_ancilla_values[label] += occurrences
    return individual_ancilla_values
def find_ancilla_values(counts, ancilla_qubits, ancilla_location = 0):
    """Returns a dictionary with a count of each possible ancilla bit string.

    Parameters
    ----------
    counts : dictionary
        counts for each possible output bit string
    ancilla_qubits : int
        number of ancilla qubits
    ancilla_location : int
        designates which space-separated field of the key holds the ancilla

    Returns
    -------
    ancilla_values : dict
        dictionary containing the count of each possible ancilla bit string
    """
    # initialise every possible ancilla bit string to zero
    width = '0' + str(ancilla_qubits) + 'b'
    ancilla_values = {format(index, width): 0 for index in range(2 ** ancilla_qubits)}
    # accumulate the observed counts per ancilla pattern
    for key, occurrences in counts.items():
        ancilla = key.split()[ancilla_location]
        ancilla_values[ancilla] += occurrences
    return ancilla_values
def strings_AND_bitwise(string1, string2):
    """Returns the bitwise XOR of two equal length bit strings.

    NOTE(review): despite the historical name (and the previous
    docstring, which claimed AND), this function has always produced
    '1' exactly where the two bits DIFFER -- i.e. XOR.  The behaviour is
    kept because correct_qubit relies on XOR-with-a-mask to flip the
    erroneous bit; only the documentation is corrected here.  The name
    is left unchanged to preserve the public interface.

    Parameters
    ----------
    string1 : str
        First bit string
    string2 : str
        Second bit string

    Returns
    -------
    string_out : str
        bitwise XOR of the two input strings ('1' where bits differ,
        '0' elsewhere; positions with non-binary characters yield '0')
    """
    if len(string1) != len(string2):
        raise Exception('When taking the logical AND of two strings they must both have the same length')
    string_out = ''
    for bit1, bit2 in zip(string1, string2):
        # '1' only for the ('0','1') / ('1','0') combinations, mirroring
        # the original explicit case analysis
        if (bit1, bit2) in (('0', '1'), ('1', '0')):
            string_out = string_out + '1'
        else:
            string_out = string_out + '0'
    return string_out
def string_ancilla_mask(location, length):
    """Returns a bit string with a '1' in one position and '0' elsewhere.

    The '1' is placed at string index (length - location), matching the
    original shift-left-by-(location - 1) construction.

    Bug fixes relative to the previous version:
    * validation failures now *raise* ValueError instead of returning an
      Exception object (which callers would silently propagate as data);
    * the bit shift was hard-coded as string[1:7], which silently
      produced wrong masks for length > 7 -- the mask is now built in
      closed form for any length.

    Parameters
    ----------
    location : int
        location of the bit which should be set to '1' in the mask
        (1-based, counted from the right-hand end)
    length : int
        length of string in the mask

    Returns
    -------
    string : str
        ancilla bit mask string in required format

    Raises
    ------
    ValueError
        if location/length are not strictly positive integers with
        location <= length.
    """
    if not isinstance(location, int):
        raise ValueError('Location of string must be an integer when calculating ancilla mask')
    if not isinstance(length, int):
        raise ValueError('Length of string must be an integer when calculating ancilla mask')
    if location < 1:
        raise ValueError('Location of string must be strictly positive when calculating ancilla mask')
    if length < 1:
        raise ValueError('String length must be greater than 1 when calculating ancilla mask')
    if length < location:
        raise ValueError('Location must be less than string length when calculating ancilla mask')
    # '1' at index (length - location): location 1 -> rightmost bit
    return '0' * (length - location) + '1' + '0' * (location - 1)
def correct_qubit(data_in, ancilla, data_qubits):
    """Returns the corrected data bit string calculated from the ancilla settings.

    Parameters
    ----------
    data_in : str
        input data bit string
    ancilla : str
        three bit ancilla logical Z code
    data_qubits : int
        length of bit string

    Returns
    -------
    data_out : str
        corrected data bit string

    Notes
    -----
    The ancilla bit string is reversed compared to the numbering of the
    data bits shown on the Qiskit diagrams, so it is reversed before
    being decoded as a syndrome number.  This code corrects bit string
    errors only, not phase errors.
    """
    # syndrome 0 means no error was detected
    if ancilla == '000':
        return data_in
    syndrome = int(string_reverse(ancilla), 2)
    # flip the single erroneous bit indicated by the syndrome
    mask = string_ancilla_mask(syndrome, data_qubits)
    return strings_AND_bitwise(data_in, mask)
def flip_code_words(codewords_in):
    """Returns the logical-one codewords obtained by flipping every bit
    of the given logical-zero codewords.

    Parameters
    ----------
    codewords_in : list
        logical codewords for the logical zero

    Returns
    -------
    codewords_out : list
        bit flipped input codewords
    """
    flip = {'0': '1', '1': '0'}
    codewords_out = []
    for word in codewords_in:
        flipped_bits = []
        for bit in word:
            if bit not in flip:
                raise Exception('Not able to interpret bit in codewords')
            flipped_bits.append(flip[bit])
        codewords_out.append(''.join(flipped_bits))
    return codewords_out
def get_noise(p_meas, single_qubit_error,
              two_qubit_error, single_qubit_gate_set,
              two_qubit_gate_set, all = True,
              noisy_qubit_list = [],
              decohere = False,
              dummy_gate_set = [],
              dummy_gate_error = 0
              ):
    """Returns a noise model
    Parameters
    ----------
    p_meas : float
        probability of X error on measurement
    single_qubit_error : float
        probability of a depolarizing error on a single qubit gate
    two_qubit_error : float
        probability of a depolarizing error on a two qubit gate
    single_qubit_gate_set : list
        list of all single qubit gate types relevant for noise
    two_qubit_gate_set : list
        list of all two qubit gate types relevant for noise
    all : bool
        apply noise to all qubits; if False a noisy_qubit_list is required
    noisy_qubit_list : list of list
        list of list of noisy qubits on which errors are applied
    decohere : bool
        Add extra noise to represent de-coherence
    dummy_gate_set : list
        Set of dummy gates on which the de-coherence error is applied. Normally ['id'].
    dummy_gate_error : float
        error to apply to dummy gate which is set up to model de-coherence at certain stages in the circuit.
    Returns
    -------
    noise_model : NoiseModel
        qiskit Aer noise model to be used
    Notes
    -----
    Can apply noise selectively to qubits in noisy_qubit_list. This is a list of lists.
    NOTE(review): the parameter name 'all' shadows the builtin; renaming would
    break keyword callers, so it is left unchanged.
    NOTE(review): noisy_qubit_list and dummy_gate_set are mutable default
    arguments; they are never mutated here, but this is fragile.
    """
    # X flip with probability p_meas, attached to measurement operations below
    error_meas = pauli_error([('X', p_meas), ('I', 1 - p_meas)])
    error_gate1 = depolarizing_error(single_qubit_error, 1)
    error_gate2 = depolarizing_error(two_qubit_error, 1)
    # two-qubit error is the tensor product of two independent single-qubit
    # depolarizing channels — not a true two-qubit depolarizing channel
    error_gate3 = error_gate2.tensor(error_gate2)
    if decohere:
        if 'id' in single_qubit_gate_set:
            raise ValueError('Do not include gate id in the single_qubit_gate_set as used for decoherent errors')
        error_decohere = depolarizing_error(dummy_gate_error, 1)
    noise_model = NoiseModel()
    if all:
        if noisy_qubit_list != []:
            raise ValueError('Errors are applied to all qubits but a list of qubits with errors is given')
        noise_model.add_all_qubit_quantum_error(error_meas, 'measure')
        # measurement error is applied to measurements
        noise_model.add_all_qubit_quantum_error(error_gate1,
                                                single_qubit_gate_set)
        # single qubit gate errors
        noise_model.add_all_qubit_quantum_error(error_gate3,
                                                two_qubit_gate_set)
        # two qubit gate error is applied to two qubit gates
        if decohere:
            noise_model.add_all_qubit_quantum_error(error_decohere,
                                                    dummy_gate_set)
            # decoherence error is applied to dummy gates
    else:
        if noisy_qubit_list == []:
            raise ValueError('A list of qubits must be supplied if errors are not to be applied to all qubits')
        #read through list of list of error gates
        for gate_list in noisy_qubit_list:
            for gate_index1 in gate_list:
                noise_model.add_quantum_error(error_meas, 'measure',
                                              [gate_index1]
                                              )
                # measurement error is applied to measurements
                noise_model.add_quantum_error(error_gate1,
                                              single_qubit_gate_set,
                                              [gate_index1]
                                              )
                # single qubit gate errors
                if decohere:
                    noise_model.add_quantum_error(error_decohere ,
                                                  dummy_gate_set,
                                                  [gate_index1]
                                                  )
                    # decoherence error is applied to dummy gates
                # two-qubit error on every ordered pair of distinct qubits
                # within the same sub-list
                for gate_index2 in gate_list:
                    if gate_index1 != gate_index2:
                        noise_model.add_quantum_error(error_gate3,
                                                      two_qubit_gate_set,
                                                      [gate_index1,
                                                       gate_index2]
                                                      )
    return noise_model
def mean_of_list(list_in):
    """Return the arithmetic mean of a list of numbers.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    float
        the mean of the input values

    Raises
    ------
    ValueError
        if the list is empty (previously this raised a bare
        ZeroDivisionError, which was misleading)
    """
    if not list_in:
        raise ValueError('Cannot calculate the mean of an empty list')
    return sum(list_in) / len(list_in)
def calculate_standard_error(list_in):
    """Calculate the standard deviation and standard error of a sample.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    standard_deviation : float
        standard deviation estimated from the sample
    standard_error : float
        standard error estimated from the sample (stdev / sqrt(n))

    Raises
    ------
    ValueError
        if the list is empty

    Notes
    -----
    With a single data point no spread can be estimated, so both values
    default to 0 and a warning is printed.
    """
    if len(list_in) > 1:
        standard_deviation = stdev(list_in)
        standard_error = standard_deviation / sqrt(len(list_in))
    elif len(list_in) == 1:
        standard_deviation = 0
        standard_error = 0
        print('Unable to carry out standard error calculation with one point. ')
        print('Standard error of 0 used.')
    else:
        # original message was a broken f-string ('f The number ...' with an
        # undefined {iterations} placeholder); fixed to a real message
        raise ValueError(f'The number of data points must be positive; {len(list_in)} supplied')
    return (standard_deviation, standard_error)
def convert_codewords(codewords):
    """Convert the codewords list of lists into a list of strings.

    Parameters
    ----------
    codewords : list
        allowed codewords for the logical zero

    Returns
    -------
    list
        one string per codeword, each element stringified and concatenated

    Notes
    -----
    No longer needed at present as codewords are already strings,
    but retained in case needed in future.
    """
    return [''.join(str(element) for element in word) for word in codewords]
def summarise_logical_counts(counts, logical_zero_strings, logical_one_strings,
                             data1_location, data2_location, simple = False):
    """Simplify bit strings for logical operations so each logical qubit is
    shown as 0, 1 or E instead of the full bit string.

    0 means the qubit decodes to logical zero,
    1 means the qubit decodes to logical one,
    E means the qubit is outside the code space (non-simple decoding only).

    Parameters
    ----------
    counts : dict
        results of computation
    logical_zero_strings : list
        list of strings in logical zero
    logical_one_strings : list
        list of strings in logical one
    data1_location : int
        index of data1 in the space-separated counts key
    data2_location : int
        index of data2 in the space-separated counts key
    simple : bool
        use simple decoding based on bit parity

    Returns
    -------
    new_counts : dict
        simplified results keyed by the two decoded symbols
    """
    if type(logical_zero_strings) != list:
        raise Exception('logical_zero_strings should be a list')
    if type(logical_one_strings) != list:
        raise Exception('logical_one_strings should be a list')
    validate_integer(data1_location)
    validate_integer(data2_location)
    if simple:
        if len(logical_zero_strings) != 1:
            raise Exception('with simple decoding logical zero should be a list with one entry')
        # BUG FIX: this guard previously re-checked logical_zero_strings,
        # so an invalid logical_one_strings was silently accepted
        if len(logical_one_strings) != 1:
            raise Exception('with simple decoding logical one should be a list with one entry')
        simple_parity_bits = calculate_simple_parity_bits()
    # pre-populate the nine numeric keys; 'E' combinations are added lazily
    new_counts = {str(i) + str(j): 0 for i in range(3) for j in range(3)}
    for key, value in counts.items():
        # split out the data parts of the space-separated key
        data1 = key.split()[data1_location]
        data2 = key.split()[data2_location]
        # strings must be reversed out of Qiskit's bit ordering
        reverse1 = string_reverse(data1)
        reverse2 = string_reverse(data2)
        if simple:
            # decode from the parity of the marked bits
            bit_string1 = ['']
            bit_string2 = ['']
            for bit_location in simple_parity_bits:
                bit_string1.append(reverse1[bit_location])
                bit_string2.append(reverse2[bit_location])
            new_data1 = str(calculate_parity(bit_string1))
            new_data2 = str(calculate_parity(bit_string2))
        else:
            new_data1 = look_up_data(reverse1, logical_zero_strings, logical_one_strings)
            new_data2 = look_up_data(reverse2, logical_zero_strings, logical_one_strings)
        new_key = new_data1 + new_data2
        if new_counts.get(new_key) is None:
            new_counts.update({new_key: value})
        else:
            new_counts[new_key] = new_counts[new_key] + value
    return new_counts
def look_up_data(input_string, logical_zero, logical_one):
    """Classify a bit string as logical zero, logical one, or outside the
    code space.

    Parameters
    ----------
    input_string : str
        data for analysis
    logical_zero : list
        strings representing a logical zero
    logical_one : list
        strings representing a logical one

    Returns
    -------
    str
        '0' for logical zero, '1' for logical one, 'E' otherwise
    """
    if input_string in logical_zero:
        return '0'
    if input_string in logical_one:
        return '1'
    return 'E'
def print_time():
    """Print the current wall-clock time in HH:MM:SS format."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print("Current Time =", stamp)
def validate_integer(number):
    """Raise if *number* is not exactly of type int.

    Parameters
    ----------
    number : int
        number to be validated

    Raises
    ------
    ValueError
        if number is of any other type (note: a bool is rejected too,
        since the check is on the exact type rather than isinstance)
    """
    if type(number) is not int:
        raise ValueError(f'The number {number} entered is not an integer')
def process_FT_results(counts, codewords, data_meas_strings = ['0'],
                       anc_zero = '0', anc_one = '1',
                       verbose = False, data_qubits = 7,
                       ancilla_start = 0, data_meas_start = 0, data_start = 0,
                       ancilla_types = 2, ancilla_qubits = 0, ancilla_meas_repeats = 1,
                       data_meas_qubits = 0, data_meas_repeats = 0,
                       post_selection = False, simple = False,
                       ):
    """Process results from fault tolerant processing.
    Parameters
    ----------
    counts : dictionary
        results for analysis
    codewords : list
        list of valid data codewords
    data_meas_strings : list
        allowed strings for the data measurement bits
    anc_zero : string
        allowed strings for the ancilla zero
    anc_one : string
        allowed strings for the ancilla one
    verbose : bool
        if true enables printing
    data_qubits : int
        Length of data bit string. Usually seven
    ancilla_start : int
        starting place for ancilla (if any)
    data_meas_start : int
        starting place for data measurement qubits (if any)
    data_start : int
        starting place for data string
    ancilla_types : int
        number of different ancilla types. Normally 2 (X and Z) or 0
    ancilla_qubits : int
        number of strings for each ancilla qubits. Normally 0, 1 or 3
    ancilla_meas_repeats : int
        number of times ancilla measurements are repeated. Normally 3 or 1
    data_meas_qubits : int
        number of distinct data measurement qubits. Normally 7, 1 or 0
    data_meas_repeats : int
        number of times data measurements are repeated. Normally 3 or 1.
    post_selection : bool
        if true then only strings in logical zero are invalid
    simple : bool
        if true then simple decoding based on three bits shall be used.
    Returns
    -------
    error_rate : float
        error rate calculated
    rejected : int
        strings rejected for validation
    accepted : int
        strings accepted for validation
    valid : int
        strings validated and found to be in the code space
    invalid : int
        strings validated and found to not be in the code space
    Notes
    -----
    This function takes the output string, splits it, and determines if it passes
    data and ancilla checks. If so the data keyword is validated.
    NOTE(review): data_meas_strings is a mutable default argument (not
    mutated here, but fragile).
    """
    # the two readings accepted by the triple-redundancy ancilla check below
    anc_meas_strings = [anc_zero, anc_one]
    validate_integer(ancilla_start)
    validate_integer(data_meas_start)
    validate_integer(data_start)
    validate_integer(ancilla_types)
    validate_integer(ancilla_qubits)
    validate_integer(ancilla_meas_repeats)
    validate_integer(data_meas_qubits)
    validate_integer(data_meas_repeats)
    # number of space-separated fields expected in each counts key:
    # ancilla measurements + data measurements + the data string itself
    total_keys = ancilla_types * ancilla_qubits * ancilla_meas_repeats
    total_keys = total_keys + (data_meas_qubits * data_meas_repeats) + 1
    count_valid = 0
    count_invalid = 0
    count_outside_codeword = 0
    ancilla_rejected = 0
    ancilla_accepted = 0
    data_rejected = 0
    data_accepted = 0
    rejected = 0
    accepted = 0
    for string, value in counts.items():
        qubit_strings = []
        data_syndrome_strings = []
        data_OK = False
        for i in range(total_keys):
            qubit_strings.append(string.split()[i])
        data_string = qubit_strings[data_start]
        for i in range(data_meas_start, data_meas_start + data_meas_repeats):
            #need to reverse strings because Qiskit reverses them
            data_syndrome_strings.append(string_reverse(qubit_strings[i]))
        # data measurements pass only if all three repeats are allowed strings
        if data_meas_repeats == 3:
            if data_syndrome_strings[2] in data_meas_strings:
                if data_syndrome_strings[1] in data_meas_strings:
                    if data_syndrome_strings[0] in data_meas_strings:
                        data_OK = True
        elif data_meas_repeats == 0:
            data_OK = True
        else:
            raise Exception('At present only 3 or zero data measurements are coded for')
        if data_OK:
            data_accepted = data_accepted + value
            if ancilla_qubits == 0:
                #no ancilla
                ancilla_accepted = data_accepted
                ancilla_rejected = 0
                ancilla_OK = True
                corrected_data_string = data_string
            elif ancilla_qubits == 1:
                #simple case without fault tolerance. No check on ancilla possible
                ancilla_OK = True
                ancilla_accepted = data_accepted
                ancilla_rejected = 0
                if ancilla_meas_repeats != 1:
                    raise Exception('can not handle multiple measurements on one ancilla qubit')
                ancilla = qubit_strings[ancilla_start]
                corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
            elif ancilla_qubits == 3:
                #complex case with fault tolerance
                count_ancilla_OK = 0
                # X[j] collects the agreed reading of each X-type ancilla
                X = ['' for i in range(ancilla_qubits)]
                for i in range(ancilla_types):
                    for j in range(ancilla_meas_repeats):
                        # NOTE(review): these offsets start from field 0 and do
                        # not use ancilla_start — confirm the ancilla fields
                        # always lead the counts key in this branch
                        first = i * (ancilla_qubits * ancilla_meas_repeats) + j * ancilla_meas_repeats
                        second = first + 1
                        third = second + 1
                        # all three repeated readings must agree and be allowed
                        if qubit_strings[third] == qubit_strings[second]:
                            if qubit_strings[second] == qubit_strings[first]:
                                if qubit_strings[first] in anc_meas_strings:
                                    count_ancilla_OK = count_ancilla_OK + 1
                                    if i == 0:
                                        #only interested in X values
                                        # NOTE(review): 'in anc_zero' is a substring
                                        # test since anc_zero is a string — confirm
                                        # equality was not intended
                                        if qubit_strings[first] in anc_zero:
                                            X[j] = '0'
                                        elif qubit_strings[first] in anc_one:
                                            X[j] = '1'
                                        else:
                                            # NOTE(review): message lacks the f prefix,
                                            # so the {i}, {j}, {k} placeholders are literal
                                            raise Exception('Error in processing strings for i, j, k = {i}, {j}, {k}')
                if count_ancilla_OK == ancilla_qubits * ancilla_types:
                    ancilla_OK = True
                    ancilla_accepted = ancilla_accepted + value
                    #always first three ancilla with Steane code
                    ancilla = X[0] + X[1] + X[2]
                    corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
                else:
                    ancilla_OK = False
                    ancilla_rejected = ancilla_rejected + value
            else:
                raise Exception('Can only process ancilla strings of 0, 1 or 3 qubits')
            if ancilla_OK:
                #need to reverse string because of Qisit convention
                reversed_data_string = string_reverse(corrected_data_string)
                valid, invalid, outside_codeword = compute_string_validity(value,
                                                                           codewords,
                                                                           reversed_data_string,
                                                                           post_selection = post_selection,
                                                                           simple = simple,
                                                                           )
                count_valid = count_valid + valid
                count_invalid = count_invalid + invalid
                count_outside_codeword = count_outside_codeword + outside_codeword
        else:
            data_rejected = data_rejected + value
    if ancilla_accepted != 0:
        # calculate on ancilla_accepted because this always holds the amounts to be validated
        error_rate = count_invalid / ancilla_accepted
    else:
        error_rate = 0
        print('Error rate not defined as no strings accepted')
    rejected = data_rejected + ancilla_rejected
    accepted = ancilla_accepted
    if verbose:
        print(f'At the data validation stage')
        print(f'There are {data_rejected} strings rejected and {data_accepted} strings submitted for processing')
        print(f'Making {data_rejected + data_accepted} in total submitted for data processing')
        print()
        print(f'At the ancilla validation stage')
        print(f'There are {ancilla_rejected} strings rejected and {ancilla_accepted} strings submitted for validation')
        print(f'Making {ancilla_rejected + ancilla_accepted} in total submitted to check against ancilla')
        print()
        print(f'Of these {ancilla_accepted} strings validated there are {count_valid} valid strings and {count_invalid} invalid_strings')
        if post_selection:
            print(f'There were {count_outside_codeword} strings that were neither logical one or logical zero')
        print(f'The error rate is {error_rate:.4f}')
    return(error_rate, rejected, accepted, count_valid, count_invalid)
def get_parity_check_matrix():
    """Single source of truth for the Steane code parity check matrix.

    Returns
    -------
    list
        the three rows of the parity check matrix as bit strings
    """
    return ['0001111', '0110011', '1010101']
def get_codewords():
    """Single source of truth for the logical-zero codewords.

    Returns
    -------
    list
        the eight valid codewords for the logical zero
    """
    return [
        '0000000',
        '1010101',
        '0110011',
        '1100110',
        '0001111',
        '1011010',
        '0111100',
        '1101001',
    ]
def calculate_parity_matrix_totals():
    """Calculate the number of non-zero entries in each column of the
    parity check matrix.

    Returns
    -------
    list
        one total per column of the parity check matrix

    Notes
    -----
    The original return line had extraction junk ('| helper_functions.py')
    fused onto it, which broke the syntax; this is the intended statement.
    """
    parity_check_matrix = get_parity_check_matrix()
    n = len(parity_check_matrix[0])
    parity_matrix_totals = [0 for _ in range(n)]
    # accumulate the bit value of every row into the per-column totals
    for parity_string in parity_check_matrix:
        for index in range(n):
            parity_matrix_totals[index] = parity_matrix_totals[index] + int(parity_string[index])
    return parity_matrix_totals
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import (
pauli_error,
depolarizing_error,
)
from statistics import stdev
from math import sqrt
from datetime import datetime
SPACE = ' '  # separator constant; not referenced in the visible code — TODO confirm external use
def string_reverse(input_string):
    """Return *input_string* with its characters in reverse order.

    Parameters
    ----------
    input_string : str
        the string to be reversed

    Returns
    -------
    str
        the reversed string
    """
    return ''.join(reversed(input_string))
def find_parity(counts):
    """Tally the parity of the data part of each observed output bit string.

    Parameters
    ----------
    counts : dict
        observed output bit strings mapped to their frequencies

    Returns
    -------
    dict
        counts accumulated under keys '0' (even parity) and '1' (odd parity)

    Notes
    -----
    The data is assumed to be field 1 of the space-separated key —
    TODO confirm against callers.
    """
    parity_count = {'0': 0, '1': 0}
    for key, frequency in counts.items():
        data = key.split()[1]
        parity_count[str(calculate_parity(data))] += frequency
    return parity_count
def calculate_parity(bit_string):
    """Calculate the parity of a bit string.

    Parameters
    ----------
    bit_string : str or iterable of str
        bit string on which parity is to be calculated; characters other
        than '1' do not affect the parity (matching the original behavior)

    Returns
    -------
    int
        0 if even parity, 1 if odd parity

    Notes
    -----
    The original implementation carried an unreachable error branch
    (parity can only ever be 0 or 1); the logic reduces to counting
    '1' entries modulo 2.
    """
    parity = 0
    for bit in bit_string:
        if bit == '1':
            parity ^= 1  # each '1' toggles the parity
    return parity
def count_valid_output_strings(counts, codewords, data_location = 0,
                               post_selection = False, simple = False,
                               single = False, single_bit = 0):
    """Count the valid, invalid and outside-codespace output bit strings at
    a given location in a counts dictionary.

    Several validity algorithms are supported: post selection (only strings
    in the codewords are valid, strings outside the code space are counted
    separately), simple decoding from the parity of selected bits, and
    checking a single bit.

    Parameters
    ----------
    counts : dict
        observed populations for each combination of qubits
    codewords : list
        allowed codewords
    data_location : int
        location of the data string within the space-separated key;
        0 means the whole key is the data
    post_selection : bool
        if true only strings in logical zero are valid and strings outside
        the code space are counted separately
    simple : bool
        use only the parity of bits with exactly two non-zero columns in
        the parity matrix
    single : bool
        look at a single bit only
    single_bit : int
        which single bit to validate against

    Returns
    -------
    tuple of int
        (count_valid, count_invalid, count_outside_codeword)

    Notes
    -----
    Works with codewords given either as a list of strings or a list of
    lists.
    """
    # guard clauses: reject unsupported flag combinations up front
    if single:
        if len(codewords) != 1:
            raise ValueError('Only send a one bit codeword with calculation using a single bit')
        if simple:
            raise ValueError('Validity calculation not designed for both simple algorithm and single_bit')
        if post_selection:
            raise ValueError('Validity calculation not designed for both post_selection and single_bit')
    if simple:
        if post_selection:
            raise ValueError('Validity calculation not designed for both post_selection and simple')
        if len(codewords) != 1:
            raise ValueError('Only send a one bit codeword with simple calculation')
    totals = [0, 0, 0]  # valid, invalid, outside codespace
    for key, frequency in counts.items():
        # location 0 means the whole key is the data field
        data = key if data_location == 0 else key.split()[data_location]
        # the data must be reversed out of Qiskit bit ordering before
        # comparison with the codewords
        outcome = compute_string_validity(value = frequency, codewords = codewords,
                                          reversed_data_string = string_reverse(data),
                                          post_selection = post_selection,
                                          simple = simple,
                                          single = single,
                                          single_bit = single_bit
                                          )
        totals = [running + part for running, part in zip(totals, outcome)]
    return tuple(totals)
def compute_string_validity(value, codewords, reversed_data_string, post_selection = False,
                            simple = False, single = False, single_bit = 0):
    """Categorise one string as valid, invalid or outside the code space and
    assign its count to the corresponding return slot.

    Parameters
    ----------
    value : int
        number of observations of this data string
    codewords : list
        allowed codewords
    reversed_data_string : str
        the data string, already reversed out of Qiskit ordering
    post_selection : bool
        if true only strings in the codewords are valid; strings outside the
        code space are counted separately
    simple : bool
        decode from the parity of bits with exactly two non-zero columns in
        the parity matrix
    single : bool
        look at a single bit only
    single_bit : int
        which single bit to validate against

    Returns
    -------
    tuple of int
        (valid, invalid, outside_codeword) — exactly one slot carries
        *value*, the others are 0

    Notes
    -----
    Works with codewords given either as a list of strings or a list of
    lists.
    """
    if simple and post_selection:
        raise Exception('simple and post selection algorithm are exclusive')
    valid = invalid = outside_codeword = 0
    if post_selection:
        if reversed_data_string in codewords:
            valid = value
        elif reversed_data_string in flip_code_words(codewords):
            invalid = value
        else:
            outside_codeword = value
    elif simple:
        # decode from the parity of the marked bits; the leading '' element
        # mirrors the original construction and does not affect the parity
        marked_bits = [''] + [reversed_data_string[location]
                              for location in calculate_simple_parity_bits()]
        if str(calculate_parity(marked_bits)) in codewords:
            valid = value
        else:
            invalid = value
    elif single:
        if reversed_data_string[single_bit] in codewords:
            valid = value
        else:
            invalid = value
    else:
        if reversed_data_string in codewords:
            valid = value
        else:
            invalid = value
    return (valid, invalid, outside_codeword)
def calculate_simple_parity_bits():
    """Return the qubits whose parity-matrix column sums to exactly two.

    Returns
    -------
    list
        indices of all qubits with exactly two non-zero rows in the
        parity matrix
    """
    return [index for index, total in enumerate(calculate_parity_matrix_totals())
            if total == 2]
def find_individual_ancilla_values(ancilla_values, data_qubits,
                                   ancilla_qubits, label_string = ''):
    """Accumulate the counts of each individual ancilla qubit being set.

    Parameters
    ----------
    ancilla_values : dict
        counts for each combination of ancilla bit strings
    data_qubits : int
        number of data qubits, used as an offset for the ancilla number
    ancilla_qubits : int
        number of ancilla qubits
    label_string : str
        prefix for each dictionary key

    Returns
    -------
    dict
        count of occurrences per individual ancilla qubit, keyed by
        label_string plus the ancilla number
    """
    # one entry per ancilla, numbered after the data qubits
    individual_ancilla_values = {
        label_string + str(number): 0
        for number in range(data_qubits + 1, data_qubits + 1 + ancilla_qubits)
    }
    for ancilla, frequency in ancilla_values.items():
        for position, bit in enumerate(ancilla[:ancilla_qubits]):
            if bit == '1':
                # Qiskit's qubit order is reversed relative to the paper's numbering
                key = label_string + str(data_qubits + ancilla_qubits - position)
                individual_ancilla_values[key] += frequency
    return individual_ancilla_values
def find_ancilla_values(counts, ancilla_qubits, ancilla_location = 0):
    """Tally the occurrences of every possible ancilla bit string.

    Parameters
    ----------
    counts : dict
        counts for each possible output bit string
    ancilla_qubits : int
        number of ancilla qubits
    ancilla_location : int
        which space-separated field of the key holds the ancilla

    Returns
    -------
    dict
        count of each possible ancilla bit string
    """
    # seed the dictionary with every possible ancilla string in binary
    width = '0' + str(ancilla_qubits) + 'b'
    ancilla_values = {format(number, width): 0
                      for number in range(2 ** ancilla_qubits)}
    # summarise the results by ancilla field
    for key, frequency in counts.items():
        ancilla = key.split()[ancilla_location]
        ancilla_values[ancilla] += frequency
    return ancilla_values
def strings_AND_bitwise(string1, string2):
    """Return the bitwise XOR of two equal-length bit strings.

    Despite the historical name, this computes XOR, not AND: the result bit
    is '1' exactly when the two input bits differ. This is the operation its
    caller (correct_qubit) needs to flip the erroneous bit under a mask.
    The name is kept to preserve the external interface.

    Parameters
    ----------
    string1 : str
        first bit string
    string2 : str
        second bit string

    Returns
    -------
    str
        per-position XOR of the inputs; positions whose character pair is
        not ('0','1') or ('1','0') yield '0', matching the original behavior

    Raises
    ------
    Exception
        if the strings differ in length
    """
    if len(string1) != len(string2):
        raise Exception('When taking the logical AND of two strings they must both have the same length')
    return ''.join('1' if a + b in ('01', '10') else '0'
                   for a, b in zip(string1, string2))
def string_ancilla_mask(location, length):
    """Return a bit string with '1' at one position and '0' elsewhere.

    The mask is counted from the right: location 1 puts the '1' in the last
    character, location == length in the first.

    Parameters
    ----------
    location : int
        1-based position (from the right) of the '1' in the mask
    length : int
        length of the mask string

    Returns
    -------
    str
        ancilla bit mask string

    Raises
    ------
    ValueError
        on non-integer or out-of-range arguments

    Notes
    -----
    Two defects fixed: validation errors were *returned* as Exception
    objects instead of raised, so callers received an Exception as the
    mask; and the shift used a hard-coded slice ``string[1:7]`` which was
    only correct for length 7. The mask is now built directly, which is
    equivalent for all valid lengths.
    """
    if not isinstance(location, int):
        raise ValueError('Location of string must an integer when calculating ancilla mask')
    if not isinstance(length, int):
        raise ValueError('Length of string must an integer when calculating ancilla mask')
    if location < 1:
        raise ValueError('Location of string must be strictly positive when calculating ancilla mask')
    if length < 1:
        raise ValueError('String length must be greater than 1 when calculating ancilla mask')
    if length < location:
        raise ValueError('Location must be less than string length when calculating ancilla mask')
    return '0' * (length - location) + '1' + '0' * (location - 1)
def correct_qubit(data_in, ancilla, data_qubits):
    """Return the data bit string corrected according to the ancilla reading.

    Parameters
    ----------
    data_in : str
        input data bit string
    ancilla : str
        three bit ancilla logical Z code
    data_qubits : int
        length of the data bit string

    Returns
    -------
    str
        corrected data bit string

    Notes
    -----
    The ancilla string is reversed before being read as a binary number
    because Qiskit orders bits opposite to the circuit-diagram numbering.
    Only bit-flip errors are corrected here, not phase errors.
    """
    # '000' syndrome means no error was detected
    if ancilla == '000':
        return data_in
    error_position = int(string_reverse(ancilla), 2)
    flip_mask = string_ancilla_mask(error_position, data_qubits)
    # flipping the erroneous bit is done via a bitwise combination with the mask
    return strings_AND_bitwise(data_in, flip_mask)
def flip_code_words(codewords_in):
    """Return the logical-one codewords obtained by flipping every bit of
    the logical-zero codewords.

    Parameters
    ----------
    codewords_in : list
        codewords (bit strings) for the logical zero

    Returns
    -------
    list
        the bit-flipped codewords

    Raises
    ------
    Exception
        if any character is not '0' or '1'
    """
    flip = {'0': '1', '1': '0'}
    codewords_out = []
    for word in codewords_in:
        flipped_bits = []
        for bit in word:
            if bit not in flip:
                raise Exception('Not able to interpret bit in codewords')
            flipped_bits.append(flip[bit])
        codewords_out.append(''.join(flipped_bits))
    return codewords_out
def get_noise(p_meas, single_qubit_error,
              two_qubit_error, single_qubit_gate_set,
              two_qubit_gate_set, all = True,
              noisy_qubit_list = [],
              decohere = False,
              dummy_gate_set = [],
              dummy_gate_error = 0
              ):
    """Returns a noise model
    Parameters
    ----------
    p_meas : float
        probability of X error on measurement
    single_qubit_error : float
        probability of a depolarizing error on a single qubit gate
    two_qubit_error : float
        probability of a depolarizing error on a two qubit gate
    single_qubit_gate_set : list
        list of all single qubit gate types relevant for noise
    two_qubit_gate_set : list
        list of all two qubit gate types relevant for noise
    all : bool
        apply noise to all qubits; if False a noisy_qubit_list is required
    noisy_qubit_list : list of list
        list of list of noisy qubits on which errors are applied
    decohere : bool
        Add extra noise to represent de-coherence
    dummy_gate_set : list
        Set of dummy gates on which the de-coherence error is applied. Normally ['id'].
    dummy_gate_error : float
        error to apply to dummy gate which is set up to model de-coherence at certain stages in the circuit.
    Returns
    -------
    noise_model : NoiseModel
        qiskit Aer noise model to be used
    Notes
    -----
    Can apply noise selectively to qubits in noisy_qubit_list. This is a list of lists.
    NOTE(review): the parameter name 'all' shadows the builtin; renaming would
    break keyword callers, so it is left unchanged.
    NOTE(review): noisy_qubit_list and dummy_gate_set are mutable default
    arguments; they are never mutated here, but this is fragile.
    """
    # X flip with probability p_meas, attached to measurement operations below
    error_meas = pauli_error([('X', p_meas), ('I', 1 - p_meas)])
    error_gate1 = depolarizing_error(single_qubit_error, 1)
    error_gate2 = depolarizing_error(two_qubit_error, 1)
    # two-qubit error is the tensor product of two independent single-qubit
    # depolarizing channels — not a true two-qubit depolarizing channel
    error_gate3 = error_gate2.tensor(error_gate2)
    if decohere:
        if 'id' in single_qubit_gate_set:
            raise ValueError('Do not include gate id in the single_qubit_gate_set as used for decoherent errors')
        error_decohere = depolarizing_error(dummy_gate_error, 1)
    noise_model = NoiseModel()
    if all:
        if noisy_qubit_list != []:
            raise ValueError('Errors are applied to all qubits but a list of qubits with errors is given')
        noise_model.add_all_qubit_quantum_error(error_meas, 'measure')
        # measurement error is applied to measurements
        noise_model.add_all_qubit_quantum_error(error_gate1,
                                                single_qubit_gate_set)
        # single qubit gate errors
        noise_model.add_all_qubit_quantum_error(error_gate3,
                                                two_qubit_gate_set)
        # two qubit gate error is applied to two qubit gates
        if decohere:
            noise_model.add_all_qubit_quantum_error(error_decohere,
                                                    dummy_gate_set)
            # decoherence error is applied to dummy gates
    else:
        if noisy_qubit_list == []:
            raise ValueError('A list of qubits must be supplied if errors are not to be applied to all qubits')
        #read through list of list of error gates
        for gate_list in noisy_qubit_list:
            for gate_index1 in gate_list:
                noise_model.add_quantum_error(error_meas, 'measure',
                                              [gate_index1]
                                              )
                # measurement error is applied to measurements
                noise_model.add_quantum_error(error_gate1,
                                              single_qubit_gate_set,
                                              [gate_index1]
                                              )
                # single qubit gate errors
                if decohere:
                    noise_model.add_quantum_error(error_decohere ,
                                                  dummy_gate_set,
                                                  [gate_index1]
                                                  )
                    # decoherence error is applied to dummy gates
                # two-qubit error on every ordered pair of distinct qubits
                # within the same sub-list
                for gate_index2 in gate_list:
                    if gate_index1 != gate_index2:
                        noise_model.add_quantum_error(error_gate3,
                                                      two_qubit_gate_set,
                                                      [gate_index1,
                                                       gate_index2]
                                                      )
    return noise_model
def mean_of_list(list_in):
    """Return the arithmetic mean of a list of numbers.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    float
        the mean of the input values

    Raises
    ------
    ValueError
        if the list is empty (previously this raised a bare
        ZeroDivisionError, which was misleading)
    """
    if not list_in:
        raise ValueError('Cannot calculate the mean of an empty list')
    return sum(list_in) / len(list_in)
def calculate_standard_error(list_in):
    """Calculate the standard deviation and standard error of a sample.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    standard_deviation : float
        standard deviation estimated from the sample
    standard_error : float
        standard error estimated from the sample (stdev / sqrt(n))

    Raises
    ------
    ValueError
        if the list is empty

    Notes
    -----
    With a single data point no spread can be estimated, so both values
    default to 0 and a warning is printed.
    """
    if len(list_in) > 1:
        standard_deviation = stdev(list_in)
        standard_error = standard_deviation / sqrt(len(list_in))
    elif len(list_in) == 1:
        standard_deviation = 0
        standard_error = 0
        print('Unable to carry out standard error calculation with one point. ')
        print('Standard error of 0 used.')
    else:
        # original message was a broken f-string ('f The number ...' with an
        # undefined {iterations} placeholder); fixed to a real message
        raise ValueError(f'The number of data points must be positive; {len(list_in)} supplied')
    return (standard_deviation, standard_error)
def convert_codewords(codewords):
""" Changes the codewords list of lists to a list of strings
Parameters
----------
codewords : list
allowed codewords for logical zero
Returns
-------
list_of_strings : list
a list of strings
Notes
-----
No longer needed at present as codeword is a list of strings
but retained in case needed in future.
"""
list_of_strings = []
for lists in codewords:
new_string = ''
for item in lists:
new_string = new_string + str(item)
list_of_strings.append(new_string)
return(list_of_strings)
def summarise_logical_counts(counts, logical_zero_strings, logical_one_strings,
data1_location, data2_location, simple = False):
"""Simplifies bit strings for logical operations
to show each qubit as 0, 1, or 2 instead of the full bit string.
0. means qubit is the logical zero
1. means qubit is the logical one
2. means qubit is outside code space
Parameters
----------
counts : dict
results of computation
logical_zero_strings : list
list of strings in logical zero
logical_one_strings : list
list of strings in logical zero
data1_location : int
where in the counts bit string data1 is held
data2_location : int
where in the counts bit string data2 is held
simple : bool
use simple decoding based on bit parity
Returns
-------
new_counts : dict
simplified results
"""
#set up dictionary to hold answer
if type(logical_zero_strings) != list:
raise Exception('logical_zero_strings should be a list')
if type(logical_one_strings) != list:
raise Exception('logical_one_strings should be a list')
validate_integer(data1_location)
validate_integer(data2_location)
if simple:
if len(logical_zero_strings) != 1:
raise Exception('with simple decoding logical zero should be a list with one entry')
if len(logical_zero_strings) != 1:
raise Exception('with simple decoding logical one should be a list with one entry')
simple_parity_bits = calculate_simple_parity_bits()
new_counts = {str(i) + str(j):0 for i in range(3) for j in range(3)}
for key, value in counts.items():
#split out the data parts of key
data1 = key.split()[data1_location]
data2 = key.split()[data2_location]
#need to reverse the string from qiskit format
reverse1 = string_reverse(data1)
reverse2 = string_reverse(data2)
if simple:
#string is calculated from parity
bit_string1 = ['']
bit_string2 = ['']
for bit_location in simple_parity_bits:
bit_string1.append(reverse1[bit_location])
bit_string2.append(reverse2[bit_location])
new_data1 = str(calculate_parity(bit_string1))
new_data2 = str(calculate_parity(bit_string2))
else:
new_data1 = look_up_data(reverse1, logical_zero_strings, logical_one_strings)
new_data2 = look_up_data(reverse2, logical_zero_strings, logical_one_strings)
new_key = new_data1 + new_data2
if new_counts.get(new_key) == None:
new_counts.update({new_key: value})
else:
new_counts[new_key] = new_counts[new_key] + value
return(new_counts)
def look_up_data(input_string, logical_zero, logical_one):
"""Looks up the input data to determine if the string is a logical one,
logical zero, or outside the code base.
Parameters
----------
input_string : str
data for analysis
logical_zero : list
list of strings representing a logical zero
logical_one : str
list of strings representing a logical one
Returns
-------
output_string : str
result of look-up"""
if input_string in logical_zero:
output_string = '0'
elif input_string in logical_one:
output_string = '1'
else:
output_string = 'E'
return(output_string)
def print_time():
"""Prints current time"""
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
return
def validate_integer(number):
"""Checks if a number is an integer.
Parameters
----------
number: int
number to be validated
"""
if type(number) != int:
raise ValueError(f'The number {number} entered is not an integer')
def process_FT_results(counts, codewords, data_meas_strings = ['0'],
anc_zero = '0', anc_one = '1',
verbose = False, data_qubits = 7,
ancilla_start = 0, data_meas_start = 0, data_start = 0,
ancilla_types = 2, ancilla_qubits = 0, ancilla_meas_repeats = 1,
data_meas_qubits = 0, data_meas_repeats = 0,
post_selection = False, simple = False,
):
"""Process results from fault tolerant processing.
Parameters
----------
counts : dictionary
results for analysis
codewords : list
list of valid data codewords
data_meas_strings: string
allowed strings for the data measurement bits
anc_zero : string
allowed strings for the ancilla zero
anc_one : string
allowed strings for the ancilla one
verbose : bool
if true enables printing
data_qubits : int
Length of data bit string. Usually seven
ancilla_start : int
starting place for ancilla (if any)
data_meas_start : int
starting place for data measurement qubits (if any)
data_start : int
starting place for data string
ancilla_types : int
number of different ancilla types. Normally 2 (X and Z) or 0
ancilla_qubits : int
number of strings for each ancilla qubits. Normally 0, 1 or 3
ancilla_meas_repeats : int
number of times ancilla measurements are repeated. Normally 3 or 1
data_meas_qubits : int
number of distinct data measurement qubits. Normally 7, 1 or 0
data_meas_repeats: int
number of times data measurements are repeated. Normally 3 or 1.
post_select: bool
if true then only strings in logical zero are invalid
simple : bool
if true then simple decoding based on three bits shall be used.
Returns
-------
error_rate : float
error rate calculated
rejected : int
strings rejected for validation
accepted : int
strings accepted for validation
valid : int
strings validated and found to be in the code space
invalid : int
strings validated and found to not be in the code space
Notes
-----
This function takes the output string, splits it, and determines if it passes
data and ancilla checks. If so the data keyword is validated.
"""
anc_meas_strings = [anc_zero, anc_one]
validate_integer(ancilla_start)
validate_integer(data_meas_start)
validate_integer(data_start)
validate_integer(ancilla_types)
validate_integer(ancilla_qubits)
validate_integer(ancilla_meas_repeats)
validate_integer(data_meas_qubits)
validate_integer(data_meas_repeats)
total_keys = ancilla_types * ancilla_qubits * ancilla_meas_repeats
total_keys = total_keys + (data_meas_qubits * data_meas_repeats) + 1
count_valid = 0
count_invalid = 0
count_outside_codeword = 0
ancilla_rejected = 0
ancilla_accepted = 0
data_rejected = 0
data_accepted = 0
rejected = 0
accepted = 0
for string, value in counts.items():
qubit_strings = []
data_syndrome_strings = []
data_OK = False
for i in range(total_keys):
qubit_strings.append(string.split()[i])
data_string = qubit_strings[data_start]
for i in range(data_meas_start, data_meas_start + data_meas_repeats):
#need to reverse strings because Qiskit reverses them
data_syndrome_strings.append(string_reverse(qubit_strings[i]))
if data_meas_repeats == 3:
if data_syndrome_strings[2] in data_meas_strings:
if data_syndrome_strings[1] in data_meas_strings:
if data_syndrome_strings[0] in data_meas_strings:
data_OK = True
elif data_meas_repeats == 0:
data_OK = True
else:
raise Exception('At present only 3 or zero data measurements are coded for')
if data_OK:
data_accepted = data_accepted + value
if ancilla_qubits == 0:
#no ancilla
ancilla_accepted = data_accepted
ancilla_rejected = 0
ancilla_OK = True
corrected_data_string = data_string
elif ancilla_qubits == 1:
#simple case without fault tolerance. No check on ancilla possible
ancilla_OK = True
ancilla_accepted = data_accepted
ancilla_rejected = 0
if ancilla_meas_repeats != 1:
raise Exception('can not handle multiple measurements on one ancilla qubit')
ancilla = qubit_strings[ancilla_start]
corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
elif ancilla_qubits == 3:
#complex case with fault tolerance
count_ancilla_OK = 0
X = ['' for i in range(ancilla_qubits)]
for i in range(ancilla_types):
for j in range(ancilla_meas_repeats):
first = i * (ancilla_qubits * ancilla_meas_repeats) + j * ancilla_meas_repeats
second = first + 1
third = second + 1
if qubit_strings[third] == qubit_strings[second]:
if qubit_strings[second] == qubit_strings[first]:
if qubit_strings[first] in anc_meas_strings:
count_ancilla_OK = count_ancilla_OK + 1
if i == 0:
#only interested in X values
if qubit_strings[first] in anc_zero:
X[j] = '0'
elif qubit_strings[first] in anc_one:
X[j] = '1'
else:
raise Exception('Error in processing strings for i, j, k = {i}, {j}, {k}')
if count_ancilla_OK == ancilla_qubits * ancilla_types:
ancilla_OK = True
ancilla_accepted = ancilla_accepted + value
#always first three ancilla with Steane code
ancilla = X[0] + X[1] + X[2]
corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
else:
ancilla_OK = False
ancilla_rejected = ancilla_rejected + value
else:
raise Exception('Can only process ancilla strings of 0, 1 or 3 qubits')
if ancilla_OK:
#need to reverse string because of Qisit convention
reversed_data_string = string_reverse(corrected_data_string)
valid, invalid, outside_codeword = compute_string_validity(value,
codewords,
reversed_data_string,
post_selection = post_selection,
simple = simple,
)
count_valid = count_valid + valid
count_invalid = count_invalid + invalid
count_outside_codeword = count_outside_codeword + outside_codeword
else:
data_rejected = data_rejected + value
if ancilla_accepted != 0:
# calculate on ancilla_accepted because this always holds the amounts to be validated
error_rate = count_invalid / ancilla_accepted
else:
error_rate = 0
print('Error rate not defined as no strings accepted')
rejected = data_rejected + ancilla_rejected
accepted = ancilla_accepted
if verbose:
print(f'At the data validation stage')
print(f'There are {data_rejected} strings rejected and {data_accepted} strings submitted for processing')
print(f'Making {data_rejected + data_accepted} in total submitted for data processing')
print()
print(f'At the ancilla validation stage')
print(f'There are {ancilla_rejected} strings rejected and {ancilla_accepted} strings submitted for validation')
print(f'Making {ancilla_rejected + ancilla_accepted} in total submitted to check against ancilla')
print()
print(f'Of these {ancilla_accepted} strings validated there are {count_valid} valid strings and {count_invalid} invalid_strings')
if post_selection:
print(f'There were {count_outside_codeword} strings that were neither logical one or logical zero')
print(f'The error rate is {error_rate:.4f}')
return(error_rate, rejected, accepted, count_valid, count_invalid)
def get_parity_check_matrix():
"""Stores the parity matrix in one place"""
parity_check_matrix = ['0001111',
'0110011',
'1010101'
]
return(parity_check_matrix)
def get_codewords():
"""Stores the codewords for the logical zero in one place
Returns
-------
codewords : list
A list of valid codewords for the logical zero
"""
codewords =['0000000',
'1010101',
'0110011',
'1100110',
'0001111',
'1011010',
'0111100',
'1101001'
]
return(codewords)
def calculate_parity_matrix_totals():
"""Calculates the number of items in each row of the parity matrix
Returns
-------
parity_matrix_totals : list
List holding parity matrix totals for each row in the parity matrix.
"""
parity_check_matrix = get_parity_check_matrix()
n = len(parity_check_matrix[0])
parity_matrix_totals = [ 0 for x in range(n)] # define an empty list
#ready to work out parity_matrix_totals
#calculate the number of non-zero entries in each row of the parity matrix and store
for parity_string in parity_check_matrix :
for index in range(n):
parity_matrix_totals[index] = parity_matrix_totals[index] + int(parity_string[index])
return(parity_matrix_totals) | 0.906821 | 0.667212 |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def create(resource_group, vnet_name, name, address_prefixes, network_security_group=None, route_table=None, service_endpoints=None, service_endpoint_policy=None, delegations=None, nat_gateway=None, disable_private_endpoint_network_policies=None, disable_private_link_service_network_policies=None):
params = get_params(locals())
command = "az network vnet subnet create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(resource_group, vnet_name, name):
params = get_params(locals())
command = "az network vnet subnet delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(resource_group, vnet_name, name, expand=None):
params = get_params(locals())
command = "az network vnet subnet show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group, vnet_name):
params = get_params(locals())
command = "az network vnet subnet list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(resource_group, vnet_name, name, address_prefixes=None, network_security_group=None, route_table=None, service_endpoints=None, delegations=None, nat_gateway=None, service_endpoint_policy=None, disable_private_endpoint_network_policies=None, disable_private_link_service_network_policies=None, set=None, add=None, remove=None, force_string=None):
params = get_params(locals())
command = "az network vnet subnet update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list_available_delegations(resource_group=None, location=None):
params = get_params(locals())
command = "az network vnet subnet list-available-delegations " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr) | test/pyaz/network/vnet/subnet/__init__.py | import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def create(resource_group, vnet_name, name, address_prefixes, network_security_group=None, route_table=None, service_endpoints=None, service_endpoint_policy=None, delegations=None, nat_gateway=None, disable_private_endpoint_network_policies=None, disable_private_link_service_network_policies=None):
params = get_params(locals())
command = "az network vnet subnet create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(resource_group, vnet_name, name):
params = get_params(locals())
command = "az network vnet subnet delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(resource_group, vnet_name, name, expand=None):
params = get_params(locals())
command = "az network vnet subnet show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group, vnet_name):
params = get_params(locals())
command = "az network vnet subnet list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(resource_group, vnet_name, name, address_prefixes=None, network_security_group=None, route_table=None, service_endpoints=None, delegations=None, nat_gateway=None, service_endpoint_policy=None, disable_private_endpoint_network_policies=None, disable_private_link_service_network_policies=None, set=None, add=None, remove=None, force_string=None):
params = get_params(locals())
command = "az network vnet subnet update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list_available_delegations(resource_group=None, location=None):
params = get_params(locals())
command = "az network vnet subnet list-available-delegations " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr) | 0.189334 | 0.061734 |
import gdb
import functools
import time
import logging
import traceback
global isDevelopmentBuild
global setTrace
global currentExecutionContext
variableReferenceCounter = 0
def next_variable_reference():
global variableReferenceCounter
res = variableReferenceCounter + 1
variableReferenceCounter += 1
return res
isDevelopmentBuild = False
setTrace = False
currentExecutionContext = None
class ReferenceKey:
def __init__(self, threadId, stackFrameId):
self.threadId = threadId
self.frameId = stackFrameId
class VariableReferenceMap:
def __init__(self):
self.lookup = {}
def add_mapping(self, variableReference, midasStackFrame):
self.lookup[variableReference] = midasStackFrame.reference_key()
def get_context(self, variableReference) -> ReferenceKey:
return self.lookup.get(variableReference)
variableReferences = VariableReferenceMap()
def timeInvocation(f):
if not isDevelopmentBuild:
return f
"""Measure performance (time) of command or function"""
@functools.wraps(f)
def timer_decorator(*args, **kwargs):
invokeBegin = time.perf_counter_ns()
result = f(*args, **kwargs)
invokeEnd = time.perf_counter_ns()
logger = logging.getLogger("time-logger")
# we don't need nano-second measuring, but the accuracy of the timer is nice.
elapsed_time = int((invokeEnd - invokeBegin) / 1000)
logger.info("{:<30} executed in {:>10,} microseconds".format(f.__qualname__, elapsed_time))
return result
return timer_decorator
def error_logger():
return logging.getLogger("error-logger")
def update_logger():
return logging.getLogger("update-logger")
def timing_logger():
return logging.getLogger("time-logger")
def log_exception(logger, errmsg, exception):
logger.error("{} Exception info: {}".format(errmsg, exception))
logger.error(traceback.format_exc())
logger.error("Current dev setting: {}".format(isDevelopmentBuild)) | modules/python/config.py |
import gdb
import functools
import time
import logging
import traceback
global isDevelopmentBuild
global setTrace
global currentExecutionContext
variableReferenceCounter = 0
def next_variable_reference():
global variableReferenceCounter
res = variableReferenceCounter + 1
variableReferenceCounter += 1
return res
isDevelopmentBuild = False
setTrace = False
currentExecutionContext = None
class ReferenceKey:
def __init__(self, threadId, stackFrameId):
self.threadId = threadId
self.frameId = stackFrameId
class VariableReferenceMap:
def __init__(self):
self.lookup = {}
def add_mapping(self, variableReference, midasStackFrame):
self.lookup[variableReference] = midasStackFrame.reference_key()
def get_context(self, variableReference) -> ReferenceKey:
return self.lookup.get(variableReference)
variableReferences = VariableReferenceMap()
def timeInvocation(f):
if not isDevelopmentBuild:
return f
"""Measure performance (time) of command or function"""
@functools.wraps(f)
def timer_decorator(*args, **kwargs):
invokeBegin = time.perf_counter_ns()
result = f(*args, **kwargs)
invokeEnd = time.perf_counter_ns()
logger = logging.getLogger("time-logger")
# we don't need nano-second measuring, but the accuracy of the timer is nice.
elapsed_time = int((invokeEnd - invokeBegin) / 1000)
logger.info("{:<30} executed in {:>10,} microseconds".format(f.__qualname__, elapsed_time))
return result
return timer_decorator
def error_logger():
return logging.getLogger("error-logger")
def update_logger():
return logging.getLogger("update-logger")
def timing_logger():
return logging.getLogger("time-logger")
def log_exception(logger, errmsg, exception):
logger.error("{} Exception info: {}".format(errmsg, exception))
logger.error(traceback.format_exc())
logger.error("Current dev setting: {}".format(isDevelopmentBuild)) | 0.615319 | 0.05498 |
__docformat__ = "reStructuredText"
from Testing import ZopeTestCase
from zope.testing.doctest import INTERPRET_FOOTNOTES
from zope.testing.loggingsupport import InstalledHandler
import doctest
import random
import unittest
import logging
from five.taskqueue import service
ZopeTestCase.installProduct('Five')
def _configure_conflict_error_log_level():
import App.config
config = App.config.getConfiguration()
config.conflict_error_log_level = logging.INFO
App.config.setConfiguration(config)
def setUp(test):
test.globs['root'] = ZopeTestCase.base.app()
# As task will be run in different threads, we cannot rely on print
# results. We need to log calls to prove correctness.
log_info = InstalledHandler('z3c.taskqueue')
test.globs['log_info'] = log_info
# We pass the ZPublisher conflict logger to prove that no conflict
# happened.
conflict_logger = InstalledHandler('ZPublisher.Conflict')
test.globs['conflict_logger'] = conflict_logger
# Make sure ZPublisher conflict error log level is setup.
_configure_conflict_error_log_level()
test.origArgs = service.TaskService.processorArguments
service.TaskService.processorArguments = {'waitTime': 0.0}
# Make tests predictable
random.seed(27)
def tearDown(test):
random.seed()
service.TaskService.processorArguments = test.origArgs
class TestIdGenerator(unittest.TestCase):
def setUp(self):
random.seed(27)
self.service = service.TaskService()
def tearDown(self):
random.seed()
def test_sequence(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.assertEquals(id + 2, self.service._generateId())
self.assertEquals(id + 3, self.service._generateId())
def test_in_use_randomises(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.service.jobs[id + 1] = object()
id = 1506179619
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.service.jobs[id + 1] = object()
self.assertEquals(id + 2, self.service._generateId())
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestIdGenerator),
ZopeTestCase.ZopeDocFileSuite('processor.txt',
package='five.taskqueue.tests',
setUp=setUp,
tearDown=tearDown,
optionflags=doctest.NORMALIZE_WHITESPACE
| doctest.ELLIPSIS
| INTERPRET_FOOTNOTES),
)) | src/five/taskqueue/tests/test_doctests.py | __docformat__ = "reStructuredText"
from Testing import ZopeTestCase
from zope.testing.doctest import INTERPRET_FOOTNOTES
from zope.testing.loggingsupport import InstalledHandler
import doctest
import random
import unittest
import logging
from five.taskqueue import service
ZopeTestCase.installProduct('Five')
def _configure_conflict_error_log_level():
import App.config
config = App.config.getConfiguration()
config.conflict_error_log_level = logging.INFO
App.config.setConfiguration(config)
def setUp(test):
test.globs['root'] = ZopeTestCase.base.app()
# As task will be run in different threads, we cannot rely on print
# results. We need to log calls to prove correctness.
log_info = InstalledHandler('z3c.taskqueue')
test.globs['log_info'] = log_info
# We pass the ZPublisher conflict logger to prove that no conflict
# happened.
conflict_logger = InstalledHandler('ZPublisher.Conflict')
test.globs['conflict_logger'] = conflict_logger
# Make sure ZPublisher conflict error log level is setup.
_configure_conflict_error_log_level()
test.origArgs = service.TaskService.processorArguments
service.TaskService.processorArguments = {'waitTime': 0.0}
# Make tests predictable
random.seed(27)
def tearDown(test):
random.seed()
service.TaskService.processorArguments = test.origArgs
class TestIdGenerator(unittest.TestCase):
def setUp(self):
random.seed(27)
self.service = service.TaskService()
def tearDown(self):
random.seed()
def test_sequence(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.assertEquals(id + 2, self.service._generateId())
self.assertEquals(id + 3, self.service._generateId())
def test_in_use_randomises(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.service.jobs[id + 1] = object()
id = 1506179619
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.service.jobs[id + 1] = object()
self.assertEquals(id + 2, self.service._generateId())
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestIdGenerator),
ZopeTestCase.ZopeDocFileSuite('processor.txt',
package='five.taskqueue.tests',
setUp=setUp,
tearDown=tearDown,
optionflags=doctest.NORMALIZE_WHITESPACE
| doctest.ELLIPSIS
| INTERPRET_FOOTNOTES),
)) | 0.464416 | 0.135175 |
import logging
from oidcmsg import oauth2
from oidcmsg.time_util import utc_time_sans_frac
from oidcendpoint.endpoint import Endpoint
LOGGER = logging.getLogger(__name__)
class Introspection(Endpoint):
"""Implements RFC 7662"""
request_cls = oauth2.TokenIntrospectionRequest
response_cls = oauth2.TokenIntrospectionResponse
request_format = "urlencoded"
response_format = "json"
endpoint_name = "introspection_endpoint"
name = "introspection"
def __init__(self, **kwargs):
Endpoint.__init__(self, **kwargs)
self.offset = kwargs.get("offset", 0)
def get_client_id_from_token(self, endpoint_context, token, request=None):
"""
Will try to match tokens against information in the session DB.
:param endpoint_context:
:param token:
:param request:
:return: client_id if there was a match
"""
sinfo = endpoint_context.sdb[token]
return sinfo["authn_req"]["client_id"]
def _introspect(self, token):
try:
info = self.endpoint_context.sdb[token]
except KeyError:
return None
# Make sure that the token is an access_token or a refresh_token
if token not in info.get("access_token") and token != info.get(
"refresh_token"
):
return None
eat = info.get("expires_at")
if eat and eat < utc_time_sans_frac():
return None
if info: # Now what can be returned ?
ret = info.to_dict()
ret["iss"] = self.endpoint_context.issuer
if "scope" not in ret:
ret["scope"] = " ".join(info["authn_req"]["scope"])
return ret
def process_request(self, request=None, **kwargs):
"""
:param request: The authorization request as a dictionary
:param kwargs:
:return:
"""
_introspect_request = self.request_cls(**request)
if "error" in _introspect_request:
return _introspect_request
_token = _introspect_request["token"]
_resp = self.response_cls(active=False)
_info = self._introspect(_token)
if _info is None:
return {"response_args": _resp}
if "release" in self.kwargs:
if "username" in self.kwargs["release"]:
try:
_info["username"] = self.endpoint_context.userinfo.search(
sub=_info["sub"]
)
except KeyError:
pass
_resp.update(_info)
_resp.weed()
_resp["active"] = True
return {"response_args": _resp} | src/oidcendpoint/oauth2/introspection.py | import logging
from oidcmsg import oauth2
from oidcmsg.time_util import utc_time_sans_frac
from oidcendpoint.endpoint import Endpoint
LOGGER = logging.getLogger(__name__)
class Introspection(Endpoint):
"""Implements RFC 7662"""
request_cls = oauth2.TokenIntrospectionRequest
response_cls = oauth2.TokenIntrospectionResponse
request_format = "urlencoded"
response_format = "json"
endpoint_name = "introspection_endpoint"
name = "introspection"
def __init__(self, **kwargs):
Endpoint.__init__(self, **kwargs)
self.offset = kwargs.get("offset", 0)
def get_client_id_from_token(self, endpoint_context, token, request=None):
"""
Will try to match tokens against information in the session DB.
:param endpoint_context:
:param token:
:param request:
:return: client_id if there was a match
"""
sinfo = endpoint_context.sdb[token]
return sinfo["authn_req"]["client_id"]
def _introspect(self, token):
try:
info = self.endpoint_context.sdb[token]
except KeyError:
return None
# Make sure that the token is an access_token or a refresh_token
if token not in info.get("access_token") and token != info.get(
"refresh_token"
):
return None
eat = info.get("expires_at")
if eat and eat < utc_time_sans_frac():
return None
if info: # Now what can be returned ?
ret = info.to_dict()
ret["iss"] = self.endpoint_context.issuer
if "scope" not in ret:
ret["scope"] = " ".join(info["authn_req"]["scope"])
return ret
def process_request(self, request=None, **kwargs):
"""
:param request: The authorization request as a dictionary
:param kwargs:
:return:
"""
_introspect_request = self.request_cls(**request)
if "error" in _introspect_request:
return _introspect_request
_token = _introspect_request["token"]
_resp = self.response_cls(active=False)
_info = self._introspect(_token)
if _info is None:
return {"response_args": _resp}
if "release" in self.kwargs:
if "username" in self.kwargs["release"]:
try:
_info["username"] = self.endpoint_context.userinfo.search(
sub=_info["sub"]
)
except KeyError:
pass
_resp.update(_info)
_resp.weed()
_resp["active"] = True
return {"response_args": _resp} | 0.536313 | 0.076236 |
import os
import numpy as np
import matplotlib.pyplot as plt
import argparse
from wordcloud import WordCloud
from methods import RandomQuery, Tiara, TiaraS, EPSGreedy, UCB
from environments import get_class_ids, get_env
from utils import load_glove
def save_array(opt, budget, env_name, method_name, class_id, seed):
scores = np.array([opt.history[i][1] for i in range(budget)])
np.save('outputs/{}_{}_{}_{}_scores.npy'.format(env_name, method_name, class_id, seed), scores)
def update_pics(fig, opt, env, ts, num_methods, method_ind):
history = [opt.history[i - 1] for i in ts]
for ind, (loop, score, i) in enumerate(history):
ax = fig.add_subplot(num_methods, len(ts), len(ts) * method_ind + ind + 1)
img = env.get_image(i)
ax.imshow(img)
ax.text(0, img.size[1] + 100, 'i: {}\ns: {:.4f}\n{}'.format(loop + 1, score, i), size=16, color='red')
ax.axis('off')
def savefig(fig, basename):
fig.savefig('outputs/{}.png'.format(basename), bbox_inches='tight')
fig.savefig('outputs/{}.svg'.format(basename), bbox_inches='tight')
def save_curve(scores, methods, env_name, class_id):
fig, ax = plt.subplots()
for method_name, _, _ in methods:
ax.plot(scores[method_name].mean(0), label=method_name)
ax.legend()
fig.savefig('outputs/{}_{}_curve.png'.format(env_name, class_id), bbox_inches='tight')
def wordcloud_col(word, font_size, position, orientation, font_path, random_state):
lam = (font_size - 6) / (48 - 6)
red = np.array([255, 75, 0])
grey = np.array([132, 145, 158])
res = lam * red + (1 - lam) * grey
res = res.astype(int)
return (res[0], res[1], res[2])
def save_wordcloud(opt, env_name, class_id, seed, method_name, font_path):
    """Render the optimizer's learned tag scores as a circular word cloud.

    Saves both PNG and SVG variants under outputs/. Requires ``opt`` to
    expose ``tag_scores()`` and ``tags``.
    """
    raw_scores = opt.tag_scores()
    frequencies = {tag: raw_scores[tag_id] for tag_id, tag in enumerate(opt.tags)}
    # circular mask: white (255) outside a radius-150 disc centered at (150, 150)
    grid_x, grid_y = np.ogrid[:300, :300]
    mask = (grid_x - 150) ** 2 + (grid_y - 150) ** 2 > 150 ** 2
    mask = 255 * mask.astype(int)
    cloud = WordCloud(font_path=font_path, background_color='white', mask=mask, random_state=0, prefer_horizontal=1.0, max_font_size=48, min_font_size=6)
    cloud.generate_from_frequencies(frequencies)
    cloud.recolor(random_state=0, color_func=wordcloud_col)
    cloud.to_file('outputs/{}_{}_{}_{}_wordcloud.png'.format(env_name, class_id, seed, method_name))
    with open('outputs/{}_{}_{}_{}_wordcloud.svg'.format(env_name, class_id, seed, method_name), 'w') as f:
        # WordCloud's SVG emits bare 'fill:(r,g,b)' tuples; patch into valid CSS
        f.write(cloud.to_svg().replace('fill:(', 'fill:rgb('))
# ---------------------------------------------------------------------------
# Experiment driver: run every optimizer on every class/seed combination and
# save score arrays, checkpoint image grids, learning curves and word clouds.
# (Fix: removed a dataset-dump artifact that had been fused onto the final
# np.save line, which made it a syntax error.)
# ---------------------------------------------------------------------------
if not os.path.exists('outputs'):
    os.makedirs('outputs')

parser = argparse.ArgumentParser()
parser.add_argument('--tuning', action='store_true')
parser.add_argument('--extra', action='store_true')
parser.add_argument('--env', choices=['open', 'flickr', 'flickrsim'])
parser.add_argument('--num_seeds', type=int, default=10)
parser.add_argument('--budget', type=int, default=500)
parser.add_argument('--api_key', type=str, help='API key for Flickr.')
parser.add_argument('--api_secret', type=str, help='API secret key for Flickr.')
parser.add_argument('--font_path', type=str, help='Font path for wordclouds.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('-c', '--classes', type=int, nargs='*')
args = parser.parse_args()

# 300-d GloVe is always needed; the smaller dimensions only feed the tuning study.
glove = load_glove(300, 6)
if args.tuning:
    glove50 = load_glove(50, 6)
    glove100 = load_glove(100, 6)
    glove200 = load_glove(200, 6)

budget = args.budget
budget_ini = 1
class_ids = get_class_ids(args.env)
num_seeds = args.num_seeds
ts = [10, 50, 100, 200, 300, 400, 500]  # checkpoints
print(args.classes)
if args.classes:
    # optional subset of classes, selected by index
    class_ids = [class_ids[c] for c in args.classes]
print('classes:', class_ids)

# Each entry: (display name, optimizer class, constructor kwargs).
methods = [
    ('Tiara_1_0.01', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01, 'uncase': True}),
    ('UCB_1', UCB, {'alpha': 1.0}),
    ('random', RandomQuery, {})
]
if args.extra:
    methods += [
        ('TiaraS_1_0.01', TiaraS, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01}),
        ('eps_0.01', EPSGreedy, {'eps': 0.01}),
        ('eps_0.1', EPSGreedy, {'eps': 0.1}),
        ('eps_0.5', EPSGreedy, {'eps': 0.5}),
        ('UCB_0.1', UCB, {'alpha': 0.1}),
        ('UCB_10', UCB, {'alpha': 10.0}),
        ('adaeps_0.1', EPSGreedy, {'eps': 0.1, 'adaptive': True}),
        ('adaUCB_1', UCB, {'alpha': 1.0, 'adaptive': True}),
    ]
if args.tuning:
    methods += [
        ('Tiara_1_0.001', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.001}),
        ('Tiara_1_0.1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.1}),
        ('Tiara_1_1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 1}),
        ('Tiara_1_10', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 10}),
        ('Tiara_1_100', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 100}),
        ('Tiara_0.01_0.01', Tiara, {'word_embedding': glove, 'lam': 0.01, 'alpha': 0.01}),
        ('Tiara_0.1_0.01', Tiara, {'word_embedding': glove, 'lam': 0.1, 'alpha': 0.01}),
        ('Tiara_10_0.01', Tiara, {'word_embedding': glove, 'lam': 10, 'alpha': 0.01}),
        ('Tiara_100_0.01', Tiara, {'word_embedding': glove, 'lam': 100, 'alpha': 0.01}),
        ('Tiara_1000_0.01', Tiara, {'word_embedding': glove, 'lam': 1000, 'alpha': 0.01}),
        ('Tiara_50dim', Tiara, {'word_embedding': glove50, 'lam': 1, 'alpha': 0.01}),
        ('Tiara_100dim', Tiara, {'word_embedding': glove100, 'lam': 1, 'alpha': 0.01}),
        ('Tiara_200dim', Tiara, {'word_embedding': glove200, 'lam': 1, 'alpha': 0.01}),
    ]

for class_ind, class_id in enumerate(class_ids):
    scores = {method_name: np.zeros((num_seeds, budget)) for method_name, _, _ in methods}
    for seed in range(num_seeds):
        fig_pics = plt.figure(figsize=(len(ts) * 4, len(methods) * 3))
        for method_ind, (method_name, Opt, config) in enumerate(methods):
            if args.verbose:
                print(method_name, class_ind, seed)
            env = get_env(args.env, class_id, seed, args.api_key, args.api_secret)
            opt = Opt(env, budget, seed, budget_ini=budget_ini, verbose=args.verbose, **config)
            opt.optimize()
            scores[method_name][seed] = [opt.history[i][1] for i in range(budget)]
            update_pics(fig_pics, opt, env, ts, len(methods), method_ind)
            if hasattr(opt, 'tag_scores'):
                # only tag-based optimizers (Tiara variants) expose tag scores
                save_wordcloud(opt, args.env, class_id, seed, method_name, args.font_path)
            if hasattr(env, 'save_cache'):
                env.save_cache()
        savefig(fig_pics, '{}_{}_{}_figures'.format(args.env, class_id, seed))
        plt.close()
    save_curve(scores, methods, args.env, class_id)
    for method_name, _, _ in methods:
        np.save('outputs/{}_{}_{}_scores.npy'.format(args.env, class_id, method_name), scores[method_name])
# Duplicate copy of evaluate.py from the dataset's parsed_code column.
# Fixes: restore the `import os` line that the dump fused onto the previous
# row, and drop the probability-column artifact fused onto the final line.
import os
import numpy as np
import matplotlib.pyplot as plt
import argparse
from wordcloud import WordCloud
from methods import RandomQuery, Tiara, TiaraS, EPSGreedy, UCB
from environments import get_class_ids, get_env
from utils import load_glove


def save_array(opt, budget, env_name, method_name, class_id, seed):
    """Persist the per-iteration scores of one run as a .npy file."""
    scores = np.array([opt.history[i][1] for i in range(budget)])
    np.save('outputs/{}_{}_{}_{}_scores.npy'.format(env_name, method_name, class_id, seed), scores)


def update_pics(fig, opt, env, ts, num_methods, method_ind):
    """Add one row of checkpoint images for a method to the summary figure."""
    history = [opt.history[i - 1] for i in ts]
    for ind, (loop, score, i) in enumerate(history):
        ax = fig.add_subplot(num_methods, len(ts), len(ts) * method_ind + ind + 1)
        img = env.get_image(i)
        ax.imshow(img)
        ax.text(0, img.size[1] + 100, 'i: {}\ns: {:.4f}\n{}'.format(loop + 1, score, i), size=16, color='red')
        ax.axis('off')


def savefig(fig, basename):
    """Write a figure to outputs/ as both PNG and SVG."""
    fig.savefig('outputs/{}.png'.format(basename), bbox_inches='tight')
    fig.savefig('outputs/{}.svg'.format(basename), bbox_inches='tight')


def save_curve(scores, methods, env_name, class_id):
    """Plot mean learning curves (averaged over seeds) and save as PNG."""
    fig, ax = plt.subplots()
    for method_name, _, _ in methods:
        ax.plot(scores[method_name].mean(0), label=method_name)
    ax.legend()
    fig.savefig('outputs/{}_{}_curve.png'.format(env_name, class_id), bbox_inches='tight')


def wordcloud_col(word, font_size, position, orientation, font_path, random_state):
    """WordCloud color function: blend grey into red with font size."""
    lam = (font_size - 6) / (48 - 6)
    red = np.array([255, 75, 0])
    grey = np.array([132, 145, 158])
    res = (lam * red + (1 - lam) * grey).astype(int)
    return (res[0], res[1], res[2])


def save_wordcloud(opt, env_name, class_id, seed, method_name, font_path):
    """Render learned tag scores as a circular word cloud (PNG + SVG)."""
    tag_scores = opt.tag_scores()
    score_dict = {tag: tag_scores[tag_id] for tag_id, tag in enumerate(opt.tags)}
    x, y = np.ogrid[:300, :300]
    mask = (x - 150) ** 2 + (y - 150) ** 2 > 150 ** 2
    mask = 255 * mask.astype(int)
    wc = WordCloud(font_path=font_path, background_color='white', mask=mask, random_state=0, prefer_horizontal=1.0, max_font_size=48, min_font_size=6)
    wc.generate_from_frequencies(score_dict)
    wc.recolor(random_state=0, color_func=wordcloud_col)
    wc.to_file('outputs/{}_{}_{}_{}_wordcloud.png'.format(env_name, class_id, seed, method_name))
    with open('outputs/{}_{}_{}_{}_wordcloud.svg'.format(env_name, class_id, seed, method_name), 'w') as f:
        f.write(wc.to_svg().replace('fill:(', 'fill:rgb('))


if not os.path.exists('outputs'):
    os.makedirs('outputs')

parser = argparse.ArgumentParser()
parser.add_argument('--tuning', action='store_true')
parser.add_argument('--extra', action='store_true')
parser.add_argument('--env', choices=['open', 'flickr', 'flickrsim'])
parser.add_argument('--num_seeds', type=int, default=10)
parser.add_argument('--budget', type=int, default=500)
parser.add_argument('--api_key', type=str, help='API key for Flickr.')
parser.add_argument('--api_secret', type=str, help='API secret key for Flickr.')
parser.add_argument('--font_path', type=str, help='Font path for wordclouds.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('-c', '--classes', type=int, nargs='*')
args = parser.parse_args()

glove = load_glove(300, 6)
if args.tuning:
    glove50 = load_glove(50, 6)
    glove100 = load_glove(100, 6)
    glove200 = load_glove(200, 6)

budget = args.budget
budget_ini = 1
class_ids = get_class_ids(args.env)
num_seeds = args.num_seeds
ts = [10, 50, 100, 200, 300, 400, 500]  # checkpoints
print(args.classes)
if args.classes:
    class_ids = [class_ids[c] for c in args.classes]
print('classes:', class_ids)

methods = [
    ('Tiara_1_0.01', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01, 'uncase': True}),
    ('UCB_1', UCB, {'alpha': 1.0}),
    ('random', RandomQuery, {})
]
if args.extra:
    methods += [
        ('TiaraS_1_0.01', TiaraS, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01}),
        ('eps_0.01', EPSGreedy, {'eps': 0.01}),
        ('eps_0.1', EPSGreedy, {'eps': 0.1}),
        ('eps_0.5', EPSGreedy, {'eps': 0.5}),
        ('UCB_0.1', UCB, {'alpha': 0.1}),
        ('UCB_10', UCB, {'alpha': 10.0}),
        ('adaeps_0.1', EPSGreedy, {'eps': 0.1, 'adaptive': True}),
        ('adaUCB_1', UCB, {'alpha': 1.0, 'adaptive': True}),
    ]
if args.tuning:
    methods += [
        ('Tiara_1_0.001', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.001}),
        ('Tiara_1_0.1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.1}),
        ('Tiara_1_1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 1}),
        ('Tiara_1_10', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 10}),
        ('Tiara_1_100', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 100}),
        ('Tiara_0.01_0.01', Tiara, {'word_embedding': glove, 'lam': 0.01, 'alpha': 0.01}),
        ('Tiara_0.1_0.01', Tiara, {'word_embedding': glove, 'lam': 0.1, 'alpha': 0.01}),
        ('Tiara_10_0.01', Tiara, {'word_embedding': glove, 'lam': 10, 'alpha': 0.01}),
        ('Tiara_100_0.01', Tiara, {'word_embedding': glove, 'lam': 100, 'alpha': 0.01}),
        ('Tiara_1000_0.01', Tiara, {'word_embedding': glove, 'lam': 1000, 'alpha': 0.01}),
        ('Tiara_50dim', Tiara, {'word_embedding': glove50, 'lam': 1, 'alpha': 0.01}),
        ('Tiara_100dim', Tiara, {'word_embedding': glove100, 'lam': 1, 'alpha': 0.01}),
        ('Tiara_200dim', Tiara, {'word_embedding': glove200, 'lam': 1, 'alpha': 0.01}),
    ]

for class_ind, class_id in enumerate(class_ids):
    scores = {method_name: np.zeros((num_seeds, budget)) for method_name, _, _ in methods}
    for seed in range(num_seeds):
        fig_pics = plt.figure(figsize=(len(ts) * 4, len(methods) * 3))
        for method_ind, (method_name, Opt, config) in enumerate(methods):
            if args.verbose:
                print(method_name, class_ind, seed)
            env = get_env(args.env, class_id, seed, args.api_key, args.api_secret)
            opt = Opt(env, budget, seed, budget_ini=budget_ini, verbose=args.verbose, **config)
            opt.optimize()
            scores[method_name][seed] = [opt.history[i][1] for i in range(budget)]
            update_pics(fig_pics, opt, env, ts, len(methods), method_ind)
            if hasattr(opt, 'tag_scores'):
                save_wordcloud(opt, args.env, class_id, seed, method_name, args.font_path)
            if hasattr(env, 'save_cache'):
                env.save_cache()
        savefig(fig_pics, '{}_{}_{}_figures'.format(args.env, class_id, seed))
        plt.close()
    save_curve(scores, methods, args.env, class_id)
    for method_name, _, _ in methods:
        np.save('outputs/{}_{}_{}_scores.npy'.format(args.env, class_id, method_name), scores[method_name])
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.reverse import reverse
from chigre.models import PubGallery
from chigre.serializers import PubGallerySerializer
# Create your tests here.
class PubGalleryCreateTest(APITestCase):
    """POST to gallery-list creates a new photo."""

    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.data = {'title': 'photo', 'description': 'great photo', 'image': 'great.photo.jpg'}

    def test_create_photo(self):
        """Ensure we can create a new photo object."""
        response = self.client.post(reverse('gallery-list'), self.data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class PubGalleryReadTest(APITestCase):
    """GET on the list and detail photo endpoints."""

    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='photo', description='great photo', image='great.photo.jpg', creator=self.superuser)

    def test_read_photos(self):
        """Ensure we can read the photo list."""
        response = self.client.get(reverse('gallery-list'), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_read_photo(self):
        """Ensure we can read a single photo object."""
        detail_url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.get(detail_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class PubGalleryUpdateTest(APITestCase):
    """PUT on the detail endpoint updates a photo."""

    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='foto', description='great photo', image='great.photo.jpg', creator=self.superuser)
        # start from the serialized representation, then change the title
        self.data = PubGallerySerializer(self.photo).data
        self.data.update({'title': 'photo'})

    def test_update_photo(self):
        """Ensure we can update a photo object."""
        detail_url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.put(detail_url, self.data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class BreweryDeleteTest(APITestCase):
    """DELETE on the detail endpoint removes a photo.

    NOTE(review): the class name says 'Brewery' but it exercises PubGallery
    (likely copied from another app's tests); kept as-is to avoid renaming a
    discoverable test class. Fix applied: removed a dataset-dump artifact
    that had been fused onto the final assertion line.
    """

    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='foto', description='great photo', image='great.photo.jpg', creator=self.superuser)

    def test_delete_photo(self):
        """Ensure we can delete a photo object."""
        url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Duplicate copy of chigre/tests/test_pubgallery.py from the dataset's
# parsed_code column. Fixes: restore the first import (fused onto the
# previous row by the dump) and drop the probability artifact fused onto
# the final assertion line.
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.reverse import reverse
from chigre.models import PubGallery
from chigre.serializers import PubGallerySerializer


class PubGalleryCreateTest(APITestCase):
    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.data = {'title': 'photo', 'description': 'great photo', 'image': 'great.photo.jpg'}

    def test_create_photo(self):
        """Ensure we can create a new photo object."""
        url = reverse('gallery-list')
        response = self.client.post(url, self.data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)


class PubGalleryReadTest(APITestCase):
    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='photo', description='great photo', image='great.photo.jpg', creator=self.superuser)

    def test_read_photos(self):
        """Ensure we can read photos."""
        url = reverse('gallery-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_read_photo(self):
        """Ensure we can read a photo object."""
        url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)


class PubGalleryUpdateTest(APITestCase):
    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='foto', description='great photo', image='great.photo.jpg', creator=self.superuser)
        self.data = PubGallerySerializer(self.photo).data
        self.data.update({'title': 'photo'})

    def test_update_photo(self):
        """Ensure we can update a photo object."""
        url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.put(url, self.data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)


class BreweryDeleteTest(APITestCase):
    # NOTE(review): name says 'Brewery' but this tests PubGallery deletion.
    def setUp(self):
        self.superuser = User.objects.create_superuser('john', '<EMAIL>', '<PASSWORD>')
        self.client.login(username='john', password='<PASSWORD>')
        self.photo = PubGallery.objects.create(title='foto', description='great photo', image='great.photo.jpg', creator=self.superuser)

    def test_delete_photo(self):
        """Ensure we can delete a photo object."""
        url = reverse('gallery-detail', args=[self.photo.id])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
import re
import os
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
class TwitterRequest(object):
    """Thin wrapper around the Twitter search API with TextBlob sentiment."""

    def __init__(self):
        # credentials: two baked-in keys plus two from the environment
        ckey = "<KEY>"
        csecret = os.environ['CON_SECRET']
        atoken = "<KEY>"
        asecret = os.environ['ACC_SECRET']
        try:
            self.auth = OAuthHandler(ckey, csecret)
            self.auth.set_access_token(atoken, asecret)
            self.api = tweepy.API(self.auth)
        except: # pragma no cover
            print('Error: Authentication failed')

    def tweet_prep(self, tweet):
        """Strip mentions/special characters and collapse whitespace."""
        cleaned = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w+:\/\/\S+)", " ", tweet)
        return ' '.join(cleaned.split())

    def tweet_sentiment(self, tweet):
        """Classify a tweet as 'positive', 'neutral' or 'negative'."""
        polarity = TextBlob(self.tweet_prep(tweet)).sentiment.polarity
        if polarity > 0:
            return 'positive'
        if polarity == 0:
            return 'neutral'
        return 'negative'

    def tweet_grab(self, query, count):
        """Search tweets for *query*; retweeted entries are de-duplicated."""
        collected = []
        try:
            for tweet in self.api.search(q=query, count=count):
                entry = {}
                entry['text'] = tweet.text
                entry['sentiment'] = self.tweet_sentiment(tweet.text)
                if tweet.retweet_count > 0:
                    # retweets show up repeatedly in search results
                    if entry not in collected:
                        collected.append(entry)
                else:
                    collected.append(entry)
            return collected
        except tweepy.TweepError: # pragma no cover
            print('Error : ' + str(tweepy.TweepError))
def percentage(number):
    """Format a fraction (e.g. 0.5) as a percent string with two decimals."""
    scaled = 100 * number
    return ("%.2f" % scaled)
def main(query, count=100):
    """Summarize tweet sentiment for *query*.

    Returns [pos_pct, neg_pct, neutral_pct, top5_pos_texts, top5_neg_texts],
    where the percentages are formatted strings.

    Fixes: tweet_grab returns None on API errors and may return an empty
    list; both previously crashed here (TypeError / ZeroDivisionError).
    Also removed a dataset-dump artifact fused onto the return line.
    """
    results = []
    api = TwitterRequest()
    tweets = api.tweet_grab(query=query, count=count) or []
    if not tweets:
        # nothing fetched: report zeros instead of crashing
        return ['0.00', '0.00', '0.00', [], []]
    pos_tweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    results.append(percentage(len(pos_tweets) / len(tweets)))
    neg_tweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    results.append(percentage(len(neg_tweets) / len(tweets)))
    results.append(percentage((len(tweets) - len(neg_tweets) - len(pos_tweets)) / len(tweets)))
    results.append([tweet['text'] for tweet in pos_tweets[:5]])
    results.append([tweet['text'] for tweet in neg_tweets[:5]])
    return results
# Duplicate copy of mood_bot twitter.py from the dataset's parsed_code
# column. Fixes: restore `import re` (fused onto the previous row), guard
# main() against None/empty results, drop the trailing probability artifact.
import re
import os
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob


class TwitterRequest(object):
    """Wrapper around the Twitter search API with TextBlob sentiment."""

    def __init__(self):
        ckey = "<KEY>"
        csecret = os.environ['CON_SECRET']
        atoken = "<KEY>"
        asecret = os.environ['ACC_SECRET']
        try:
            self.auth = OAuthHandler(ckey, csecret)
            self.auth.set_access_token(atoken, asecret)
            self.api = tweepy.API(self.auth)
        except: # pragma no cover
            print('Error: Authentication failed')

    def tweet_prep(self, tweet):
        """Remove mentions/special characters and collapse whitespace."""
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w+:\/\/\S+)", " ", tweet).split())

    def tweet_sentiment(self, tweet):
        """Return 'positive' / 'neutral' / 'negative' for a tweet."""
        analysis = TextBlob(self.tweet_prep(tweet))
        if analysis.sentiment.polarity > 0:
            return 'positive'
        elif analysis.sentiment.polarity == 0:
            return 'neutral'
        else:
            return 'negative'

    def tweet_grab(self, query, count):
        """Search tweets, de-duplicating retweets; None on API error."""
        tweets = []
        try:
            tweets_fetched = self.api.search(q=query, count=count)
            for tweet in tweets_fetched:
                tweets_parsed = {}
                tweets_parsed['text'] = tweet.text
                tweets_parsed['sentiment'] = self.tweet_sentiment(tweet.text)
                if tweet.retweet_count > 0:
                    if tweets_parsed not in tweets:
                        tweets.append(tweets_parsed)
                else:
                    tweets.append(tweets_parsed)
            return tweets
        except tweepy.TweepError: # pragma no cover
            print('Error : ' + str(tweepy.TweepError))


def percentage(number):
    """Format a fraction as a percent string with two decimals."""
    return ("%.2f" % (100 * number))


def main(query, count=100):
    """Summarize tweet sentiment; see copy 1 for the fix rationale."""
    results = []
    api = TwitterRequest()
    tweets = api.tweet_grab(query=query, count=count) or []
    if not tweets:
        # FIX: tweet_grab yields None on API error / [] on no results;
        # previously this crashed with TypeError or ZeroDivisionError.
        return ['0.00', '0.00', '0.00', [], []]
    pos_tweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    results.append(percentage(len(pos_tweets) / len(tweets)))
    neg_tweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    results.append(percentage(len(neg_tweets) / len(tweets)))
    results.append(percentage((len(tweets) - len(neg_tweets) - len(pos_tweets)) / len(tweets)))
    results.append([tweet['text'] for tweet in pos_tweets[:5]])
    results.append([tweet['text'] for tweet in neg_tweets[:5]])
    return results
from math import sqrt, pow, fabs
DBL_MAX = 1.7976931348623158e+308 #taken from Visual C++
DBL_EPS = 0.0000000000001
def euclidean_distance_coords(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return sqrt(pow(dx, 2) + pow(dy, 2))
def euclidean_distance_points(p1, p2):
    """Euclidean distance between two point objects exposing x()/y()."""
    ax, ay = p1.x(), p1.y()
    bx, by = p2.x(), p2.y()
    return euclidean_distance_coords(ax, ay, bx, by)
def euclidean_distance_arrays(x, y):
    """Euclidean distance between two (h, 1) column vectors.

    Returns DBL_MAX when the inputs are not matching column vectors.
    """
    rows_x, cols_x = x.shape
    rows_y, cols_y = y.shape
    if cols_x != 1 or cols_y != 1 or rows_x != rows_y:
        return DBL_MAX
    total = 0.0
    for row in range(rows_x):
        diff = x.item(row, 0) - y.item(row, 0)
        total += pow(diff, 2)
    return sqrt(total)
def fit_parabola_coords(x1, y1, x2, y2, x3, y3):
    """Fit y = A*x^2 + B*x + C through three points.

    Returns (A, B, C), or (-1, -1, -1) when two x coordinates coincide
    (degenerate input).
    """
    denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
    if denom == 0:
        # avoiding zero division
        return -1, -1, -1
    A = (x3*(y2 - y1) + x2*(y1 - y3) + x1*(y3 - y2) ) / denom
    B = (x3*x3*(y1 - y2) + x2*x2*(y3 - y1) + x1*x1*(y2 - y3) ) / denom
    C = (x2*x3*(x2 - x3)*y1 + x3*x1*(x3 - x1)*y2 + x1*x2*(x1 - x2)*y3 ) / denom
    return A, B, C
def fit_parabola_points(p1, p2, p3):
    """Point-object wrapper around fit_parabola_coords (x()/y() accessors)."""
    coords = (p1.x(), p1.y(), p2.x(), p2.y(), p3.x(), p3.y())
    return fit_parabola_coords(*coords)
def in_parabola_coords(A, B, C, x, y):
    """True when (x, y) lies inside the parabola y = A*x^2 + B*x + C.

    For a convex parabola (A > 0) the interior is above the curve,
    otherwise below.
    """
    boundary = A*x*x + B*x + C
    if A > 0:
        return y > boundary
    return y < boundary
def in_parabola_point(A, B, C, p):
    """Point-object version of in_parabola_coords.

    Bug fix: the original forwarded only (x, y) and dropped the A, B, C
    coefficients, so every call raised TypeError (in_parabola_coords takes
    five arguments).
    """
    return in_parabola_coords(A, B, C, p.x(), p.y())
def is_between_parabolas_coords(parabola_1, parabola_2, x, y):
    """True when (x, y) lies inside every given parabola.

    Each parabola is an (A, B, C) triple or None for "no bound"; with both
    None everything passes (returns 1, as the original did).
    """
    if parabola_1 is None and parabola_2 is None:
        return 1
    if parabola_1 is None:
        a, b, c = parabola_2
        return in_parabola_coords(a, b, c, x, y)
    if parabola_2 is None:
        a, b, c = parabola_1
        return in_parabola_coords(a, b, c, x, y)
    # both bounds present; we assume one is convex and the other concave
    a1, b1, c1 = parabola_1
    a2, b2, c2 = parabola_2
    return in_parabola_coords(a1, b1, c1, x, y) and in_parabola_coords(a2, b2, c2, x, y)
def is_between_parabolas_point(parabola_1, parabola_2, p):
    """Point-object wrapper around is_between_parabolas_coords."""
    return is_between_parabolas_coords(parabola_1, parabola_2, p.x(), p.y())
def too_near(x1, y1, x2, y2):
    """True when both coordinate differences fall below DBL_EPS."""
    dx = fabs(x1 - x2)
    dy = fabs(y1 - y2)
    return dx < DBL_EPS and dy < DBL_EPS
def compute_circle_center_coords(x1, y1, x2, y2, x3, y3):
    """Center of the circle through three points, rounded to ints.

    Returns (-1, -1) when any two points (nearly) coincide.
    """
    if too_near(x1, y1, x2, y2) or too_near(x2, y2, x3, y3) or too_near(x3, y3, x1, y1):
        return -1, -1
    d = 2 * (x1*y2 - x2*y1 - x1*y3 + x3*y1 + x2*y3 - x3*y2)
    sq1 = pow(x1, 2.0) + pow(y1, 2.0)
    sq2 = pow(x2, 2.0) + pow(y2, 2.0)
    sq3 = pow(x3, 2.0) + pow(y3, 2.0)
    h = (sq1 * (y2 - y3) + sq2 * (y3 - y1) + sq3 * (y1 - y2)) / d
    k = (sq1 * (x3 - x2) + sq2 * (x1 - x3) + sq3 * (x2 - x1)) / d
    return int(round(h, 0)), int(round(k, 0))
def compute_circle_center_points(p1, p2, p3):
    """Center of the circle through three point objects (x()/y() accessors).

    Fix: removed a dataset-dump artifact that had been fused onto the
    return line, making it a syntax error.
    """
    return compute_circle_center_coords(
        p1.x(), p1.y(), p2.x(), p2.y(), p3.x(), p3.y()
    )
# Duplicate copy of utils/math_utils.py from the dataset's parsed_code
# column. Fixes: restore the math import (fused onto the previous row),
# repair the in_parabola_point call (it dropped A, B, C), and remove the
# probability artifact fused onto the final line.
from math import sqrt, pow, fabs

DBL_MAX = 1.7976931348623158e+308  # taken from Visual C++
DBL_EPS = 0.0000000000001


def euclidean_distance_coords(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    return sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))


def euclidean_distance_points(p1, p2):
    """Euclidean distance between two point objects exposing x()/y()."""
    return euclidean_distance_coords(p1.x(), p1.y(), p2.x(), p2.y())


def euclidean_distance_arrays(x, y):
    """Distance between two (h, 1) column vectors; DBL_MAX on shape mismatch."""
    hx, wx = x.shape
    hy, wy = y.shape
    if wx != 1 or wy != 1 or hx != hy:
        return DBL_MAX
    result = 0.0
    for i in range(hx):
        xi = x.item(i, 0)
        yi = y.item(i, 0)
        result += pow(xi - yi, 2)
    return sqrt(result)


def fit_parabola_coords(x1, y1, x2, y2, x3, y3):
    """Fit y = A*x^2 + B*x + C through three points; (-1,-1,-1) if degenerate."""
    d = (x1 - x2) * (x1 - x3) * (x2 - x3)
    # avoiding zero division
    if d == 0:
        return -1, -1, -1
    A = (x3*(y2 - y1) + x2*(y1 - y3) + x1*(y3 - y2) ) / d
    B = (x3*x3*(y1 - y2) + x2*x2*(y3 - y1) + x1*x1*(y2 - y3) ) / d
    C = (x2*x3*(x2 - x3)*y1 + x3*x1*(x3 - x1)*y2 + x1*x2*(x1 - x2)*y3 ) / d
    return A, B, C


def fit_parabola_points(p1, p2, p3):
    """Point-object wrapper around fit_parabola_coords."""
    return fit_parabola_coords(p1.x(), p1.y(), p2.x(), p2.y(), p3.x(), p3.y())


def in_parabola_coords(A, B, C, x, y):
    """True when (x, y) is inside the parabola (above it when convex)."""
    is_convex = (A > 0)
    yp = A*x*x + B*x + C
    if is_convex:
        return y > yp
    else:
        return y < yp


def in_parabola_point(A, B, C, p):
    """Point-object version of in_parabola_coords.

    BUG FIX: the original called in_parabola_coords(x1, y1), dropping the
    coefficients and raising TypeError on every call.
    """
    return in_parabola_coords(A, B, C, p.x(), p.y())


def is_between_parabolas_coords(parabola_1, parabola_2, x, y):
    """True when (x, y) is inside every given (A, B, C) parabola (None = no bound)."""
    if parabola_1 is None and parabola_2 is None:
        return 1
    if parabola_1 is None:
        A2, B2, C2 = parabola_2
        return in_parabola_coords(A2, B2, C2, x, y)
    if parabola_2 is None:
        A1, B1, C1 = parabola_1
        return in_parabola_coords(A1, B1, C1, x, y)
    # both parabolas are valid and we assume one is convex and the other is concave
    A1, B1, C1 = parabola_1
    A2, B2, C2 = parabola_2
    return in_parabola_coords(A1, B1, C1, x, y) and in_parabola_coords(A2, B2, C2, x, y)


def is_between_parabolas_point(parabola_1, parabola_2, p):
    """Point-object wrapper around is_between_parabolas_coords."""
    return is_between_parabolas_coords(parabola_1, parabola_2, p.x(), p.y())


def too_near(x1, y1, x2, y2):
    """True when both coordinate differences fall below DBL_EPS."""
    return fabs(x1 - x2) < DBL_EPS and fabs(y1 - y2) < DBL_EPS


def compute_circle_center_coords(x1, y1, x2, y2, x3, y3):
    """Center of the circle through three points; (-1, -1) if two coincide."""
    if too_near(x1, y1, x2, y2) or too_near(x2, y2, x3, y3) or too_near(x3, y3, x1, y1):
        return -1, -1
    d = 2 * (x1*y2 - x2*y1 - x1*y3 + x3*y1 + x2*y3 - x3*y2)
    h = ( (pow(x1,2.0) + pow(y1,2.0))*(y2 - y3) + (pow(x2,2.0) + pow(y2,2.0))*(y3 - y1) + (pow(x3,2.0) + pow(y3,2.0))*(y1 - y2) ) / d
    k = ( (pow(x1,2.0) + pow(y1,2.0))*(x3 - x2) + (pow(x2,2.0) + pow(y2,2.0))*(x1 - x3) + (pow(x3,2.0) + pow(y3,2.0))*(x2 - x1) ) / d
    return int(round(h, 0)), int(round(k, 0))


def compute_circle_center_points(p1, p2, p3):
    """Point-object wrapper around compute_circle_center_coords."""
    return compute_circle_center_coords(p1.x(), p1.y(), p2.x(), p2.y(), p3.x(), p3.y())
import numpy as np
import pandas as pd

# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed_onehot.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed_onehot.csv')

# ---------------------------------
# Use argsort() to do index sort
# ---------------------------------
# Arrays can be ordered using index sort into ascending and descending order with argsort()
ary = np.array([10, 20, 30, 0])
idx = ary.argsort()
print(idx)  # Ascending order - [3 0 1 2]
print(idx[::-1])  # Descending order - [2 1 0 3]
print(ary[idx[::-1][:3]])  # Output best three - [30, 20, 10]

# ---------------------------------
# Correlation coefficient
# ---------------------------------
import scipy.stats as st

# Correlation coefficient
corrs = []
for c in train_x.columns:
    corr = np.corrcoef(train_x[c], train_y)[0, 1]
    corrs.append(corr)
corrs = np.array(corrs)

# Spearman's rank correlation coefficient
corrs_sp = []
for c in train_x.columns:
    corr_sp = st.spearmanr(train_x[c], train_y).correlation
    corrs_sp.append(corr_sp)
corrs_sp = np.array(corrs_sp)

# Output in order of top importance (maximum of top 5)
idx = np.argsort(np.abs(corrs))[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], corrs[idx][:5]
print(top_cols, top_importances)

# BUG FIX: the Spearman ranking must use its own order (idx2), not idx.
idx2 = np.argsort(np.abs(corrs_sp))[::-1]
top_cols2, top_importances2 = train_x.columns.values[idx2][:5], corrs_sp[idx2][:5]
print(top_cols2, top_importances2)

# ---------------------------------
# Chi-square statistic
# ---------------------------------
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler

# Chi-square statistic (features must be non-negative, hence the scaling)
x = MinMaxScaler().fit_transform(train_x)
c2, _ = chi2(x, train_y)

# BUG FIX: report the chi-square statistics (c2), not the Pearson corrs.
idx = np.argsort(c2)[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], c2[idx][:5]
print(top_cols, top_importances)

# ---------------------------------
# Mutual information
# ---------------------------------
from sklearn.feature_selection import mutual_info_classif

# Mutual information
mi = mutual_info_classif(train_x, train_y)

# BUG FIX: report the mutual-information values (mi), not the Pearson corrs.
idx = np.argsort(mi)[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], mi[idx][:5]
print(top_cols, top_importances)
# Duplicate copy of ch06-04-filter.py from the dataset's parsed_code column.
# Fixes: restore `import numpy as np` (fused onto the previous row), report
# the correct statistic in each top-5 printout (idx2 / c2 / mi instead of
# the Pearson corrs copy-paste), and strip the trailing artifact.
import numpy as np
import pandas as pd

train = pd.read_csv('../input/sample-data/train_preprocessed_onehot.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed_onehot.csv')

# --- argsort() index-sort demo ---
ary = np.array([10, 20, 30, 0])
idx = ary.argsort()
print(idx)  # Ascending order - [3 0 1 2]
print(idx[::-1])  # Descending order - [2 1 0 3]
print(ary[idx[::-1][:3]])  # Output best three - [30, 20, 10]

# --- Correlation coefficient ---
import scipy.stats as st

corrs = []
for c in train_x.columns:
    corr = np.corrcoef(train_x[c], train_y)[0, 1]
    corrs.append(corr)
corrs = np.array(corrs)

corrs_sp = []
for c in train_x.columns:
    corr_sp = st.spearmanr(train_x[c], train_y).correlation
    corrs_sp.append(corr_sp)
corrs_sp = np.array(corrs_sp)

idx = np.argsort(np.abs(corrs))[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], corrs[idx][:5]
print(top_cols, top_importances)

# BUG FIX: use the Spearman ordering (idx2), not idx
idx2 = np.argsort(np.abs(corrs_sp))[::-1]
top_cols2, top_importances2 = train_x.columns.values[idx2][:5], corrs_sp[idx2][:5]
print(top_cols2, top_importances2)

# --- Chi-square statistic ---
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler

x = MinMaxScaler().fit_transform(train_x)
c2, _ = chi2(x, train_y)

# BUG FIX: print c2 values, not corrs
idx = np.argsort(c2)[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], c2[idx][:5]
print(top_cols, top_importances)

# --- Mutual information ---
from sklearn.feature_selection import mutual_info_classif

mi = mutual_info_classif(train_x, train_y)

# BUG FIX: print mi values, not corrs
idx = np.argsort(mi)[::-1]
top_cols, top_importances = train_x.columns.values[idx][:5], mi[idx][:5]
print(top_cols, top_importances)
import json
import os
import sys
from colorama import Fore, Back, Style
class ConfigFieldMissing(Exception):
    """Raised when a required configuration key is missing or has an invalid value."""
    pass
class Config(dict):
    """Dictionary of tool settings with a validation helper."""

    def checkField(self, name, default=None, hasDefault=False, valuesList=None):
        """Ensure *name* exists (filling *default* when allowed) and that its
        value, when *valuesList* is given, is one of the permitted values.

        Raises ConfigFieldMissing on a missing required key or a value
        outside *valuesList*.
        """
        # a non-None default implies the key is optional
        if default is not None:
            hasDefault = True
        if name not in self:
            if not hasDefault:
                raise ConfigFieldMissing(
                    Fore.RED +
                    f'ERROR: missing key "{name}" in config' +
                    Style.RESET_ALL)
            self[name] = default
            return
        if (valuesList is not None) and (self[name] not in valuesList):
            raise ConfigFieldMissing(Fore.RED + f'ERROR: Value for "{name}" should be one of: ' + (','.join(valuesList)) + Style.RESET_ALL)
def parse_config(robot_folder_path):
    """Load and validate <robot_folder_path>/config.json.

    Returns a Config dict with defaults applied, additional XML loaded,
    dynamics overrides expanded and the output directory verified.
    Raises on a missing config file or an unusable output directory.
    """
    config_path = robot_folder_path + '/config.json'
    if not os.path.exists(config_path):
        raise Exception( Fore.RED + "ERROR: The file " + config_path + " can't be found" + Style.RESET_ALL)
    # Use a context manager so the file handle is not leaked
    with open(config_path) as config_file:
        config = Config(json.load(config_file))
    config['configPath'] = config_path
    config.checkField('documentId')
    config.checkField('versionId', '')
    config.checkField('workspaceId', '')
    config.checkField('drawFrames', False)
    config.checkField('drawCollisions', False)
    config.checkField('assemblyName', False)
    config.checkField('outputFormat', 'urdf', valuesList=['urdf', 'sdf'])
    config.checkField('useFixedLinks', False)
    config.checkField('ignoreLimits', False)
    # Using OpenSCAD for simplified geometry
    config.checkField('useScads', True)
    config.checkField('pureShapeDilatation', 0.0)
    # Dynamics
    config.checkField('jointMaxEffort', 1)
    config.checkField('jointMaxVelocity', 20)
    config.checkField('noDynamics', False)
    # Ignore list
    config.checkField('ignore', [])
    config.checkField('whitelist', None, hasDefault=True)
    # Color override
    config.checkField('color', None, hasDefault=True)
    # STLs merge and simplification
    config.checkField('mergeSTLs', 'no', valuesList=[
        'no', 'visual', 'collision', 'all'])
    config.checkField('maxSTLSize', 3)
    config.checkField('simplifySTLs', 'no', valuesList=[
        'no', 'visual', 'collision', 'all'])
    # Post-import commands to execute
    config.checkField('postImportCommands', [])
    config['outputDirectory'] = robot_folder_path
    config['dynamicsOverride'] = {}
    # Add collisions=true configuration on parts
    config.checkField('useCollisionsConfigurations', True)
    # ROS support
    config.checkField('packageName', '')
    config.checkField('addDummyBaseLink', False)
    config.checkField('robotName', 'onshape')
    # additional XML code to insert
    if config['outputFormat'] == 'urdf':
        config.checkField('additionalUrdfFile', '')
        additionalFileName = config['additionalUrdfFile']
    else:  # outputFormat can only be 'urdf' or 'sdf'
        config.checkField('additionalSdfFile', '')
        # BUG FIX: key was misspelled 'addionalSdfFile', raising KeyError
        # for every sdf export
        additionalFileName = config['additionalSdfFile']
    if additionalFileName == '':
        config['additionalXML'] = ''
    else:
        with open(robot_folder_path + additionalFileName, 'r') as additionalXMLFile:
            config['additionalXML'] = additionalXMLFile.read()
    # Creating dynamics override array
    config.checkField('dynamics', {})
    tmp = config['dynamics']
    for key in tmp:
        if tmp[key] == 'fixed':
            # 'fixed' means a massless link: zero mass and inertia
            config['dynamicsOverride'][key.lower()] = {"com": [0, 0, 0], "mass": 0, "inertia": [
                0, 0, 0, 0, 0, 0, 0, 0, 0]}
        else:
            config['dynamicsOverride'][key.lower()] = tmp[key]
    # Deal with output directory creation/permission verification
    if not (os.path.isdir(config['outputDirectory']) and os.access(config['outputDirectory'], os.W_OK)):
        try:
            os.makedirs(config['outputDirectory'])
        except FileExistsError:
            if os.path.isdir(config['outputDirectory']):
                raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems the directory exists but is not writeable.')
            else:
                raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems there is a file with the same name.')
        except PermissionError:
            raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems there aren\'t sufficient permissions.')
    # Checking that OpenSCAD is present
    if config['useScads']:
        print( Style.BRIGHT + '* Checking OpenSCAD presence...' + Style.RESET_ALL)
        if os.system('openscad -v 2> /dev/null') != 0:
            print(Fore.RED + "Can't run openscad -v, disabling OpenSCAD support" + Style.RESET_ALL)
            config['useScads'] = False
    # Checking that MeshLab is present
    # BUG FIX: 'simplifySTLs' is one of no/visual/collision/all, so a bare
    # truthiness test was always True (even for 'no'); compare against 'no'.
    if config['simplifySTLs'] != 'no':
        print(
            Style.BRIGHT +
            '* Checking MeshLab presence...' +
            Style.RESET_ALL)
        # BUG FIX: os.path.exists returns a bool; the previous "!= 0" was a
        # confusing no-op.
        if not os.path.exists('/usr/bin/meshlabserver'):
            print(Fore.RED + "No /usr/bin/meshlabserver, disabling STL simplification support" + Style.RESET_ALL)
            config['simplifySTLs'] = False
    # versionId and workspaceId are mutually exclusive
    if config['versionId'] != '' and config['workspaceId'] != '':
        # BUG FIX: the color reset was missing, leaking red onto later output
        print(Fore.RED + "You can't specify workspaceId AND versionId" + Style.RESET_ALL)
return config | onshape_to_robot/config.py | import json
import os
import sys
from colorama import Fore, Back, Style
class ConfigFieldMissing(Exception):
    """Raised when a required config key is absent or holds an invalid value."""
    pass
class Config(dict):
    """A dict of configuration entries with required/optional field checking."""

    def checkField( self, name, default=None, hasDefault=False, valuesList=None):
        """Validate that *name* is present, applying *default* when allowed.

        A non-None *default* implies the field is optional. Raises
        ConfigFieldMissing when the key is absent without a default, or when
        its value is not one of *valuesList*.
        """
        if default is not None:
            hasDefault = True
        if name not in self:
            if not hasDefault:
                raise ConfigFieldMissing(
                    Fore.RED +
                    f'ERROR: missing key "{name}" in config' +
                    Style.RESET_ALL)
            self[name] = default
            return
        # Key exists: only check membership in the allowed value list
        if valuesList is not None and self[name] not in valuesList:
            raise ConfigFieldMissing(Fore.RED + f'ERROR: Value for "{name}" should be one of: ' + (','.join(valuesList)) + Style.RESET_ALL)
def parse_config(robot_folder_path):
    """Load and validate <robot_folder_path>/config.json.

    Returns a Config dict with defaults applied, additional XML loaded,
    dynamics overrides expanded and the output directory verified.
    Raises on a missing config file or an unusable output directory.
    """
    config_path = robot_folder_path + '/config.json'
    if not os.path.exists(config_path):
        raise Exception( Fore.RED + "ERROR: The file " + config_path + " can't be found" + Style.RESET_ALL)
    # Use a context manager so the file handle is not leaked
    with open(config_path) as config_file:
        config = Config(json.load(config_file))
    config['configPath'] = config_path
    config.checkField('documentId')
    config.checkField('versionId', '')
    config.checkField('workspaceId', '')
    config.checkField('drawFrames', False)
    config.checkField('drawCollisions', False)
    config.checkField('assemblyName', False)
    config.checkField('outputFormat', 'urdf', valuesList=['urdf', 'sdf'])
    config.checkField('useFixedLinks', False)
    config.checkField('ignoreLimits', False)
    # Using OpenSCAD for simplified geometry
    config.checkField('useScads', True)
    config.checkField('pureShapeDilatation', 0.0)
    # Dynamics
    config.checkField('jointMaxEffort', 1)
    config.checkField('jointMaxVelocity', 20)
    config.checkField('noDynamics', False)
    # Ignore list
    config.checkField('ignore', [])
    config.checkField('whitelist', None, hasDefault=True)
    # Color override
    config.checkField('color', None, hasDefault=True)
    # STLs merge and simplification
    config.checkField('mergeSTLs', 'no', valuesList=[
        'no', 'visual', 'collision', 'all'])
    config.checkField('maxSTLSize', 3)
    config.checkField('simplifySTLs', 'no', valuesList=[
        'no', 'visual', 'collision', 'all'])
    # Post-import commands to execute
    config.checkField('postImportCommands', [])
    config['outputDirectory'] = robot_folder_path
    config['dynamicsOverride'] = {}
    # Add collisions=true configuration on parts
    config.checkField('useCollisionsConfigurations', True)
    # ROS support
    config.checkField('packageName', '')
    config.checkField('addDummyBaseLink', False)
    config.checkField('robotName', 'onshape')
    # additional XML code to insert
    if config['outputFormat'] == 'urdf':
        config.checkField('additionalUrdfFile', '')
        additionalFileName = config['additionalUrdfFile']
    else:  # outputFormat can only be 'urdf' or 'sdf'
        config.checkField('additionalSdfFile', '')
        # BUG FIX: key was misspelled 'addionalSdfFile', raising KeyError
        # for every sdf export
        additionalFileName = config['additionalSdfFile']
    if additionalFileName == '':
        config['additionalXML'] = ''
    else:
        with open(robot_folder_path + additionalFileName, 'r') as additionalXMLFile:
            config['additionalXML'] = additionalXMLFile.read()
    # Creating dynamics override array
    config.checkField('dynamics', {})
    tmp = config['dynamics']
    for key in tmp:
        if tmp[key] == 'fixed':
            # 'fixed' means a massless link: zero mass and inertia
            config['dynamicsOverride'][key.lower()] = {"com": [0, 0, 0], "mass": 0, "inertia": [
                0, 0, 0, 0, 0, 0, 0, 0, 0]}
        else:
            config['dynamicsOverride'][key.lower()] = tmp[key]
    # Deal with output directory creation/permission verification
    if not (os.path.isdir(config['outputDirectory']) and os.access(config['outputDirectory'], os.W_OK)):
        try:
            os.makedirs(config['outputDirectory'])
        except FileExistsError:
            if os.path.isdir(config['outputDirectory']):
                raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems the directory exists but is not writeable.')
            else:
                raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems there is a file with the same name.')
        except PermissionError:
            raise Exception(f'The output directory {config["outputDirectory"]} cannot be used, it seems there aren\'t sufficient permissions.')
    # Checking that OpenSCAD is present
    if config['useScads']:
        print( Style.BRIGHT + '* Checking OpenSCAD presence...' + Style.RESET_ALL)
        if os.system('openscad -v 2> /dev/null') != 0:
            print(Fore.RED + "Can't run openscad -v, disabling OpenSCAD support" + Style.RESET_ALL)
            config['useScads'] = False
    # Checking that MeshLab is present
    # BUG FIX: 'simplifySTLs' is one of no/visual/collision/all, so a bare
    # truthiness test was always True (even for 'no'); compare against 'no'.
    if config['simplifySTLs'] != 'no':
        print(
            Style.BRIGHT +
            '* Checking MeshLab presence...' +
            Style.RESET_ALL)
        # BUG FIX: os.path.exists returns a bool; the previous "!= 0" was a
        # confusing no-op.
        if not os.path.exists('/usr/bin/meshlabserver'):
            print(Fore.RED + "No /usr/bin/meshlabserver, disabling STL simplification support" + Style.RESET_ALL)
            config['simplifySTLs'] = False
    # versionId and workspaceId are mutually exclusive
    if config['versionId'] != '' and config['workspaceId'] != '':
        # BUG FIX: the color reset was missing, leaking red onto later output
        print(Fore.RED + "You can't specify workspaceId AND versionId" + Style.RESET_ALL)
return config | 0.19787 | 0.080177 |
import paho.mqtt.client as mqtt
import time
import urllib3
from urllib.parse import quote
import signal
import sys
# Conf
# MQTT broker address and the motionEye camera config file for each camera id
broker = "10.0.0.1"
conf = {1 : "/etc/motioneye/camera-1.conf", 2: "/etc/motioneye/camera-2.conf", 3 : "/etc/motioneye/camera-3.conf", 4 : "/etc/motioneye/camera-4.conf"}
# MQTT topics carrying the outdoor and garage temperatures
outdoortemptopic = "weather/tempnow"
garagetemptopic = "garage/temp"
def signal_handler(sig, frame):
    """Exit cleanly on SIGINT instead of printing a traceback."""
    sys.exit(0)
def getname(conffile):
    """Return the camera name (the 'text_left' entry) of a motionEye conf file.

    Exits the program when the file cannot be read or lacks 'text_left'.
    """
    try:
        # Map "key value..." lines to {key: [value words]}; use a context
        # manager so the file handle is not leaked, and avoid shadowing the
        # builtin `dict`. (Parsing idea from https://stackoverflow.com/a/52719066)
        with open(conffile) as f:
            entries = {k: v for k, *v in (l.split(' ') for l in f)}
        return ''.join(entries["text_left"]).strip()
    except (OSError, KeyError, TypeError):
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt
        print("Error reading configuration file " + conffile)
        sys.exit(1)
# Outdoor temperature is updated to four cameras
def on_outdoortemp_message(client, userdata, message):
    """MQTT callback: push the outdoor temperature to cameras 1, 3 and 4."""
    val = str(message.payload.decode("utf-8"))
    for camera in (1, 3, 4):
        update(camera, val)
def on_garagetemp_message(client, userdata, message):
    """MQTT callback: push the garage temperature to camera 2."""
    update(2, str(message.payload.decode("utf-8")))
# This does the actual work of updating the overlay text
def update(camera, value):
    """Set the left overlay text of *camera* to its name plus *value* C.

    Best effort: failures to reach the motionEye API are ignored.
    """
    motioneyeurl = "http://localhost:7999/" + str(camera) + "/config/set?text_left=" + name[camera]
    url = motioneyeurl + quote("\\n" + value + "C")
    try:
        http.request('GET', url)
    except Exception:
        # Keep running when motionEye is temporarily unreachable, but no
        # longer swallow KeyboardInterrupt/SystemExit (the bare except did)
        pass
# This catches INT signal for preventing ugly traces on exit
signal.signal(signal.SIGINT, signal_handler)
# Dig names for each camera
name = {1 : getname(conf[1]), 2 : getname(conf[2]), 3 : getname(conf[3]), 4 : getname(conf[4])}
# Shared HTTP connection pool used by update()
http = urllib3.PoolManager()
# One MQTT client per subscribed topic
outdoortempclient = mqtt.Client("motioneye1")
garagetempclient = mqtt.Client("motioneye2")
outdoortempclient.on_message=on_outdoortemp_message
garagetempclient.on_message=on_garagetemp_message
try:
    outdoortempclient.connect(broker)
    garagetempclient.connect(broker)
except Exception:
    # BUG FIX: narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit
    print("Error connecting to broker")
    sys.exit(1)
outdoortempclient.subscribe(outdoortemptopic)
garagetempclient.subscribe(garagetemptopic)
# First loop is started in the background
outdoortempclient.loop_start()
# Second loop blocks here
garagetempclient.loop_forever() | motion-temperature.py |
import paho.mqtt.client as mqtt
import time
import urllib3
from urllib.parse import quote
import signal
import sys
# Conf
# MQTT broker address and the motionEye camera config file for each camera id
broker = "10.0.0.1"
conf = {1 : "/etc/motioneye/camera-1.conf", 2: "/etc/motioneye/camera-2.conf", 3 : "/etc/motioneye/camera-3.conf", 4 : "/etc/motioneye/camera-4.conf"}
# MQTT topics carrying the outdoor and garage temperatures
outdoortemptopic = "weather/tempnow"
garagetemptopic = "garage/temp"
def signal_handler(sig, frame):
    """Exit cleanly on SIGINT instead of printing a traceback."""
    sys.exit(0)
def getname(conffile):
    """Return the camera name (the 'text_left' entry) of a motionEye conf file.

    Exits the program when the file cannot be read or lacks 'text_left'.
    """
    try:
        # Map "key value..." lines to {key: [value words]}; use a context
        # manager so the file handle is not leaked, and avoid shadowing the
        # builtin `dict`. (Parsing idea from https://stackoverflow.com/a/52719066)
        with open(conffile) as f:
            entries = {k: v for k, *v in (l.split(' ') for l in f)}
        return ''.join(entries["text_left"]).strip()
    except (OSError, KeyError, TypeError):
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt
        print("Error reading configuration file " + conffile)
        sys.exit(1)
# Outdoor temperature is updated to four cameras
def on_outdoortemp_message(client, userdata, message):
    """MQTT callback: push the outdoor temperature to cameras 1, 3 and 4."""
    val = str(message.payload.decode("utf-8"))
    for camera in (1, 3, 4):
        update(camera, val)
def on_garagetemp_message(client, userdata, message):
    """MQTT callback: push the garage temperature to camera 2."""
    update(2, str(message.payload.decode("utf-8")))
# This does the actual work of updating the overlay text
def update(camera, value):
    """Set the left overlay text of *camera* to its name plus *value* C.

    Best effort: failures to reach the motionEye API are ignored.
    """
    motioneyeurl = "http://localhost:7999/" + str(camera) + "/config/set?text_left=" + name[camera]
    url = motioneyeurl + quote("\\n" + value + "C")
    try:
        http.request('GET', url)
    except Exception:
        # Keep running when motionEye is temporarily unreachable, but no
        # longer swallow KeyboardInterrupt/SystemExit (the bare except did)
        pass
# This catches INT signal for preventing ugly traces on exit
signal.signal(signal.SIGINT, signal_handler)
# Dig names for each camera
name = {1 : getname(conf[1]), 2 : getname(conf[2]), 3 : getname(conf[3]), 4 : getname(conf[4])}
# Shared HTTP connection pool used by update()
http = urllib3.PoolManager()
# One MQTT client per subscribed topic
outdoortempclient = mqtt.Client("motioneye1")
garagetempclient = mqtt.Client("motioneye2")
outdoortempclient.on_message=on_outdoortemp_message
garagetempclient.on_message=on_garagetemp_message
try:
    outdoortempclient.connect(broker)
    garagetempclient.connect(broker)
except Exception:
    # BUG FIX: narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit
    print("Error connecting to broker")
    sys.exit(1)
outdoortempclient.subscribe(outdoortemptopic)
garagetempclient.subscribe(garagetemptopic)
# First loop is started in the background
outdoortempclient.loop_start()
# Second loop blocks here
garagetempclient.loop_forever()
import numpy as np
from utils.cython_bbox import bbox_overlaps
from mnc_config import cfg
def compute_targets(rois, overlaps, labels):
    """
    Compute bounding-box regression targets for an image.

    Returns an (n_rois, 5) float32 array whose rows are
    (class label, tx, ty, tw, th); rows of ROIs below the overlap
    threshold stay zero.
    """
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
    # Get IoU overlap each ex ROI and gt ROI
    # BUG FIX: np.float was removed in NumPy 1.20; use the builtin float
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=float))
    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
def bbox_transform(ex_rois, gt_rois):
    """Return (dx, dy, dw, dh) regression targets mapping ex_rois to gt_rois."""
    # Widths, heights and centers of the external boxes (inclusive coords)
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h
    # Same quantities for the ground-truth boxes
    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h
    # Center offsets are normalized by the ex box size; scales are log-ratios
    return np.column_stack((
        (gt_cx - ex_cx) / ex_w,
        (gt_cy - ex_cy) / ex_h,
        np.log(gt_w / ex_w),
        np.log(gt_h / ex_h),
    ))
def bbox_transform_inv(boxes, deltas):
    """Invert the bbox transform: apply regression deltas to anchors/boxes."""
    if boxes.shape[0] == 0:
        return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
    boxes = boxes.astype(deltas.dtype, copy=False)
    # Geometry of the source boxes (inclusive pixel coordinates)
    w = boxes[:, 2] - boxes[:, 0] + 1.0
    h = boxes[:, 3] - boxes[:, 1] + 1.0
    cx = boxes[:, 0] + 0.5 * w
    cy = boxes[:, 1] + 0.5 * h
    # Every 4th column belongs to one coordinate (one 4-tuple per class)
    dx, dy = deltas[:, 0::4], deltas[:, 1::4]
    dw, dh = deltas[:, 2::4], deltas[:, 3::4]
    # Shift the centers and exponentiate the log-scale factors
    pcx = dx * w[:, np.newaxis] + cx[:, np.newaxis]
    pcy = dy * h[:, np.newaxis] + cy[:, np.newaxis]
    pw = np.exp(dw) * w[:, np.newaxis]
    ph = np.exp(dh) * h[:, np.newaxis]
    out = np.zeros(deltas.shape, dtype=deltas.dtype)
    out[:, 0::4] = pcx - 0.5 * pw  # x1
    out[:, 1::4] = pcy - 0.5 * ph  # y1
    out[:, 2::4] = pcx + 0.5 * pw  # x2
    out[:, 3::4] = pcy + 0.5 * ph  # y2
    return out
def clip_boxes(boxes, im_shape):
    """Clip boxes to the image; also report which boxes were already inside.

    Returns (clipped_boxes, keep) where keep holds the row indices of the
    boxes that were fully inside the image before clipping.
    """
    max_x, max_y = im_shape[1] - 1, im_shape[0] - 1
    x1, y1 = boxes[:, 0::4], boxes[:, 1::4]
    x2, y2 = boxes[:, 2::4], boxes[:, 3::4]
    inside = (x1 >= 0) & (y1 >= 0) & (x2 <= max_x) & (y2 <= max_y)
    keep = np.where(inside)[0]
    clipped_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
    clipped_boxes[:, 0::4] = np.clip(x1, 0, max_x)
    clipped_boxes[:, 1::4] = np.clip(y1, 0, max_y)
    clipped_boxes[:, 2::4] = np.clip(x2, 0, max_x)
    clipped_boxes[:, 3::4] = np.clip(y2, 0, max_y)
    return clipped_boxes, keep
def filter_small_boxes(boxes, min_size):
    """Return indices of boxes whose width and height are both >= min_size."""
    widths = boxes[:, 2] - boxes[:, 0] + 1
    heights = boxes[:, 3] - boxes[:, 1] + 1
    big_enough = np.logical_and(widths >= min_size, heights >= min_size)
    return np.where(big_enough)[0]
def scale_boxes(boxes, alpha):
    """
    Scale each box's width/height by alpha, keeping its center fixed.

    Args:
        boxes: (n, 4) array of x1, y1, x2, y2 coordinates
        alpha: scaling factor
    Returns:
        (n, 4) array of the rescaled boxes (same dtype as the input)
    """
    w = boxes[:, 2] - boxes[:, 0] + 1
    h = boxes[:, 3] - boxes[:, 1] + 1
    cx = boxes[:, 0] + 0.5 * w
    cy = boxes[:, 1] + 0.5 * h
    half_w = 0.5 * (w * alpha)
    half_h = 0.5 * (h * alpha)
    scaled_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
    scaled_boxes[:, 0] = cx - half_w
    scaled_boxes[:, 1] = cy - half_h
    scaled_boxes[:, 2] = cx + half_w
    scaled_boxes[:, 3] = cy + half_h
    return scaled_boxes
def bbox_compute_targets(ex_rois, gt_rois, normalize):
    """
    Compute bounding-box regression targets for an image
    Parameters:
    -----------
    ex_rois: ROIs from external source (anchors or proposals), paired 1:1 with gt_rois
    gt_rois: ground truth ROIs
    normalize: whether normalize box (since RPN doesn't need to normalize)
    Returns:
    -----------
    (n, 4) float32 array of (dx, dy, dw, dh) targets, one row per ROI pair
    """
    # The two ROI sets must already be matched row-by-row
    assert ex_rois.shape == gt_rois.shape
    targets = bbox_transform(ex_rois, gt_rois)
    if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED and normalize:
        # Optionally normalize targets by a precomputed mean and std
        targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)) /
                   np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
    return targets.astype(np.float32, copy=False)
def get_bbox_regression_label(bbox_target_data, num_class):
    """Bounding-box regression targets (bbox_target_data) are stored in a
    compact form N x (class, tx, ty, tw, th)
    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets).
    Returns:
        bbox_target (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    assert bbox_target_data.shape[1] == 5
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_class), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        # BUG FIX: clss holds floats; cast to int so the slice bounds below
        # are valid indices (float slice indices raise TypeError in NumPy)
        cls = int(clss[ind])
        start = 4 * cls
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights | lib/transform/bbox_transform.py |
import numpy as np
from utils.cython_bbox import bbox_overlaps
from mnc_config import cfg
def compute_targets(rois, overlaps, labels):
    """
    Compute bounding-box regression targets for an image.

    Returns an (n_rois, 5) float32 array whose rows are
    (class label, tx, ty, tw, th); rows of ROIs below the overlap
    threshold stay zero.
    """
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
    # Get IoU overlap each ex ROI and gt ROI
    # BUG FIX: np.float was removed in NumPy 1.20; use the builtin float
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=float))
    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
def bbox_transform(ex_rois, gt_rois):
    """Return (dx, dy, dw, dh) regression targets mapping ex_rois to gt_rois."""
    # Widths, heights and centers of the external boxes (inclusive coords)
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h
    # Same quantities for the ground-truth boxes
    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h
    # Center offsets are normalized by the ex box size; scales are log-ratios
    return np.column_stack((
        (gt_cx - ex_cx) / ex_w,
        (gt_cy - ex_cy) / ex_h,
        np.log(gt_w / ex_w),
        np.log(gt_h / ex_h),
    ))
def bbox_transform_inv(boxes, deltas):
    """Invert the bbox transform: apply regression deltas to anchors/boxes."""
    if boxes.shape[0] == 0:
        return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
    boxes = boxes.astype(deltas.dtype, copy=False)
    # Geometry of the source boxes (inclusive pixel coordinates)
    w = boxes[:, 2] - boxes[:, 0] + 1.0
    h = boxes[:, 3] - boxes[:, 1] + 1.0
    cx = boxes[:, 0] + 0.5 * w
    cy = boxes[:, 1] + 0.5 * h
    # Every 4th column belongs to one coordinate (one 4-tuple per class)
    dx, dy = deltas[:, 0::4], deltas[:, 1::4]
    dw, dh = deltas[:, 2::4], deltas[:, 3::4]
    # Shift the centers and exponentiate the log-scale factors
    pcx = dx * w[:, np.newaxis] + cx[:, np.newaxis]
    pcy = dy * h[:, np.newaxis] + cy[:, np.newaxis]
    pw = np.exp(dw) * w[:, np.newaxis]
    ph = np.exp(dh) * h[:, np.newaxis]
    out = np.zeros(deltas.shape, dtype=deltas.dtype)
    out[:, 0::4] = pcx - 0.5 * pw  # x1
    out[:, 1::4] = pcy - 0.5 * ph  # y1
    out[:, 2::4] = pcx + 0.5 * pw  # x2
    out[:, 3::4] = pcy + 0.5 * ph  # y2
    return out
def clip_boxes(boxes, im_shape):
    """Clip boxes to the image; also report which boxes were already inside.

    Returns (clipped_boxes, keep) where keep holds the row indices of the
    boxes that were fully inside the image before clipping.
    """
    max_x, max_y = im_shape[1] - 1, im_shape[0] - 1
    x1, y1 = boxes[:, 0::4], boxes[:, 1::4]
    x2, y2 = boxes[:, 2::4], boxes[:, 3::4]
    inside = (x1 >= 0) & (y1 >= 0) & (x2 <= max_x) & (y2 <= max_y)
    keep = np.where(inside)[0]
    clipped_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
    clipped_boxes[:, 0::4] = np.clip(x1, 0, max_x)
    clipped_boxes[:, 1::4] = np.clip(y1, 0, max_y)
    clipped_boxes[:, 2::4] = np.clip(x2, 0, max_x)
    clipped_boxes[:, 3::4] = np.clip(y2, 0, max_y)
    return clipped_boxes, keep
def filter_small_boxes(boxes, min_size):
    """Return indices of boxes whose width and height are both >= min_size."""
    widths = boxes[:, 2] - boxes[:, 0] + 1
    heights = boxes[:, 3] - boxes[:, 1] + 1
    big_enough = np.logical_and(widths >= min_size, heights >= min_size)
    return np.where(big_enough)[0]
def scale_boxes(boxes, alpha):
    """
    Scale each box's width/height by alpha, keeping its center fixed.

    Args:
        boxes: (n, 4) array of x1, y1, x2, y2 coordinates
        alpha: scaling factor
    Returns:
        (n, 4) array of the rescaled boxes (same dtype as the input)
    """
    w = boxes[:, 2] - boxes[:, 0] + 1
    h = boxes[:, 3] - boxes[:, 1] + 1
    cx = boxes[:, 0] + 0.5 * w
    cy = boxes[:, 1] + 0.5 * h
    half_w = 0.5 * (w * alpha)
    half_h = 0.5 * (h * alpha)
    scaled_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
    scaled_boxes[:, 0] = cx - half_w
    scaled_boxes[:, 1] = cy - half_h
    scaled_boxes[:, 2] = cx + half_w
    scaled_boxes[:, 3] = cy + half_h
    return scaled_boxes
def bbox_compute_targets(ex_rois, gt_rois, normalize):
    """
    Compute bounding-box regression targets for an image
    Parameters:
    -----------
    ex_rois: ROIs from external source (anchors or proposals), paired 1:1 with gt_rois
    gt_rois: ground truth ROIs
    normalize: whether normalize box (since RPN doesn't need to normalize)
    Returns:
    -----------
    (n, 4) float32 array of (dx, dy, dw, dh) targets, one row per ROI pair
    """
    # The two ROI sets must already be matched row-by-row
    assert ex_rois.shape == gt_rois.shape
    targets = bbox_transform(ex_rois, gt_rois)
    if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED and normalize:
        # Optionally normalize targets by a precomputed mean and std
        targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)) /
                   np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
    return targets.astype(np.float32, copy=False)
def get_bbox_regression_label(bbox_target_data, num_class):
    """Bounding-box regression targets (bbox_target_data) are stored in a
    compact form N x (class, tx, ty, tw, th)
    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets).
    Returns:
        bbox_target (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    assert bbox_target_data.shape[1] == 5
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_class), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        # BUG FIX: clss holds floats; cast to int so the slice bounds below
        # are valid indices (float slice indices raise TypeError in NumPy)
        cls = int(clss[ind])
        start = 4 * cls
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights | 0.866641 | 0.683532 |
import logging
from typing import Optional
import requests
from .apipath import APIPath
LOG = logging.getLogger(__name__)
class RocketChat(object):
"""
Python interface to the RocketChat REST API.
"""
def __init__(
self, url: str, username: Optional[str] = None, password: Optional[str] = None
):
self.url = url
self.api_v1_path = "/api/v1/"
self.user_id = None
self.auth_token = None
self.login(username=username, password=password)
# fmt:off
self.me = APIPath(self, "me")
self.info = APIPath(self, "info", result_key="info", auth=False, api_root="/api/")
self.directory = APIPath(self, "directory")
self.spotlight = APIPath(self, "spotlight")
self.statistics = APIPath(self, "statistics")
self.statistics.list = APIPath(self, "statistics.list")
self.assets = APIPath(self, "assets", None)
self.assets.setAsset = APIPath(self, "assets.setAsset", "POST", result_key="success")
self.assets.unsetAsset = APIPath(self, "assets.unsetAsset", "POST", result_key="success")
self.autotranslate = APIPath(self, "autotranslate", None)
self.autotranslate.getSupportedLanguages = APIPath(self, "autotranslate.getSupportedLanguages", result_key="languages")
self.autotranslate.saveSettings = APIPath(self, "autotranslate.saveSettings", "POST", result_key="success")
self.autotranslate.translateMessage = APIPath(self, "autotranslate.translateMessage", "POST", result_key="message")
self.logout = APIPath(self, "logout", "POST")
self.users = APIPath(self, "users", None)
self.users.presence = APIPath(self, "users.presence")
self.users.create = APIPath(self, "users.create", "POST", result_key="user")
self.users.createToken = APIPath(self, "users.createToken", "POST", result_key="data")
self.users.delete = APIPath(self, "users.delete", "POST", result_key="success")
self.users.deleteOwnAccount = APIPath(self, "users.deleteOwnAccount", "POST")
self.users.forgotPassword = APIPath(self, "users.forgotPassword", "POST")
self.users.generatePersonalAccessToken = APIPath(self, "users.generatePersonalAccessToken", "POST")
self.users.getAvatar = APIPath(self, "users.getAvatar", "GET")
self.users.getPersonalAccessTokens = APIPath(self, "users.getPersonalAccessTokens", "GET")
self.users.getPreferences = APIPath(self, "users.getPreferences", "GET")
self.users.getPresence = APIPath(self, "users.getPresence", "GET", result_key="presence")
self.users.getUsernameSuggestion = APIPath(self, "users.getUsernameSuggestion", "GET")
self.users.info = APIPath(self, "users.info", "GET", result_key="user")
self.users.list = APIPath(self, "users.list", "GET")
self.users.regeneratePersonalAccessToken = APIPath(self, "users.regeneratePersonalAccessToken", "POST")
self.users.register = APIPath(self, "users.register", "POST", result_key="user")
self.users.removePersonalAccessToken = APIPath(self, "users.removePersonalAccessToken", "POST")
self.users.requestDataDownload = APIPath(self, "users.requestDataDownload")
self.users.resetAvatar = APIPath(self, "users.resetAvatar", "POST", result_key="success")
self.users.setAvatar = APIPath(self, "users.setAvatar", "POST", result_key="success")
self.users.setPreferences = APIPath(self, "users.setPreferences", "POST")
self.users.setActiveStatus = APIPath(self, "users.setActiveStatus", "POST")
self.users.update = APIPath(self, "users.update", "POST", result_key="user")
self.users.updateOwnBasicInfo = APIPath(self, "users.updateOwnBasicInfo", "POST")
self.channels = APIPath(self, "channels", None)
self.channels.addAll = APIPath(self, "channels.addAll", "POST", result_key="channel")
self.channels.addLeader = APIPath(self, "channels.addLeader", "POST")
self.channels.anonymousread = APIPath(self, "channels.anonymousread")
self.channels.archive = APIPath(self, "channels.archive", "POST", result_key="success")
self.channels.cleanHistory = APIPath(self, "channels.cleanHistory", "POST", result_key="success")
self.channels.close = APIPath(self, "channels.close", "POST", result_key="success")
self.channels.counters = APIPath(self, "channels.counters")
self.channels.create = APIPath(self, "channels.create", "POST", result_key="channel")
self.channels.delete = APIPath(self, "channels.delete", "POST")
self.channels.files = APIPath(self, "channels.files")
self.channels.getAllUserMentionsByChannel = APIPath(self, "channels.getAllUserMentionsByChannel")
self.channels.getIntegrations = APIPath(self, "channels.getIntegrations", "GET", result_key="integrations")
self.channels.history = APIPath(self, "channels.history", "GET", result_key="messages")
self.channels.info = APIPath(self, "channels.info", "GET", result_key="channel")
self.channels.invite = APIPath(self, "channels.invite", "POST", result_key="channel")
self.channels.join = APIPath(self, "channels.join", "POST")
self.channels.kick = APIPath(self, "channels.kick", "POST", result_key="channel")
self.channels.leave = APIPath(self, "channels.leave", "POST", result_key="channel")
self.channels.list = APIPath(self, "channels.list", "GET", result_key="channels")
self.channels.list.joined = APIPath(self, "channels.list.joined", "GET", result_key="channels")
self.channels.members = APIPath(self, "channels.members")
self.channels.messages = APIPath(self, "channels.mesages")
self.channels.moderators = APIPath(self, "channels.moderators")
self.channels.online = APIPath(self, "channels.online")
self.channels.open = APIPath(self, "channels.open", "POST", result_key="success")
self.channels.removeLeader = APIPath(self, "channels.removeLeader", "POST")
self.channels.rename = APIPath(self, "channels.rename", "POST", result_key="channel")
self.channels.roles = APIPath(self, "channels.roles")
self.channels.setCustomFields = APIPath(self, "channels.setCustomFields", "POST")
self.channels.setAnnouncement = APIPath(self, "channels.setAnnouncement", "POST")
self.channels.setDefault = APIPath(self, "channels.setDefault", "POST")
self.channels.setDescription = APIPath(self, "channels.setDescription", "POST", result_key="description")
self.channels.setJoinCode = APIPath(self, "channels.setJoinCode", "POST", result_key="channel")
self.channels.setPurpose = APIPath(self, "channels.setPurpose", "POST", result_key="purpose")
self.channels.setReadOnly = APIPath(self, "channels.setReadOnly", "POST", result_key="channel")
self.channels.setTopic = APIPath(self, "channels.setTopic", "POST", result_key="topic")
self.channels.setType = APIPath(self, "channels.setType", "POST", result_key="channel")
self.channels.unarchive = APIPath(self, "channels.unarchive", "POST", result_key="success")
self.groups = APIPath(self, "groups", None)
self.groups.archive = APIPath(self, "groups.archive", "POST")
self.groups.addLeader = APIPath(self, "groups.addLeader", "POST")
self.groups.close = APIPath(self, "groups.close", "POST")
self.groups.create = APIPath(self, "groups.create", "POST")
self.groups.delete = APIPath(self, "groups.delete", "POST")
self.groups.files = APIPath(self, "groups.files", "POST")
self.groups.history = APIPath(self, "groups.history", "GET")
self.groups.info = APIPath(self, "groups.info", "GET")
self.groups.invite = APIPath(self, "groups.invite", "POST")
self.groups.kick = APIPath(self, "groups.kick", "POST")
self.groups.leave = APIPath(self, "groups.leave", "POST")
self.groups.list = APIPath(self, "groups.list", "GET")
self.groups.listAll = APIPath(self, "groups.listAll")
self.groups.members = APIPath(self, "groups.members")
self.groups.messages = APIPath(self, "groups.messages")
self.groups.moderators = APIPath(self, "groups.moderators")
self.groups.open = APIPath(self, "groups.open", "POST")
self.groups.removeLeader = APIPath(self, "groups.removeLeader", "POST")
self.groups.rename = APIPath(self, "groups.rename", "POST")
self.groups.roles = APIPath(self, "groups.roles")
self.groups.setAnnouncement = APIPath(self, "groups.setAnnouncement", "POST")
self.groups.setCustomFields = APIPath(self, "groups.setCustomFields", "POST")
self.groups.setDescription = APIPath(self, "groups.setDescription", "POST")
self.groups.setPurpose = APIPath(self, "groups.setPurpose", "POST")
self.groups.setReadOnly = APIPath(self, "groups.setReadOnly", "POST")
self.groups.setTopic = APIPath(self, "groups.setTopic", "POST")
self.groups.setType = APIPath(self, "groups.setType", "POST", result_key="group")
self.groups.unarchive = APIPath(self, "groups.unarchive", "POST")
self.chat = APIPath(self, "chat", None)
self.chat.delete = APIPath(self, "chat.delete", "POST")
self.chat.followMessage = APIPath(self, "chat.followMessage", "POST")
self.chat.getDeletedMessages = APIPath(self, "chat.getDeletedMessages")
self.chat.getDiscussions = APIPath(self, "chat.getDiscussions")
self.chat.getMentionedMessages = APIPath(self, "chat.getMentionedMessages")
self.chat.getMessage = APIPath(self, "chat.getMessage", "POST")
self.chat.getMessageReadReceipts = APIPath(self, "chat.getMessageReadReceipts")
self.chat.getPinnedMessages = APIPath(self, "chat.getPinnedMessages")
self.chat.getSnippetedMessages = APIPath(self, "chat.getSnippetedMessages")
self.chat.getSnippetedMessageById = APIPath(self, "chat.getSnippetedMessageById")
self.chat.getStarredMessages = APIPath(self, "chat.getStarredMessages")
self.chat.getThreadsList = APIPath(self, "chat.getThreadsList")
self.chat.ignoreUser = APIPath(self, "chat.ignoreUser")
self.chat.pinMessage = APIPath(self, "chat.pinMessage", "POST")
self.chat.postMessage = APIPath(self, "chat.postMessage", "POST")
self.chat.react = APIPath(self, "chat.react", "POST")
self.chat.reportMessage = APIPath(self, "chat.reportMessage", "POST")
self.chat.search = APIPath(self, "chat.search", "POST")
self.chat.starMessage = APIPath(self, "chat.starMessage", "POST")
self.chat.sendMessage = APIPath(self, "chat.sendMessage", "POST")
self.chat.syncThreadMessages = APIPath(self, "chat.syncThreadMessages", "POST")
self.chat.syncThreadsList = APIPath(self, "chat.syncThreadsList", "POST")
self.chat.unfollowMessage = APIPath(self, "chat.unfollowMessage", "POST")
self.chat.unPinMessage = APIPath(self, "chat.unPinMessage", "POST")
self.chat.unStarMessage = APIPath(self, "chat.unStarMessage", "POST")
self.chat.update = APIPath(self, "chat.update", "POST")
self.custom_sounds = APIPath(self, "custom-sounds", None)
self.custom_sounds.list = APIPath(self, "custom-sounds.list")
self.im = APIPath(self, "im")
self.im.close = APIPath(self, "im.close", "POST")
self.im.counters = APIPath(self, "im.counters")
self.im.create = APIPath(self, "im.create", "POST")
self.im.history = APIPath(self, "im.history", "GET")
self.im.files = APIPath(self, "im.files")
self.im.members = APIPath(self, "im.members")
self.im.messages = APIPath(self, "im.messages")
self.im.messages.others = APIPath(self, "im.messages.others", "GET")
self.im.list = APIPath(self, "im.list", "GET")
self.im.list.everyone = APIPath(self, "im.list.everyone", "GET")
self.im.open = APIPath(self, "im.open", "POST")
self.im.setTopic = APIPath(self, "im.setTopic", "POST")
self.dm = self.im
self.integrations = APIPath(self, "integrations", None)
self.integrations.create = APIPath(self, "integrations.create", "POST")
self.integrations.get = APIPath(self, "integrations.get")
self.integrations.history = APIPath(self, "integrations.history")
self.integrations.list = APIPath(self, "integrations.list")
self.integrations.remove = APIPath(self, "integrations.remove", "POST")
self.findOrCreateInvite = APIPath(self, "findOrCreateInvite", "POST")
self.listInvites = APIPath(self, "listInvites")
self.removeInvite = APIPath(self, "removeInvite", "POST")
self.useInviteToken = APIPath(self, "useInviteToken", "POST")
self.validateInviteToken = APIPath(self, "validateInviteToken", "POST")
self.livechat = APIPath(self, "livechat", None)
self.livechat.inquiries = APIPath(self, "livechat/inquiries", None)
self.livechat.inquiries.list = APIPath(self, "livechat/inquiries.list")
self.livechat.inquiries.take = APIPath(self, "livechat/inquiries.take", "POST")
self.livechat.rooms = APIPath(self, "livechat/rooms")
self.oauth_apps = APIPath(self, "oauth-apps", None)
self.oauth_apps.get = APIPath(self, "oauth-apps.get")
self.oauth_apps.list = APIPath(self, "oauth-apps.list")
self.permissions = APIPath(self, "permissions", None)
self.permissions.listAll = APIPath(self, "permissions.listAll")
self.permissions.update = APIPath(self, "permissions.update", "POST")
self.roles = APIPath(self, "roles", None)
self.roles.create = APIPath(self, "roles.create", "POST")
self.roles.list = APIPath(self, "roles.list")
self.roles.addUserToRole = APIPath(self, "roles.addUserToRole", "POST")
self.roles.getUsersInRole = APIPath(self, "roles.getUsersInRole")
self.push = APIPath(self, "push", None)
self.push.token = APIPath(self, "push.token", None)
self.push.token.save = APIPath(self, "push.token", "POST")
self.push.token.delete = APIPath(self, "push.token", "DELETE")
self.rooms = APIPath(self, "rooms", None)
self.rooms.adminRooms = APIPath(self, "rooms.adminRooms")
self.rooms.cleanHistory = APIPath(self, "rooms.cleanHistory", "POST")
self.rooms.createDiscussion = APIPath(self, "rooms.createDiscussion", "POST")
self.rooms.favorite = APIPath(self, "rooms.favorite", "POST")
self.rooms.get = APIPath(self, "rooms.get")
self.rooms.getDiscussions = APIPath(self, "rooms.getDiscussions")
self.rooms.info = APIPath(self, "rooms.info")
self.rooms.leave = APIPath(self, "rooms.leave", "POST")
self.rooms.saveNotification = APIPath(self, "rooms.saveNotification", "POST")
self.rooms.upload = APIPath(self, "rooms.upload", "POST", arg_endpoint=True)
self.commands = APIPath(self, "commands")
self.commands.get = APIPath(self, "commands.get", "GET")
self.commands.list = APIPath(self, "commands.list", "GET")
self.commands.run = APIPath(self, "commands.run", "POST")
self.custom_user_status = APIPath(self, "custom-user-status", None)
self.custom_user_status.list = APIPath(self, "custom-user-status.list")
self.emoji_custom = APIPath(self, "emoji-custom", None)
self.emoji_custom.list = APIPath(self, "emoji-custom.list")
self.emoji_custom.create = APIPath(self, "emoji-custom.create", "POST")
self.emoji_custom.delete = APIPath(self, "emoji-custom.delete", "POST")
self.emoji_custom.update = APIPath(self, "emoji-custom.update", "POST")
self.settings = APIPath(self, "settings", None)
self.settings.public = APIPath(self, "settings.public")
self.settings.oauth = APIPath(self, "settings.oauth")
self.settings.get = APIPath(self, "settings", "GET", arg_endpoint=True)
self.settings.set = APIPath(self, "settings", "POST", arg_endpoint=True)
self.service = APIPath(self, "service", None)
self.service.configurations = APIPath(self, "service.configurations")
self.subscriptions = APIPath(self, "subscriptions", None)
self.subscriptions.get = APIPath(self, "subscriptions.get")
self.subscriptions.getOne = APIPath(self, "subscriptions.getOne")
self.subscriptions.read = APIPath(self, "subscriptions.read", "POST")
self.subscriptions.unread = APIPath(self, "subscriptions.unread", "POST")
self.video_conference = APIPath(self, "video-conference", None)
self.video_conference.jitsi = APIPath(self, "video-conference/jitsi", None)
self.video_conference.jitsi.update_timeout = APIPath(self, "video-conference/jitsi.update-timeout", "POST")
self.webdav = APIPath(self, "webdav", None)
self.webdav.getMyAccounts = APIPath(self, "webdav.getMyAccounts")
# fmt:on
def auth_header(self):
"""
Return api request header dictionary with Auth data.
"""
return {
"X-Auth-Token": self.auth_token,
"X-User-Id": self.user_id,
"Content-type": "application/json",
}
def login(self, **kwargs):
"""
Authenticate this Rocketchat API.
"""
url = self.url + self.api_v1_path + "login"
r = requests.post(url, data=kwargs)
j = r.json()
if j["status"] != "success":
raise Exception(j["message"])
self.user_id = j["data"]["userId"]
self.auth_token = j["data"]["authToken"] | rocketchat/rocketchat.py | import logging
from typing import Optional
import requests
from .apipath import APIPath
LOG = logging.getLogger(__name__)
class RocketChat(object):
"""
Python interface to the RocketChat REST API.
"""
def __init__(
self, url: str, username: Optional[str] = None, password: Optional[str] = None
):
self.url = url
self.api_v1_path = "/api/v1/"
self.user_id = None
self.auth_token = None
self.login(username=username, password=password)
# fmt:off
self.me = APIPath(self, "me")
self.info = APIPath(self, "info", result_key="info", auth=False, api_root="/api/")
self.directory = APIPath(self, "directory")
self.spotlight = APIPath(self, "spotlight")
self.statistics = APIPath(self, "statistics")
self.statistics.list = APIPath(self, "statistics.list")
self.assets = APIPath(self, "assets", None)
self.assets.setAsset = APIPath(self, "assets.setAsset", "POST", result_key="success")
self.assets.unsetAsset = APIPath(self, "assets.unsetAsset", "POST", result_key="success")
self.autotranslate = APIPath(self, "autotranslate", None)
self.autotranslate.getSupportedLanguages = APIPath(self, "autotranslate.getSupportedLanguages", result_key="languages")
self.autotranslate.saveSettings = APIPath(self, "autotranslate.saveSettings", "POST", result_key="success")
self.autotranslate.translateMessage = APIPath(self, "autotranslate.translateMessage", "POST", result_key="message")
self.logout = APIPath(self, "logout", "POST")
self.users = APIPath(self, "users", None)
self.users.presence = APIPath(self, "users.presence")
self.users.create = APIPath(self, "users.create", "POST", result_key="user")
self.users.createToken = APIPath(self, "users.createToken", "POST", result_key="data")
self.users.delete = APIPath(self, "users.delete", "POST", result_key="success")
self.users.deleteOwnAccount = APIPath(self, "users.deleteOwnAccount", "POST")
self.users.forgotPassword = APIPath(self, "users.forgotPassword", "POST")
self.users.generatePersonalAccessToken = APIPath(self, "users.generatePersonalAccessToken", "POST")
self.users.getAvatar = APIPath(self, "users.getAvatar", "GET")
self.users.getPersonalAccessTokens = APIPath(self, "users.getPersonalAccessTokens", "GET")
self.users.getPreferences = APIPath(self, "users.getPreferences", "GET")
self.users.getPresence = APIPath(self, "users.getPresence", "GET", result_key="presence")
self.users.getUsernameSuggestion = APIPath(self, "users.getUsernameSuggestion", "GET")
self.users.info = APIPath(self, "users.info", "GET", result_key="user")
self.users.list = APIPath(self, "users.list", "GET")
self.users.regeneratePersonalAccessToken = APIPath(self, "users.regeneratePersonalAccessToken", "POST")
self.users.register = APIPath(self, "users.register", "POST", result_key="user")
self.users.removePersonalAccessToken = APIPath(self, "users.removePersonalAccessToken", "POST")
self.users.requestDataDownload = APIPath(self, "users.requestDataDownload")
self.users.resetAvatar = APIPath(self, "users.resetAvatar", "POST", result_key="success")
self.users.setAvatar = APIPath(self, "users.setAvatar", "POST", result_key="success")
self.users.setPreferences = APIPath(self, "users.setPreferences", "POST")
self.users.setActiveStatus = APIPath(self, "users.setActiveStatus", "POST")
self.users.update = APIPath(self, "users.update", "POST", result_key="user")
self.users.updateOwnBasicInfo = APIPath(self, "users.updateOwnBasicInfo", "POST")
self.channels = APIPath(self, "channels", None)
self.channels.addAll = APIPath(self, "channels.addAll", "POST", result_key="channel")
self.channels.addLeader = APIPath(self, "channels.addLeader", "POST")
self.channels.anonymousread = APIPath(self, "channels.anonymousread")
self.channels.archive = APIPath(self, "channels.archive", "POST", result_key="success")
self.channels.cleanHistory = APIPath(self, "channels.cleanHistory", "POST", result_key="success")
self.channels.close = APIPath(self, "channels.close", "POST", result_key="success")
self.channels.counters = APIPath(self, "channels.counters")
self.channels.create = APIPath(self, "channels.create", "POST", result_key="channel")
self.channels.delete = APIPath(self, "channels.delete", "POST")
self.channels.files = APIPath(self, "channels.files")
self.channels.getAllUserMentionsByChannel = APIPath(self, "channels.getAllUserMentionsByChannel")
self.channels.getIntegrations = APIPath(self, "channels.getIntegrations", "GET", result_key="integrations")
self.channels.history = APIPath(self, "channels.history", "GET", result_key="messages")
self.channels.info = APIPath(self, "channels.info", "GET", result_key="channel")
self.channels.invite = APIPath(self, "channels.invite", "POST", result_key="channel")
self.channels.join = APIPath(self, "channels.join", "POST")
self.channels.kick = APIPath(self, "channels.kick", "POST", result_key="channel")
self.channels.leave = APIPath(self, "channels.leave", "POST", result_key="channel")
self.channels.list = APIPath(self, "channels.list", "GET", result_key="channels")
self.channels.list.joined = APIPath(self, "channels.list.joined", "GET", result_key="channels")
self.channels.members = APIPath(self, "channels.members")
self.channels.messages = APIPath(self, "channels.mesages")
self.channels.moderators = APIPath(self, "channels.moderators")
self.channels.online = APIPath(self, "channels.online")
self.channels.open = APIPath(self, "channels.open", "POST", result_key="success")
self.channels.removeLeader = APIPath(self, "channels.removeLeader", "POST")
self.channels.rename = APIPath(self, "channels.rename", "POST", result_key="channel")
self.channels.roles = APIPath(self, "channels.roles")
self.channels.setCustomFields = APIPath(self, "channels.setCustomFields", "POST")
self.channels.setAnnouncement = APIPath(self, "channels.setAnnouncement", "POST")
self.channels.setDefault = APIPath(self, "channels.setDefault", "POST")
self.channels.setDescription = APIPath(self, "channels.setDescription", "POST", result_key="description")
self.channels.setJoinCode = APIPath(self, "channels.setJoinCode", "POST", result_key="channel")
self.channels.setPurpose = APIPath(self, "channels.setPurpose", "POST", result_key="purpose")
self.channels.setReadOnly = APIPath(self, "channels.setReadOnly", "POST", result_key="channel")
self.channels.setTopic = APIPath(self, "channels.setTopic", "POST", result_key="topic")
self.channels.setType = APIPath(self, "channels.setType", "POST", result_key="channel")
self.channels.unarchive = APIPath(self, "channels.unarchive", "POST", result_key="success")
self.groups = APIPath(self, "groups", None)
self.groups.archive = APIPath(self, "groups.archive", "POST")
self.groups.addLeader = APIPath(self, "groups.addLeader", "POST")
self.groups.close = APIPath(self, "groups.close", "POST")
self.groups.create = APIPath(self, "groups.create", "POST")
self.groups.delete = APIPath(self, "groups.delete", "POST")
self.groups.files = APIPath(self, "groups.files", "POST")
self.groups.history = APIPath(self, "groups.history", "GET")
self.groups.info = APIPath(self, "groups.info", "GET")
self.groups.invite = APIPath(self, "groups.invite", "POST")
self.groups.kick = APIPath(self, "groups.kick", "POST")
self.groups.leave = APIPath(self, "groups.leave", "POST")
self.groups.list = APIPath(self, "groups.list", "GET")
self.groups.listAll = APIPath(self, "groups.listAll")
self.groups.members = APIPath(self, "groups.members")
self.groups.messages = APIPath(self, "groups.messages")
self.groups.moderators = APIPath(self, "groups.moderators")
self.groups.open = APIPath(self, "groups.open", "POST")
self.groups.removeLeader = APIPath(self, "groups.removeLeader", "POST")
self.groups.rename = APIPath(self, "groups.rename", "POST")
self.groups.roles = APIPath(self, "groups.roles")
self.groups.setAnnouncement = APIPath(self, "groups.setAnnouncement", "POST")
self.groups.setCustomFields = APIPath(self, "groups.setCustomFields", "POST")
self.groups.setDescription = APIPath(self, "groups.setDescription", "POST")
self.groups.setPurpose = APIPath(self, "groups.setPurpose", "POST")
self.groups.setReadOnly = APIPath(self, "groups.setReadOnly", "POST")
self.groups.setTopic = APIPath(self, "groups.setTopic", "POST")
self.groups.setType = APIPath(self, "groups.setType", "POST", result_key="group")
self.groups.unarchive = APIPath(self, "groups.unarchive", "POST")
self.chat = APIPath(self, "chat", None)
self.chat.delete = APIPath(self, "chat.delete", "POST")
self.chat.followMessage = APIPath(self, "chat.followMessage", "POST")
self.chat.getDeletedMessages = APIPath(self, "chat.getDeletedMessages")
self.chat.getDiscussions = APIPath(self, "chat.getDiscussions")
self.chat.getMentionedMessages = APIPath(self, "chat.getMentionedMessages")
self.chat.getMessage = APIPath(self, "chat.getMessage", "POST")
self.chat.getMessageReadReceipts = APIPath(self, "chat.getMessageReadReceipts")
self.chat.getPinnedMessages = APIPath(self, "chat.getPinnedMessages")
self.chat.getSnippetedMessages = APIPath(self, "chat.getSnippetedMessages")
self.chat.getSnippetedMessageById = APIPath(self, "chat.getSnippetedMessageById")
self.chat.getStarredMessages = APIPath(self, "chat.getStarredMessages")
self.chat.getThreadsList = APIPath(self, "chat.getThreadsList")
self.chat.ignoreUser = APIPath(self, "chat.ignoreUser")
self.chat.pinMessage = APIPath(self, "chat.pinMessage", "POST")
self.chat.postMessage = APIPath(self, "chat.postMessage", "POST")
self.chat.react = APIPath(self, "chat.react", "POST")
self.chat.reportMessage = APIPath(self, "chat.reportMessage", "POST")
self.chat.search = APIPath(self, "chat.search", "POST")
self.chat.starMessage = APIPath(self, "chat.starMessage", "POST")
self.chat.sendMessage = APIPath(self, "chat.sendMessage", "POST")
self.chat.syncThreadMessages = APIPath(self, "chat.syncThreadMessages", "POST")
self.chat.syncThreadsList = APIPath(self, "chat.syncThreadsList", "POST")
self.chat.unfollowMessage = APIPath(self, "chat.unfollowMessage", "POST")
self.chat.unPinMessage = APIPath(self, "chat.unPinMessage", "POST")
self.chat.unStarMessage = APIPath(self, "chat.unStarMessage", "POST")
self.chat.update = APIPath(self, "chat.update", "POST")
self.custom_sounds = APIPath(self, "custom-sounds", None)
self.custom_sounds.list = APIPath(self, "custom-sounds.list")
self.im = APIPath(self, "im")
self.im.close = APIPath(self, "im.close", "POST")
self.im.counters = APIPath(self, "im.counters")
self.im.create = APIPath(self, "im.create", "POST")
self.im.history = APIPath(self, "im.history", "GET")
self.im.files = APIPath(self, "im.files")
self.im.members = APIPath(self, "im.members")
self.im.messages = APIPath(self, "im.messages")
self.im.messages.others = APIPath(self, "im.messages.others", "GET")
self.im.list = APIPath(self, "im.list", "GET")
self.im.list.everyone = APIPath(self, "im.list.everyone", "GET")
self.im.open = APIPath(self, "im.open", "POST")
self.im.setTopic = APIPath(self, "im.setTopic", "POST")
self.dm = self.im
self.integrations = APIPath(self, "integrations", None)
self.integrations.create = APIPath(self, "integrations.create", "POST")
self.integrations.get = APIPath(self, "integrations.get")
self.integrations.history = APIPath(self, "integrations.history")
self.integrations.list = APIPath(self, "integrations.list")
self.integrations.remove = APIPath(self, "integrations.remove", "POST")
self.findOrCreateInvite = APIPath(self, "findOrCreateInvite", "POST")
self.listInvites = APIPath(self, "listInvites")
self.removeInvite = APIPath(self, "removeInvite", "POST")
self.useInviteToken = APIPath(self, "useInviteToken", "POST")
self.validateInviteToken = APIPath(self, "validateInviteToken", "POST")
self.livechat = APIPath(self, "livechat", None)
self.livechat.inquiries = APIPath(self, "livechat/inquiries", None)
self.livechat.inquiries.list = APIPath(self, "livechat/inquiries.list")
self.livechat.inquiries.take = APIPath(self, "livechat/inquiries.take", "POST")
self.livechat.rooms = APIPath(self, "livechat/rooms")
self.oauth_apps = APIPath(self, "oauth-apps", None)
self.oauth_apps.get = APIPath(self, "oauth-apps.get")
self.oauth_apps.list = APIPath(self, "oauth-apps.list")
self.permissions = APIPath(self, "permissions", None)
self.permissions.listAll = APIPath(self, "permissions.listAll")
self.permissions.update = APIPath(self, "permissions.update", "POST")
self.roles = APIPath(self, "roles", None)
self.roles.create = APIPath(self, "roles.create", "POST")
self.roles.list = APIPath(self, "roles.list")
self.roles.addUserToRole = APIPath(self, "roles.addUserToRole", "POST")
self.roles.getUsersInRole = APIPath(self, "roles.getUsersInRole")
self.push = APIPath(self, "push", None)
self.push.token = APIPath(self, "push.token", None)
self.push.token.save = APIPath(self, "push.token", "POST")
self.push.token.delete = APIPath(self, "push.token", "DELETE")
self.rooms = APIPath(self, "rooms", None)
self.rooms.adminRooms = APIPath(self, "rooms.adminRooms")
self.rooms.cleanHistory = APIPath(self, "rooms.cleanHistory", "POST")
self.rooms.createDiscussion = APIPath(self, "rooms.createDiscussion", "POST")
self.rooms.favorite = APIPath(self, "rooms.favorite", "POST")
self.rooms.get = APIPath(self, "rooms.get")
self.rooms.getDiscussions = APIPath(self, "rooms.getDiscussions")
self.rooms.info = APIPath(self, "rooms.info")
self.rooms.leave = APIPath(self, "rooms.leave", "POST")
self.rooms.saveNotification = APIPath(self, "rooms.saveNotification", "POST")
self.rooms.upload = APIPath(self, "rooms.upload", "POST", arg_endpoint=True)
self.commands = APIPath(self, "commands")
self.commands.get = APIPath(self, "commands.get", "GET")
self.commands.list = APIPath(self, "commands.list", "GET")
self.commands.run = APIPath(self, "commands.run", "POST")
self.custom_user_status = APIPath(self, "custom-user-status", None)
self.custom_user_status.list = APIPath(self, "custom-user-status.list")
self.emoji_custom = APIPath(self, "emoji-custom", None)
self.emoji_custom.list = APIPath(self, "emoji-custom.list")
self.emoji_custom.create = APIPath(self, "emoji-custom.create", "POST")
self.emoji_custom.delete = APIPath(self, "emoji-custom.delete", "POST")
self.emoji_custom.update = APIPath(self, "emoji-custom.update", "POST")
self.settings = APIPath(self, "settings", None)
self.settings.public = APIPath(self, "settings.public")
self.settings.oauth = APIPath(self, "settings.oauth")
self.settings.get = APIPath(self, "settings", "GET", arg_endpoint=True)
self.settings.set = APIPath(self, "settings", "POST", arg_endpoint=True)
self.service = APIPath(self, "service", None)
self.service.configurations = APIPath(self, "service.configurations")
self.subscriptions = APIPath(self, "subscriptions", None)
self.subscriptions.get = APIPath(self, "subscriptions.get")
self.subscriptions.getOne = APIPath(self, "subscriptions.getOne")
self.subscriptions.read = APIPath(self, "subscriptions.read", "POST")
self.subscriptions.unread = APIPath(self, "subscriptions.unread", "POST")
self.video_conference = APIPath(self, "video-conference", None)
self.video_conference.jitsi = APIPath(self, "video-conference/jitsi", None)
self.video_conference.jitsi.update_timeout = APIPath(self, "video-conference/jitsi.update-timeout", "POST")
self.webdav = APIPath(self, "webdav", None)
self.webdav.getMyAccounts = APIPath(self, "webdav.getMyAccounts")
# fmt:on
def auth_header(self):
"""
Return api request header dictionary with Auth data.
"""
return {
"X-Auth-Token": self.auth_token,
"X-User-Id": self.user_id,
"Content-type": "application/json",
}
def login(self, **kwargs):
"""
Authenticate this Rocketchat API.
"""
url = self.url + self.api_v1_path + "login"
r = requests.post(url, data=kwargs)
j = r.json()
if j["status"] != "success":
raise Exception(j["message"])
self.user_id = j["data"]["userId"]
self.auth_token = j["data"]["authToken"] | 0.810254 | 0.078395 |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DecimalField, DateTimeField, SelectField, IntegerField
from wtforms.validators import DataRequired, Length, Optional, ValidationError
from wtforms.widgets import html_params, HTMLString
import datetime
from capital_gains_loss.models import Transaction
from flask_login import current_user
class DateTimePickerWidget(object):
    """
    Date Time picker from Eonasdan GitHub (tempusdominus Bootstrap markup).

    Renders the field as a text <input> wrapped in the picker's
    input-group markup.

    NOTE(review): the wrapper div id "datetimepicker1" is hard-coded, so
    rendering two fields with this widget on the same page yields
    duplicate ids -- confirm only one picker is used per page.
    """
    # HTML skeleton; %(text)s is substituted with the <input> attributes.
    data_template = (
        """
<div class="input-group date" id="datetimepicker1" data-target-input="nearest">
<input %(text)s class="form-control datetimepicker-input" data-target="#datetimepicker1"/>
<div class="input-group-append" data-target="#datetimepicker1" data-toggle="datetimepicker">
<div class="input-group-text"><i class="fa fa-calendar"></i></div>
</div>
</div>
"""
    )
    def __call__(self, field, **kwargs):
        """Render *field*; returns the widget markup as an HTMLString."""
        kwargs.setdefault("id", field.id)
        kwargs.setdefault("name", field.name)
        # Avoid rendering value="None" for unset fields.
        # NOTE(review): this mutates field.data as a side effect.
        if not field.data:
            field.data = ""
        template = self.data_template
        return HTMLString(
            template % {"text": html_params(type="text", value=field.data, **kwargs)}
        )
class TransactionForm(FlaskForm):
    """Form for adding a buy/sell transaction for a security.

    Custom validation:
      * transaction_date must match the picker format "%m/%d/%Y %I:%M %p";
      * a "sell" may not exceed the running share total recorded on the
        most recent transaction for the same security and user.
    """
    security_name = StringField('Security Name', validators=[DataRequired(), Length(max=20)])
    security_details = StringField('Security Details (Optional)', validators=[])
    transaction_date = StringField('Transaction Date', validators=[], widget=DateTimePickerWidget())
    transaction_type = SelectField(u'Transaction Type', choices=[('buy', 'Buy'), ('sell', 'Sell')], validators=[])
    quantity = IntegerField('Number of Shares', validators=[DataRequired()])
    price_per_share = DecimalField('Price per Share', places=4, validators=[DataRequired()])
    fees = DecimalField('Commission/Brokerage Fees', validators=[Optional()])
    amount_recieved = DecimalField('Amount Recieved (Optional)', validators=[Optional()])
    amount_recieved_details = StringField('Amount Recieved Details (Optional)', validators=[])
    forex_rate = DecimalField('Forex Rate, if in foreign currency (will be used for both shares and fees)', places=4, validators=[Optional()])
    submit = SubmitField('Add Transaction')

    def validate_transaction_date(self, field):
        """Reject dates not matching "%m/%d/%Y %I:%M %p".

        Was: broad ``except Exception`` plus debug ``print`` calls;
        narrowed to the errors strptime actually raises and the prints
        removed.
        """
        try:
            datetime.datetime.strptime(field.data, '%m/%d/%Y %I:%M %p')
        except (TypeError, ValueError):
            raise ValidationError('Invalid Date format!')

    def validate(self, extra_validators=None):
        """Run the standard field validators, then block over-selling.

        Accepts and forwards ``extra_validators`` for compatibility with
        WTForms/Flask-WTF ``validate()`` signatures; omitting it broke
        callers that pass the argument.
        """
        if extra_validators is not None:
            ok = super(TransactionForm, self).validate(extra_validators)
        else:
            ok = super(TransactionForm, self).validate()
        if not ok:
            return False
        # Most recent transaction for this security carries the running total.
        last_transaction = Transaction.query.filter_by(
            security_name=self.security_name.data.upper(),
            author=current_user,
        ).order_by(Transaction.transaction_date.desc()).first()
        if self.transaction_type.data.lower() == "sell":
            if last_transaction is not None:
                # NOTE(review): uses > here while TransactionFormUpdate uses >=
                # -- confirm which boundary is intended.
                if self.quantity.data > last_transaction.total_shares:
                    self.transaction_type.errors.append('No Stock available to sell!')
                    return False
        return True
class TransactionFormUpdate(FlaskForm):
    """Form for editing an existing transaction.

    Unlike TransactionForm, the sell-side check recomputes the running
    share total from every stored transaction of the security (oldest
    first) instead of reading the cached total of the latest row.
    """
    security_name = StringField('Security Name', validators=[DataRequired(), Length(max=20)])
    security_details = StringField('Security Details (Optional)', validators=[])
    transaction_date = StringField('Transaction Date',validators=[], widget=DateTimePickerWidget())
    transaction_type = SelectField(u'Transaction Type', choices = [('buy', 'Buy'), ('sell', 'Sell')], validators=[])
    quantity = IntegerField('Number of Shares', validators=[DataRequired()])
    price_per_share = DecimalField('Price per Share', places=4, validators=[DataRequired()])
    fees = DecimalField('Commission/Brokerage Fees', validators=[Optional()])
    amount_recieved = DecimalField('Amount Recieved (Optional)', validators=[Optional()])
    amount_recieved_details = StringField('Amount Recieved Details (Optional)', validators=[])
    forex_rate = DecimalField('Forex Rate, if in foreign currency (will be used for both shares and fees)',places=4, validators=[Optional()])
    submit = SubmitField('Update Transaction')
    def validate_transaction_date(self, field):
        """Reject dates not matching the picker format "%m/%d/%Y %I:%M %p"."""
        try:
            dt = datetime.datetime.strptime(field.data, '%m/%d/%Y %I:%M %p')
            print(dt)  # NOTE(review): debug print left in; consider removing
        except Exception as e:
            print(e)  # NOTE(review): debug print left in; consider removing
            raise ValidationError('Invalid Date format!')
    def validate(self):
        """Field validation plus a recomputed availability check for sells."""
        if not super(TransactionFormUpdate, self).validate():
            return False
        total_shares = 0
        # Recompute the net position by walking every transaction oldest-first.
        # NOTE(review): the walk appears to include the transaction currently
        # being edited -- confirm against the caller that this is intended.
        transactions = Transaction.query.filter_by(security_name=self.security_name.data.upper(),author=current_user).order_by(Transaction.transaction_date.asc())
        for transaction in transactions:
            if transaction.transaction_type.lower() == "buy":
                total_shares = total_shares + transaction.quantity
            elif transaction.transaction_type.lower() == "sell":
                total_shares = total_shares - transaction.quantity
        if self.transaction_type.data.lower() == "sell":
            #print("Debug ", self.quantity.data, total_shares)
            # NOTE(review): >= here vs > in TransactionForm -- confirm whether
            # selling the entire position should be rejected.
            if self.quantity.data >= total_shares:
                msg = 'Cannot edit to sell transaction. This will lead to selling stocks which are not available (no corresponding buy transaction)!'
                self.transaction_type.errors.append(msg)
                return False
return True | capital_gains_loss/transactions/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DecimalField, DateTimeField, SelectField, IntegerField
from wtforms.validators import DataRequired, Length, Optional, ValidationError
from wtforms.widgets import html_params, HTMLString
import datetime
from capital_gains_loss.models import Transaction
from flask_login import current_user
class DateTimePickerWidget(object):
    """
    Date Time picker from Eonasdan GitHub (tempusdominus Bootstrap markup).

    Renders the field as a text <input> wrapped in the picker's
    input-group markup.

    NOTE(review): the wrapper div id "datetimepicker1" is hard-coded, so
    rendering two fields with this widget on the same page yields
    duplicate ids -- confirm only one picker is used per page.
    """
    # HTML skeleton; %(text)s is substituted with the <input> attributes.
    data_template = (
        """
<div class="input-group date" id="datetimepicker1" data-target-input="nearest">
<input %(text)s class="form-control datetimepicker-input" data-target="#datetimepicker1"/>
<div class="input-group-append" data-target="#datetimepicker1" data-toggle="datetimepicker">
<div class="input-group-text"><i class="fa fa-calendar"></i></div>
</div>
</div>
"""
    )
    def __call__(self, field, **kwargs):
        """Render *field*; returns the widget markup as an HTMLString."""
        kwargs.setdefault("id", field.id)
        kwargs.setdefault("name", field.name)
        # Avoid rendering value="None" for unset fields.
        # NOTE(review): this mutates field.data as a side effect.
        if not field.data:
            field.data = ""
        template = self.data_template
        return HTMLString(
            template % {"text": html_params(type="text", value=field.data, **kwargs)}
        )
class TransactionForm(FlaskForm):
    """Form for adding a buy/sell transaction for a security.

    Custom validation:
      * transaction_date must match the picker format "%m/%d/%Y %I:%M %p";
      * a "sell" may not exceed the running share total recorded on the
        most recent transaction for the same security and user.
    """
    security_name = StringField('Security Name', validators=[DataRequired(), Length(max=20)])
    security_details = StringField('Security Details (Optional)', validators=[])
    transaction_date = StringField('Transaction Date', validators=[], widget=DateTimePickerWidget())
    transaction_type = SelectField(u'Transaction Type', choices=[('buy', 'Buy'), ('sell', 'Sell')], validators=[])
    quantity = IntegerField('Number of Shares', validators=[DataRequired()])
    price_per_share = DecimalField('Price per Share', places=4, validators=[DataRequired()])
    fees = DecimalField('Commission/Brokerage Fees', validators=[Optional()])
    amount_recieved = DecimalField('Amount Recieved (Optional)', validators=[Optional()])
    amount_recieved_details = StringField('Amount Recieved Details (Optional)', validators=[])
    forex_rate = DecimalField('Forex Rate, if in foreign currency (will be used for both shares and fees)', places=4, validators=[Optional()])
    submit = SubmitField('Add Transaction')

    def validate_transaction_date(self, field):
        """Reject dates not matching "%m/%d/%Y %I:%M %p".

        Was: broad ``except Exception`` plus debug ``print`` calls;
        narrowed to the errors strptime actually raises and the prints
        removed.
        """
        try:
            datetime.datetime.strptime(field.data, '%m/%d/%Y %I:%M %p')
        except (TypeError, ValueError):
            raise ValidationError('Invalid Date format!')

    def validate(self, extra_validators=None):
        """Run the standard field validators, then block over-selling.

        Accepts and forwards ``extra_validators`` for compatibility with
        WTForms/Flask-WTF ``validate()`` signatures; omitting it broke
        callers that pass the argument.
        """
        if extra_validators is not None:
            ok = super(TransactionForm, self).validate(extra_validators)
        else:
            ok = super(TransactionForm, self).validate()
        if not ok:
            return False
        # Most recent transaction for this security carries the running total.
        last_transaction = Transaction.query.filter_by(
            security_name=self.security_name.data.upper(),
            author=current_user,
        ).order_by(Transaction.transaction_date.desc()).first()
        if self.transaction_type.data.lower() == "sell":
            if last_transaction is not None:
                # NOTE(review): uses > here while TransactionFormUpdate uses >=
                # -- confirm which boundary is intended.
                if self.quantity.data > last_transaction.total_shares:
                    self.transaction_type.errors.append('No Stock available to sell!')
                    return False
        return True
class TransactionFormUpdate(FlaskForm):
    """Form for editing an existing transaction.

    Unlike TransactionForm, the sell-side check recomputes the running
    share total from every stored transaction of the security (oldest
    first) instead of reading the cached total of the latest row.
    """
    security_name = StringField('Security Name', validators=[DataRequired(), Length(max=20)])
    security_details = StringField('Security Details (Optional)', validators=[])
    transaction_date = StringField('Transaction Date',validators=[], widget=DateTimePickerWidget())
    transaction_type = SelectField(u'Transaction Type', choices = [('buy', 'Buy'), ('sell', 'Sell')], validators=[])
    quantity = IntegerField('Number of Shares', validators=[DataRequired()])
    price_per_share = DecimalField('Price per Share', places=4, validators=[DataRequired()])
    fees = DecimalField('Commission/Brokerage Fees', validators=[Optional()])
    amount_recieved = DecimalField('Amount Recieved (Optional)', validators=[Optional()])
    amount_recieved_details = StringField('Amount Recieved Details (Optional)', validators=[])
    forex_rate = DecimalField('Forex Rate, if in foreign currency (will be used for both shares and fees)',places=4, validators=[Optional()])
    submit = SubmitField('Update Transaction')
    def validate_transaction_date(self, field):
        """Reject dates not matching the picker format "%m/%d/%Y %I:%M %p"."""
        try:
            dt = datetime.datetime.strptime(field.data, '%m/%d/%Y %I:%M %p')
            print(dt)  # NOTE(review): debug print left in; consider removing
        except Exception as e:
            print(e)  # NOTE(review): debug print left in; consider removing
            raise ValidationError('Invalid Date format!')
    def validate(self):
        """Field validation plus a recomputed availability check for sells."""
        if not super(TransactionFormUpdate, self).validate():
            return False
        total_shares = 0
        # Recompute the net position by walking every transaction oldest-first.
        # NOTE(review): the walk appears to include the transaction currently
        # being edited -- confirm against the caller that this is intended.
        transactions = Transaction.query.filter_by(security_name=self.security_name.data.upper(),author=current_user).order_by(Transaction.transaction_date.asc())
        for transaction in transactions:
            if transaction.transaction_type.lower() == "buy":
                total_shares = total_shares + transaction.quantity
            elif transaction.transaction_type.lower() == "sell":
                total_shares = total_shares - transaction.quantity
        if self.transaction_type.data.lower() == "sell":
            #print("Debug ", self.quantity.data, total_shares)
            # NOTE(review): >= here vs > in TransactionForm -- confirm whether
            # selling the entire position should be rejected.
            if self.quantity.data >= total_shares:
                msg = 'Cannot edit to sell transaction. This will lead to selling stocks which are not available (no corresponding buy transaction)!'
                self.transaction_type.errors.append(msg)
                return False
return True | 0.634996 | 0.147463 |
import os
import unittest
import requests
from cereal import car
from tools.lib.logreader import LogReader
from opendbc.can.parser import CANParser
from selfdrive.car.honda.values import CAR as HONDA
from selfdrive.car.honda.interface import CarInterface as HondaCarInterface
from selfdrive.car.honda.carcontroller import CarController as HondaCarController
from selfdrive.car.honda.radar_interface import RadarInterface as HondaRadarInterface
from selfdrive.car.toyota.values import CAR as TOYOTA
from selfdrive.car.toyota.interface import CarInterface as ToyotaCarInterface
from selfdrive.car.toyota.carcontroller import CarController as ToyotaCarController
from selfdrive.car.toyota.radar_interface import RadarInterface as ToyotaRadarInterface
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
def run_route(route, car_name, CarInterface, CarController):
    """Replay a downloaded drive log (/tmp/<route>.bz2) through a car
    interface, feeding every CAN message to all CAN parsers the interface
    creates and periodically printing the resulting CarState.

    Returns True once the whole log has been replayed.
    """
    lr = LogReader("/tmp/"+route + ".bz2")
    print(lr)
    cps = []
    # Hook passed in place of the real CANParser factory so every parser the
    # interface builds is also captured in `cps` for manual updates below.
    def CANParserHook(dbc_name, signals, checks=None, bus=0, sendcan=False, tcp_addr="127.0.0.1", timeout=-1):
        cp = CANParser(dbc_name, signals, checks, bus, sendcan, "", timeout)
        cps.append(cp)
        return cp
    params = CarInterface.get_params(car_name)
    CI = CarInterface(params, CarController, CANParserHook)
    print(CI)
    i = 0
    last_monotime = 0
    for msg in lr:
        if msg.which() == 'can':
            msg_bytes = msg.as_builder().to_bytes()
            monotime = msg.logMonoTime
            for x in cps:
                x.update_string(monotime, msg_bytes)
            # Update the interface roughly every 0.01 time units.
            # NOTE(review): logMonoTime is typically nanoseconds -- confirm
            # the 0.01 threshold is in the intended unit.
            if (monotime-last_monotime) > 0.01:
                control = car.CarControl.new_message()
                CS = CI.update(control)
                if i % 100 == 0:
                    # ANSI clear-screen + cursor-home before dumping CarState.
                    print('\033[2J\033[H'+str(CS))
                last_monotime = monotime
                i += 1
    return True
def run_route_radar(route, car_name, RadarInterface, CarInterface):
    """Replay a downloaded drive log (/tmp/<route>.bz2) through a radar
    interface, accumulating updated CAN message ids until the radar's
    trigger message is seen, then computing and printing a radar frame.

    Returns True once the whole log has been replayed.
    """
    lr = LogReader("/tmp/"+route + ".bz2")
    print(lr)
    cps = []
    # Hook substituted for the CANParser factory so the parser built by the
    # radar interface is captured in `cps`.
    def CANParserHook(dbc_name, signals, checks=None, bus=0, sendcan=False, tcp_addr="127.0.0.1", timeout=-1):
        cp = CANParser(dbc_name, signals, checks, bus, sendcan, "", timeout)
        print(signals)
        cps.append(cp)
        return cp
    params = CarInterface.get_params(car_name)
    RI = RadarInterface(params, CANParserHook)
    i = 0
    updated_messages = set()
    for msg in lr:
        if msg.which() == 'can':
            msg_bytes = msg.as_builder().to_bytes()
            _, vls = cps[0].update_string(msg.logMonoTime, msg_bytes)
            updated_messages.update(vls)
            # Once the trigger message arrived, compute one radar frame and
            # start accumulating again.
            if RI.trigger_msg in updated_messages:
                ret = RI._update(updated_messages)
                if i % 10 == 0:
                    # ANSI clear-screen + cursor-home before dumping the frame.
                    print('\033[2J\033[H'+str(ret))
                updated_messages = set()
                i += 1
    return True
# TODO: make this generic
class TestCarInterface(unittest.TestCase):
    """Replay recorded CI routes through the car / radar interfaces.

    Most cases are currently disabled (their bodies are commented out);
    only the Prius radar replay actually runs.
    """

    def setUp(self):
        # Car model -> comma CI route id; each log is downloaded into /tmp
        # once and reused across runs.
        self.routes = {
            HONDA.CIVIC: "b0c9d2329ad1606b|2019-05-30--20-23-57",
            HONDA.ACCORD: "0375fdf7b1ce594d|2019-05-21--20-10-33",
            TOYOTA.PRIUS: "38bfd238edecbcd7|2019-06-07--10-15-25",
            TOYOTA.RAV4: "02ec6bea180a4d36|2019-04-17--11-21-35"
        }
        for route in self.routes.values():
            route_filename = route + ".bz2"
            local_path = "/tmp/" + route_filename
            if not os.path.isfile(local_path):
                # BUGFIX: response .content is bytes, so the file must be
                # opened in binary mode; text mode "w" raises TypeError on
                # write.
                with open(local_path, "wb") as f:
                    f.write(requests.get(BASE_URL + route_filename).content)

    def test_parser_civic(self):
        #self.assertTrue(run_route(self.routes[HONDA.CIVIC], HONDA.CIVIC, HondaCarInterface, HondaCarController))
        pass

    def test_parser_accord(self):
        # one honda
        #self.assertTrue(run_route(self.routes[HONDA.ACCORD], HONDA.ACCORD, HondaCarInterface, HondaCarController))
        pass

    def test_parser_prius(self):
        #self.assertTrue(run_route(self.routes[TOYOTA.PRIUS], TOYOTA.PRIUS, ToyotaCarInterface, ToyotaCarController))
        pass

    def test_parser_rav4(self):
        # hmm, rav4 is broken
        #self.assertTrue(run_route(self.routes[TOYOTA.RAV4], TOYOTA.RAV4, ToyotaCarInterface, ToyotaCarController))
        pass

    def test_radar_civic(self):
        #self.assertTrue(run_route_radar(self.routes[HONDA.CIVIC], HONDA.CIVIC, HondaRadarInterface, HondaCarInterface))
        pass

    def test_radar_prius(self):
        self.assertTrue(run_route_radar(self.routes[TOYOTA.PRIUS], TOYOTA.PRIUS, ToyotaRadarInterface, ToyotaCarInterface))
        pass
if __name__ == "__main__":
unittest.main() | selfdrive/car/tests/test_carstates.py | import os
import unittest
import requests
from cereal import car
from tools.lib.logreader import LogReader
from opendbc.can.parser import CANParser
from selfdrive.car.honda.values import CAR as HONDA
from selfdrive.car.honda.interface import CarInterface as HondaCarInterface
from selfdrive.car.honda.carcontroller import CarController as HondaCarController
from selfdrive.car.honda.radar_interface import RadarInterface as HondaRadarInterface
from selfdrive.car.toyota.values import CAR as TOYOTA
from selfdrive.car.toyota.interface import CarInterface as ToyotaCarInterface
from selfdrive.car.toyota.carcontroller import CarController as ToyotaCarController
from selfdrive.car.toyota.radar_interface import RadarInterface as ToyotaRadarInterface
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
def run_route(route, car_name, CarInterface, CarController):
    """Replay a downloaded drive log (/tmp/<route>.bz2) through a car
    interface, feeding every CAN message to all CAN parsers the interface
    creates and periodically printing the resulting CarState.

    Returns True once the whole log has been replayed.
    """
    lr = LogReader("/tmp/"+route + ".bz2")
    print(lr)
    cps = []
    # Hook passed in place of the real CANParser factory so every parser the
    # interface builds is also captured in `cps` for manual updates below.
    def CANParserHook(dbc_name, signals, checks=None, bus=0, sendcan=False, tcp_addr="127.0.0.1", timeout=-1):
        cp = CANParser(dbc_name, signals, checks, bus, sendcan, "", timeout)
        cps.append(cp)
        return cp
    params = CarInterface.get_params(car_name)
    CI = CarInterface(params, CarController, CANParserHook)
    print(CI)
    i = 0
    last_monotime = 0
    for msg in lr:
        if msg.which() == 'can':
            msg_bytes = msg.as_builder().to_bytes()
            monotime = msg.logMonoTime
            for x in cps:
                x.update_string(monotime, msg_bytes)
            # Update the interface roughly every 0.01 time units.
            # NOTE(review): logMonoTime is typically nanoseconds -- confirm
            # the 0.01 threshold is in the intended unit.
            if (monotime-last_monotime) > 0.01:
                control = car.CarControl.new_message()
                CS = CI.update(control)
                if i % 100 == 0:
                    # ANSI clear-screen + cursor-home before dumping CarState.
                    print('\033[2J\033[H'+str(CS))
                last_monotime = monotime
                i += 1
    return True
def run_route_radar(route, car_name, RadarInterface, CarInterface):
    """Replay a downloaded drive log (/tmp/<route>.bz2) through a radar
    interface, accumulating updated CAN message ids until the radar's
    trigger message is seen, then computing and printing a radar frame.

    Returns True once the whole log has been replayed.
    """
    lr = LogReader("/tmp/"+route + ".bz2")
    print(lr)
    cps = []
    # Hook substituted for the CANParser factory so the parser built by the
    # radar interface is captured in `cps`.
    def CANParserHook(dbc_name, signals, checks=None, bus=0, sendcan=False, tcp_addr="127.0.0.1", timeout=-1):
        cp = CANParser(dbc_name, signals, checks, bus, sendcan, "", timeout)
        print(signals)
        cps.append(cp)
        return cp
    params = CarInterface.get_params(car_name)
    RI = RadarInterface(params, CANParserHook)
    i = 0
    updated_messages = set()
    for msg in lr:
        if msg.which() == 'can':
            msg_bytes = msg.as_builder().to_bytes()
            _, vls = cps[0].update_string(msg.logMonoTime, msg_bytes)
            updated_messages.update(vls)
            # Once the trigger message arrived, compute one radar frame and
            # start accumulating again.
            if RI.trigger_msg in updated_messages:
                ret = RI._update(updated_messages)
                if i % 10 == 0:
                    # ANSI clear-screen + cursor-home before dumping the frame.
                    print('\033[2J\033[H'+str(ret))
                updated_messages = set()
                i += 1
    return True
# TODO: make this generic
class TestCarInterface(unittest.TestCase):
    """Replay recorded CI routes through the car / radar interfaces.

    Most cases are currently disabled (their bodies are commented out);
    only the Prius radar replay actually runs.
    """

    def setUp(self):
        # Car model -> comma CI route id; each log is downloaded into /tmp
        # once and reused across runs.
        self.routes = {
            HONDA.CIVIC: "b0c9d2329ad1606b|2019-05-30--20-23-57",
            HONDA.ACCORD: "0375fdf7b1ce594d|2019-05-21--20-10-33",
            TOYOTA.PRIUS: "38bfd238edecbcd7|2019-06-07--10-15-25",
            TOYOTA.RAV4: "02ec6bea180a4d36|2019-04-17--11-21-35"
        }
        for route in self.routes.values():
            route_filename = route + ".bz2"
            local_path = "/tmp/" + route_filename
            if not os.path.isfile(local_path):
                # BUGFIX: response .content is bytes, so the file must be
                # opened in binary mode; text mode "w" raises TypeError on
                # write.
                with open(local_path, "wb") as f:
                    f.write(requests.get(BASE_URL + route_filename).content)

    def test_parser_civic(self):
        #self.assertTrue(run_route(self.routes[HONDA.CIVIC], HONDA.CIVIC, HondaCarInterface, HondaCarController))
        pass

    def test_parser_accord(self):
        # one honda
        #self.assertTrue(run_route(self.routes[HONDA.ACCORD], HONDA.ACCORD, HondaCarInterface, HondaCarController))
        pass

    def test_parser_prius(self):
        #self.assertTrue(run_route(self.routes[TOYOTA.PRIUS], TOYOTA.PRIUS, ToyotaCarInterface, ToyotaCarController))
        pass

    def test_parser_rav4(self):
        # hmm, rav4 is broken
        #self.assertTrue(run_route(self.routes[TOYOTA.RAV4], TOYOTA.RAV4, ToyotaCarInterface, ToyotaCarController))
        pass

    def test_radar_civic(self):
        #self.assertTrue(run_route_radar(self.routes[HONDA.CIVIC], HONDA.CIVIC, HondaRadarInterface, HondaCarInterface))
        pass

    def test_radar_prius(self):
        self.assertTrue(run_route_radar(self.routes[TOYOTA.PRIUS], TOYOTA.PRIUS, ToyotaRadarInterface, ToyotaCarInterface))
        pass
if __name__ == "__main__":
unittest.main() | 0.251556 | 0.176246 |
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from mogwai.connection import MogwaiQueryError
from mogwai.tests.base import BaseMogwaiTestCase
from mogwai.models import Query, IN, OUT, Edge, Vertex, GREATER_THAN
from mogwai.properties import Integer, Double
class MockVertex(object):
    """Minimal stand-in for a vertex: only the element id is needed."""
    eid = 1
class MockVertex2(Vertex):
    """Real Vertex subclass with a single integer property."""
    age = Integer()
class MockEdge(Edge):
    """Edge with an int and a double property, used to test type casting."""
    age = Integer()
    fierceness = Double()
@attr('unit', 'query_vertex')
class SimpleQueryTest(BaseMogwaiTestCase):
    """Unit tests for the gremlin partial-query strings produced by Query.

    Each test calls a builder method and checks the groovy snippet from
    _get_partial(); no database connection is involved.
    """
    def setUp(self):
        # Fresh query over a mock vertex for every test.
        self.q = Query(MockVertex())
    def test_limit(self):
        result = self.q.limit(10)._get_partial()
        self.assertEqual(result, "g.v(id).query().limit(limit)")
    def test_direction_in(self):
        result = self.q.direction(IN)._get_partial()
        self.assertEqual(result, "g.v(id).query().direction(IN)")
    def test_direction_out(self):
        result = self.q.direction(OUT)._get_partial()
        self.assertEqual(result, "g.v(id).query().direction(OUT)")
    def test_labels(self):
        result = self.q.labels('test')._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('test')")
        # ensure the original wasn't modified
        self.assertListEqual(self.q._labels, [])
    def test_2labels(self):
        result = self.q.labels('test', 'test2')._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('test', 'test2')")
    def test_object_label(self):
        # Passing an Edge class uses its derived label name.
        result = self.q.labels(MockEdge)._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('mock_edge')")
    def test_has(self):
        result = self.q.has(MockEdge.get_property_by_name("age"), 10)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('mockedge_age', v0, Query.Compare.EQUAL)")
    def test_has_double_casting(self):
        # Double properties must be cast with "as double" in the groovy text.
        result = self.q.has(MockEdge.get_property_by_name("fierceness"), 3.3)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('mockedge_fierceness', v0 as double, Query.Compare.EQUAL)")
    def test_direction_except(self):
        # Setting the direction twice must be rejected.
        with self.assertRaises(MogwaiQueryError):
            self.q.direction(OUT).direction(OUT)
    def test_has_double_casting_plain(self):
        result = self.q.has('fierceness', 3.3)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('fierceness', v0 as double, Query.Compare.EQUAL)")
    def test_has_int(self):
        result = self.q.has('age', 21, GREATER_THAN)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('age', v0, Query.Compare.GREATER_THAN)")
    def test_intervals(self):
        result = self.q.interval('age', 10, 20)._get_partial()
        self.assertEqual(result, "g.v(id).query().interval('age', v0, v1)")
    def test_double_interval(self):
        result = self.q.interval('fierceness', 2.5, 5.2)._get_partial()
self.assertEqual(result, "g.v(id).query().interval('fierceness', v0 as double, v1 as double)") | mogwai/tests/models/vertex_queries_tests.py | from __future__ import unicode_literals
from nose.plugins.attrib import attr
from mogwai.connection import MogwaiQueryError
from mogwai.tests.base import BaseMogwaiTestCase
from mogwai.models import Query, IN, OUT, Edge, Vertex, GREATER_THAN
from mogwai.properties import Integer, Double
class MockVertex(object):
    """Minimal stand-in for a vertex: only the element id is needed."""
    eid = 1
class MockVertex2(Vertex):
    """Real Vertex subclass with a single integer property."""
    age = Integer()
class MockEdge(Edge):
    """Edge with an int and a double property, used to test type casting."""
    age = Integer()
    fierceness = Double()
@attr('unit', 'query_vertex')
class SimpleQueryTest(BaseMogwaiTestCase):
    """Unit tests for the gremlin partial-query strings produced by Query.

    Each test calls a builder method and checks the groovy snippet from
    _get_partial(); no database connection is involved.
    """
    def setUp(self):
        # Fresh query over a mock vertex for every test.
        self.q = Query(MockVertex())
    def test_limit(self):
        result = self.q.limit(10)._get_partial()
        self.assertEqual(result, "g.v(id).query().limit(limit)")
    def test_direction_in(self):
        result = self.q.direction(IN)._get_partial()
        self.assertEqual(result, "g.v(id).query().direction(IN)")
    def test_direction_out(self):
        result = self.q.direction(OUT)._get_partial()
        self.assertEqual(result, "g.v(id).query().direction(OUT)")
    def test_labels(self):
        result = self.q.labels('test')._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('test')")
        # ensure the original wasn't modified
        self.assertListEqual(self.q._labels, [])
    def test_2labels(self):
        result = self.q.labels('test', 'test2')._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('test', 'test2')")
    def test_object_label(self):
        # Passing an Edge class uses its derived label name.
        result = self.q.labels(MockEdge)._get_partial()
        self.assertEqual(result, "g.v(id).query().labels('mock_edge')")
    def test_has(self):
        result = self.q.has(MockEdge.get_property_by_name("age"), 10)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('mockedge_age', v0, Query.Compare.EQUAL)")
    def test_has_double_casting(self):
        # Double properties must be cast with "as double" in the groovy text.
        result = self.q.has(MockEdge.get_property_by_name("fierceness"), 3.3)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('mockedge_fierceness', v0 as double, Query.Compare.EQUAL)")
    def test_direction_except(self):
        # Setting the direction twice must be rejected.
        with self.assertRaises(MogwaiQueryError):
            self.q.direction(OUT).direction(OUT)
    def test_has_double_casting_plain(self):
        result = self.q.has('fierceness', 3.3)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('fierceness', v0 as double, Query.Compare.EQUAL)")
    def test_has_int(self):
        result = self.q.has('age', 21, GREATER_THAN)._get_partial()
        self.assertEqual(result, "g.v(id).query().has('age', v0, Query.Compare.GREATER_THAN)")
    def test_intervals(self):
        result = self.q.interval('age', 10, 20)._get_partial()
        self.assertEqual(result, "g.v(id).query().interval('age', v0, v1)")
    def test_double_interval(self):
        result = self.q.interval('fierceness', 2.5, 5.2)._get_partial()
self.assertEqual(result, "g.v(id).query().interval('fierceness', v0 as double, v1 as double)") | 0.801392 | 0.504578 |
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
from math import ceil
import sys
sys.path.append("..")
from models import quant_module_1d as qm
import json
import torch
import pandas as pd
def TCN_network(**kwargs):
    """Factory returning the requested TEMPONet variant.

    kwargs:
        quantization: 'False' (float model), 'mix' (per-layer precisions
            read from ppg-mixed-precision.xlsx), 'mix-search'
            (differentiable precision search), or a fixed bit-width used
            for both weights and activations.
        dilations, channels: architecture hyper-parameters.
        sheet_name, cd: row selectors for the 'mix' spreadsheet lookup.
    """
    if kwargs['quantization'] == 'False':
        return TCN_network_float(dilations=kwargs['dilations'], channels=kwargs['channels'])
    elif kwargs['quantization'] == 'mix':
        dfs = pd.read_excel('ppg-mixed-precision.xlsx', sheet_name='mix-quantizations')
        # BUGFIX: select with a single combined boolean mask. The previous
        # chained indexing dfs[m1][m2] built m2 from the unfiltered frame,
        # relying on pandas reindexing the key (emits a UserWarning and is
        # fragile).
        dataset = dfs[(dfs['Name'] == kwargs['sheet_name']) & (dfs['cd'] == kwargs['cd'])]
        # Columns 2:14 hold the 12 weight bit-widths, 14:26 the activation ones.
        return TCN_network_quantized_mix(qm.QuantizedChanConv1d,
                                         wbits=dataset.values[0][2:14],
                                         abits=dataset.values[0][14:26],
                                         dilations=kwargs['dilations'],
                                         channels=kwargs['channels'],
                                         share_weight=True)
    elif kwargs['quantization'] == 'mix-search':
        return TCN_network_quantized_mix_search(qm.MixActivChanConv1d,
                                                wbits=[2, 4, 8], abits=[2, 4, 8],
                                                dilations=kwargs['dilations'],
                                                channels=kwargs['channels'],
                                                share_weight=True)
    else:
        return TCN_network_quantized(qm.QuantizedChanConv1d,
                                     abits=kwargs['quantization'], wbits=kwargs['quantization'],
                                     dilations=kwargs['dilations'], channels=kwargs['channels'])
class TCN_network_quantized_mix(BaseModel):
    """
    TEMPONet architecture with per-layer ("mixed") quantization:
    Three repeated instances of TemporalConvBlock and ConvBlock organized as follows:
    - TemporalConvBlock
    - ConvBlock
    Two instances of Regressor followed by a final Linear layer with a single neuron.

    wbits / abits are 12-element sequences with the weight / activation
    bit-width of each quantized layer, in network order.
    """
    def __init__(self, conv, wbits, abits, dilations, channels, share_weight = True, dataset_name='PPG_Dalia', dataset_args={}):
        # NOTE(review): dataset_args={} is a mutable default argument; it is
        # unused here but should become None if it is ever read or written.
        super(TCN_network_quantized_mix, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Receptive field of each temporal conv; kernel sizes below are
        # derived as ceil(rf / dilation).
        self.rf = [5, 5, 5, 9, 9,17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv,
                                   ch_in=4,
                                   ch_out=self.ch[0],
                                   k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits[0],
                                   abits=abits[0],
                                   share_weight=share_weight,
                                   first_layer = True
                                   )
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv,
                                   ch_in=self.ch[0],
                                   ch_out=self.ch[1],
                                   k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits[1],
                                   abits=abits[1],
                                   share_weight=share_weight
                                   )
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv,
                             ch_in=self.ch[1],
                             ch_out=self.ch[2],
                             k_size=k_cb0,
                             strd=1,
                             pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits[2],
                             abits=abits[2],
                             share_weight=share_weight
                             )
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv,
                                   ch_in=self.ch[2],
                                   ch_out=self.ch[3],
                                   k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits[3],
                                   abits=abits[3],
                                   share_weight=share_weight
                                   )
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv,
                                   ch_in=self.ch[3],
                                   ch_out=self.ch[4],
                                   k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits[4],
                                   abits=abits[4],
                                   share_weight=share_weight
                                   )
        self.cb1 = ConvBlock(conv,
                             ch_in=self.ch[4],
                             ch_out=self.ch[5],
                             k_size=5,
                             strd=2,
                             pad=2,
                             wbits=wbits[5],
                             abits=abits[5],
                             dilation=self.dil[5],
                             share_weight=share_weight
                             )
        # 3td instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv,
                                   ch_in=self.ch[5],
                                   ch_out=self.ch[6],
                                   k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits[6],
                                   abits=abits[6],
                                   share_weight=share_weight
                                   )
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv,
                                   ch_in=self.ch[6],
                                   ch_out=self.ch[7],
                                   k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits[7],
                                   abits=abits[7],
                                   share_weight=share_weight
                                   )
        self.cb2 = ConvBlock(conv,
                             ch_in=self.ch[7],
                             ch_out=self.ch[8],
                             k_size=5,
                             strd=4,
                             pad=4,
                             wbits=wbits[8],
                             abits=abits[8],
                             dilation=self.dil[8],
                             share_weight=share_weight
                             )
        # 1st instance of regressor
        self.regr0 = Regressor(
            ft_in=self.ch[8] * 4,
            ft_out=self.ch[9],
            wbits=wbits[9],
            abits=abits[9]
        )
        # 2nd instance of regressor
        self.regr1 = Regressor(
            ft_in=self.ch[9],
            ft_out=self.ch[10],
            wbits=wbits[10],
            abits=abits[10]
        )
        self.out_neuron = qm.QuantizedLinear(
            inplane=self.ch[10],
            outplane=1,
            wbits=wbits[11],
            abits=abits[11]
        )
    def forward(self, x):
        """Run the three conv stages, flatten, then regress to one value."""
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TCN_network_quantized(BaseModel):
    """
    TEMPONet architecture with a single fixed quantization precision:
    Three repeated instances of TemporalConvBlock and ConvBlock organized as follows:
    - TemporalConvBlock
    - ConvBlock
    Two instances of Regressor followed by a final Linear layer with a single neuron.

    wbits / abits are scalars shared by every quantized layer (contrast
    with TCN_network_quantized_mix, which takes per-layer sequences).
    """
    def __init__(self, conv, wbits, abits, dilations, channels, share_weight = True, dataset_name='PPG_Dalia', dataset_args={}):
        # NOTE(review): dataset_args={} is a mutable default argument; it is
        # unused here but should become None if it is ever read or written.
        super(TCN_network_quantized, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Receptive field of each temporal conv; kernel sizes below are
        # derived as ceil(rf / dilation).
        self.rf = [5, 5, 5, 9, 9,17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv,
                                   ch_in=4,
                                   ch_out=self.ch[0],
                                   k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight,
                                   first_layer = True
                                   )
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv,
                                   ch_in=self.ch[0],
                                   ch_out=self.ch[1],
                                   k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight
                                   )
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv,
                             ch_in=self.ch[1],
                             ch_out=self.ch[2],
                             k_size=k_cb0,
                             strd=1,
                             pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits,
                             abits=abits,
                             share_weight=share_weight
                             )
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv,
                                   ch_in=self.ch[2],
                                   ch_out=self.ch[3],
                                   k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight
                                   )
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv,
                                   ch_in=self.ch[3],
                                   ch_out=self.ch[4],
                                   k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight
                                   )
        self.cb1 = ConvBlock(conv,
                             ch_in=self.ch[4],
                             ch_out=self.ch[5],
                             k_size=5,
                             strd=2,
                             pad=2,
                             wbits=wbits,
                             abits=abits,
                             dilation=self.dil[5],
                             share_weight=share_weight
                             )
        # 3td instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv,
                                   ch_in=self.ch[5],
                                   ch_out=self.ch[6],
                                   k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight
                                   )
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv,
                                   ch_in=self.ch[6],
                                   ch_out=self.ch[7],
                                   k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits,
                                   abits=abits,
                                   share_weight=share_weight
                                   )
        self.cb2 = ConvBlock(conv,
                             ch_in=self.ch[7],
                             ch_out=self.ch[8],
                             k_size=5,
                             strd=4,
                             pad=4,
                             wbits=wbits,
                             abits=abits,
                             dilation=self.dil[8],
                             share_weight=share_weight
                             )
        # 1st instance of regressor
        self.regr0 = Regressor(
            ft_in=self.ch[8] * 4,
            ft_out=self.ch[9],
            wbits=wbits,
            abits=abits
        )
        # 2nd instance of regressor
        self.regr1 = Regressor(
            ft_in=self.ch[9],
            ft_out=self.ch[10],
            wbits=wbits,
            abits=abits
        )
        self.out_neuron = qm.QuantizedLinear(
            inplane=self.ch[10],
            outplane=1,
            wbits=wbits,
            abits=abits
        )
    def forward(self, x):
        """Run the three conv stages, flatten, then regress to one value."""
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TCN_network_float(BaseModel):
    """
    TEMPONet architecture, full-precision (float) variant:
    Three repeated instances of TemporalConvBlock and ConvBlock organized as follows:
    - TemporalConvBlock
    - ConvBlock
    Two instances of Regressor followed by a final Linear layer with a single neuron.
    """
    def __init__(self, dilations, channels, dataset_name='PPG_Dalia', dataset_args={}):
        # NOTE(review): dataset_args={} is a mutable default argument; it is
        # unused here but should become None if it is ever read or written.
        super(TCN_network_float, self).__init__()
        self.dil = dilations
        # Receptive field of each temporal conv; kernel sizes below are
        # derived as ceil(rf / dilation).
        self.rf = [5, 5, 5, 9, 9,17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock_float(
            ch_in=4,
            ch_out=self.ch[0],
            k_size=k_tcb00,
            dil=self.dil[0],
            pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2
        )
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock_float(
            ch_in=self.ch[0],
            ch_out=self.ch[1],
            k_size=k_tcb01,
            dil=self.dil[1],
            pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2
        )
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock_float(
            ch_in=self.ch[1],
            ch_out=self.ch[2],
            k_size=k_cb0,
            strd=1,
            pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
            dilation=self.dil[2]
        )
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock_float(
            ch_in=self.ch[2],
            ch_out=self.ch[3],
            k_size=k_tcb10,
            dil=self.dil[3],
            pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2
        )
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock_float(
            ch_in=self.ch[3],
            ch_out=self.ch[4],
            k_size=k_tcb11,
            dil=self.dil[4],
            pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2
        )
        self.cb1 = ConvBlock_float(
            ch_in=self.ch[4],
            ch_out=self.ch[5],
            k_size=5,
            strd=2,
            pad=2
        )
        # 3td instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock_float(
            ch_in=self.ch[5],
            ch_out=self.ch[6],
            k_size=k_tcb20,
            dil=self.dil[6],
            pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2
        )
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock_float(
            ch_in=self.ch[6],
            ch_out=self.ch[7],
            k_size=k_tcb21,
            dil=self.dil[7],
            pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2
        )
        self.cb2 = ConvBlock_float(
            ch_in=self.ch[7],
            ch_out=self.ch[8],
            k_size=5,
            strd=4,
            pad=4
        )
        # 1st instance of regressor
        self.regr0 = Regressor_float(
            ft_in=self.ch[8] * 4,
            ft_out=self.ch[9]
        )
        # 2nd instance of regressor
        self.regr1 = Regressor_float(
            ft_in=self.ch[9],
            ft_out=self.ch[10]
        )
        self.out_neuron = nn.Linear(
            in_features=self.ch[10],
            out_features=1
        )
    def forward(self, x):
        """Run the three conv stages, flatten, then regress to one value."""
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TempConvBlock_float(BaseModel):
    """
    Floating-point temporal convolution block.
    Applies, in order: dilated Conv1d (no bias) -> BatchNorm1d -> ReLU6.
    :param ch_in: number of input channels
    :param ch_out: number of output channels
    :param k_size: convolution kernel size
    :param dil: dilation factor
    :param pad: zero padding applied symmetrically
    """
    def __init__(self, ch_in, ch_out, k_size, dil, pad):
        super(TempConvBlock_float, self).__init__()
        self.tcn0 = nn.Conv1d(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=k_size,
                              dilation=dil,
                              padding=pad,
                              bias=False)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(num_features=ch_out)
    def forward(self, x):
        out = self.tcn0(x)
        out = self.bn0(out)
        return self.relu0(out)
class ConvBlock_float(BaseModel):
    """
    Floating-point strided convolution block.
    Applies, in order: Conv1d (no bias) -> AvgPool1d(2) -> BatchNorm1d -> ReLU6.
    :param ch_in: number of input channels
    :param ch_out: number of output channels
    :param k_size: convolution kernel size
    :param strd: convolution stride
    :param pad: zero padding applied symmetrically
    :param dilation: dilation factor (default 1)
    """
    def __init__(self, ch_in, ch_out, k_size, strd, pad, dilation=1):
        super(ConvBlock_float, self).__init__()
        self.conv0 = nn.Conv1d(in_channels=ch_in,
                               out_channels=ch_out,
                               kernel_size=k_size,
                               stride=strd,
                               dilation=dilation,
                               padding=pad,
                               bias=False)
        self.pool0 = nn.AvgPool1d(kernel_size=2, stride=2, padding=0)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(ch_out)
    def forward(self, x):
        out = self.conv0(x)
        out = self.pool0(out)
        out = self.bn0(out)
        return self.relu0(out)
class Regressor_float(BaseModel):
    """
    Floating-point fully connected regressor stage:
    Linear (no bias) -> BatchNorm1d -> ReLU6.
    :param ft_in: number of input features
    :param ft_out: number of output features
    """
    def __init__(self, ft_in, ft_out):
        super(Regressor_float, self).__init__()
        self.ft_in = ft_in
        self.ft_out = ft_out
        self.fc0 = nn.Linear(in_features=ft_in, out_features=ft_out, bias=False)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(num_features=ft_out)
    def forward(self, x):
        out = self.fc0(x)
        out = self.bn0(out)
        return self.relu0(out)
class TempConvBlock(BaseModel):
    """
    Quantized temporal convolution block: injected quantized Conv1d -> BatchNorm1d.
    :param conv: quantized convolution constructor to instantiate
    :param ch_in: number of input channels
    :param ch_out: number of output channels
    :param k_size: convolution kernel size
    :param dil: dilation factor
    :param pad: zero padding applied symmetrically
    :param wbits: weight bit-width(s) forwarded to ``conv``
    :param abits: activation bit-width(s) forwarded to ``conv``
    :param share_weight: forwarded to ``conv``
    :param first_layer: forwarded to ``conv`` (default False)
    """
    def __init__(self, conv, ch_in, ch_out, k_size, dil, pad,
                 wbits, abits, share_weight, first_layer=False):
        super(TempConvBlock, self).__init__()
        self.tcn0 = conv(ch_in, ch_out,
                         kernel_size=k_size,
                         dilation=dil,
                         padding=pad,
                         groups=1,
                         bias=False,
                         abits=abits,
                         wbits=wbits,
                         share_weight=share_weight,
                         first_layer=first_layer)
        self.bn0 = nn.BatchNorm1d(num_features=ch_out)
    def forward(self, x):
        return self.bn0(self.tcn0(x))
class ConvBlock(BaseModel):
    """
    Quantized strided convolution block:
    injected quantized Conv1d -> AvgPool1d(2) -> BatchNorm1d.
    :param conv: quantized convolution constructor to instantiate
    :param ch_in: number of input channels
    :param ch_out: number of output channels
    :param k_size: convolution kernel size
    :param strd: convolution stride
    :param pad: zero padding applied symmetrically
    :param wbits: weight bit-width(s) forwarded to ``conv``
    :param abits: activation bit-width(s) forwarded to ``conv``
    :param share_weight: forwarded to ``conv``
    :param dilation: dilation factor (default 1)
    """
    def __init__(self, conv, ch_in, ch_out, k_size, strd, pad,
                 wbits, abits, share_weight, dilation=1):
        super(ConvBlock, self).__init__()
        self.conv0 = conv(ch_in, ch_out,
                          kernel_size=k_size,
                          stride=strd,
                          dilation=dilation,
                          padding=pad,
                          groups=1,
                          bias=False,
                          abits=abits,
                          wbits=wbits,
                          share_weight=share_weight,
                          first_layer=False)
        self.pool0 = nn.AvgPool1d(kernel_size=2, stride=2, padding=0)
        self.bn0 = nn.BatchNorm1d(ch_out)
    def forward(self, x):
        return self.bn0(self.pool0(self.conv0(x)))
class Regressor(BaseModel):
    """
    Quantized fully connected regressor stage: QuantizedLinear -> BatchNorm1d.
    :param ft_in: number of input features
    :param ft_out: number of output features
    :param wbits: weight bit-width
    :param abits: activation bit-width
    """
    def __init__(self, ft_in, ft_out, wbits, abits):
        super(Regressor, self).__init__()
        self.ft_in = ft_in
        self.ft_out = ft_out
        self.fc0 = qm.QuantizedLinear(inplane=ft_in, outplane=ft_out,
                                      wbits=wbits, abits=abits)
        self.bn0 = nn.BatchNorm1d(num_features=ft_out)
    def forward(self, x):
        return self.bn0(self.fc0(x))
class Chomp1d(BaseModel):
    """
    Remove trailing (right-side) zero padding from a 1D feature map, making a
    padded convolution causal.
    :param chomp_size: number of trailing time steps to remove
    """
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size
    def forward(self, x):
        # Bug fix: with chomp_size == 0 the slice x[:, :, :-0] is x[:, :, :0]
        # and returns an EMPTY tensor instead of being a no-op.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TCN_network_quantized_mix_search(BaseModel):
    """
    TEMPONet architecture with differentiable mixed-precision search.
    Three repeated stages of (TempConvBlock x2 or x1, ConvBlock), then two
    Regressor instances (fixed at 8 bits) and a final float Linear with a
    single output neuron.  ``wbits``/``abits`` are the candidate bit-width
    lists handed to every searchable conv layer.
    """
    # Fix 1: final return of fetch_best_arch carried dataset-dump residue
    # ("| precision_search/model/TCN_variants.py |") that made it a syntax error.
    # Fix 2: dataset_args default changed from the shared mutable {} to None;
    # the argument is unused here and kept only for API compatibility.
    def __init__(self, conv, wbits, abits, dilations, channels,
                 share_weight=True, dataset_name='PPG_Dalia', dataset_args=None):
        super(TCN_network_quantized_mix_search, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Target receptive field of each temporal conv layer.
        self.rf = [5, 5, 5, 9, 9, 17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv, ch_in=4, ch_out=self.ch[0], k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight, first_layer=True)
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv, ch_in=self.ch[0], ch_out=self.ch[1], k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv, ch_in=self.ch[1], ch_out=self.ch[2], k_size=k_cb0,
                             strd=1, pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits, abits=abits, share_weight=share_weight)
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv, ch_in=self.ch[2], ch_out=self.ch[3], k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv, ch_in=self.ch[3], ch_out=self.ch[4], k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb1 = ConvBlock(conv, ch_in=self.ch[4], ch_out=self.ch[5], k_size=5,
                             strd=2, pad=2,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[5], share_weight=share_weight)
        # 3rd instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv, ch_in=self.ch[5], ch_out=self.ch[6], k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv, ch_in=self.ch[6], ch_out=self.ch[7], k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb2 = ConvBlock(conv, ch_in=self.ch[7], ch_out=self.ch[8], k_size=5,
                             strd=4, pad=4,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[8], share_weight=share_weight)
        # Regressor head: fixed 8-bit quantization (not searched).
        self.regr0 = Regressor(ft_in=self.ch[8] * 4, ft_out=self.ch[9], wbits=8, abits=8)
        self.regr1 = Regressor(ft_in=self.ch[9], ft_out=self.ch[10], wbits=8, abits=8)
        self.out_neuron = nn.Linear(in_features=self.ch[10], out_features=1)
    def forward(self, x):
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
    def complexity_loss(self):
        """Sum of per-layer complexity losses of the searchable conv layers,
        normalized by the first layer's size product."""
        size_product = []
        loss = 0
        for m in self.modules():
            if isinstance(m, self.conv_func):
                loss += m.complexity_loss()
                size_product += [m.size_product]
        normalizer = size_product[0].item()
        loss /= normalizer
        return loss
    def fetch_best_arch(self):
        """Collect the best per-layer bit-width assignment plus total and
        mixed-precision cost estimates over all searchable conv layers."""
        sum_bitops, sum_bita, sum_bitw = 0, 0, 0
        sum_mixbitops, sum_mixbita, sum_mixbitw = 0, 0, 0
        layer_idx = 0
        best_arch = None
        for m in self.modules():
            if isinstance(m, self.conv_func):
                layer_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = m.fetch_best_arch(layer_idx)
                if best_arch is None:
                    best_arch = layer_arch
                else:
                    # Merge: append this layer's choice under each known key.
                    for key in layer_arch.keys():
                        if key not in best_arch:
                            best_arch[key] = layer_arch[key]
                        else:
                            best_arch[key].append(layer_arch[key][0])
                sum_bitops += bitops
                sum_bita += bita
                sum_bitw += bitw
                sum_mixbitops += mixbitops
                sum_mixbita += mixbita
                sum_mixbitw += mixbitw
                layer_idx += 1
        return best_arch, sum_bitops, sum_bita, sum_bitw, sum_mixbitops, sum_mixbita, sum_mixbitw
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
from math import ceil
import sys
sys.path.append("..")
from models import quant_module_1d as qm
import json
import torch
import pandas as pd
def TCN_network(**kwargs):
    """Factory that builds the TEMPONet variant selected by kwargs['quantization'].

    'False'      -> floating-point network
    'mix'        -> fixed mixed-precision net, bit-widths read from an Excel sheet
    'mix-search' -> differentiable mixed-precision search network
    any other value is used as a uniform bit-width for weights and activations.
    """
    if kwargs['quantization'] == 'False':
        return TCN_network_float(dilations=kwargs['dilations'], channels=kwargs['channels'])
    elif kwargs['quantization'] == 'mix':
        dfs = pd.read_excel('ppg-mixed-precision.xlsx', sheet_name='mix-quantizations')
        # Fix: combine both conditions in one boolean mask.  The original
        # chained indexing dfs[m1][dfs['cd'] == ...] evaluates the second mask
        # on the full frame and relies on index alignment (UserWarning in pandas).
        dataset = dfs[(dfs['Name'] == kwargs['sheet_name']) & (dfs['cd'] == kwargs['cd'])]
        # Columns 2:14 hold the 12 weight bit-widths, 14:26 the activation ones.
        return TCN_network_quantized_mix(qm.QuantizedChanConv1d,
                                         wbits=dataset.values[0][2:14],
                                         abits=dataset.values[0][14:26],
                                         dilations=kwargs['dilations'],
                                         channels=kwargs['channels'],
                                         share_weight=True)
    elif kwargs['quantization'] == 'mix-search':
        return TCN_network_quantized_mix_search(qm.MixActivChanConv1d,
                                                wbits=[2, 4, 8], abits=[2, 4, 8],
                                                dilations=kwargs['dilations'],
                                                channels=kwargs['channels'],
                                                share_weight=True)
    else:
        return TCN_network_quantized(qm.QuantizedChanConv1d,
                                     abits=kwargs['quantization'],
                                     wbits=kwargs['quantization'],
                                     dilations=kwargs['dilations'],
                                     channels=kwargs['channels'])
class TCN_network_quantized_mix(BaseModel):
    """
    TEMPONet architecture with fixed per-layer (mixed) quantization.
    Three repeated stages of (TempConvBlock x2 or x1, ConvBlock), then two
    Regressor instances and a final quantized Linear with a single neuron.
    ``wbits``/``abits`` are 12-element sequences giving each quantized
    layer's weight/activation bit-width in network order.
    """
    # Fix: dataset_args default changed from the shared mutable {} to None;
    # the argument is unused here and kept only for API compatibility.
    def __init__(self, conv, wbits, abits, dilations, channels,
                 share_weight=True, dataset_name='PPG_Dalia', dataset_args=None):
        super(TCN_network_quantized_mix, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Target receptive field of each temporal conv layer.
        self.rf = [5, 5, 5, 9, 9, 17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv, ch_in=4, ch_out=self.ch[0], k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits[0], abits=abits[0],
                                   share_weight=share_weight, first_layer=True)
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv, ch_in=self.ch[0], ch_out=self.ch[1], k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits[1], abits=abits[1],
                                   share_weight=share_weight)
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv, ch_in=self.ch[1], ch_out=self.ch[2], k_size=k_cb0,
                             strd=1, pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits[2], abits=abits[2],
                             share_weight=share_weight)
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv, ch_in=self.ch[2], ch_out=self.ch[3], k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits[3], abits=abits[3],
                                   share_weight=share_weight)
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv, ch_in=self.ch[3], ch_out=self.ch[4], k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits[4], abits=abits[4],
                                   share_weight=share_weight)
        self.cb1 = ConvBlock(conv, ch_in=self.ch[4], ch_out=self.ch[5], k_size=5,
                             strd=2, pad=2,
                             wbits=wbits[5], abits=abits[5],
                             dilation=self.dil[5], share_weight=share_weight)
        # 3rd instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv, ch_in=self.ch[5], ch_out=self.ch[6], k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits[6], abits=abits[6],
                                   share_weight=share_weight)
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv, ch_in=self.ch[6], ch_out=self.ch[7], k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits[7], abits=abits[7],
                                   share_weight=share_weight)
        self.cb2 = ConvBlock(conv, ch_in=self.ch[7], ch_out=self.ch[8], k_size=5,
                             strd=4, pad=4,
                             wbits=wbits[8], abits=abits[8],
                             dilation=self.dil[8], share_weight=share_weight)
        # Regressor head
        self.regr0 = Regressor(ft_in=self.ch[8] * 4, ft_out=self.ch[9],
                               wbits=wbits[9], abits=abits[9])
        self.regr1 = Regressor(ft_in=self.ch[9], ft_out=self.ch[10],
                               wbits=wbits[10], abits=abits[10])
        self.out_neuron = qm.QuantizedLinear(inplane=self.ch[10], outplane=1,
                                             wbits=wbits[11], abits=abits[11])
    def forward(self, x):
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TCN_network_quantized(BaseModel):
    """
    TEMPONet architecture with uniform quantization: every quantized layer
    uses the same scalar ``wbits``/``abits``.
    Three repeated stages of (TempConvBlock x2 or x1, ConvBlock), then two
    Regressor instances and a final quantized Linear with a single neuron.
    """
    # Fix: dataset_args default changed from the shared mutable {} to None;
    # the argument is unused here and kept only for API compatibility.
    def __init__(self, conv, wbits, abits, dilations, channels,
                 share_weight=True, dataset_name='PPG_Dalia', dataset_args=None):
        super(TCN_network_quantized, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Target receptive field of each temporal conv layer.
        self.rf = [5, 5, 5, 9, 9, 17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv, ch_in=4, ch_out=self.ch[0], k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight, first_layer=True)
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv, ch_in=self.ch[0], ch_out=self.ch[1], k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv, ch_in=self.ch[1], ch_out=self.ch[2], k_size=k_cb0,
                             strd=1, pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits, abits=abits, share_weight=share_weight)
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv, ch_in=self.ch[2], ch_out=self.ch[3], k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv, ch_in=self.ch[3], ch_out=self.ch[4], k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb1 = ConvBlock(conv, ch_in=self.ch[4], ch_out=self.ch[5], k_size=5,
                             strd=2, pad=2,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[5], share_weight=share_weight)
        # 3rd instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv, ch_in=self.ch[5], ch_out=self.ch[6], k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv, ch_in=self.ch[6], ch_out=self.ch[7], k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb2 = ConvBlock(conv, ch_in=self.ch[7], ch_out=self.ch[8], k_size=5,
                             strd=4, pad=4,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[8], share_weight=share_weight)
        # Regressor head
        self.regr0 = Regressor(ft_in=self.ch[8] * 4, ft_out=self.ch[9],
                               wbits=wbits, abits=abits)
        self.regr1 = Regressor(ft_in=self.ch[9], ft_out=self.ch[10],
                               wbits=wbits, abits=abits)
        self.out_neuron = qm.QuantizedLinear(inplane=self.ch[10], outplane=1,
                                             wbits=wbits, abits=abits)
    def forward(self, x):
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TCN_network_float(BaseModel):
    """
    TEMPONet architecture, floating-point variant.
    Three repeated stages of (TempConvBlock_float x2 or x1, ConvBlock_float),
    then two Regressor_float instances and a final Linear with a single neuron.
    """
    # Fix: dataset_args default changed from the shared mutable {} to None;
    # the argument is unused here and kept only for API compatibility.
    def __init__(self, dilations, channels, dataset_name='PPG_Dalia', dataset_args=None):
        super(TCN_network_float, self).__init__()
        self.dil = dilations
        # Target receptive field of each temporal conv layer.
        self.rf = [5, 5, 5, 9, 9, 17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock_float(ch_in=4, ch_out=self.ch[0], k_size=k_tcb00,
                                         dil=self.dil[0],
                                         pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2)
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock_float(ch_in=self.ch[0], ch_out=self.ch[1], k_size=k_tcb01,
                                         dil=self.dil[1],
                                         pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2)
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock_float(ch_in=self.ch[1], ch_out=self.ch[2], k_size=k_cb0,
                                   strd=1, pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                                   dilation=self.dil[2])
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock_float(ch_in=self.ch[2], ch_out=self.ch[3], k_size=k_tcb10,
                                         dil=self.dil[3],
                                         pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2)
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock_float(ch_in=self.ch[3], ch_out=self.ch[4], k_size=k_tcb11,
                                         dil=self.dil[4],
                                         pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2)
        self.cb1 = ConvBlock_float(ch_in=self.ch[4], ch_out=self.ch[5],
                                   k_size=5, strd=2, pad=2)
        # 3rd instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock_float(ch_in=self.ch[5], ch_out=self.ch[6], k_size=k_tcb20,
                                         dil=self.dil[6],
                                         pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2)
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock_float(ch_in=self.ch[6], ch_out=self.ch[7], k_size=k_tcb21,
                                         dil=self.dil[7],
                                         pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2)
        self.cb2 = ConvBlock_float(ch_in=self.ch[7], ch_out=self.ch[8],
                                   k_size=5, strd=4, pad=4)
        # Regressor head
        self.regr0 = Regressor_float(ft_in=self.ch[8] * 4, ft_out=self.ch[9])
        self.regr1 = Regressor_float(ft_in=self.ch[9], ft_out=self.ch[10])
        self.out_neuron = nn.Linear(in_features=self.ch[10], out_features=1)
    def forward(self, x):
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
class TempConvBlock_float(BaseModel):
    """
    Floating-point temporal convolution block.
    Pipeline: dilated Conv1d (bias-free) -> BatchNorm1d -> ReLU6.
    :param ch_in: input channel count
    :param ch_out: output channel count
    :param k_size: kernel size
    :param dil: dilation factor
    :param pad: symmetric zero padding
    """
    def __init__(self, ch_in, ch_out, k_size, dil, pad):
        super(TempConvBlock_float, self).__init__()
        self.tcn0 = nn.Conv1d(in_channels=ch_in, out_channels=ch_out,
                              kernel_size=k_size, dilation=dil,
                              padding=pad, bias=False)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(num_features=ch_out)
    def forward(self, x):
        return self.relu0(self.bn0(self.tcn0(x)))
class ConvBlock_float(BaseModel):
    """
    Floating-point strided convolution block.
    Pipeline: Conv1d (bias-free) -> AvgPool1d(2) -> BatchNorm1d -> ReLU6.
    :param ch_in: input channel count
    :param ch_out: output channel count
    :param k_size: kernel size
    :param strd: convolution stride
    :param pad: symmetric zero padding
    :param dilation: dilation factor (default 1)
    """
    def __init__(self, ch_in, ch_out, k_size, strd, pad, dilation=1):
        super(ConvBlock_float, self).__init__()
        self.conv0 = nn.Conv1d(in_channels=ch_in, out_channels=ch_out,
                               kernel_size=k_size, stride=strd,
                               dilation=dilation, padding=pad, bias=False)
        self.pool0 = nn.AvgPool1d(kernel_size=2, stride=2, padding=0)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(ch_out)
    def forward(self, x):
        out = self.pool0(self.conv0(x))
        return self.relu0(self.bn0(out))
class Regressor_float(BaseModel):
    """
    Floating-point fully connected regressor stage:
    Linear (bias-free) -> BatchNorm1d -> ReLU6.
    :param ft_in: input feature count
    :param ft_out: output feature count
    """
    def __init__(self, ft_in, ft_out):
        super(Regressor_float, self).__init__()
        self.ft_in = ft_in
        self.ft_out = ft_out
        self.fc0 = nn.Linear(in_features=ft_in, out_features=ft_out, bias=False)
        self.relu0 = nn.ReLU6()
        self.bn0 = nn.BatchNorm1d(num_features=ft_out)
    def forward(self, x):
        out = self.bn0(self.fc0(x))
        return self.relu0(out)
class TempConvBlock(BaseModel):
    """
    Quantized temporal convolution block: injected quantized Conv1d -> BatchNorm1d.
    :param conv: quantized convolution constructor
    :param ch_in: input channel count
    :param ch_out: output channel count
    :param k_size: kernel size
    :param dil: dilation factor
    :param pad: symmetric zero padding
    :param wbits: weight bit-width(s) forwarded to ``conv``
    :param abits: activation bit-width(s) forwarded to ``conv``
    :param share_weight: forwarded to ``conv``
    :param first_layer: forwarded to ``conv`` (default False)
    """
    def __init__(self, conv, ch_in, ch_out, k_size, dil, pad,
                 wbits, abits, share_weight, first_layer=False):
        super(TempConvBlock, self).__init__()
        self.tcn0 = conv(ch_in, ch_out, kernel_size=k_size, dilation=dil,
                         padding=pad, groups=1, bias=False,
                         abits=abits, wbits=wbits,
                         share_weight=share_weight, first_layer=first_layer)
        self.bn0 = nn.BatchNorm1d(num_features=ch_out)
    def forward(self, x):
        out = self.tcn0(x)
        return self.bn0(out)
class ConvBlock(BaseModel):
    """
    Quantized strided convolution block:
    injected quantized Conv1d -> AvgPool1d(2) -> BatchNorm1d.
    :param conv: quantized convolution constructor
    :param ch_in: input channel count
    :param ch_out: output channel count
    :param k_size: kernel size
    :param strd: convolution stride
    :param pad: symmetric zero padding
    :param wbits: weight bit-width(s) forwarded to ``conv``
    :param abits: activation bit-width(s) forwarded to ``conv``
    :param share_weight: forwarded to ``conv``
    :param dilation: dilation factor (default 1)
    """
    def __init__(self, conv, ch_in, ch_out, k_size, strd, pad,
                 wbits, abits, share_weight, dilation=1):
        super(ConvBlock, self).__init__()
        self.conv0 = conv(ch_in, ch_out, kernel_size=k_size, stride=strd,
                          dilation=dilation, padding=pad, groups=1, bias=False,
                          abits=abits, wbits=wbits,
                          share_weight=share_weight, first_layer=False)
        self.pool0 = nn.AvgPool1d(kernel_size=2, stride=2, padding=0)
        self.bn0 = nn.BatchNorm1d(ch_out)
    def forward(self, x):
        out = self.pool0(self.conv0(x))
        return self.bn0(out)
class Regressor(BaseModel):
    """
    Quantized fully connected regressor stage: QuantizedLinear -> BatchNorm1d.
    :param ft_in: input feature count
    :param ft_out: output feature count
    :param wbits: weight bit-width
    :param abits: activation bit-width
    """
    def __init__(self, ft_in, ft_out, wbits, abits):
        super(Regressor, self).__init__()
        self.ft_in = ft_in
        self.ft_out = ft_out
        self.fc0 = qm.QuantizedLinear(inplane=ft_in, outplane=ft_out,
                                      wbits=wbits, abits=abits)
        self.bn0 = nn.BatchNorm1d(num_features=ft_out)
    def forward(self, x):
        out = self.fc0(x)
        return self.bn0(out)
class Chomp1d(BaseModel):
    """
    Remove trailing (right-side) zero padding from a 1D feature map, making a
    padded convolution causal.
    :param chomp_size: number of trailing time steps to remove
    """
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size
    def forward(self, x):
        # Bug fix: with chomp_size == 0 the slice x[:, :, :-0] is x[:, :, :0]
        # and returns an EMPTY tensor instead of being a no-op.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TCN_network_quantized_mix_search(BaseModel):
    """
    TEMPONet architecture with differentiable mixed-precision search.
    Three repeated stages of (TempConvBlock x2 or x1, ConvBlock), then two
    Regressor instances (fixed at 8 bits) and a final float Linear with a
    single output neuron.  ``wbits``/``abits`` are the candidate bit-width
    lists handed to every searchable conv layer.
    """
    # Fix 1: final return of fetch_best_arch carried trailing dataset-dump
    # residue ("| 0.72952 | ...") that made the line a syntax error.
    # Fix 2: dataset_args default changed from the shared mutable {} to None;
    # the argument is unused here and kept only for API compatibility.
    def __init__(self, conv, wbits, abits, dilations, channels,
                 share_weight=True, dataset_name='PPG_Dalia', dataset_args=None):
        super(TCN_network_quantized_mix_search, self).__init__()
        self.conv_func = conv
        self.dil = dilations
        # Target receptive field of each temporal conv layer.
        self.rf = [5, 5, 5, 9, 9, 17, 17]
        self.ch = channels
        # 1st instance of two TempConvBlocks and ConvBlock
        k_tcb00 = ceil(self.rf[0] / self.dil[0])
        self.tcb00 = TempConvBlock(conv, ch_in=4, ch_out=self.ch[0], k_size=k_tcb00,
                                   dil=self.dil[0],
                                   pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight, first_layer=True)
        k_tcb01 = ceil(self.rf[1] / self.dil[1])
        self.tcb01 = TempConvBlock(conv, ch_in=self.ch[0], ch_out=self.ch[1], k_size=k_tcb01,
                                   dil=self.dil[1],
                                   pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_cb0 = ceil(self.rf[2] / self.dil[2])
        self.cb0 = ConvBlock(conv, ch_in=self.ch[1], ch_out=self.ch[2], k_size=k_cb0,
                             strd=1, pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,
                             dilation=self.dil[2],
                             wbits=wbits, abits=abits, share_weight=share_weight)
        # 2nd instance of two TempConvBlocks and ConvBlock
        k_tcb10 = ceil(self.rf[3] / self.dil[3])
        self.tcb10 = TempConvBlock(conv, ch_in=self.ch[2], ch_out=self.ch[3], k_size=k_tcb10,
                                   dil=self.dil[3],
                                   pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb11 = ceil(self.rf[4] / self.dil[4])
        self.tcb11 = TempConvBlock(conv, ch_in=self.ch[3], ch_out=self.ch[4], k_size=k_tcb11,
                                   dil=self.dil[4],
                                   pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb1 = ConvBlock(conv, ch_in=self.ch[4], ch_out=self.ch[5], k_size=5,
                             strd=2, pad=2,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[5], share_weight=share_weight)
        # 3rd instance of TempConvBlock and ConvBlock
        k_tcb20 = ceil(self.rf[5] / self.dil[6])
        self.tcb20 = TempConvBlock(conv, ch_in=self.ch[5], ch_out=self.ch[6], k_size=k_tcb20,
                                   dil=self.dil[6],
                                   pad=((k_tcb20 - 1) * self.dil[6] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        k_tcb21 = ceil(self.rf[6] / self.dil[7])
        self.tcb21 = TempConvBlock(conv, ch_in=self.ch[6], ch_out=self.ch[7], k_size=k_tcb21,
                                   dil=self.dil[7],
                                   pad=((k_tcb21 - 1) * self.dil[7] + 1) // 2,
                                   wbits=wbits, abits=abits,
                                   share_weight=share_weight)
        self.cb2 = ConvBlock(conv, ch_in=self.ch[7], ch_out=self.ch[8], k_size=5,
                             strd=4, pad=4,
                             wbits=wbits, abits=abits,
                             dilation=self.dil[8], share_weight=share_weight)
        # Regressor head: fixed 8-bit quantization (not searched).
        self.regr0 = Regressor(ft_in=self.ch[8] * 4, ft_out=self.ch[9], wbits=8, abits=8)
        self.regr1 = Regressor(ft_in=self.ch[9], ft_out=self.ch[10], wbits=8, abits=8)
        self.out_neuron = nn.Linear(in_features=self.ch[10], out_features=1)
    def forward(self, x):
        x = self.cb0(self.tcb01(self.tcb00(x)))
        x = self.cb1(self.tcb11(self.tcb10(x)))
        x = self.cb2(self.tcb21(self.tcb20(x)))
        x = x.flatten(1)
        x = self.regr0(x)
        x = self.regr1(x)
        x = self.out_neuron(x)
        return x
    def complexity_loss(self):
        """Sum of per-layer complexity losses of the searchable conv layers,
        normalized by the first layer's size product."""
        size_product = []
        loss = 0
        for m in self.modules():
            if isinstance(m, self.conv_func):
                loss += m.complexity_loss()
                size_product += [m.size_product]
        normalizer = size_product[0].item()
        loss /= normalizer
        return loss
    def fetch_best_arch(self):
        """Collect the best per-layer bit-width assignment plus total and
        mixed-precision cost estimates over all searchable conv layers."""
        sum_bitops, sum_bita, sum_bitw = 0, 0, 0
        sum_mixbitops, sum_mixbita, sum_mixbitw = 0, 0, 0
        layer_idx = 0
        best_arch = None
        for m in self.modules():
            if isinstance(m, self.conv_func):
                layer_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = m.fetch_best_arch(layer_idx)
                if best_arch is None:
                    best_arch = layer_arch
                else:
                    # Merge: append this layer's choice under each known key.
                    for key in layer_arch.keys():
                        if key not in best_arch:
                            best_arch[key] = layer_arch[key]
                        else:
                            best_arch[key].append(layer_arch[key][0])
                sum_bitops += bitops
                sum_bita += bita
                sum_bitw += bitw
                sum_mixbitops += mixbitops
                sum_mixbita += mixbita
                sum_mixbitw += mixbitw
                layer_idx += 1
        return best_arch, sum_bitops, sum_bita, sum_bitw, sum_mixbitops, sum_mixbita, sum_mixbitw
from flask import Flask, redirect, render_template, request, url_for, session
# Fix: the ``flask.ext.*`` namespace was removed from Flask; extensions are
# imported from their own package now.
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["DEBUG"] = True
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
    username="akshu",
    password="<PASSWORD>",  # placeholder -- supply the real secret via env/config, never commit it
    hostname="akshu.mysql.pythonanywhere-services.com",
    databasename="akshu$comments",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
# Recycle pooled connections before PythonAnywhere's 5-minute idle timeout.
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
db = SQLAlchemy(app)
class Detail(db.Model):
    """ORM row holding sign-up data and gift-card payment details."""
    __tablename__ = "details"
    id = db.Column(db.Integer, primary_key=True)
    message = db.Column(db.String(4096))
    name = db.Column(db.String(100))
    email = db.Column(db.String(100))
    # SECURITY NOTE(review): password and card data are stored in plain text;
    # the password should be hashed and the CVV never persisted at all.
    password = db.Column(db.String(100))
    cardtype = db.Column(db.String(100))
    # Fix: the attribute name was missing ("= db.Column(...)" is a syntax
    # error).  The giftcard handler constructs Detail(cardnumber=...), so the
    # column is named accordingly.
    cardnumber = db.Column(db.String(100))
    cvv = db.Column(db.String(100))
    expmonth = db.Column(db.String(100))
    expyear = db.Column(db.String(100))
@app.route('/giftcard.html', methods=["GET", "POST"])
def wibble():
    """Render the gift-card form on GET; persist the submitted card on POST."""
    if request.method == "GET":
        return render_template("giftcard.html")
    # NOTE(review): raw card number and CVV are written to the database --
    # never acceptable in a real payment flow.
    form = request.form
    names = Detail(name=form["name"],
                   email=form["email"],
                   cardtype=form["cardtype"],
                   cardnumber=form["enccardnumber"],
                   cvv=form["enccvv"],
                   expmonth=form["expmonth"],
                   expyear=form["expyear"])
    db.session.add(names)
    db.session.commit()
    return render_template("payment.html")
@app.route("/signup.html", methods=["GET", "POST"])
def sign():
    """Render the sign-up form on GET; store the new account on POST."""
    if request.method == "GET":
        return render_template("signup.html")
    form = request.form
    # NOTE(review): the password is persisted unhashed -- confirm and fix upstream.
    names1 = Detail(name=form["name"], email=form["email"], password=form["password"])
    db.session.add(names1)
    db.session.commit()
    return render_template("Thanks.html")
@app.route("/Thanks.html", methods=["GET", "POST"])
def thanks():
    """Thank-you page.

    Fix: the original returned a response only for GET, so a POST fell
    through and returned None (a 500 in Flask).  Render unconditionally.
    """
    return render_template("Thanks.html")
@app.route("/", methods=["GET", "POST"])
def index():
    """Landing page.

    Fixes: trailing dataset-dump residue ("| flask_app.py |") made the
    return line a syntax error, and POST requests fell through returning
    None.  Render unconditionally.
    """
    return render_template("index.html")
from flask import Flask, redirect, render_template, request, url_for, session
# Fix: the ``flask.ext.*`` namespace was removed from Flask; extensions are
# imported from their own package now.
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["DEBUG"] = True
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
    username="akshu",
    password="<PASSWORD>",  # placeholder -- supply the real secret via env/config, never commit it
    hostname="akshu.mysql.pythonanywhere-services.com",
    databasename="akshu$comments",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
# Recycle pooled connections before PythonAnywhere's 5-minute idle timeout.
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
db = SQLAlchemy(app)
class Detail(db.Model):
    """ORM row holding sign-up data and gift-card payment details."""
    __tablename__ = "details"
    id = db.Column(db.Integer, primary_key=True)
    message = db.Column(db.String(4096))
    name = db.Column(db.String(100))
    email = db.Column(db.String(100))
    # SECURITY NOTE(review): password and card data are stored in plain text;
    # the password should be hashed and the CVV never persisted at all.
    password = db.Column(db.String(100))
    cardtype = db.Column(db.String(100))
    # Fix: the attribute name was missing ("= db.Column(...)" is a syntax
    # error).  The giftcard handler constructs Detail(cardnumber=...), so the
    # column is named accordingly.
    cardnumber = db.Column(db.String(100))
    cvv = db.Column(db.String(100))
    expmonth = db.Column(db.String(100))
    expyear = db.Column(db.String(100))
@app.route('/giftcard.html', methods=["GET", "POST"])
def wibble():
    """Render the gift-card form on GET; persist the submitted card on POST."""
    if request.method == "GET":
        return render_template("giftcard.html")
    # NOTE(review): raw card number and CVV are written to the database --
    # never acceptable in a real payment flow.
    form = request.form
    names = Detail(name=form["name"],
                   email=form["email"],
                   cardtype=form["cardtype"],
                   cardnumber=form["enccardnumber"],
                   cvv=form["enccvv"],
                   expmonth=form["expmonth"],
                   expyear=form["expyear"])
    db.session.add(names)
    db.session.commit()
    return render_template("payment.html")
@app.route("/signup.html", methods=["GET", "POST"])
def sign():
    """Render the sign-up form on GET; store the new account on POST."""
    if request.method == "GET":
        return render_template("signup.html")
    form = request.form
    # NOTE(review): the password is persisted unhashed -- confirm and fix upstream.
    names1 = Detail(name=form["name"], email=form["email"], password=form["password"])
    db.session.add(names1)
    db.session.commit()
    return render_template("Thanks.html")
@app.route("/Thanks.html", methods=["GET", "POST"])
def thanks():
if request.method == "GET":
return render_template("Thanks.html")
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "GET":
return render_template("index.html") | 0.30054 | 0.046141 |
import numpy as np
from gd import *
from sgd import *
from costs import *
def least_squares_GD(y, tx, initial_w,
                     max_it, gamma, verbose=False):
    """Fit linear regression by full-batch gradient descent (MSE loss).

    Returns the final weight vector together with the last recorded loss.
    """
    loss_history, weight_history = gradient_descent(
        y=y, tx=tx, initial_w=initial_w,
        max_iters=max_it, gamma=gamma, verbose=verbose)
    # Last iterate is the fitted model.
    return weight_history[-1], loss_history[-1]
def least_squares_SGD(y, tx, initial_w,
                      max_iters, gamma, verbose=False):
    """Fit linear regression by stochastic gradient descent (MSE loss).

    Uses batch_size=1 as required by the project instructions, and reports
    the MSE of the final weights rather than the last mini-batch loss.
    """
    _, weight_history = stochastic_gradient_descent(
        y=y, tx=tx, initial_w=initial_w, max_iters=max_iters,
        gamma=gamma, batch_size=1, verbose=verbose)
    final_w = weight_history[-1]
    return final_w, mse(y, tx, final_w)
def least_squares(y, tx):
    """Closed-form linear-regression fit via the normal equations.

    Solves (X^T X) w = X^T y and returns (w, MSE of w on the data).
    """
    gram = tx.T @ tx
    rhs = tx.T @ y
    w = np.linalg.solve(gram, rhs)
    return w, mse(y, tx, w)
def ridge_regression(y, tx, lambda_):
    """Closed-form ridge-regression fit via the regularized normal equations.

    The penalty is scaled by 2N so that lambda_ follows the usual course
    convention; returns (w, MSE of w on the data).
    """
    n_samples, n_features = tx.shape
    regularizer = lambda_ * 2 * n_samples * np.eye(n_features)
    w = np.linalg.solve(tx.T @ tx + regularizer, tx.T @ y)
    return w, mse(y, tx, w)
def logistic_regression(y, tx, initial_w, max_iters,
                        gamma, batch_size=None, verbose=False):
    """Logistic regression via (stochastic) gradient descent.

    A truthy batch_size selects the stochastic solver; otherwise full-batch
    gradient descent runs. Returns the final weights and their logistic error.
    """
    common_kwargs = dict(y=y, tx=tx, initial_w=initial_w,
                         max_iters=max_iters, gamma=gamma, verbose=verbose)
    if batch_size:
        _, weight_history = stochastic_gradient_descent_logistic(
            batch_size=batch_size, **common_kwargs)
    else:
        _, weight_history = gradient_descent_logistic(**common_kwargs)
    final_w = weight_history[-1]
    return final_w, logistic_error(y, tx, final_w)
def reg_logistic_regression(y, tx, lambda_, reg, initial_w,
                            max_iters, gamma, verbose=False,
                            early_stopping=True, tol = 0.0001,
                            patience = 5):
    """Regularized logistic regression via gradient descent.

    Forwards all optimisation knobs (including early stopping with the given
    tolerance and patience) to the solver; returns the final weights and the
    last recorded loss.
    """
    loss_history, weight_history = reg_gradient_descent_logistic(
        y=y, tx=tx, initial_w=initial_w,
        max_iters=max_iters, gamma=gamma,
        lambda_=lambda_, reg=reg, verbose=verbose,
        early_stopping=early_stopping, tol=tol, patience=patience)
    return weight_history[-1], loss_history[-1]
from gd import *
from sgd import *
from costs import *


# NOTE(review): duplicate copy of implementations.py from the dataset's
# parsed_code column; tokens kept verbatim, indentation restored, comments
# added. The leading "import numpy as np" was fused onto the previous dataset
# row by the dump -- confirm against the original file.
def least_squares_GD(y, tx, initial_w,
                     max_it, gamma, verbose=False):
    """Linear Regression with Gradient Descent
    Uses Mean Squared Error as the loss function.
    """
    losses, ws = gradient_descent(
        y=y,
        tx=tx,
        initial_w=initial_w,
        max_iters=max_it,
        gamma=gamma,
        verbose=verbose
    )
    # Final iterate and the loss recorded for it.
    return ws[-1], losses[-1]


def least_squares_SGD(y, tx, initial_w,
                      max_iters, gamma, verbose=False):
    """Linear regression with Stochastic Gradient Descent (SGD)
    Current implementation uses Mean Squared Error as the loss.
    """
    # Use batch_size = 1 as per the project instructions.
    losses, ws = stochastic_gradient_descent(
        y=y,
        tx=tx,
        initial_w=initial_w,
        max_iters=max_iters,
        gamma=gamma,
        batch_size=1,
        verbose=verbose
    )
    # Report the full-data MSE of the final weights, not the last batch loss.
    return ws[-1], mse(y, tx, ws[-1])


def least_squares(y, tx):
    """Linear regression fit using normal equations."""
    a = tx.T @ tx
    b = tx.T @ y
    w = np.linalg.solve(a, b)
    loss = mse(y, tx, w)
    return w, loss


def ridge_regression(y, tx, lambda_):
    """ Ridge regression fit using normal equations """
    # Penalty scaled by 2N so lambda_ matches the course convention.
    a = (tx.T @ tx) + lambda_*2*tx.shape[0] * np.eye(tx.shape[1])
    b = tx.T @ y
    w = np.linalg.solve(a, b)
    return w, mse(y, tx, w)


def logistic_regression(y, tx, initial_w, max_iters,
                        gamma, batch_size=None, verbose=False):
    """ Logistic regression with gradient descent or stochastic gradient descent"""
    # A truthy batch_size selects the stochastic solver.
    if batch_size:
        losses, ws = stochastic_gradient_descent_logistic(
            y=y,
            tx=tx,
            initial_w=initial_w,
            batch_size=batch_size,
            max_iters=max_iters,
            gamma=gamma,
            verbose=verbose
        )
    else:
        losses, ws = gradient_descent_logistic(
            y=y,
            tx=tx,
            initial_w=initial_w,
            max_iters=max_iters,
            gamma=gamma,
            verbose=verbose
        )
    return ws[-1], logistic_error(y, tx, ws[-1])


def reg_logistic_regression(y, tx, lambda_, reg, initial_w,
                            max_iters, gamma, verbose=False,
                            early_stopping=True, tol = 0.0001,
                            patience = 5):
    """ Regularized logistic regression with gradient descent"""
    losses, ws = reg_gradient_descent_logistic(
        y=y,
        tx=tx,
        initial_w=initial_w,
        max_iters=max_iters,
        gamma=gamma,
        lambda_=lambda_,
        reg=reg,
        verbose=verbose,
        early_stopping=early_stopping,
        tol=tol,
        patience=patience
    )
    return ws[-1], losses[-1]
import grpc
from .....exabel.api.management.v1 import user_service_pb2 as exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2
# Generated gRPC bindings (do not edit by hand); regenerate from the .proto.
class UserServiceStub(object):
    """Service to manage users and groups.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One callable attribute per RPC; (de)serializers come from the
        # generated user_service_pb2 module.
        self.ListGroups = channel.unary_unary('/exabel.api.management.v1.UserService/ListGroups', request_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.SerializeToString, response_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.FromString)
        self.ListUsers = channel.unary_unary('/exabel.api.management.v1.UserService/ListUsers', request_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.SerializeToString, response_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.FromString)


class UserServiceServicer(object):
    """Service to manage users and groups.
    """

    def ListGroups(self, request, context):
        """List all groups. Only groups for the current customer is returned.
        """
        # Default server-side stub: reports UNIMPLEMENTED until overridden.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListUsers(self, request, context):
        """List all users in the current customer.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_UserServiceServicer_to_server(servicer, server):
    # Register the servicer's handlers under the fully-qualified service name.
    rpc_method_handlers = {'ListGroups': grpc.unary_unary_rpc_method_handler(servicer.ListGroups, request_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.FromString, response_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.SerializeToString), 'ListUsers': grpc.unary_unary_rpc_method_handler(servicer.ListUsers, request_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.FromString, response_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('exabel.api.management.v1.UserService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


class UserService(object):
    """Service to manage users and groups.
    """

    @staticmethod
    def ListGroups(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Experimental one-shot client API (no persistent stub required).
        return grpc.experimental.unary_unary(request, target, '/exabel.api.management.v1.UserService/ListGroups', exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.SerializeToString, exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListUsers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/exabel.api.management.v1.UserService/ListUsers', exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.SerializeToString, exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
from .....exabel.api.management.v1 import user_service_pb2 as exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2
# NOTE(review): duplicate copy of user_service_pb2_grpc.py from the dataset's
# parsed_code column; generated gRPC bindings, kept verbatim with indentation
# restored.
class UserServiceStub(object):
    """Service to manage users and groups.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One callable attribute per RPC, wired to the pb2 (de)serializers.
        self.ListGroups = channel.unary_unary('/exabel.api.management.v1.UserService/ListGroups', request_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.SerializeToString, response_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.FromString)
        self.ListUsers = channel.unary_unary('/exabel.api.management.v1.UserService/ListUsers', request_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.SerializeToString, response_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.FromString)


class UserServiceServicer(object):
    """Service to manage users and groups.
    """

    def ListGroups(self, request, context):
        """List all groups. Only groups for the current customer is returned.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListUsers(self, request, context):
        """List all users in the current customer.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_UserServiceServicer_to_server(servicer, server):
    # Register the servicer's handlers under the fully-qualified service name.
    rpc_method_handlers = {'ListGroups': grpc.unary_unary_rpc_method_handler(servicer.ListGroups, request_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.FromString, response_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.SerializeToString), 'ListUsers': grpc.unary_unary_rpc_method_handler(servicer.ListUsers, request_deserializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.FromString, response_serializer=exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('exabel.api.management.v1.UserService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


class UserService(object):
    """Service to manage users and groups.
    """

    @staticmethod
    def ListGroups(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/exabel.api.management.v1.UserService/ListGroups', exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsRequest.SerializeToString, exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListGroupsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListUsers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/exabel.api.management.v1.UserService/ListUsers', exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersRequest.SerializeToString, exabel_dot_api_dot_management_dot_v1_dot_user__service__pb2.ListUsersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
import sys
import cPickle
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from data import get_data, reload_file
from classifiers import create_classifier, calculate_model_accuracy
__author__ = "<NAME> and <NAME>, based on code by <NAME> for COMPSCI 270, Spring 2017, Duke University"
__copyright__ = "<NAME> and <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "Creative Commons Attribution-NonCommercial 4.0 International License"
__version__ = "1.0.0"
__email__ = "<EMAIL>"
def classifier(print_option=False):
'''
Main Function. Creates a classifier
'''
# Create data train/test split
data_train, data_test, target_train,target_test = get_data(range(2002,2017),custom=False)
# Create 2017 test dataset
data_test_2017, target_test_2017, matchups_2017 = get_data([2017],custom=True)
model_types = ['decision_tree', 'knn', 'gaussian_nb', 'random_forest']
for model_type in model_types:
if model_type == 'random_forest':
f = open('classifier/rf_best_3.pkl', 'rb')
sys.stdout.flush()
model = cPickle.load(f)
print model
else:
model = create_classifier(model_type)
# Fit the data to the model
model.fit(data_train, target_train)
# Predict using the fit model
predict_train = predict_with_model(model, data_train)
predict_test = predict_with_model(model, data_test_2017)
print; print "=" * 15,; print " Predicting using " + str(model_type) + ' classifier ',; print "=" * 15
if print_option:
for matchup,target,predict in zip(matchups_2017,target_test_2017,predict_test):
print str(matchup) + " Actual: " + str(target) + " Predicted: " + str(predict),
if int(matchup[1]) > int(matchup[3]) and int(target) == 0 or int(matchup[3]) > int(matchup[1]) and int(target) == 1:
print " <-- Upset!",
print
sys.stdout.flush()
accuracy_train, accuracy_test = calculate_model_accuracy(predict_train, predict_test, target_train, target_test_2017)
print('Training accuracy: {0:3f}, Accuracy on 2017 Tournament: {1:3f}'.format(accuracy_train, accuracy_test))
print
sys.stdout.flush()
return model, predict_train, predict_test, accuracy_train, accuracy_test
def predict_with_model(model,data):
return model.predict(data)
def split_dataset(data, target, train_size=0.8):
'''
Splits the provided data and targets into training and test sets
'''
data_train, data_test, target_train, target_test = train_test_split(data, target, train_size=train_size, random_state=0)
return data_train, data_test, target_train, target_test
if __name__ == '__main__':
if len(sys.argv) < 2:
raise ValueError('No arguments provided')
elif sys.argv[1] == 'train_test':
classifier()
elif sys.argv[1] == 'bracket17':
run_custom_bracket() | main.py | import sys
import cPickle
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from data import get_data, reload_file
from classifiers import create_classifier, calculate_model_accuracy

# NOTE(review): duplicate copy of main.py (Python 2) from the dataset's
# parsed_code column. The leading "import sys" was fused onto the previous
# dataset row, yet sys is used below -- confirm against the original file.
__author__ = "<NAME> and <NAME>, based on code by <NAME> for COMPSCI 270, Spring 2017, Duke University"
__copyright__ = "<NAME> and <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
               "<NAME>", "<NAME>"]
__license__ = "Creative Commons Attribution-NonCommercial 4.0 International License"
__version__ = "1.0.0"
__email__ = "<EMAIL>"


def classifier(print_option=False):
    '''
    Main Function. Creates a classifier
    '''
    # Create data train/test split
    data_train, data_test, target_train,target_test = get_data(range(2002,2017),custom=False)
    # Create 2017 test dataset
    data_test_2017, target_test_2017, matchups_2017 = get_data([2017],custom=True)
    model_types = ['decision_tree', 'knn', 'gaussian_nb', 'random_forest']
    for model_type in model_types:
        if model_type == 'random_forest':
            # NOTE(review): this handle is never closed -- should be a with-block.
            f = open('classifier/rf_best_3.pkl', 'rb')
            sys.stdout.flush()
            model = cPickle.load(f)
            print model
        else:
            model = create_classifier(model_type)
        # Fit the data to the model
        # NOTE(review): indentation was lost in the dump; fit() is assumed to
        # apply to every model type -- confirm against the original source.
        model.fit(data_train, target_train)
        # Predict using the fit model
        predict_train = predict_with_model(model, data_train)
        predict_test = predict_with_model(model, data_test_2017)
        print; print "=" * 15,; print " Predicting using " + str(model_type) + ' classifier ',; print "=" * 15
        if print_option:
            for matchup,target,predict in zip(matchups_2017,target_test_2017,predict_test):
                print str(matchup) + " Actual: " + str(target) + " Predicted: " + str(predict),
                if int(matchup[1]) > int(matchup[3]) and int(target) == 0 or int(matchup[3]) > int(matchup[1]) and int(target) == 1:
                    print " <-- Upset!",
                print
            sys.stdout.flush()
        accuracy_train, accuracy_test = calculate_model_accuracy(predict_train, predict_test, target_train, target_test_2017)
        print('Training accuracy: {0:3f}, Accuracy on 2017 Tournament: {1:3f}'.format(accuracy_train, accuracy_test))
        print
        sys.stdout.flush()
    return model, predict_train, predict_test, accuracy_train, accuracy_test


def predict_with_model(model,data):
    # Thin wrapper around model.predict.
    return model.predict(data)


def split_dataset(data, target, train_size=0.8):
    '''
    Splits the provided data and targets into training and test sets
    '''
    data_train, data_test, target_train, target_test = train_test_split(data, target, train_size=train_size, random_state=0)
    return data_train, data_test, target_train, target_test


if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise ValueError('No arguments provided')
    elif sys.argv[1] == 'train_test':
        classifier()
    elif sys.argv[1] == 'bracket17':
        # NOTE(review): run_custom_bracket is not defined in this module.
        run_custom_bracket()
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
import pprint
import six
# Generated Bitmovin SDK model; keep in sync with the API spec generator.
class Keyframe(BitmovinResource):
    @poscheck_model
    def __init__(self,
                 id_=None,
                 name=None,
                 description=None,
                 created_at=None,
                 modified_at=None,
                 custom_data=None,
                 time=None,
                 segment_cut=None):
        # type: (string_types, string_types, string_types, datetime, datetime, dict, float, bool) -> None
        super(Keyframe, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data)
        self._time = None
        self._segment_cut = None
        self.discriminator = None
        # Route through the property setters so the type checks run.
        if time is not None:
            self.time = time
        if segment_cut is not None:
            self.segment_cut = segment_cut

    @property
    def openapi_types(self):
        # Parent model's OpenAPI type map extended with this model's fields.
        types = {}
        if hasattr(super(Keyframe, self), 'openapi_types'):
            types = getattr(super(Keyframe, self), 'openapi_types')
        types.update({
            'time': 'float',
            'segment_cut': 'bool'
        })
        return types

    @property
    def attribute_map(self):
        # Maps python attribute names to their JSON (camelCase) field names.
        attributes = {}
        if hasattr(super(Keyframe, self), 'attribute_map'):
            attributes = getattr(super(Keyframe, self), 'attribute_map')
        attributes.update({
            'time': 'time',
            'segment_cut': 'segmentCut'
        })
        return attributes

    @property
    def time(self):
        # type: () -> float
        """Gets the time of this Keyframe.
        Time in seconds where the keyframe should be inserted (required)
        :return: The time of this Keyframe.
        :rtype: float
        """
        return self._time

    @time.setter
    def time(self, time):
        # type: (float) -> None
        """Sets the time of this Keyframe.
        Time in seconds where the keyframe should be inserted (required)
        :param time: The time of this Keyframe.
        :type: float
        """
        if time is not None:
            if not isinstance(time, (float, int)):
                raise TypeError("Invalid type for `time`, type has to be `float`")
        self._time = time

    @property
    def segment_cut(self):
        # type: () -> bool
        """Gets the segment_cut of this Keyframe.
        Instructs the encoder to cut the segment at this position
        :return: The segment_cut of this Keyframe.
        :rtype: bool
        """
        return self._segment_cut

    @segment_cut.setter
    def segment_cut(self, segment_cut):
        # type: (bool) -> None
        """Sets the segment_cut of this Keyframe.
        Instructs the encoder to cut the segment at this position
        :param segment_cut: The segment_cut of this Keyframe.
        :type: bool
        """
        if segment_cut is not None:
            if not isinstance(segment_cut, bool):
                raise TypeError("Invalid type for `segment_cut`, type has to be `bool`")
        self._segment_cut = segment_cut

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        if hasattr(super(Keyframe, self), "to_dict"):
            result = super(Keyframe, self).to_dict()
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Unset fields and empty lists are omitted from the payload.
            if value is None:
                continue
            if isinstance(value, list):
                if len(value) == 0:
                    continue
                result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
            elif hasattr(value, "to_dict"):
                result[self.attribute_map.get(attr)] = value.to_dict()
            elif isinstance(value, Enum):
                result[self.attribute_map.get(attr)] = value.value
            elif isinstance(value, dict):
                result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
            else:
                result[self.attribute_map.get(attr)] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Keyframe):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
import pprint
import six
# NOTE(review): duplicate copy of keyframe.py from the dataset's parsed_code
# column; generated SDK model kept verbatim with indentation restored.
class Keyframe(BitmovinResource):
    @poscheck_model
    def __init__(self,
                 id_=None,
                 name=None,
                 description=None,
                 created_at=None,
                 modified_at=None,
                 custom_data=None,
                 time=None,
                 segment_cut=None):
        # type: (string_types, string_types, string_types, datetime, datetime, dict, float, bool) -> None
        super(Keyframe, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data)
        self._time = None
        self._segment_cut = None
        self.discriminator = None
        if time is not None:
            self.time = time
        if segment_cut is not None:
            self.segment_cut = segment_cut

    @property
    def openapi_types(self):
        # Parent model's OpenAPI type map extended with this model's fields.
        types = {}
        if hasattr(super(Keyframe, self), 'openapi_types'):
            types = getattr(super(Keyframe, self), 'openapi_types')
        types.update({
            'time': 'float',
            'segment_cut': 'bool'
        })
        return types

    @property
    def attribute_map(self):
        # Maps python attribute names to their JSON (camelCase) field names.
        attributes = {}
        if hasattr(super(Keyframe, self), 'attribute_map'):
            attributes = getattr(super(Keyframe, self), 'attribute_map')
        attributes.update({
            'time': 'time',
            'segment_cut': 'segmentCut'
        })
        return attributes

    @property
    def time(self):
        # type: () -> float
        """Gets the time of this Keyframe.
        Time in seconds where the keyframe should be inserted (required)
        :return: The time of this Keyframe.
        :rtype: float
        """
        return self._time

    @time.setter
    def time(self, time):
        # type: (float) -> None
        """Sets the time of this Keyframe.
        Time in seconds where the keyframe should be inserted (required)
        :param time: The time of this Keyframe.
        :type: float
        """
        if time is not None:
            if not isinstance(time, (float, int)):
                raise TypeError("Invalid type for `time`, type has to be `float`")
        self._time = time

    @property
    def segment_cut(self):
        # type: () -> bool
        """Gets the segment_cut of this Keyframe.
        Instructs the encoder to cut the segment at this position
        :return: The segment_cut of this Keyframe.
        :rtype: bool
        """
        return self._segment_cut

    @segment_cut.setter
    def segment_cut(self, segment_cut):
        # type: (bool) -> None
        """Sets the segment_cut of this Keyframe.
        Instructs the encoder to cut the segment at this position
        :param segment_cut: The segment_cut of this Keyframe.
        :type: bool
        """
        if segment_cut is not None:
            if not isinstance(segment_cut, bool):
                raise TypeError("Invalid type for `segment_cut`, type has to be `bool`")
        self._segment_cut = segment_cut

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        if hasattr(super(Keyframe, self), "to_dict"):
            result = super(Keyframe, self).to_dict()
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if value is None:
                continue
            if isinstance(value, list):
                if len(value) == 0:
                    continue
                result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
            elif hasattr(value, "to_dict"):
                result[self.attribute_map.get(attr)] = value.to_dict()
            elif isinstance(value, Enum):
                result[self.attribute_map.get(attr)] = value.value
            elif isinstance(value, dict):
                result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
            else:
                result[self.attribute_map.get(attr)] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Keyframe):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
from datetime import timedelta as td

from django.conf import settings
from django.conf.urls.defaults import url, patterns, include
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import get_object_or_404

from canvas.exceptions import ServiceError, ValidationError
from canvas.models import Content
from canvas.upload import api_upload, chunk_uploads
from canvas.view_guards import require_staff, require_POST, require_user
from drawquest import knobs, models, economy, api_forms
from drawquest.api_decorators import api_decorator
from drawquest.apps.drawquest_auth.details_models import PrivateUserDetails
from drawquest.apps.drawquest_auth.models import User
from drawquest.apps.palettes.models import user_palettes, palettes_hash
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests.models import current_quest_details, completed_quest_ids
from drawquest.api_cache import cached_api
from drawquest import signals
from website.apps.share_tracking.models import ShareTrackingUrl

# URL table: one hand-routed endpoint plus the per-app API includes.
urls = patterns('',
    url(r'^quest_comments/flag', 'apps.comments.api.flag_comment'),
)
urls += patterns('drawquest.api',
    url(r'^activity/', include('apps.activity.api')),
    url(r'^auth/', include('drawquest.apps.drawquest_auth.api')),
    url(r'^chunk/', include(chunk_uploads)),
    url(r'^following/', include('drawquest.apps.following.api')),
    url(r'^iap/', include('drawquest.apps.iap.api')),
    url(r'^palettes/', include('drawquest.apps.palettes.api')),
    url(r'^playback/', include('drawquest.apps.playback.api')),
    url(r'^push_notifications/', include('drawquest.apps.push_notifications.api')),
    url(r'^quest_comments/', include('drawquest.apps.quest_comments.api')),
    url(r'^quests/', include('drawquest.apps.quests.api')),
    url(r'^stars/', include('drawquest.apps.stars.api')),
    url(r'^submit_quest/', include('drawquest.apps.submit_quest.api')),
    url(r'^timeline/', include('drawquest.apps.timeline.api')),
    url(r'^tumblr/', include('drawquest.apps.tumblr.api')),
    url(r'^upload$', api_upload),
    url(r'^whitelisting/', include('drawquest.apps.whitelisting.api')),
    # Only used for the admin.
    url(r'^comment/', include('apps.comments.api')),
    # Disabled for now for perf.
    #url(r'^', include('apps.analytics.api')),
)
api = api_decorator(urls)


@api('metric/record')
def metric_record(request, name, info=None):
    """ Currently a no-op. """
    # BUG FIX: default was a shared mutable dict (info={}); use None instead.


@api('economy/rewards')
@cached_api(key=['reward_amounts', sum(knobs.REWARDS.values())])
def rewards(request):
    """Static coin-reward table; cache key changes with the knob values."""
    return {'rewards': knobs.REWARDS}


@api('user/profile')
def user_profile(request, username):
    """Public profile for `username`, personalized for the viewer."""
    return models.user_profile_for_viewer(username, viewer=request.user)


@api('user/change_profile')
@require_user
def change_profile(request, old_password=None, new_password=None, new_email=None, bio=None):
    """Update bio, email and/or password for the authenticated user.

    BUG FIX: the dump contained `<PASSWORD>` redaction placeholders where the
    password values belonged (a SyntaxError); restored `new_password=None`
    and the PasswordChangeForm field values below.
    """
    if bio is not None:
        request.user.userinfo.bio_text = bio
        request.user.userinfo.save()
        request.user.details.force()
    if new_email is not None:
        if not User.validate_email(new_email):
            raise ValidationError({'new_email': "Please enter a valid email address."})
        if request.user.email != new_email:
            if not User.email_is_unused(new_email):
                raise ValidationError({'new_email': "Sorry! That email address is already being used for an account."})
            request.user.email = new_email
            request.user.save()
            request.user.details.force()
    if old_password is not None and new_password is not None:
        if not User.validate_password(new_password):
            raise ValidationError({
                'new_password': "Sorry, your new password is too short. "
                                "Please use {} or more characters.".format(User.MINIMUM_PASSWORD_LENGTH),
            })
        form = PasswordChangeForm(user=request.user, data={
            'old_password': old_password,
            'new_password1': new_password,
            'new_password2': new_password,
        })
        api_forms.validate(form)
        form.save()
        request.user.details.force()


@api('user/change_avatar')
@require_user
def change_avatar(request, content_id):
    """Set the user's avatar to an existing Content object."""
    user_info = request.user.userinfo
    user_info.avatar = get_object_or_404(Content, id=content_id)
    user_info.save()
    # Re-fetch so the cached details reflect the new avatar.
    user = User.objects.get(id=request.user.id)
    user.details.force()


@api('create_email_invite_url')
def create_email_invite_url(request):
    """Build an app-download link for email invites (tracked when signed in)."""
    #TODO iTunes URL
    url = 'http://example.com/download'
    if request.user.is_authenticated():
        sharer = request.user
        share = ShareTrackingUrl.create(sharer, url, 'email')
        url = share.url_for_channel()
    return {'invite_url': url}


@api('realtime/sync')
def realtime_sync(request):
    """Realtime channel descriptors for the requesting user."""
    return {'channels': models.realtime_sync(request.user)}


@api('share/create_for_channel')
def share_create_for_channel(request, comment_id, channel):
    """Tracked share URL for a quest comment on the given channel."""
    comment = get_object_or_404(QuestComment, id=comment_id)
    url = comment.get_share_page_url_with_tracking(request.user, channel, request=request)
    if channel == 'facebook':
        # Facebook requires an absolute URL.
        url = 'http://example.com' + url
    return {
        'share_url': url,
    }


@api('economy/balance')
@require_user
def coin_balance(request):
    """Current coin balance for the authenticated user."""
    return {'balance': economy.balance(request.user)}


@api('heavy_state_sync')
def heavy_state_sync(request):
    """One-shot state payload the client pulls at launch; extra fields for
    authenticated users."""
    ret = {
        'realtime_sync': models.realtime_sync(request.user),
        'user_palettes': user_palettes(request.user),
        'current_quest': current_quest_details(),
        'onboarding_quest_id': knobs.ONBOARDING_QUEST_ID,
    }
    if request.user.is_authenticated():
        ret.update({
            'user_email': request.user.email,
            'user_profile': models.user_profile(request.user.username),
            'balance': economy.balance(request.user),
            'completed_quest_ids': completed_quest_ids(request.user),
        })
    return ret
from django.conf import settings
from django.conf.urls.defaults import url, patterns, include
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import get_object_or_404

# NOTE(review): duplicate copy of drawquest api.py from the dataset's
# parsed_code column; its leading "from datetime import timedelta as td" was
# fused onto the previous dataset row -- confirm against the original file.
from canvas.exceptions import ServiceError, ValidationError
from canvas.models import Content
from canvas.upload import api_upload, chunk_uploads
from canvas.view_guards import require_staff, require_POST, require_user
from drawquest import knobs, models, economy, api_forms
from drawquest.api_decorators import api_decorator
from drawquest.apps.drawquest_auth.details_models import PrivateUserDetails
from drawquest.apps.drawquest_auth.models import User
from drawquest.apps.palettes.models import user_palettes, palettes_hash
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests.models import current_quest_details, completed_quest_ids
from drawquest.api_cache import cached_api
from drawquest import signals
from website.apps.share_tracking.models import ShareTrackingUrl

# URL table: one hand-routed endpoint plus the per-app API includes.
urls = patterns('',
    url(r'^quest_comments/flag', 'apps.comments.api.flag_comment'),
)
urls += patterns('drawquest.api',
    url(r'^activity/', include('apps.activity.api')),
    url(r'^auth/', include('drawquest.apps.drawquest_auth.api')),
    url(r'^chunk/', include(chunk_uploads)),
    url(r'^following/', include('drawquest.apps.following.api')),
    url(r'^iap/', include('drawquest.apps.iap.api')),
    url(r'^palettes/', include('drawquest.apps.palettes.api')),
    url(r'^playback/', include('drawquest.apps.playback.api')),
    url(r'^push_notifications/', include('drawquest.apps.push_notifications.api')),
    url(r'^quest_comments/', include('drawquest.apps.quest_comments.api')),
    url(r'^quests/', include('drawquest.apps.quests.api')),
    url(r'^stars/', include('drawquest.apps.stars.api')),
    url(r'^submit_quest/', include('drawquest.apps.submit_quest.api')),
    url(r'^timeline/', include('drawquest.apps.timeline.api')),
    url(r'^tumblr/', include('drawquest.apps.tumblr.api')),
    url(r'^upload$', api_upload),
    url(r'^whitelisting/', include('drawquest.apps.whitelisting.api')),
    # Only used for the admin.
    url(r'^comment/', include('apps.comments.api')),
    # Disabled for now for perf.
    #url(r'^', include('apps.analytics.api')),
)
api = api_decorator(urls)


@api('metric/record')
def metric_record(request, name, info={}):
    """ Currently a no-op. """
    # NOTE(review): info={} is a shared mutable default -- harmless while the
    # body is empty, but should become info=None if this ever gains logic.


@api('economy/rewards')
@cached_api(key=['reward_amounts', sum(knobs.REWARDS.values())])
def rewards(request):
    # Cache key changes whenever the reward knob values change.
    return {'rewards': knobs.REWARDS}


@api('user/profile')
def user_profile(request, username):
    # Public profile for `username`, personalized for the viewer.
    return models.user_profile_for_viewer(username, viewer=request.user)
@api('user/change_profile')
@require_user
def change_profile(request, old_password=None, new_password=<PASSWORD>, new_email=None, bio=None):
if bio is not None:
request.user.userinfo.bio_text = bio
request.user.userinfo.save()
request.user.details.force()
if new_email is not None:
if not User.validate_email(new_email):
raise ValidationError({'new_email': "Please enter a valid email address."})
if request.user.email != new_email:
if not User.email_is_unused(new_email):
raise ValidationError({'new_email': "Sorry! That email address is already being used for an account."})
request.user.email = new_email
request.user.save()
request.user.details.force()
if old_password is not None and new_password is not None:
if not User.validate_password(new_password):
raise ValidationError({
'new_password': "Sorry, your new password is too short. "
"Please use {} or more characters.".format(User.MINIMUM_PASSWORD_LENGTH),
})
form = PasswordChangeForm(user=request.user, data={
'old_password': <PASSWORD>_password,
'new_password1': <PASSWORD>,
'new_password2': <PASSWORD>,
})
api_forms.validate(form)
form.save()
request.user.details.force()
@api('user/change_avatar')
@require_user
def change_avatar(request, content_id):
user_info = request.user.userinfo
user_info.avatar = get_object_or_404(Content, id=content_id)
user_info.save()
user = User.objects.get(id=request.user.id)
user.details.force()
@api('create_email_invite_url')
def create_email_invite_url(request):
#TODO iTunes URL
url = 'http://example.com/download'
if request.user.is_authenticated():
sharer = request.user
share = ShareTrackingUrl.create(sharer, url, 'email')
url = share.url_for_channel()
return {'invite_url': url}
@api('realtime/sync')
def realtime_sync(request):
return {'channels': models.realtime_sync(request.user)}
@api('share/create_for_channel')
def share_create_for_channel(request, comment_id, channel):
comment = get_object_or_404(QuestComment, id=comment_id)
url = comment.get_share_page_url_with_tracking(request.user, channel, request=request)
if channel == 'facebook':
url = 'http://example.com' + url
return {
'share_url': url,
}
@api('economy/balance')
@require_user
def coin_balance(request):
return {'balance': economy.balance(request.user)}
@api('heavy_state_sync')
def heavy_state_sync(request):
ret = {
'realtime_sync': models.realtime_sync(request.user),
'user_palettes': user_palettes(request.user),
'current_quest': current_quest_details(),
'onboarding_quest_id': knobs.ONBOARDING_QUEST_ID,
}
if request.user.is_authenticated():
ret.update({
'user_email': request.user.email,
'user_profile': models.user_profile(request.user.username),
'balance': economy.balance(request.user),
'completed_quest_ids': completed_quest_ids(request.user),
})
return ret | 0.340595 | 0.054374 |
import inspect
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import Union
from dataclasses import dataclass
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src.tree_util import tree_add_scalar_mul
from jaxopt._src.tree_util import tree_l2_norm
from jaxopt._src.tree_util import tree_sub
class MirrorDescentState(NamedTuple):
"""Named tuple containing state information."""
iter_num: int
error: float
aux: Optional[Any] = None
@dataclass(eq=False)
class MirrorDescent(base.IterativeSolver):
"""Mirror descent solver.
This solver minimizes:
argmin_x fun(x, *args, **kwargs),
where fun is smooth with convex domain.
The stopping criterion is:
||x - projection_grad(x, g, 1.0, hyperparams_proj)||_2 <= tol,
where ``g = grad(fun)(x, *args, **kwargs)``.
Attributes:
fun: a smooth function of the form ``fun(x, *args, **kwargs)``.
projection_grad: a function of the form
``projection_grad(x, g, stepsize, hyperparams_proj)`` representing the
mirror descent update for iterate x and gradient g. Optionally, it can be
instantiated from a projection and mapping function (mirror map) using the
method `make_projection_grad`.
stepsize: a stepsize to use, or a callable specifying the stepsize to use at
each iteration.
maxiter: maximum number of mirror descent iterations.
tol: tolerance to use.
verbose: whether to print error on every iteration or not. verbose=True will
automatically disable jit.
implicit_diff: whether to enable implicit diff or autodiff of unrolled
iterations.
implicit_diff_solve: the linear system solver to use.
has_aux: whether function fun outputs one (False) or more values (True).
When True it will be assumed by default that fun(...)[0] is the objective.
jit: whether to JIT-compile the optimization loop (default: "auto").
unroll: whether to unroll the optimization loop (default: "auto").
References:
Nemirovskij, <NAME>, and <NAME>. "Problem
complexity and method efficiency in optimization." J. Wiley @ Sons, New
York(1983).
"""
fun: Callable
projection_grad: Optional[Callable]
stepsize: Union[float, Callable]
maxiter: int = 500
tol: float = 1e-2
verbose: int = 0
implicit_diff: bool = True
implicit_diff_solve: Optional[Callable] = None
has_aux: bool = False
jit: base.AutoOrBoolean = "auto"
unroll: base.AutoOrBoolean = "auto"
@staticmethod
def make_projection_grad(projection: Callable,
mapping_fun: Callable) -> Callable:
"""Instantiates `projection_grad` argument from projection and mirror map.
Args:
projection: projection operator of the form
``projection(x, hyperparams_proj)``, typically
``argmin_z D_{gen_fun}(z, mapping_fun^{-1}(y))``.
mapping_fun: the mirror map, typically of the form
``mapping_fun = grad(gen_fun)``, where `gen_fun` is the generating
function of the Bregman divergence.
Returns:
A function `projection_grad(x, g, stepsize, hyperparams_proj)`
representing the mirror descent update for iterate x and gradient g.
"""
def projection_grad(x, x_fun_grad, stepsize, hyperparams_proj):
update = tree_add_scalar_mul(mapping_fun(x), -stepsize, x_fun_grad)
return projection(update, hyperparams_proj)
return projection_grad
def init_state(self,
init_params: Any,
hyperparams_proj: Any,
*args,
**kwargs) -> base.OptStep:
"""Initialize the solver state.
Args:
init_params: pytree containing the initial parameters.
Returns:
state
"""
del hyperparams_proj, args, kwargs # Not used.
return MirrorDescentState(iter_num=jnp.asarray(0),
error=jnp.asarray(jnp.inf))
def _error(self, x, x_fun_grad, hyperparams_proj):
next_x = self.projection_grad(x, x_fun_grad, 1.0, hyperparams_proj)
diff_x = tree_sub(next_x, x)
return tree_l2_norm(diff_x)
def _stepsize(self, iter_num):
if isinstance(self.stepsize, Callable):
return self.stepsize(iter_num)
return self.stepsize
def _update(self, x, state, hyperparams_proj, args, kwargs):
iter_num = state.iter_num
stepsize = self._stepsize(iter_num)
x_fun_grad, aux = self._grad_with_aux(x, *args, **kwargs)
next_x = self.projection_grad(x, x_fun_grad, stepsize, hyperparams_proj)
error = self._error(x, x_fun_grad, hyperparams_proj)
next_state = MirrorDescentState(iter_num=iter_num + 1, error=error, aux=aux)
return base.OptStep(params=next_x, state=next_state)
def update(self,
params: Any,
state: NamedTuple,
hyperparams_proj: Any,
*args,
**kwargs) -> base.OptStep:
"""Performs one iteration of mirror descent.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
hyperparams_proj: pytree containing hyperparameters of projection.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
(params, state)
"""
return self._update(params, state, hyperparams_proj, args, kwargs)
def run(self,
init_params: Any,
hyperparams_proj: Optional[Any] = None,
*args,
**kwargs) -> base.OptStep:
return super().run(init_params, hyperparams_proj, *args, **kwargs)
def _fixed_point_fun(self, sol, hyperparams_proj, args, kwargs):
sol_fun_grad, _ = self._grad_with_aux(sol, *args, **kwargs)
return self.projection_grad(sol, sol_fun_grad, 1.0, hyperparams_proj)
def optimality_fun(self, sol, hyperparams_proj, *args, **kwargs):
"""Optimality function mapping compatible with ``@custom_root``."""
fp = self._fixed_point_fun(sol, hyperparams_proj, args, kwargs)
return tree_sub(fp, sol)
def __post_init__(self):
if self.has_aux:
fun_with_aux = self.fun
else:
fun_with_aux = lambda *a, **kw: (self.fun(*a, **kw), None)
self._grad_with_aux = jax.grad(fun_with_aux, has_aux=True)
# Sets up reference signature.
fun = getattr(self.fun, "subfun", self.fun)
signature = inspect.signature(fun)
parameters = list(signature.parameters.values())
new_param = inspect.Parameter(name="hyperparams_proj",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
parameters.insert(1, new_param)
self.reference_signature = inspect.Signature(parameters) | jaxopt/_src/mirror_descent.py | import inspect
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import Union
from dataclasses import dataclass
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src.tree_util import tree_add_scalar_mul
from jaxopt._src.tree_util import tree_l2_norm
from jaxopt._src.tree_util import tree_sub
class MirrorDescentState(NamedTuple):
"""Named tuple containing state information."""
iter_num: int
error: float
aux: Optional[Any] = None
@dataclass(eq=False)
class MirrorDescent(base.IterativeSolver):
"""Mirror descent solver.
This solver minimizes:
argmin_x fun(x, *args, **kwargs),
where fun is smooth with convex domain.
The stopping criterion is:
||x - projection_grad(x, g, 1.0, hyperparams_proj)||_2 <= tol,
where ``g = grad(fun)(x, *args, **kwargs)``.
Attributes:
fun: a smooth function of the form ``fun(x, *args, **kwargs)``.
projection_grad: a function of the form
``projection_grad(x, g, stepsize, hyperparams_proj)`` representing the
mirror descent update for iterate x and gradient g. Optionally, it can be
instantiated from a projection and mapping function (mirror map) using the
method `make_projection_grad`.
stepsize: a stepsize to use, or a callable specifying the stepsize to use at
each iteration.
maxiter: maximum number of mirror descent iterations.
tol: tolerance to use.
verbose: whether to print error on every iteration or not. verbose=True will
automatically disable jit.
implicit_diff: whether to enable implicit diff or autodiff of unrolled
iterations.
implicit_diff_solve: the linear system solver to use.
has_aux: whether function fun outputs one (False) or more values (True).
When True it will be assumed by default that fun(...)[0] is the objective.
jit: whether to JIT-compile the optimization loop (default: "auto").
unroll: whether to unroll the optimization loop (default: "auto").
References:
Nemirovskij, <NAME>, and <NAME>. "Problem
complexity and method efficiency in optimization." J. Wiley @ Sons, New
York(1983).
"""
fun: Callable
projection_grad: Optional[Callable]
stepsize: Union[float, Callable]
maxiter: int = 500
tol: float = 1e-2
verbose: int = 0
implicit_diff: bool = True
implicit_diff_solve: Optional[Callable] = None
has_aux: bool = False
jit: base.AutoOrBoolean = "auto"
unroll: base.AutoOrBoolean = "auto"
@staticmethod
def make_projection_grad(projection: Callable,
mapping_fun: Callable) -> Callable:
"""Instantiates `projection_grad` argument from projection and mirror map.
Args:
projection: projection operator of the form
``projection(x, hyperparams_proj)``, typically
``argmin_z D_{gen_fun}(z, mapping_fun^{-1}(y))``.
mapping_fun: the mirror map, typically of the form
``mapping_fun = grad(gen_fun)``, where `gen_fun` is the generating
function of the Bregman divergence.
Returns:
A function `projection_grad(x, g, stepsize, hyperparams_proj)`
representing the mirror descent update for iterate x and gradient g.
"""
def projection_grad(x, x_fun_grad, stepsize, hyperparams_proj):
update = tree_add_scalar_mul(mapping_fun(x), -stepsize, x_fun_grad)
return projection(update, hyperparams_proj)
return projection_grad
def init_state(self,
init_params: Any,
hyperparams_proj: Any,
*args,
**kwargs) -> base.OptStep:
"""Initialize the solver state.
Args:
init_params: pytree containing the initial parameters.
Returns:
state
"""
del hyperparams_proj, args, kwargs # Not used.
return MirrorDescentState(iter_num=jnp.asarray(0),
error=jnp.asarray(jnp.inf))
def _error(self, x, x_fun_grad, hyperparams_proj):
next_x = self.projection_grad(x, x_fun_grad, 1.0, hyperparams_proj)
diff_x = tree_sub(next_x, x)
return tree_l2_norm(diff_x)
def _stepsize(self, iter_num):
if isinstance(self.stepsize, Callable):
return self.stepsize(iter_num)
return self.stepsize
def _update(self, x, state, hyperparams_proj, args, kwargs):
iter_num = state.iter_num
stepsize = self._stepsize(iter_num)
x_fun_grad, aux = self._grad_with_aux(x, *args, **kwargs)
next_x = self.projection_grad(x, x_fun_grad, stepsize, hyperparams_proj)
error = self._error(x, x_fun_grad, hyperparams_proj)
next_state = MirrorDescentState(iter_num=iter_num + 1, error=error, aux=aux)
return base.OptStep(params=next_x, state=next_state)
def update(self,
params: Any,
state: NamedTuple,
hyperparams_proj: Any,
*args,
**kwargs) -> base.OptStep:
"""Performs one iteration of mirror descent.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
hyperparams_proj: pytree containing hyperparameters of projection.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
(params, state)
"""
return self._update(params, state, hyperparams_proj, args, kwargs)
def run(self,
init_params: Any,
hyperparams_proj: Optional[Any] = None,
*args,
**kwargs) -> base.OptStep:
return super().run(init_params, hyperparams_proj, *args, **kwargs)
def _fixed_point_fun(self, sol, hyperparams_proj, args, kwargs):
sol_fun_grad, _ = self._grad_with_aux(sol, *args, **kwargs)
return self.projection_grad(sol, sol_fun_grad, 1.0, hyperparams_proj)
def optimality_fun(self, sol, hyperparams_proj, *args, **kwargs):
"""Optimality function mapping compatible with ``@custom_root``."""
fp = self._fixed_point_fun(sol, hyperparams_proj, args, kwargs)
return tree_sub(fp, sol)
def __post_init__(self):
if self.has_aux:
fun_with_aux = self.fun
else:
fun_with_aux = lambda *a, **kw: (self.fun(*a, **kw), None)
self._grad_with_aux = jax.grad(fun_with_aux, has_aux=True)
# Sets up reference signature.
fun = getattr(self.fun, "subfun", self.fun)
signature = inspect.signature(fun)
parameters = list(signature.parameters.values())
new_param = inspect.Parameter(name="hyperparams_proj",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
parameters.insert(1, new_param)
self.reference_signature = inspect.Signature(parameters) | 0.950146 | 0.54952 |
import tensorflow as tf
import argparse
import os
from contextlib import nullcontext
import yaml
from tqdm import tqdm
from animate import animate
from reconstruction import reconstruction
from utils import load_image_video_pair, save_video, save_visualization, load_models_direct, load_models_savedmodel, load_models_tflite, save_frames_png
from frames_dataset import FramesDataset, DatasetRepeater, PairedDataset
parser = argparse.ArgumentParser(description="Run inference")
parser.add_argument('--target', choices=['direct', 'savedmodel', 'tflite'], default='direct',
help="model version to run (between running the model directly, running the model's saved_model, and running its converted tflite")
parser.add_argument('--mode', choices=['animate', 'reconstruction',], default='animate', help="Run mode (animate, reconstruct)")
parser.add_argument('--datamode', choices=['file', 'dataset'], default='file', help='Data input mode (CLI-given file or config-defined dataset)')
parser.add_argument("--model", action="store", type=str, default="vox", help="model name")
parser.add_argument("--source_image", action="store", type=str, default="example/source.png", help="source image path for file datamode")
parser.add_argument("--driving_video", action="store", type=str, default="example/driving.mp4", help="driving video path for file datamode")
parser.add_argument("--output", action="store", type=str, default="example/output", help="output file name")
parser.add_argument("--dontappend", action="store_true", help="don't append format name and .mp4 to the output filename")
parser.add_argument("--relative", action="store_true", help="relative kp mode")
parser.add_argument("--adapt", dest="adapt_movement_scale", action="store_true", help="adapt movement to the proportion between the sizes of subjects in the input image and the driving video")
parser.add_argument("--prescale", dest="prescale", action="store_true", help="Reuse the result of AntiAliasInterpolation2d performed in kp_detector in the dense motion network")
parser.add_argument("--frames", type=int, default=-1, help="number of frames to process")
parser.add_argument("--batchsize", dest="batch_size", type=int, default=4, help="batch size")
parser.add_argument("--exactbatch", dest="exact_batch", action="store_true", help="force static batch size, tile source image to batch size")
parser.add_argument("--float16", action="store_true", help="use fp16 precision")
parser.add_argument("--device", dest="device", default=None, help="device to use")
parser.add_argument("--profile", action="store_true", help="enable tensorboard profiling")
parser.add_argument("--visualizer", action="store_true", help="enable visualizer, only relevant for dataset datamode")
parser.add_argument('--loadwithtorch', action="store_true",
help="use torch to load checkpoints instead of trying to load tensor buffers manually (requires pytorch)")
parser = parser.parse_args()
if parser.float16:
tf.keras.backend.set_floatx('float16')
if parser.loadwithtorch:
import load_torch_checkpoint
load_torch_checkpoint.mode = 'torch'
context = tf.device(parser.device) if parser.device is not None else nullcontext()
if parser.profile:
tf.debugging.set_log_device_placement(True)
load_funcs = {'direct':load_models_direct, 'savedmodel':load_models_savedmodel, 'tflite':load_models_tflite}
config_path = f"config/{parser.model}-256.yaml"
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
frame_shape = config['dataset_params']['frame_shape']
num_channels = config['model_params']['common_params']['num_channels']
with context:
kp_detector, process_kp_driving, generator, _interpreter_obj_list = load_funcs[parser.target](parser.model, prediction_only=parser.datamode=='file', static_batch_size = None if not parser.exact_batch else parser.batch_size, hardcode='1' + str(int(parser.adapt_movement_scale)), prescale=parser.prescale)
format_appends = {'direct':'', 'savedmodel':'.savedmodel', 'tflite':'.tflite'}
if parser.mode == 'animate':
if parser.datamode == 'file':
source_image, frames, fps = load_image_video_pair(parser.source_image, parser.driving_video, frames=parser.frames, frame_shape=frame_shape, num_channels=num_channels)
predictions, _ = animate(source_image, frames, generator, kp_detector, process_kp_driving,
parser.relative, parser.relative, parser.adapt_movement_scale,
batch_size=parser.batch_size, prescale=parser.prescale, exact_batch=parser.exact_batch, profile=parser.profile)
output = parser.output
if not parser.dontappend:
output = output + format_appends[parser.target] + '.mp4'
save_video(output, predictions, fps=fps)
else:
outdir = './log/' + parser.model
if not parser.dontappend:
outdir = outdir + format_appends[parser.target]
if not os.path.exists(outdir):
os.mkdir(outdir)
dataset = FramesDataset(**config['dataset_params'])
dataset = PairedDataset(initial_dataset=dataset, number_of_pairs=config['animate_params']['num_pairs'])
visualizer_params = config['visualizer_params'] if parser.visualizer else None
for idx, pair in tqdm(enumerate(dataset)):
source_image, frames = pair['source_video'][0][None], pair['driving_video']
predictions, visualizations = animate(source_image, frames, generator, kp_detector, process_kp_driving,
batch_size=1, exact_batch=parser.exact_batch, profile=parser.profile, visualizer_params=visualizer_params,
**config['animate_params']['normalization_params'])
result_name = f'{idx}_{pair["source_name"]}_{pair["driving_name"]}.png'
full_outdir = outdir + '/' + result_name
save_frames_png(full_outdir, predictions)
if visualizations is not None:
image_name = result_name + config['animate_params']['format']
visualization_filename = outdir + '/' + image_name
save_visualization(visualization_filename, visualizations)
elif parser.mode == 'reconstruction':
outdir = './log/' + parser.model + '_reconstruction'
if not parser.dontappend:
outdir = outdir + format_appends[parser.target]
if not os.path.exists(outdir):
os.mkdir(outdir)
dataset = FramesDataset(**config['dataset_params'])
visualizer_params = config['visualizer_params'] if parser.visualizer else None
loss_list = []
for idx, data in tqdm(enumerate(dataset)):
if config['reconstruction_params']['num_videos'] is not None:
if idx > config['reconstruction_params']['num_videos']:
break
predictions, visualizations, loss = reconstruction(data['video'], generator, kp_detector,
profile=parser.profile, visualizer_params=visualizer_params,
)
result_name = f'{idx}_{data["name"]}.png'
full_outdir = outdir + '/' + result_name
save_frames_png(full_outdir, predictions)
if len(visualizations) != 0:
image_name = result_name + config['reconstruction_params']['format']
visualization_filename = outdir + '/' + image_name
save_visualization(visualization_filename, visualizations)
loss_list.append(loss)
print("Reconstruction loss: {}".format(sum(loss_list)/len(loss_list)))
print("Done.") | run.py | import tensorflow as tf
import argparse
import os
from contextlib import nullcontext
import yaml
from tqdm import tqdm
from animate import animate
from reconstruction import reconstruction
from utils import load_image_video_pair, save_video, save_visualization, load_models_direct, load_models_savedmodel, load_models_tflite, save_frames_png
from frames_dataset import FramesDataset, DatasetRepeater, PairedDataset
parser = argparse.ArgumentParser(description="Run inference")
parser.add_argument('--target', choices=['direct', 'savedmodel', 'tflite'], default='direct',
help="model version to run (between running the model directly, running the model's saved_model, and running its converted tflite")
parser.add_argument('--mode', choices=['animate', 'reconstruction',], default='animate', help="Run mode (animate, reconstruct)")
parser.add_argument('--datamode', choices=['file', 'dataset'], default='file', help='Data input mode (CLI-given file or config-defined dataset)')
parser.add_argument("--model", action="store", type=str, default="vox", help="model name")
parser.add_argument("--source_image", action="store", type=str, default="example/source.png", help="source image path for file datamode")
parser.add_argument("--driving_video", action="store", type=str, default="example/driving.mp4", help="driving video path for file datamode")
parser.add_argument("--output", action="store", type=str, default="example/output", help="output file name")
parser.add_argument("--dontappend", action="store_true", help="don't append format name and .mp4 to the output filename")
parser.add_argument("--relative", action="store_true", help="relative kp mode")
parser.add_argument("--adapt", dest="adapt_movement_scale", action="store_true", help="adapt movement to the proportion between the sizes of subjects in the input image and the driving video")
parser.add_argument("--prescale", dest="prescale", action="store_true", help="Reuse the result of AntiAliasInterpolation2d performed in kp_detector in the dense motion network")
parser.add_argument("--frames", type=int, default=-1, help="number of frames to process")
parser.add_argument("--batchsize", dest="batch_size", type=int, default=4, help="batch size")
parser.add_argument("--exactbatch", dest="exact_batch", action="store_true", help="force static batch size, tile source image to batch size")
parser.add_argument("--float16", action="store_true", help="use fp16 precision")
parser.add_argument("--device", dest="device", default=None, help="device to use")
parser.add_argument("--profile", action="store_true", help="enable tensorboard profiling")
parser.add_argument("--visualizer", action="store_true", help="enable visualizer, only relevant for dataset datamode")
parser.add_argument('--loadwithtorch', action="store_true",
help="use torch to load checkpoints instead of trying to load tensor buffers manually (requires pytorch)")
parser = parser.parse_args()
if parser.float16:
tf.keras.backend.set_floatx('float16')
if parser.loadwithtorch:
import load_torch_checkpoint
load_torch_checkpoint.mode = 'torch'
context = tf.device(parser.device) if parser.device is not None else nullcontext()
if parser.profile:
tf.debugging.set_log_device_placement(True)
load_funcs = {'direct':load_models_direct, 'savedmodel':load_models_savedmodel, 'tflite':load_models_tflite}
config_path = f"config/{parser.model}-256.yaml"
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
frame_shape = config['dataset_params']['frame_shape']
num_channels = config['model_params']['common_params']['num_channels']
with context:
kp_detector, process_kp_driving, generator, _interpreter_obj_list = load_funcs[parser.target](parser.model, prediction_only=parser.datamode=='file', static_batch_size = None if not parser.exact_batch else parser.batch_size, hardcode='1' + str(int(parser.adapt_movement_scale)), prescale=parser.prescale)
format_appends = {'direct':'', 'savedmodel':'.savedmodel', 'tflite':'.tflite'}
if parser.mode == 'animate':
if parser.datamode == 'file':
source_image, frames, fps = load_image_video_pair(parser.source_image, parser.driving_video, frames=parser.frames, frame_shape=frame_shape, num_channels=num_channels)
predictions, _ = animate(source_image, frames, generator, kp_detector, process_kp_driving,
parser.relative, parser.relative, parser.adapt_movement_scale,
batch_size=parser.batch_size, prescale=parser.prescale, exact_batch=parser.exact_batch, profile=parser.profile)
output = parser.output
if not parser.dontappend:
output = output + format_appends[parser.target] + '.mp4'
save_video(output, predictions, fps=fps)
else:
outdir = './log/' + parser.model
if not parser.dontappend:
outdir = outdir + format_appends[parser.target]
if not os.path.exists(outdir):
os.mkdir(outdir)
dataset = FramesDataset(**config['dataset_params'])
dataset = PairedDataset(initial_dataset=dataset, number_of_pairs=config['animate_params']['num_pairs'])
visualizer_params = config['visualizer_params'] if parser.visualizer else None
for idx, pair in tqdm(enumerate(dataset)):
source_image, frames = pair['source_video'][0][None], pair['driving_video']
predictions, visualizations = animate(source_image, frames, generator, kp_detector, process_kp_driving,
batch_size=1, exact_batch=parser.exact_batch, profile=parser.profile, visualizer_params=visualizer_params,
**config['animate_params']['normalization_params'])
result_name = f'{idx}_{pair["source_name"]}_{pair["driving_name"]}.png'
full_outdir = outdir + '/' + result_name
save_frames_png(full_outdir, predictions)
if visualizations is not None:
image_name = result_name + config['animate_params']['format']
visualization_filename = outdir + '/' + image_name
save_visualization(visualization_filename, visualizations)
elif parser.mode == 'reconstruction':
outdir = './log/' + parser.model + '_reconstruction'
if not parser.dontappend:
outdir = outdir + format_appends[parser.target]
if not os.path.exists(outdir):
os.mkdir(outdir)
dataset = FramesDataset(**config['dataset_params'])
visualizer_params = config['visualizer_params'] if parser.visualizer else None
loss_list = []
for idx, data in tqdm(enumerate(dataset)):
if config['reconstruction_params']['num_videos'] is not None:
if idx > config['reconstruction_params']['num_videos']:
break
predictions, visualizations, loss = reconstruction(data['video'], generator, kp_detector,
profile=parser.profile, visualizer_params=visualizer_params,
)
result_name = f'{idx}_{data["name"]}.png'
full_outdir = outdir + '/' + result_name
save_frames_png(full_outdir, predictions)
if len(visualizations) != 0:
image_name = result_name + config['reconstruction_params']['format']
visualization_filename = outdir + '/' + image_name
save_visualization(visualization_filename, visualizations)
loss_list.append(loss)
print("Reconstruction loss: {}".format(sum(loss_list)/len(loss_list)))
print("Done.") | 0.552057 | 0.097562 |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import DetailView, ListView
from foodgram.settings import RECIPES_PAGINATE_BY
from .forms import RecipeForm
from .models import Ingredient, Recipe, RecipeIngredient, User
from .utils import get_ingredients
class IndexListView(ListView):
    """Main page: a paginated list of all recipes, optionally
    narrowed down by the tag slugs passed in the ``filters`` query
    parameter.
    """
    paginate_by = RECIPES_PAGINATE_BY
    template_name = 'index.html'
    context_object_name = 'index'

    def get_queryset(self):
        queryset = Recipe.objects.all()
        selected_tags = self.request.GET.getlist('filters')
        if not selected_tags:
            return queryset
        # distinct() is required: a recipe matching several tags would
        # otherwise appear once per matching tag.
        return queryset.filter(tags__slug__in=selected_tags).distinct().all()
class FollowListView(LoginRequiredMixin, ListView):
    """Subscriptions page: the authors the current user follows."""
    paginate_by = RECIPES_PAGINATE_BY
    template_name = 'follow.html'
    context_object_name = 'follow'

    def get_queryset(self):
        # Collect the ids of every followed author, then resolve them
        # to User rows in a single query.
        author_ids = self.request.user.follower.all().values_list(
            'author_id', flat=True)
        return User.objects.filter(id__in=list(author_ids))
class FavoriteListView(LoginRequiredMixin, ListView):
    """Favorites page: the current user's favorite recipes,
    optionally narrowed down by the tag slugs in ``filters``.
    """
    paginate_by = RECIPES_PAGINATE_BY
    template_name = 'favorite.html'
    context_object_name = 'favorite'

    def get_queryset(self):
        favorite_ids = self.request.user.favorites.all().values_list(
            'recipe_id', flat=True)
        queryset = Recipe.objects.filter(id__in=list(favorite_ids))
        selected_tags = self.request.GET.getlist('filters')
        if not selected_tags:
            return queryset
        # distinct() avoids duplicates when a recipe matches several tags.
        return queryset.filter(tags__slug__in=selected_tags).distinct().all()
class ShoppingListView(LoginRequiredMixin, ListView):
    """Shopping-list page: the recipes the current user queued for
    purchase (no pagination, no tag filtering).
    """
    template_name = 'shopping_list.html'
    context_object_name = 'shopping_list'

    def get_queryset(self):
        queued_ids = self.request.user.shopper.all().values_list(
            'recipe_id', flat=True)
        return Recipe.objects.filter(id__in=list(queued_ids))
class ProfileListView(ListView):
    """Author profile page: recipes by one author, optionally tag-filtered."""
    paginate_by = RECIPES_PAGINATE_BY
    template_name = 'profile.html'
    context_object_name = 'profile'

    def _get_author(self):
        # 404 when the username taken from the URL does not exist.
        return get_object_or_404(User, username=self.kwargs.get('username'))

    def get_queryset(self):
        selected_tags = self.request.GET.getlist('filters')
        queryset = Recipe.objects.filter(author=self._get_author())
        if not selected_tags:
            return queryset
        return queryset.filter(tags__slug__in=selected_tags).distinct().all()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The template needs the author object itself, not only the recipes.
        context['author'] = self._get_author()
        return context
class RecipeDetailView(DetailView):
    """Recipe detail page: renders a single Recipe instance."""
    model = Recipe
    template_name = 'recipe.html'
@login_required
def shoplist_download(request):
    """Download the user's shopping list as a plain-text attachment.

    Aggregates the ingredients of every recipe in the user's shopping
    list, summing the amounts of identical ingredients, and returns the
    result as ``ShoppingList.txt``.
    """
    user = request.user
    shopping = user.shopper.all().values_list('recipe_id', flat=True)
    # One row per distinct ingredient, amount summed over all recipes
    # currently in the shopping list.
    ingredients = RecipeIngredient.objects.values(
        'ingredient_id__title', 'ingredient_id__unit').filter(
        recipe_id__in=list(shopping)).annotate(
        total=Sum('amount')).order_by('ingredient')
    file_data = '\n'.join([
        f"{item['ingredient_id__title']}"
        f"({item['ingredient_id__unit']}) - {item['total']}"
        for item in ingredients
    ]) + '\n'
    # Fix: 'application/text charset=utf-8' is not a valid MIME type;
    # a plain-text attachment is 'text/plain; charset=utf-8'.
    response = HttpResponse(
        file_data, content_type='text/plain; charset=utf-8'
    )
    response['Content-Disposition'] = 'attachment; filename="ShoppingList.txt"'
    return response
@login_required
def new_recipe(request):
    """Recipe creation page.

    GET renders an empty form; POST validates the form plus the
    dynamically-submitted ingredient rows and the cooking duration, then
    persists the recipe together with its RecipeIngredient links.
    """
    # Fix: both branches of the original if/else built the identical
    # form, so build it once unconditionally.
    form = RecipeForm(request.POST or None, files=request.FILES or None)
    if request.method == 'POST':
        ingredients = get_ingredients(request)
        if not ingredients:
            form.add_error(None, 'Добавьте ингредиенты')
        # Fix: the original did int(request.POST.get('duration')) and
        # crashed with TypeError/ValueError when the field was missing
        # or not numeric; treat that as an invalid duration instead.
        duration = request.POST.get('duration')
        try:
            duration_valid = int(duration) > 0
        except (TypeError, ValueError):
            duration_valid = False
        if not duration_valid:
            form.add_error(None,
                           'Время приготовления должно быть больше нуля'
                           )
        user = get_object_or_404(User, username=request.user)
        if form.is_valid():
            recipe = form.save(commit=False)
            recipe.author = user
            recipe.save()
            for ing_name, amount in ingredients.items():
                ingredient = get_object_or_404(Ingredient, title=ing_name)
                RecipeIngredient(
                    recipe=recipe, ingredient=ingredient, amount=amount
                ).save()
            # save_m2m() stores the tag relations skipped by commit=False.
            form.save_m2m()
            return redirect('index')
    return render(request, 'new_recipe.html', {'form': form})
@login_required
def recipe_edit(request, recipe_id):
    """Recipe editing page: only the recipe's author may edit it."""
    recipe = get_object_or_404(Recipe, id=recipe_id)
    form = RecipeForm(
        request.POST or None, files=request.FILES or None, instance=recipe
    )
    ingredients = get_ingredients(request)
    # Anyone who is not the author is bounced back to the index page.
    if request.user != recipe.author:
        return redirect('index')
    if form.is_valid():
        if not ingredients:
            form.add_error(None, 'Добавьте ингредиенты')
        else:
            # Replace the old ingredient links wholesale with the
            # freshly submitted set.
            RecipeIngredient.objects.filter(recipe=recipe).delete()
            recipe = form.save(commit=False)
            recipe.author = request.user
            recipe.save()
            for title, quantity in ingredients.items():
                RecipeIngredient(
                    recipe=recipe,
                    ingredient=get_object_or_404(Ingredient, title=title),
                    amount=quantity,
                ).save()
            form.save_m2m()
            return redirect('index')
    return render(
        request, 'recipe_edit.html', {'form': form, 'recipe': recipe},
    )
@login_required
def recipe_delete(request, recipe_slug):
    """Delete a recipe looked up by slug (404 if absent).

    Only the recipe's author may delete it; for anyone else the view
    silently performs no deletion.
    """
    recipe = get_object_or_404(Recipe, slug=recipe_slug)
    if request.user == recipe.author:
        recipe.delete()
return redirect('index') | recipes/views.py | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import DetailView, ListView
from foodgram.settings import RECIPES_PAGINATE_BY
from .forms import RecipeForm
from .models import Ingredient, Recipe, RecipeIngredient, User
from .utils import get_ingredients
class IndexListView(ListView):
""" Вывод главной страницы с рецептами
"""
paginate_by = RECIPES_PAGINATE_BY
template_name = 'index.html'
context_object_name = 'index'
def get_queryset(self):
tags_filter = self.request.GET.getlist('filters')
recipes = Recipe.objects.all()
if tags_filter:
recipes = recipes.filter(
tags__slug__in=tags_filter
).distinct().all()
return recipes
class FollowListView(LoginRequiredMixin, ListView):
""" Вывод страницы с подписками
"""
paginate_by = RECIPES_PAGINATE_BY
template_name = 'follow.html'
context_object_name = 'follow'
def get_queryset(self):
user = self.request.user
follows = user.follower.all().values_list('author_id', flat=True)
chefs = User.objects.filter(id__in=list(follows))
return chefs
class FavoriteListView(LoginRequiredMixin, ListView):
""" Вывод страницы с избранными рецептами
"""
paginate_by = RECIPES_PAGINATE_BY
template_name = 'favorite.html'
context_object_name = 'favorite'
def get_queryset(self):
tags_filter = self.request.GET.getlist('filters')
user = self.request.user
favorites = user.favorites.all().values_list('recipe_id', flat=True)
fav_recipes = Recipe.objects.filter(id__in=list(favorites))
if tags_filter:
fav_recipes = fav_recipes.filter(
tags__slug__in=tags_filter
).distinct().all()
return fav_recipes
class ShoppingListView(LoginRequiredMixin, ListView):
""" Вывод страницы со списком покупок
"""
template_name = 'shopping_list.html'
context_object_name = 'shopping_list'
def get_queryset(self):
user = self.request.user
shopper = user.shopper.all().values_list('recipe_id', flat=True)
recipe_list = Recipe.objects.filter(id__in=list(shopper))
return recipe_list
class ProfileListView(ListView):
""" Вывод страницы автора рецептов
"""
paginate_by = RECIPES_PAGINATE_BY
template_name = 'profile.html'
context_object_name = 'profile'
def get_queryset(self):
tags_filter = self.request.GET.getlist('filters')
author = get_object_or_404(User, username=self.kwargs.get('username'))
author_recipes = Recipe.objects.filter(author=author)
if tags_filter:
author_recipes = author_recipes.filter(
tags__slug__in=tags_filter
).distinct().all()
return author_recipes
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
author = get_object_or_404(User, username=self.kwargs.get('username'))
context['author'] = author
return context
class RecipeDetailView(DetailView):
""" Вывод страницы с информацией о рецепте
"""
model = Recipe
template_name = 'recipe.html'
@login_required
def shoplist_download(request):
    """Download the user's shopping list as a plain-text attachment.

    Aggregates the ingredients of every recipe in the user's shopping
    list, summing the amounts of identical ingredients, and returns the
    result as ``ShoppingList.txt``.
    """
    user = request.user
    shopping = user.shopper.all().values_list('recipe_id', flat=True)
    # One row per distinct ingredient, amount summed over all recipes
    # currently in the shopping list.
    ingredients = RecipeIngredient.objects.values(
        'ingredient_id__title', 'ingredient_id__unit').filter(
        recipe_id__in=list(shopping)).annotate(
        total=Sum('amount')).order_by('ingredient')
    file_data = '\n'.join([
        f"{item['ingredient_id__title']}"
        f"({item['ingredient_id__unit']}) - {item['total']}"
        for item in ingredients
    ]) + '\n'
    # Fix: 'application/text charset=utf-8' is not a valid MIME type;
    # a plain-text attachment is 'text/plain; charset=utf-8'.
    response = HttpResponse(
        file_data, content_type='text/plain; charset=utf-8'
    )
    response['Content-Disposition'] = 'attachment; filename="ShoppingList.txt"'
    return response
@login_required
def new_recipe(request):
    """Recipe creation page.

    GET renders an empty form; POST validates the form plus the
    dynamically-submitted ingredient rows and the cooking duration, then
    persists the recipe together with its RecipeIngredient links.
    """
    # Fix: both branches of the original if/else built the identical
    # form, so build it once unconditionally.
    form = RecipeForm(request.POST or None, files=request.FILES or None)
    if request.method == 'POST':
        ingredients = get_ingredients(request)
        if not ingredients:
            form.add_error(None, 'Добавьте ингредиенты')
        # Fix: the original did int(request.POST.get('duration')) and
        # crashed with TypeError/ValueError when the field was missing
        # or not numeric; treat that as an invalid duration instead.
        duration = request.POST.get('duration')
        try:
            duration_valid = int(duration) > 0
        except (TypeError, ValueError):
            duration_valid = False
        if not duration_valid:
            form.add_error(None,
                           'Время приготовления должно быть больше нуля'
                           )
        user = get_object_or_404(User, username=request.user)
        if form.is_valid():
            recipe = form.save(commit=False)
            recipe.author = user
            recipe.save()
            for ing_name, amount in ingredients.items():
                ingredient = get_object_or_404(Ingredient, title=ing_name)
                RecipeIngredient(
                    recipe=recipe, ingredient=ingredient, amount=amount
                ).save()
            # save_m2m() stores the tag relations skipped by commit=False.
            form.save_m2m()
            return redirect('index')
    return render(request, 'new_recipe.html', {'form': form})
@login_required
def recipe_edit(request, recipe_id):
""" Страница с формой редактирования рецепта
"""
recipe = get_object_or_404(Recipe, id=recipe_id)
form = RecipeForm(
request.POST or None, files=request.FILES or None, instance=recipe
)
ingredients = get_ingredients(request)
if request.user != recipe.author:
return redirect('index')
if form.is_valid():
if not ingredients:
form.add_error(None, 'Добавьте ингредиенты')
else:
RecipeIngredient.objects.filter(recipe=recipe).delete()
recipe = form.save(commit=False)
recipe.author = request.user
recipe.save()
for ing_name, amount in ingredients.items():
ingredient = get_object_or_404(Ingredient, title=ing_name)
recipe_ing = RecipeIngredient(
recipe=recipe, ingredient=ingredient, amount=amount
)
recipe_ing.save()
form.save_m2m()
return redirect('index')
return render(
request, 'recipe_edit.html', {'form': form, 'recipe': recipe},
)
@login_required
def recipe_delete(request, recipe_slug):
""" Удаление рецепта
"""
recipe = get_object_or_404(Recipe, slug=recipe_slug)
if request.user == recipe.author:
recipe.delete()
return redirect('index') | 0.404625 | 0.116186 |
import json, pytest, uuid
from tests.constants import TEST_PERMIT_GUID_1, TEST_MINE_GUID, DUMMY_USER_KWARGS
from app.api.permits.permit_amendment.models.permit_amendment_document import PermitAmendmentDocument
from app.api.permits.permit_amendment.models.permit_amendment import PermitAmendment
from app.api.permits.permit.models.permit import Permit
from app.extensions import db
TEST_DOCUMENT_MANAGER_GUID_1 = uuid.uuid4()
TEST_DOCUMENT_MANAGER_GUID_2 = uuid.uuid4()
@pytest.fixture(scope='function')
def setup_info(test_client):
    """Create a permit amendment plus one attached and one orphan document.

    Yields the created objects to the test, then deletes them; the final
    commit is best-effort because a test may already have deleted them.
    """
    permit = Permit.find_by_permit_guid(TEST_PERMIT_GUID_1)
    test_pa = PermitAmendment.create(permit, None, None, None, 'AMD', DUMMY_USER_KWARGS)
    test_pa.save()
    test_pa_doc = PermitAmendmentDocument(
        document_name="test1.pdf",
        mine_guid=TEST_MINE_GUID,
        permit_amendment_id=test_pa.permit_amendment_id,
        document_manager_guid=TEST_DOCUMENT_MANAGER_GUID_1,
        **DUMMY_USER_KWARGS)
    test_pa_doc.save()
    test_orphan_doc = PermitAmendmentDocument(
        document_name="orphan.pdf",
        mine_guid=TEST_MINE_GUID,
        permit_amendment_id=None,
        document_manager_guid=TEST_DOCUMENT_MANAGER_GUID_2,
        **DUMMY_USER_KWARGS)
    test_orphan_doc.save()
    yield {
        'permit_amendment_1': test_pa,
        'permit_amendment_document_1': test_pa_doc,
        'test_orphan_document_1': test_orphan_doc
    }
    db.session.delete(test_pa)
    db.session.delete(test_pa_doc)
    db.session.delete(test_orphan_doc)
    try:
        # The rows may have been deleted by the test that executed; that
        # is fine, swallow the failed commit.
        db.session.commit()
    except Exception:
        # Fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt.
        pass
# PUT
def test_put_new_file(test_client, auth_headers, setup_info):
    """Attaching a new document to an amendment increases its document count."""
    amendment = setup_info.get('permit_amendment_1')
    docs_before = len(amendment.documents)
    payload = {'document_manager_guid': str(uuid.uuid4()), 'filename': 'a_file.pdf'}
    response = test_client.put(
        f'/permits/amendments/{str(amendment.permit_amendment_guid)}/documents',
        headers=auth_headers['full_auth_header'],
        data=payload)
    assert response.status_code == 200
    assert len(amendment.documents) == docs_before + 1
def test_happy_path_file_removal(test_client, auth_headers, setup_info):
    """Deleting an existing amendment document returns 204 and detaches it."""
    amendment = setup_info.get('permit_amendment_1')
    document = setup_info.get('permit_amendment_document_1')
    url = (f'/permits/amendments/{str(amendment.permit_amendment_guid)}'
           f'/documents/{str(document.permit_amendment_document_guid)}')
    response = test_client.delete(url, headers=auth_headers['full_auth_header'])
    assert response.status_code == 204
    assert document not in amendment.documents
def test_remove_file_no_doc_guid(test_client, auth_headers, setup_info):
    """DELETE without a document guid in the URL yields 400 with a message."""
    amendment = setup_info.get('permit_amendment_1')
    response = test_client.delete(
        f'/permits/amendments/{str(amendment.permit_amendment_guid)}/documents',
        headers=auth_headers['full_auth_header'])
    body = json.loads(response.data.decode())
    assert response.status_code == 400
    assert body['error']['message'] is not None
def test_remove_file_no_doc(test_client, auth_headers, setup_info):
    """DELETE of a document guid that does not exist yields 404 with a message."""
    amendment = setup_info.get('permit_amendment_1')
    response = test_client.delete(
        f'/permits/amendments/{str(amendment.permit_amendment_guid)}'
        f'/documents/{str(uuid.uuid4())}',
        headers=auth_headers['full_auth_header'])
    body = json.loads(response.data.decode())
    assert response.status_code == 404
    assert body['error']['message'] is not None
def test_remove_file_no_exp_doc(test_client, auth_headers, setup_info):
    """DELETE under a non-existent amendment guid yields 404 with a message."""
    permit_amendment_document = setup_info.get('permit_amendment_document_1')
    del_resp = test_client.delete(
        f'/permits/amendments/{str(uuid.uuid4())}/documents/{str(permit_amendment_document.permit_amendment_document_guid)}',
        headers=auth_headers['full_auth_header'])
    post_data = json.loads(del_resp.data.decode())
    assert del_resp.status_code == 404
assert post_data['error']['message'] is not None | python-backend/tests/permit/resources/test_permit_amendment_document_resource.py | import json, pytest, uuid
from tests.constants import TEST_PERMIT_GUID_1, TEST_MINE_GUID, DUMMY_USER_KWARGS
from app.api.permits.permit_amendment.models.permit_amendment_document import PermitAmendmentDocument
from app.api.permits.permit_amendment.models.permit_amendment import PermitAmendment
from app.api.permits.permit.models.permit import Permit
from app.extensions import db
TEST_DOCUMENT_MANAGER_GUID_1 = uuid.uuid4()
TEST_DOCUMENT_MANAGER_GUID_2 = uuid.uuid4()
@pytest.fixture(scope='function')
def setup_info(test_client):
    """Create a permit amendment plus one attached and one orphan document.

    Yields the created objects to the test, then deletes them; the final
    commit is best-effort because a test may already have deleted them.
    """
    permit = Permit.find_by_permit_guid(TEST_PERMIT_GUID_1)
    test_pa = PermitAmendment.create(permit, None, None, None, 'AMD', DUMMY_USER_KWARGS)
    test_pa.save()
    test_pa_doc = PermitAmendmentDocument(
        document_name="test1.pdf",
        mine_guid=TEST_MINE_GUID,
        permit_amendment_id=test_pa.permit_amendment_id,
        document_manager_guid=TEST_DOCUMENT_MANAGER_GUID_1,
        **DUMMY_USER_KWARGS)
    test_pa_doc.save()
    test_orphan_doc = PermitAmendmentDocument(
        document_name="orphan.pdf",
        mine_guid=TEST_MINE_GUID,
        permit_amendment_id=None,
        document_manager_guid=TEST_DOCUMENT_MANAGER_GUID_2,
        **DUMMY_USER_KWARGS)
    test_orphan_doc.save()
    yield {
        'permit_amendment_1': test_pa,
        'permit_amendment_document_1': test_pa_doc,
        'test_orphan_document_1': test_orphan_doc
    }
    db.session.delete(test_pa)
    db.session.delete(test_pa_doc)
    db.session.delete(test_orphan_doc)
    try:
        # The rows may have been deleted by the test that executed; that
        # is fine, swallow the failed commit.
        db.session.commit()
    except Exception:
        # Fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt.
        pass
# PUT
def test_put_new_file(test_client, auth_headers, setup_info):
permit_amendment = setup_info.get('permit_amendment_1')
document_count = len(permit_amendment.documents)
data = {'document_manager_guid': str(uuid.uuid4()), 'filename': 'a_file.pdf'}
put_resp = test_client.put(
f'/permits/amendments/{str(permit_amendment.permit_amendment_guid)}/documents',
headers=auth_headers['full_auth_header'],
data=data)
assert put_resp.status_code == 200
assert len(permit_amendment.documents) == document_count + 1
def test_happy_path_file_removal(test_client, auth_headers, setup_info):
permit_amendment = setup_info.get('permit_amendment_1')
permit_amendment_document = setup_info.get('permit_amendment_document_1')
del_resp = test_client.delete(
f'/permits/amendments/{str(permit_amendment.permit_amendment_guid)}/documents/{str(permit_amendment_document.permit_amendment_document_guid)}',
headers=auth_headers['full_auth_header'])
assert del_resp.status_code == 204
assert permit_amendment_document not in permit_amendment.documents
def test_remove_file_no_doc_guid(test_client, auth_headers, setup_info):
permit_amendment = setup_info.get('permit_amendment_1')
del_resp = test_client.delete(
f'/permits/amendments/{str(permit_amendment.permit_amendment_guid)}/documents',
headers=auth_headers['full_auth_header'])
post_data = json.loads(del_resp.data.decode())
assert del_resp.status_code == 400
assert post_data['error']['message'] is not None
def test_remove_file_no_doc(test_client, auth_headers, setup_info):
permit_amendment = setup_info.get('permit_amendment_1')
del_resp = test_client.delete(
f'/permits/amendments/{str(permit_amendment.permit_amendment_guid)}/documents/{str(uuid.uuid4())}',
headers=auth_headers['full_auth_header'])
post_data = json.loads(del_resp.data.decode())
assert del_resp.status_code == 404
assert post_data['error']['message'] is not None
def test_remove_file_no_exp_doc(test_client, auth_headers, setup_info):
permit_amendment_document = setup_info.get('permit_amendment_document_1')
del_resp = test_client.delete(
f'/permits/amendments/{str(uuid.uuid4())}/documents/{str(permit_amendment_document.permit_amendment_document_guid)}',
headers=auth_headers['full_auth_header'])
post_data = json.loads(del_resp.data.decode())
assert del_resp.status_code == 404
assert post_data['error']['message'] is not None | 0.359027 | 0.283056 |
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from practice.weight_initialization.multi_layer_net import MultiLayerNet
from practice.optimizers import RMSProp, SGD
# One-hot MNIST split used by every run of the experiment.
(X_all_train, y_all_train), (X_test, y_test) = load_mnist(one_hot_label=True)
# Experiment hyper-parameters: a 5-hidden-layer, 100-unit MLP trained
# with plain SGD (lr=0.1).  ``rho`` is only relevant if RMSProp is used.
node_size = 100
layer_size = 5
output_size = 10
lr = 0.1
rho = 0.9
optimizer = SGD(lr)
def train_nn(network):
    """Train ``network`` for 10 epochs of minibatch SGD on MNIST.

    Every 100 minibatches the accuracy over the full train and test
    sets is snapshotted; the loss of the current minibatch is recorded
    every step.  Returns (losses, train_accuracies, test_accuracies).
    """
    epochs = 10
    train_size = X_all_train.shape[0]
    batch_size = 100
    minibatch_num = np.ceil(train_size / batch_size).astype(int)
    losses, train_accuracies, test_accuracies = [], [], []
    for epoch in range(epochs):
        # Fresh random visiting order of the training set each epoch.
        idx = np.arange(train_size)
        np.random.shuffle(idx)
        for step in range(minibatch_num):
            batch_idx = idx[batch_size * step:batch_size * (step + 1)]
            x_batch, y_batch = X_all_train[batch_idx], y_all_train[batch_idx]
            optimizer.update(network.params, network.gradient(x_batch, y_batch))
            if step % 100 == 0:
                # Periodic (expensive) full-dataset accuracy snapshots.
                train_accuracies.append(network.accuracy(X_all_train, y_all_train))
                test_accuracies.append(network.accuracy(X_test, y_test))
                print(
                    f"epoch {epoch + 1} loss : {network.loss(x_batch, y_batch)}, accuracy : {network.accuracy(x_batch, y_batch)} ")
            losses.append(network.loss(x_batch, y_batch))
    return losses, train_accuracies, test_accuracies
def plot_losses_and_accuracies(losses, accuracies, label):
    """Plot one run's loss curve (solid) and accuracy curve (dashed)."""
    plt.figure()
    plt.plot(losses, label="{} losses".format(label))
    plt.plot(accuracies, linestyle="dashed", label="{} accuracies".format(label))
    plt.legend()
    plt.show()
def plot_train_test_accuracies(train_accuracies, test_accuracies, label):
    """Plot train (solid) vs test (dashed) accuracy curves for one run."""
    plt.figure()
    plt.plot(train_accuracies, label="{} train accuracy".format(label))
    plt.plot(test_accuracies, linestyle="dashed", label="{} test accuracy".format(label))
    plt.legend()
    plt.show()
def plot_weight_hist(network, title):
    """Plot per-layer weight histograms of ``network`` side by side."""
    fig, axs = plt.subplots(1, layer_size + 1)
    fig.suptitle(title)
    for i in range(layer_size + 1):
        # NOTE(review): range=(0, 1) clips negative weights entirely;
        # xavier/he initialisations are centred at 0, so roughly half
        # the weights fall outside this histogram — confirm intended.
        axs[i].hist(network.params[f"W{i + 1}"].flatten(), 30, range=(0, 1))
        if i != 0:
            # Only the leftmost subplot keeps its y-axis tick labels.
            axs[i].set_yticks([])
            axs[i].set_yticklabels([])
        axs[i].set_title(f"{i + 1} layer")
    plt.show()
# Build three identical MLPs that differ only in weight initialisation,
# train each one, and collect their accuracy histories.
networks = {'naive_wi': MultiLayerNet('naive_wi', X_all_train.shape[1], node_size, output_size, layer_size),
            'xavier': MultiLayerNet('xavier', X_all_train.shape[1], node_size, output_size, layer_size),
            'he': MultiLayerNet('he', X_all_train.shape[1], node_size, output_size, layer_size)}
train_accuracies_dict = {}
test_accuracies_dict = {}
for key, network in networks.items():
    losses_, train_accuracies, test_accuracies = train_nn(network)
    train_accuracies_dict[key] = train_accuracies
    test_accuracies_dict[key] = test_accuracies
    # plot_train_test_accuracies(train_accuracies, test_accuracies, key)
    # plot_losses_and_accuracies(losses_, accuracies_, f'{key} weight init')
    # plot_weight_hist(network, f'{key} weight initialization')
# Finally compare train vs test accuracy curves per initialisation.
for key in train_accuracies_dict.keys():
plot_train_test_accuracies(train_accuracies_dict[key], test_accuracies_dict[key], key) | practice/weight_initialization/weight_initialization_experiment_mnist.py | import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from practice.weight_initialization.multi_layer_net import MultiLayerNet
from practice.optimizers import RMSProp, SGD
(X_all_train, y_all_train), (X_test, y_test) = load_mnist(one_hot_label=True)
node_size = 100
layer_size = 5
output_size = 10
lr = 0.1
rho = 0.9
optimizer = SGD(lr)
def train_nn(network):
epochs = 10
train_size = X_all_train.shape[0]
batch_size = 100
minibatch_num = np.ceil(train_size / batch_size).astype(int)
losses = []
train_accuracies = []
test_accuracies = []
for epoch in range(epochs):
idx = np.arange(train_size)
np.random.shuffle(idx)
for mn in range(minibatch_num):
batch_mask = idx[batch_size * mn:batch_size * (mn + 1)]
x_batch = X_all_train[batch_mask]
y_batch = y_all_train[batch_mask]
grads = network.gradient(x_batch, y_batch)
optimizer.update(network.params, grads)
if mn % 100 == 0:
train_accuracies.append(network.accuracy(X_all_train, y_all_train))
test_accuracies.append(network.accuracy(X_test, y_test))
print(
f"epoch {epoch + 1} loss : {network.loss(x_batch, y_batch)}, accuracy : {network.accuracy(x_batch, y_batch)} ")
losses.append(network.loss(x_batch, y_batch))
return losses, train_accuracies, test_accuracies
def plot_losses_and_accuracies(losses, accuracies, label):
plt.figure()
plt.plot(losses, label=f"{label} losses")
plt.plot(accuracies, linestyle="dashed", label=f"{label} accuracies")
plt.legend()
plt.show()
def plot_train_test_accuracies(train_accuracies, test_accuracies, label):
plt.figure()
plt.plot(train_accuracies, label=f"{label} train accuracy")
plt.plot(test_accuracies, linestyle="dashed", label=f"{label} test accuracy")
plt.legend()
plt.show()
def plot_weight_hist(network, title):
fig, axs = plt.subplots(1, layer_size + 1)
fig.suptitle(title)
for i in range(layer_size + 1):
axs[i].hist(network.params[f"W{i + 1}"].flatten(), 30, range=(0, 1))
if i != 0:
axs[i].set_yticks([])
axs[i].set_yticklabels([])
axs[i].set_title(f"{i + 1} layer")
plt.show()
networks = {'naive_wi': MultiLayerNet('naive_wi', X_all_train.shape[1], node_size, output_size, layer_size),
'xavier': MultiLayerNet('xavier', X_all_train.shape[1], node_size, output_size, layer_size),
'he': MultiLayerNet('he', X_all_train.shape[1], node_size, output_size, layer_size)}
train_accuracies_dict = {}
test_accuracies_dict = {}
for key, network in networks.items():
losses_, train_accuracies, test_accuracies = train_nn(network)
train_accuracies_dict[key] = train_accuracies
test_accuracies_dict[key] = test_accuracies
# plot_train_test_accuracies(train_accuracies, test_accuracies, key)
# plot_losses_and_accuracies(losses_, accuracies_, f'{key} weight init')
# plot_weight_hist(network, f'{key} weight initialization')
for key in train_accuracies_dict.keys():
plot_train_test_accuracies(train_accuracies_dict[key], test_accuracies_dict[key], key) | 0.739705 | 0.614278 |
import os
import time
import datetime
from urllib import urlencode
from cyclone import httpclient
from toughlib import utils,dispatch,logger
from toughlib import apiutils
from twisted.internet import reactor,defer
from toughradius.manage.events.event_basic import BasicEvent
from toughradius.manage.settings import TOUGHCLOUD as toughcloud
from toughradius.common import tools
from toughlib.mail import send_mail as sendmail
from email.mime.text import MIMEText
from email import Header
from urllib import quote
class AccountExpireNotifyEvent(BasicEvent):
    """Account-expiry notification event.

    Offers four delivery channels for "your account is about to expire"
    notices: a configurable webhook URL, the ToughCloud SMS and mail
    APIs, and a plain SMTP server.
    """

    MAIL_TPLNAME = 'tr_expire_notify'
    MAIL_APIURL = "%s/sendmail" % toughcloud.apiurl
    SMS_TPLNAME = 'tr_expire_notify'
    SMS_APIURL = "%s/sendsms" % toughcloud.apiurl

    def event_webhook_account_expire(self, userinfo):
        """Webhook notify event: GET the configured URL with its
        placeholders filled in from ``userinfo``.  No-op when the
        "expire_notify_url" parameter is unset."""
        notify_url = self.get_param_value("expire_notify_url")
        if not notify_url:
            return
        url = notify_url.replace('{account}', userinfo.account_number)
        url = url.replace('{customer}', utils.safestr(userinfo.realname))
        url = url.replace('{expire}', userinfo.expire_date)
        url = url.replace('{email}', userinfo.email)
        url = url.replace('{mobile}', userinfo.mobile)
        url = url.replace('{product}', utils.safestr(userinfo.product_name))
        url = url.encode('utf-8')
        # Percent-encode everything except URL structure characters.
        url = quote(url, ":?=/&")
        return httpclient.fetch(url).addCallbacks(
            lambda r: logger.info(r.body), logger.exception)

    @defer.inlineCallbacks
    def event_toughcloud_sms_account_expire(self, userinfo):
        """ToughCloud SMS API notify event."""
        if not userinfo:
            return
        api_secret = self.get_param_value("toughcloud_license")
        api_token = yield tools.get_sys_token()
        params = dict(
            token=api_token.strip(),
            tplname=self.SMS_TPLNAME,
            customer=utils.safestr(userinfo.realname),
            username=userinfo.account_number,
            product=utils.safestr(userinfo.product_name),
            expire=userinfo.expire_date,
            # NOTE(review): the mail variant reads the
            # "toughcloud_service_call"/"toughcloud_service_mail" params
            # while this one reads "service_call"/"service_mail" —
            # confirm which names are actually configured.
            service_call=self.get_param_value("service_call", ''),
            service_mail=self.get_param_value("service_mail", ''),
            nonce=str(int(time.time()))
        )
        params['sign'] = apiutils.make_sign(api_secret.strip(), params.values())
        try:
            resp = yield httpclient.fetch(self.SMS_APIURL, postdata=urlencode(params))
            logger.info(resp.body)
        except Exception as err:
            logger.exception(err)

    @defer.inlineCallbacks
    def event_toughcloud_mail_account_expire(self, userinfo):
        """ToughCloud mail API notify event.  No-op unless a service
        mail address is configured."""
        if not userinfo:
            return
        api_secret = self.get_param_value("toughcloud_license")
        service_mail = self.get_param_value("toughcloud_service_mail")
        if not service_mail:
            return
        api_token = yield tools.get_sys_token()
        params = dict(
            token=api_token.strip(),
            mailto=userinfo.email,
            tplname=self.MAIL_TPLNAME,
            customer=utils.safestr(userinfo.realname),
            username=userinfo.account_number,
            product=utils.safestr(userinfo.product_name),
            expire=userinfo.expire_date,
            service_call=self.get_param_value("toughcloud_service_call", ''),
            service_mail=service_mail,
            nonce=str(int(time.time()))
        )
        params['sign'] = apiutils.make_sign(api_secret.strip(), params.values())
        try:
            resp = yield httpclient.fetch(self.MAIL_APIURL, postdata=urlencode(params))
            logger.info(resp.body)
        except Exception as err:
            logger.exception(err)

    def event_smtp_account_expire(self, userinfo):
        """SMTP notify event: render the "smtp_notify_tpl" template and
        mail it to the customer.  The template's first line is used as
        the subject."""
        notify_tpl = self.get_param_value("smtp_notify_tpl")
        ctx = notify_tpl.replace('#account#', userinfo.account_number)
        ctx = ctx.replace('#expire#', userinfo.expire_date)
        topic = ctx[:ctx.find('\n')]
        smtp_server = self.get_param_value("smtp_server", '127.0.0.1')
        from_addr = self.get_param_value("smtp_from")
        smtp_port = int(self.get_param_value("smtp_port", 25))
        smtp_user = self.get_param_value("smtp_user", None)
        smtp_pwd = self.get_param_value("smtp_pwd", None)
        # Fix: the original passed the placeholder ``<PASSWORD>`` (a
        # syntax error); pass the configured SMTP password.  The unused
        # local ``smtp_sender`` was dropped.
        return sendmail(
            server=smtp_server,
            port=smtp_port,
            user=smtp_user,
            password=smtp_pwd,
            from_addr=from_addr, mailto=userinfo.email,
            topic=utils.safeunicode(topic),
            content=utils.safeunicode(ctx),
            tls=False)
def __call__(dbengine=None, mcache=None, **kwargs):
return AccountExpireNotifyEvent(dbengine=dbengine, mcache=mcache, **kwargs) | toughradius/manage/events/account_expire_notify.py | import os
import time
import datetime
from urllib import urlencode
from cyclone import httpclient
from toughlib import utils,dispatch,logger
from toughlib import apiutils
from twisted.internet import reactor,defer
from toughradius.manage.events.event_basic import BasicEvent
from toughradius.manage.settings import TOUGHCLOUD as toughcloud
from toughradius.common import tools
from toughlib.mail import send_mail as sendmail
from email.mime.text import MIMEText
from email import Header
from urllib import quote
class AccountExpireNotifyEvent(BasicEvent):
MAIL_TPLNAME = 'tr_expire_notify'
MAIL_APIURL = "%s/sendmail"%toughcloud.apiurl
SMS_TPLNAME = 'tr_expire_notify'
SMS_APIURL = "%s/sendsms"%toughcloud.apiurl
def event_webhook_account_expire(self, userinfo):
"""webhook notify event """
notify_url = self.get_param_value("expire_notify_url")
if not notify_url:
return
url = notify_url.replace('{account}',userinfo.account_number)
url = url.replace('{customer}',utils.safestr(userinfo.realname))
url = url.replace('{expire}',userinfo.expire_date)
url = url.replace('{email}',userinfo.email)
url = url.replace('{mobile}',userinfo.mobile)
url = url.replace('{product}',utils.safestr(userinfo.product_name))
url = url.encode('utf-8')
url = quote(url,":?=/&")
return httpclient.fetch(url).addCallbacks(lambda r: logger.info(r.body),logger.exception)
@defer.inlineCallbacks
def event_toughcloud_sms_account_expire(self, userinfo):
""" toughcloud sms api notify event """
if not userinfo:
return
api_secret = self.get_param_value("toughcloud_license")
api_token = yield tools.get_sys_token()
params = dict(
token=api_token.strip(),
tplname=self.SMS_TPLNAME,
customer=utils.safestr(userinfo.realname),
username=userinfo.account_number,
product=utils.safestr(userinfo.product_name),
expire=userinfo.expire_date,
service_call=self.get_param_value("service_call",''),
service_mail=self.get_param_value("service_mail",''),
nonce = str(int(time.time()))
)
params['sign'] = apiutils.make_sign(api_secret.strip(), params.values())
try:
resp = yield httpclient.fetch(self.SMS_APIURL, postdata=urlencode(params))
logger.info(resp.body)
except Exception as err:
logger.exception(err)
@defer.inlineCallbacks
def event_toughcloud_mail_account_expire(self, userinfo):
""" toughcloud mail api notify event """
if not userinfo:
return
api_secret = self.get_param_value("toughcloud_license")
service_mail=self.get_param_value("toughcloud_service_mail")
if not service_mail:
return
api_token = yield tools.get_sys_token()
params = dict(
token=api_token.strip(),
mailto=userinfo.email,
tplname=self.MAIL_TPLNAME,
customer=utils.safestr(userinfo.realname),
username=userinfo.account_number,
product=utils.safestr(userinfo.product_name),
expire=userinfo.expire_date,
service_call=self.get_param_value("toughcloud_service_call",''),
service_mail=service_mail,
nonce = str(int(time.time()))
)
params['sign'] = apiutils.make_sign(api_secret.strip(), params.values())
try:
resp = yield httpclient.fetch(self.MAIL_APIURL, postdata=urlencode(params))
logger.info(resp.body)
except Exception as err:
logger.exception(err)
def event_smtp_account_expire(self, userinfo):
notify_tpl = self.get_param_value("smtp_notify_tpl")
ctx = notify_tpl.replace('#account#',userinfo.account_number)
ctx = ctx.replace('#expire#',userinfo.expire_date)
topic = ctx[:ctx.find('\n')]
smtp_server = self.get_param_value("smtp_server",'127.0.0.1')
from_addr = self.get_param_value("smtp_from")
smtp_port = int(self.get_param_value("smtp_port",25))
smtp_sender = self.get_param_value("smtp_sender",None)
smtp_user = self.get_param_value("smtp_user",None)
smtp_pwd = self.get_param_value("smtp_pwd",None)
return sendmail(
server=smtp_server,
port=smtp_port,
user=smtp_user,
password=<PASSWORD>,
from_addr=from_addr, mailto=userinfo.email,
topic=utils.safeunicode(topic),
content=utils.safeunicode(ctx),
tls=False)
def __call__(dbengine=None, mcache=None, **kwargs):
    # Module-level factory: the event dispatcher calls the module itself
    # to obtain a configured handler instance.
    return AccountExpireNotifyEvent(dbengine=dbengine, mcache=mcache, **kwargs)
import socket, select, string, sys, base64
class bcolors:
    """ANSI escape codes used to colour the chat client's terminal output."""
    # Colours.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    # Text attributes.
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # Reset sequence.
    ENDC = '\033[0m'
def prompt(usuario):
    """Write the chat prompt ("<user> ") in blue, without a newline."""
    texto = bcolors.OKBLUE + '<' + usuario + '> ' + bcolors.ENDC
    sys.stdout.write(texto)
    sys.stdout.flush()
if __name__ == "__main__":
    # Read host and port from the command line (Python 2 script).
    if(len(sys.argv) < 2) :
        print bcolors.WARNING + 'Escribe : ' + sys.argv[0] + ' <host> <puerto (por defecto el 8080)>' + bcolors.ENDC
        sys.exit()
    host = sys.argv[1]
    if len(sys.argv) == 2:
        # Default port when only the host is given.
        port = 8080
    else:
        port = int(sys.argv[2])
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(2)
    # Try to connect to the server.
    try :
        s.connect((host, port))
    except :
        print bcolors.FAIL + '[Error 400]: No se ha podido hacer la conexion' + bcolors.ENDC
        sys.exit()
    print bcolors.OKGREEN + 'Conectado a ' + host + ' en el puerto ' + str(port) + '\nEscribe \exit para salir' + bcolors.ENDC
    usuario = raw_input(bcolors.HEADER + "Escribe un nombre de usuario\n" + bcolors.ENDC)
    prompt(usuario) # Each user chooses their own alias
    while 1:
        socket_list = [sys.stdin, s]
        # Block until either stdin or the server socket is readable.
        read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
        for sock in read_sockets:
            # Message coming from the server.
            if sock == s:
                data = sock.recv(4096)
                if not data :
                    print bcolors.FAIL + '\n[Error 500] Desconectado del servidor' + bcolors.ENDC
                    sys.exit()
                else :
                    #print data
                    sys.stdout.write(data)
                    prompt(usuario)
            # Send the message typed by the user: prefix the alias,
            # base64-encode, then ship it to the server.
            else :
                msg = sys.stdin.readline()
                msg = usuario + ' ' + msg
                encoded = base64.b64encode(msg)
                s.send(encoded)
                if '\exit' in msg:
                    sys.exit(0)
                prompt(usuario)
import socket, select, string, sys, base64
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def prompt(usuario) :
sys.stdout.write(bcolors.OKBLUE + '<' + usuario + '> ' + bcolors.ENDC)
sys.stdout.flush()
if __name__ == "__main__":
# Pedimos el host y el puerto
if(len(sys.argv) < 2) :
print bcolors.WARNING + 'Escribe : ' + sys.argv[0] + ' <host> <puerto (por defecto el 8080)>' + bcolors.ENDC
sys.exit()
host = sys.argv[1]
if len(sys.argv) == 2:
port = 8080
else:
port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
# Intentamos hacer la conexion
try :
s.connect((host, port))
except :
print bcolors.FAIL + '[Error 400]: No se ha podido hacer la conexion' + bcolors.ENDC
sys.exit()
print bcolors.OKGREEN + 'Conectado a ' + host + ' en el puerto ' + str(port) + '\nEscribe \exit para salir' + bcolors.ENDC
usuario = raw_input(bcolors.HEADER + "Escribe un nombre de usuario\n" + bcolors.ENDC)
prompt(usuario) # Cada usuario tiene su propio alias
while 1:
socket_list = [sys.stdin, s]
# Obtener la lista de sockets
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
#Mensaje del servidor
if sock == s:
data = sock.recv(4096)
if not data :
print bcolors.FAIL + '\n[Error 500] Desconectado del servidor' + bcolors.ENDC
sys.exit()
else :
#print data
sys.stdout.write(data)
prompt(usuario)
#Enviar mensaje escrito por el usuario
else :
msg = sys.stdin.readline()
msg = usuario + ' ' + msg
encoded = base64.b64encode(msg)
s.send(encoded)
if '\exit' in msg:
sys.exit(0)
prompt(usuario) | 0.04598 | 0.080357 |
import os
import django.conf
from django.test import TestCase
from django.core.files.uploadedfile import UploadedFile
from django import forms
from document_catalogue import models
from . import base
# Create tests for models here.
class CategoryTests(TestCase):
    """Exercise the basic behaviours of the DocumentCategory model."""

    def setUp(self):
        super().setUp()
        self.categories = base.create_document_categories()

    def test_get_absolute_url(self):
        for category in models.DocumentCategory.objects.all():
            self.assertIn(category.slug, category.get_absolute_url(),
                          'URL for category should contain its slug.')

    def test_has_children(self):
        top_level = models.DocumentCategory.objects.filter(parent=None)
        for category in top_level:
            self.assertTrue(category.has_children(),
                            'Category reporting no children when it has sub-categories.')

    def test_not_has_children(self):
        leaf = models.DocumentCategory.objects.get(slug='sub-category-1b')
        self.assertTrue(not leaf.has_children(),
                        'Category reporting children when it has no sub-categories.')

    def test_document_count(self):
        for category in models.DocumentCategory.objects.all():
            self.assertEqual(category.get_document_count(), 0,
                             'Category with no documents returns non-zero get_document_count.')
class DocumentTests(TestCase):
    """Exercise the basic behaviours of the Document model."""

    FILENAME = 'myDocument.txt'

    def setUp(self):
        super().setUp()
        self.categories = base.create_document_categories()
        self.document = base.create_document(filename=self.FILENAME, file_type='txt')

    def tearDown(self):
        # Remove the file that setUp created on disk.
        os.remove(self.document.file.path)

    def test_get_absolute_url(self):
        url = self.document.get_absolute_url()
        self.assertIn(str(self.document.pk), url, 'URL for document should contain its pk.')

    def test_get_filetype(self):
        filetype = self.document.get_filetype()
        self.assertEqual(filetype, 'txt', 'get_filetype returns incorrect type %s' % filetype)

    def test_document_directory_path(self):
        instance = lambda: None  # a mutable null object standing in for a Document
        instance.category = self.categories[0]
        path = models.document_upload_path_callback(instance, self.FILENAME)
        self.assertIn(self.categories[0].slug, path)
        self.assertIn(self.FILENAME, path)

    def test_create_file(self):
        filename = 'slartibartfast.txt'
        created = base.create_document(filename=filename, file_type='txt',
                                       user=base.create_user(username='slartibartfast'))
        doc = models.Document.objects.get(pk=created.pk)
        self.assertIn(filename, doc.file.name)
        self.assertIn(filename, doc.file.url)
        # Cleanup
        os.remove(doc.file.path)

    def test_private_storage(self):
        if base.appConfig.USE_PRIVATE_FILES:
            media_root = getattr(django.conf.settings, 'PRIVATE_STORAGE_ROOT')
        else:
            media_root = django.conf.settings.MEDIA_ROOT
        self.assertIn(media_root, self.document.file.path)
        # NOTE(review): base.appConfig.settings looks suspicious -- probably meant
        # django.conf.settings; confirm appConfig actually exposes 'settings'.
        self.assertIn(base.appConfig.settings.MEDIA_ROOT, self.document.file.path)
class ConstrainedfileFieldTests(TestCase):
    """
    A few basic tests for common validation in both PrivateFileField and ConstrainedFileField
    """

    def setUp(self):
        super().setUp()
        self.categories = base.create_document_categories()

    def test_validation_success(self):
        document = base.create_document(filename='dummy.txt', file_type='txt')
        file_field = document.file.field
        self.assertIsNotNone(file_field.clean(value=document.file, model_instance=document))

    class FileField:
        """Adapter hiding whether the size limit lives on max_file_size
        (private files) or max_upload_size (constrained field).

        Remembers the limit at construction time so tests can restore it.
        """

        def __init__(self, field):
            self.field = field
            self.store = self.get_max_upload_size()  # original limit, for restore

        def get_max_upload_size(self):
            return self.field.max_file_size if base.appConfig.USE_PRIVATE_FILES else self.field.max_upload_size

        def set_max_upload_size(self, limit):
            # Bug fix: write the limit to the same attribute the getter reads.
            # Previously this always set max_upload_size, so the fudged limit
            # was silently ignored when USE_PRIVATE_FILES is enabled.
            if base.appConfig.USE_PRIVATE_FILES:
                self.field.max_file_size = limit
            else:
                self.field.max_upload_size = limit

        def restore_max_upload_size(self):
            if base.appConfig.USE_PRIVATE_FILES:
                self.field.max_file_size = self.store
            else:
                self.field.max_upload_size = self.store

    def test_max_upload_size_fail(self):
        document = base.create_document()
        file_field = self.FileField(document.file.field)
        file_size = file_field.get_max_upload_size() - 1
        document.file.file = \
            UploadedFile(file=document.file.file, name=document.title, content_type='txt', size=file_size)
        # Fudge the size limit to fall below the file size so clean() must fail.
        file_field.set_max_upload_size(document.file.size - 1)
        with self.assertRaises(forms.ValidationError):
            file_field.field.clean(value=document.file, model_instance=document)
        # Cleanup: restore the field's limit and delete the file on disk.
        file_field.restore_max_upload_size()
        os.remove(document.file.path)

    def test_content_types_fail(self):
        document = base.create_document(filename='dummy.html', file_type='html')
        file_field = document.file.field
        document.file.file = \
            UploadedFile(file=document.file.file, name=document.title, content_type='html', size=document.file.size)
        with self.assertRaises(forms.ValidationError):
            file_field.clean(value=document.file, model_instance=document)
        # Cleanup
        os.remove(document.file.path)
import django.conf
from django.test import TestCase
from django.core.files.uploadedfile import UploadedFile
from django import forms
from document_catalogue import models
from . import base
# Create tests for models here.
class CategoryTests(TestCase):
"""
Test basic behaviours for DocumentCategory model
"""
def setUp(self):
super().setUp()
self.categories=base.create_document_categories()
def test_get_absolute_url(self):
categories = models.DocumentCategory.objects.all()
for cat in categories:
self.assertIn(cat.slug, cat.get_absolute_url(), 'URL for category should contain its slug.')
def test_has_children(self):
categories = models.DocumentCategory.objects.filter(parent=None)
for cat in categories:
self.assertTrue(cat.has_children(), 'Category reporting no children when it has sub-categories.')
def test_not_has_children(self):
cat = models.DocumentCategory.objects.get(slug='sub-category-1b')
self.assertTrue(not cat.has_children(), 'Category reporting children when it has no sub-categories.')
def test_document_count(self):
categories = models.DocumentCategory.objects.all()
for cat in categories:
self.assertEqual(cat.get_document_count(), 0, 'Category with no documents returns non-zero get_document_count.')
class DocumentTests(TestCase):
"""
Test basic behaviours for Document model
"""
FILENAME = 'myDocument.txt'
def setUp(self):
super().setUp()
self.categories=base.create_document_categories()
self.document = base.create_document(filename=self.FILENAME, file_type='txt')
def tearDown(self):
os.remove(self.document.file.path)
def test_get_absolute_url(self):
self.assertIn(str(self.document.pk), self.document.get_absolute_url(), 'URL for document should contain its pk.')
def test_get_filetype(self):
filetype = self.document.get_filetype()
self.assertEqual(filetype, 'txt', 'get_filetype returns incorrect type %s' % filetype)
def test_document_directory_path(self):
instance = lambda: None # a mutable null object
instance.category = self.categories[0]
path = models.document_upload_path_callback(instance, self.FILENAME)
self.assertIn(self.categories[0].slug, path)
self.assertIn(self.FILENAME, path)
def test_create_file(self):
filename = 'slartibartfast.txt'
file = base.create_document(filename=filename, file_type='txt', user=base.create_user(username='slartibartfast'))
doc = models.Document.objects.get(pk=file.pk)
self.assertIn(filename, doc.file.name)
self.assertIn(filename, doc.file.url)
# Cleanup
os.remove(doc.file.path)
def test_private_storage(self):
media_root = getattr(django.conf.settings, 'PRIVATE_STORAGE_ROOT') if base.appConfig.USE_PRIVATE_FILES \
else django.conf.settings.MEDIA_ROOT
self.assertIn(media_root, self.document.file.path)
self.assertIn(base.appConfig.settings.MEDIA_ROOT, self.document.file.path)
class ConstrainedfileFieldTests(TestCase):
"""
A few basic tests for common validation in both PrivateFileField and ConstrainedFileField
"""
def setUp(self):
super().setUp()
self.categories=base.create_document_categories()
def test_validation_success(self):
document = base.create_document(filename='dummy.txt', file_type='txt')
file_field = document.file.field
self.assertIsNotNone(file_field.clean(value=document.file, model_instance=document))
class FileField:
def __init__(self, field):
self.field = field
self.store = self.get_max_upload_size()
def get_max_upload_size(self):
return self.field.max_file_size if base.appConfig.USE_PRIVATE_FILES else self.field.max_upload_size
def set_max_upload_size(self, max):
self.field.max_upload_size = max
def restore_max_upload_size(self):
if base.appConfig.USE_PRIVATE_FILES:
self.field.max_file_size = self.store
else:
self.field.max_upload_size = self.store
def test_max_upload_size_fail(self):
document = base.create_document()
file_field = self.FileField(document.file.field)
file_size = file_field.get_max_upload_size()-1
document.file.file = \
UploadedFile(file=document.file.file, name=document.title, content_type='txt', size=file_size)
# fudge the max_upload_size to fall below file size.
file_field.set_max_upload_size(document.file.size - 1)
with self.assertRaises(forms.ValidationError):
file_field.field.clean(value=document.file, model_instance=document)
# Cleanup
file_field.restore_max_upload_size()
os.remove(document.file.path)
def test_content_types_fail(self):
document = base.create_document(filename='dummy.html', file_type='html')
file_field = document.file.field
document.file.file = \
UploadedFile(file=document.file.file, name=document.title, content_type='html', size=document.file.size)
with self.assertRaises(forms.ValidationError):
file_field.clean(value=document.file, model_instance=document)
# Cleanup
os.remove(document.file.path) | 0.505615 | 0.292008 |
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
ROOT_DIR = os.getenv('PLASTICC_DIR')
WORK_DIR = os.path.join(ROOT_DIR, 'plasticc')
DATA_DIR = os.path.join(ROOT_DIR, 'plasticc_data')
sys.path.append(WORK_DIR)
import numpy as np
import plasticc
import plasticc.get_data
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import gaussian_kde, describe
from astropy.visualization import hist
def main():
    """Plot a Gaussian-KDE curve of Milky Way extinction (MWEBV) per model.

    Reads options via parse_getdata_options, pulls per-object MWEBV values
    for every model type in the data release, overlays one KDE curve per
    model on a single axis, and saves the figure as a PDF.
    """
    kwargs = plasticc.get_data.parse_getdata_options()
    print("This config ", kwargs)
    data_release = kwargs.pop('data_release')
    fig_dir = os.path.join(WORK_DIR, 'Figures', data_release, 'rate_analysis')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
    # The model option is re-set per iteration below; discard the parsed one.
    _ = kwargs.pop('model')
    out_field = kwargs.get('field')
    kwargs['columns']=['objid','mwebv', ]
    sntypes = plasticc.get_data.GetData.get_sntypes()
    getter = plasticc.get_data.GetData(data_release)
    cmap = plt.cm.tab20
    nlines = len(sntypes.keys())
    # NOTE(review): assumes at least 3 model types yield no curve; with fewer
    # skipped models the colour iterator could be exhausted -- confirm.
    color = iter(cmap(np.linspace(0,1,nlines-3)))
    fig1 = plt.figure(figsize=(15,10))
    ax1 = fig1.add_subplot(111)
    # DDF fields have a much narrower extinction range than WFD, so use a
    # tighter x-axis and finer step.
    if out_field == 'DDF':
        upper_lim = 0.101
        step = 0.001
    else:
        upper_lim = 0.81
        step = 0.01
    mwebv_range = np.arange(0, upper_lim, step)
    for i, model in enumerate(sntypes.keys()):
        kwargs['model'] = model
        kwargs['big'] = True
        head = getter.get_lcs_headers(**kwargs)
        model_name = sntypes[model]
        head = list(head)
        nobs = len(head)
        # Need at least two samples for a kernel density estimate.
        if nobs <= 1:
            continue
        c = to_hex(next(color), keep_alpha=False)
        # Each header row is (objid, mwebv) per the 'columns' request above.
        objid, hz = zip(*head)
        long_model_name = f'{model_name}_{model}: {nobs}'
        try:
            density = gaussian_kde(hz, bw_method='scott')
        except Exception as e:
            # KDE can fail on degenerate (e.g. all-equal) samples; skip model.
            continue
        ax1.plot(mwebv_range, density(mwebv_range), color=c, label=long_model_name)
    ax1.set_xlabel('MWEBV', fontsize='xx-large')
    ax1.set_ylabel('PDF', fontsize='xx-large')
    ax1.legend(frameon=False)
    ax1.set_xlim(0, upper_lim - step)
    fig1.tight_layout()
    fig1.savefig(f'{fig_dir}/extinction_checks_{data_release}_{out_field}.pdf')
    plt.close(fig1)
if __name__=='__main__':
sys.exit(main()) | bin/make_extinction_comp_plot.py | from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
ROOT_DIR = os.getenv('PLASTICC_DIR')
WORK_DIR = os.path.join(ROOT_DIR, 'plasticc')
DATA_DIR = os.path.join(ROOT_DIR, 'plasticc_data')
sys.path.append(WORK_DIR)
import numpy as np
import plasticc
import plasticc.get_data
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import gaussian_kde, describe
from astropy.visualization import hist
def main():
kwargs = plasticc.get_data.parse_getdata_options()
print("This config ", kwargs)
data_release = kwargs.pop('data_release')
fig_dir = os.path.join(WORK_DIR, 'Figures', data_release, 'rate_analysis')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
_ = kwargs.pop('model')
out_field = kwargs.get('field')
kwargs['columns']=['objid','mwebv', ]
sntypes = plasticc.get_data.GetData.get_sntypes()
getter = plasticc.get_data.GetData(data_release)
cmap = plt.cm.tab20
nlines = len(sntypes.keys())
color = iter(cmap(np.linspace(0,1,nlines-3)))
fig1 = plt.figure(figsize=(15,10))
ax1 = fig1.add_subplot(111)
if out_field == 'DDF':
upper_lim = 0.101
step = 0.001
else:
upper_lim = 0.81
step = 0.01
mwebv_range = np.arange(0, upper_lim, step)
for i, model in enumerate(sntypes.keys()):
kwargs['model'] = model
kwargs['big'] = True
head = getter.get_lcs_headers(**kwargs)
model_name = sntypes[model]
head = list(head)
nobs = len(head)
if nobs <= 1:
continue
c = to_hex(next(color), keep_alpha=False)
objid, hz = zip(*head)
long_model_name = f'{model_name}_{model}: {nobs}'
try:
density = gaussian_kde(hz, bw_method='scott')
except Exception as e:
continue
ax1.plot(mwebv_range, density(mwebv_range), color=c, label=long_model_name)
ax1.set_xlabel('MWEBV', fontsize='xx-large')
ax1.set_ylabel('PDF', fontsize='xx-large')
ax1.legend(frameon=False)
ax1.set_xlim(0, upper_lim - step)
fig1.tight_layout()
fig1.savefig(f'{fig_dir}/extinction_checks_{data_release}_{out_field}.pdf')
plt.close(fig1)
if __name__=='__main__':
sys.exit(main()) | 0.239794 | 0.101456 |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Neighborhood(models.Model):
    """A residential neighbourhood with its security and health contacts."""
    hoodname = models.TextField(max_length=500)
    hoodlocation=models.TextField(max_length=500)
    # admin = models.ForeignKey(Profile,on_delete=models.CASCADE)
    pic=models.ImageField(upload_to='images/')
    description=models.CharField(max_length=500)
    police_count=models.IntegerField(null=True,blank=True)
    police_info=models.TextField(max_length=3000,default='<EMAIL>')
    occupant = models.ForeignKey(User,on_delete=models.CASCADE)
    health = models.IntegerField()
    health_info = models.TextField(max_length=3000,default = '<EMAIL>')
    def __str__(self):
        return self.hoodname
    def create_hood(self):
        '''
        Function for creating a neighborhood
        '''
        self.save()
    def delete_hood(self):
        '''
        Function for deleting a neighborhood
        '''
        self.delete()
    @classmethod
    def one_hood(cls,id):
        '''
        Function to get a single neighbourhood by id (returns a queryset)
        '''
        one_hood = cls.objects.filter(id=id)
        return one_hood
    @classmethod
    def all_hoods(cls):
        '''
        Function to get all neighbourhoods
        '''
        all_hoods = cls.objects.all()
        return all_hoods
class Profile(models.Model):
    """Extended account information linking a User to their neighbourhood."""
    user = models.OneToOneField(User,on_delete = models.CASCADE)
    profile_pic = models.ImageField(upload_to='pictures/')
    email = models.CharField(max_length=300)
    username=models.TextField(max_length=500)
    hood=models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    def save_profile(self):
        '''
        Function to save a user profile
        '''
        self.save()
    def delete_profile(self):
        '''
        Function to delete a user profile
        '''
        self.delete()
    @classmethod
    def get_occupants(cls,hood_id):
        '''
        Function to get all profiles registered in a given neighbourhood
        '''
        hood_occupants = cls.objects.filter(hood_id = hood_id)
        return hood_occupants
class Business(models.Model):
    """A business operating within a neighbourhood."""
    name=models.CharField(max_length=1000)
    owner = models.ForeignKey(User,on_delete=models.CASCADE)
    bizhood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    bizemail=models.CharField(max_length=500)
    bizdescription=models.TextField(blank=True)
    def create_biz(self):
        '''
        Function to save a business
        '''
        self.save()
    def delete_biz(self):
        '''
        Function to delete a business
        '''
        # Bug fix: was 'seld.delete()', which raised NameError at call time.
        self.delete()
    @classmethod
    def search_biz(cls,name):
        '''
        Function to search businesses by name (case-insensitive substring)
        '''
        searched_biz=cls.objects.filter(name__icontains = name).all()
        return searched_biz
    @classmethod
    def all_biz(cls,bizhood_id):
        '''
        Function to get all businesses in a given neighbourhood
        '''
        all_biz=cls.objects.filter(bizhood_id = bizhood_id)
        return all_biz
class Posts(models.Model):
    """A post published by a user to a neighbourhood notice board."""
    title = models.CharField(max_length=1000)
    post = models.TextField(max_length=3000)
    hood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    @classmethod
    def post_by_hood(cls,hood_id):
        '''
        Function to get all posts for a given neighbourhood
        '''
        hoodpost = cls.objects.filter(hood_id = hood_id)
        return hoodpost
from django.contrib.auth.models import User
# Create your models here.
class Neighborhood(models.Model):
hoodname = models.TextField(max_length=500)
hoodlocation=models.TextField(max_length=500)
# admin = models.ForeignKey(Profile,on_delete=models.CASCADE)
pic=models.ImageField(upload_to='images/')
description=models.CharField(max_length=500)
police_count=models.IntegerField(null=True,blank=True)
police_info=models.TextField(max_length=3000,default='<EMAIL>')
occupant = models.ForeignKey(User,on_delete=models.CASCADE)
health = models.IntegerField()
health_info = models.TextField(max_length=3000,default = '<EMAIL>')
def __str__(self):
return self.hoodname
def create_hood(self):
'''
Function for creating a neighborhood
'''
self.save()
def delete_hood(self):
self.delete()
@classmethod
def one_hood(cls,id):
one_hood = cls.objects.filter(id=id)
return one_hood
@classmethod
def all_hoods(cls):
'''
Function to get all neighbourhoods
'''
all_hoods = cls.objects.all()
return all_hoods
class Profile(models.Model):
user = models.OneToOneField(User,on_delete = models.CASCADE)
profile_pic = models.ImageField(upload_to='pictures/')
email = models.CharField(max_length=300)
username=models.TextField(max_length=500)
hood=models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
def save_profile(self):
'''
Function to save a user profile
'''
self.save()
def delete_profile(self):
'''
Function to delete a user profile
'''
self.delete()
@classmethod
def get_occupants(cls,hood_id):
hood_occupants = cls.objects.filter(hood_id = hood_id)
return hood_occupants
class Business(models.Model):
    """A business operating within a neighbourhood."""
    name=models.CharField(max_length=1000)
    owner = models.ForeignKey(User,on_delete=models.CASCADE)
    bizhood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    bizemail=models.CharField(max_length=500)
    bizdescription=models.TextField(blank=True)
    def create_biz(self):
        '''
        Function to save a business
        '''
        self.save()
    def delete_biz(self):
        '''
        Function to delete a business
        '''
        # Bug fix: was 'seld.delete()', which raised NameError at call time.
        self.delete()
    @classmethod
    def search_biz(cls,name):
        '''
        Function to search businesses by name (case-insensitive substring)
        '''
        searched_biz=cls.objects.filter(name__icontains = name).all()
        return searched_biz
    @classmethod
    def all_biz(cls,bizhood_id):
        '''
        Function to get all businesses in a given neighbourhood
        '''
        all_biz=cls.objects.filter(bizhood_id = bizhood_id)
        return all_biz
class Posts(models.Model):
title = models.CharField(max_length=1000)
post = models.TextField(max_length=3000)
hood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
user = models.ForeignKey(User,on_delete=models.CASCADE)
@classmethod
def post_by_hood(cls,hood_id):
hoodpost = cls.objects.filter(hood_id = hood_id)
return hoodpost | 0.460532 | 0.102844 |
from common.tools.blockstring import BlockString
from common.tools.padders import PKCS7Padder, PKCS7Unpadder
from common.tools.xor import ByteXOR
class BlockCipherMode(object):
    """ Base class for block-cipher modes of operation.

    Handles PKCS#7 padding and iteration over the block string; subclasses
    supply the per-block encryption/decryption callbacks.
    """
    DEFAULT_BLOCK_SIZE = 16

    @classmethod
    def name(cls):
        return cls.__name__

    def __init__(self, block_size=None):
        if block_size is None:
            block_size = self.DEFAULT_BLOCK_SIZE
        self.block_size = block_size

    def _pad(self, string):
        return PKCS7Padder(string).value(self.block_size)

    def _unpad_if_needed(self, index, block):
        # Only the final block of the message carries padding.
        if self.block_string.is_last_block_index(index):
            block = PKCS7Unpadder(block).value()
        return block

    def _iterate_blocks_with(self, block_string, cipher, callback):
        self.cipher = cipher
        self.block_string = block_string
        result = BlockString(block_size=self.block_size)
        for index, block in enumerate(self.block_string):
            result = result + callback(index, block)
        return result

    def _block_encryption_callback(self, message, cipher):
        raise NotImplementedError

    def _block_decryption_callback(self, message, cipher):
        raise NotImplementedError

    def encrypt_with_cipher(self, plaintext, cipher):
        if type(plaintext) != BlockString:
            plaintext = BlockString(plaintext, self.block_size)
        padded = self._pad(plaintext)
        return self._iterate_blocks_with(padded, cipher,
                                         self._block_encryption_callback)

    def decrypt_with_cipher(self, ciphertext, cipher):
        if type(ciphertext) != BlockString:
            ciphertext = BlockString(ciphertext, self.block_size)
        return self._iterate_blocks_with(ciphertext, cipher,
                                         self._block_decryption_callback)
class ECB(BlockCipherMode):
    """ Electronic codebook mode: each block is processed independently. """

    def _block_encryption_callback(self, index, block):
        return self.cipher.encrypt_block(block)

    def _block_decryption_callback(self, index, block):
        decrypted = self.cipher.decrypt_block(block)
        return self._unpad_if_needed(index, decrypted)
class CBC(BlockCipherMode):
    """ Cipher block chaining mode.

    Each plaintext block is XORed with the previous ciphertext block
    (or the IV for the first block) before encryption.
    """
    def __init__(self, iv, block_size=None):
        """
        @param iv: Initialization vector, used to chain the first block.
        """
        BlockCipherMode.__init__(self, block_size)
        self.iv = iv
    def _xor(self, string1, string2):
        return ByteXOR(string1, string2).value()
    def _block_encryption_callback(self, index, block):
        # The chain starts from the IV on the first block.
        if index == 0:
            self.last_ciphertext_block = self.iv
        xor_block = self._xor(block, self.last_ciphertext_block)
        ciphertext_block = self.cipher.encrypt_block(xor_block)
        self.last_ciphertext_block = ciphertext_block
        return ciphertext_block
    def _block_decryption_callback(self, index, block):
        if index == 0:
            self.last_ciphertext_block = self.iv
        decrypted_block = self.cipher.decrypt_block(block)
        plaintext_block = self._xor(decrypted_block,
                                    self.last_ciphertext_block)
        plaintext_block = self._unpad_if_needed(index, plaintext_block)
        # Remember the raw ciphertext block for chaining the next one.
        self.last_ciphertext_block = block
        return plaintext_block
class CTR(BlockCipherMode):
    """ Counter mode: XOR each block with the encrypted counter keystream. """

    def __init__(self, counter=None, nonce=None, block_size=None):
        # Imported here to avoid an import cycle with the counter module.
        from counter import DefaultCounter, NonceBasedCounter
        BlockCipherMode.__init__(self, block_size)
        if nonce is not None:
            counter = NonceBasedCounter(nonce, block_size)
        elif counter is None:
            counter = DefaultCounter(block_size)
        self.counter = counter

    def _pad(self, plaintext):
        # CTR is a stream mode, so no padding is required.
        return plaintext

    def _xor(self, key, block):
        # Truncate the keystream to the (possibly short) final block.
        return ByteXOR(block, key[:len(block)]).value()

    def _block_callback(self, index, block):
        keystream = self.cipher.encrypt_block(self.counter.count(index))
        return self._xor(keystream, block)

    def _block_encryption_callback(self, index, block):
        # Encryption and decryption are the same operation in CTR mode.
        return self._block_callback(index, block)

    def _block_decryption_callback(self, index, block):
        return self._block_callback(index, block)
class RandomAccessCTR(CTR):
    """ CTR variant that records the generated keystream for later reuse. """
    def __init__(self, *args, **kwargs):
        CTR.__init__(self, *args, **kwargs)
        self.keystream = str()  # concatenation of every keystream block used
    def get_keystream(self):
        return self.keystream
    def _xor(self, key, block):
        # Capture the full keystream block before the parent truncates it.
        self.keystream += key
        return CTR._xor(self, key, block)
from common.tools.padders import PKCS7Padder, PKCS7Unpadder
from common.tools.xor import ByteXOR
class BlockCipherMode(object):
DEFAULT_BLOCK_SIZE = 16
@classmethod
def name(cls):
return cls.__name__
def __init__(self, block_size=None):
self.block_size = self.DEFAULT_BLOCK_SIZE if block_size is None\
else block_size
def _pad(self, string):
return PKCS7Padder(string).value(self.block_size)
def _unpad_if_needed(self, index, block):
if self.block_string.is_last_block_index(index):
block = PKCS7Unpadder(block).value()
return block
def _iterate_blocks_with(self, block_string, cipher, callback):
self.cipher = cipher
self.block_string = block_string
result = BlockString(block_size=self.block_size)
return reduce(lambda _result, block: _result + callback(*block),
enumerate(self.block_string), result)
def _block_encryption_callback(self, message, cipher):
raise NotImplementedError
def _block_decryption_callback(self, message, cipher):
raise NotImplementedError
def encrypt_with_cipher(self, plaintext, cipher):
if type(plaintext) != BlockString:
plaintext = BlockString(plaintext, self.block_size)
plaintext = self._pad(plaintext)
return self._iterate_blocks_with(plaintext, cipher,
self._block_encryption_callback)
def decrypt_with_cipher(self, ciphertext, cipher):
if type(ciphertext) != BlockString:
ciphertext = BlockString(ciphertext, self.block_size)
return self._iterate_blocks_with(ciphertext, cipher,
self._block_decryption_callback)
class ECB(BlockCipherMode):
def _block_encryption_callback(self, index, block):
return self.cipher.encrypt_block(block)
def _block_decryption_callback(self, index, block):
plaintext_block = self.cipher.decrypt_block(block)
plaintext_block = self._unpad_if_needed(index, plaintext_block)
return plaintext_block
class CBC(BlockCipherMode):
def __init__(self, iv, block_size=None):
BlockCipherMode.__init__(self, block_size)
self.iv = iv
def _xor(self, string1, string2):
return ByteXOR(string1, string2).value()
def _block_encryption_callback(self, index, block):
if index == 0:
self.last_ciphertext_block = self.iv
xor_block = self._xor(block, self.last_ciphertext_block)
ciphertext_block = self.cipher.encrypt_block(xor_block)
self.last_ciphertext_block = ciphertext_block
return ciphertext_block
def _block_decryption_callback(self, index, block):
if index == 0:
self.last_ciphertext_block = self.iv
decrypted_block = self.cipher.decrypt_block(block)
plaintext_block = self._xor(decrypted_block,
self.last_ciphertext_block)
plaintext_block = self._unpad_if_needed(index, plaintext_block)
self.last_ciphertext_block = block
return plaintext_block
class CTR(BlockCipherMode):
def __init__(self, counter=None, nonce=None, block_size=None):
from counter import DefaultCounter, NonceBasedCounter
BlockCipherMode.__init__(self, block_size)
if nonce is not None:
counter = NonceBasedCounter(nonce, block_size)
self.counter = counter if counter is not None\
else DefaultCounter(block_size)
def _pad(self, plaintext):
# CTR mode does not need padding.
return plaintext
def _xor(self, key, block):
block_length = len(block)
return ByteXOR(block, key[:block_length]).value()
def _block_callback(self, index, block):
key_argument = self.counter.count(index)
key = self.cipher.encrypt_block(key_argument)
return self._xor(key, block)
def _block_encryption_callback(self, index, block):
return self._block_callback(index, block)
def _block_decryption_callback(self, index, block):
return self._block_callback(index, block)
class RandomAccessCTR(CTR):
def __init__(self, *args, **kwargs):
CTR.__init__(self, *args, **kwargs)
self.keystream = str()
def get_keystream(self):
return self.keystream
def _xor(self, key, block):
self.keystream += key
return CTR._xor(self, key, block) | 0.719088 | 0.176672 |
import numpy as np
class BSpline:
    """Uniform cubic B-spline defined by control points stored under
    consecutive integer keys.

    Evaluation uses the standard cubic B-spline basis; the integer part of
    the parameter selects the span, the fractional part is the local
    parameter within it.
    """

    def __init__(self):
        self.ctrlPoint = dict()

    def generate_control_point(self, points, points_offset=None):
        """Solve for control points so the spline interpolates *points*.

        Uses natural end conditions (zero second derivative at both ends).
        When given, *points_offset* is added element-wise to *points*.
        """
        assert(len(points) >= 2)
        if points_offset is not None:
            assert(len(points) == len(points_offset))
        n = len(points)
        # Interpolation matrix: interior rows carry the (1, 4, 1)/6 cubic
        # B-spline stencil; first/last rows encode the natural end conditions.
        basis = 4.*np.eye(n+2) + np.eye(n+2, k=1) + np.eye(n+2, k=-1)
        basis[0, :3] = np.array([1., -2., 1.])
        basis[-1, -3:] = np.array([1., -2., 1.])
        basis *= 1./6.
        rhs = np.zeros((n+2, len(points[0])))
        for i in range(n):
            if points_offset is not None:
                rhs[i+1, :] = np.asarray(points[i]) + np.asarray(points_offset[i])
            else:
                rhs[i+1, :] = np.asarray(points[i])
        solved = np.linalg.inv(basis).dot(rhs)
        for i in range(n+2):
            self.ctrlPoint[i] = solved[i, :]

    def add_control_point(self, controlPoint):
        """Append a control point at the next integer index."""
        self.ctrlPoint[len(self.ctrlPoint)] = controlPoint

    def set_control_point(self, idx, controlPoint):
        """Overwrite the control point stored at *idx*."""
        self.ctrlPoint[idx] = controlPoint

    def get_control_points(self):
        """Return the full control-point dict (not a copy)."""
        return self.ctrlPoint

    def get_control_point(self, idx):
        """Return the control point stored at *idx*."""
        return self.ctrlPoint[idx]

    def get_value(self, tt):
        """Evaluate the spline at parameter *tt*; clamps past the last span."""
        assert(len(self.ctrlPoint) >= 4)
        span = int(tt)
        t = tt - span
        if span >= len(self.ctrlPoint) - 3:
            # Clamp to the end of the final span.
            span = len(self.ctrlPoint) - 4
            t = 1.
        sq_t = t * t
        cub_t = sq_t * t
        inv_t = 1. - t
        sq_inv_t = inv_t * inv_t
        cub_inv_t = sq_inv_t * inv_t
        return (
            cub_inv_t * self.ctrlPoint[span]
            + (3.*cub_t - 6.*sq_t + 4.)*self.ctrlPoint[span+1]
            + (-3.*cub_t + 3.*sq_t + 3.*t + 1.)*self.ctrlPoint[span+2]
            + cub_t * self.ctrlPoint[span+3]
        )/6.

    def clear(self):
        """Remove every control point."""
        self.ctrlPoint.clear()
class BSpline:
    """Uniform cubic B-spline over an ordered set of control points.

    Control points live in ``self.ctrlPoint``, a dict keyed by consecutive
    integer indices starting at 0.  ``get_value`` evaluates the curve with
    the standard uniform cubic B-spline basis.
    """

    def __init__(self):
        # index (int) -> control point (scalar or 1-D np.ndarray)
        self.ctrlPoint = dict()

    def generate_control_point(self, points, points_offset=None):
        """Compute control points so the spline interpolates ``points``.

        Solves the tridiagonal cubic-spline-to-B-spline system directly
        (``np.linalg.solve``) instead of forming an explicit inverse, which
        is both cheaper and numerically more accurate.

        Parameters
        ----------
        points : sequence of coordinate sequences, at least two entries.
        points_offset : optional per-point offsets, same length as ``points``.
        """
        assert len(points) >= 2
        if points_offset is not None:
            assert len(points) == len(points_offset)
        num_points = len(points)
        # Interior rows encode (c_{i-1} + 4 c_i + c_{i+1}) / 6 = p_i; the
        # first/last rows impose a zero second difference at the ends.
        cubic_spline_to_B_spline = (4. * np.eye(num_points + 2)
                                    + np.eye(num_points + 2, k=1)
                                    + np.eye(num_points + 2, k=-1))
        cubic_spline_to_B_spline[0, :3] = np.array([1., -2., 1.])
        cubic_spline_to_B_spline[-1, -3:] = np.array([1., -2., 1.])
        cubic_spline_to_B_spline *= 1. / 6.
        points_matrix = np.zeros((num_points + 2, len(points[0])))
        for point_idx in range(num_points):
            points_matrix[point_idx + 1, :] = np.asarray(points[point_idx])
            if points_offset is not None:
                points_matrix[point_idx + 1, :] += np.asarray(points_offset[point_idx])
        control_points_matrix = np.linalg.solve(cubic_spline_to_B_spline, points_matrix)
        for point_idx in range(num_points + 2):
            self.ctrlPoint[point_idx] = control_points_matrix[point_idx, :]

    def add_control_point(self, controlPoint):
        """Append a control point at the next free integer index."""
        self.ctrlPoint[len(self.ctrlPoint)] = controlPoint

    def set_control_point(self, idx, controlPoint):
        """Overwrite (or insert) the control point at index ``idx``."""
        self.ctrlPoint[idx] = controlPoint

    def get_control_points(self):
        """Return the internal index -> control point dict (not a copy)."""
        return self.ctrlPoint

    def get_control_point(self, idx):
        """Return the control point stored at index ``idx``."""
        return self.ctrlPoint[idx]

    def get_value(self, tt):
        """Evaluate the spline at global parameter ``tt``.

        ``tt`` in ``[0, n - 3]`` for ``n`` control points; values past the
        last segment are clamped to the end of the final segment.
        Requires at least four control points.
        """
        assert len(self.ctrlPoint) >= 4
        idx = int(tt)
        t = tt - idx
        if idx >= len(self.ctrlPoint) - 3:
            # Clamp to the last valid segment, evaluated at its endpoint.
            idx = len(self.ctrlPoint) - 4
            t = 1.
        sq_t = t * t
        cub_t = sq_t * t
        inv_t = 1. - t
        sq_inv_t = inv_t * inv_t
        cub_inv_t = sq_inv_t * inv_t
        # Uniform cubic B-spline basis functions, common factor 1/6.
        return (cub_inv_t * self.ctrlPoint[idx]
                + (3.*cub_t - 6.*sq_t + 4.) * self.ctrlPoint[idx + 1]
                + (-3.*cub_t + 3.*sq_t + 3.*t + 1.) * self.ctrlPoint[idx + 2]
                + cub_t * self.ctrlPoint[idx + 3]) / 6.

    def clear(self):
        """Remove every stored control point."""
        self.ctrlPoint.clear()
import random
from faker import Faker
from sqlalchemy.exc import IntegrityError
from .models import Category, Post, Comment
from .models import Admin
from .extensions import db
fake = Faker('zh_CN')
def fake_admin():
admin = Admin(
username='admin',
blog_title='Blog',
blog_sub_title="No, I'm the real thing.",
name='shui',
about='你好,我是codershui,一名学过心理学的程序猿',
password='<PASSWORD>'
)
db.session.add(admin)
db.session.commit()
def fake_categories(count=10):
    """Create up to ``count`` categories with fake names.

    Fixes the original defect where ``count`` was ignored and a single
    category was created.  Each category is committed individually so a
    duplicate name (IntegrityError suggests a uniqueness constraint on
    ``Category.name``) only skips that one iteration.
    """
    for _ in range(count):
        category = Category(name=fake.word())
        db.session.add(category)
        try:
            db.session.commit()
        except IntegrityError:
            # fake.word() can repeat; drop the duplicate and continue.
            db.session.rollback()
def fake_posts(count=50):
    """Generate ``count`` posts, each attached to a random existing category."""
    for _ in range(count):
        db.session.add(Post(
            title=fake.sentence(),
            body=fake.text(2000),
            category=Category.query.get(random.randint(1, Category.query.count())),
            timestamp=fake.date_time_this_year(),
        ))
    db.session.commit()
def fake_comments(count=5000):
    """Populate the comments table: reviewed, pending, admin and reply comments."""
    # Regular, already-reviewed visitor comments.
    for _ in range(count):
        db.session.add(Comment(
            author=fake.name(),
            email=fake.email(),
            site=fake.url(),
            body=fake.sentence(),
            timestamp=fake.date_time_this_year(),
            reviewed=True,
            post=Post.query.get(random.randint(1, Post.query.count())),
        ))
    extra = int(count * 0.1)
    for _ in range(extra):
        # Comments still awaiting moderation.
        db.session.add(Comment(
            author=fake.name(),
            email=fake.email(),
            site=fake.url(),
            body=fake.sentence(),
            timestamp=fake.date_time_this_year(),
            reviewed=False,
            post=Post.query.get(random.randint(1, Post.query.count())),
        ))
        # Comments written by the site administrator.
        db.session.add(Comment(
            author='shui',
            email='<EMAIL>',
            site='example.com',
            body=fake.sentence(),
            timestamp=fake.date_time_this_year(),
            from_admin=True,
            reviewed=True,
            post=Post.query.get(random.randint(1, Post.query.count())),
        ))
    # Replies to randomly chosen existing comments.
    for _ in range(extra):
        db.session.add(Comment(
            author=fake.name(),
            email=fake.email(),
            site=fake.url(),
            body=fake.sentence(),
            timestamp=fake.date_time_this_year(),
            reviewed=True,
            replied=Comment.query.get(random.randint(1, Comment.query.count())),
            post=Post.query.get(random.randint(1, Post.query.count())),
        ))
    db.session.commit()
from faker import Faker
from sqlalchemy.exc import IntegrityError
from .models import Category, Post, Comment
from .models import Admin
from .extensions import db
fake = Faker('zh_CN')
def fake_admin():
admin = Admin(
username='admin',
blog_title='Blog',
blog_sub_title="No, I'm the real thing.",
name='shui',
about='你好,我是codershui,一名学过心理学的程序猿',
password='<PASSWORD>'
)
db.session.add(admin)
db.session.commit()
def fake_categories(count=10):
category = Category(name=fake.word())
db.session.add(category)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def fake_posts(count=50):
for i in range(count):
post = Post(
title=fake.sentence(),
body=fake.text(2000),
category=Category.query.get(random.randint(1, Category.query.count())),
timestamp=fake.date_time_this_year()
)
db.session.add(post)
db.session.commit()
def fake_comments(count=5000):
for i in range(count):
comment = Comment(
author=fake.name(),
email=fake.email(),
site=fake.url(),
body=fake.sentence(),
timestamp=fake.date_time_this_year(),
reviewed=True,
post=Post.query.get(random.randint(1,Post.query.count()))
)
db.session.add(comment)
salt = int(count * 0.1)
for i in range(salt):
#未审核评论
comment = Comment(
author=fake.name(),
email=fake.email(),
site=fake.url(),
body=fake.sentence(),
timestamp=fake.date_time_this_year(),
reviewed=False,
post=Post.query.get(random.randint(1, Post.query.count()))
)
db.session.add(comment)
#管理员评论
comment = Comment(
author='shui',
email='<EMAIL>',
site='example.com',
body=fake.sentence(),
timestamp=fake.date_time_this_year(),
from_admin=True,
reviewed=True,
post=Post.query.get(random.randint(1,Post.query.count()))
)
db.session.add(comment)
#回复
for i in range(salt):
comment = Comment(
author=fake.name(),
email=fake.email(),
site=fake.url(),
body=fake.sentence(),
timestamp=fake.date_time_this_year(),
reviewed=True,
replied=Comment.query.get(random.randint(1,Comment.query.count())),
post=Post.query.get(random.randint(1, Post.query.count()))
)
db.session.add(comment)
db.session.commit() | 0.281109 | 0.07056 |
import os
import re
import urllib
import json
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class PurevidResolver(Plugin, UrlResolver, SiteAuth, PluginSettings):
    """urlresolver plugin for purevid.com media links.

    Combines three plugin roles: URL resolution, site authentication
    (purevid requires a logged-in account) and add-on settings.
    Legacy Python 2 code (``urllib.urlencode``).
    """

    implements = [UrlResolver, SiteAuth, PluginSettings]
    name = "purevid"
    domains = ["purevid.com"]
    # Cookie jar persisted under the add-on profile so logins survive restarts.
    profile_path = common.profile_path
    pv_cookie_file = os.path.join(profile_path, '%s.cookies' % name)

    def __init__(self):
        # Resolver priority is user-configurable; fall back to 1.
        p = self.get_setting('priority') or 1
        self.priority = int(p)
        self.net = Net()
        try:
            os.makedirs(os.path.dirname(self.pv_cookie_file))
        except OSError:
            # Profile directory already exists -- nothing to do.
            pass

    #UrlResolver methods
    def get_media_url(self, host, media_id):
        """Return a playable stream URL (with auth cookies appended) for media_id."""
        web_url = self.get_url(host, media_id)
        html = self.net.http_GET(web_url).content
        data = json.loads(html)
        # First bitrate entry is used for 'FLV' quality, the last otherwise --
        # presumably lowest vs highest bitrate; TODO confirm against the API.
        if self.get_setting('quality') == 'FLV':
            url = data['clip']['bitrates'][0]['url']
        else:
            url = data['clip']['bitrates'][-1]['url']
        # Append the lighttpd auth parameters supplied by the API response.
        params = ''
        for val in data['plugins']['lighttpd']['params']:
            params += val['name'] + '=' + val['value'] + '&'
        url = url + '?' + params[:-1]
        # '|' separates the URL from request headers (XBMC convention);
        # forward the session cookies so the stream request is authenticated.
        cookies = {}
        for cookie in self.net._cj:
            cookies[cookie.name] = cookie.value
        url = url + '|' + urllib.urlencode({'Cookie': urllib.urlencode(cookies)})
        common.addon.log_debug(url)
        return url

    def get_url(self, host, media_id):
        """Build the JSON endpoint that describes the embedded video."""
        return 'http://www.purevid.com/?m=video_info_embed_flv&id=%s' % media_id

    def get_host_and_id(self, url):
        """Extract ``(host, media_id)`` from a purevid video URL, or ``False``."""
        r = re.search('//(.+?)/v/([0-9A-Za-z]+)', url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        """Handle a URL only when the user has enabled login for this resolver."""
        if self.get_setting('login') == 'false':
            return False
        common.addon.log(url)
        return 'purevid' in url

    #SiteAuth methods
    def needLogin(self):
        """Return True when there is no usable authenticated session."""
        url = 'http://www.purevid.com/?m=main'
        if not os.path.exists(self.pv_cookie_file):
            return True
        self.net.set_cookies(self.pv_cookie_file)
        source = self.net.http_GET(url).content
        common.addon.log_debug(source.encode('utf-8'))
        # The welcome banner is only rendered for authenticated sessions.
        if re.search("""<span>Welcome <strong>.*</strong></span>""", source) :
            common.addon.log_debug('needLogin returning False')
            return False
        else :
            common.addon.log_debug('needLogin returning True')
            return True

    def login(self):
        """Log in with the configured credentials; persist cookies on success."""
        if self.needLogin() :
            common.addon.log('login to purevid')
            url = 'http://www.purevid.com/?m=login'
            data = {'username' : self.get_setting('username'), 'password' : self.get_setting('password')}
            source = self.net.http_POST(url,data).content
            # Heuristic: the response echoes the username when logged in.
            if re.search(self.get_setting('username'), source):
                self.net.save_cookies(self.pv_cookie_file)
                self.net.set_cookies(self.pv_cookie_file)
                return True
            else:
                return False
        else :
            return True

    #PluginSettings methods
    def get_settings_xml(self):
        """Return the XBMC settings XML fragment for this resolver."""
        xml = PluginSettings.get_settings_xml(self)
        xml += '<setting id="PurevidResolver_login" '
        xml += 'type="bool" label="Login" default="false"/>\n'
        xml += '<setting id="PurevidResolver_username" enable="eq(-1,true)" '
        xml += 'type="text" label="    username" default=""/>\n'
        xml += '<setting id="PurevidResolver_password" enable="eq(-2,true)" '
        xml += 'type="text" label="    password" option="hidden" default=""/>\n'
        xml += '<setting label="Video quality" id="PurevidResolver_quality" '
        xml += 'type="labelenum" values="FLV|Maximum" default="Maximum" />\n'
        xml += '<setting label="This plugin calls the Purevid urlresolver - '
        xml += 'change settings there." type="lsep" />\n'
        return xml
import re
import urllib
import json
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class PurevidResolver(Plugin, UrlResolver, SiteAuth, PluginSettings):
implements = [UrlResolver, SiteAuth, PluginSettings]
name = "purevid"
domains = ["purevid.com"]
profile_path = common.profile_path
pv_cookie_file = os.path.join(profile_path, '%s.cookies' % name)
def __init__(self):
p = self.get_setting('priority') or 1
self.priority = int(p)
self.net = Net()
try:
os.makedirs(os.path.dirname(self.pv_cookie_file))
except OSError:
pass
#UrlResolver methods
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
data = json.loads(html)
if self.get_setting('quality') == 'FLV':
url = data['clip']['bitrates'][0]['url']
else:
url = data['clip']['bitrates'][-1]['url']
params = ''
for val in data['plugins']['lighttpd']['params']:
params += val['name'] + '=' + val['value'] + '&'
url = url + '?' + params[:-1]
cookies = {}
for cookie in self.net._cj:
cookies[cookie.name] = cookie.value
url = url + '|' + urllib.urlencode({'Cookie': urllib.urlencode(cookies)})
common.addon.log_debug(url)
return url
def get_url(self, host, media_id):
return 'http://www.purevid.com/?m=video_info_embed_flv&id=%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/v/([0-9A-Za-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('login') == 'false':
return False
common.addon.log(url)
return 'purevid' in url
#SiteAuth methods
def needLogin(self):
url = 'http://www.purevid.com/?m=main'
if not os.path.exists(self.pv_cookie_file):
return True
self.net.set_cookies(self.pv_cookie_file)
source = self.net.http_GET(url).content
common.addon.log_debug(source.encode('utf-8'))
if re.search("""<span>Welcome <strong>.*</strong></span>""", source) :
common.addon.log_debug('needLogin returning False')
return False
else :
common.addon.log_debug('needLogin returning True')
return True
def login(self):
if self.needLogin() :
common.addon.log('login to purevid')
url = 'http://www.purevid.com/?m=login'
data = {'username' : self.get_setting('username'), 'password' : self.get_setting('password')}
source = self.net.http_POST(url,data).content
if re.search(self.get_setting('username'), source):
self.net.save_cookies(self.pv_cookie_file)
self.net.set_cookies(self.pv_cookie_file)
return True
else:
return False
else :
return True
#PluginSettings methods
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting id="PurevidResolver_login" '
xml += 'type="bool" label="Login" default="false"/>\n'
xml += '<setting id="PurevidResolver_username" enable="eq(-1,true)" '
xml += 'type="text" label=" username" default=""/>\n'
xml += '<setting id="PurevidResolver_password" enable="eq(-2,true)" '
xml += 'type="text" label=" password" option="hidden" default=""/>\n'
xml += '<setting label="Video quality" id="PurevidResolver_quality" '
xml += 'type="labelenum" values="FLV|Maximum" default="Maximum" />\n'
xml += '<setting label="This plugin calls the Purevid urlresolver - '
xml += 'change settings there." type="lsep" />\n'
return xml | 0.166269 | 0.046703 |
import pyglet
from pyglet.gl import *

# Grab the first connected joystick; fail fast if none is present.
joysticks = pyglet.input.get_joysticks()
assert joysticks, 'No joystick device is connected'
joystick = joysticks[0]
joystick.open()

window = pyglet.window.Window(width=800, height=800)
batch = pyglet.graphics.Batch()

# Labels
# Static section headers for the two indicator areas.
pyglet.text.Label("Buttons:", x=15, y=window.height - 25, font_size=14, batch=batch)
pyglet.text.Label("D Pad:", x=window.width - 125, y=window.height - 25, font_size=14, batch=batch)

# One numbered label plus an indicator rectangle per joystick button,
# laid out in two columns.  The lists also keep Python references to the
# objects alive for the lifetime of the script.
button_labels = []
button_shapes = []
for i in range(len(joystick.buttons)):
    rows = len(joystick.buttons) // 2
    y = window.height - 50 - 25 * (i % rows)
    x = 35 + 60 * (i // rows)
    label = pyglet.text.Label(f"{i}:", x=x, y=y, font_size=14, anchor_x='right', batch=batch)
    button_labels.append(label)
    shape = pyglet.shapes.Rectangle(x + 10, y + 1, 10, 10, color=(255, 0, 0), batch=batch)
    button_shapes.append(shape)

# Marker that tracks the analog sticks (position and size) ...
joystick_rect = pyglet.shapes.Rectangle(window.width // 2, window.height // 2, 10, 10, color=(255, 0, 255), batch=batch)
joystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2
# ... and a marker that tracks the hat (d-pad) position.
d_pad_rect = pyglet.shapes.Rectangle(window.width - 75, window.height - 100, 10, 10, color=(0, 0, 255), batch=batch)
@window.event
def on_draw():
    """Redraw the scene and refresh every indicator from the joystick state."""
    window.clear()
    batch.draw()

    # Map stick axes from [-1, 1] to window coordinates (y is inverted so
    # pushing the stick up moves the marker up).
    x = round((.5 * joystick.x + 1), 2) * window.width / 2
    y = round((-.5 * joystick.y + 1), 2) * window.height / 2
    # Right stick and the z axis scale the marker instead of moving it.
    rx = (.5 * joystick.rx + 1) * 60
    ry = (-.5 * joystick.ry + 1) * 60
    z = joystick.z * 50
    # Axes
    joystick_rect.position = x, y
    # Re-anchor every frame so the rectangle stays centred as it resizes.
    joystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2
    joystick_rect.width = 10 + rx + z
    joystick_rect.height = 10 + ry + z
    # Buttons
    # Green while pressed, red otherwise.
    for i in range(len(joystick.buttons)):
        rect = button_shapes[i]
        rect.color = (0, 255, 0) if joystick.buttons[i] else (255, 0, 0)
    # Hat
    # hat_x/hat_y step in {-1, 0, 1}; offset the marker 50 px per step.
    d_pad_x = window.width - 100 + joystick.hat_x * 50
    d_pad_y = window.height - 100 + joystick.hat_y * 50
    d_pad_rect.position = d_pad_x, d_pad_y

pyglet.app.run()
import pyglet
from pyglet.gl import *
joysticks = pyglet.input.get_joysticks()
assert joysticks, 'No joystick device is connected'
joystick = joysticks[0]
joystick.open()
window = pyglet.window.Window(width=800, height=800)
batch = pyglet.graphics.Batch()
# Labels
pyglet.text.Label("Buttons:", x=15, y=window.height - 25, font_size=14, batch=batch)
pyglet.text.Label("D Pad:", x=window.width - 125, y=window.height - 25, font_size=14, batch=batch)
button_labels = []
button_shapes = []
for i in range(len(joystick.buttons)):
rows = len(joystick.buttons) // 2
y = window.height - 50 - 25 * (i % rows)
x = 35 + 60 * (i // rows)
label = pyglet.text.Label(f"{i}:", x=x, y=y, font_size=14, anchor_x='right', batch=batch)
button_labels.append(label)
shape = pyglet.shapes.Rectangle(x + 10, y + 1, 10, 10, color=(255, 0, 0), batch=batch)
button_shapes.append(shape)
joystick_rect = pyglet.shapes.Rectangle(window.width // 2, window.height // 2, 10, 10, color=(255, 0, 255), batch=batch)
joystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2
d_pad_rect = pyglet.shapes.Rectangle(window.width - 75, window.height - 100, 10, 10, color=(0, 0, 255), batch=batch)
@window.event
def on_draw():
window.clear()
batch.draw()
x = round((.5 * joystick.x + 1), 2) * window.width / 2
y = round((-.5 * joystick.y + 1), 2) * window.height / 2
rx = (.5 * joystick.rx + 1) * 60
ry = (-.5 * joystick.ry + 1) * 60
z = joystick.z * 50
# Axes
joystick_rect.position = x, y
joystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2
joystick_rect.width = 10 + rx + z
joystick_rect.height = 10 + ry + z
# Buttons
for i in range(len(joystick.buttons)):
rect = button_shapes[i]
rect.color = (0, 255, 0) if joystick.buttons[i] else (255, 0, 0)
# Hat
d_pad_x = window.width - 100 + joystick.hat_x * 50
d_pad_y = window.height - 100 + joystick.hat_y * 50
d_pad_rect.position = d_pad_x, d_pad_y
pyglet.app.run() | 0.317638 | 0.400456 |
import os
import click
from ....click.coroutine import coroutine
from ....click.docker import DockerPathExists, wrap_docker
from ....click.lazy import lazy_import
from ....click.mutex import MutexOption, ValidateMutex
lazy_import(
globals(),
"""
from ....click.validators import validate_validators
from ....curation.launch import launch_curation
from ....curation.pyppeteer.resource_navigator import PyppeteerResourceNavigator
from ....curation.resources import curate_resources
from ....curation.resources_session import ResourcesCurationSession
from ....datamine import Datamine
""",
)
# ValidateMutex enforces the MutexOption exclusivity rules declared below.
@click.command(cls=ValidateMutex(click.Command))
@click.pass_context
# DATAMINE must be a readable file (checked inside Docker too); '-' reads stdin.
@click.argument(
    "datamine",
    type=click.Path(
        exists=DockerPathExists(), readable=True, dir_okay=False, allow_dash=True
    ),
)
@click.option(
    "--validate",
    "-v",
    "validators",
    multiple=True,
    callback=validate_validators,
    default=["dns-error", "invalid-response", "http-status-error"],
    show_envvar=True,
)
@click.option(
    "--valid-luis-threshold",
    type=click.IntRange(0, 100),
    default=0,
    show_envvar=True,
)
@click.option(
    "--random-luis-threshold",
    type=click.IntRange(0, 100),
    default=100,
    show_envvar=True,
)
# --discard-session and --session are mutually exclusive (see MutexOption).
@click.option(
    "--discard-session",
    is_flag=True,
    cls=MutexOption,
    not_required_if=["session"],
    show_envvar=True,
)
@click.option(
    "--session",
    type=click.Path(exists=False, writable=True, dir_okay=False),
    default="resources_session.gz",
    cls=MutexOption,
    not_required_if=["discard_session"],
    show_envvar=True,
)
@wrap_docker()
@coroutine
async def resources(
    ctx,
    datamine,
    validators,
    valid_luis_threshold,
    random_luis_threshold,
    discard_session,
    session,
):
    """
    Starts a new session for the interactive curation process of resource providers.
    Reads the scraped information on providers from the DATAMINE file path.
    -v, --validate VALIDATOR enables the VALIDATOR during the curation session.
    By default the dns-error, invalid-response and http-status-error validators
    will be enabled. If this option is provided at least once, only the validators
    mentioned explicitly in the option will be enabled.
    \b
    You can list the registered (not yet validated) validator modules using:
    > cmd-iaso curate --list-validators.
    --valid-luis-threshold specifies the percentage of pings with valid LUIs to a
    resource which must exhibit an error for it to be reported.
    By default, all errors to valid LUIs are reported.
    Each validator can choose whether to abide by this option or not.
    --random-luis-threshold specifies the percentage of pings with random LUIs to
    a resource which must exhibit an error for it to be reported.
    By default, no errors to random LUIs are reported.
    Each validator can choose whether to abide by this option or not.
    \b
    --session SESSION stores the session information at the SESSION path.
    If this option is not provided, resources_session.gz will be used by default.
    To disable storing the new session altogether, use:
    > cmd-iaso curate [...] start resources [...] --discard-session [...]
    \b
    For more information on the interactive curation process, use:
    > cmd-iaso curate --help
    """
    # Refuse to silently clobber an existing session file.
    if session is not None and os.path.exists(session):
        click.confirm(
            f"{session} already exists. Do you want to overwrite {session} with a fresh session?",
            abort=True,
        )
    click.echo(
        click.style(f"Loading the datamine file from {datamine} ...", fg="yellow")
    )
    # The shared curation options live on the grandparent ('curate') context.
    await launch_curation(
        curate_resources,
        PyppeteerResourceNavigator,
        ctx,
        ctx.parent.parent.params["statistics"],
        ctx.parent.parent.params["controller"],
        ctx.parent.parent.params["navigator"],
        ctx.parent.parent.params["informant"],
        ctx.parent.parent.params["chrome"],
        ctx.parent.parent.params["tags"],
        ctx.parent.parent.params["ignored_tags"],
        # Fresh session: position 0, no visited entries.
        ResourcesCurationSession(
            session,
            Datamine(datamine),
            validators,
            valid_luis_threshold,
            random_luis_threshold,
            0,
            set(),
        ),
    )
import click
from ....click.coroutine import coroutine
from ....click.docker import DockerPathExists, wrap_docker
from ....click.lazy import lazy_import
from ....click.mutex import MutexOption, ValidateMutex
lazy_import(
globals(),
"""
from ....click.validators import validate_validators
from ....curation.launch import launch_curation
from ....curation.pyppeteer.resource_navigator import PyppeteerResourceNavigator
from ....curation.resources import curate_resources
from ....curation.resources_session import ResourcesCurationSession
from ....datamine import Datamine
""",
)
@click.command(cls=ValidateMutex(click.Command))
@click.pass_context
@click.argument(
"datamine",
type=click.Path(
exists=DockerPathExists(), readable=True, dir_okay=False, allow_dash=True
),
)
@click.option(
"--validate",
"-v",
"validators",
multiple=True,
callback=validate_validators,
default=["dns-error", "invalid-response", "http-status-error"],
show_envvar=True,
)
@click.option(
"--valid-luis-threshold",
type=click.IntRange(0, 100),
default=0,
show_envvar=True,
)
@click.option(
"--random-luis-threshold",
type=click.IntRange(0, 100),
default=100,
show_envvar=True,
)
@click.option(
"--discard-session",
is_flag=True,
cls=MutexOption,
not_required_if=["session"],
show_envvar=True,
)
@click.option(
"--session",
type=click.Path(exists=False, writable=True, dir_okay=False),
default="resources_session.gz",
cls=MutexOption,
not_required_if=["discard_session"],
show_envvar=True,
)
@wrap_docker()
@coroutine
async def resources(
ctx,
datamine,
validators,
valid_luis_threshold,
random_luis_threshold,
discard_session,
session,
):
"""
Starts a new session for the interactive curation process of resource providers.
Reads the scraped information on providers from the DATAMINE file path.
-v, --validate VALIDATOR enables the VALIDATOR during the curation session.
By default the dns-error, invalid-response and http-status-error validators
will be enabled. If this options is provided at least once, only the validators
mentioned explicitly in the option will be enabled.
\b
You can list the registered (not yet validated) validator modules using:
> cmd-iaso curate --list-validators.
--valid-luis-threshold specifies the percentage of pings with valid LUIs to a
resource which must exhibit an error for it to be reported.
By default, all errors to valid LUIs are reported.
Each validator can choose whether to abide by this option or not.
--random-luis-threshold specifies the percentage of pings with random LUIS to
a resource which must exhibit an error for it to be reported.
By default, no errors to random LUIs are reported.
Each validator can choose whether to abide by this option or not.
\b
--session SESSION stores the session information at the SESSION path.
If this option is not provided, resources_session.gz will be used by default.
To disable storing the new session altogther, use:
> cmd-iaso curate [...] start resources [...] --discard-session [...]
\b
For more information on the interactive curation process, use:
> cmd-iaso curate --help
"""
if session is not None and os.path.exists(session):
click.confirm(
f"{session} already exists. Do you want to overwrite {session} with a fresh session?",
abort=True,
)
click.echo(
click.style(f"Loading the datamine file from {datamine} ...", fg="yellow")
)
await launch_curation(
curate_resources,
PyppeteerResourceNavigator,
ctx,
ctx.parent.parent.params["statistics"],
ctx.parent.parent.params["controller"],
ctx.parent.parent.params["navigator"],
ctx.parent.parent.params["informant"],
ctx.parent.parent.params["chrome"],
ctx.parent.parent.params["tags"],
ctx.parent.parent.params["ignored_tags"],
ResourcesCurationSession(
session,
Datamine(datamine),
validators,
valid_luis_threshold,
random_luis_threshold,
0,
set(),
),
) | 0.451085 | 0.115611 |
from typing import Optional, TYPE_CHECKING
from dataclasses import dataclass, asdict
from cloudfoundry_client.v3.entities import Entity, EntityManager, ToManyRelationship
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
@dataclass
class AppsQuota:
    """Application limits section (``apps``) of an organization quota."""

    total_memory_in_mb: int        # total memory across applications, in MB
    per_process_memory_in_mb: int  # memory limit per process, in MB
    total_instances: int           # total number of app instances
    per_app_tasks: int             # number of tasks per application
@dataclass
class ServicesQuota:
    """Service limits section (``services``) of an organization quota."""

    paid_services_allowed: bool   # whether paid service plans may be used
    total_service_instances: int  # total number of service instances
    total_service_keys: int       # total number of service keys
@dataclass
class RoutesQuota:
    """Route limits section (``routes``) of an organization quota."""

    total_routes: int          # total number of routes
    total_reserved_ports: int  # total number of reserved route ports
@dataclass
class DomainsQuota:
    """Domain limits section (``domains``) of an organization quota."""

    total_domains: int  # total number of domains
class OrganizationQuotaManager(EntityManager):
    """Manager for Cloud Foundry v3 organization quota entities."""

    def __init__(self, target_endpoint: str, client: "CloudFoundryClient"):
        super().__init__(target_endpoint, client, "/v3/organization_quotas")

    def remove(self, guid: str):
        """Delete the organization quota identified by ``guid``."""
        super()._remove(guid)

    def create(
        self,
        name: str,
        apps_quota: Optional[AppsQuota] = None,
        services_quota: Optional[ServicesQuota] = None,
        routes_quota: Optional[RoutesQuota] = None,
        domains_quota: Optional[DomainsQuota] = None,
        assigned_organizations: Optional[ToManyRelationship] = None,
    ) -> Entity:
        """Create a new organization quota built from the given sections."""
        payload = self._asdict(
            name, apps_quota, services_quota, routes_quota, domains_quota, assigned_organizations
        )
        return super()._create(payload)

    def update(
        self,
        guid: str,
        name: str,
        apps_quota: Optional[AppsQuota] = None,
        services_quota: Optional[ServicesQuota] = None,
        routes_quota: Optional[RoutesQuota] = None,
        domains_quota: Optional[DomainsQuota] = None,
    ) -> Entity:
        """Update the organization quota identified by ``guid``."""
        payload = self._asdict(name, apps_quota, services_quota, routes_quota, domains_quota)
        return super()._update(guid, payload)

    def apply_to_organizations(self, guid: str, organizations: ToManyRelationship) -> ToManyRelationship:
        """Assign the quota to organizations; return the updated relationship."""
        url = "%s%s/%s/relationships/organizations" % (self.target_endpoint, self.entity_uri, guid)
        response = super()._post(url, data=organizations)
        return ToManyRelationship.from_json_object(response)

    def _asdict(
        self,
        name: str,
        apps_quota: Optional[AppsQuota] = None,
        services_quota: Optional[ServicesQuota] = None,
        routes_quota: Optional[RoutesQuota] = None,
        domains_quota: Optional[DomainsQuota] = None,
        assigned_organizations: Optional[ToManyRelationship] = None,
    ):
        """Serialize the provided quota sections into a request payload dict."""
        payload = {"name": name}
        sections = (
            ("apps", apps_quota),
            ("services", services_quota),
            ("routes", routes_quota),
            ("domains", domains_quota),
        )
        for key, quota in sections:
            if quota:
                payload[key] = asdict(quota)
        if assigned_organizations:
            payload["relationships"] = {"organizations": assigned_organizations}
        return payload
from dataclasses import dataclass, asdict
from cloudfoundry_client.v3.entities import Entity, EntityManager, ToManyRelationship
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
@dataclass
class AppsQuota:
total_memory_in_mb: int
per_process_memory_in_mb: int
total_instances: int
per_app_tasks: int
@dataclass
class ServicesQuota:
paid_services_allowed: bool
total_service_instances: int
total_service_keys: int
@dataclass
class RoutesQuota:
total_routes: int
total_reserved_ports: int
@dataclass
class DomainsQuota:
total_domains: int
class OrganizationQuotaManager(EntityManager):
def __init__(self, target_endpoint: str, client: "CloudFoundryClient"):
super().__init__(target_endpoint, client, "/v3/organization_quotas")
def remove(self, guid: str):
super()._remove(guid)
def create(
self,
name: str,
apps_quota: Optional[AppsQuota] = None,
services_quota: Optional[ServicesQuota] = None,
routes_quota: Optional[RoutesQuota] = None,
domains_quota: Optional[DomainsQuota] = None,
assigned_organizations: Optional[ToManyRelationship] = None,
) -> Entity:
data = self._asdict(name, apps_quota, services_quota, routes_quota, domains_quota, assigned_organizations)
return super()._create(data)
def update(
self,
guid: str,
name: str,
apps_quota: Optional[AppsQuota] = None,
services_quota: Optional[ServicesQuota] = None,
routes_quota: Optional[RoutesQuota] = None,
domains_quota: Optional[DomainsQuota] = None,
) -> Entity:
data = self._asdict(name, apps_quota, services_quota, routes_quota, domains_quota)
return super()._update(guid, data)
def apply_to_organizations(self, guid: str, organizations: ToManyRelationship) -> ToManyRelationship:
return ToManyRelationship.from_json_object(
super()._post(
"%s%s/%s/relationships/organizations" % (self.target_endpoint, self.entity_uri, guid), data=organizations
)
)
def _asdict(
self,
name: str,
apps_quota: Optional[AppsQuota] = None,
services_quota: Optional[ServicesQuota] = None,
routes_quota: Optional[RoutesQuota] = None,
domains_quota: Optional[DomainsQuota] = None,
assigned_organizations: Optional[ToManyRelationship] = None,
):
data = {"name": name}
if apps_quota:
data["apps"] = asdict(apps_quota)
if services_quota:
data["services"] = asdict(services_quota)
if routes_quota:
data["routes"] = asdict(routes_quota)
if domains_quota:
data["domains"] = asdict(domains_quota)
if assigned_organizations:
data["relationships"] = {"organizations": assigned_organizations}
return data | 0.906805 | 0.18628 |
from get_user import read_ibutton, get_user_song
from flask import Flask, request, jsonify
from ntpath import basename
import sqlite3
import argparse
app = Flask(__name__)
def create_user_dict():
conn = sqlite3.connect('/harold/Harold/harold_api.db')
c = conn.cursor()
user_dict = {}
for row in c.execute('SELECT * FROM api_users ORDER BY username'):
user_dict[row[0]] = [row[1], row[2]]
conn.close()
return user_dict
def set_song(uid, song_id, db_path='/harold/Harold/harold_api.db'):
    """Reset the played flag and assign ``song_id`` for user ``uid``.

    Security fix: the original interpolated ``uid``/``song_id`` directly
    into the SQL string (injection risk); this version uses parameterized
    queries.  ``db_path`` defaults to the production database location.
    """
    conn = sqlite3.connect(db_path)
    try:
        conn.execute('UPDATE api_users SET song_played=0 WHERE username=?', (uid,))
        conn.execute('UPDATE api_users SET song_id=? WHERE username=?', (song_id, uid))
        conn.commit()
    finally:
        conn.close()
def create_user(uid, song_id, db_path='/harold/Harold/harold_api.db'):
    """Insert a new API user row with ``song_played`` initialised to 0.

    Security fix: values are bound as parameters instead of being
    formatted into the SQL string (injection risk in the original).
    ``db_path`` defaults to the production database location.
    """
    conn = sqlite3.connect(db_path)
    try:
        conn.execute('INSERT INTO api_users VALUES (?, ?, 0)', (uid, song_id))
        conn.commit()
    finally:
        conn.close()
@app.route("/<ibutton>/<song_id>", methods=["GET", "POST"])
def incoming_request(ibutton, song_id):
inc_req = (ibutton, song_id)
username, homedir = read_ibutton(inc_req[0])
song_json = []
if request.method == "GET":
song_index = 0
try:
song_list = get_user_song(homedir, username, False)
except:
song_list = [False]
try:
if isinstance(song_list, list):
for entry in song_list:
song_json.append(dict(id=song_index, name=basename(entry)))
song_index += 1
else:
song_json.append(dict(id=song_index, name=basename(song_list)))
return jsonify(songs=song_json, user=username, status="true")
except:
song_json.append(dict(id=0, name="null"))
return jsonify(songs=song_json, user=username, status="false")
if request.method == "POST":
try:
user_dict = create_user_dict()
print("User dict created")
if username in user_dict:
print("User found in dictionary!")
set_song(username, song_id)
print("Database updated.")
else:
print("User created in database.")
create_user(username, song_id)
print("Successful")
return jsonify({"error": False})
except:
return jsonify({"error": True})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--test",
help="Runs the server in test mode and updates the"
" testUsers database.",
action="store_true")
args = parser.parse_args()
app.run(host='0.0.0.0', port=56125, debug=args.test) | Harold/api.py | from get_user import read_ibutton, get_user_song
from flask import Flask, request, jsonify
from ntpath import basename
import sqlite3
import argparse
app = Flask(__name__)
def create_user_dict():
conn = sqlite3.connect('/harold/Harold/harold_api.db')
c = conn.cursor()
user_dict = {}
for row in c.execute('SELECT * FROM api_users ORDER BY username'):
user_dict[row[0]] = [row[1], row[2]]
conn.close()
return user_dict
def set_song(uid, song_id):
conn = sqlite3.connect('/harold/Harold/harold_api.db')
c = conn.cursor()
c.execute('UPDATE api_users SET song_played=0 WHERE username="{uid}";'.format(uid=uid))
c.execute('UPDATE api_users SET song_id={song_id} WHERE username="{uid}";'.format(song_id=song_id, uid=uid))
conn.commit()
conn.close()
def create_user(uid, song_id):
conn = sqlite3.connect('/harold/Harold/harold_api.db')
c = conn.cursor()
c.execute('INSERT INTO api_users VALUES ("{uid}", "{song_id}", 0)'.format(song_id=song_id, uid=uid))
conn.commit()
conn.close()
@app.route("/<ibutton>/<song_id>", methods=["GET", "POST"])
def incoming_request(ibutton, song_id):
inc_req = (ibutton, song_id)
username, homedir = read_ibutton(inc_req[0])
song_json = []
if request.method == "GET":
song_index = 0
try:
song_list = get_user_song(homedir, username, False)
except:
song_list = [False]
try:
if isinstance(song_list, list):
for entry in song_list:
song_json.append(dict(id=song_index, name=basename(entry)))
song_index += 1
else:
song_json.append(dict(id=song_index, name=basename(song_list)))
return jsonify(songs=song_json, user=username, status="true")
except:
song_json.append(dict(id=0, name="null"))
return jsonify(songs=song_json, user=username, status="false")
if request.method == "POST":
try:
user_dict = create_user_dict()
print("User dict created")
if username in user_dict:
print("User found in dictionary!")
set_song(username, song_id)
print("Database updated.")
else:
print("User created in database.")
create_user(username, song_id)
print("Successful")
return jsonify({"error": False})
except:
return jsonify({"error": True})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--test",
help="Runs the server in test mode and updates the"
" testUsers database.",
action="store_true")
args = parser.parse_args()
app.run(host='0.0.0.0', port=56125, debug=args.test) | 0.242744 | 0.064565 |
from typing import Optional, Dict, List
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from scanpy.tools import score_genes
from scTenifold.data._sim import *
def adobo_score(X,
                genes,
                n_bins: int = 25,
                n_ctrl: int = 50,
                random_state: int = 42,
                file_path: Optional[Path] = None):
    """Compute an adobo-style gene-signature score per sample.

    Genes are binned by mean expression (``pd.qcut``); for every signature
    gene a control set is sampled from the same expression bin, and the
    score is the mean difference between the signature genes and their
    bin-matched controls.

    Parameters
    ----------
    X : pd.DataFrame
        Expression matrix, genes on the index, samples as columns.
    genes : sequence of str
        Signature genes to score; must appear in ``X.index``.
    n_bins : int
        Number of equal-frequency expression bins used for control matching.
    n_ctrl : int
        Controls sampled (with replacement) per signature gene; when a bin
        is smaller than ``n_ctrl`` the whole bin is used instead.
    random_state : int
        Seed forwarded to ``DataFrame.sample`` for reproducibility.
    file_path : Optional[Path]
        Optional CSV destination for the resulting scores.
        (Original annotated this ``Path = None``; ``Optional`` is correct.)

    Returns
    -------
    pd.Series
        One score per sample (column of ``X``).

    Raises
    ------
    ValueError
        If ``genes`` is empty.
    """
    if len(genes) == 0:
        raise ValueError('Gene list ("genes") is empty.')
    gene_mean = X.mean(axis=1).sort_values()
    binned = pd.qcut(gene_mean, n_bins)  # equal-frequency expression bins
    ctrl_indices = []
    for gene in genes:
        # All genes falling in the same expression bin as this signature gene.
        same_bin = binned[binned == binned[binned.index == gene].values[0]]
        if n_ctrl > same_bin.shape[0]:
            ctrl_indices.append(same_bin.index)
        else:
            ctrl_indices.append(
                same_bin.sample(n_ctrl, replace=True, random_state=random_state).index
            )
    # Mean control expression per signature gene, one row per gene.
    ctrl_means = [X[X.index.isin(idx)].mean(axis=0) for idx in ctrl_indices]
    con = pd.concat(ctrl_means, axis=1).transpose()
    con.index = genes
    targets = X[X.index.isin(genes)].reindex(genes)
    scores = (targets - con).mean(axis=0)
    if file_path:
        scores.to_csv(file_path)
    return scores
def _get_assigned_bins(data_avg: np.ndarray,
cluster_len: int,
n_bins: int) -> np.ndarray:
assigned_bin = np.zeros(shape=(cluster_len, ), dtype=np.int32) # (G,)
bin_size = cluster_len / n_bins
for i_bin in range(n_bins):
assigned_bin[(assigned_bin == 0) &
(data_avg <= data_avg[int(np.round(bin_size * i_bin))])] = i_bin
return assigned_bin
def _get_ctrl_use(assigned_bin: np.ndarray,
gene_arr,
target_dict,
n_ctrl,
random_state) -> List[str]:
selected_bins = list(set(assigned_bin[np.in1d(gene_arr, target_dict["Pos"])]))
genes_in_same_bin = gene_arr[np.in1d(assigned_bin, selected_bins)]
ctrl_use = list()
for _ in range(len(target_dict["Pos"])):
ctrl_use.extend(random_state.choice(genes_in_same_bin, n_ctrl))
return list(set(ctrl_use))
def cell_cycle_score(X,
                     gene_list: List[str],
                     sample_list: List[str],
                     target_dict: Optional[Dict[str, List[str]]] = None,
                     n_bins: int = 25,
                     n_ctrl: int = 50,
                     random_state: int = 42,
                     file_path: Optional[Path] = None):
    """Score samples for a gene program (Seurat-style cell-cycle scoring).

    Genes are sorted by mean expression, binned, and the score for each
    sample is the mean expression of the 'Pos' target genes minus the mean
    of a bin-matched random control set.

    Parameters
    ----------
    X : np.ndarray
        Genes-by-samples expression matrix.
    gene_list : gene name per row of ``X`` (matched case-insensitively).
    sample_list : sample names; only used to index the optional CSV output.
    target_dict : optional {"Pos": [...], "Neg": [...]}; defaults to the
        module-level DEFAULT_POS / DEFAULT_NEG gene sets.
    n_bins : number of expression bins (see ``_get_assigned_bins``).
    n_ctrl : controls drawn per positive target (see ``_get_ctrl_use``).
    random_state : seed for the control-gene draw.
    file_path : optional CSV destination for the scores.

    Returns
    -------
    np.ndarray of one score per sample.

    Raises
    ------
    ValueError if none of the positive target genes occur in ``gene_list``.
    """
    rng = np.random.default_rng(random_state)
    if target_dict is None:
        target_dict = {"Pos": DEFAULT_POS,
                       "Neg": DEFAULT_NEG}
    else:
        target_dict = {k: [i.upper() for i in v] for k, v in target_dict.items()}
    # BUG FIX: upper-case gene_list BEFORE the overlap check. The original
    # compared the raw gene_list against the upper-cased targets, so a
    # lower-case gene_list always failed the check spuriously.
    gene_list = [i.upper() for i in gene_list]
    if len(set(gene_list) & set(target_dict["Pos"])) == 0:
        raise ValueError('No feature genes found in gene_list.')
    cluster_len = X.shape[0]
    # Sort genes (and rows of X) by mean expression before binning.
    data_avg = X.mean(axis=1)
    sort_arg = np.argsort(data_avg)
    data_avg = data_avg[sort_arg]
    gene_list = np.array(gene_list)[sort_arg]
    X = X[sort_arg, :]
    assigned_bin = _get_assigned_bins(data_avg, cluster_len, n_bins)
    used_ctrl = _get_ctrl_use(assigned_bin, gene_list, target_dict,
                              n_ctrl, rng)
    # Mean over the selected gene rows -> one value per sample.
    ctrl_score = X[np.in1d(gene_list, used_ctrl), :].mean(axis=0).T
    features_score = X[np.in1d(gene_list, target_dict["Pos"]), :].mean(axis=0).T
    scores = features_score - ctrl_score
    if file_path:
        pd.DataFrame({"score": scores}, index=sample_list).to_csv(file_path)
    return scores
if __name__ == '__main__':
    # Generate a synthetic data set and score it with the implementations in
    # this module plus scanpy's score_genes, writing all results to disk for
    # side-by-side comparison.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--random_state",
                        help="random seed", default=42, type=int)
    parser.add_argument("-o", "--output_path",
                        help="output directory, it will be automatically and recursively created",
                        default=".", type=str)
    parser.add_argument("-g", "--genes",
                        help="number of the genes in the test data",
                        default=1000, type=int)
    parser.add_argument("-s", "--samples",
                        help="number of the samples (cells/observations) in the test data",
                        default=100, type=int)
    parser.add_argument("-b", "--bins",
                        help="number of bins",
                        default=25, type=int)
    parser.add_argument("-c", "--ctrls",
                        help="number of controls",
                        default=50, type=int)
    args = parser.parse_args()
    output_dir = Path(args.output_path)
    output_dir.mkdir(parents=True, exist_ok=True)
    data_obj = TestDataGenerator(n_genes=args.genes,
                                 n_samples=args.samples,
                                 n_bins=args.bins,
                                 n_ctrl=args.ctrls,
                                 random_state=args.random_state)
    data_obj.save_data(output_dir / Path("test_data.csv"), use_normalized=True)
    # Same generated data in the container type each scorer expects.
    np_data = data_obj.get_data("numpy", True)
    np_data["file_path"] = output_dir / Path("cell_scores.csv")
    pd_data = data_obj.get_data("pandas", True)
    pd_data["file_path"] = output_dir / Path("adobo_cell_scores.csv")
    cell_cycle_score(**np_data)
    score_genes(**(data_obj.get_data("ann_data", True))).write_csvs(output_dir / Path("scanpy_result"))
adobo_score(**pd_data) | scTenifold/cell_cycle/scoring.py | from typing import Optional, Dict, List
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from scanpy.tools import score_genes
from scTenifold.data._sim import *
def adobo_score(X,
                genes,
                n_bins: int = 25,
                n_ctrl: int = 50,
                random_state: int = 42,
                file_path: Optional[Path] = None):
    """Compute an adobo-style gene-signature score per sample.

    Genes are binned by mean expression (``pd.qcut``); for every signature
    gene a control set is sampled from the same expression bin, and the
    score is the mean difference between the signature genes and their
    bin-matched controls.

    Parameters
    ----------
    X : pd.DataFrame
        Expression matrix, genes on the index, samples as columns.
    genes : sequence of str
        Signature genes to score; must appear in ``X.index``.
    n_bins : int
        Number of equal-frequency expression bins used for control matching.
    n_ctrl : int
        Controls sampled (with replacement) per signature gene; when a bin
        is smaller than ``n_ctrl`` the whole bin is used instead.
    random_state : int
        Seed forwarded to ``DataFrame.sample`` for reproducibility.
    file_path : Optional[Path]
        Optional CSV destination for the resulting scores.
        (Original annotated this ``Path = None``; ``Optional`` is correct.)

    Returns
    -------
    pd.Series
        One score per sample (column of ``X``).

    Raises
    ------
    ValueError
        If ``genes`` is empty.
    """
    if len(genes) == 0:
        raise ValueError('Gene list ("genes") is empty.')
    gene_mean = X.mean(axis=1).sort_values()
    binned = pd.qcut(gene_mean, n_bins)  # equal-frequency expression bins
    ctrl_indices = []
    for gene in genes:
        # All genes falling in the same expression bin as this signature gene.
        same_bin = binned[binned == binned[binned.index == gene].values[0]]
        if n_ctrl > same_bin.shape[0]:
            ctrl_indices.append(same_bin.index)
        else:
            ctrl_indices.append(
                same_bin.sample(n_ctrl, replace=True, random_state=random_state).index
            )
    # Mean control expression per signature gene, one row per gene.
    ctrl_means = [X[X.index.isin(idx)].mean(axis=0) for idx in ctrl_indices]
    con = pd.concat(ctrl_means, axis=1).transpose()
    con.index = genes
    targets = X[X.index.isin(genes)].reindex(genes)
    scores = (targets - con).mean(axis=0)
    if file_path:
        scores.to_csv(file_path)
    return scores
def _get_assigned_bins(data_avg: np.ndarray,
cluster_len: int,
n_bins: int) -> np.ndarray:
assigned_bin = np.zeros(shape=(cluster_len, ), dtype=np.int32) # (G,)
bin_size = cluster_len / n_bins
for i_bin in range(n_bins):
assigned_bin[(assigned_bin == 0) &
(data_avg <= data_avg[int(np.round(bin_size * i_bin))])] = i_bin
return assigned_bin
def _get_ctrl_use(assigned_bin: np.ndarray,
gene_arr,
target_dict,
n_ctrl,
random_state) -> List[str]:
selected_bins = list(set(assigned_bin[np.in1d(gene_arr, target_dict["Pos"])]))
genes_in_same_bin = gene_arr[np.in1d(assigned_bin, selected_bins)]
ctrl_use = list()
for _ in range(len(target_dict["Pos"])):
ctrl_use.extend(random_state.choice(genes_in_same_bin, n_ctrl))
return list(set(ctrl_use))
def cell_cycle_score(X,
                     gene_list: List[str],
                     sample_list: List[str],
                     target_dict: Optional[Dict[str, List[str]]] = None,
                     n_bins: int = 25,
                     n_ctrl: int = 50,
                     random_state: int = 42,
                     file_path: Optional[Path] = None):
    """Score samples for a gene program (Seurat-style cell-cycle scoring).

    Genes are sorted by mean expression, binned, and the score for each
    sample is the mean expression of the 'Pos' target genes minus the mean
    of a bin-matched random control set.

    Parameters
    ----------
    X : np.ndarray
        Genes-by-samples expression matrix.
    gene_list : gene name per row of ``X`` (matched case-insensitively).
    sample_list : sample names; only used to index the optional CSV output.
    target_dict : optional {"Pos": [...], "Neg": [...]}; defaults to the
        module-level DEFAULT_POS / DEFAULT_NEG gene sets.
    n_bins : number of expression bins (see ``_get_assigned_bins``).
    n_ctrl : controls drawn per positive target (see ``_get_ctrl_use``).
    random_state : seed for the control-gene draw.
    file_path : optional CSV destination for the scores.

    Returns
    -------
    np.ndarray of one score per sample.

    Raises
    ------
    ValueError if none of the positive target genes occur in ``gene_list``.
    """
    rng = np.random.default_rng(random_state)
    if target_dict is None:
        target_dict = {"Pos": DEFAULT_POS,
                       "Neg": DEFAULT_NEG}
    else:
        target_dict = {k: [i.upper() for i in v] for k, v in target_dict.items()}
    # BUG FIX: upper-case gene_list BEFORE the overlap check. The original
    # compared the raw gene_list against the upper-cased targets, so a
    # lower-case gene_list always failed the check spuriously.
    gene_list = [i.upper() for i in gene_list]
    if len(set(gene_list) & set(target_dict["Pos"])) == 0:
        raise ValueError('No feature genes found in gene_list.')
    cluster_len = X.shape[0]
    # Sort genes (and rows of X) by mean expression before binning.
    data_avg = X.mean(axis=1)
    sort_arg = np.argsort(data_avg)
    data_avg = data_avg[sort_arg]
    gene_list = np.array(gene_list)[sort_arg]
    X = X[sort_arg, :]
    assigned_bin = _get_assigned_bins(data_avg, cluster_len, n_bins)
    used_ctrl = _get_ctrl_use(assigned_bin, gene_list, target_dict,
                              n_ctrl, rng)
    # Mean over the selected gene rows -> one value per sample.
    ctrl_score = X[np.in1d(gene_list, used_ctrl), :].mean(axis=0).T
    features_score = X[np.in1d(gene_list, target_dict["Pos"]), :].mean(axis=0).T
    scores = features_score - ctrl_score
    if file_path:
        pd.DataFrame({"score": scores}, index=sample_list).to_csv(file_path)
    return scores
if __name__ == '__main__':
    # Generate a synthetic data set and score it with the implementations in
    # this module plus scanpy's score_genes, writing all results to disk for
    # side-by-side comparison.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--random_state",
                        help="random seed", default=42, type=int)
    parser.add_argument("-o", "--output_path",
                        help="output directory, it will be automatically and recursively created",
                        default=".", type=str)
    parser.add_argument("-g", "--genes",
                        help="number of the genes in the test data",
                        default=1000, type=int)
    parser.add_argument("-s", "--samples",
                        help="number of the samples (cells/observations) in the test data",
                        default=100, type=int)
    parser.add_argument("-b", "--bins",
                        help="number of bins",
                        default=25, type=int)
    parser.add_argument("-c", "--ctrls",
                        help="number of controls",
                        default=50, type=int)
    args = parser.parse_args()
    output_dir = Path(args.output_path)
    output_dir.mkdir(parents=True, exist_ok=True)
    data_obj = TestDataGenerator(n_genes=args.genes,
                                 n_samples=args.samples,
                                 n_bins=args.bins,
                                 n_ctrl=args.ctrls,
                                 random_state=args.random_state)
    data_obj.save_data(output_dir / Path("test_data.csv"), use_normalized=True)
    # Same generated data in the container type each scorer expects.
    np_data = data_obj.get_data("numpy", True)
    np_data["file_path"] = output_dir / Path("cell_scores.csv")
    pd_data = data_obj.get_data("pandas", True)
    pd_data["file_path"] = output_dir / Path("adobo_cell_scores.csv")
    cell_cycle_score(**np_data)
    score_genes(**(data_obj.get_data("ann_data", True))).write_csvs(output_dir / Path("scanpy_result"))
adobo_score(**pd_data) | 0.724773 | 0.371678 |
from app import db
class Plex(db.Model):
    """Settings row for the plex-utills app.

    Holds the Plex/Docker connection details, five schedule slots, and
    per-feature toggles. The toggles are Integer columns — presumably 0/1
    flags; confirm against the code that writes them.
    """
    __tablename__ = 'plex_utills'
    # plex and docker config
    id = db.Column(db.Integer, primary_key=True)
    plexurl = db.Column(db.String)
    token = db.Column(db.String)
    filmslibrary = db.Column(db.String)
    library3d = db.Column(db.String)
    plexpath = db.Column(db.String)
    mountedpath = db.Column(db.String)
    # Schedules
    t1 = db.Column(db.String)
    t2 = db.Column(db.String)
    t3 = db.Column(db.String)
    t4 = db.Column(db.String)
    t5 = db.Column(db.String)
    # Enable various settings
    backup = db.Column(db.Integer)
    posters4k = db.Column(db.Integer)
    mini4k = db.Column(db.Integer)
    hdr = db.Column(db.Integer)
    posters3d = db.Column(db.Integer)
    mini3d = db.Column(db.Integer)
    disney = db.Column(db.Integer)
    pixar = db.Column(db.Integer)
    hide4k = db.Column(db.Integer)
    transcode = db.Column(db.Integer)
    tvlibrary = db.Column(db.String)
    tv4kposters = db.Column(db.Integer)
    films4kposters = db.Column(db.Integer)
    tmdb_api = db.Column(db.String)
    tmdb_restore = db.Column(db.Integer)
    recreate_hdr = db.Column(db.Integer)
    new_hdr = db.Column(db.Integer)
    def __init__(self, plexurl, token, filmslibrary, library3d, plexpath, mountedpath, t1, t2, t3, t4, t5, backup, posters4k, mini4k, hdr, posters3d, mini3d, disney, pixar, hide4k, transcode, tvlibrary, tv4kposters, films4kposters, tmdb_api, tmdb_restore, recreate_hdr, new_hdr):
        """Positional initializer mirroring the column order declared above."""
        self.plexurl = plexurl
        self.token = token
        self.filmslibrary = filmslibrary
        self.library3d = library3d
        self.plexpath = plexpath
        self.mountedpath = mountedpath
        self.t1 = t1
        self.t2 = t2
        self.t3 = t3
        self.t4 = t4
        self.t5 = t5
        self.backup = backup
        self.posters4k = posters4k
        self.mini4k = mini4k
        self.hdr = hdr
        self.posters3d = posters3d
        self.mini3d = mini3d
        self.disney = disney
        self.pixar = pixar
        self.hide4k = hide4k
        self.transcode = transcode
        self.tvlibrary = tvlibrary
        self.tv4kposters = tv4kposters
        self.films4kposters = films4kposters
        self.tmdb_api = tmdb_api
        self.tmdb_restore = tmdb_restore
        self.recreate_hdr = recreate_hdr
        self.new_hdr = new_hdr | app/models.py | from app import db
class Plex(db.Model):
    """Settings row for the plex-utills app.

    Holds the Plex/Docker connection details, five schedule slots, and
    per-feature toggles. The toggles are Integer columns — presumably 0/1
    flags; confirm against the code that writes them.
    """
    __tablename__ = 'plex_utills'
    # plex and docker config
    id = db.Column(db.Integer, primary_key=True)
    plexurl = db.Column(db.String)
    token = db.Column(db.String)
    filmslibrary = db.Column(db.String)
    library3d = db.Column(db.String)
    plexpath = db.Column(db.String)
    mountedpath = db.Column(db.String)
    # Schedules
    t1 = db.Column(db.String)
    t2 = db.Column(db.String)
    t3 = db.Column(db.String)
    t4 = db.Column(db.String)
    t5 = db.Column(db.String)
    # Enable various settings
    backup = db.Column(db.Integer)
    posters4k = db.Column(db.Integer)
    mini4k = db.Column(db.Integer)
    hdr = db.Column(db.Integer)
    posters3d = db.Column(db.Integer)
    mini3d = db.Column(db.Integer)
    disney = db.Column(db.Integer)
    pixar = db.Column(db.Integer)
    hide4k = db.Column(db.Integer)
    transcode = db.Column(db.Integer)
    tvlibrary = db.Column(db.String)
    tv4kposters = db.Column(db.Integer)
    films4kposters = db.Column(db.Integer)
    tmdb_api = db.Column(db.String)
    tmdb_restore = db.Column(db.Integer)
    recreate_hdr = db.Column(db.Integer)
    new_hdr = db.Column(db.Integer)
    def __init__(self, plexurl, token, filmslibrary, library3d, plexpath, mountedpath, t1, t2, t3, t4, t5, backup, posters4k, mini4k, hdr, posters3d, mini3d, disney, pixar, hide4k, transcode, tvlibrary, tv4kposters, films4kposters, tmdb_api, tmdb_restore, recreate_hdr, new_hdr):
        """Positional initializer mirroring the column order declared above."""
        self.plexurl = plexurl
        self.token = token
        self.filmslibrary = filmslibrary
        self.library3d = library3d
        self.plexpath = plexpath
        self.mountedpath = mountedpath
        self.t1 = t1
        self.t2 = t2
        self.t3 = t3
        self.t4 = t4
        self.t5 = t5
        self.backup = backup
        self.posters4k = posters4k
        self.mini4k = mini4k
        self.hdr = hdr
        self.posters3d = posters3d
        self.mini3d = mini3d
        self.disney = disney
        self.pixar = pixar
        self.hide4k = hide4k
        self.transcode = transcode
        self.tvlibrary = tvlibrary
        self.tv4kposters = tv4kposters
        self.films4kposters = films4kposters
        self.tmdb_api = tmdb_api
        self.tmdb_restore = tmdb_restore
        self.recreate_hdr = recreate_hdr
        self.new_hdr = new_hdr | 0.439507 | 0.042942
import pygrtest_common
import pygr.Data
import random
import unittest
from nosebase import *
from pygr import sequence
class Conserve_Suite(unittest.TestCase):
    """Mega-test: sample human exons and query their conservation in the
    UCSC hg17 17-way multiz alignment (Python 2; needs pygr.Data resources)."""
    def exonquery_megatest(self):
        """Randomly sample a small fraction of ASAP2 human exons and format
        conservation stats for their splice sites and exon bodies."""
        # Debug helper: same formatting as getConservation but printed.
        # Not called in this test.
        def printConservation(id,label,site):
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    print '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                        %(id,label,repr(src),src,idDict[dest],dest,
                        100*edge.pIdentity(),100*edge.pAligned())
        # Formats the stats but discards them — exercises the MSA query path
        # without flooding stdout.
        def getConservation(id,label,site):
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    a = '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                        %(id,label,repr(src),src,idDict[dest],dest,
                        100*edge.pIdentity(),100*edge.pAligned())
        exons = pygr.Data.getResource('Bio.Annotation.ASAP2.HUMAN.hg17.exons')
        msa = pygr.Data.getResource('Bio.MSA.UCSC.hg17_multiz17way')
        idDict = ~(msa.seqDict) # INVERSE: MAPS SEQ --> STRING IDENTIFIER
        l = exons.keys()
        coverage = 0.001 # 0.1% coverage -> ~90 minutes wall-clock time
        for i in range(int(len(l) * coverage)):
            k = random.randint(0,len(l) - 1)
            id = l[k]
            exon = exons[id].sequence
            ss1=exon.before()[-2:] # GET THE 2 NT SPLICE SITES
            ss2=exon.after()[:2]
            cacheHint=msa[ss1+ss2] #CACHE THE COVERING INTERVALS FROM ss1 TO ss2
            try:
                getConservation(id,'ss1',ss1)
                getConservation(id,'ss2',ss2)
                getConservation(id,'exon',exon)
            except TypeError:
                print id, exon
class Blast_Suite(unittest.TestCase):
    """BLAST regression tests against pygr.Data genome resources and a
    pickled all-vs-all fixture (Python 2)."""
    def setUp(self):
        """Collect genomes that also have an ASAP2 exon annotation."""
        self.genomes = ['.'.join(x.split('.')[-2:]) for x in pygr.Data.dir('Bio.Seq.Genome')]
        available_exons = [x for x in pygr.Data.dir('Bio.Annotation.ASAP2') if 'exons' in x and 'cDNA' not in x and 'Map' not in x]
        self.available_exons = [x.replace('Bio.Annotation.ASAP2.','').replace('.exons','') for x in available_exons]
    def genome_blast_megatest(self):
        """Megablast one exon per annotated genome back against that genome
        and expect a perfect-identity self hit."""
        for genome in self.genomes:
            if genome in self.available_exons:
                #print genome
                g = pygr.Data.getResource('Bio.Seq.Genome.%s' % genome)
                exons = pygr.Data.getResource('Bio.Annotation.ASAP2.%s.exons' % genome)
                # Take the second exon from the iterator, then drop it so the
                # underlying resource is released.
                it = exons.iteritems()
                id, exon = it.next()
                id, exon = it.next()
                del it
                exon2 = exon
                exon = sequence.Sequence(str(exon.sequence),'1')
                m = g.megablast(exon, maxseq=1, minIdentity=0.9)
                if m.seqs.IDdict: # skip if alignment is empty
                    tmp = m[exon].edges(mergeMost=True)
                    if tmp:
                        src, dest, edge = tmp[0]
                        #print repr(src), repr(dest), len(tmp)
                        self.assertEqual(edge.pIdentity(trapOverflow=False), 1.)
                    #else:
                    #print 'no destination matches of proper length'
    def all_v_all_blast_test(self):
        """Re-run the all-vs-all BLAST over sp_hbb1 and compare sorted
        (src, dest, identity) edge lists to the pickled fixture written by
        all_v_all_blast_save()."""
        from pygr import cnestedlist,seqdb
        from pygr import sequence
        stored = PygrDataTextFile('results/seqdb2.pickle','r')
        old_result = stored['sp_allvall']
        min_ID = 0.5
        msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
        sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
        for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
            sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
        msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
        db = msa.seqDict.dicts.keys()[0]
        result = {}
        for k in db.values():
            edges = msa[k].edges(minAlignSize=12,pIdentityMin=min_ID)
            for t in edges:
                assert len(t[0]) >= 12
            # Dedupe edges via dict keys before sorting.
            tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
            result[repr(k)] = tmpdict.keys()
            result[repr(k)].sort()
        assert sorted(result.keys()) == sorted(old_result.keys())
        for k in result:
            l = result[k]
            l2 = old_result[k]
            assert len(l) == len(l2)
            for i in range(len(l)):
                src, dest, identity = l[i]
                old_src, old_dest, old_identity = l2[i]
                assert (src, dest) == (old_src, old_dest)
                assert identity - old_identity < .0001
                assert identity >= min_ID
def all_v_all_blast_save():
    """Run an all-vs-all BLAST over the sp_hbb1 SwissProt subset and pickle
    the deduped, sorted (src, dest, identity) edge lists as the fixture that
    Blast_Suite.all_v_all_blast_test compares against. Returns the NLMSA."""
    from pygr import cnestedlist,seqdb
    working = PygrDataTextFile('results/seqdb2.pickle','w')
    msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
    sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
    for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
        sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
    msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
    db = msa.seqDict.dicts.keys()[0]
    result = {}
    for k in db.values():
        edges = msa[k].edges(minAlignSize=12, pIdentityMin=0.5)
        for t in edges:
            assert len(t[0]) >= 12
        # Dedupe edges via dict keys before sorting.
        tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
        result[repr(k)] = tmpdict.keys()
        result[repr(k)].sort()
    working['sp_allvall'] = result
    working.save()
    return msa
class Blastx_Test(object):
    """Checks BlastxMapping: translated-query hits plus blastp trapping."""
    def blastx_test(self):
        """Blastx a mouse hbb1 cDNA against SwissProt, compare hit lengths
        and identities to a known-good table, and verify that a protein
        query raises ValueError."""
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastxMapping(prot)
        # Expected (len(src), len(dest), len(src.sequence), pIdentity) rows.
        correct = [(146, 146, 438, 0.979), (146, 146, 438, 0.911), (146, 146, 438, 0.747), (146, 146, 438, 0.664), (146, 146, 438, 0.623), (146, 146, 438, 0.596), (145, 145, 435, 0.510), (143, 143, 429, 0.531), (146, 146, 438, 0.473), (146, 146, 438, 0.473), (146, 146, 438, 0.486), (144, 144, 432, 0.451), (145, 145, 435, 0.455), (144, 144, 432, 0.451), (146, 146, 438, 0.466), (146, 146, 438, 0.459), (52, 52, 156, 0.442), (90, 90, 270, 0.322), (23, 23, 69, 0.435), (120, 120, 360, 0.283), (23, 23, 69, 0.435), (120, 120, 360, 0.258), (23, 23, 69, 0.435), (120, 120, 360, 0.275), (23, 23, 69, 0.435), (120, 120, 360, 0.267)]
        results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
        l = []
        for result in results:
            for src,dest,edge in result.edges():
                l.append((len(src),len(dest),len(src.sequence),
                          edge.pIdentity()))
        assert approximate_cmp(l, correct, 0.001) == 0, 'blastx results mismatch'
        try:
            results = blastmap[prot['HBB1_MOUSE']]
            raise AssertionError('failed to trap blastp in BlastxMapping')
        except ValueError:
            pass
class Tblastn_Test(object):
    """Checks tblastn alignments and the bad-subject BLAST report parser."""
    def tblastn_test(self):
        """tblastn a Xenopus hbb1 protein against the mouse cDNA, pin the
        aligned protein/DNA strings, and verify BlastMapping rejects a DNA
        query with ValueError."""
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastMapping(dna)
        result = blastmap[prot['HBB1_XENLA']]
        src,dest,edge = iter(result.edges()).next()
        assert str(src) == 'LTAHDRQLINSTWGKLCAKTIGQEALGRLLWTYPWTQRYFSSFGNLNSADAVFHNEAVAAHGEKVVTSIGEAIKHMDDIKGYYAQLSKYHSETLHVDPLNFKRFGGCLSIALARHFHEEYTPELHAAYEHLFDAIADALGKGYH'
        assert str(dest) == 'LTDAEKAAVSGLWGKVNSDEVGGEALGRLLVVYPWTQRYFDSFGDLSSASAIMGNAKVKAHGKKVITAFNEGLNHLDSLKGTFASLSELHCDKLHVDPENFRLLGNMIVIVLGHHLGKDFTPAAQAAFQKVMAGVATALAHKYH'
        assert str(dest.sequence) == 'CTGACTGATGCTGAGAAGGCTGCTGTCTCTGGCCTGTGGGGAAAGGTGAACTCCGATGAAGTTGGTGGTGAGGCCCTGGGCAGGCTGCTGGTTGTCTACCCTTGGACCCAGAGGTACTTTGATAGCTTTGGAGACCTATCCTCTGCCTCTGCTATCATGGGTAATGCCAAAGTGAAGGCCCATGGCAAGAAAGTGATAACTGCCTTTAACGAGGGCCTGAATCACTTGGACAGCCTCAAGGGCACCTTTGCCAGCCTCAGTGAGCTCCACTGTGACAAGCTCCATGTGGATCCTGAGAACTTCAGGCTCCTGGGCAATATGATCGTGATTGTGCTGGGCCACCACCTGGGCAAGGATTTCACCCCCGCTGCACAGGCTGCCTTCCAGAAGGTGATGGCTGGAGTGGCCACTGCCCTGGCTCACAAGTACCAC'
        assert approximate_cmp([[edge.pIdentity()]], [[0.451]],
                               0.001)==0
        blastmap = blast.BlastMapping(prot)
        try:
            results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
            raise AssertionError('failed to trap blastx in BlastMapping')
        except ValueError:
            pass
    def bad_subject_test(self):
        """Parse a known-problematic tblastn report and check that the hit
        coordinates survive parsing intact."""
        from pygr import parse_blast
        from pygr.nlmsa_utils import CoordsGroupStart,CoordsGroupEnd
        correctCoords = ((12,63,99508,99661),
                         (65,96,99661,99754),
                         (96,108,99778,99814),
                         (108,181,99826,100045))
        ifile = file('bad_tblastn.txt')
        try:
            p = parse_blast.BlastHitParser()
            it = iter(correctCoords)
            for ival in p.parse_file(ifile):
                if not isinstance(ival,(CoordsGroupStart,
                                        CoordsGroupEnd)):
                    assert (ival.src_start,ival.src_end,
                            ival.dest_start,ival.dest_end) \
                           == it.next()
        finally:
            ifile.close()
if __name__ == '__main__':
    # Regenerate the pickled all-vs-all fixture used by Blast_Suite.
    a=all_v_all_blast_save() | tests/oldtests/blast_test.py | import pygrtest_common
import pygr.Data
import random
import unittest
from nosebase import *
from pygr import sequence
class Conserve_Suite(unittest.TestCase):
    """Mega-test: sample human exons and query their conservation in the
    UCSC hg17 17-way multiz alignment (Python 2; needs pygr.Data resources)."""
    def exonquery_megatest(self):
        """Randomly sample a small fraction of ASAP2 human exons and format
        conservation stats for their splice sites and exon bodies."""
        # Debug helper: same formatting as getConservation but printed.
        # Not called in this test.
        def printConservation(id,label,site):
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    print '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                        %(id,label,repr(src),src,idDict[dest],dest,
                        100*edge.pIdentity(),100*edge.pAligned())
        # Formats the stats but discards them — exercises the MSA query path
        # without flooding stdout.
        def getConservation(id,label,site):
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    a = '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                        %(id,label,repr(src),src,idDict[dest],dest,
                        100*edge.pIdentity(),100*edge.pAligned())
        exons = pygr.Data.getResource('Bio.Annotation.ASAP2.HUMAN.hg17.exons')
        msa = pygr.Data.getResource('Bio.MSA.UCSC.hg17_multiz17way')
        idDict = ~(msa.seqDict) # INVERSE: MAPS SEQ --> STRING IDENTIFIER
        l = exons.keys()
        coverage = 0.001 # 0.1% coverage -> ~90 minutes wall-clock time
        for i in range(int(len(l) * coverage)):
            k = random.randint(0,len(l) - 1)
            id = l[k]
            exon = exons[id].sequence
            ss1=exon.before()[-2:] # GET THE 2 NT SPLICE SITES
            ss2=exon.after()[:2]
            cacheHint=msa[ss1+ss2] #CACHE THE COVERING INTERVALS FROM ss1 TO ss2
            try:
                getConservation(id,'ss1',ss1)
                getConservation(id,'ss2',ss2)
                getConservation(id,'exon',exon)
            except TypeError:
                print id, exon
class Blast_Suite(unittest.TestCase):
    """BLAST regression tests against pygr.Data genome resources and a
    pickled all-vs-all fixture (Python 2)."""
    def setUp(self):
        """Collect genomes that also have an ASAP2 exon annotation."""
        self.genomes = ['.'.join(x.split('.')[-2:]) for x in pygr.Data.dir('Bio.Seq.Genome')]
        available_exons = [x for x in pygr.Data.dir('Bio.Annotation.ASAP2') if 'exons' in x and 'cDNA' not in x and 'Map' not in x]
        self.available_exons = [x.replace('Bio.Annotation.ASAP2.','').replace('.exons','') for x in available_exons]
    def genome_blast_megatest(self):
        """Megablast one exon per annotated genome back against that genome
        and expect a perfect-identity self hit."""
        for genome in self.genomes:
            if genome in self.available_exons:
                #print genome
                g = pygr.Data.getResource('Bio.Seq.Genome.%s' % genome)
                exons = pygr.Data.getResource('Bio.Annotation.ASAP2.%s.exons' % genome)
                # Take the second exon from the iterator, then drop it so the
                # underlying resource is released.
                it = exons.iteritems()
                id, exon = it.next()
                id, exon = it.next()
                del it
                exon2 = exon
                exon = sequence.Sequence(str(exon.sequence),'1')
                m = g.megablast(exon, maxseq=1, minIdentity=0.9)
                if m.seqs.IDdict: # skip if alignment is empty
                    tmp = m[exon].edges(mergeMost=True)
                    if tmp:
                        src, dest, edge = tmp[0]
                        #print repr(src), repr(dest), len(tmp)
                        self.assertEqual(edge.pIdentity(trapOverflow=False), 1.)
                    #else:
                    #print 'no destination matches of proper length'
    def all_v_all_blast_test(self):
        """Re-run the all-vs-all BLAST over sp_hbb1 and compare sorted
        (src, dest, identity) edge lists to the pickled fixture written by
        all_v_all_blast_save()."""
        from pygr import cnestedlist,seqdb
        from pygr import sequence
        stored = PygrDataTextFile('results/seqdb2.pickle','r')
        old_result = stored['sp_allvall']
        min_ID = 0.5
        msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
        sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
        for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
            sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
        msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
        db = msa.seqDict.dicts.keys()[0]
        result = {}
        for k in db.values():
            edges = msa[k].edges(minAlignSize=12,pIdentityMin=min_ID)
            for t in edges:
                assert len(t[0]) >= 12
            # Dedupe edges via dict keys before sorting.
            tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
            result[repr(k)] = tmpdict.keys()
            result[repr(k)].sort()
        assert sorted(result.keys()) == sorted(old_result.keys())
        for k in result:
            l = result[k]
            l2 = old_result[k]
            assert len(l) == len(l2)
            for i in range(len(l)):
                src, dest, identity = l[i]
                old_src, old_dest, old_identity = l2[i]
                assert (src, dest) == (old_src, old_dest)
                assert identity - old_identity < .0001
                assert identity >= min_ID
def all_v_all_blast_save():
    """Run an all-vs-all BLAST over the sp_hbb1 SwissProt subset and pickle
    the deduped, sorted (src, dest, identity) edge lists as the fixture that
    Blast_Suite.all_v_all_blast_test compares against. Returns the NLMSA."""
    from pygr import cnestedlist,seqdb
    working = PygrDataTextFile('results/seqdb2.pickle','w')
    msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
    sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
    for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
        sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
    msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
    db = msa.seqDict.dicts.keys()[0]
    result = {}
    for k in db.values():
        edges = msa[k].edges(minAlignSize=12, pIdentityMin=0.5)
        for t in edges:
            assert len(t[0]) >= 12
        # Dedupe edges via dict keys before sorting.
        tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
        result[repr(k)] = tmpdict.keys()
        result[repr(k)].sort()
    working['sp_allvall'] = result
    working.save()
    return msa
class Blastx_Test(object):
    """Checks BlastxMapping: translated-query hits plus blastp trapping."""
    def blastx_test(self):
        """Blastx a mouse hbb1 cDNA against SwissProt, compare hit lengths
        and identities to a known-good table, and verify that a protein
        query raises ValueError."""
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastxMapping(prot)
        # Expected (len(src), len(dest), len(src.sequence), pIdentity) rows.
        correct = [(146, 146, 438, 0.979), (146, 146, 438, 0.911), (146, 146, 438, 0.747), (146, 146, 438, 0.664), (146, 146, 438, 0.623), (146, 146, 438, 0.596), (145, 145, 435, 0.510), (143, 143, 429, 0.531), (146, 146, 438, 0.473), (146, 146, 438, 0.473), (146, 146, 438, 0.486), (144, 144, 432, 0.451), (145, 145, 435, 0.455), (144, 144, 432, 0.451), (146, 146, 438, 0.466), (146, 146, 438, 0.459), (52, 52, 156, 0.442), (90, 90, 270, 0.322), (23, 23, 69, 0.435), (120, 120, 360, 0.283), (23, 23, 69, 0.435), (120, 120, 360, 0.258), (23, 23, 69, 0.435), (120, 120, 360, 0.275), (23, 23, 69, 0.435), (120, 120, 360, 0.267)]
        results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
        l = []
        for result in results:
            for src,dest,edge in result.edges():
                l.append((len(src),len(dest),len(src.sequence),
                          edge.pIdentity()))
        assert approximate_cmp(l, correct, 0.001) == 0, 'blastx results mismatch'
        try:
            results = blastmap[prot['HBB1_MOUSE']]
            raise AssertionError('failed to trap blastp in BlastxMapping')
        except ValueError:
            pass
class Tblastn_Test(object):
    """Checks tblastn alignments and the bad-subject BLAST report parser."""
    def tblastn_test(self):
        """tblastn a Xenopus hbb1 protein against the mouse cDNA, pin the
        aligned protein/DNA strings, and verify BlastMapping rejects a DNA
        query with ValueError."""
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastMapping(dna)
        result = blastmap[prot['HBB1_XENLA']]
        src,dest,edge = iter(result.edges()).next()
        assert str(src) == 'LTAHDRQLINSTWGKLCAKTIGQEALGRLLWTYPWTQRYFSSFGNLNSADAVFHNEAVAAHGEKVVTSIGEAIKHMDDIKGYYAQLSKYHSETLHVDPLNFKRFGGCLSIALARHFHEEYTPELHAAYEHLFDAIADALGKGYH'
        assert str(dest) == 'LTDAEKAAVSGLWGKVNSDEVGGEALGRLLVVYPWTQRYFDSFGDLSSASAIMGNAKVKAHGKKVITAFNEGLNHLDSLKGTFASLSELHCDKLHVDPENFRLLGNMIVIVLGHHLGKDFTPAAQAAFQKVMAGVATALAHKYH'
        assert str(dest.sequence) == 'CTGACTGATGCTGAGAAGGCTGCTGTCTCTGGCCTGTGGGGAAAGGTGAACTCCGATGAAGTTGGTGGTGAGGCCCTGGGCAGGCTGCTGGTTGTCTACCCTTGGACCCAGAGGTACTTTGATAGCTTTGGAGACCTATCCTCTGCCTCTGCTATCATGGGTAATGCCAAAGTGAAGGCCCATGGCAAGAAAGTGATAACTGCCTTTAACGAGGGCCTGAATCACTTGGACAGCCTCAAGGGCACCTTTGCCAGCCTCAGTGAGCTCCACTGTGACAAGCTCCATGTGGATCCTGAGAACTTCAGGCTCCTGGGCAATATGATCGTGATTGTGCTGGGCCACCACCTGGGCAAGGATTTCACCCCCGCTGCACAGGCTGCCTTCCAGAAGGTGATGGCTGGAGTGGCCACTGCCCTGGCTCACAAGTACCAC'
        assert approximate_cmp([[edge.pIdentity()]], [[0.451]],
                               0.001)==0
        blastmap = blast.BlastMapping(prot)
        try:
            results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
            raise AssertionError('failed to trap blastx in BlastMapping')
        except ValueError:
            pass
    def bad_subject_test(self):
        """Parse a known-problematic tblastn report and check that the hit
        coordinates survive parsing intact."""
        from pygr import parse_blast
        from pygr.nlmsa_utils import CoordsGroupStart,CoordsGroupEnd
        correctCoords = ((12,63,99508,99661),
                         (65,96,99661,99754),
                         (96,108,99778,99814),
                         (108,181,99826,100045))
        ifile = file('bad_tblastn.txt')
        try:
            p = parse_blast.BlastHitParser()
            it = iter(correctCoords)
            for ival in p.parse_file(ifile):
                if not isinstance(ival,(CoordsGroupStart,
                                        CoordsGroupEnd)):
                    assert (ival.src_start,ival.src_end,
                            ival.dest_start,ival.dest_end) \
                           == it.next()
        finally:
            ifile.close()
if __name__ == '__main__':
    # Regenerate the pickled all-vs-all fixture used by Blast_Suite.
    a=all_v_all_blast_save() | 0.095328 | 0.316369
from typing import Tuple
import torch
from cubework.distributed import ParallelManager as pm
from cubework.distributed import all_gather, all_reduce, broadcast, reduce, reduce_scatter
from cubework.distributed.utils import ParallelMode
from cubework.global_vars import env
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
from ..utils import async_comm_bucket
def get_depth_from_env() -> int:
    """Side length (depth) of the 3-D parallel cube, read from the env."""
    return env.depth_3d
def get_input_parallel_mode() -> ParallelMode:
    """Process group currently designated as the input dimension."""
    return getattr(pm, env.input_group_3d)
def get_weight_parallel_mode() -> ParallelMode:
    """Process group currently designated as the weight dimension."""
    return getattr(pm, env.weight_group_3d)
def get_output_parallel_mode() -> ParallelMode:
    """Process group currently designated as the output dimension."""
    return getattr(pm, env.output_group_3d)
def get_input_x_weight_parallel_mode() -> ParallelMode:
    """Combined input-x-weight process group."""
    return getattr(pm, env.input_x_weight_group_3d)
def get_output_x_weight_parallel_mode() -> ParallelMode:
    """Combined output-x-weight process group."""
    return getattr(pm, env.output_x_weight_group_3d)
def swap_in_out_group():
    """Swap which env groups play the input vs. output roles (both the
    plain groups and the x-weight combined groups)."""
    env.input_group_3d, env.output_group_3d = env.output_group_3d, env.input_group_3d
    env.input_x_weight_group_3d, env.output_x_weight_group_3d = (
        env.output_x_weight_group_3d,
        env.input_x_weight_group_3d,
    )
def split_batch_3d(
    input_: Tensor,
    dim: int = 0,
    input_parallel_mode: ParallelMode = pm.PARALLEL_3D_INPUT,
    weight_parallel_mode: ParallelMode = pm.PARALLEL_3D_WEIGHT,
) -> Tensor:
    """Split *input_* along *dim* across the weight group, then the input
    group, returning this rank's contiguous local chunk.

    Inputs of size <= 1 along *dim* are returned unchanged.

    NOTE(review): the ``input_parallel_mode`` / ``weight_parallel_mode``
    arguments are immediately overwritten from the global env below, so the
    parameters are effectively ignored — presumably so the split tracks
    ``swap_in_out_group``; confirm before relying on passing custom modes.
    """
    if input_.size(dim) <= 1:
        return input_
    weight_parallel_mode = get_weight_parallel_mode()
    input_parallel_mode = get_input_parallel_mode()
    output = torch.chunk(input_, weight_parallel_mode.world_size, dim=dim)[weight_parallel_mode.local_rank].contiguous()
    output = torch.chunk(output, input_parallel_mode.world_size, dim=dim)[input_parallel_mode.local_rank].contiguous()
    return output
class _ReduceTensor3D(torch.autograd.Function):
    """All-reduce in forward; pass the gradient straight through in backward."""
    @staticmethod
    def forward(ctx, input_, parallel_mode):
        return all_reduce(input_, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        # Gradient for the non-tensor parallel_mode argument is None.
        return output_grad, None
def reduce_tensor_3d(tensor: Tensor, parallel_mode: ParallelMode) -> Tensor:
    """Autograd-aware all-reduce of *tensor* over *parallel_mode*."""
    return _ReduceTensor3D.apply(tensor, parallel_mode)
class _AllGatherWeight3D(torch.autograd.Function):
    """All-gather in forward; asynchronous reduce-scatter in backward."""
    @staticmethod
    def forward(ctx, weight, dim, parallel_mode):
        # Stash the non-tensor args for the backward pass.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        output = all_gather(weight, dim, parallel_mode)
        return output
    @staticmethod
    def backward(ctx, output_grad):
        # Issue the reduce-scatter asynchronously and park the handle in the
        # shared bucket so it can be waited on later, overlapping comm with
        # the rest of the backward pass.
        grad, op = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode, async_op=True)
        async_comm_bucket.append(op)
        return grad, None, None
def all_gather_weight_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    """Autograd-aware all-gather of a weight shard along *dim*."""
    return _AllGatherWeight3D.apply(tensor, dim, parallel_mode)
class _ReduceScatterTensor3D(torch.autograd.Function):
    """Reduce-scatter in forward; all-gather in backward."""
    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Stash the non-tensor args for the backward pass.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
        return input_grad, None, None
def reduce_scatter_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    """Autograd-aware reduce-scatter of *tensor* along *dim*."""
    return _ReduceScatterTensor3D.apply(tensor, dim, parallel_mode)
class _ReduceByBatch3D(torch.autograd.Function):
    """All-reduce over both the input and weight groups, optionally
    averaging by the combined group size. Forward runs in float32 via
    custom_fwd's cast."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(
        ctx,
        input_: Tensor,
        input_parallel_mode: ParallelMode,
        weight_parallel_mode: ParallelMode,
        reduce_mean: bool = False,
    ) -> Tensor:
        output = all_reduce(input_, input_parallel_mode)
        output = all_reduce(output, weight_parallel_mode)
        ctx.reduce_mean = reduce_mean
        if reduce_mean:
            # Turn the sum into a mean over all participating ranks.
            reduce_size = input_parallel_mode.world_size * weight_parallel_mode.world_size
            ctx.reduce_size = reduce_size
            return output.clone() / reduce_size
        return output.clone()
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Pass the gradient through, mirroring the forward mean scaling.
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None, None, None
        else:
            return output_grad, None, None, None
def reduce_by_batch_3d(
    tensor: Tensor, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, reduce_mean: bool = False
) -> Tensor:
    """All-reduce *tensor* across input and weight groups; average instead
    of sum when *reduce_mean* is True."""
    return _ReduceByBatch3D.apply(tensor, input_parallel_mode, weight_parallel_mode, reduce_mean)
class _BroadcastWeight3D_FromDiagonal(torch.autograd.Function):
    """Broadcast a weight from the 'diagonal' rank of the input group; the
    backward reduces gradients back to that source rank.

    NOTE(review): unlike the other ops in this file, inputs are cast to
    float16 here rather than float32 -- confirm this is intentional.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx,
        input_: Tensor,
        input_parallel_mode: ParallelMode,
        weight_parallel_mode: ParallelMode,
        output_parallel_mode: ParallelMode,
    ) -> Tensor:
        # The source rank is the member of the input group indexed by this
        # rank's position in the output group (the "diagonal").
        src_rank = input_parallel_mode.ranks_in_group[output_parallel_mode.local_rank]
        output = broadcast(input_, src_rank, input_parallel_mode)
        ctx.src_rank = src_rank
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Reduce gradients to the broadcast source; only the diagonal rank
        # keeps a gradient (further all-reduced over the weight group).
        input_grad = reduce(output_grad, ctx.src_rank, ctx.input_parallel_mode)
        if ctx.input_parallel_mode.local_rank == ctx.output_parallel_mode.local_rank:
            input_grad = all_reduce(input_grad, ctx.weight_parallel_mode)
        else:
            input_grad = None
        return input_grad, None, None, None
def broadcast_weight_3d_from_diagonal(
    tensor: Tensor,
    input_parallel_mode: ParallelMode,
    weight_parallel_mode: ParallelMode,
    output_parallel_mode: ParallelMode,
) -> Tensor:
    """Autograd-aware broadcast of ``tensor`` from the diagonal source rank."""
    return _BroadcastWeight3D_FromDiagonal.apply(
        tensor, input_parallel_mode, weight_parallel_mode, output_parallel_mode
    ) | cubework/module/parallel_3d/_utils.py | from typing import Tuple
import torch
from cubework.distributed import ParallelManager as pm
from cubework.distributed import all_gather, all_reduce, broadcast, reduce, reduce_scatter
from cubework.distributed.utils import ParallelMode
from cubework.global_vars import env
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
from ..utils import async_comm_bucket
def get_depth_from_env() -> int:
    """Return the configured depth of 3D tensor parallelism from the global env."""
    return env.depth_3d
def get_input_parallel_mode() -> ParallelMode:
    """Resolve the input-parallel process group named in the global env."""
    return getattr(pm, env.input_group_3d)
def get_weight_parallel_mode() -> ParallelMode:
    """Resolve the weight-parallel process group named in the global env."""
    return getattr(pm, env.weight_group_3d)
def get_output_parallel_mode() -> ParallelMode:
    """Resolve the output-parallel process group named in the global env."""
    return getattr(pm, env.output_group_3d)
def get_input_x_weight_parallel_mode() -> ParallelMode:
    """Resolve the combined input-x-weight process group named in the global env."""
    return getattr(pm, env.input_x_weight_group_3d)
def get_output_x_weight_parallel_mode() -> ParallelMode:
    """Resolve the combined output-x-weight process group named in the global env."""
    return getattr(pm, env.output_x_weight_group_3d)
def swap_in_out_group():
    """Swap the roles of the input and output groups (and their x-weight
    combinations) in the global env; affects all later get_* lookups."""
    env.input_group_3d, env.output_group_3d = env.output_group_3d, env.input_group_3d
    env.input_x_weight_group_3d, env.output_x_weight_group_3d = (
        env.output_x_weight_group_3d,
        env.input_x_weight_group_3d,
    )
def split_batch_3d(
    input_: Tensor,
    dim: int = 0,
    input_parallel_mode: ParallelMode = pm.PARALLEL_3D_INPUT,
    weight_parallel_mode: ParallelMode = pm.PARALLEL_3D_WEIGHT,
) -> Tensor:
    """Return this rank's local chunk of a batch tensor split along ``dim``.

    The tensor is first chunked over the weight group, then the resulting
    piece is chunked over the input group.  Tensors with size <= 1 along
    ``dim`` are returned unchanged.

    NOTE(review): the ``input_parallel_mode`` / ``weight_parallel_mode``
    arguments are immediately overwritten from the global env below, so
    caller-supplied values are ignored -- confirm this is intentional.
    """
    if input_.size(dim) <= 1:
        return input_
    # Parallel modes are re-resolved from env; see NOTE above.
    weight_parallel_mode = get_weight_parallel_mode()
    input_parallel_mode = get_input_parallel_mode()
    output = torch.chunk(input_, weight_parallel_mode.world_size, dim=dim)[weight_parallel_mode.local_rank].contiguous()
    output = torch.chunk(output, input_parallel_mode.world_size, dim=dim)[input_parallel_mode.local_rank].contiguous()
    return output
class _ReduceTensor3D(torch.autograd.Function):
    """All-reduce in forward; identity in backward (the gradient of a sum
    with respect to each addend is the incoming gradient unchanged)."""
    @staticmethod
    def forward(ctx, input_, parallel_mode):
        return all_reduce(input_, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        # Pass-through gradient; None for the non-tensor parallel_mode arg.
        return output_grad, None
def reduce_tensor_3d(tensor: Tensor, parallel_mode: ParallelMode) -> Tensor:
    """All-reduce ``tensor`` over ``parallel_mode``, keeping autograd intact."""
    return _ReduceTensor3D.apply(tensor, parallel_mode)
class _AllGatherWeight3D(torch.autograd.Function):
    """All-gather a weight shard along ``dim`` in forward; reduce-scatter the
    gradient (asynchronously) in backward."""
    @staticmethod
    def forward(ctx, weight, dim, parallel_mode):
        # Stash the gather dim and process group for the backward pass.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        output = all_gather(weight, dim, parallel_mode)
        return output
    @staticmethod
    def backward(ctx, output_grad):
        # Launch the reduce-scatter asynchronously and park the handle in the
        # shared bucket; presumably the owner of async_comm_bucket waits on it
        # later (see ..utils) -- TODO confirm the synchronization point.
        grad, op = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode, async_op=True)
        async_comm_bucket.append(op)
        return grad, None, None
def all_gather_weight_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    """Autograd-aware all-gather of a weight shard along ``dim``."""
    return _AllGatherWeight3D.apply(tensor, dim, parallel_mode)
class _ReduceScatterTensor3D(torch.autograd.Function):
    """Reduce-scatter along ``dim`` in forward; all-gather the gradient in
    backward (the two collectives are each other's adjoints)."""
    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Stash dim/group for backward; this reduce-scatter is synchronous.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
        return input_grad, None, None
def reduce_scatter_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    """Autograd-aware reduce-scatter of ``tensor`` along ``dim``."""
    return _ReduceScatterTensor3D.apply(tensor, dim, parallel_mode)
class _ReduceByBatch3D(torch.autograd.Function):
    """All-reduce over both the input and weight process groups, optionally
    averaging by the combined world size (inputs cast to fp32 for accuracy)."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(
        ctx,
        input_: Tensor,
        input_parallel_mode: ParallelMode,
        weight_parallel_mode: ParallelMode,
        reduce_mean: bool = False,
    ) -> Tensor:
        output = all_reduce(input_, input_parallel_mode)
        output = all_reduce(output, weight_parallel_mode)
        ctx.reduce_mean = reduce_mean
        if reduce_mean:
            # Divisor is the total number of ranks reduced over.
            reduce_size = input_parallel_mode.world_size * weight_parallel_mode.world_size
            ctx.reduce_size = reduce_size
            # clone() decouples the returned tensor from the collective's buffer.
            return output.clone() / reduce_size
        return output.clone()
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Mirror the forward scaling; non-tensor forward args get None grads.
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None, None, None
        else:
            return output_grad, None, None, None
def reduce_by_batch_3d(
    tensor: Tensor, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, reduce_mean: bool = False
) -> Tensor:
    """All-reduce ``tensor`` over both groups; divide by world size if ``reduce_mean``."""
    return _ReduceByBatch3D.apply(tensor, input_parallel_mode, weight_parallel_mode, reduce_mean)
class _BroadcastWeight3D_FromDiagonal(torch.autograd.Function):
    """Broadcast a weight from the 'diagonal' rank of the input group; the
    backward reduces gradients back to that source rank.

    NOTE(review): unlike the other ops in this file, inputs are cast to
    float16 here rather than float32 -- confirm this is intentional.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx,
        input_: Tensor,
        input_parallel_mode: ParallelMode,
        weight_parallel_mode: ParallelMode,
        output_parallel_mode: ParallelMode,
    ) -> Tensor:
        # The source rank is the member of the input group indexed by this
        # rank's position in the output group (the "diagonal").
        src_rank = input_parallel_mode.ranks_in_group[output_parallel_mode.local_rank]
        output = broadcast(input_, src_rank, input_parallel_mode)
        ctx.src_rank = src_rank
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Reduce gradients to the broadcast source; only the diagonal rank
        # keeps a gradient (further all-reduced over the weight group).
        input_grad = reduce(output_grad, ctx.src_rank, ctx.input_parallel_mode)
        if ctx.input_parallel_mode.local_rank == ctx.output_parallel_mode.local_rank:
            input_grad = all_reduce(input_grad, ctx.weight_parallel_mode)
        else:
            input_grad = None
        return input_grad, None, None, None
def broadcast_weight_3d_from_diagonal(
    tensor: Tensor,
    input_parallel_mode: ParallelMode,
    weight_parallel_mode: ParallelMode,
    output_parallel_mode: ParallelMode,
) -> Tensor:
    """Autograd-aware broadcast of ``tensor`` from the diagonal source rank."""
    return _BroadcastWeight3D_FromDiagonal.apply(
        tensor, input_parallel_mode, weight_parallel_mode, output_parallel_mode
    ) | 0.912251 | 0.28258
from django.urls import (path, include)
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.authtoken.views import obtain_auth_token
from .views import login_view
from apps.Localizr.views import (
locale_list_view,
locale_detail_view,
app_info_list_view,
key_string_list_view,
key_string_detail_view,
app_info_key_string_list_view,
app_info_key_string_detail_view,
key_value_list_view,
localized_string_list_view,
localized_string_detail_view,
)
# Auth endpoints: DRF token auth plus the browsable-API login pages.
urlpatterns = [
    path('v1/token/', obtain_auth_token, name='auth-token'),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
# STATIC FILES
# django.conf.urls.static.static() only serves when settings.DEBUG is true,
# so these lines are a no-op in production.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# PAGES
urlpatterns += [
    path('v1/locales/', locale_list_view, name='locale-list'),
    path('v1/locales/<int:pk>', locale_detail_view, name='locale-detail'),
    path('v1/apps/', app_info_list_view, name='app-info-list'),
    # NOTE(review): this detail route is wired to locale_list_view -- looks
    # like a copy/paste slip; an app-info *detail* view is expected here.
    # Confirm against apps.Localizr.views before changing.
    path('v1/apps/<int:pk>', locale_list_view, name='app-info-detail'),
    path('v1/keys/', key_string_list_view, name='key-string-list'),
    path('v1/keys/<int:pk>', key_string_detail_view, name='key-string-detail'),
    path('v1/app-key-strings/', app_info_key_string_list_view,
        name='app-key-strings-list'),
    path('v1/app-key-strings/<int:pk>', app_info_key_string_detail_view,
        name='app-key-strings-detail'),
    path('v1/localized-strings/', localized_string_list_view,
        name='localized-string-list'),
    path('v1/localized-strings/<int:pk>',
        localized_string_detail_view, name='localized-string-detail'),
    # Per-app, per-locale key/value export, e.g. /app/myapp.en
    path('app/<slug:app_slug>.<slug:locale_code>',
        key_value_list_view, name='key-value-list'),
    path('v1/login/', login_view, name='login'),
    # The Django admin is mounted at the site root.
    path('', admin.site.urls),
]
# DEBUG_TOOLBAR is a project-specific setting (not a Django builtin).
if settings.DEBUG_TOOLBAR:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
# Admin branding; site_url=None hides the "View site" link.
admin.site.site_header = 'Localizr'
admin.site.site_title = 'Localizr'
admin.site.site_url = None | project/urls.py | from django.urls import (path, include)
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.authtoken.views import obtain_auth_token
from .views import login_view
from apps.Localizr.views import (
locale_list_view,
locale_detail_view,
app_info_list_view,
key_string_list_view,
key_string_detail_view,
app_info_key_string_list_view,
app_info_key_string_detail_view,
key_value_list_view,
localized_string_list_view,
localized_string_detail_view,
)
# Auth endpoints: DRF token auth plus the browsable-API login pages.
urlpatterns = [
    path('v1/token/', obtain_auth_token, name='auth-token'),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
# STATIC FILES
# django.conf.urls.static.static() only serves when settings.DEBUG is true,
# so these lines are a no-op in production.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# PAGES
urlpatterns += [
    path('v1/locales/', locale_list_view, name='locale-list'),
    path('v1/locales/<int:pk>', locale_detail_view, name='locale-detail'),
    path('v1/apps/', app_info_list_view, name='app-info-list'),
    # NOTE(review): this detail route is wired to locale_list_view -- looks
    # like a copy/paste slip; an app-info *detail* view is expected here.
    # Confirm against apps.Localizr.views before changing.
    path('v1/apps/<int:pk>', locale_list_view, name='app-info-detail'),
    path('v1/keys/', key_string_list_view, name='key-string-list'),
    path('v1/keys/<int:pk>', key_string_detail_view, name='key-string-detail'),
    path('v1/app-key-strings/', app_info_key_string_list_view,
        name='app-key-strings-list'),
    path('v1/app-key-strings/<int:pk>', app_info_key_string_detail_view,
        name='app-key-strings-detail'),
    path('v1/localized-strings/', localized_string_list_view,
        name='localized-string-list'),
    path('v1/localized-strings/<int:pk>',
        localized_string_detail_view, name='localized-string-detail'),
    # Per-app, per-locale key/value export, e.g. /app/myapp.en
    path('app/<slug:app_slug>.<slug:locale_code>',
        key_value_list_view, name='key-value-list'),
    path('v1/login/', login_view, name='login'),
    # The Django admin is mounted at the site root.
    path('', admin.site.urls),
]
# DEBUG_TOOLBAR is a project-specific setting (not a Django builtin).
if settings.DEBUG_TOOLBAR:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
# Admin branding; site_url=None hides the "View site" link.
admin.site.site_header = 'Localizr'
admin.site.site_title = 'Localizr'
admin.site.site_url = None | 0.221098 | 0.04807
from TicTacToe import TicTacToe
from copy import deepcopy
from math import log, sqrt
from random import choice as rndchoice
import time
class GameTree:
    """A node of the Monte-Carlo search tree.

    Each node stores the board state it represents, the action that led to
    it, MCTS statistics (visit count ``n``, accumulated reward ``r``, cached
    ``uct`` score) and the player to move at this state.
    """
    def __init__(self, s, par_node=None, pre_action=None):
        self.parent = par_node
        self.pre_action = pre_action      # action that produced this state
        self.child = []
        self.r = 0                        # accumulated reward
        self.n = 0                        # visit count
        self.state = s
        self.player = MCTS.current_player(s)
        self.uct = float('inf')           # unvisited nodes are explored first
        self.result = MCTS.terminal(s)
    def __repr__(self):
        win_ratio = self.r / (self.n + 1)
        fields = (self.pre_action, ''.join(self.state), self.r, self.n,
                  str(win_ratio)[:5], str(self.uct)[:5])
        return ' '.join(str(f) for f in fields)
    def update(self, v):
        """Record one playout result: 0.5 for a draw, 1 for an opponent-of-
        mover win (the player who moved INTO this state), else 0."""
        self.n += 1
        if v == 3:
            reward = 0.5
        elif v == 3 - self.player:
            reward = 1
        else:
            reward = 0
        self.r += reward
class MCTS:
    """Monte-Carlo Tree Search player for TicTacToe.

    The tree is rebuilt from scratch for every move (see ``run_mcts``); each
    iteration performs the four classic phases: selection -> expansion ->
    simulation -> backpropagation.

    Board states are lists of 9 chars: 'X', 'O' or '_'.  Result codes:
    0 = ongoing, 1 = X wins, 2 = O wins, 3 = draw.
    """
    def __init__(self, s):
        self.root = GameTree(s)
        self.game = TicTacToe()          # game rules/constants
        self.expand_node(self.root)      # seed the root's children
    def run_mcts(self, board):
        """Rebuild the tree for *board* and search for ~2 wall-clock seconds."""
        self.__init__(board)             # discard the previous tree
        start_time = time.time()
        while time.time() - start_time < 2:
            self.mcts_loop()
    def ai_move(self):
        """Return the action of the most-visited child of the root.

        Uses max() so a root whose children are all unvisited still yields a
        legal move instead of crashing on a None best node.
        """
        return max(self.root.child, key=lambda c: c.n).pre_action
    def mcts_loop(self):
        """One selection/expansion/simulation/backpropagation iteration."""
        node = self.node_selection(self.root)
        self.expand_node(node)
        if node.child:
            selected_node = rndchoice(node.child)
        else:
            selected_node = node         # terminal leaf: nothing to expand
        v = self.simulation(deepcopy(selected_node.state))
        self.backpropagation(selected_node, v)
    def node_selection(self, node):
        """Descend from *node* to a leaf, following the max-UCT child at each
        level (refreshing each child's cached uct on the way down)."""
        while node.child:
            imax, vmax = 0, 0
            for i, n in enumerate(node.child):
                n.uct = MCTS.uct(n)
                if n.uct > vmax:
                    imax, vmax = i, n.uct
            node = node.child[imax]
        return node
    def expand_node(self, node):
        """Attach one child per legal action, unless *node* is terminal."""
        if self.terminal(node.state) == 0:
            for a in self.available_move(node.state):
                node.child.append(GameTree(self.action_result(node.state, a), node, a))
    def simulation(self, s):
        """Random playout from *s* to the end of the game; return the result code.

        BUG FIX: the original applied a single random move and returned the
        *state* (a list), so GameTree.update compared a list against result
        codes and playout rewards were never credited.  Now plays random
        moves until terminal and returns 1/2/3.
        """
        result = self.terminal(s)
        while result == 0:
            s = self.action_result(s, rndchoice(self.available_move(s)))
            result = self.terminal(s)
        return result
    def backpropagation(self, node, v):
        """Propagate playout result *v* from *node* up to the root."""
        node.update(v)
        if node.parent:
            self.backpropagation(node.parent, v)
    @staticmethod
    def terminal(s):
        """Return 1/2 if that player has three in a row, 3 for a full-board
        draw, 0 for an ongoing game."""
        for wc in TicTacToe().winning_cases:
            if s[wc[0]] != '_' and s[wc[0]] == s[wc[1]] and s[wc[1]] == s[wc[2]]:
                return 1 if s[wc[0]] == 'X' else 2
        return 3 if '_' not in s else 0
    @staticmethod
    def available_move(s):
        """Indices of the empty cells in board state *s*."""
        return [i for i in range(9) if s[i] == '_']
    @staticmethod
    def action_result(s, a):
        """Return a copy of *s* after the current player plays cell *a*."""
        p = MCTS.current_player(s)
        new_s = deepcopy(s)
        new_s[a] = 'X' if p == 1 else 'O'
        return new_s
    @staticmethod
    def current_player(s):
        """Player to move: X (1) when the count of empty cells is odd, else O (2)."""
        return 1 if s.count('_') % 2 == 1 else 2
    @staticmethod
    def uct(node):
        """UCB1 score; the epsilon keeps unvisited nodes at a huge finite value."""
        return (node.r / (node.n + 1e-12)) + sqrt(2 * log(node.parent.n + 1) / (node.n + 1e-12))
if __name__ == '__main__':
    # Self-play demo: the MCTS agent plays both sides until the game ends.
    game = TicTacToe()
    ai = MCTS(game.board)
    while game.result == 0:
        game.display_board()
        ai.run_mcts(board=game.board)   # rebuild the tree and search ~2s
        game.switch_player(ai.ai_move())
        game.check_result()
    game.display_board()
    if game.result == 3:
        print('The game has ended in a draw')
    else:
        print(f'Player {game.result} has won the game') | chapter10/TicTacToe/mcts.py | from TicTacToe import TicTacToe
from copy import deepcopy
from math import log, sqrt
from random import choice as rndchoice
import time
class GameTree:
    """A node of the Monte-Carlo search tree: board state, the action that
    produced it, and MCTS statistics (visits ``n``, reward ``r``, ``uct``)."""
    def __init__(self, s, par_node=None, pre_action=None):
        self.parent = par_node
        self.pre_action = pre_action  # action that produced this state
        self.child = []
        self.r = 0  # accumulated reward
        self.n = 0  # visit count
        self.state = s
        self.player = MCTS.current_player(s)
        self.uct = float('inf')  # unvisited nodes are explored first
        self.result = MCTS.terminal(s)
    def __repr__(self):
        # Compact debug line: action, board, reward, visits, ratio, uct.
        ratio = self.r / (self.n + 1)
        l = [str(e) for e in (self.pre_action, ''.join(self.state), self.r, self.n, str(ratio)[:5], str(self.uct)[:5])]
        return ' '.join(l)
    def update(self, v):
        """Record one playout result code: draw (3) scores 0.5; a win for the
        player who moved INTO this state (3 - self.player) scores 1."""
        self.n += 1
        if v == 3:
            self.r += 0.5
        elif v == 3 - self.player:
            self.r += 1
class MCTS:
    """Monte-Carlo Tree Search player for TicTacToe.

    The tree is rebuilt from scratch for every move (see ``run_mcts``); each
    iteration performs the four classic phases: selection -> expansion ->
    simulation -> backpropagation.

    Board states are lists of 9 chars: 'X', 'O' or '_'.  Result codes:
    0 = ongoing, 1 = X wins, 2 = O wins, 3 = draw.
    """
    def __init__(self, s):
        self.root = GameTree(s)
        self.game = TicTacToe()          # game rules/constants
        self.expand_node(self.root)      # seed the root's children
    def run_mcts(self, board):
        """Rebuild the tree for *board* and search for ~2 wall-clock seconds."""
        self.__init__(board)             # discard the previous tree
        start_time = time.time()
        while time.time() - start_time < 2:
            self.mcts_loop()
    def ai_move(self):
        """Return the action of the most-visited child of the root.

        Uses max() so a root whose children are all unvisited still yields a
        legal move instead of crashing on a None best node.
        """
        return max(self.root.child, key=lambda c: c.n).pre_action
    def mcts_loop(self):
        """One selection/expansion/simulation/backpropagation iteration."""
        node = self.node_selection(self.root)
        self.expand_node(node)
        if node.child:
            selected_node = rndchoice(node.child)
        else:
            selected_node = node         # terminal leaf: nothing to expand
        v = self.simulation(deepcopy(selected_node.state))
        self.backpropagation(selected_node, v)
    def node_selection(self, node):
        """Descend from *node* to a leaf, following the max-UCT child at each
        level (refreshing each child's cached uct on the way down)."""
        while node.child:
            imax, vmax = 0, 0
            for i, n in enumerate(node.child):
                n.uct = MCTS.uct(n)
                if n.uct > vmax:
                    imax, vmax = i, n.uct
            node = node.child[imax]
        return node
    def expand_node(self, node):
        """Attach one child per legal action, unless *node* is terminal."""
        if self.terminal(node.state) == 0:
            for a in self.available_move(node.state):
                node.child.append(GameTree(self.action_result(node.state, a), node, a))
    def simulation(self, s):
        """Random playout from *s* to the end of the game; return the result code.

        BUG FIX: the original applied a single random move and returned the
        *state* (a list), so GameTree.update compared a list against result
        codes and playout rewards were never credited.  Now plays random
        moves until terminal and returns 1/2/3.
        """
        result = self.terminal(s)
        while result == 0:
            s = self.action_result(s, rndchoice(self.available_move(s)))
            result = self.terminal(s)
        return result
    def backpropagation(self, node, v):
        """Propagate playout result *v* from *node* up to the root."""
        node.update(v)
        if node.parent:
            self.backpropagation(node.parent, v)
    @staticmethod
    def terminal(s):
        """Return 1/2 if that player has three in a row, 3 for a full-board
        draw, 0 for an ongoing game."""
        for wc in TicTacToe().winning_cases:
            if s[wc[0]] != '_' and s[wc[0]] == s[wc[1]] and s[wc[1]] == s[wc[2]]:
                return 1 if s[wc[0]] == 'X' else 2
        return 3 if '_' not in s else 0
    @staticmethod
    def available_move(s):
        """Indices of the empty cells in board state *s*."""
        return [i for i in range(9) if s[i] == '_']
    @staticmethod
    def action_result(s, a):
        """Return a copy of *s* after the current player plays cell *a*."""
        p = MCTS.current_player(s)
        new_s = deepcopy(s)
        new_s[a] = 'X' if p == 1 else 'O'
        return new_s
    @staticmethod
    def current_player(s):
        """Player to move: X (1) when the count of empty cells is odd, else O (2)."""
        return 1 if s.count('_') % 2 == 1 else 2
    @staticmethod
    def uct(node):
        """UCB1 score; the epsilon keeps unvisited nodes at a huge finite value."""
        return (node.r / (node.n + 1e-12)) + sqrt(2 * log(node.parent.n + 1) / (node.n + 1e-12))
if __name__ == '__main__':
    # Self-play demo: the MCTS agent plays both sides until the game ends.
    game = TicTacToe()
    ai = MCTS(game.board)
    while game.result == 0:
        game.display_board()
        ai.run_mcts(board=game.board)   # rebuild the tree and search ~2s
        game.switch_player(ai.ai_move())
        game.check_result()
    game.display_board()
    if game.result == 3:
        print('The game has ended in a draw')
    else:
        print(f'Player {game.result} has won the game') | 0.541409 | 0.301221