index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,200 | 1a1f5be20af628624a553756f0786c13231eb796 | import threading
import os
from subprocess import Popen, PIPE
class MPG123Player(threading.Thread):
    """Drive an ``mpg123 -R`` (remote-control) subprocess from a background thread.

    The thread's run() loop echoes mpg123's status lines until finish_it()
    is called, after which the player process is terminated.
    """

    # Daemon thread so Ctrl-C / Ctrl-D still exit when running `python -i player.py`.
    daemon = True

    def __init__(self, music=''):
        self.music = music        # default track path/URL for play()
        self.working = False      # True while a track is loaded/playing
        threading.Thread.__init__(self)
        self._kill_me = False     # set by finish_it() to stop the run() loop
        # BUG FIX: keep the devnull handle so it can be closed in quit()
        # (previously the file object was created and leaked in init_player).
        self._dev_null = open(os.devnull, 'w')
        self.player = self.init_player()
        self.player_cmd('silence')  # suppress per-frame status spam

    def finish_it(self):
        """Ask the run() loop to exit; cleanup happens in quit()."""
        self._kill_me = True

    def init_player(self):
        """Spawn mpg123 in remote-control mode with piped stdin/stdout."""
        return Popen(['mpg123', '-q', '-R', '--preload', '0.1', self.music],
                     shell=False, stdout=PIPE, stdin=PIPE, stderr=self._dev_null)

    def run(self):
        '''Thread method that is called when a Thread is started,
        this is the "main loop" of it'''
        try:
            self.player_loop()
        finally:
            self.quit()

    def play(self, music=''):
        """Load and play `music` (or the default track), stopping any current one."""
        music = music or self.music
        if self.working:
            self.stop()
        if music:
            self.player_cmd('LOAD ' + music)
            self.working = True

    def jump(self, step):
        """Seek; `step` is an mpg123 JUMP offset string (e.g. '+100', '-100', '0')."""
        self.player_cmd('JUMP ' + step)

    def pause(self):
        self.player_cmd('PAUSE')

    def stop(self):
        self.player_cmd('STOP')
        self.working = False

    def player_cmd(self, cmd):
        """Send one command line to mpg123's remote-control stdin."""
        self.player.stdin.write("{}\n".format(cmd).encode("utf-8"))
        self.player.stdin.flush()

    def quit(self):
        """Terminate the player process and release the devnull handle."""
        self.player.terminate()
        self._dev_null.close()

    def player_loop(self):
        """Main loop: start playback and echo status lines until asked to die."""
        self.play()
        while not self._kill_me:
            status = self.player.stdout.readline()
            # BUG FIX: `print status` was Python 2 syntax; decode the raw
            # bytes so the line is readable under Python 3.
            print(status.decode("utf-8", errors="replace").rstrip())
|
986,201 | 8fff857b35a469df6cab51578eda2cb532edb32a | from __future__ import print_function
import torch
import torch.optim as optim
import numpy as np
import math
def add_sparse_args(parser):
    """Register sparse-training / pruning hyperparameters on `parser`.

    Covers both Zero-Cost Neuroregeneration (GraNet-style) options and the
    gradual-pruning schedule options.
    """
    # hyperparameters for Zero-Cost Neuroregeneration
    parser.add_argument('--growth', type=str, default='random', help='Growth mode. Choose from: momentum, random, and momentum_neuron.')
    parser.add_argument('--prune', type=str, default='magnitude', help='Death mode / pruning mode. Choose from: magnitude, SET, threshold, CS_death.')
    parser.add_argument('--redistribution', type=str, default='none', help='Redistribution mode. Choose from: momentum, magnitude, nonzeros, or none.')
    parser.add_argument('--prune-rate', type=float, default=0.50, help='The pruning rate / death rate for Zero-Cost Neuroregeneration.')
    parser.add_argument('--pruning-rate', type=float, default=0.50, help='The pruning rate / death rate.')
    parser.add_argument('--sparse', action='store_true', help='Enable sparse mode. Default: True.')
    parser.add_argument('--fix', action='store_true', help='Fix topology during training. Default: True.')
    parser.add_argument('--update-frequency', type=int, default=100, metavar='N', help='how many iterations to train between mask update')
    # BUG FIX: the option list had been pasted into `default`, making the
    # default the literal string 'ERK, uniform, uniform_structured for
    # sparse training' -- not a valid init mode. The modes belong in help.
    parser.add_argument('--sparse-init', type=str, default='ERK', help='sparse initialization. Choose from: ERK, uniform, uniform_structured.')
    # hyperparameters for gradually pruning
    parser.add_argument('--method', type=str, default='GraNet', help='method name: DST, GraNet, GraNet_uniform, GMP, GMO_uniform')
    parser.add_argument('--init-density', type=float, default=0.50, help='The pruning rate / death rate.')
    parser.add_argument('--final-density', type=float, default=0.05, help='The density of the overall sparse network.')
    parser.add_argument('--init-prune-epoch', type=int, default=0, help='The pruning rate / death rate.')
    parser.add_argument('--final-prune-epoch', type=int, default=110, help='The density of the overall sparse network.')
    parser.add_argument('--rm-first', action='store_true', help='Keep the first layer dense.')
class CosineDecay(object):
    """Cosine-annealed schedule for the pruning/death rate.

    Piggybacks on torch's CosineAnnealingLR by attaching it to a throwaway
    SGD optimizer whose learning rate stands in for the prune rate.
    """

    def __init__(self, prune_rate, T_max, eta_min=0.005, last_epoch=-1):
        dummy_params = torch.nn.ParameterList([torch.nn.Parameter(torch.zeros(1))])
        self.sgd = optim.SGD(dummy_params, lr=prune_rate)
        self.cosine_stepper = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.sgd, T_max, eta_min, last_epoch)

    def step(self):
        """Advance the schedule by one step."""
        self.cosine_stepper.step()

    def get_dr(self):
        """Return the current decayed prune rate (the dummy optimizer's lr)."""
        return self.sgd.param_groups[0]['lr']
class LinearDecay(object):
    """Multiplicative decay of the prune rate on every `frequency`-th step."""

    def __init__(self, prune_rate, factor=0.99, frequency=600):
        # `prune_rate` is accepted for interface parity with CosineDecay but
        # is not stored; get_dr() takes the current rate explicitly.
        self.factor = factor
        self.steps = 0
        self.frequency = frequency

    def step(self):
        """Count one training step."""
        self.steps += 1

    def get_dr(self, prune_rate):
        """Return `prune_rate`, scaled by `factor` exactly on decay boundaries."""
        on_boundary = self.steps > 0 and self.steps % self.frequency == 0
        return prune_rate * self.factor if on_boundary else prune_rate
class Masking(object):
    """Maintains per-parameter binary masks for sparse training (GraNet-style).

    Handles mask initialization (init), gradual magnitude pruning (pruning /
    pruning_uniform) and periodic prune-and-regrow updates (truncate_weights).
    """

    def __init__(self, optimizer, prune_rate=0.3, growth_death_ratio=1.0, prune_rate_decay=None, death_mode='magnitude', growth_mode='momentum', redistribution_mode='momentum', threshold=0.001, args=None, train_loader=None, device=None):
        # Unsupported growth modes are only warned about, not rejected.
        growth_modes = ['random', 'momentum', 'momentum_neuron', 'gradient']
        if growth_mode not in growth_modes:
            print('Growth mode: {0} not supported!'.format(growth_mode))
            print('Supported modes are:', str(growth_modes))
        self.args = args
        self.loader = train_loader
        # NOTE(review): the `device` argument is ignored and CUDA is
        # hard-coded here (and via .cuda() calls elsewhere) -- confirm intended.
        self.device = torch.device("cuda")
        self.growth_mode = growth_mode
        self.death_mode = death_mode
        self.growth_death_ratio = growth_death_ratio
        self.redistribution_mode = redistribution_mode
        self.prune_rate_decay = prune_rate_decay
        self.sparse_init = args.sparse_init
        # Per-parameter bookkeeping, keyed by parameter name.
        self.masks = {}
        self.final_masks = {}
        self.grads = {}
        self.nonzero_masks = {}
        self.scores = {}
        self.pruning_rate = {}
        self.modules = []
        self.names = []
        self.optimizer = optimizer
        self.adjusted_growth = 0
        self.adjustments = []
        self.baseline_nonzero = None
        self.name2baseline_nonzero = {}
        # stats
        self.name2variance = {}
        self.name2zeros = {}
        self.name2nonzeros = {}
        self.total_variance = 0
        self.total_removed = 0
        self.total_zero = 0
        self.total_nonzero = 0
        self.total_params = 0
        self.fc_params = 0
        # NOTE(review): the `threshold` parameter is never stored or used here.
        self.prune_rate = prune_rate
        self.name2prune_rate = {}
        self.steps = 0
        # With --fix the topology never changes; otherwise the mask is
        # updated every `update_frequency` optimizer steps.
        if self.args.fix:
            self.prune_every_k_steps = None
        else:
            self.prune_every_k_steps = self.args.update_frequency
def init(self, mode='ER', density=0.05, erk_power_scale=1.0, grad_dict=None):
    """Build the initial sparsity masks according to self.sparse_init.

    Args:
        mode: unused here; selection is driven by self.args.method (GMP)
            and self.sparse_init for all other branches.
        density: target fraction of nonzero weights for density-based inits.
        erk_power_scale: exponent applied to the Erdos-Renyi probabilities.
        grad_dict: per-parameter dense gradients, required by the
            prune_and_grow_* branches.
    """
    if self.args.method == 'GMP':
        # GMP starts fully dense (all-ones masks) and prunes gradually later.
        print('initialized with GMP, ones')
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = torch.ones_like(weight, dtype=torch.float32, requires_grad=False).cuda()
                self.baseline_nonzero += (self.masks[name] != 0).sum().int().item()
        self.apply_mask()
    elif self.sparse_init == 'prune_uniform':
        # used for pruning stability test
        print('initialized by prune_uniform')
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = (weight!=0).cuda()
                num_zeros = (weight==0).sum().item()
                num_remove = (self.args.pruning_rate) * self.masks[name].sum().item()
                k = math.ceil(num_zeros + num_remove)
                # NOTE(review): this early return aborts init() for ALL
                # remaining layers (looks copy-pasted from magnitude_death);
                # confirm whether `continue` was intended.
                if num_remove == 0.0: return weight.data != 0.0
                # Zero out the k smallest-magnitude entries of this layer.
                x, idx = torch.sort(torch.abs(weight.data.view(-1)))
                self.masks[name].data.view(-1)[idx[:k]] = 0.0
                self.baseline_nonzero += (self.masks[name] != 0).sum().int().item()
        self.apply_mask()
    elif self.sparse_init == 'prune_global':
        # used for pruning stability test
        print('initialized by prune_global')
        self.baseline_nonzero = 0
        total_num_nonzoros = 0
        # First pass: current masks and per-layer nonzero counts.
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = (weight!=0).cuda()
                self.name2nonzeros[name] = (weight!=0).sum().item()
                total_num_nonzoros += self.name2nonzeros[name]
        weight_abs = []
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                weight_abs.append(torch.abs(weight))
        # Gather all scores in a single vector and normalise
        all_scores = torch.cat([torch.flatten(x) for x in weight_abs])
        num_params_to_keep = int(total_num_nonzoros * (1 - self.args.pruning_rate))
        # Global magnitude threshold: keep the top-k weights across layers.
        threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
        acceptable_score = threshold[-1]
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = ((torch.abs(weight)) >= acceptable_score).float()
        self.apply_mask()
    elif self.sparse_init == 'prune_and_grow_uniform':
        # used for pruning stability test
        print('initialized by pruning and growing uniformly')
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                # prune: drop the smallest-magnitude fraction of live weights
                self.masks[name] = (weight!=0).cuda()
                num_zeros = (weight==0).sum().item()
                num_remove = (self.args.pruning_rate) * self.masks[name].sum().item()
                k = math.ceil(num_zeros + num_remove)
                # NOTE(review): early return aborts init() for all remaining
                # layers -- same concern as in prune_uniform above.
                if num_remove == 0.0: return weight.data != 0.0
                x, idx = torch.sort(torch.abs(weight.data.view(-1)))
                self.masks[name].data.view(-1)[idx[:k]] = 0.0
                total_regrowth = (self.masks[name]==0).sum().item() - num_zeros
                # set the pruned weights to zero
                weight.data = weight.data * self.masks[name]
                if 'momentum_buffer' in self.optimizer.state[weight]:
                    self.optimizer.state[weight]['momentum_buffer'] = self.optimizer.state[weight]['momentum_buffer'] * self.masks[name]
                # grow: revive the same number of weights where |grad| is largest
                grad = grad_dict[name]
                grad = grad * (self.masks[name] == 0).float()
                y, idx = torch.sort(torch.abs(grad).flatten(), descending=True)
                self.masks[name].data.view(-1)[idx[:total_regrowth]] = 1.0
                self.baseline_nonzero += (self.masks[name] != 0).sum().int().item()
        self.apply_mask()
    elif self.sparse_init == 'prune_and_grow_global':
        # used for pruning stability test
        print('initialized by pruning and growing globally')
        self.baseline_nonzero = 0
        total_num_nonzoros = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = (weight!=0).cuda()
                self.name2nonzeros[name] = (weight!=0).sum().item()
                total_num_nonzoros += self.name2nonzeros[name]
        weight_abs = []
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                weight_abs.append(torch.abs(weight))
        # Gather all scores in a single vector and normalise
        all_scores = torch.cat([torch.flatten(x) for x in weight_abs])
        num_params_to_keep = int(total_num_nonzoros * (1 - self.args.pruning_rate))
        threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
        acceptable_score = threshold[-1]
        # prune globally by magnitude, zeroing weights and momentum buffers
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = ((torch.abs(weight)) >= acceptable_score).float()
                # set the pruned weights to zero
                weight.data = weight.data * self.masks[name]
                if 'momentum_buffer' in self.optimizer.state[weight]:
                    self.optimizer.state[weight]['momentum_buffer'] = self.optimizer.state[weight]['momentum_buffer'] * self.masks[name]
        ### grow back to each layer's original nonzero count via |grad|
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                total_regrowth = self.name2nonzeros[name] - (self.masks[name]!=0).sum().item()
                grad = grad_dict[name]
                grad = grad * (self.masks[name] == 0).float()
                y, idx = torch.sort(torch.abs(grad).flatten(), descending=True)
                self.masks[name].data.view(-1)[idx[:total_regrowth]] = 1.0
                self.baseline_nonzero += (self.masks[name] != 0).sum().int().item()
        self.apply_mask()
    # structured pruning
    elif self.sparse_init == 'prune_structured':
        # uniformly structured pruning: whole output units (rows) are removed
        print('initialized by pruning structured')
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = (weight != 0).cuda()
                nunits = weight.size(0)
                # Unit importance = L1 norm of the unit's weights.
                criteria_for_layer = weight.data.abs().view(nunits, -1).sum(dim=1)
                num_zeros = (criteria_for_layer == 0).sum().item()
                num_nonzeros = nunits-num_zeros
                num_remove = self.args.pruning_rate * num_nonzeros
                k = int(num_zeros + num_remove)
                x, idx = torch.sort(criteria_for_layer)
                self.masks[name][idx[:k]] = 0.0
        self.apply_mask()
    elif self.sparse_init == 'prune_and_grow_structured':
        # uniformly structured pruning, then regrow units by gradient norm
        print('initialized by prune_and_grow_structured')
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = (weight != 0).cuda()
                nunits = weight.size(0)
                # prune: drop the lowest-L1-norm live units
                criteria_for_layer = weight.data.abs().view(nunits, -1).sum(dim=1)
                num_zeros = (criteria_for_layer == 0).sum().item()
                num_nonzeros = nunits-num_zeros
                num_remove = self.args.pruning_rate * num_nonzeros
                print(f"number of removed channels is {num_remove}")
                k = int(num_zeros + num_remove)
                x, idx = torch.sort(criteria_for_layer)
                self.masks[name][idx[:k]] = 0.0
                # set the pruned weights to zero
                weight.data = weight.data * self.masks[name]
                if 'momentum_buffer' in self.optimizer.state[weight]:
                    self.optimizer.state[weight]['momentum_buffer'] = self.optimizer.state[weight][
                        'momentum_buffer'] * self.masks[name]
                # grow: revive as many units as were actually removed
                num_remove = num_nonzeros - (weight.data.view(nunits, -1).sum(dim=1) != 0).sum().item()
                print(f"number of removed channels is {num_remove}")
                grad = grad_dict[name]
                grad = grad * (self.masks[name] == 0).float()
                grad_criteria_for_layer = grad.data.abs().view(nunits, -1).sum(dim=1)
                y, idx = torch.sort(grad_criteria_for_layer, descending=True)
                self.masks[name][idx[:num_remove]] = 1.0
        self.apply_mask()
    elif self.sparse_init == 'uniform':
        # every layer gets the same Bernoulli(density) random mask
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name][:] = (torch.rand(weight.shape) < density).float().data.cuda() # lsw
                # self.masks[name][:] = (torch.rand(weight.shape) < density).float().data #lsw
                # NOTE: expected (not exact) count -- mask is random.
                self.baseline_nonzero += weight.numel() * density
        self.apply_mask()
    elif self.sparse_init == 'uniform_structured':
        # randomly zero out whole output units in every layer
        self.baseline_nonzero = 0
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                nunits = weight.size(0)
                num_zeros = int(nunits * (1-density))
                zero_idx = np.random.choice(range(nunits), num_zeros, replace=False)
                self.masks[name][zero_idx] = 0.0
        self.apply_mask()
    elif self.sparse_init == 'ERK':
        # Erdos-Renyi-Kernel: per-layer density proportional to
        # (fan_in + fan_out) / (fan_in * fan_out), scaled to hit `density`.
        print('initialize by ERK')
        for name, weight in self.masks.items():
            self.total_params += weight.numel()
            if 'classifier' in name:
                self.fc_params = weight.numel()
        is_epsilon_valid = False
        dense_layers = set()
        # Iterate: any layer whose implied density would exceed 1 is made
        # fully dense, and epsilon is re-solved for the remaining layers.
        while not is_epsilon_valid:
            divisor = 0
            rhs = 0
            raw_probabilities = {}
            for name, mask in self.masks.items():
                n_param = np.prod(mask.shape)
                n_zeros = n_param * (1 - density)
                n_ones = n_param * density
                if name in dense_layers:
                    # See `- default_sparsity * (N_3 + N_4)` part of the equation above.
                    rhs -= n_zeros
                else:
                    # Corresponds to `(1 - default_sparsity) * (N_1 + N_2)` part of the
                    # equation above.
                    rhs += n_ones
                # Erdos-Renyi probability: epsilon * (n_in + n_out / n_in * n_out).
                raw_probabilities[name] = (
                    np.sum(mask.shape) / np.prod(mask.shape)
                ) ** erk_power_scale
                # Note that raw_probabilities[mask] * n_param gives the individual
                # elements of the divisor.
                divisor += raw_probabilities[name] * n_param
            # By multiplying individual probabilities with epsilon, we should get the
            # number of parameters per layer correctly.
            epsilon = rhs / divisor
            # If epsilon * raw_probabilities[mask.name] > 1. We set the sparsities of that
            # mask to 0., so they become part of dense_layers sets.
            max_prob = np.max(list(raw_probabilities.values()))
            max_prob_one = max_prob * epsilon
            if max_prob_one > 1:
                is_epsilon_valid = False
                for mask_name, mask_raw_prob in raw_probabilities.items():
                    if mask_raw_prob == max_prob:
                        print(f"Sparsity of var:{mask_name} had to be set to 0.")
                        dense_layers.add(mask_name)
            else:
                is_epsilon_valid = True
        density_dict = {}
        total_nonzero = 0.0
        # With the valid epsilon, we can set sparsities of the remaining layers.
        for name, mask in self.masks.items():
            n_param = np.prod(mask.shape)
            if name in dense_layers:
                density_dict[name] = 1.0
            else:
                probability_one = epsilon * raw_probabilities[name]
                density_dict[name] = probability_one
            print(
                f"layer: {name}, shape: {mask.shape}, density: {density_dict[name]}"
            )
            # Sample the layer's random mask at its ERK density.
            self.masks[name][:] = (torch.rand(mask.shape) < density_dict[name]).float().data.cuda()
            total_nonzero += density_dict[name] * mask.numel()
        print(f"Overall sparsity {total_nonzero / self.total_params}")
        self.apply_mask()
    # Final report: realized overall density across all masked layers.
    total_size = 0
    for name, weight in self.masks.items():
        total_size += weight.numel()
    sparse_size = 0
    for name, weight in self.masks.items():
        sparse_size += (weight != 0).sum().int().item()
    print('Total parameters under sparsity level of {0}: {1}'.format(density, sparse_size / total_size))
def step(self):
    """One optimizer step plus (periodically) a mask update per self.args.method."""
    self.optimizer.step()
    self.apply_mask()
    # Decay the death/prune rate every step.
    self.prune_rate_decay.step()
    # NOTE(review): assumes a CosineDecay-style get_dr() taking no argument;
    # LinearDecay.get_dr(prune_rate) would not work here -- confirm.
    self.prune_rate = self.prune_rate_decay.get_dr()
    self.steps += 1
    if self.prune_every_k_steps is not None:
        # NOTE(review): args.multiplier is not declared in add_sparse_args()
        # as shown -- presumably set elsewhere; verify.
        if self.args.method == 'GraNet':
            # Start only after init_prune_epoch, then act every k steps:
            # global gradual pruning + prune-and-regrow.
            if self.steps >= (self.args.init_prune_epoch * len(self.loader)*self.args.multiplier) and self.steps % self.prune_every_k_steps == 0:
                self.pruning(self.steps)
                self.truncate_weights(self.steps)
                self.print_nonzero_counts()
        elif self.args.method == 'GraNet_uniform':
            if self.steps >= (self.args.init_prune_epoch * len(
                    self.loader) * self.args.multiplier) and self.steps % self.prune_every_k_steps == 0:
                self.pruning_uniform(self.steps)
                self.truncate_weights(self.steps)
                self.print_nonzero_counts()
                # _, _ = self.fired_masks_update()
        elif self.args.method == 'DST':
            # Pure dynamic sparse training: regrow without schedule pruning.
            if self.steps % self.prune_every_k_steps == 0:
                self.truncate_weights()
                self.print_nonzero_counts()
        elif self.args.method == 'GMP':
            # Gradual magnitude pruning only (no regrowth).
            if self.steps >= (self.args.init_prune_epoch * len(self.loader) * self.args.multiplier) and self.steps % self.prune_every_k_steps == 0:
                self.pruning(self.steps)
        elif self.args.method == 'GMP_uniform':
            if self.steps >= (self.args.init_prune_epoch * len(self.loader) * self.args.multiplier) and self.steps % self.prune_every_k_steps == 0:
                self.pruning_uniform(self.steps)
def pruning(self, step):
    """Global gradual magnitude pruning following a cubic sparsity schedule."""
    # prune_rate = 1 - self.args.final_density - self.args.init_density
    curr_prune_iter = int(step / self.prune_every_k_steps)
    final_iter = int((self.args.final_prune_epoch * len(self.loader)*self.args.multiplier) / self.prune_every_k_steps)
    ini_iter = int((self.args.init_prune_epoch * len(self.loader)*self.args.multiplier) / self.prune_every_k_steps)
    total_prune_iter = final_iter - ini_iter
    print('******************************************************')
    print(f'Pruning Progress is {curr_prune_iter - ini_iter} / {total_prune_iter}')
    print('******************************************************')
    if curr_prune_iter >= ini_iter and curr_prune_iter <= final_iter:
        # Cubic schedule (Zhu & Gupta style): sparsity ramps quickly early,
        # then flattens toward final_density.
        prune_decay = (1 - ((curr_prune_iter - ini_iter) / total_prune_iter)) ** 3
        curr_prune_rate = (1 - self.args.init_density) + (self.args.init_density - self.args.final_density) * (
                1 - prune_decay)
        weight_abs = []
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                weight_abs.append(torch.abs(weight))
        # Gather all scores in a single vector and normalise
        all_scores = torch.cat([torch.flatten(x) for x in weight_abs])
        num_params_to_keep = int(len(all_scores) * (1 - curr_prune_rate))
        # Global magnitude threshold across all masked layers.
        threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
        acceptable_score = threshold[-1]
        for module in self.modules:
            for name, weight in module.named_parameters():
                if name not in self.masks: continue
                self.masks[name] = ((torch.abs(weight)) > acceptable_score).float() # must be > to prevent acceptable_score is zero, leading to dense tensors
        self.apply_mask()
        # Report the realized sparsity after this pruning round.
        total_size = 0
        for name, weight in self.masks.items():
            total_size += weight.numel()
        print('Total Model parameters:', total_size)
        sparse_size = 0
        for name, weight in self.masks.items():
            sparse_size += (weight != 0).sum().int().item()
        print('Sparsity after pruning: {0}'.format(
            (total_size-sparse_size) / total_size))
def pruning_uniform(self, step):
    """Layer-uniform gradual magnitude pruning (same cubic schedule as pruning()).

    Each layer is pruned to the same rate; at high sparsity (>= 0.8) the
    classifier layer is held at density 0.2 and the other layers absorb
    the difference.
    """
    # prune_rate = 1 - self.args.final_density - self.args.init_density
    curr_prune_iter = int(step / self.prune_every_k_steps)
    final_iter = (self.args.final_prune_epoch * len(self.loader)*self.args.multiplier) / self.prune_every_k_steps
    ini_iter = (self.args.init_prune_epoch * len(self.loader)*self.args.multiplier) / self.prune_every_k_steps
    total_prune_iter = final_iter - ini_iter
    print('******************************************************')
    print(f'Pruning Progress is {curr_prune_iter - ini_iter} / {total_prune_iter}')
    print('******************************************************')
    if curr_prune_iter >= ini_iter and curr_prune_iter <= final_iter:
        # Cubic sparsity schedule, as in pruning().
        prune_decay = (1 - ((curr_prune_iter - ini_iter) / total_prune_iter)) ** 3
        curr_prune_rate = (1 - self.args.init_density) + (self.args.init_density - self.args.final_density) * (
                1 - prune_decay)
        # keep the density of the last layer at 0.2 if sparsity is larger than 0.8
        if curr_prune_rate >= 0.8:
            # Re-balance: non-classifier layers take the extra pruning needed
            # to keep the classifier at density 0.2 overall.
            curr_prune_rate = 1 - (self.total_params * (1-curr_prune_rate) - 0.2 * self.fc_params)/(self.total_params-self.fc_params)
            for module in self.modules:
                for name, weight in module.named_parameters():
                    if name not in self.masks: continue
                    score = torch.flatten(torch.abs(weight))
                    if 'classifier' in name:
                        num_params_to_keep = int(len(score) * 0.2)
                        threshold, _ = torch.topk(score, num_params_to_keep, sorted=True)
                        acceptable_score = threshold[-1]
                        self.masks[name] = ((torch.abs(weight)) >= acceptable_score).float()
                    else:
                        num_params_to_keep = int(len(score) * (1 - curr_prune_rate))
                        threshold, _ = torch.topk(score, num_params_to_keep, sorted=True)
                        acceptable_score = threshold[-1]
                        self.masks[name] = ((torch.abs(weight)) >= acceptable_score).float()
        else:
            # Uniform per-layer magnitude pruning at curr_prune_rate.
            for module in self.modules:
                for name, weight in module.named_parameters():
                    if name not in self.masks: continue
                    score = torch.flatten(torch.abs(weight))
                    num_params_to_keep = int(len(score) * (1 - curr_prune_rate))
                    threshold, _ = torch.topk(score, num_params_to_keep, sorted=True)
                    acceptable_score = threshold[-1]
                    self.masks[name] = ((torch.abs(weight)) >= acceptable_score).float()
        self.apply_mask()
        # Report the realized sparsity after this pruning round.
        total_size = 0
        for name, weight in self.masks.items():
            total_size += weight.numel()
        print('Total Model parameters:', total_size)
        sparse_size = 0
        for name, weight in self.masks.items():
            sparse_size += (weight != 0).sum().int().item()
        print('Sparsity after pruning: {0}'.format(
            (total_size-sparse_size) / total_size))
def add_module(self, module, sparse_init='ERK', grad_dic=None):
    """Register a model: build all-ones masks for its 2D/4D params, then init().

    NOTE(review): the `sparse_init` parameter is effectively ignored --
    `self.sparse_init = self.sparse_init` is a no-op and init() is driven
    by self.args.sparse_init instead. Confirm whether the parameter should
    be honored.
    """
    self.module = module
    self.sparse_init = self.sparse_init
    self.modules.append(module)
    for name, tensor in module.named_parameters():
        # Only conv (4D) and linear (2D) weights are masked; biases and
        # norm parameters stay dense.
        if len(tensor.size()) == 4 or len(tensor.size()) == 2:
            self.names.append(name)
            self.masks[name] = torch.ones_like(tensor, dtype=torch.float32, requires_grad=False).cuda()
    if self.args.rm_first:
        # Keep the first conv layer dense by dropping its mask entirely.
        for name, tensor in module.named_parameters():
            if 'conv.weight' in name or 'feature.0.weight' in name:
                self.masks.pop(name)
                print(f"pop out {name}")
    self.init(mode=self.args.sparse_init, density=self.args.init_density, grad_dict=grad_dic)
def remove_weight(self, name):
if name in self.masks:
print('Removing {0} of size {1} = {2} parameters.'.format(name, self.masks[name].shape,
self.masks[name].numel()))
self.masks.pop(name)
elif name + '.weight' in self.masks:
print('Removing {0} of size {1} = {2} parameters.'.format(name, self.masks[name + '.weight'].shape,
self.masks[name + '.weight'].numel()))
self.masks.pop(name + '.weight')
else:
print('ERROR', name)
def remove_weight_partial_name(self, partial_name):
removed = set()
for name in list(self.masks.keys()):
if partial_name in name:
print('Removing {0} of size {1} with {2} parameters...'.format(name, self.masks[name].shape,
np.prod(self.masks[name].shape)))
removed.add(name)
self.masks.pop(name)
print('Removed {0} layers.'.format(len(removed)))
i = 0
while i < len(self.names):
name = self.names[i]
if name in removed:
self.names.pop(i)
else:
i += 1
def remove_type(self, nn_type):
    """Remove masks for every submodule that is an instance of `nn_type`."""
    for module in self.modules:
        # NOTE: the inner loop variable deliberately shadows `module`; the
        # named_modules() iterator is created before the rebind, so this works.
        for name, module in module.named_modules():
            if isinstance(module, nn_type):
                self.remove_weight(name)
def apply_mask(self):
    """Zero out masked weights (and their SGD momentum buffers) in place."""
    for module in self.modules:
        for name, tensor in module.named_parameters():
            if name in self.masks:
                tensor.data = tensor.data*self.masks[name]
                # Also silence momentum so pruned weights do not drift back
                # to nonzero values on the next optimizer step.
                if 'momentum_buffer' in self.optimizer.state[tensor]:
                    self.optimizer.state[tensor]['momentum_buffer'] = self.optimizer.state[tensor]['momentum_buffer']*self.masks[name]
def truncate_weights(self, step=None):
    """One prune-and-regrow cycle: magnitude death, then gradient regrowth.

    The number of weights killed per layer is recorded so growth restores
    exactly the same count, keeping the layer's density constant.
    """
    self.gather_statistics()
    # prune
    for module in self.modules:
        for name, weight in module.named_parameters():
            if name not in self.masks: continue
            mask = self.masks[name]
            new_mask = self.magnitude_death(mask, weight, name)
            # How many weights died in this layer (to be regrown below).
            self.pruning_rate[name] = int(self.name2nonzeros[name] - new_mask.sum().item())
            self.masks[name][:] = new_mask
    # grow
    for module in self.modules:
        for name, weight in module.named_parameters():
            if name not in self.masks: continue
            new_mask = self.masks[name].data.byte()
            new_mask = self.gradient_growth(name, new_mask, self.pruning_rate[name], weight)
            # exchanging masks
            self.masks.pop(name)
            self.masks[name] = new_mask.float()
    self.apply_mask()
'''
REDISTRIBUTION
'''
def gather_statistics(self):
self.name2nonzeros = {}
self.name2zeros = {}
for module in self.modules:
for name, tensor in module.named_parameters():
if name not in self.masks: continue
mask = self.masks[name]
self.name2nonzeros[name] = mask.sum().item()
self.name2zeros[name] = mask.numel() - self.name2nonzeros[name]
#DEATH
def magnitude_death(self, mask, weight, name):
num_remove = math.ceil(self.prune_rate*self.name2nonzeros[name])
if num_remove == 0.0: return weight.data != 0.0
num_zeros = self.name2zeros[name]
k = math.ceil(num_zeros + num_remove)
x, idx = torch.sort(torch.abs(weight.data.view(-1)))
threshold = x[k-1].item()
return (torch.abs(weight.data) > threshold)
#GROWTH
def random_growth(self, name, new_mask, total_regrowth, weight):
n = (new_mask==0).sum().item()
if n == 0: return new_mask
expeced_growth_probability = (total_regrowth/n)
new_weights = torch.rand(new_mask.shape).cuda() < expeced_growth_probability #lsw
# new_weights = torch.rand(new_mask.shape) < expeced_growth_probability
new_mask_ = new_mask.byte() | new_weights
if (new_mask_!=0).sum().item() == 0:
new_mask_ = new_mask
return new_mask_
def momentum_growth(self, name, new_mask, total_regrowth, weight):
grad = self.get_momentum_for_weight(weight)
grad = grad*(new_mask==0).float()
y, idx = torch.sort(torch.abs(grad).flatten(), descending=True)
new_mask.data.view(-1)[idx[:total_regrowth]] = 1.0
return new_mask
def gradient_growth(self, name, new_mask, total_regrowth, weight):
grad = self.get_gradient_for_weights(weight)
grad = grad*(new_mask==0).float()
y, idx = torch.sort(torch.abs(grad).flatten(), descending=True)
new_mask.data.view(-1)[idx[:total_regrowth]] = 1.0
return new_mask
'''
UTILITY
'''
def get_momentum_for_weight(self, weight):
if 'exp_avg' in self.optimizer.state[weight]:
adam_m1 = self.optimizer.state[weight]['exp_avg']
adam_m2 = self.optimizer.state[weight]['exp_avg_sq']
grad = adam_m1/(torch.sqrt(adam_m2) + 1e-08)
elif 'momentum_buffer' in self.optimizer.state[weight]:
grad = self.optimizer.state[weight]['momentum_buffer']
return grad
def get_gradient_for_weights(self, weight):
grad = weight.grad.clone()
return grad
def print_nonzero_counts(self):
    """Log per-layer before->after nonzero counts and density, then the death rate.

    The "before" counts come from name2nonzeros, populated by
    gather_statistics() during the last truncate_weights() call.
    """
    for module in self.modules:
        for name, tensor in module.named_parameters():
            if name not in self.masks: continue
            mask = self.masks[name]
            num_nonzeros = (mask != 0).sum().item()
            val = '{0}: {1}->{2}, density: {3:.3f}'.format(name, self.name2nonzeros[name], num_nonzeros, num_nonzeros/float(mask.numel()))
            print(val)
    print('Death rate: {0}\n'.format(self.prune_rate))
def reset_momentum(self):
    """
    Taken from: https://github.com/AlliedToasters/synapses/blob/master/synapses/SET_layer.py
    Resets buffers from memory according to passed indices.
    When connections are reset, parameters should be treated
    as freshly initialized.
    """
    for module in self.modules:
        for name, tensor in module.named_parameters():
            if name not in self.masks: continue
            mask = self.masks[name]
            weights = list(self.optimizer.state[tensor])
            for w in weights:
                if w == 'momentum_buffer':
                    # SGD momentum.
                    # NOTE(review): args.reset_mom_zero is not declared in
                    # add_sparse_args() as shown -- confirm it is set elsewhere.
                    if self.args.reset_mom_zero:
                        print('zero')
                        self.optimizer.state[tensor][w][mask == 0] = 0
                    else:
                        print('mean')
                        # Dead entries get the mean of the surviving entries.
                        self.optimizer.state[tensor][w][mask==0] = torch.mean(self.optimizer.state[tensor][w][mask.byte()])
                    # self.optimizer.state[tensor][w][mask==0] = 0
                elif w == 'square_avg' or \
                        w == 'exp_avg' or \
                        w == 'exp_avg_sq' or \
                        w == 'exp_inf':
                    # Adam-family statistics: reset dead entries to the live mean.
                    self.optimizer.state[tensor][w][mask==0] = torch.mean(self.optimizer.state[tensor][w][mask.byte()])
def fired_masks_update(self):
    """Accumulate which weights have ever been active ("fired") across updates.

    Returns:
        (layer_fired_weights, total_fired_weights): per-layer and overall
        fractions of weights that have been nonzero at least once.

    NOTE(review): self.fired_masks is never initialized in the code shown
    here -- callers must set it up (e.g. to copies of the initial masks)
    before the first call, or this raises AttributeError.
    """
    ntotal_fired_weights = 0.0
    ntotal_weights = 0.0
    layer_fired_weights = {}
    for module in self.modules:
        for name, weight in module.named_parameters():
            if name not in self.masks: continue
            # OR the current mask into the history of ever-active positions.
            self.fired_masks[name] = self.masks[name].data.byte() | self.fired_masks[name].data.byte()
            ntotal_fired_weights += float(self.fired_masks[name].sum().item())
            ntotal_weights += float(self.fired_masks[name].numel())
            layer_fired_weights[name] = float(self.fired_masks[name].sum().item())/float(self.fired_masks[name].numel())
            print('Layerwise percentage of the fired weights of', name, 'is:', layer_fired_weights[name])
    total_fired_weights = ntotal_fired_weights/ntotal_weights
    print('The percentage of the total fired weights is:', total_fired_weights)
    return layer_fired_weights, total_fired_weights
# Read a count, then that many lines from stdin, and print the
# lexicographically smallest line.
# BUG FIX: this was Python 2 -- `input()` eval'd the typed text and
# `print min(tako)` is invalid Python 3 syntax.
num = int(input())
tako = [input() for _ in range(num)]
print(min(tako))
|
986,203 | 7a31f2093253d8dd0a31f1089c690ec53e8c32f7 | import json
from collections import defaultdict
# def format_date(s):
# return
import csv

# Average the 2016 NYC snow-depth readings per day across a fixed set of
# stations and dump the result to weather.json.
output = defaultdict(list)
nyc_stations = ["NY CITY CENTRAL PARK NY US", "STATEN ISLAND 1.4 SE NY US", "LA GUARDIA AIRPORT NY US", "STATEN ISLAND 4.5 SSE NY US", "JFK INTERNATIONAL AIRPORT NY US"]
with open("weather.csv") as f:
    # BUG FIX: the previous naive line.split(',') breaks on quoted CSV
    # fields that contain commas; csv.reader handles quoting correctly.
    reader = csv.reader(f)
    next(reader)  # skip the header row
    for row in reader:
        raw_date = row[5]           # YYYYMMDD...
        year = raw_date[:4]
        date = raw_date[4:6] + "/" + raw_date[6:8] + "/" + raw_date[:4]  # MM/DD/YYYY
        snow_d = float(row[11])     # snow depth reading
        station = row[1]
        # -9999 is the provider's missing-data sentinel; zero depths are
        # also skipped so only real snow days are averaged.
        if year == "2016" and snow_d != -9999 and snow_d != 0 and station in nyc_stations:
            output[date].append(snow_d)
# Collapse each day's readings to a mean rounded to 2 decimals.
output = { key: round(sum(output[key])/len(output[key]), 2) for key in output}
with open('weather.json', 'w') as f:
    json.dump(output, f)
986,204 | 97bb62eb8cf291710b9747a3596043c07d29ac0e | from hospital_pricing.models import Hospital, Pricing, DRGCode
from api.serializers import HospitalSerializer, PricingSerializer
from rest_framework import generics, permissions, status, viewsets
from rest_framework.response import Response
class PricingViewSet(viewsets.ModelViewSet):
    """
    This ViewSet provides both 'list' and 'detail' views.
    """
    queryset = Pricing.objects.select_related('hospital','drg_code').order_by('pricing_id')
    serializer_class = PricingSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def delete(self, request, pk, format=None):
        """Delete the Pricing addressed by the URL and return 204 No Content."""
        # BUG FIX: DRF's get_object() takes no arguments (it reads
        # self.kwargs), and perform_destroy() was being called with an
        # extra `self` -- both raised TypeError at request time.
        site = self.get_object()
        self.perform_destroy(site)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def perform_destroy(self, instance):
        """Remove the instance from the database."""
        instance.delete()
class HospitalViewSet(viewsets.ModelViewSet):
    """
    This ViewSet provides both 'list' and 'detail' views.
    """
    # BUG FIX: order_by('hospital.hospital_id') is not a valid Django field
    # lookup (dots are not allowed); the model's own pk field is hospital_id.
    queryset = Hospital.objects.select_related('city','state','zip_code','hospital_quality_score','hospital_ownership').order_by('hospital_id')
    serializer_class = HospitalSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def delete(self, request, pk, format=None):
        """Delete the Hospital addressed by the URL and return 204 No Content."""
        # BUG FIX: get_object() takes no arguments, and perform_destroy()
        # was being called with an extra `self` (TypeError at request time).
        site = self.get_object()
        self.perform_destroy(site)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def perform_destroy(self, instance):
        """Remove the instance from the database."""
        instance.delete()
'''
class SiteListAPIView(generics.ListCreateAPIView):
queryset = HeritageSite.objects.select_related('heritage_site_category').order_by('site_name')
serializer_class = HeritageSiteSerializer
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
'''
'''
class SiteDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = HeritageSite.objects.select_related('heritage_site_category').order_by('site_name')
serializer_class = HeritageSiteSerializer
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
''' |
986,205 | 0634856b07562b96ef2da8a4113197fcd3852744 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Schedule, Comment
# Expose Schedule and Comment in the Django admin with default ModelAdmin options.
admin.site.register(Schedule)
admin.site.register(Comment)
|
986,206 | abc93ff3730229724865ce467de1057d41c71536 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound
from .models import SadrzajProjekatPodkategorije, Prijava, Istaknuto, Projekat, ProjekatPodkategorija, Aktuelnost, ClanTima, Fotografija, Partner, PressClanak, Publikacija, IstorijskaTura, VideoKlip, Izlozba, ArhivskiMaterijal
from users.models import Profil
from itertools import chain
from .forms import PrijavaForm
def home(request):
    """Render the landing page: projects, gallery carousel, news and media."""
    projekti = Projekat.objects.all().order_by('-Datum_objave')
    galerija = list(Fotografija.objects.all().order_by('-Datum_objave'))
    aktuelnosti = Aktuelnost.objects.all().order_by('-Datum_objave')
    # Association's own profile, used for the header/about section.
    cpi = Profil.objects.get(ime='Centar za Primenjenu Istoriju')
    istaknuto = Istaknuto.objects.all()
    # Everything but the newest news item (the first is rendered separately).
    aktuelnostiX = aktuelnosti[1:]
    # Zero-padded ordinal labels ("01.", "02.", ...) shown next to each project.
    projekti_nums = ["{:02d}.".format(i) for i in range(1, len(projekti) + 1)]
    video_klipovi = VideoKlip.objects.all().order_by('-Datum_objave')
    publikacije = Publikacija.objects.all().order_by('-Datum_objave')
    # Gallery photos grouped into carousel slides of up to 7 images each
    # (replaces the original index-and-IndexError chunking loop).
    slides = [galerija[i:i + 7] for i in range(0, len(galerija), 7)]
    # Strip the "/en" language prefix so templates can build language links.
    url = request.path
    if 'en' in url:
        url = url[3:]
    context = {'istaknuto': istaknuto, 'cpi': cpi, 'projekti': projekti, 'slides': slides,
               'galerija': galerija, 'url': url, 'aktuelnosti': aktuelnosti,
               'video_klipovi': video_klipovi, 'publikacije': publikacije,
               'projekti_nums': projekti_nums, 'aktuelnostiX': aktuelnostiX}
    return render(request, 'cpi/b4.html', context)
def aktuelnosti(request):
    """Render the news archive, paginated into slides of five items each."""
    # Strip the "/en" language prefix for language-aware links.
    url = request.path
    if 'en' in url:
        url = url[3:]
    aktuelnosti_list = Aktuelnost.objects.all().order_by('-Datum_objave')
    aktuelnosti_listX = aktuelnosti_list[1:]  # all but the newest item
    items = list(aktuelnosti_list)
    # Each page holds five items: the first one is shown as a large feature
    # box (slideXX) and the remaining four as the regular list (slides).
    # (Replaces the original index-and-IndexError chunking loop.)
    chunks = [items[i:i + 5] for i in range(0, len(items), 5)]
    slideXX = [chunk[:1] for chunk in chunks]
    slides = [chunk[1:] for chunk in chunks]
    context = {'aktuelnosti_list': aktuelnosti_list, 'aktuelnosti_listX': aktuelnosti_listX,
               'slides': slides, 'url': url, 'slideXX': slideXX}
    return render(request, 'cpi/aktuelnosti.html', context)
def aktuelnost_detalji(request, pk):
    """Render the detail page for one news item ("aktuelnost")."""
    url = request.path
    if 'en' in url:
        url = url[3:]
    aktuelnost = Aktuelnost.objects.get(id=pk)
    # BUG FIX: `url` was computed but never passed to the template, unlike
    # every other view in this module; include it so language links work.
    context = {'aktuelnost': aktuelnost, 'url': url}
    return render(request, 'cpi/aktuelnost_detalji.html', context)
def pretraga(request):
    """Search projects, team, photos, partners, publications and press by name."""
    path = request.path
    url = path[3:] if 'en' in path else path
    print(url)
    if 'q' in request.GET and request.GET['q']:
        query = request.GET['q']
        # Case-insensitive substring search across all searchable models,
        # flattened into a single result list.
        rezultati = list(chain(
            Projekat.objects.filter(Ime__icontains=query),
            ClanTima.objects.filter(Ime__icontains=query),
            Fotografija.objects.filter(Ime__icontains=query),
            Partner.objects.filter(Ime__icontains=query),
            Publikacija.objects.filter(Naslov__icontains=query),
            PressClanak.objects.filter(Naslov__icontains=query),
        ))
        return render(request, 'cpi/rezultati_pretrage.html',
                      {'rezultati': rezultati, 'query': query, 'url': url})
    # No (or empty) query string: show the error state.
    return render(request, 'cpi/rezultati_pretrage.html', {'url': url, 'error': True})
def prijava(request):
    """Handle a tour sign-up form POST; any other method gets a 404."""
    url = request.path
    if 'en' in url:
        url = url[3:]
    if request.method == 'POST':
        # NOTE(review): direct indexing raises KeyError (HTTP 500) if a field
        # is missing from the POST body -- assumes the form always submits
        # all four fields; confirm against the template.
        prijava_obj = Prijava(Ime=request.POST['ime'],
                              Prezime=request.POST['prezime'],
                              Email=request.POST['email'],
                              Tura=request.POST['tour'])
        prijava_obj.save()
        context = {'url': url}
        return render(request, 'cpi/potvrda_prijave.html', context)
    else:
        return HttpResponseNotFound('<h1>Page not found</h1>')
def projekti(request):
    """Render the project archive: desktop slides of 9, mobile slides of 5."""
    url = request.path
    if 'en' in url:
        url = url[3:]
    projekti = list(Projekat.objects.all().order_by('-Datum_objave'))
    # One zero-padded ordinal label per project ("01.", "02.", ...),
    # numbered continuously across all slides.
    labels = ["{:02d}.".format(i) for i in range(1, len(projekti) + 1)]
    # Desktop carousel: chunks of nine projects with matching label chunks.
    # (Replaces the original index-and-IndexError chunking loops.)
    slides = [projekti[i:i + 9] for i in range(0, len(projekti), 9)]
    num_lists = [labels[i:i + 9] for i in range(0, len(labels), 9)]
    # Mobile carousel: the same data re-chunked in fives.
    slidesM = [projekti[i:i + 5] for i in range(0, len(projekti), 5)]
    num_listsM = [labels[i:i + 5] for i in range(0, len(labels), 5)]
    context = {'projekti': projekti, 'slides': slides, 'url': url, 'num_lists': num_lists,
               'slidesM': slidesM, 'num_listsM': num_listsM}
    return render(request, 'cpi/projekti.html', context)
def projekat_galerija(request, pk):
    """Render the photo gallery that belongs to a single project."""
    projekat = Projekat.objects.get(id=pk)
    galerija = Fotografija.objects.filter(Projekat=projekat)
    path = request.path
    url = path[3:] if 'en' in path else path
    print(galerija)
    return render(request, 'cpi/projekat_galerija.html',
                  {'projekat': projekat, 'galerija': galerija, 'url': url})
def o_nama(request):
    """Render the "about us" page for the CPI association profile."""
    path = request.path
    url = path[3:] if 'en' in path else path
    profil = Profil.objects.get(ime="Centar za Primenjenu Istoriju")
    return render(request, 'cpi/o_nama.html', {'profil': profil, 'url': url})
def tim(request):
    """Render the team overview page listing all team members."""
    path = request.path
    url = path[3:] if 'en' in path else path
    ekipa = ClanTima.objects.all()
    return render(request, 'cpi/tim.html', {'ekipa': ekipa, 'url': url})
def clanovi_detalji(request):
    """Render the team-member details page.

    BUG FIX: the original referenced an undefined local ``context`` and
    raised NameError on every request; render with an empty context instead.
    """
    return render(request, 'cpi/clanovi_detalji.html', {})
def partneri(request):
    """Render the partners listing page."""
    path = request.path
    url = path[3:] if 'en' in path else path
    prijatelji = Partner.objects.all()
    return render(request, 'cpi/partneri.html', {'prijatelji': prijatelji, 'url': url})
def galerija(request):
    """Render the full photo-gallery page."""
    path = request.path
    url = path[3:] if 'en' in path else path
    galerija = Fotografija.objects.all()
    return render(request, 'cpi/galerija.html', {'galerija': galerija, 'url': url})
def press(request):
    """Render the press-articles listing page."""
    path = request.path
    url = path[3:] if 'en' in path else path
    clanci = PressClanak.objects.all()
    return render(request, 'cpi/press.html', {'clanci': clanci, 'url': url})
def istorijske_ture(request):
    """Render the historical-tours page: desktop slides of 9, mobile of 5."""
    url = request.path
    if 'en' in url:
        url = url[3:]
    ture = list(IstorijskaTura.objects.all().order_by('-Datum_objave'))
    # Desktop carousel: chunks of nine tours; mobile: chunks of five.
    # (Replaces the index-and-IndexError chunking loops. The original also
    # built a `num_lists` label list that was only printed, never passed to
    # the template -- dead debug code, removed.)
    slides = [ture[i:i + 9] for i in range(0, len(ture), 9)]
    slidesX = [ture[i:i + 5] for i in range(0, len(ture), 5)]
    context = {'slides': slides, 'slidesX': slidesX, 'ture': ture, 'url': url}
    return render(request, 'cpi/istorijske_ture.html', context)
def tura_detalji(request, pk):
    """Render the detail page for a single historical tour."""
    path = request.path
    url = path[3:] if 'en' in path else path
    tura = IstorijskaTura.objects.get(id=pk)
    return render(request, 'cpi/tura_detalji.html', {'tura': tura, 'url': url})
def publikacije(request):
    """Render the publications page: desktop slides of 9, mobile slides of 5."""
    url = request.path
    if 'en' in url:
        url = url[3:]
    publikacije = list(Publikacija.objects.all().order_by('-Datum_objave'))
    # Desktop carousel: chunks of nine; mobile: chunks of five.
    # (Replaces the index-and-IndexError chunking loops. The original also
    # computed a `num_lists` label list that was only printed, never used
    # by the template -- dead debug code, removed.)
    slides = [publikacije[i:i + 9] for i in range(0, len(publikacije), 9)]
    slidesX = [publikacije[i:i + 5] for i in range(0, len(publikacije), 5)]
    context = {'publikacije': publikacije, 'slides': slides, 'slidesX': slidesX, 'url': url}
    return render(request, 'cpi/publikacije.html', context)
def projekti_detalji(request, pk):
    """Render a project's detail page and build its section menu.

    ``meni_list`` holds the Serbian menu labels and ``menu_list`` the
    English counterparts, in matching order; a section appears only when
    the project actually has content of that kind.
    """
    url = request.path
    if 'en' in url:
        url = url[3:]
    projekat = Projekat.objects.get(id=pk)
    izlozbe = Izlozba.objects.filter(Projekat=projekat)
    # (The original fetched all publications first and immediately overwrote
    # the variable with the filtered set -- dead query, removed.)
    publikacije = Publikacija.objects.filter(Projekat=projekat)
    galerija = Fotografija.objects.filter(Projekat=projekat)
    arhiva = ArhivskiMaterijal.objects.filter(Projekat=projekat)
    press = PressClanak.objects.filter(Projekat=projekat)
    podkategorija = SadrzajProjekatPodkategorije.objects.filter(Projekat=projekat)
    meni_list = []
    menu_list = []
    if izlozbe:
        meni_list.append("IZLOŽBA")
        menu_list.append("EXHIBITION")
    if publikacije:
        meni_list.append("PUBLIKACIJE")
        menu_list.append("PUBLICATIONS")
    if arhiva:
        meni_list.append("ARHIVSKI MATERIJAL")
        menu_list.append("ARCHIVE")
    if press:
        meni_list.append("PRESS")
        menu_list.append("PRESS")
    if galerija:
        meni_list.append("GALERIJA")
        menu_list.append("GALLERY")
    if podkategorija:
        # NOTE(review): .get() raises MultipleObjectsReturned if a project
        # ever has more than one subcategory row -- assumed unique.
        podkategorija_instance = SadrzajProjekatPodkategorije.objects.get(Projekat=projekat)
        meni_list.append(podkategorija_instance.Podkategorija.Ime.upper())
        menu_list.append(podkategorija_instance.Podkategorija.Ime_en.upper())
    context = {'projekat': projekat, "meni_list": meni_list, "menu_list": menu_list, 'url': url}
    return render(request, 'cpi/projekat_detalji.html', context)
def projekat_info(request, pk, info):
    """Dispatch a project's sub-page (gallery, press, archive, ...) by the
    menu label produced in projekti_detalji (Serbian or English)."""
    path = request.path
    url = path[3:] if 'en' in path else path
    projekat = Projekat.objects.get(id=pk)
    if info in ("GALERIJA", "GALLERY"):
        galerija = Fotografija.objects.filter(Projekat=projekat)
        return render(request, 'cpi/projekat_galerija.html',
                      {'galerija': galerija, 'info': info, 'projekat': projekat, 'url': url})
    if info == "PRESS":
        press = PressClanak.objects.filter(Projekat=projekat)
        return render(request, 'cpi/projekat_press.html',
                      {'press': press, 'info': info, 'projekat': projekat, 'url': url})
    if info in ("ARHIVSKI MATERIJAL", "ARCHIVE"):
        arhiva = ArhivskiMaterijal.objects.filter(Projekat=projekat)
        return render(request, 'cpi/projekat_arhiva.html',
                      {'arhiva': arhiva, 'info': info, 'projekat': projekat, 'url': url})
    if info in ("PUBLIKACIJE", "PUBLICATIONS"):
        publikacije = Publikacija.objects.filter(Projekat=projekat)
        return render(request, 'cpi/projekat_publikacije.html',
                      {'publikacije': publikacije, 'info': info, 'projekat': projekat, 'url': url})
    if info in ("IZLOŽBA", "EXHIBITION"):
        izlozba = Izlozba.objects.filter(Projekat=projekat)
        return render(request, 'cpi/projekat_izlozba.html',
                      {'izlozba': izlozba, 'info': info, 'projekat': projekat, 'url': url})
    # Any other label is a custom project subcategory.
    sadrzaj = SadrzajProjekatPodkategorije.objects.get(Projekat=projekat)
    return render(request, "cpi/sadrzaj.html",
                  {'sadrzaj': sadrzaj, 'info': info, 'projekat': projekat, 'url': url})
def publikacije_detalji(request, pk):
    """Render the detail page for a single publication."""
    path = request.path
    url = path[3:] if 'en' in path else path
    publikacija = Publikacija.objects.get(id=pk)
    return render(request, 'cpi/publikacija_detalji.html',
                  {'publikacija': publikacija, 'url': url})
def press_detalji(request, pk):
    """Render the detail page for a single press article."""
    path = request.path
    url = path[3:] if 'en' in path else path
    clanak = PressClanak.objects.get(id=pk)
    return render(request, 'cpi/press_detalji.html', {'clanak': clanak, 'url': url})
def galerija_detalji(request, pk):
    """Render the lightbox view for one photo plus its index in the gallery."""
    path = request.path
    url = path[3:] if 'en' in path else path
    galerija = Fotografija.objects.all().order_by('-Datum_objave')
    fotka = Fotografija.objects.get(id=pk)
    # Locate the photo's position in the ordered gallery so the template
    # can open the carousel at the right slide.
    for idx, foto in enumerate(galerija):
        if foto.id == fotka.id:
            fotka_num = idx
    print(fotka.Ime)
    print(fotka.Slika.url)
    context = {'fotka': fotka, 'galerija': galerija, 'fotka_num': fotka_num, 'url': url}
    return render(request, 'cpi/galerija_detalji.html', context)
def tim_detalji(request, pk):
    """Render the detail page for a single team member."""
    path = request.path
    url = path[3:] if 'en' in path else path
    clan_tima = ClanTima.objects.get(id=pk)
    return render(request, 'cpi/tim_clan.html', {'clan_tima': clan_tima, 'url': url})
def partneri_detalji(request, pk):
    """Render the detail view for a single partner."""
    path = request.path
    url = path[3:] if 'en' in path else path
    partner = Partner.objects.get(id=pk)
    # NOTE(review): this renders the *project* detail template -- looks like
    # a copy-paste from projekti_detalji; confirm a partner template isn't
    # intended before changing it.
    return render(request, 'cpi/projekat_detalji.html', {'partner': partner, 'url': url})
def video(request):
    """Render the video-clips listing page, newest first."""
    path = request.path
    url = path[3:] if 'en' in path else path
    videos = VideoKlip.objects.all().order_by('-Datum_objave')
    return render(request, 'cpi/video.html', {'videos': videos, 'url': url})
|
986,207 | df6b6ff8b50159b0e2f6564a7f93d78689bf763c | import pathlib
import os
import tensorflow as tf
import numpy as np
import model
import losses
import dataset_helpers
DATA_PATH = os.path.realpath(os.path.join(__file__, '../../data'))
SHAPE_BASIS_PATH = os.path.join(DATA_PATH, "shape_basis.npy")
SHAPE_MEAN_PATH = os.path.join(DATA_PATH, "shape_mean.npy")
EXPR_BASIS_PATH = os.path.join(DATA_PATH, "expression_basis.npy")
COLOR_BASIS_PATH = os.path.join(DATA_PATH, "color_basis.npy")
MESH_FACES_PATH = os.path.join(DATA_PATH, "mesh_faces.npy")
@tf.function
def train_step(input_image, generator, discriminator,
               gen_optimizer, disc_optimizer,
               shape_mean, shape_basis, expr_basis,
               mesh_faces):
    """Run one adversarial training step for the face generator/discriminator.

    A random face batch is sampled from the morphable model, rendered as a
    PNCC map, concatenated with the input photo as the generator's input;
    the generator predicts texture + UV map, which are rendered back into a
    textured face image that the discriminator scores against the photo.

    Returns (gen_loss, disc_loss, textured_face, pncc, texture) for logging.

    NOTE(review): statement nesting reconstructed from flattened source --
    the gradient computation/application is placed after the tape context,
    the usual GAN pattern; confirm against the original file.
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        batch_size = tf.shape(input_image)[0]
        canvas_size = tf.shape(input_image)[1]
        # Sample random face geometry, pose and lighting from the 3D model.
        verts, pose_matrix, lights = model.gen_face_batch(shape_mean, shape_basis, expr_basis,
                                                          batch_size)
        pncc = model.render_face_pncc_batch(verts, mesh_faces, pose_matrix, canvas_size)
        # Generator sees the photo concatenated with the PNCC correspondence map.
        gen_input = tf.concat([input_image, pncc], axis=-1)
        gen_output = generator(gen_input, training=True)
        # First three channels: texture; remaining channels: UV map.
        texture = gen_output[:, :, :, :3]
        uv_map = gen_output[:, :, :, 3:]
        textured_face = model.render_face_texture_batch(
            texture, uv_map, lights, verts, mesh_faces, pose_matrix, canvas_size)
        # Discriminator is conditioned on gen_input (photo + PNCC).
        disc_real_output = discriminator([gen_input, input_image[:, :, :, :3]], training=True)
        disc_generated_output = discriminator([gen_input, textured_face], training=True)
        gen_loss = losses.generator_loss(disc_generated_output)
        disc_loss = losses.discriminator_loss(disc_real_output, disc_generated_output)
    generator_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    gen_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
    disc_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
    return gen_loss, disc_loss, textured_face, pncc, texture
def fit(train_ds, epochs,
        generator, discriminator, gen_optimizer, disc_optimizer,
        shape_mean, shape_basis, expr_basis, mesh_faces,
        summary_writer, checkpoint, checkpoint_path):
    """Training loop: runs ``train_step`` over the dataset for ``epochs``
    epochs, logging averaged losses every 10 steps, image previews every
    100 steps, and saving checkpoints every 10000 steps and per epoch.

    NOTE(review): the nesting of the periodic summary/checkpoint blocks was
    reconstructed from flattened source (all periods are multiples of 10,
    so nesting them under the step%10 check preserves when they fire) --
    confirm against the original file.
    """
    avg_gen_loss = tf.keras.metrics.Mean(name='avg_gen_loss')
    avg_disc_loss = tf.keras.metrics.Mean(name='avg_disc_loss')
    for epoch in range(epochs):
        print("Epoch: ", epoch)
        # Train
        for n, input_image in train_ds.enumerate():
            print('.', end='')  # progress dots, newline every 100 batches
            if (n + 1) % 100 == 0:
                print()
            gen_loss, disc_loss, textured_face, pncc, texture = train_step(
                input_image, generator, discriminator, gen_optimizer, disc_optimizer,
                shape_mean, shape_basis, expr_basis, mesh_faces)
            avg_gen_loss.update_state(gen_loss)
            avg_disc_loss.update_state(disc_loss)
            step = gen_optimizer.iterations
            if tf.equal(step % 10, 0):
                with summary_writer.as_default():
                    tf.summary.scalar('Generator loss', avg_gen_loss.result(), step=step)
                    tf.summary.scalar('Discriminator loss', avg_disc_loss.result(), step=step)
                    # Restart the running averages after each flush.
                    avg_gen_loss.reset_states()
                    avg_disc_loss.reset_states()
                    if tf.equal(step % 100, 0):
                        tf.summary.image('Generator output', textured_face, step=step)
                        tf.summary.image('PNCC preview', pncc, step=step)
                        tf.summary.image('texture preview', texture, step=step)
                    if tf.equal(step % 10000, 0):
                        checkpoint.save(file_prefix=checkpoint_path)
        print()
        # Save at the end of every epoch.
        checkpoint.save(file_prefix=checkpoint_path)
    # Final save after the last epoch (redundant with the per-epoch save
    # above, but kept as in the original).
    checkpoint.save(file_prefix=checkpoint_path)
if __name__ == '__main__':
    # Enable automatic mixed precision and XLA JIT compilation.
    os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
    tf.config.optimizer.set_jit(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)
    # Morphable-model components: mean shape, PCA bases and mesh topology.
    shape_mean = tf.constant(np.load(SHAPE_MEAN_PATH).astype(np.float32))
    shape_basis = tf.constant(np.load(SHAPE_BASIS_PATH).astype(np.float32))
    expr_basis = tf.constant(np.load(EXPR_BASIS_PATH).astype(np.float32))
    mesh_faces = tf.constant(np.load(MESH_FACES_PATH).T)
    home = pathlib.Path.home()
    checkpoint_path = os.path.join(home, "tensorflow_training", "realistic_faces", "ckpts")
    summary_path = os.path.join(home, "tensorflow_training", "realistic_faces", "summary")
    ffhq_base_path = os.path.join(home, "Datasets", "FlickrFacesHQ", "images1024x1024")
    celebahq_base_path = os.path.join(home, "Datasets", "CelebA", "celebahq_img")
    # Merge both face datasets into one shuffled, batched input pipeline.
    filenames = \
        dataset_helpers.filenames_from_dataset_path(ffhq_base_path) + \
        dataset_helpers.filenames_from_dataset_path(celebahq_base_path)
    ds = tf.data.Dataset.from_tensor_slices(filenames).shuffle(len(filenames))
    ds = ds.map(dataset_helpers.load_image)
    ds = ds.shuffle(48).batch(4)
    generator = model.Generator()
    discriminator = model.Discriminator()
    # Generator learns twice as fast as the discriminator (2e-4 vs 1e-4).
    gen_optimizer = tf.optimizers.Adam(2e-4, beta_1=0.5)
    disc_optimizer = tf.optimizers.Adam(1e-4, beta_1=0.5)
    checkpoint = tf.train.Checkpoint(generator_optimizer=gen_optimizer,
                                     discriminator_optimizer=disc_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)
    summary_writer = tf.summary.create_file_writer(logdir=summary_path)
    fit(ds, 100, generator, discriminator, gen_optimizer, disc_optimizer,
        shape_mean, shape_basis, expr_basis, mesh_faces,
        summary_writer, checkpoint, checkpoint_path)
|
986,208 | a2ef62a56613b935245c69a81e8ad9cd36c364f2 | # STATUS builds, need sample
class jansson:
    """Fuzz-target descriptor for the jansson JSON library (consumed by the
    fuzzing harness, which reads these attributes by convention)."""
    name = __name__  # NOTE(review): this is the module's __name__, not "jansson"
    home = "http://www.digip.org/jansson/"
    # Checkout command template; {destination} is filled in by the harness.
    scmOrigin = "git clone https://github.com/akheron/jansson {destination}"
    # Input formats this target consumes.
    dataTypes = [
        "json"
    ]
    # Binary produced by the build steps below, used as the fuzz target.
    target = "test/bin/json_process"
    targetParam = ""
    aflFuzzParam = ""
    # Shell commands to reset the source tree.
    clean = [
        "make distclean"
    ]
    # Shell commands to build with the AFL compiler ({AFL_CC} substituted).
    build = [
        "autoreconf -i",
        "./configure --disable-shared CC={AFL_CC}",
        "make",
        "cd test/bin && make json_process",
    ]
|
986,209 | 676b4eb2f036c5379c5367528429cd124facc5b0 | # Carlos Badillo García
# Programa que lee el valor de cada uno de los lados de un triangulo e indica el tipo de triángulo que es
def indicarTipoTriangulo(lado1, lado2, lado3):
    """Classify a triangle from its three side lengths.

    Returns a Spanish description: equilateral, isosceles, right
    ("rectángulo"), scalene ("escaleno"), or a message saying the sides do
    not form a triangle.
    """
    lados = sorted([lado1, lado2, lado3])
    # BUG FIX: validate the triangle inequality (and positivity) first; the
    # original reported impossible side sets such as (1, 1, 5) as isosceles.
    if lados[0] <= 0 or lados[0] + lados[1] <= lados[2]:
        return "Estos lados no corresponden a un triángulo"
    if lado1 == lado2 == lado3:
        return "El triángulo es equilátero"
    # Exactly two equal sides (all-equal was handled above).
    if lado1 == lado2 or lado1 == lado3 or lado2 == lado3:
        return "El triángulo es isósceles"
    # Pythagorean check on the sorted sides (hypotenuse is the largest).
    if lados[0] ** 2 + lados[1] ** 2 == lados[2] ** 2:
        return "El triángulo es rectángulo"
    # BUG FIX: a valid scalene triangle (e.g. 4, 5, 6) previously fell
    # through to the "not a triangle" message.
    return "El triángulo es escaleno"
def main():
    """Prompt the user for three side lengths, classify the triangle and
    print the result."""
    lado1 = int(input("Teclea valor del lado1: "))
    lado2 = int(input("Teclea valor del lado2: "))
    lado3 = int(input("Teclea valor del lado3: "))
    tipoTriangulo = indicarTipoTriangulo(lado1, lado2, lado3)
    print (tipoTriangulo)
main() |
986,210 | 8f8fd4350909cb7069324d348f7eb3a0112ad4ae | """
Helper moduel to handle temporary files as well as provide a clean call to
interface with Tango server for submissions.
"""
import time
from subprocess import run
def submit(user, user_file):
    """
    Submit a user's file as a job to the Tango server.

    Builds the JSON job description and delegates the actual request to
    ``submit.sh``.

    :param user: username the job is submitted under
    :param user_file: path of the file to submit (graded as primes.py)
    """
    import json  # local import keeps this helper self-contained
    # BUG FIX (robustness): the original spliced the filename into a JSON
    # string with str.replace, which produces invalid JSON if the name
    # contains quotes or backslashes; json.dumps escapes it correctly.
    user_job = json.dumps({"localFile": user_file, "destFile": "primes.py"})
    run(['bash', 'submit.sh', user_file, user_job, user])
def create_temp_copy(user, code):
    """
    Write the user's submitted code to a per-user temporary file named
    ``<user>_primes.py`` (used both for Tango jobs and hint lookup).

    :param user: username, used to namespace the temporary file
    :param code: source code text to write
    :return: the filename that was created
    """
    fname = user + "_primes.py"
    # Use a context manager so the handle is closed even if write() fails.
    with open(fname, 'w') as user_file:
        user_file.write(code)
    return fname
def clean_up(user, fname, tango_output):
    """
    Remove the temporary submission file and the Tango output file.

    The one-second sleeps deliberately give the Tango server some leeway to
    finish writing before its artifacts are deleted.

    :param user: username the job was submitted under
    :param fname: path of the temp copy created by create_temp_copy
    :param tango_output: directory prefix containing Tango's <user>.out files
    """
    time.sleep(1)
    run(['rm', fname])
    time.sleep(1)
    # Tango writes the displayed result to <tango_output><user>.out
    path = tango_output + user + '.out'
    run(['rm', path])
|
986,211 | d19669d0181a164fd36d635f0fb020c51345357a | import re
import json
import pandas as pd
import pickle
import random
import numpy as np
import operator
import gensim.models
from gensim.test.utils import datapath
from gensim import utils
from gensim.parsing.preprocessing import preprocess_string, strip_tags, strip_punctuation, strip_multiple_whitespaces, \
strip_numeric, remove_stopwords, strip_short
from nltk.tokenize import sent_tokenize
from scipy import spatial
# Greeting Inputs
GREETING_INPUTS = ["hi", "hello", "hola", "greetings", "wassup", "hey"]
# Greeting responses back to the user
GREETING_RESPONSES = ["hi", "hey", "what's good", "hello", "hey there"]
def aggregate_embeddings(list_of_embeddings):
    """
    Collapse several word vectors into one by element-wise averaging,
    yielding the "average context" of the words.

    :param list_of_embeddings: list of numpy arrays of the same dimension
    :return: numpy array -- the centroid of the inputs
    """
    stacked = np.asarray(list_of_embeddings)
    return stacked.mean(axis=0)
def is_greeting(sentence):
    """Return True if any whitespace-separated word of *sentence* is one of
    the known greeting words in GREETING_INPUTS (case-insensitive)."""
    # Idiom fix: the original built a full list of booleans via a redundant
    # "True if ... else False" ternary; any() on a generator short-circuits.
    return any(word.lower() in GREETING_INPUTS for word in sentence.split())
class niceeeText:
    """Twitter-text scrubbing/embedding pipeline plus retrieval of the most
    similar stored answer, backed by a pre-trained Word2Vec model."""

    def __init__(self):
        # Word vectors trained on tweets; self.key maps answer text -> embedding.
        self.w2v = gensim.models.Word2Vec.load('untuned_twit2vec').wv
        with open('answer_embeddings.pickle', 'rb') as file:
            self.key = pickle.load(file)

    @staticmethod
    def remove_emojis(text_array):
        """
        remove emoji unicode characters from text array
        :param text_array: numpy array of text
        :return: numpy array of text, w/o emojis
        """
        # emoji regular expression #
        emoji_pattern = re.compile(
            "["
            "\U0001F1E0-\U0001F1FF"  # flags (iOS)
            "\U0001F300-\U0001F5FF"  # symbols & pictographs
            "\U0001F600-\U0001F64F"  # emoticons
            "\U0001F680-\U0001F6FF"  # transport & map symbols
            "\U0001F700-\U0001F77F"  # alchemical symbols
            "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
            "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
            "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
            "\U0001FA00-\U0001FA6F"  # Chess Symbols
            "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
            "\U00002702-\U000027B0"  # Dingbats
            "\U000024C2-\U0001F251"
            "]+"
        )
        return np.array([re.sub(emoji_pattern, "", str(string)) for string in text_array])

    @staticmethod
    def remove_handles(text_array):
        """
        remove twitter handles (@name) from an array of text
        :param text_array: numpy array of text
        :return: numpy array of text without handles
        """
        handle_pattern = re.compile(r'([@])\w+')
        return np.array([re.sub(handle_pattern, "", str(string)) for string in text_array])

    @staticmethod
    def replace_hashtags(text_array,
                         repchar=' '):
        """
        replace hashtags (#tag) in an array of text with repchar
        :param text_array: numpy array of text
        :param repchar: replacement character, must be a string
        :return: numpy array of text with replaced hashtags
        """
        assert isinstance(repchar, str)
        hashtag_pattern = re.compile(r'([#])\w+')
        return np.array([re.sub(hashtag_pattern, repchar, str(string)) for string in text_array])

    def total_twitter_scrub(self, text_array):
        """
        wrapper calling remove_emojis, remove_handles and replace_hashtags
        :param text_array: numpy array of text
        :return: numpy array of scrubbed text
        """
        return self.replace_hashtags(self.remove_handles(self.remove_emojis(text_array)))

    @staticmethod
    def process_text(document):
        """
        gensim text preprocessing wrapper minus lemmatization bc of acronyms
        :param document: document of text to be preprocessed
        :return: clean tokens of the document
        """
        return preprocess_string(document,
                                 filters=[strip_tags, strip_punctuation,
                                          strip_multiple_whitespaces,
                                          strip_numeric, remove_stopwords,
                                          strip_short]
                                 )

    def clean_corpus(self, corpus, flatten=True):
        """
        clean a corpus of text and tokenize it sentence by sentence
        :param flatten: boolean; flat list of words vs list of sentence lists
        :param corpus: raw string of corpus
        :return: tokens (flat, or grouped per sentence)
        """
        if flatten:
            t = [self.process_text(sent) for sent in list(sent_tokenize(corpus))]
            return [item for sublist in t for item in sublist]
        return [self.process_text(sent) for sent in list(sent_tokenize(corpus))]

    def embed_word(self, word):
        """
        embed a single word with self.w2v
        :param word: string, word to embed
        :return: numpy array embedding, or [nan] for out-of-vocabulary words
        """
        try:
            return self.w2v[str(word)]
        except KeyError:
            # Out-of-vocabulary sentinel; embed_corpus filters these out.
            return [np.nan]

    def embed_corpus(self, list_of_words):
        """
        embed every in-vocabulary word of a token list
        :param list_of_words: list of cleaned words to be embedded
        :return: list of numpy array embeddings (OOV words dropped)
        """
        return [self.embed_word(word) for word in list_of_words if not all(np.isnan(self.embed_word(word)))]

    def clean_then_embed(self, corpus):
        """
        wrapper: clean_corpus followed by embed_corpus
        :param corpus: raw string
        :return: list of numpy array embeddings
        """
        return self.embed_corpus(self.clean_corpus(corpus=corpus))

    def fetch_most_similar_answer(self, text, unclean=True, threshold=.25):
        """
        fetch the stored answer most similar to the text query
        :param text: raw input text
        :param unclean: boolean, please just pass raw text values
        :param threshold: minimum cosine *similarity* required to answer
        :return: string: best-matching answer, greeting, or apology
        """
        if is_greeting(text):
            return random.choice(GREETING_RESPONSES)
        if unclean:
            inp_embedded = aggregate_embeddings(self.clean_then_embed(text)).reshape(-1, 1)
            # Cosine DISTANCE to each stored answer (0 means identical direction).
            comparison_dict = {k: spatial.distance.cosine(v.reshape(-1, 1), inp_embedded) for k, v in
                               self.key.items()}
            # BUG FIX: the original selected max(...) over cosine *distances*,
            # i.e. the LEAST similar answer, and compared the threshold
            # against that distance. The most similar answer is the one with
            # the smallest distance; apologize when even its similarity
            # (1 - distance) falls below the threshold.
            best_answer, best_distance = min(comparison_dict.items(), key=operator.itemgetter(1))
            if 1 - best_distance < threshold:
                return "I apologize, I don't understand"
            return best_answer
        # NOTE(review): as in the original, unclean=False falls through and
        # returns None -- confirm callers never pass unclean=False.
|
986,212 | 9853d8eef5834e9f0062e3a80a55c400ff48fd00 | from selenium.webdriver.common.by import By
from base.base_page import BasePage
from pages.home.navigation_page import NavigationPage
import utilities.custom_logger as cl
class LoginPage(BasePage):
    """Page object for the login page: locators plus login/logout actions
    built on the BasePage element helpers."""

    log = cl.custom_logger()

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver
        # Used after logout to return to the main page.
        self.navigation_page = NavigationPage(driver)

    # Locators (XPath unless the helper is called with another locator type;
    # the plain ids/class names below rely on the helpers' defaults).
    _login_button = "//a[@href='/users/login/' and @role='button']"
    _username_field = "id_username"
    _login_password_field = "id_password"
    _submit_button = "//button[@name='submit']"
    _login_link = "//a[@class='nav-link' and @href='/users/login/']"
    _logout_link = "Log out"
    _alert_login = "alert"

    def click_login_button(self):
        """Open the login form if the login button is present."""
        if self.element_present(self._login_button, locator_type=By.XPATH):
            self.element_click(self._login_button, locator_type=By.XPATH)

    def enter_username_field(self, username):
        """Type *username* into the username input."""
        self.element_send_keys(username, self._username_field)

    def enter_password_field(self, password):
        """Type *password* into the password input."""
        self.element_send_keys(password, self._login_password_field)

    def click_submit_button(self):
        """Submit the login form."""
        self.element_click(self._submit_button, locator_type=By.XPATH)

    def login(self, username="", password=""):
        """Full login flow: open form, clear stale input, fill and submit."""
        self.click_login_button()
        self.clear_fields()
        self.enter_username_field(username)
        self.enter_password_field(password)
        self.click_submit_button()

    def verify_login_success(self, username):
        """Return True if the post-login greeting for *username* is shown."""
        result = self.element_present(f"//span[contains(text(), 'Hello, {username}')]", locator_type=By.XPATH)
        return result

    def verify_invalid_data_login_failed(self):
        """Return True if the invalid-credentials alert is displayed."""
        result = self.element_present(self._alert_login, locator_type=By.CLASS_NAME)
        return result

    def verify_empty_data_login_failed(self):
        """Return True if the login link is still shown (login did not happen)."""
        result = self.element_present(self._login_link, locator_type=By.XPATH)
        return result

    def verify_logged_page_title(self):
        """Check the page title after login."""
        return self.verify_page_title("Cooking Manager")

    def clear_fields(self):
        """Clear any text left in the username and password inputs."""
        username_field = self.get_element(locator=self._username_field)
        username_field.clear()
        password_field = self.get_element(locator=self._login_password_field)
        password_field.clear()

    def logout(self):
        """Log out via the 'Log out' link (if present) and return to the
        main page."""
        if self.element_present(locator=self._logout_link, locator_type=By.LINK_TEXT):
            self.element_click(locator=self._logout_link, locator_type=By.LINK_TEXT)
        self.navigation_page.navigate_to_main_page()
|
986,213 | b5eb51f5ff2448b94e40a0637f2009afb3bd1af2 | def check_string(some_string, valid_strings_start, valid_strings_end):
end_matches = 0
start_matches = 0
unit_len = len(valid_strings_start[0])
if len(some_string) % unit_len != 0:
return False
if some_string[:unit_len] in valid_strings_start:
start_matches += 1
else:
return False
if some_string[-1 * unit_len :] in valid_strings_end:
end_matches += 1
else:
return False
checking_end = False
for i in range(unit_len, len(some_string) - unit_len, unit_len):
substr = some_string[i: i+unit_len]
if not checking_end:
if substr in valid_strings_start:
start_matches += 1
else:
checking_end = True
if checking_end:
if substr in valid_strings_end:
end_matches += 1
else:
return False
return start_matches > end_matches
class LiteralRule:
    """A grammar rule that matches any one of a fixed set of literal strings."""

    def __init__(self, name, matched_str_list):
        self.name = name
        self.type = "literal"
        # Kept sorted so repr()/as_regex() output is deterministic.
        self.matched_str_list = sorted(matched_str_list)

    def match(self, some_string, rule_lookup):
        """Try each literal as a prefix of *some_string*.

        Returns (True, remainder) for the first matching literal, otherwise
        (False, untouched input). *rule_lookup* is unused here but kept for
        interface parity with recursive rules.
        """
        for literal in self.matched_str_list:
            if some_string.startswith(literal):
                return True, some_string[len(literal):]
        return False, some_string

    def complete_match(self, some_string, rule_lookup):
        """Return True only if one literal consumes the entire string."""
        matched, remainder = self.match(some_string, rule_lookup)
        return matched and remainder == ""

    def __repr__(self):
        return "{}: {}".format(self.name, "|".join(self.matched_str_list))

    def as_regex(self):
        """Alternation of all literals (no escaping: literals assumed regex-safe)."""
        return "|".join(self.matched_str_list)
class RecursiveRule:
    """A grammar rule defined as an ordered choice of rule sequences.

    ``subrules`` is a list of alternatives; each alternative is a list of
    rule *names* that must match consecutively. Names are resolved through
    the ``rule_lookup`` dict passed to every call.
    """

    def __init__(self, name, subrules):
        self.name = name
        self.type = "recursive"
        self.subrules = subrules

    def match(self, some_string, rule_lookup):
        """Try each alternative in order; return (True, remainder) for the
        first alternative whose rules all match consecutively, otherwise
        (False, the original string).

        NOTE(review): if ``self.subrules`` is empty, or an alternative is an
        empty list, ``matched`` is read unassigned/stale -- assumed never to
        happen with real grammars.
        """
        cur_string = some_string
        for rule_set in self.subrules:
            for rule in rule_set:
                matched, cur_string = rule_lookup[rule].match(cur_string, rule_lookup)
                if not matched:
                    # This alternative failed part-way: rewind the input and
                    # move on to the next alternative.
                    cur_string = some_string
                    break
            if matched:
                return True, cur_string
        return False, some_string

    def complete_match(self, some_string, rule_lookup):
        """Return True iff some alternative consumes the entire string."""
        matched, rem_string = self.match(some_string, rule_lookup)
        if matched and rem_string != "":
            return False
        return matched

    def attempt_to_make_literal(self, rule_lookup):
        """Try to collapse this rule (and, recursively, its children) into a
        flat LiteralRule enumerating every string the rule can match.

        Fails -- returning (False, rule_lookup) -- when the rule references
        itself, directly or through a child. On success the expanded
        LiteralRule replaces this rule inside ``rule_lookup``.

        :return: tuple (success flag, possibly-updated rule_lookup)
        """
        success_for_sub_rules = True
        for rule_set in self.subrules:
            for rule in rule_set:
                if rule == self.name:
                    # Direct self-reference: the language is infinite, so it
                    # cannot be enumerated as literals.
                    success_for_sub_rules = False
                    continue
                if rule_lookup[rule].type == "recursive":
                    success, rule_lookup = rule_lookup[rule].attempt_to_make_literal(rule_lookup)
                    success_for_sub_rules = success_for_sub_rules and success
        if not success_for_sub_rules:
            return False, rule_lookup
        possible_matches = []
        for rule_set in self.subrules:
            # Cross-product expansion: every literal of rule N appended to
            # every string accumulated from rules 0..N-1.
            cur_matches = rule_lookup[rule_set[0]].matched_str_list
            next_matches = []
            for rule in rule_set[1:]:
                for word2 in rule_lookup[rule].matched_str_list:
                    for word1 in cur_matches:
                        new_word = f"{word1}{word2}"
                        if new_word not in next_matches:
                            next_matches.append(new_word)
                cur_matches = next_matches
                next_matches = []
            possible_matches += cur_matches
        new_rule = LiteralRule(self.name, possible_matches)
        rule_lookup[self.name] = new_rule
        return True, rule_lookup
class LoopingRuleSingle:
    """Rule that matches one or more consecutive repetitions of a sub-rule."""

    def __init__(self, name, looped_literal_sub_rule):
        self.name = name
        self.subrule = looped_literal_sub_rule

    def match(self, some_string, rule_lookup):
        """Consume repetitions of the sub-rule until it fails or the string
        is exhausted; return the last (matched, remainder) pair."""
        matched, remainder = self.subrule.match(some_string, rule_lookup)
        while matched and remainder != "":
            matched, remainder = self.subrule.match(remainder, rule_lookup)
        return matched, remainder
class LoopingRuleDoubleCustom:
    """Rule matching ``A^n B^m`` with n > m >= 1.

    Greedily consumes repetitions of ``subrule1``, then repetitions of
    ``subrule2``, and only succeeds when subrule1 repeated strictly more
    times than subrule2 (the shape needed by e.g. AoC-2020 day 19's
    looping rules, where the strict inequality is what the
    ``reps2 >= reps1 -> fail`` check expresses).
    """

    def __init__(self, name, looped_literal_sub_rule1, looped_literal_sub_rule2):
        self.name = name
        self.subrule1 = looped_literal_sub_rule1
        self.subrule2 = looped_literal_sub_rule2

    def match(self, some_string, rule_lookup):
        """Return (matched, remainder); on failure the original string is
        handed back unchanged as the remainder.

        BUGFIX: the repetition counters previously incremented on every
        match *attempt*, including the failed attempt that terminates each
        loop, so they overshot by one (e.g. ``A B`` wrongly matched because
        reps1 was counted as 2).  Counters now track successes only, and
        the leftover debug prints are removed.
        """
        matched, remainder = self.subrule1.match(some_string, rule_lookup)
        if not matched:
            return False, some_string
        reps1 = 1
        while matched and remainder != "":
            matched, remainder = self.subrule1.match(remainder, rule_lookup)
            # The final failed attempt just marks the switch-over point to
            # subrule2 and must not be counted.
            if matched:
                reps1 += 1
        matched, remainder = self.subrule2.match(remainder, rule_lookup)
        if not matched:
            return False, some_string
        reps2 = 1
        while matched and remainder != "":
            matched, remainder = self.subrule2.match(remainder, rule_lookup)
            if matched:
                reps2 += 1
        if not matched or reps2 >= reps1:
            return False, some_string
        return matched, remainder

    def complete_match(self, some_string, rule_lookup):
        """True only when the whole string is consumed by a valid match."""
        matched, remainder = self.match(some_string, rule_lookup)
        return matched and remainder == ""
class LoopingRuleDouble:
    """Rule matching ``A^n B^n``: equal repetition counts of two fixed-width
    literal sub-rules."""

    def __init__(self, name, looped_literal_sub_rule1, looped_literal_sub_rule2):
        self.name = name
        self.subrule1 = looped_literal_sub_rule1
        self.subrule2 = looped_literal_sub_rule2
        # Fixed chunk widths let the string be split up-front.
        self.rule1_len = len(looped_literal_sub_rule1.matched_str_list[0])
        self.rule2_len = len(looped_literal_sub_rule2.matched_str_list[0])

    def match(self, some_string, rule_lookup):
        """Split the string into an A-half and a B-half with equal repetition
        counts and require each sub-rule to fully consume its half."""
        pair_len = self.rule1_len + self.rule2_len
        if len(some_string) % pair_len != 0:
            return False, some_string
        reps = len(some_string) // pair_len
        split_at = reps * self.rule1_len
        head, tail = some_string[:split_at], some_string[split_at:]
        for subrule, segment in ((self.subrule1, head), (self.subrule2, tail)):
            matched, remainder = subrule.match(segment, rule_lookup)
            while matched and remainder != "":
                matched, remainder = subrule.match(remainder, rule_lookup)
            if not matched:
                return False, some_string
        return matched, remainder
def parse_rule_from_line(line):
    """Parse one ``name: definition`` grammar line into (name, rule).

    A quoted definition (e.g. ``4: "a"``) yields a LiteralRule; anything
    else becomes a RecursiveRule whose alternatives are the ``|``-separated
    lists of rule names.
    """
    name, _, definition = line.strip().partition(": ")
    if '"' in definition:
        # Strip the surrounding quotes from the literal.
        return name, LiteralRule(name, [definition[1:-1]])
    alternatives = [alternative.split() for alternative in definition.split(" | ")]
    return name, RecursiveRule(name, alternatives)
|
# prob 1302 from https://www.acmicpc.net/problem/1302
# Read N book titles and print the best-selling one; ties on the sales
# count are broken by taking the lexicographically smallest title.
N = int(input())
sales = {}
for _ in range(N):
    title = input()
    sales[title] = sales.get(title, 0) + 1
if sales:
    best = min(sales, key=lambda t: (-sales[t], t))
else:
    best = ""
print(best)
|
986,215 | 092ff56924a35c4139fff34f88d24b5bbe986bab | import pymysql
def printCursor(cursor):
    """Print every row produced by *cursor*, one per line."""
    for row in cursor:
        print(row)
# Open a connection to the local `locofi` MySQL database and grab a cursor.
# NOTE(review): empty root password and hard-coded credentials — fine for a
# local dev box, but these should come from configuration in anything shared.
mydb = pymysql.connect(host="localhost", user="root",password="" ,database="locofi")
mycursor = mydb.cursor()
|
986,216 | c880fd89e1d8b1ec2c806433c090ee06eb4f22a4 | # Program No : 17
# program to create a histogram from a given list of integers
def Histogram(lis, sym):
    """Print a histogram: one row per entry of *lis*, each row repeating
    *sym* (followed by a space) that many times."""
    for count in lis:
        print((sym + " ") * count)

Histogram([2,5,3,2,1],'9')
# Program to concatenate all elements in a list into a string and return it
def Concate(llist):
    """Concatenate the string form of every element of *llist*.

    Note: the result always carries the original single leading space.
    """
    return " " + "".join(str(item) for item in llist)

print("Concatinated : ",Concate(['A','IM','BI','T','CH','PR','OGRA','M','MER']))
# Program to print all even numbers from the given list, in order, stopping
# as soon as the value 237 is reached.
numbers = [
    386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
    399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
    815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
    958, 743, 527]
new_list = []
for value in numbers:
    if value == 237:
        break
    if value % 2 == 0:
        new_list.append(value)
print("New List : ", new_list)
|
986,217 | eb68b0661045ab402750ec76996e6c3a517b1778 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 23 11:01:24 2015
@author: Mathew Topper
"""
"""
TBC
"""
# Set up logging
import logging
module_logger = logging.getLogger(__name__)
import abc
import socket
import contextlib
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from .misc import safe_update
#Create and engine and get the metadata
Base = declarative_base()
class Database(object):

    """Abstract base wrapping an SQLAlchemy engine / session for one
    database backend.

    Subclasses provide the connection defaults, the list of supported
    DBAPI adapters and the connection string format.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, adapter_name, config_dict=None):
        # Validate the adapter name up front; the engine itself is created
        # lazily in configure().
        self._adapter = self._init_adapter(adapter_name)
        self._credentials = self._init_credentials()
        self._echo = False
        self._engine = None
        self._meta = None
        self._timeout = None
        self.session = None

        if config_dict is not None: self.set_credentials(config_dict)

        return

    @abc.abstractproperty
    def default_port(self):
        '''The default port for connecting to the database'''
        return

    @abc.abstractproperty
    def default_user_id(self):
        '''The default user id for connecting to the database'''
        return

    @abc.abstractproperty
    def default_password(self):
        '''The default password for connecting to the database'''
        return

    @abc.abstractproperty
    def default_database(self):
        '''The default database name'''
        return

    @abc.abstractproperty
    def valid_adapters(self):
        '''List of valid adapters for the SQL manager'''
        return

    def _init_adapter(self, adapter_name):
        """Return *adapter_name* if supported, otherwise raise ValueError."""
        if not self._is_valid_adapter(adapter_name):
            errStr = ("Adapter {} is not valid for this SQL "
                      "manager.").format(adapter_name)
            raise ValueError(errStr)

        return adapter_name

    def _init_credentials(self):
        """Return an all-None credentials template."""
        credentials_dict = {'host': None,
                            'port': None,
                            'dbname': None,
                            'user': None,
                            'pwd': None
                            }

        return credentials_dict

    def set_echo(self, echo):
        """Enable/disable SQLAlchemy statement echoing (takes effect on the
        next configure())."""
        self._echo = echo
        return

    def set_timeout(self, timeout):
        """Store a timeout for subclasses to apply when configuring."""
        self._timeout = timeout
        return

    def set_credentials(self, config_dict):
        """Merge recognised keys of *config_dict* into the credentials."""
        self._credentials = safe_update(self._credentials, config_dict)
        return

    def get_credentials(self):
        """Return the credentials with unset values filled in from the
        subclass defaults."""
        default_dict = {'host': None,
                        'port': self.default_port,
                        'dbname': self.default_database,
                        'user': self.default_user_id,
                        'pwd': self.default_password
                        }

        credentials = safe_update(default_dict, self._credentials)

        return credentials

    @abc.abstractmethod
    def get_connection_string(self):
        raise NotImplementedError('This superclass can not be used to '
                                  'generate a connection string.')

    def configure(self, engine_args=None, connect_args=None):
        """Create the engine, bound metadata and a fresh session."""
        connection_string = self.get_connection_string()

        kwargs = {"echo": self._echo}

        if engine_args is not None:
            kwargs.update(engine_args)

        if connect_args is not None:
            kwargs["connect_args"] = connect_args

        self._engine = create_engine(connection_string,
                                     **kwargs)
        self._meta = MetaData(bind=self._engine)

        Session = sessionmaker(bind=self._engine)
        self.session = Session()

        return

    def reflect_table(self, table_name,
                      remove_trailing_space=True):
        """Reflect *table_name* from the live database into the metadata."""
        kwargs = {"autoload": True,
                  "autoload_with": self._engine}

        # Sanitise the table names if required
        if remove_trailing_space: table_name = table_name.rstrip()

        reflected = Table(table_name,
                          self._meta,
                          **kwargs)

        return reflected

    def safe_reflect_table(self, table_name):
        '''If the table is already in the meta data take that version rather
        than reflecting again.'''
        meta_name = table_name

        if meta_name in self._meta.tables:
            table = self._meta.tables[meta_name]
        else:
            table = self.reflect_table(table_name)

        return table

    @contextlib.contextmanager
    def exectute_query(self, query):
        """Context manager yielding the result of executing *query*.

        Raises IOError when no engine has been configured.  (The method
        name keeps its historical misspelling because subclasses and
        callers already use it.)
        """
        if self._engine is None:
            # BUGFIX: previously the IOError object was *yielded* as the
            # context value and execution then crashed on the unset engine;
            # raising is the clearly intended behaviour (cf.
            # execute_transaction below).
            errStr = "No connection has been made."
            raise IOError(errStr)

        connection = self._engine.connect()

        try:
            result = connection.execute(query)
            yield result
        finally:
            connection.close()

    def execute_transaction(self, query):
        """Run *query* inside a transaction (commit on success)."""
        if self._engine is None:
            errStr = "No connection has been made."
            raise IOError(errStr)

        # runs a transaction
        with self._engine.begin() as connection:
            connection.execute(query)

        return

    def call_stored_proceedure(self, proceedure_name, proceedure_args):
        '''Return the results from calling a stored proceedure. Note this
        is not DB agnostic as not all SQL DBs support stored proceedures.'''
        connection = self._engine.raw_connection()

        try:
            cursor = connection.cursor()
            cursor.callproc(proceedure_name, proceedure_args)
            results = list(cursor.fetchall())
            cursor.close()
            connection.commit()
        finally:
            connection.close()

        return results

    def close(self):
        """Dispose of the engine's connection pool and drop the session.

        Safe to call on a never-configured instance.
        """
        # BUGFIX: guard against close() before configure(), which used to
        # raise AttributeError on the None engine (notably from __del__).
        if self._engine is not None:
            self._engine.dispose()
        self.session = None

        return

    def _is_valid_adapter(self, adapter_name):
        """Return true if the adapter is valid for the SQL manager."""
        valid_adapters = self.valid_adapters

        result = False

        if adapter_name in valid_adapters:
            result = True

        return result

    def _get_first_entries(self, query_str):
        """Run *query_str* and return the first column of every row."""
        with self.exectute_query(query_str) as result:
            first_entries = [row[0] for row in result]

        return first_entries

    def __del__(self):
        # Never let cleanup errors escape during garbage collection or
        # interpreter shutdown.
        try:
            self.close()
        except Exception:
            pass
class PostgreSQL(Database):

    """PostgreSQL flavour of Database (psycopg2 adapter).

    Adds schema-aware reflection, server-side cursor queries and a few
    information_schema conveniences on top of the generic base class.
    """

    def __init__(self, adapter_name, config=None):
        super(PostgreSQL, self).__init__(adapter_name, config)

    @property
    def default_port(self):
        # Standard PostgreSQL server port.
        return 5432

    @property
    def default_user_id(self):
        return "postgres"

    @property
    def default_password(self):
        return "postgres"

    @property
    def default_database(self):
        return "postgres"

    @property
    def valid_adapters(self):
        # Only the psycopg2 DBAPI driver is supported.
        return ["psycopg2"]

    def get_connection_string(self):
        """Build a postgresql+<adapter>://user:pwd@host:port/dbname URL."""
        credentials = self.get_credentials()

        host = credentials['host']
        port = credentials['port']
        uid = credentials['user']
        pwd = credentials['pwd']
        db_name = credentials['dbname']

        hostString = 'postgresql+{}://{}:{}@{}:{}'.format(self._adapter,
                                                          uid,
                                                          pwd,
                                                          host,
                                                          port)

        conn_string = '{}/{}'.format(hostString, db_name)

        return conn_string

    def configure(self):
        """Create engine and session, applying any configured timeout to
        both the connection pool and the server connection attempt."""
        engine_args = None
        connect_args = None

        if self._timeout is not None:
            engine_args = {"pool_timeout": self._timeout}
            connect_args = {'connect_timeout': self._timeout}

        super(PostgreSQL, self).configure(engine_args, connect_args)

        return

    def reflect_table(self, table_name,
                      schema="public",
                      remove_trailing_space=True):
        """Reflect *table_name* from the given schema into the metadata."""
        # Sanitise the table names if required
        if remove_trailing_space: table_name = table_name.rstrip()

        kwargs = {"autoload": True,
                  "autoload_with": self._engine,
                  "schema": schema}

        reflected = Table(table_name,
                          self._meta,
                          **kwargs)

        return reflected

    def safe_reflect_table(self, table_name, schema="public"):
        '''If the table is already in the meta data take that version rather
        than reflecting again.'''
        # Metadata keys are schema-qualified for PostgreSQL.
        meta_name = "{}.{}".format(schema, table_name)

        if meta_name in self._meta.tables:
            table = self._meta.tables[meta_name]
        else:
            table = self.reflect_table(table_name, schema)

        return table

    def drop_columns(self, table_name, column_list, schema="public"):
        """Drop each column in *column_list* from the (schema-qualified)
        table, one transaction per column.

        NOTE(review): identifiers are interpolated into the SQL string;
        only call this with trusted table/column names.
        """
        table_name = "{}.{}".format(schema, table_name)

        for column_name in column_list:

            query_str = ('ALTER TABLE {} DROP COLUMN '
                         '"{}";').format(table_name, column_name)

            self.execute_transaction(query_str)

        return

    def get_table_names(self, schema=None):
        """List table names, optionally restricted to one schema."""
        query_str = "SELECT table_name FROM information_schema.tables"

        if schema is not None: query_str += (" WHERE table_schema = "
                                             "'{}'").format(schema)

        query_str += ";"

        table_names = self._get_first_entries(query_str)

        return table_names

    def get_column_names(self, table, schema=None):
        """List the column names of *table* (optionally within *schema*)."""
        query_str = ("SELECT column_name FROM information_schema.columns "
                     "WHERE table_name = '{}'").format(table)

        if schema is not None: query_str += (" AND table_schema = "
                                             "'{}'").format(schema)

        query_str += ";"

        column_names = self._get_first_entries(query_str)

        return column_names

    def get_db_names(self):
        """List every database on the connected server."""
        query_str = "SELECT datname FROM pg_database;"

        with self.exectute_query(query_str) as result:
            db_names = [row[0] for row in result]

        return db_names

    def has_permission(self, table_name):
        """True if the current role may SELECT from *table_name*."""
        query_str = ("select "
                     "has_table_privilege('{}','select');").format(table_name)

        with self.exectute_query(query_str) as result:
            permissions = [row[0] for row in result]

        return permissions[0]

    def server_execute_query(self, query,
                             fetch_limit=1000,
                             cursor_limit=None,
                             cursor_name="aneris client"):
        """Run *query* through a named (server-side) cursor, fetching in
        batches of *fetch_limit* rows; *cursor_limit* caps the total rows
        by appending a LIMIT clause when the query does not have one."""
        if self._engine is None:
            errStr = "No connection has been made."
            raise IOError(errStr)

        # Sanitise the line limit and query string
        safe_fetch_limit = int(fetch_limit)
        new_query = query.strip()

        # Add cursor limit to query if requested and its not already been set
        if cursor_limit is not None and "LIMIT" not in new_query:

            safe_cursor_limit = int(cursor_limit)

            if new_query[-1] == ";": new_query = new_query[:-1]
            new_query += " LIMIT {:d};".format(int(safe_cursor_limit))

        connection = self._engine.raw_connection()

        msg = "Executing server side query: {}".format(new_query)
        module_logger.debug(msg)

        try:

            cursor = connection.cursor(cursor_name)
            cursor.execute(new_query)

            results = []

            while True:

                rows = cursor.fetchmany(safe_fetch_limit)

                if not rows: break

                module_logger.debug("Fetched {} rows".format(len(rows)))

                for row in rows: results.append(row)

            cursor.close()
            connection.commit()

        finally:

            connection.close()

        return results
class SQLite(Database):

    """SQLite flavour of Database: file-backed when a name is set,
    otherwise in-memory."""

    def __init__(self):
        super(SQLite, self).__init__(None)

    @property
    def default_port(self):
        # SQLite connects to a file, not a network port.
        return None

    @property
    def default_user_id(self):
        return None

    @property
    def default_password(self):
        return None

    @property
    def default_database(self):
        return ""

    @property
    def valid_adapters(self):
        # The default sqlite3 driver is identified by None.
        return [None]

    def set_dbname(self, dbname):
        """Point the connection at the database file *dbname*."""
        self.set_credentials({'dbname': dbname})
        return

    def get_connection_string(self):
        """Build an sqlite:// URL; no file name means an in-memory DB."""
        db_name = self.get_credentials()['dbname']
        root = 'sqlite://'
        return '{}/{}'.format(root, db_name) if db_name else root

    def get_table_names(self):
        """List user tables via the sqlite_master catalogue."""
        query_str = "SELECT name FROM sqlite_master WHERE type='table'"
        return self._get_first_entries(query_str)
def check_host_port(host_ip, port):
    """Check if a connection can be established to the given host and port.

    Returns (reachable, message) and never raises on connection failure;
    the socket is always closed.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    prefix = "Host: {} Port: {} ".format(host_ip, port)

    try:
        probe.connect((host_ip, port))
    except socket.error:
        reachable, msg = False, prefix + "CLOSED"
    else:
        reachable, msg = True, prefix + "OPEN"
    finally:
        probe.close()

    return reachable, msg
|
986,218 | 3fe0ae4b13d7a7ad87bf7b756a2ac9f5e5691528 | #!/usr/bin/python
"""
v.0.1 User's Command Lines v.0.1
\033[1mDESCRIPTION\033[0m
Based on blast result files (concatenated), this
program creates a protein similarity profile
among various species. Proteins for a given species
was blasted (blastp) against several different
species and the program returns which query protein
blasted on which species, according to thresholds
as established by Ludin et al. (2011).
\033[1mUSAGE\033[0m
%program <in_blast> <tag_file> <table_out> <stats_out> <host_proteins>
\033[1mtag_file\033[0m = file with names of species in each category
Similar to a FASTA file
________ File example starts below this line ________
>controls
a_thaliana
d_melanogaster
>parasites
e_granulosus
e_multilocularis
>Host
g_aculeatus
______________ File example stops here ______________
\033[1mhost_proteins\033[0m = output file containing sequences that blasted on
the host only.
\033[1mCREDITS\033[0m
DOC PANTS 2014 \m/
"""
import sys
from Bio import SeqIO
import re
# Pull the five required command-line arguments; on any failure show the
# module usage text and exit with an error status.
# NOTE(review): the bare except also hides unrelated errors; IndexError is
# what is actually expected here.
try:
    blast_file = sys.argv[1]      # concatenated blastp reports
    tag_file = sys.argv[2]        # species -> category definitions
    table_out = sys.argv[3]       # per-protein k-mer hit table
    stats_out = sys.argv[4]       # Venn-diagram style counts
    host_proteins = sys.argv[5]   # proteins that blasted on the host only
except:
    print __doc__
    sys.exit(1)
# BLASTp identity thresholds, empirically pre-defined by Ludin et al. 2011,
# keyed by alignment length (denominator) -> minimum identical positions
# (numerator) for the hit to count.

# High-similarity k-mers (host).
pos_thresholds = {10: 10, 11: 10, 12: 11, 13: 12, 14: 12}

# Conserved k-mers (controls + parasites).
cons_thresholds = {5: 5, 6: 5, 7: 5, 8: 6, 9: 7,
                   10: 7, 11: 8, 12: 8, 13: 9, 14: 11}
# Map each species name to its category (e.g. controls / parasites / host),
# read from the FASTA-like tag file: ">" lines start a new category and the
# lines that follow are species names belonging to that category.  Looking
# up species[name] later gives the category instantly.
species = {}
category = ""
with open(tag_file, "rU") as tag_f:
    for raw_line in tag_f:
        entry = raw_line.strip()
        if entry.startswith(">"):
            category = entry.split(">")[-1]
        else:
            species[entry] = category
# Parse the concatenated blastp report line by line: for every query k-mer,
# record the first (best) identity score against each species of interest,
# grouped under the k-mer's parent protein as
# proteins[protein][kmer_number][species] = [(identical, aligned)].
proteins = {} # Will contain info on kmers per protein and their blast results.
new_query = False
id_start = False
prot = ""
kmer = ""
hit_id = ""
with open(blast_file, "rU") as in_f:
    for line in in_f:
        line = line.strip()
        # When encounters a new query, stores k-mer name and
        # protein name and tells the program that a new query
        # was found.
        if line.startswith("Query=") and new_query == False:
            kmer = line.split("Query= ")[-1]
            # Query names look like <protein>_<kmer number>; drop the last
            # underscore-separated field to recover the protein name.
            l = len(kmer.split("_"))
            prot = "_".join(kmer.split("_")[:l-1])
            new_query = True
        # In case the new query has no hits found, restarts
        # everything back to zero, ready to find a new query.
        elif re.findall("No hits found", line) != [] and new_query:
            new_query = False
            kmer = ""
            prot = ""
        # When a hit for a given query is found
        elif line.startswith("> ") and new_query and id_start == False:
            hit_id = "_".join(line.split("> ")[-1].split("_")[:2]) # Stores hit ID
            if hit_id in species:
                id_start = True # Tells the program there is a new hit
                kmer_num = int(kmer.split("_")[-1]) # Stores k-mer number for the protein
                # Will create a new entry in the dict() with the hit ID.
                # Hit ID = species name. Will be used to store info, for
                # each protein, as to which k-mer blasted on which species
                # with success. At this stage, the dict() is basically getting
                # created and will be filled in when the identity score is found.
                if prot not in proteins:
                    proteins[prot] = {}
                    proteins[prot][kmer_num] = {}
                    proteins[prot][kmer_num][hit_id] = []
                elif prot in proteins:
                    if kmer_num not in proteins[prot]:
                        proteins[prot][kmer_num] = {}
                        proteins[prot][kmer_num][hit_id] = []
                    elif kmer_num in proteins[prot]:
                        proteins[prot][kmer_num][hit_id] = []
            # If hit_id is not among the species of interest
            # it won't be included in the dict()
            elif hit_id not in species:
                hit_id = ""
        # When identity score is found, stores the info in the big dict()
        elif line.startswith("Identities = ") and new_query and id_start:
            # "Identities = 10/12 (83%)" -> numerator 10, denominator 12.
            ident_num = int(line.split("Identities = ")[1].split(" (")[0].split("/")[0])
            ident_denom = int(line.split("Identities = ")[1].split(" (")[0].split("/")[-1])
            # If this k-mer has no blast identity score for this particular
            # species, the scores are entered in the dict(). When hit_id is
            # not empty, this means the k-mer had another hit in this species
            # and it has already been entered in the dict(). Only the first
            # hit per species, i.e. the best, is kept for each k-mer.
            if proteins[prot][kmer_num][hit_id] == []:
                proteins[prot][kmer_num][hit_id].append((ident_num, ident_denom))
                hit_id = ""
            id_start = False
        # When the query results are over, resets everything back to zero.
        elif line.startswith("Effective search space used:") and new_query:
            new_query = False
            kmer = ""
            prot = ""
# Venn-diagram buckets: for every category combination (controls, parasites,
# host and their intersections), count the proteins and the k-mers that
# blasted successfully against species of that combination.
controls = {"prot": 0, "kmers": 0}
parasites = {"prot": 0, "kmers": 0}
host = {"prot": 0, "kmers": 0}
cont_par = {"prot": 0, "kmers": 0}
cont_par_host = {"prot": 0, "kmers": 0}
par_host = {"prot": 0, "kmers": 0}
cont_host = {"prot": 0, "kmers": 0}

# Proteins whose k-mers hit the host species only.
host_only = set()
# Write the per-protein hit table and the Venn-category statistics, and
# collect the host-only protein list along the way.
with open(table_out, "w") as t_out:
    with open(stats_out, "w") as s_out:
        # Writes down species names in the output
        # file header (tab delimited).
        for spec_name in sorted(species):
            t_out.write("\t" + spec_name)
        # Start parsing the big dict() containing info on all
        # proteins.
        for protein in sorted(proteins):
            t_out.write("\n" + protein)
            # will contain the num. of k-mer that blasted against
            # each species for the protein. Start by adding all
            # species names and assigning them the value 0.
            ident_scores = {}
            for spec in species:
                ident_scores[spec] = 0
            for kmer in proteins[protein]:
                for hit_id in proteins[protein][kmer]: # hit_id = species name
                    # If species is not the host species, conserved protein thresholds are used
                    if species[hit_id] != "host": # dict() species used to retrieve category
                        if 5 <= proteins[protein][kmer][hit_id][0][1] < 15 :
                            if proteins[protein][kmer][hit_id][0][0] >= cons_thresholds[proteins[protein][kmer][hit_id][0][1]]:
                                ident_scores[hit_id] += 1
                    # If species is host, then high similarity threshold are used.
                    # NOTE(review): pos_thresholds only has keys 10-14; an
                    # alignment length >= 15 would raise KeyError here —
                    # presumably k-mers never exceed 14, confirm.
                    elif species[hit_id] == "host":
                        if proteins[protein][kmer][hit_id][0][1] >= 10:
                            if proteins[protein][kmer][hit_id][0][0] >= pos_thresholds[proteins[protein][kmer][hit_id][0][1]]:
                                ident_scores[hit_id] += 1
            in_cont = False
            in_para = False
            in_host = False
            num_kmers = 0
            # For each species name, verifies if there was
            # some k-mers that successfully blasted on it.
            # Verifies which of the 3 categories are
            # represented in the sucessfull blasts (controls,
            # and/or parasites and/or host)
            for spec_name in sorted(ident_scores):
                if ident_scores[spec_name] > 0:
                    if species[spec_name] == "controls":
                        in_cont = True
                        num_kmers += ident_scores[spec_name]
                    elif species[spec_name] == "parasites":
                        in_para = True
                        num_kmers += ident_scores[spec_name]
                    elif species[spec_name] == "host":
                        in_host = True
                        num_kmers += ident_scores[spec_name]
                # Writes the number of k-mers that successfully blasted
                # on the species in the "table output file".
                t_out.write("\t" + str(ident_scores[spec_name]))
            # fills in the objects destined to create the "stats file" and
            # the Venn diagram (or some sort of inclusion diagram)
            if in_cont == True and in_para == False and in_host == False:
                controls["prot"] += 1
                controls["kmers"] += num_kmers
            elif in_cont == False and in_para == True and in_host == False:
                parasites["prot"] += 1
                parasites["kmers"] += num_kmers
            elif in_cont == False and in_para == False and in_host == True:
                host["prot"] += 1
                host["kmers"] += num_kmers
                host_only.add(protein)
            elif in_cont == True and in_para == True and in_host == False:
                cont_par["prot"] += 1
                cont_par["kmers"] += num_kmers
            elif in_cont == True and in_para == True and in_host == True:
                cont_par_host["prot"] += 1
                cont_par_host["kmers"] += num_kmers
            elif in_cont == False and in_para == True and in_host == True:
                par_host["prot"] += 1
                par_host["kmers"] += num_kmers
            elif in_cont == True and in_para == False and in_host == True:
                cont_host["prot"] += 1
                cont_host["kmers"] += num_kmers
        # One host-only protein name per line.
        with open(host_proteins, "w") as h_prot:
            for protein in host_only:
                h_prot.write(protein + "\n")
        # Writes down, in "stats out file", the number of k-mers in
        # each Venn diagram category. Will be used later to produce
        # a Venn diagram in R.
        s_out.write("Controls only: " + str(controls["prot"]) + " (" + str(controls["kmers"]) + ")")
        s_out.write("\n" + "Parasites only: " + str(parasites["prot"]) + " (" + str(parasites["kmers"]) + ")")
        s_out.write("\n" + "Host only: " + str(host["prot"]) + " (" + str(host["kmers"]) + ")")
        s_out.write("\n" + "Controls and parasites: " + str(cont_par["prot"]) + " (" + str(cont_par["kmers"]) + ")")
        s_out.write("\n" + "Controls, parasites and host: " + str(cont_par_host["prot"]) + " (" + str(cont_par_host["kmers"]) + ")")
        s_out.write("\n" + "Parasites and host: " + str(par_host["prot"]) + " (" + str(par_host["kmers"]) + ")")
        s_out.write("\n" + "Controls and host: " + str(cont_host["prot"]) + " (" + str(cont_host["kmers"]) + ")")
print "\n\033[1mJob done !\033[0m\n"
986,219 | 9b4ad66f0f0bc336dd9eae532b23ccb0e1d43971 | import math
import cmath
# Solve a*x^2 + b*x + c = 0 from user input, reporting the root type.
a = float(input("Enter a:"))
b = float(input("Enter b:"))
c = float(input("Enter c:"))
disc = b**2 - 4*a*c
if disc < 0:
    print("Imaginary roots")
    # BUGFIX: -b/2*a parses as (-b/2)*a; the real part is -b/(2a).
    realp = float(-b/(2*a))
    imagp = cmath.sqrt(abs(disc))/(2.0*a)
    print("realp:", realp)
    print("imagp:", imagp)
elif disc == 0:
    print("Roots are real and equal")
    # BUGFIX: same operator-precedence error as above.
    root1 = float(-b/(2*a))
    root2 = root1
    print("root1:", root1)
    print("root2:", root2)
elif disc > 0:
    print("Roots are real and distinct")
    root1 = (-b + cmath.sqrt(disc))/(2*a)
    root2 = (-b - cmath.sqrt(disc))/(2*a)
    print("root1:", root1)
    print("root2:", root2)
986,220 | 9d40aaddd5c7b11d90bd77879b78c9720158bbaf | import pickle
import numpy as np # linear algebra
import sklearn as sk # machine learning
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import preprocessing # for standardizing the data
import pandas as pd
import seaborn as sns # visualization tool
import matplotlib.pyplot as plt # for plotting
import seaborn as sns # visualization tool
import tensorflow as tf # for creating neural networks
from tensorflow import keras # an easier interface to work with than tensorflow
import scrape
import os, sys
def openAllTeamsFromSeason(season):
    """Load the pickled {team: stats} mapping for *season*."""
    path = "teams" + str(season) + ".pickle"
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def openTeamFromSeason(team, season):
    """Load one team's record from the pickled season file."""
    path = "teams" + str(season) + ".pickle"
    with open(path, 'rb') as handle:
        season_data = pickle.load(handle)
    return season_data[team]
def openSeason(season):
    """Load the pickled game results for *season*."""
    path = "season" + str(season) + ".pickle"
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def getStat(team, stat):
    """Return the named stat from a team record, coerced to float."""
    return float(team[stat])
def calculate(home, away):
    """Build the 8-value feature vector for one game.

    Three selected stats per team (as floats) followed by a home-field
    flag: 1 after the home team's triple, 0 after the away team's.
    Other available stats include points, total_yards, turnovers, etc.
    """
    stats = ["turnover_pct", "exp_pts_tot", "yds_per_play_offense"]
    features = []
    for side, flag in ((home, 1), (away, 0)):
        features.extend(float(side[stat]) for stat in stats)
        features.append(flag)
    return features
def buildVectors():
    """Assemble train (seasons 2000-2017) and test (2018) vectors.

    Each game contributes one feature row; the label is 1 when the away
    team won, 0 when the home team won.
    """
    x_train, y_train = [], []
    x_test, y_test = [], []
    for season in range(2000, 2019):
        teams_for_year = openAllTeamsFromSeason(season)
        # Seasons through 2017 train the model; 2018 is held out.
        if season <= 2017:
            xs, ys = x_train, y_train
        else:
            xs, ys = x_test, y_test
        for game in openSeason(season):
            winner = teams_for_year[game[0]]
            loser = teams_for_year[game[1]]
            if game[2] == 1:
                # Away win: the loser hosted the game.
                xs.append(calculate(loser, winner))
                ys.append(1)
            else:
                xs.append(calculate(winner, loser))
                ys.append(0)
    return x_train, y_train, x_test, y_test
def network(showPlot=False):
    """Train a keras classifier on the season vectors and print accuracy.

    When *showPlot* is True the accuracy/loss training curves are also
    displayed with matplotlib.
    """
    # Local aliases for the keras API.
    Dense = keras.layers.Dense
    Activation = keras.layers.Activation
    to_categorical = keras.utils.to_categorical
    Sequential = keras.Sequential
    x_train, y_train, x_test, y_test = buildVectors()
    # Standardize the data.
    scaler = preprocessing.StandardScaler().fit(x_train)
    x_train = scaler.transform(x_train)
    # We also need to transform the test set, using the same means and standard deviations
    # that were calculated from and used to transform the training set data.
    x_test = scaler.transform(x_test)
    # NOTE(review): labels are only 0/1, so num_classes=600 (and the 600-way
    # softmax below) looks oversized — confirm whether this was intentional.
    train_hot_labels = to_categorical(y_train, num_classes = 600)
    test_hot_labels = to_categorical(y_test, num_classes = 600)
    # Instantiate a new neural network model
    model = Sequential()
    # Add some layers.
    # Fist the input layer, which has 7 values, is connected to hidden layer 5, with 100 nodes (neurons).
    model.add(Dense(1400, activation='sigmoid', input_dim = 8))
    # Layer 2, hidden layer
    model.add(Dense(900, activation='sigmoid'))
    # Layer 3, output layer
    model.add(Dense(600, activation = 'softmax', ))
    # Compile the NN model, defining the optimizer to use, the loss function, and the metrics to use.
    # These settings are appropriate for a multiple-class classification task.
    model.compile(optimizer = 'rmsprop',
                  loss = 'categorical_crossentropy',
                  metrics = ['accuracy'])
    # Train the model, iterating on the data in batches of 64 sample, over 8 epochs
    history = model.fit(x_train, train_hot_labels,
                        validation_split = 0.25,
                        epochs = 8,
                        batch_size = 64)
    # Evaluate the model's performance
    train_loss, train_acc = model.evaluate(x_train, train_hot_labels)
    test_loss, test_acc = model.evaluate(x_test, test_hot_labels)
    print('Training set accuracy:', train_acc)
    print('Test set accuracy:', test_acc)
    if showPlot:
        print("Showing Plots")
        # NOTE(review): recent keras versions expose these history keys as
        # 'accuracy'/'val_accuracy' rather than 'acc'/'val_acc' — confirm.
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('Model accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
        # Plot training & validation loss values
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
if __name__== "__main__":
    # UNCOMMENT THIS LINE IF IT THROWS ERROR 15 REGARDING libiomp5.dylib
    # os.environ['KMP_DUPLICATE_LIB_OK']='True'
    # Optional CLI flags: "scrape" refreshes the cached data first;
    # "plot" shows the training curves after fitting.
    if "scrape" in sys.argv:
        scrape.scrapeTeamOffenses()
        scrape.scrapeWeeklyResults()
    network(True if "plot" in sys.argv else False)
|
986,221 | be857acd646bd32c18284ced575de49eb1f9a24f | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Poincare(nn.Module):
    """Poincare (hyperbolic) concept embeddings with a hand-written
    Riemannian gradient and manual SGD update.

    The embedding matrix is a plain tensor (not an nn.Parameter) because
    gradients are computed explicitly in backward() and applied in step().
    """
    def __init__(self, args, dc, device):
        super(Poincare, self).__init__()
        self.args = args
        self.dc = dc
        # One row per vocabulary entry, uniformly initialised in
        # [-args.init, args.init] and moved to the target device.
        self.Concept_Embeddings = nn.init.uniform_(torch.randn(len(dc.word2idx), args.embed_dims), a = -args.init, b = args.init).to(device)
    def forward(self, left_idx, right_idx):
        """Return the intermediate quantities and the Poincare distance
        between the embeddings selected by left_idx and right_idx."""
        # print(left_idx.size())
        # print(right_idx.size())
        left_embeddings = self.Concept_Embeddings[left_idx]
        right_embeddings = self.Concept_Embeddings[right_idx]
        # print(left_embeddings.size())
        # print(right_embeddings.size())
        result_tuple, dists = self._poincare(left_embeddings, right_embeddings)
        # print('DIST:', dists.size())
        return result_tuple, dists
    def _arcosh(self, x):
        # Inverse hyperbolic cosine: arcosh(x) = log(x + sqrt(x^2 - 1)),
        # valid for x >= 1 (gamma is clamped to >= 1 in _prepare).
        # print(x.size())
        return torch.log(x + torch.sqrt(x * x - 1))
    def _prepare(self, u, v):
        """Compute the scalar products and the gamma term of the Poincare
        distance d(u, v) = arcosh(gamma), clamping to keep values in domain."""
        # print(u.size(), v.size())
        uu = torch.sum(u * u, -1)
        vv = torch.sum(v * v, -1)
        uv = torch.sum(u * v, -1)
        # alpha = 1 - |u|^2, beta = 1 - |v|^2; clamp to EPS so points that
        # drift onto/outside the unit ball do not produce division by zero.
        alpha = 1 - uu
        alpha[alpha<=0] = self.args.EPS
        beta = 1 - vv
        beta[beta<=0] = self.args.EPS
        gamma = 1 + 2 * (uu - 2 * uv + vv) / alpha / beta
        # gamma must be >= 1 for arcosh to be defined.
        gamma[gamma<1.0] = 1.0
        # print(uu.size(), uv.size(), vv.size(), alpha.size(), beta.size(), gamma.size())
        return (uu, uv, vv, alpha, beta, gamma)
    def _poincare(self, u, v):
        # Distance plus the intermediate tuple needed later by backward().
        result_tuple = self._prepare(u, v)
        gamma = result_tuple[-1]
        return result_tuple, self._arcosh(gamma)
    def backward(self, left_idx, right_idx, grad_output, result_tuple):
        """Manually compute d(dist)/du and d(dist)/dv, reusing the tuple
        returned by forward(); inf/NaN entries are zeroed out (they arise
        when gamma == 1, i.e. coincident points)."""
        u = self.Concept_Embeddings[left_idx]
        v = self.Concept_Embeddings[right_idx]
        c = grad_output.unsqueeze(-1)
        uu, uv, vv, alpha, beta, gamma = result_tuple
        # Chain rule through arcosh(gamma): d(arcosh)/d(gamma) = 1/sqrt(gamma^2-1).
        c *= 4 / torch.sqrt(gamma * gamma - 1) / alpha / beta
        cu = c * alpha * alpha / 4
        cv = c * beta * beta / 4
        grad_u = (cu * (vv - 2 * uv + 1) / alpha) * u - cu * v
        grad_v = (cv * (uu - 2 * uv + 1) / beta) * v - cv * u
        # Scrub non-finite gradients so a single degenerate pair cannot
        # corrupt the whole embedding matrix.
        grad_u[grad_u == float('inf')] = 0.0
        grad_v[grad_v == float('inf')] = 0.0
        grad_u[grad_u == float('-inf')] = 0.0
        grad_v[grad_v == float('-inf')] = 0.0
        grad_u[torch.isnan(grad_u)] = 0.0
        grad_v[torch.isnan(grad_v)] = 0.0
        return grad_u, grad_v
    def step(self, idx, grad, lr):
        # Plain SGD update applied in-place to the selected rows.
        self.Concept_Embeddings[idx] -= lr*grad
|
986,222 | 91daa199709fa48802e23c1eeeb9ce6c7ece5f5d | str1=input()
str2=""
res=[]
for h in str1:
if h>='a' and h<='z':
str2+=h
elif h=='B':
str2=str2[0:len(str2)-1]
elif h=='P':
res.append(str2)
def search(res,x,y):
    """Count (possibly overlapping) occurrences of snapshot *x* inside
    snapshot *y*, where x and y are 1-based indices into *res*."""
    needle = res[x - 1]
    haystack = res[y - 1]
    # Slide a window of len(needle) over haystack; booleans sum as 0/1.
    return sum(
        haystack[i:i + len(needle)] == needle
        for i in range(len(haystack) - len(needle) + 1)
    )
n=int(input())
temp=[]
for _ in range(n):
    temp.append(input().split(" "))
# Each query is a 1-based pair (x, y): count occurrences of snapshot x
# inside snapshot y and print the result.
for t in temp:
    print(search(res,int(t[0]),int(t[1])))
986,223 | e9c2f91ec723de657360532ec442043eb4763275 | # https://www.acmicpc.net/problem/2033 문제 제목 : 반올림 , 언어 : Python, 날짜 : 2020-01-03, 결과 : 실패
# 오늘은 하루종일 밖이라 코딩을 못한다..
# 왜 스트링으로 푼건 틀리는지 모르겟다. 반례도 못찾겠다...
import sys
from collections import deque
# N holds the digits of the input number as single-character strings
# (trailing newline stripped).
N = deque(sys.stdin.readline()[:-1])#list(map(int, list(sys.stdin.readline())))
len_N = len(N)
# lesser: current rounding threshold (10, 100, ...);
# point: digit position (counted from the right) currently being rounded.
lesser = 10
point = 2
def re_new(len_list_a, list_a):
    """Propagate carries through the digit list after a rounding step.

    Scans digits from least to most significant; any digit >= 10 has 10
    subtracted and 1 added to the next digit. If the most significant digit
    overflows, a new leading '1' is prepended and the length grows by one.
    Returns the (possibly updated) length and the digit deque.
    NOTE(review): the author marked this whole first attempt as failing
    (wrong answer) — treat this carry logic as suspect.
    """
    list_a = deque(list_a)
    for i in range(1, len_list_a+1):
        if i == len_list_a:
            # Most significant digit: overflow extends the number.
            if int(list_a[len_list_a - i]) >= 10:
                list_a[len_list_a - i] = str(int(list_a[len_list_a - i]) - 10)
                list_a.appendleft('1')
                len_list_a +=1
            return len_list_a , list_a
        #print("test :",len_list_a - i)
        if int(list_a[len_list_a - i]) >= 10:
            # Carry into the next more significant digit.
            list_a[len_list_a - i - 1] = str(int(list_a[len_list_a - i - 1]) + 1)
            list_a[len_list_a - i] = str(int(list_a[len_list_a - i]) - 10)
#2 -1 -1
# Round the digit at position `point` (from the right) while N still
# exceeds the current threshold; re_new then resolves any digit >= 10.
while int("".join(N)) > lesser:
    if int(N[len_N - point + 1]) >= 5:
        # Round up: zero this digit and bump the next one.
        N[len_N - point + 1] = '0'
        N[len_N - point] = str(int(N[len_N - point]) + 1)
    elif int(N[len_N - point + 1]) < 5:
        # Round down: just zero this digit.
        N[len_N - point + 1] = '0'
    point+=1
    lesser*=10
    len_N, N = re_new(len_N, N)
print("".join(N))
# https://www.acmicpc.net/problem/2033 Problem: Rounding, Language: Python, Date: 2020-01-04, Result: success
import sys
N = int(sys.stdin.readline())
standard_point = 10
# Round N at each power of ten in turn (10, 100, ...) until the threshold
# exceeds N itself. Integer arithmetic avoids the string manipulation of
# the failed first attempt above.
while N > standard_point:
    # The digit being rounded is N % standard_point compared against
    # half the threshold (5 * standard_point // 10).
    if N%standard_point >= 5*standard_point//10:
        N = N//standard_point * standard_point + standard_point
    else:
        N = N//standard_point * standard_point
    standard_point*=10
print(N)
|
986,224 | dc223e8176ae62f5e83f9745bf758a3850fa6df5 | #syntax errors ---the errors which occur invalid syntax are called syntax errors
#Runtime errors ----The errors which occur while execution of the programm..
# once all syntax errors are corrected then only program execution starts
#runtime errors also call it as exceptions----while execution of the program something goes wrong
# because of end user input or programming logic or memory problems etc then we will get runtime errors.
#print(10/0)
#print(10/'ten')
'''x=int(input('enter number:'))
print(x)'''
#exception handling concepts applicable for runtime errors not syntax errors
#exception ---an unexpected event that disturbs normal flow of program is called exception
#eg ;--zero division error,type error,value error,file not found error,eof error,sleeping error
#we should not block our resource and we should not miss anything
# we have to define alternative way to rest of the program execution
#within the try block if anywhere exception raised then rest of the try block not executed eventhough
#we handled that exception
#if try with multiple except blocks available
# single except block that can handle multiple exceptions
#except(zero division error,value error)as msg:
# default except block also available---it should be last in multiple except block
#finally block----it is not recommended to maintain clean up code
#whether exception raised or not raised and whether exception handled or not handled --such type of
# best place is nothing but finally block
#the main purpose of finally block is to maintain clean up code
#try:
# Risky code
#except:
# Handling code
#finally:
# cleanup code
#there is only one situation finally block won't be executed when ever we are using os._exit(0)
# we can take ty-except-finally blocks inside try or except or finally blocks i.e ..nesting of
# try- except-finally is possible
'''try:
---
----
---
try:
----
----
except:
----
---
---
finally:
-----
except:
-----
-----
finally:
-----
-----'''
#contro flow of nested try-except-finally:
#we can use else with try-except-finally
#else block will be executed if and only if there is no exceptions raised in try block
'''try:
# -----
------
except:
-----
-----
else:
----
----'''
#whenever we are writing else block compulsory except block should be there i.e without except
#block we cannot write else block
#try-except-else-finally order is important
#two types of exceptions
#pre-defined exceptions----pvm is responsible for pre-defined exceptions
#user defined exceptions-----programmer is responsible to define
# hence we have to raise explicitly based on our requirement using raise keyword
#eg;--insufficient funds,tooyoung ,tooold ,invalidinput
class TooYoungException(Exception):
    """User-defined exception raised when the entered age is below 18."""
    def __init__(self,arg):
        # Initialise the base Exception explicitly so args/str(e) are set
        # by contract rather than by BaseException.__new__ side effects.
        super().__init__(arg)
        self.msg=arg
class TooOldException(Exception):
    """User-defined exception raised when the entered age is above 60."""
    def __init__(self,arg):
        # Initialise the base Exception explicitly so args/str(e) are set
        # by contract rather than by BaseException.__new__ side effects.
        super().__init__(arg)
        self.msg = arg
# Demonstration: raise the user-defined exceptions with the `raise` keyword
# based on end-user input. Unhandled, these terminate the program.
age=int(input('enter age :'))
if age > 60:
    raise TooOldException('please wait some more time')
elif age < 18:
    raise TooYoungException ('you are wait more time')
else:
    print('you will get ')
#generally we have to take risky code outer try block and too much risky code we have to take inner
#try block if any exception raised in inner try block inner except block will executed.if any exception
#raised at outer block outer except block will executed
|
986,225 | 8ab4b1751295f6c9867cf855ba0d2104404772df | # -*- encoding:utf-8 -*-
'''
@time: 2019/12/21 8:28 下午
@author: huguimin
@email: 718400742@qq.com
一个doc表示一个样本
'''
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from layers.dynamic_rnn import DynamicLSTM
from layers.attention import Attention
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Computes adj @ (text @ W) / (degree + 1) [+ bias].
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Allocate parameters, then give them a well-defined random init:
        # a bare torch.FloatTensor(...) is uninitialized memory and may
        # contain garbage (even inf/NaN), which silently breaks training.
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(
                torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, text, adj):
        """Graph convolution over node features.

        text: [batch, nodes, in_features] node features.
        adj:  [batch, nodes, nodes] adjacency matrix.
        Returns [batch, nodes, out_features].
        """
        hidden = torch.matmul(text, self.weight)
        # Row-normalize by node degree; the +1 avoids division by zero for
        # isolated nodes (acts like an implicit self-loop count).
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        output = torch.matmul(adj, hidden) / denom
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class ECGCN(nn.Module):
    """Emotion-Cause GCN for clause-level cause extraction.

    Pipeline: word embedding -> BiLSTM word encoder -> attention-based
    clause encoder -> a stack of 3 graph convolutions over the clause
    graph, each gated by a score computed from the emotion clause ->
    linear classifier per clause.
    """
    def __init__(self, word_embedding, pos_embedding, opt):
        super(ECGCN, self).__init__()
        self.opt = opt
        # Frozen pretrained word embeddings and relative-position embeddings.
        self.embed = nn.Embedding.from_pretrained(torch.tensor(word_embedding, dtype=torch.float))
        self.pos_embed = nn.Embedding.from_pretrained(torch.tensor(pos_embedding, dtype=torch.float))
        self.word_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)#(32,75,45,200)
        self.clause_encode = Attention(2*opt.hidden_dim, 1, opt.max_sen_len, opt)#(32,75,200)
        # gcn
        # self.gc1 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
        # self.gc2 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
        # self.gc3 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
        #gat
        # self.ga1 = GAT(2*opt.hidden_dim, 2*opt.hidden_dim, self.opt.num_class, self.opt.keep_prob1, self.opt.alpha, self.opt.heads)
        self.fc1 = nn.Linear(2*opt.hidden_dim + self.opt.embedding_dim_pos, 2*opt.hidden_dim)
        self.fc2 = nn.Linear(2*opt.hidden_dim, opt.num_class)
        self.text_embed_dropout = nn.Dropout(opt.keep_prob1)
        # Three GCN layers, each paired with a linear gate driven by the
        # emotion clause representation.
        self.gates = nn.ModuleList()
        self.gcns = nn.ModuleList()
        for i in range(3):
            self.gcns.append(GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim))
            self.gates.append(nn.Linear(2*opt.hidden_dim, 1))
    def position_weight(self, inputs, emotion_id, doc_len):
        """
        :param inputs: [32, 75, 200]
        :param emotion_id: [32,]
        :param doc_len: [32]
        :param pos_embedding: [103, 50]
        :return:[32,75,50]
        """
        batch_size, max_len = inputs.shape[0], inputs.shape[1]
        relative_pos = np.zeros((batch_size, max_len))
        for sample in range(batch_size):
            # NOTE(review): `len` shadows the builtin here.
            len = doc_len[sample].item()
            for i in range(len):
                # Offset 69 presumably shifts the (possibly negative)
                # clause offset into the position-embedding index range
                # [0, 103) — TODO confirm against pos_embedding's size.
                relative_pos[sample][i] = i - emotion_id[sample].item() + 69
        return relative_pos
    def emotion_encode(self, inputs, emotion_id):
        """
        :param inputs: [32, 75, 200]
        :param emotion_id: [32,]
        :param doc_len: [32,]
        :return: [32, 1, 200]
        """
        batch_size, max_len, dim = inputs.shape[0], inputs.shape[1], inputs.shape[2]
        emotion_clause = np.zeros((batch_size, dim))
        for sample in range(batch_size):
            # NOTE(review): .cpu().detach().numpy() detaches the emotion
            # clause from the autograd graph, so no gradient flows back
            # through it — confirm this is intended.
            clause = inputs[sample][emotion_id[sample]]
            emotion_clause[sample] = clause.cpu().detach().numpy()
        return torch.FloatTensor(emotion_clause)
    def emotion_weight(self, inputs, emotion_clause):
        """
        :param inputs: [32, 75, 200]
        emotion_clause:[32, 1, 200]
        :return: [32, 75]
        """
        # Dot product of every clause with the emotion clause.
        batch, dim = inputs.shape[0], inputs.shape[2]
        emotion_clause = torch.reshape(emotion_clause, [batch, dim, 1])
        alpha = torch.reshape(torch.matmul(inputs, emotion_clause.float()), [-1, self.opt.max_doc_len, 1])
        return alpha
    def mask(self, inputs, emotion_id):
        """
        :param inputs: [32,75,200]
        :param emotion_id: [32,]
        :return: [32, 1, 200]
        """
        # Zero out every clause except the emotion clause itself.
        batch_size, max_len = inputs.shape[0], inputs.shape[1]
        emotion_idx = emotion_id.cpu().numpy()
        mask = [[] for i in range(batch_size)]
        for i in range(batch_size):
            for j in range(emotion_idx[i]):
                mask[i].append(0)
            for j in range(emotion_idx[i], emotion_id[i] + 1):
                mask[i].append(1)
            for j in range(emotion_idx[i] + 1, max_len):
                mask[i].append(0)
        mask = torch.tensor(mask).unsqueeze(2).float().to(self.opt.device)
        return mask * inputs
    def pack_sen_len(self, sen_len):
        """
        :param sen_len: [32, 75]
        :return:
        """
        # Replace zero sentence lengths with 1 so the packed LSTM does not
        # receive empty sequences (padding clauses).
        batch_size = sen_len.shape[0]
        up_sen_len = np.zeros([batch_size, self.opt.max_doc_len])
        for i, doc in enumerate(sen_len):
            for j, sen in enumerate(doc):
                if sen == 0:
                    up_sen_len[i][j] = 1
                else:
                    up_sen_len[i][j] = sen
        return torch.tensor(up_sen_len)
    def forward(self, inputs):
        x, sen_len, doc_len, doc_id, emotion_id, adj = inputs
        up_sen_len = self.pack_sen_len(sen_len)
        # Flatten documents into one batch of sentences for the word LSTM.
        x = torch.reshape(x, [-1, self.opt.max_sen_len])
        x = self.embed(x)
        x = self.text_embed_dropout(x)
        up_sen_len = torch.reshape(up_sen_len, [-1])
        word_encode = self.word_lstm(x, up_sen_len) #(32*75, batch_max_len, 200)
        clause_encode = self.clause_encode(word_encode, sen_len)
        # NOTE(review): embs (clause + position features) is built but never
        # used below — leftover from the commented-out variant of forward.
        embs = [clause_encode]
        embs += [self.pos_embed(torch.LongTensor(self.position_weight(clause_encode, emotion_id, doc_len)).to(self.opt.device))]
        emotion_encode = self.emotion_encode(clause_encode, emotion_id) ### embedding representation of the emotion clause
        ### compute a gating score against emotion_encode at every GCN layer
        # x = F.relu(self.gc1(clause_encode, adj))
        # x = F.relu(self.gc2(x, adj))
        # x = F.relu(self.gc3(x, adj))
        x = clause_encode
        for i in range(3):
            x = F.relu(self.gcns[i](x, adj))
            # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid.
            weight = F.sigmoid(self.gates[i](emotion_encode))
            weight = weight.unsqueeze(dim=-1)
            x = x * weight
        output = self.fc2(x.float())
        return output
    # def forward(self, inputs, vs=False):
    #     attention = []
    #     x, sen_len, doc_len, doc_id, emotion_id, adj = inputs#(x(32,75, 45)), (32, 75)
    #     up_sen_len = self.pack_sen_len(sen_len)
    #     x = torch.reshape(x, [-1, self.opt.max_sen_len])
    #     x = self.embed(x)
    #     x = self.text_embed_dropout(x)
    #     up_sen_len = torch.reshape(up_sen_len, [-1])
    #     word_encode = self.word_lstm(x, up_sen_len) #(32*75, batch_max_len, 200)
    #     clause_encode = self.clause_encode(word_encode, sen_len)
    #     embs = [clause_encode]
    #     embs += [self.pos_embed(torch.LongTensor(self.position_weight(clause_encode, emotion_id, doc_len)).to(self.opt.device))]
    #     "concat"
    #     clause_encode = torch.cat(embs, dim=2)
    #     clause_encode = torch.reshape(clause_encode, [-1, self.opt.max_doc_len, 2 * self.opt.hidden_dim + self.opt.embedding_dim_pos])
    #     clause_encode = self.fc1(clause_encode)
    #     # Strategy 1: attention weight between the emotion clause and each clause
    #     # emotion_encode = self.emotion_encode(clause_encode, emotion_id)
    #     # batch, dim = clause_encode.shape[0], clause_encode.shape[2]
    #     # emotion_encode = torch.reshape(emotion_encode, [batch, dim , 1])
    #     # alpha = self.emotion_weight(clause_encode, emotion_encode)
    #     #
    #     # ones = torch.ones((batch, self.opt.max_doc_len, 1))
    #     #
    #     # emotion_encode = emotion_encode.expand(-1,-1,self.opt.max_doc_len).transpose(1,2)
    #     # clause_encode = alpha * emotion_encode + (ones-alpha)*clause_encode
    #     x = F.relu(self.gc1(clause_encode, adj))
    #     x = F.relu(self.gc2(x, adj))
    #     # x = F.relu(self.gc3(x, adj))
    #     # output = self.ga1(clause_encode, adj)
    #
    #     batch, dim = clause_encode.shape[0], clause_encode.shape[2]
    #     ones = torch.ones((batch, self.opt.max_doc_len, 1)).to(self.opt.device)
    #     emotion_encode = self.emotion_encode(x, emotion_id).to(self.opt.device)
    #     alpha = self.emotion_weight(clause_encode, emotion_encode)
    #     # # emotion_encode = self.mask(x, emotion_id)
    #     # # alpha_mat = torch.matmul(emotion_encode, clause_encode.transpose(1,2))
    #     # # alpha = F.softmax(alpha_mat.sum(1, keepdim=True), dim=2).transpose(1,2) #(32,1,75)
    #     # # ones = torch.ones((batch, self.opt.max_doc_len, 1))
    #     # emotion_encode = torch.reshape(emotion_encode, [batch, dim, 1])
    #     # emotion_encode = emotion_encode.expand(-1, -1, self.opt.max_doc_len).transpose(1, 2)
    #     # # x = emotion_encode * alpha + (ones-alpha)*clause_encode
    #     emotion_encode = torch.reshape(emotion_encode, [batch, dim, 1])
    #     emotion_encode = emotion_encode.expand(-1, -1, self.opt.max_doc_len).transpose(1, 2)
    #     x = clause_encode * alpha + (ones - alpha) * emotion_encode
    #     x = self.text_embed_dropout(x)
    #     # # x = torch.matmul(alpha, clause_encode).squeeze(1)
    #     #
    #     # # Strategy 2: keep the original clause representation as primary,
    #     # # with the graph convolution as an auxiliary signal
    #     # #
    #     #
    #     output = self.fc2(x.float())
    #     if vs:
    #         return output, attention
    #     return output
|
986,226 | 09faf370a23cc1f20f1f9a730f93da93082484af | from os import path
from heapq import heappush, heappop
#!python % < ../A-small-practice.in
# in_file = '../B-small-practice.in'
in_file = '../B-large-practice.in'
# in_file = '../sample.in'
# Output file: the input file's basename with a .out extension, written to
# the current directory.
out_file = '.'.join([path.splitext(path.basename(in_file))[0], 'out'])
def solution(fin, fout):
    """Solve Code Jam "Train Timetable": for each case, count the minimum
    number of trains that must start the day at stations A and B.

    NOTE: Python 2 only — relies on cmp(), list.sort(comparator) and
    map() returning a list.
    """
    def tstr2int(tstr):
        # "HH:MM" -> minutes since midnight.
        thour, tmin = map(int, tstr.split(':'))
        return thour * 60 + tmin
    def cmppair(x, y):
        # Python 2 comparator: order by departure time, then arrival time.
        return cmp(x[0], y[0]) or cmp(x[1], y[1])
    def onestep(tin, cur_wait, other_wait):
        # Trip tin = (depart, arrive): after arriving, the train is ready at
        # the other station at arrive + T (turnaround). Reuse a train already
        # waiting here if one is ready by departure; otherwise a new train is
        # needed (return 1 to add to this station's count).
        heappush(other_wait, tin[1] + T)
        if cur_wait and cur_wait[0] <= tin[0]:
            heappop(cur_wait)
            return 0
        return 1
    N = int(fin.readline())
    for i in range(N):
        T = int(fin.readline())
        NA, NB = map(int, fin.readline().split())
        Ares, Bres = 0, 0
        # Min-heaps of the times trains become available at A and B.
        Await, Bwait = [], []
        A, B = [], []
        for j in range(NA):
            tpair = map(tstr2int, fin.readline().split())
            A.append(tpair)
        for j in range(NB):
            tpair = map(tstr2int, fin.readline().split())
            B.append(tpair)
        A.sort(cmppair)
        B.sort(cmppair)
        # Merge-process the two departure lists in chronological order.
        ai = bi = 0
        while ai < len(A) and bi < len(B):
            if A[ai][0] < B[bi][0]:
                Ares += onestep(A[ai], Await, Bwait)
                ai += 1
            else:
                Bres += onestep(B[bi], Bwait, Await)
                bi += 1
        while ai < len(A):
            Ares += onestep(A[ai], Await, Bwait)
            ai += 1
        while bi < len(B):
            Bres += onestep(B[bi], Bwait, Await)
            bi += 1
        fout.write('Case #%s: %s %s\n' % (i + 1, Ares, Bres))
# Run the solver, streaming the input file to the output file.
with open(in_file) as fin:
    with open(out_file, 'w') as fout:
        solution(fin, fout)
|
986,227 | fe411b7bd44b08744937355d4cfe4fe85522c871 |
"""
asset_manager_pre_export_dialog
==========================================
Dialog that asks wether or not the current scene should be saved
before export begins. Offers the possibility to remember your choice.
-----------------------
**Author:** `Timm Wagener <mailto:wagenertimm@gmail.com>`_
"""
#Import
#------------------------------------------------------------------
#python
import os
import sys
import functools
import logging
import subprocess
import time
import shutil
import webbrowser
import yaml
import hashlib
import string
import random
#PySide
from PySide import QtGui
from PySide import QtCore
from PySide import QtUiTools
import shiboken
import pysideuic
#Import variable
do_reload = True
#helga
#global_variables
from helga.general.setup.global_variables import global_variables
if(do_reload):reload(global_variables)
#global_functions
from helga.general.setup.global_functions import global_functions
if(do_reload):reload(global_functions)
#asset_manager
#lib
#asset_manager_globals
from lib import asset_manager_globals
if(do_reload):reload(asset_manager_globals)
#lib.gui
#asset_manager_stylesheets
from lib.gui import asset_manager_stylesheets
if(do_reload):reload(asset_manager_stylesheets)
#Globals
#------------------------------------------------------------------
#Pathes
TOOL_ROOT_PATH = asset_manager_globals.TOOL_ROOT_PATH
MEDIA_PATH = asset_manager_globals.MEDIA_PATH
ICONS_PATH = asset_manager_globals.ICONS_PATH
#darkening_factor
DARKENING_FACTOR = asset_manager_globals.DARKENING_FACTOR
#brightening_factor
BRIGHTENING_FACTOR = asset_manager_globals.BRIGHTENING_FACTOR
#AssetManager colors
BRIGHT_ORANGE = asset_manager_globals.BRIGHT_ORANGE
DARK_ORANGE = asset_manager_globals.DARK_ORANGE
BRIGHT_BLUE = asset_manager_globals.BRIGHT_BLUE
DARK_BLUE = asset_manager_globals.DARK_BLUE
BRIGHT_GREEN = asset_manager_globals.BRIGHT_GREEN
DARK_GREEN = asset_manager_globals.DARK_GREEN
BRIGHT_GREY = asset_manager_globals.BRIGHT_GREY
GREY = asset_manager_globals.GREY
DARK_GREY = asset_manager_globals.DARK_GREY
DARK_BLUE = asset_manager_globals.DARK_BLUE
BRIGHT_BLUE = asset_manager_globals.BRIGHT_BLUE
WHITE = asset_manager_globals.WHITE
#AssetManager Icons
ICON_EXPORT = asset_manager_globals.ICON_EXPORT
ICON_CHAR = asset_manager_globals.ICON_CHAR
ICON_PROP = asset_manager_globals.ICON_PROP
ICON_SHOT = asset_manager_globals.ICON_SHOT
ICON_UPDATE = asset_manager_globals.ICON_UPDATE
ICON_DOCS = asset_manager_globals.ICON_DOCS
#form_class, base_class
#------------------------------------------------------------------
#ui_file
ui_file_name = 'asset_manager_pre_export_dialog.ui'
ui_file = os.path.join(MEDIA_PATH, ui_file_name)
#form_class, base_class
form_class, base_class = global_functions.load_ui_type(ui_file)
#AssetManagerPreExportDialog class
#------------------------------------------------------------------
class AssetManagerPreExportDialog(form_class, base_class):
    """
    AssetManagerPreExportDialog

    Frameless, always-on-top PySide dialog that asks a yes/no question
    before export (e.g. "save the scene first?") and offers a
    "remember my choice" checkbox. Use exec_()/accept()/reject() as with
    any QDialog and query get_remember_choice() afterwards.
    """

    def __new__(cls, *args, **kwargs):
        """
        AssetManagerPreExportDialog instance factory.
        """

        # NOTE(review): args and kwargs are passed as two positional objects
        # (a tuple and a dict) instead of being unpacked with * / ** — this
        # looks unintended; confirm before changing, since the Qt/shiboken
        # __new__ may simply be ignoring the extras today.
        #asset_manager_pre_export_dialog_instance
        asset_manager_pre_export_dialog_instance = super(AssetManagerPreExportDialog, cls).__new__(cls, args, kwargs)

        return asset_manager_pre_export_dialog_instance

    def __init__(self,
                question = 'Question',
                logging_level = logging.DEBUG,
                parent = None):
        """
        Customize instance.
        """

        #super
        self.parent_class = super(AssetManagerPreExportDialog, self)
        self.parent_class.__init__(parent)

        #setObjectName
        self.setObjectName(self.__class__.__name__)

        #instance variables
        #------------------------------------------------------------------
        self.title_name = self.__class__.__name__
        self.version = 0.1
        self.title = self.title_name +' ' + str(self.version)
        self.icon_path = os.path.join(ICONS_PATH, 'icon_asset_manager.png')

        #question
        self.question = question

        #remember_choice
        self.remember_choice = False #is set in setup_additional_ui

        #logger
        #------------------------------------------------------------------
        #logger
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logging_level = logging_level
        self.logger.setLevel(self.logging_level)

        #Init procedure
        #------------------------------------------------------------------
        #setupUi
        self.setupUi(self)
        #setup_additional_ui
        self.setup_additional_ui()
        #connect_ui
        self.connect_ui()
        #style_ui
        self.style_ui()
        #test_methods
        self.test_methods()

    #UI setup methods
    #------------------------------------------------------------------

    def setup_additional_ui(self):
        """
        Setup additional UI like mvc or helga tool header.
        """

        #set title
        self.setWindowTitle(self.title)

        #set question
        self.lbl_question.setText(self.question)

        #set_remember_choice
        self.set_remember_choice(self.chkbx_remember_choice.isChecked())

    def connect_ui(self):
        """
        Connect UI widgets with slots or functions.
        """

        #btn_accept
        self.btn_accept.clicked.connect(self.accept)
        #btn_reject
        self.btn_reject.clicked.connect(self.reject)

        #chkbx_remember_choice
        self.chkbx_remember_choice.stateChanged.connect(self.set_remember_choice)

    def style_ui(self):
        """
        Setup tool palette, tool stylesheet and specific widget stylesheets.
        """

        #styled_background
        # Frameless, translucent, always-on-top dialog styled purely via
        # stylesheets instead of native window chrome.
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.Dialog)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)

        #correct_styled_background_attribute
        self.correct_styled_background_attribute()

        #set_margins_and_spacing
        self.set_margins_and_spacing()

        #set flat
        self.btn_accept.setFlat(True)
        self.btn_reject.setFlat(True)

        #set_stylesheet
        self.setStyleSheet(asset_manager_stylesheets.get_stylesheet())

        #adjust size (Shrink to minimum size)
        self.adjustSize()

    #Methods
    #------------------------------------------------------------------

    def correct_styled_background_attribute(self):
        """
        Set QtCore.Qt.WA_StyledBackground True for all widgets.
        Without this attr. set, the background-color stylesheet
        will have no effect on QWidgets. This should replace the
        need for palette settings.
        ToDo:
        Maybe add exclude list when needed.
        """

        #wdgt_list
        wdgt_list = self.findChildren(QtGui.QWidget) #Return several types ?!?!

        #iterate and set
        for wdgt in wdgt_list:

            #check type
            if(type(wdgt) is QtGui.QWidget):

                #styled_background
                wdgt.setAttribute(QtCore.Qt.WA_StyledBackground, True)

    def set_margins_and_spacing(self):
        """
        Eliminate margin and spacing for all layout widgets.
        """

        #margin_list
        margin_list = [0,0,0,0]

        #lyt_classes_list
        lyt_classes_list = [QtGui.QStackedLayout, QtGui.QGridLayout, QtGui.QFormLayout,
                            QtGui.QBoxLayout, QtGui.QVBoxLayout, QtGui.QHBoxLayout, QtGui.QBoxLayout]

        #lyt_list
        lyt_list = []
        for lyt_class in lyt_classes_list:
            lyt_list += [wdgt for wdgt in self.findChildren(lyt_class)]

        #set margin and spacing
        for lyt in lyt_list:

            #check type
            if(type(lyt) in lyt_classes_list):

                #set
                lyt.setContentsMargins(*margin_list)
                lyt.setSpacing(0)

    #Getter & Setter
    #------------------------------------------------------------------

    @QtCore.Slot(int)
    def set_remember_choice(self, value):
        """
        Set self.remember_choice
        """

        # NOTE(review): `value` is the raw Qt CheckState int (0 unchecked,
        # 2 checked), not a bool — callers of get_remember_choice() should
        # treat it as truthy/falsy only.
        #set
        self.remember_choice = value
        #log
        self.logger.debug('Set remember choice to {0}'.format(self.remember_choice))

    def get_remember_choice(self):
        """
        Get self.remember_choice
        """

        return self.remember_choice

    #Slots
    #------------------------------------------------------------------

    #Events
    #------------------------------------------------------------------

    def closeEvent(self, event):
        """
        Customized closeEvent
        """

        #parent close event
        self.parent_class.closeEvent(event)

    #Test
    #------------------------------------------------------------------

    def dummy_method(self, msg = 'dummy'):
        """
        Dummy method
        """

        #log
        self.logger.debug('{0}'.format(msg))
        #print
        print('{0}'.format(msg))

    def stylesheet_test(self, wdgt):
        """
        Test if setting a stylesheet overrides all attributes or just
        the one it is setting.
        """

        #stylesheet_str
        stylesheet_str = 'background-color: red;'

        #set stylesheet
        wdgt.setStyleSheet(stylesheet_str)

    def test_methods(self):
        """
        Suite of test methods to execute on startup.
        """

        #log
        self.logger.debug('\n\nExecute test methods:\n-----------------------------')

        #test methods here
        #------------------------------------------------------------------

        #dummy_method
        self.dummy_method()

        #stylesheet_test
        #self.stylesheet_test(self.wdgt_explanation)

        #------------------------------------------------------------------

        #log
        self.logger.debug('\n\n-----------------------------\nFinished test methods.')
|
986,228 | aabc8e5e504642ca8cfb4920d0a96dbb7f03e7e1 | # Generated by Django 2.2 on 2020-04-27 14:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces the free-form `avatar` and `name` fields on BaseComment with
    # a nullable FK to users.User; existing rows keep user = NULL.

    dependencies = [
        ('users', '0026_auto_20200427_1439'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='basecomment',
            name='avatar',
        ),
        migrations.RemoveField(
            model_name='basecomment',
            name='name',
        ),
        migrations.AddField(
            model_name='basecomment',
            name='user',
            # CASCADE: deleting a User deletes their comments.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.User'),
        ),
    ]
|
986,229 | 03a71601b77416cd0d9ea7540cc61c5f13bbc2d6 | from django.contrib import admin
from.models import Post,Action
# Register your models here.
class ActionAdmin(admin.TabularInline):
    # Edit related Action rows inline (tabular layout) on the Post page.
    model = Action
class PostAdminModel(admin.ModelAdmin):
    # Post change page shows its Actions inline.
    inlines = [ActionAdmin]
admin.site.register(Post,PostAdminModel)
986,230 | 9d394200da6e6ebf7c9fd7d47b2c666fddd656b9 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
def admin_register(model, user_admin):
    """Register `model` with `user_admin` in the Django admin, replacing any
    existing registration.

    Django raises AlreadyRegistered when a model is registered twice (e.g.
    on module reload); in that case the previous ModelAdmin is unregistered
    and the new one installed.
    """
    try:
        admin.site.register(model, user_admin)
    except AlreadyRegistered:
        admin.site.unregister(model)
        admin.site.register(model, user_admin)
|
986,231 | c4b5c389c7bf550db45cdcb8824eec948cd7d251 | from django.shortcuts import render, redirect
from .models import UserAccount,Inbox,Outbox
def user_view(request):
    """GET: render the signup form. POST: create a UserAccount from the form
    fields and render the login/home page for the new user.

    NOTE(review): the password is stored in plain text and there is no
    validation or duplicate-email check — confirm and harden.
    """
    if request.method == 'GET':
        return render(request, "user.html", {})
    elif request.method == 'POST':
        user = UserAccount()
        user.name = request.POST.get('user_name')
        user.email = request.POST.get('user_mail')
        user.password = request.POST.get('user_password')
        user.save()
        context = {
            'name':user.name,
            'email':user.email,
            'inbox':user.inbox,
            'outbox':user.outbox,
        }
        return render(request, "login.html", context)
def user_login(request):
    """GET: render the login form. POST: look the account up by email and
    render its home page.

    NOTE(review): the submitted password is read but never compared against
    the stored one, so any password "logs in"; also `.get(email=...)` raises
    DoesNotExist for unknown emails. Both look like real defects — confirm.
    """
    if request.method == 'GET':
        return render(request, "user_login.html", {})
    if request.method == 'POST':
        email = request.POST.get('user_mail')
        password = request.POST.get('user_password')
        context = {
            'name': UserAccount.objects.get(email=email).name,
            'email':UserAccount.objects.get(email=email).email,
            'inbox':UserAccount.objects.get(email=email).inbox,
            'outbox':UserAccount.objects.get(email=email).outbox
        }
        return render(request, "login.html",context)
def send_message(request):
    """Deliver a message: create an Outbox row for the sender and a mirrored
    Inbox row for the recipient, then re-render the sender's home page with
    their complete outbox.

    NOTE(review): `.get(email=...)` raises DoesNotExist when either address
    is unknown — there is no error handling here.
    """
    if request.method == 'GET':
        return render(request, "login.html",{})
    if request.method == 'POST':
        uremail = request.POST.get('your_mail')
        email = request.POST.get('target_mail')
        message = request.POST.get('user_message')
        user = UserAccount.objects.get(email=uremail)
        rcv_user = UserAccount.objects.get(email=email)
        # Record the message in the sender's outbox.
        outbox = Outbox()
        outbox.message = message
        outbox.user_account = UserAccount.objects.get(email=uremail)
        outbox.save()
        user.outbox = outbox
        user.save()
        # Mirror it into the recipient's inbox (tagged with the sender).
        inbox = Inbox()
        inbox.message = message
        inbox.user_account = outbox.user_account
        inbox.save()
        rcv_user.inbox = inbox
        rcv_user.save()
        # Rebind `outbox` to the sender's full outbox queryset for display.
        outbox = Outbox.objects.filter(user_account=user)
        context = {
            'name':user.name,
            'email':user.email,
            'inbox':user.inbox,
            'outbox':outbox
        }
        return render(request, 'login.html',context)
986,232 | 50b496c811f6b2c771862ac407df199449c55795 | import subprocess
import cv2
import numpy as np
from matplotlib import pyplot as plt
#
# img = cv2.imread('lena.jpg',0)
# edges = cv2.Canny(img,225,225)
#
# plt.subplot(121),plt.imshow(img,cmap = 'gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(edges,cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
#
# plt.show()
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('test2.jpg')
spatialRadius = 35
colorRadius = 30
# plt.imshow(img)
pyramidLevels = 3
# Mean-shift filtering for edge-preserving smoothing / segmentation.
# NOTE(review): spatialRadius (35) is defined but the literal 60 is passed
# as the spatial window — confirm which value is intended.
img2 = cv2.pyrMeanShiftFiltering(img, 60, colorRadius, pyramidLevels)
plt.subplot(2, 3, 1), plt.imshow(img)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(gray, 0, 2500, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# print(type(ret), type(thresh))
# plt.subplot(121),
# plt.imshow(thresh, cmap = 'gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(121),plt.imshow(thresh, cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
# cv2.Sobel(img, re_img, 1, 1)
# plt.imshow(re_img)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# compute the Scharr gradient magnitude representation of the images
# in both the x and y direction
# x = 6
# gradX = cv2.Sobel(gray, ddepth = x, dx = 1, dy = 0, ksize = -1)
# gradY = cv2.Sobel(gray, ddepth = x, dx = 0, dy = 1, ksize = -1)
# gradient = cv2.subtract(gradX, gradY)
# # gradient = cv2.convertScaleAbs(gradient)
# plt.subplot(1,3,1), plt.imshow(gradient)
# plt.subplot(1,3,2), plt.imshow(cv2.blur(gradient, (9, 9)))
# blur = cv2.blur(gradient, (9, 9))
# # ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY)
# plt.subplot(1,3,3), plt.imshow(blur)
# plt.subplot(2,2,1), plt.imshow(img)
# plt.subplot(2,2,2), plt.imshow(gradient)
# plt.subplot(2,2,3), plt.imshow(cv2.convertScaleAbs(gradX))
# plt.subplot(2,2,4), plt.imshow(cv2.convertScaleAbs(gradY))
# plt.show()
# noise removal
# kernel = np.ones((3, 3), np.uint8)
# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
#
# sure background area
# sure_bg = cv2.dilate(opening,kernel, iterations=100)
#
# Finding sure foreground area
# dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
# for i, x in enumerate(range(70, 0, -5)):
# print(x/100*dist_transform.max())
# ret, sure_fg = cv2.threshold(dist_transform, x*dist_transform.max(), 2500, 0)
#
# # Finding unknown region
# sure_fg = np.uint8(sure_fg)
# unknown = cv2.subtract(sure_bg, sure_fg)
# ret, markers = cv2.connectedComponents(sure_fg)
# # print(markers, markers)
# # Add one to all labels so that sure background is not 0, but 1
# markers = markers+1
#
# # Now, mark the region of unknown with zero
# markers[unknown==255] = 0
# markers = cv2.watershed(img, markers)
# img[markers == -1] = [255, 0, 0]
# plt.subplot(4, 4, i), \
# plt.imshow(img.copy(), cmap = 'gray')
# plt.title(round(x/100,2))
# import random
#
# n = 20
# a = [[1 if random.random() < 0.1 else 0 for _ in range(n)] for __ in range(n)]
# for x in a:
# print(*x)
#
# images = subprocess.check_output(['ls images'], shell=True).decode().split('\n')
# images = {x.split('.')[0]: cv2.imread('images/' + x, 0) for x in images if x.startswith('g')}
# graphs = {k: [[255 if x[i] == 255 else 0 for i in range(len(x))] for x in v] for k, v in images.items()}
#
#
# def seeds(mas, max_i, max_j, white=255, c=10):
# def rec(i, j, c, vec):
# try:
# if mas[i][j] == white:
# mas[i][j] = c
# if vec != 1:
# if i > 0:
# rec(i - 1, j, c, 1)
# if vec != 2:
# if i < max_i - 1:
# rec(i + 1, j, c, 1)
# if vec != 3:
# if j > 0:
# rec(i, j - 1, c, 1)
# if vec == 4:
# if i < max_j - 1:
# rec(i, j + 1, c, 1)
# except IndexError as e:
# print(e, i, j, max_i, max_j)
#
# for _i, m in enumerate(mas):
# for _j, im in enumerate(m):
# if im == white:
# c += 10
# rec(_i, _j, c, 0)
#
#
# seeds(graphs['g1'], len(graphs['g1']), len(graphs['g1'][0]))
# plt.imshow(graphs['g1'], cmap='gray')
# plt.show()
|
986,233 | a4152ecd2bd7eef8754703b425b38fbab01bf1ab | import sys
def flip_str1(s):
    """Write *s* reversed to stdout, followed by a newline.

    Uses slice reversal instead of the original char-at-a-time loop;
    the emitted bytes are identical.
    """
    sys.stdout.write(s[::-1])
    sys.stdout.write('\n')
def flip_str2(s):
    """Print *s* reversed one character at a time, then a blank line.

    The original used the Python 2 trailing-comma print statement
    (`print(x),`), which under Python 3 parses as a one-element tuple and
    loses the newline suppression; `print(..., end=' ')` is the Python 3
    equivalent of the intended behaviour.
    """
    for i in range(len(s)):
        print(s[-i - 1], end=' ')
    print('\n')
# Prompt for a string and print it reversed with both implementations.
# Fixed: raw_input() was removed in Python 3; input() is the equivalent.
s = input('Enter string:')
flip_str1(s)
flip_str2(s)
986,234 | ea907cdd4e7d9feed846384c1ef5c8d72edbe5ee | # Tree iteration module
#
# A finite tree whose nodes childs are totally ordered from, say,
# left to right, can be iterated depth-first, see Wikipedia.
# To be able to reconstruct the tree-structure after iteration,
# one may pass either the current depth or the change in depth from
# the previous node (called step) during iteration.
# E.g. Python uses indentation to indicate a blocks depth, while C uses
# the delimiters "{" and "}" to indicate a change in depth.
# The functions depth2step and step2depth translate between those
# two representations.
def depth2step(it, bot=0, le=lambda x, y: x <= y):
    """Translate an iterable of node depths into depth *changes* (steps).

    `bot` is the sentinel depth below every real depth; `le` is the
    ordering predicate on depths.
    """
    return depth2step_Ctx(it, bot, le).main()


class depth2step_Ctx:
    """Stateful helper carrying the stack of currently-open depths."""

    def __init__(self, it, bot, le):
        self.it = it
        self.le = le
        self.bot = bot
        self.stack = [bot]

    def main(self):
        """Yield one step per depth consumed from the iterator."""
        for depth in self.it:
            yield self.getStep(depth)

    def getStep(self, depth):
        """Return the signed depth change from the previous node to *depth*."""
        top = self.stack[-1]
        if top == depth:
            return 0
        if self.le(top, depth):
            # Going deeper: open a new level.
            self.stack.append(depth)
            return 1
        # Going shallower: close levels until we land on a known depth.
        delta = 0
        while not self.le(self.stack[-1], depth):
            self.stack.pop()
            delta -= 1
        if self.stack[-1] != depth:
            # Landed between two previously-seen depths: malformed input.
            raise UpDownError()
        return delta
def step2depth(it):
    """Convert an iterable of depth deltas back into absolute depths."""
    level = 0
    for delta in it:
        level += delta
        yield level
class UpDownError(Exception):
    """Raised when a depth sequence closes to a level that was never opened."""
if __name__ == "__main__":
    # Round-trip self-test: depths -> steps -> depths.
    print("Testing module treeit")
    depths = (1, 2, 3, 2, 2, 3, 1, 0)
    steps = (1, 1, 1, -1, 0, 1, -2, -1)
    assert tuple(depth2step(depths)) == steps
    assert tuple(step2depth(steps)) == depths
986,235 | 4c370d77f2e8c8ec9ff0d2a3f5f5a0e27c0f7f3e | import sys
# Render the digit string `n` as seven-segment-style ASCII art.
# Input: one stdin line "s n" where s is the segment size and n the digits.
# Each digit cell is (s+2) wide and (2*s+3) tall; '|' draws vertical
# segments, '-' horizontal ones.
s, n = sys.stdin.readline().split()
s = int(s)
n_length = len(n)
width, height = s + 2, 2 * s + 3
total_width = width * n_length
matrix = [[] for _ in range(height)]
for index in range(n_length):
    number = int(n[index])
    # Start from a full figure-8 (all seven segments lit) ...
    inner_matrix = [[' ' for _ in range(width)] for _ in range(height)]
    mid_height = height // 2
    end_width, end_height = width - 1, height - 1
    for i in range(height):
        for j in range(width):
            if j == 0 or j == end_width:
                inner_matrix[i][j] = '|'
            if i == 0 or i == mid_height or i == end_height:
                inner_matrix[i][j] = '-'
    # ... blank the six segment corners ...
    inner_matrix[0][0] = inner_matrix[0][end_width] = inner_matrix[mid_height][0] = inner_matrix[mid_height][end_width] = inner_matrix[end_height][0] = inner_matrix[end_height][end_width] = ' '
    # ... then erase the segments each digit does NOT use.
    if number == 0:
        for j in range(width):
            inner_matrix[mid_height][j] = ' '
    elif number == 1:
        for j in range(width):
            inner_matrix[0][j] = ' '
            inner_matrix[mid_height][j] = ' '
            inner_matrix[end_height][j] = ' '
        for j in range(height):
            inner_matrix[j][0] = ' '
    elif number == 2:
        for j in range(mid_height):
            inner_matrix[j][0] = ' '
        for j in range(mid_height, height):
            inner_matrix[j][end_width] = ' '
    elif number == 3:
        for j in range(height):
            inner_matrix[j][0] = ' '
    elif number == 4:
        for j in range(width):
            inner_matrix[0][j] = ' '
            inner_matrix[end_height][j] = ' '
        for j in range(mid_height, height):
            inner_matrix[j][0] = ' '
    elif number == 5:
        for j in range(mid_height):
            inner_matrix[j][end_width] = ' '
        for j in range(mid_height, height):
            inner_matrix[j][0] = ' '
    elif number == 6:
        for j in range(mid_height):
            inner_matrix[j][end_width] = ' '
    elif number == 7:
        for j in range(height):
            inner_matrix[j][0] = ' '
        for j in range(width):
            inner_matrix[mid_height][j] = ' '
            inner_matrix[end_height][j] = ' '
    elif number == 8:
        pass  # 8 uses all seven segments
    elif number == 9:
        for j in range(mid_height, height):
            inner_matrix[j][0] = ' '
    # Append this digit's cell (plus one column of spacing) to each row.
    for i in range(height):
        matrix[i].extend(inner_matrix[i])
        matrix[i].append(' ')
for value in matrix:
    for element in value:
        print(element, end = '')
    print("")
|
986,236 | 25885be7d5d4755e3c80d52fd85c60067d512235 | from interfaces.prediction_network import PredictionNetwork
from TicTacToe.tick_tack_toe_state import TickTackToeState
import torch
from alpha_network.alpha_network import AlphaNetwork
class TickTackToePredictionNetwork(PredictionNetwork):
    """Adapts a trained network to the PredictionNetwork interface for tic-tac-toe."""

    def __init__(self, network):
        self._network = network

    def predict(self, state):
        """
        :param state: The TickTackToeState to predict for
        :return: a pair (action_probability_pairs, value) where
            action_probability_pairs is a list of (action, probability) pairs
            predicted by the network, and value is the network's estimate of
            the current player's winning probability.
        """
        assert isinstance(state, TickTackToeState)
        raw_probabilities, value = self._network.predict(state.convert_to_tensor())
        legal_actions = state.all_possible_actions()
        legal_cells = {(action.row, action.col) for action in legal_actions}
        # Zero out probability mass assigned to occupied cells.
        for cell_row in range(3):
            for cell_col in range(3):
                if (cell_row, cell_col) not in legal_cells:
                    raw_probabilities[cell_row * 3 + cell_col] = 0
        # Renormalise over the remaining legal moves.
        raw_probabilities = raw_probabilities / sum(raw_probabilities)
        action_probability_pairs = [
            (action, raw_probabilities[action.row * 3 + action.col].item())
            for action in legal_actions
        ]
        return action_probability_pairs, value.item()

    def translate_to_action_probabilities_tensor(self, action_mcts_probability_pairs):
        """Pack (action, MCTS probability) pairs into a 1x9 double tensor."""
        tensor = torch.zeros([1, 9], dtype=torch.double)
        for action, mcts_probability in action_mcts_probability_pairs:
            tensor[0, action.row * 3 + action.col] = mcts_probability
        return tensor
|
986,237 | c3e06f23510cf992a638f080966b5aa6b1f60ba6 | import unittest
from filter import *
class TestCases(unittest.TestCase):
    """Tests for the list-filtering helpers in filter.py.

    Fixed: assertAlmostEqual was used on lists; when the lists differ its
    fallback (round(first - second, 7)) raises TypeError instead of a
    clean assertion failure. assertEqual is the correct comparison.
    NOTE(review): test_positive_t_2 duplicates test_positive_t_1 exactly;
    kept to preserve the test count, but it adds no coverage.
    """

    def test_positive_t_1(self):
        self.assertEqual(are_positive([-1, 10, -30, 20, 0]), [10, 20])

    def test_positive_t_2(self):
        self.assertEqual(are_positive([-1, 10, -30, 20, 0]), [10, 20])

    def test_are_greater_than_n_1(self):
        self.assertEqual(are_greater_than_n([1, 2, 3, 4, 5, 6, 7, 8], 4), [5, 6, 7, 8])

    def test_are_greater_than_n_2(self):
        self.assertEqual(are_greater_than_n([-1, -5, -61, 2, 3, 4, 5, 6, 7, 8], 0), [2, 3, 4, 5, 6, 7, 8])

    def test_are_divisible_by_n_1(self):
        self.assertEqual(are_divisible_by_n([2, 3, 4, 5, 100, -2], 2), [2, 4, 100, -2])

    def test_are_divisible_by_n_2(self):
        self.assertEqual(are_divisible_by_n([2.5, 3, 33, 11, 12, 99], 3), [3, 33, 12, 99])

if __name__ == '__main__':
    unittest.main()
|
986,238 | b9ca3c041d550867722e6262b2021ab24f268058 | from django.core.mail import send_mail
from django.template.loader import render_to_string
from rest_framework.permissions import AllowAny
from rest_framework.views import APIView
from rest_framework.response import Response
from mail.api.serializers import SendMyMailSerializer
from mail.models import MyMailConfig, MyMailContent
class SendMyMail(APIView):
    """Store a MyMailContent row and send it as an HTML email.

    Credentials come from the first MyMailConfig row. Fixes from review:
    the attachment was fetched from request.FILES twice, and the
    MyMailContent.objects.create call was duplicated across the two
    attachment branches.
    """
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        serializer = SendMyMailSerializer(data=request.data)
        # raise_exception=True returns a 400 response on invalid input.
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        mail_attachment = request.FILES.get('mail_attachment')
        create_kwargs = {
            'mail_to': data['mail_to'],
            'mail_content': data['mail_content'],
            'mail_subject': data['mail_subject'],
        }
        if mail_attachment:
            create_kwargs['mail_attachment'] = mail_attachment
        try:
            mail_content_obj = MyMailContent.objects.create(**create_kwargs)
        except Exception as e:
            return Response({'message': 'Exception ' + str(e)})
        my_config_obj = MyMailConfig.objects.all()[0]
        to = mail_content_obj.mail_to
        plain_message = None  # HTML-only mail; no plain-text alternative
        from_email = my_config_obj.email_id
        subject = mail_content_obj.mail_subject
        if mail_attachment:
            message_text = render_to_string('send_my_mail_attachment.html', {
                'mail_to': mail_content_obj.mail_to,
                'user': my_config_obj.email_id,
                'content': mail_content_obj.mail_content,
                'url': mail_content_obj.mail_attachment.url
            })
        else:
            message_text = render_to_string('send_my_mail.html', {
                'mail_to': mail_content_obj.mail_to,
                'user': my_config_obj.email_id,
                'content': mail_content_obj.mail_content
            })
        send_mail(subject, plain_message, from_email, [to], html_message=message_text,
                  auth_user=my_config_obj.email_id,
                  auth_password=my_config_obj.email_password)
        return Response({'message': 'Mail sent successfully'})
|
986,239 | 20ed92d3fcdd0787f980710b87db38b6d5925158 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        BFS with an explicit (level, node) queue. `curLayer` accumulates
        values for the level currently being drained and is flushed to
        `res` when the first node of a deeper level is dequeued.
        """
        if root is None:
            return []
        res = []
        curLayer = []
        curLayerIdx = 0
        stack = []
        stack.append((0, root))
        while stack:
            layerIdx, root = stack.pop(0)  # O(n) pop; a deque would be O(1)
            # process node
            if not curLayer:
                curLayer.append(root.val)
            else:
                if curLayerIdx != layerIdx:
                    # First node of the next level: flush the finished one.
                    res.append(curLayer)
                    curLayerIdx = layerIdx
                    curLayer = [root.val]
                else:
                    curLayer.append(root.val)
            # get children and add to stack
            if root.left:
                stack.append((layerIdx+1, root.left))
            if root.right:
                stack.append((layerIdx+1, root.right))
        res.append(curLayer)  # flush the last, never-flushed level
        return res
class SolutionV1(object):
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        BFS with two queues: `stack` holds the level being drained while
        `stackTmp` collects the next level; `curLayer` is flushed when the
        current level empties.
        """
        if root is None:
            return []
        res = []
        curLayer = []
        stack = [root]
        stackTmp = []
        while stack or stackTmp:
            if not stack:
                # Current level exhausted: record it and move to the next.
                res.append(curLayer)
                stack = stackTmp
                stackTmp, curLayer = [], []
            root = stack.pop(0)
            # process node
            curLayer.append(root.val)
            # get children and add to stack
            if root.left:
                stackTmp.append(root.left)
            if root.right:
                stackTmp.append(root.right)
        res.append(curLayer)  # flush the final level
        return res
class SolutionV2(object):
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Batch BFS: process a whole frontier (one tree level) per pass.
        """
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return levels
class SolutionV3(object):
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Single-queue BFS: take len(queue) as the current level's width and
        drain exactly that many nodes per pass.
        """
        if root is None:
            return []
        levels = []
        queue = [root]
        while queue:
            width = len(queue)
            level_vals = []
            for _ in range(width):
                node = queue.pop(0)
                level_vals.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            levels.append(level_vals)
        return levels
class SolutionV4(object):
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Depth-first traversal that appends each value to the bucket for
        its depth, creating buckets on first visit of each depth.
        """
        if root is None:
            return []
        levels = []
        self._visit(root, levels, 0)
        return levels

    def _visit(self, node, levels, depth):
        # Preorder DFS; `depth` indexes the level bucket.
        if node is None:
            return
        if depth == len(levels):
            levels.append([])
        levels[depth].append(node.val)
        self._visit(node.left, levels, depth + 1)
        self._visit(node.right, levels, depth + 1)
|
986,240 | 93380859be49e6f1755fff927a57579bd2402b34 | import stripe
stripe.api_key = 'zGG4QHGhMlBElVBCioobCucrHJdaoeFP'
def post_to_stripe(order_data):
    '''
    Takes order data dict, extracts necessary data, and passes it to stripe
    for processing.

    Expects keys 'token', 'subtotal', 'tax'. Returns the stripe Charge object.
    '''
    token = order_data['token']
    sub_total = order_data['subtotal']
    tax = order_data['tax']
    # NOTE(review): the inline comment says the amount is in cents, but the
    # dollar total is scaled by 1000 here rather than 100 — confirm the
    # intended unit before relying on this.
    total = int((sub_total + tax) * 1000)
    charge = stripe.Charge.create(
        amount = total, # amt in cents
        currency = 'usd',
        card = token,
        description = 'samplepayment'
    )
    return charge
def check_payment(stripe_data, **kwargs):
    '''
    Takes a dictionary of stripe data and kwargs that represent keys in the
    dictionary and the values that each key should have. Checks the dictionary
    to see that each key yields the desired value. Returns True if so,
    False if not.
    '''
    for key, expected in kwargs.items():
        if stripe_data[key] != expected:
            # Fixed: previously printed the misspelled name `stripe_date`
            # (a NameError) via a Python 2 print statement.
            print(stripe_data[key])
            return False
    return True
|
986,241 | 27385c92b54429caa6bef058bfd71ba649559e1b |
from subprocess import check_output
from bs4 import BeautifulSoup
from pprint import pprint
import sqlite3
import re
if __name__ == "__main__":
    # Fetch a URL's HTML via curl. NOTE(review): shell=True with string
    # interpolation is shell-injection-prone; the links here are
    # hard-coded, but a list-argument subprocess call would be safer.
    html = lambda link: check_output("curl -s %s" % link,
                                     shell=True).decode("utf-8")
    # http://tamilnation.co/literature/kural/kaviyogi/tks1a.htm
    # https://ilakkiyam.com/thirukural
    # Build the (Tamil, English, transliteration) section labels, repeated
    # once per chapter so they can be zipped against the chapter list.
    Paal, Iyal, Adigaaram = [], [], []
    Paal += [("அறத்துப்பால்", "Righteousness", "Arathuppal")] * 37
    Paal += [("பொருட்பால்", "Wealth", "Porutpaal")] * 71
    Paal += [("காமத்துப்பால்", "Love", "Kaamathuppal")] * 25
    Iyal += [("பாயிரவியல்", "Prologue", "Paayiraviyal")] * 4
    Iyal += [("இல்லறவியல்", "Domestic Virtue", "Illaraviyal")] * 20
    Iyal += [("துறவறவியல்", "Ascetic Virtue", "Thuravaraviyal")] * 13
    Iyal += [("ஊழியல்", "Fate", "Oozhiyal")] * 1
    Iyal += [("அரசியல்", "Royalty", "Arasiyal")] * 25
    Iyal += [("அமைச்சியல்", "Ministers of State", "Amaichiyal")] * 10
    Iyal += [("அங்கவியல்", "Politics", "Angaviyal")] * 22
    Iyal += [("ஒழிபியல்", "Miscellaneous", "Ozhibiyal")] * 13
    Iyal += [("களவியல்", "The Pre-marital love", "Kalaviyal")] * 7
    Iyal += [("கற்பியல்", "The Post-marital love", "Karpiyal")] * 18
    # Scrape the chapter index table for per-chapter metadata.
    link = "http://www.ytamizh.com/thirukuralchapters/"
    soup = BeautifulSoup(html(link), "html.parser")
    t = soup.find_all("tr")[1:]
    for chap in t:
        info = [j.contents[0] for j in chap.find_all("td")][:-1]
        info[1] = [j.contents[0] for j in chap.find_all("a")][0]
        Adigaaram.append(tuple(info))
    # One row per kural (10 kurals per chapter), pre-filled with metadata.
    Kural = [list(sum(i,())) for i in zip(Paal, Iyal, Adigaaram) for j in range(10)]
    no = 0
    # Scrape each chapter page and append the verse text/commentaries.
    for chap in range(1, 134):
        link = "http://www.ytamizh.com/thirukural/chapter-%s/" % chap
        soup = BeautifulSoup(html(link), "html.parser")
        kurals = soup.find_all("div", attrs={"class": "kural_sub"})
        for kural in kurals:
            for k in kural.find_all('p'):
                K = [re.sub(r"<br/>|\r", "", str(i)) for i in k]
                Kural[no].append("".join(K))
            Kural[no].insert(0, no+1)
            no += 1
    print("started")
    # Persist everything into a local SQLite database.
    with sqlite3.connect("kural.db") as conn:
        cur = conn.cursor()
        cur.execute("""
        CREATE TABLE IF NOT EXISTS kural (
            no INTEGER PRIMARY KEY,
            paal_ta TEXT,
            paal_en TEXT,
            paal_te TEXT,
            iyal_ta TEXT,
            iyal_en TEXT,
            iyal_te TEXT,
            adigaaram_no INTEGER,
            adigaaram_ta TEXT,
            adigaaram_en TEXT,
            adigaaram_te TEXT,
            kural_ta TEXT,
            varadarasan TEXT,
            paapaya TEXT,
            kural_en TEXT,
            en_meaning TEXT,
            kural_te TEXT
        )""")
        for rec in Kural:
            cur.execute("""
            INSERT INTO kural values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
            """, rec)
#EOF
|
986,242 | 8e6a05750706a43d5f2824912a18098e18195856 | # Given two strings, check to see if they are anagrams.
# An anagram is when the two strings can be written using the exact same
# letters (so you can just rearrange the letters to get a different phrase or word).
# For example:
# "public relations" is an anagram of "crap built on lies."
# "clint eastwood" is an anagram of "old west action"
# Note: Ignore spaces and capitalization. So "d go" is an anagram of "God" and "dog" and "o d g".
# This solution manually checks for spaces as the dictonary is built
# the strings could also be manually have their white space removed as in anagram_sorted
def anagram(x, y):
    """Return True if x and y are anagrams, ignoring spaces and case.

    Counts characters of x, then consumes them with y; any surplus or
    deficit means the strings are not anagrams.

    Fixes from review: the original compared characters with `is not ' '`
    and counts with `is not 0` — identity comparisons against literals,
    which are unreliable and a SyntaxWarning on modern CPython; it also
    shadowed the builtin `dict` and, despite the file's stated spec
    ("Ignore spaces and capitalization"), was case-sensitive.
    """
    counts = {}
    for ch in x.lower():
        if ch != ' ':
            counts[ch] = counts.get(ch, 0) + 1
    for ch in y.lower():
        if ch == ' ':
            continue
        if ch not in counts:
            return False
        counts[ch] -= 1
    return all(v == 0 for v in counts.values())
# not the optimal solution as timsort is O(n log n)
# However, this is the optimal memory size solution
def anagram_sorted(x, y):
    """Space- and case-insensitive anagram check via sorted character lists.

    O(n log n) from the sort, but minimal extra memory.
    """
    canonical_x = sorted(x.replace(' ', '').lower())
    canonical_y = sorted(y.replace(' ', '').lower())
    return canonical_x == canonical_y
# Smoke-test the dictionary implementation on the examples from the prompt.
print(anagram('clint eastwood', 'old west action'))
print(anagram('clint eastgood', 'old west action'))
print(anagram(' d o g ', ' g o d '))
986,243 | 4e9c25955c1c7046434e23aeb68bdfffcac78cdf | # -*- coding: utf-8 -*-
import sys,os
import math
import re
import argparse
import json
import ast
import copy
from subprocess import Popen, PIPE
from operator import itemgetter
from abc import ABCMeta,abstractmethod
from emap import EMAP
sys.path.append(os.path.join(os.path.dirname(__file__), '../utils/'))
from collection_stats import CollectionStats
from query import Query
from judgment import Judgment
from evaluation import Evaluation
from performance import Performances
from gen_doc_details import GenDocDetails
import numpy as np
import scipy.stats
from scipy.optimize import curve_fit
import unittest
class RealModels(object):
    """
    all kinds of real models: retrieval scoring functions (BM25/okapi and
    several TF / TF-IDF / length-normalised TF variants) computed over
    per-term statistics.

    Most methods take `tf` as a 2-D numpy matrix (one row per query term),
    `df` as per-term document frequencies and `doclen` as a numpy array of
    document lengths; `tf4`, `tfln3` and `tfln5` instead take a single
    `row` mapping with 'total_tf'/'doc_len' keys — an API inconsistency
    inherited from the call sites.
    """
    def __init__(self):
        super(RealModels, self).__init__()
    def okapi_apply1(self, tf, idf, k1=1.2):
        # BM25 numerator for one term row.
        return (k1+1)*idf*tf
    def okapi_apply2(self, tf, r2):
        # BM25 denominator: tf + k1*(1-b+b*dl/avdl).
        return tf+r2
    def okapi(self, collection_stats, tf, df, doclen, k1=1.2, b=0.35):
        """
        okapi / BM25 score summed over query terms.
        """
        # +1/+1e-4 guard against zero document counts / frequencies.
        idfs = np.log((collection_stats.get_doc_counts() + 1)/(df+1e-4))
        avdl = collection_stats.get_avdl()
        r1 = np.apply_along_axis(self.okapi_apply1, 0, tf, idfs, k1)
        r2 = k1*(1-b+b*doclen/avdl)
        r3 = np.apply_along_axis(self.okapi_apply2, 1, tf, r2)
        return np.sum(r1/r3, axis=0)
    def tf1(self, collection_stats, tf, df, doclen):
        """
        tf - numpy matrix (even if there is only one term), each row is the tf values for each term
        doclen - numpy array
        """
        return np.sum(tf, axis=0)
    def tf4(self, collection_stats, row):
        """
        1+log(1+log(tf))
        """
        return round(1+math.log(1+math.log(int(row['total_tf']))), 3)
    def tf5_apply(self, tf, k):
        return tf/(tf+k)
    def tf5(self, collection_stats, tf, df, doclen):
        """
        tf/(tf+k) k=1.0 default
        """
        k = 1.0
        r = np.apply_along_axis(self.tf5_apply, 0, tf, [k])
        return np.sum(r, axis=0)
    def tfidf1_apply(self, tf, idf):
        return idf*tf/(tf+1)
    def tfidf1(self, collection_stats, tf, df, doclen):
        """
        tf/(tf+k) * idf k=1.0 default
        """
        idfs = np.log((collection_stats.get_doc_counts() + 1)/(df+1e-4))
        r = np.apply_along_axis(self.tfidf1_apply, 0, tf, idfs)
        return np.sum(r, axis=0)
    def tfln1_apply(self, tf, doclen):
        return tf/doclen
    def tfln1(self, collection_stats, tf, df, doclen):
        """
        tf/dl
        """
        r = np.apply_along_axis(self.tfln1_apply, 0, tf, doclen)
        return np.sum(r, axis=0)
    def tfln3(self, collection_stats, row):
        """
        log(tf)/(tf+log(dl))
        """
        return round(np.log(float(row['total_tf']))/(float(row['total_tf'])+np.log(float(row['doc_len']))), 3)
    def tfln5(self, collection_stats, row, delta=2.75):
        """
        (log(tf)+delta)/(tf+log(dl))
        """
        return round((np.log(float(row['total_tf']))+delta)/np.log(float(row['doc_len'])), 3)
    def get_func_mapping(self, method_name='tf1', para_str=''):
        # Map a method name string to (scoring function, formal label).
        # NOTE(review): unknown names fall through and raise
        # UnboundLocalError on x_func — confirm callers only pass the
        # names listed here.
        formal_method_name = method_name
        if method_name == 'okapi':
            x_func = self.okapi
            formal_method_name = 'okapi,'+para_str #e.g. 'b:0.0'
        elif method_name == 'tf1':
            x_func = self.tf1
        elif method_name == 'tf4':
            x_func = self.tf4
        elif method_name == 'tf5':
            x_func = self.tf5
        elif method_name == 'tfln1':
            x_func = self.tfln1
        elif method_name == 'tfln3':
            x_func = self.tfln3
        elif method_name == 'tfln5':
            x_func = self.tfln5
        elif method_name == 'tfidf1':
            x_func = self.tfidf1
        return x_func, formal_method_name
class FittingModels(object):
    """
    All kinds of fitting models: mixtures of exponential / lognormal /
    normal / gamma / Poisson distributions plus several decay curves,
    fitted with scipy.optimize.curve_fit.

    Fixes from review: the bare `except:` in cal_curve_fit (which also
    swallowed KeyboardInterrupt/SystemExit) is narrowed to Exception, and
    an out-of-range mode now raises ValueError instead of crashing with
    UnboundLocalError.
    """
    def __init__(self):
        super(FittingModels, self).__init__()

    def size(self):
        # Number of supported fitting modes (keep in sync with
        # curve_fit_mapping and the p0/bounds table below).
        return 14

    def mix_expon1(self, xaxis, l):
        return scipy.stats.expon(scale=1.0/l).pdf(xaxis)

    def mix_expon2(self, xaxis, pi, l1, l2):
        return pi*scipy.stats.expon(scale=1.0/l1).pdf(xaxis) + (1-pi)*scipy.stats.expon(scale=1.0/l2).pdf(xaxis)

    def mix_expon3(self, xaxis, pi1, pi2, l1, l2, l3):
        return pi1*scipy.stats.expon(scale=1.0/l1).pdf(xaxis) + pi2*scipy.stats.expon(scale=1.0/l2).pdf(xaxis) + (1-pi1-pi2)*scipy.stats.expon(scale=1.0/l3).pdf(xaxis)

    def mix_expdecay1(self, xaxis, n0, l):
        return n0*np.exp(-l*xaxis)

    def mix_expdecay2(self, xaxis, pi, n01, n02, l1, l2):
        return pi*n01*np.exp(-l1*xaxis) + (1-pi)*n02*np.exp(-l2*xaxis)

    def asymptotic_decay(self, xaxis, n0, halflife):
        return n0*(1 - xaxis/(xaxis+halflife))

    def power_decay(self, xaxis, n0, halflife):
        return n0*np.power(xaxis, -halflife)

    def mix_lognormal1(self, xaxis, sigma):
        return scipy.stats.lognorm.pdf(xaxis, sigma)

    def mix_lognormal2(self, xaxis, pi, sigma1, sigma2):
        return pi*scipy.stats.lognorm.pdf(xaxis, sigma1)+(1-pi)*scipy.stats.lognorm.pdf(xaxis, sigma2)

    def mix_normal1(self, xaxis, mu, sigma):
        return scipy.stats.norm.pdf(xaxis, loc=mu, scale=sigma)

    def mix_normal2(self, xaxis, pi, mu1, mu2, sigma1, sigma2):
        return pi*scipy.stats.norm.pdf(xaxis, loc=mu1, scale=sigma1)+(1-pi)*scipy.stats.norm.pdf(xaxis, loc=mu2, scale=sigma2)

    def mix_gamma1(self, xaxis, a):
        return scipy.stats.gamma.pdf(xaxis, a)

    def mix_gamma2(self, xaxis, pi, a1, a2):
        return pi*scipy.stats.gamma.pdf(xaxis, a1)+(1-pi)*scipy.stats.gamma.pdf(xaxis, a2)

    def mix_poisson1(self, xaxis, mu):
        return scipy.stats.poisson.pmf(xaxis, mu)

    def mix_poisson2(self, xaxis, pi, mu1, mu2):
        return pi*scipy.stats.poisson.pmf(xaxis, mu1)+(1-pi)*scipy.stats.poisson.pmf(xaxis, mu2)

    def curve_fit_mapping(self, i):
        # 1-based mode index -> model function.
        fitting_list = [self.mix_expon1, self.mix_expon2, self.mix_lognormal1,
            self.mix_lognormal2, self.mix_normal1, self.mix_normal2, self.mix_gamma1,
            self.mix_gamma2, self.mix_poisson1, self.mix_poisson2, self.asymptotic_decay,
            self.power_decay, self.mix_expdecay1, self.mix_expdecay2]
        return fitting_list[i-1]

    def cal_curve_fit(self, xaxis, yaxis, mode=1):
        """Fit yaxis ~ f(xaxis) for the model selected by `mode`.

        :return: [mode, short_name, popt, fitted_y, L1_error, ks_2samp_result]
                 or None if the fit fails to converge.
        :raises ValueError: if mode is outside [1, size()].
        """
        if mode == 1:
            p0 = [1]
            bounds = ([0], [np.inf])
            func_name = 'EXP'
        elif mode == 2:
            p0 = [0.5, 2, 0.5]
            bounds = ([0, 0, 0], [1, np.inf, np.inf])
            func_name = '2-EXP'
        elif mode == 3:
            p0 = [1]
            bounds = ([-np.inf], [np.inf])
            func_name = 'LN'
        elif mode == 4:
            p0 = [0.45, 1, 1]
            bounds = ([0, -np.inf, -np.inf], [1, np.inf, np.inf])
            func_name = '2-LN'
        elif mode == 5:
            p0 = [0, 1]
            bounds = ([-np.inf, 0], [np.inf, np.inf])
            func_name = 'NN'
        elif mode == 6:
            p0 = [0.45, 0, 0, 1, 1]
            bounds = ([0, -np.inf, -np.inf, 0, 0], [1, np.inf, np.inf, np.inf, np.inf])
            func_name = '2-NN'
        elif mode == 7:
            p0 = [1]
            bounds = ([0], [np.inf])
            func_name = 'GA'
        elif mode == 8:
            p0 = [0.45, 1, 1]
            bounds = ([0, 0, 0], [1, np.inf, np.inf])
            func_name = '2-GA'
        elif mode == 9:
            p0 = [1]
            bounds = ([0], [np.inf])
            func_name = 'PO'
        elif mode == 10:
            p0 = [0.45, 1, 1]
            bounds = ([0, 0, 0], [1, np.inf, np.inf])
            func_name = '2-PO'
        elif mode == 11:
            p0 = [1, 2]
            bounds = ([0, 0], [np.inf, np.inf])
            func_name = 'AD'
        elif mode == 12:
            p0 = [1, 2]
            bounds = ([0, 0], [np.inf, np.inf])
            func_name = 'PD'
        elif mode == 13:
            p0 = [1, 1]
            bounds = ([0, 0], [np.inf, np.inf])
            func_name = 'ED'
        elif mode == 14:
            p0 = [0.5, 1, 1, 2, 0.5]
            bounds = ([0, 0, 0, 0, 0], [1, np.inf, np.inf, np.inf, np.inf])
            func_name = '2-ED'
        else:
            # Previously fell through to an UnboundLocalError on p0.
            raise ValueError('mode must be in [1, %d]' % self.size())
        xaxis = np.asarray(xaxis)
        func = self.curve_fit_mapping(mode)
        try:
            fit_sigma = np.linspace(0.001, 0.01, len(xaxis)) # we want the points with larger value has larger weights,
            # this is suitable for non-continuous metrics like AP and nDCG
            popt, pcov = curve_fit(func, xaxis, yaxis, p0=p0, method='trf', bounds=bounds, sigma=fit_sigma)
            trialY = func(xaxis, *popt)
        except Exception:
            # Narrowed from a bare `except:`; convergence failures yield None.
            return None
        return [mode, func_name, popt, trialY, np.absolute(trialY-yaxis).sum(), scipy.stats.ks_2samp(yaxis, trialY)]
class CalEstMAP(object):
    """
    Compute the estimated MAP from the fitted models for relevant and
    non-relevant documents.
    """
    def __init__(self):
        super(CalEstMAP, self).__init__()

    def cal_map(self, rel_docs=None, non_reldocs=None, all_docs=None, mode=1):
        """Estimate MAP.

        :param rel_docs: per-bin relevant document counts
        :param non_reldocs: per-bin non-relevant counts (mode 2 only)
        :param all_docs: per-bin total document counts
        :param mode: 1 - discrete distributions for all docs and rel docs
                     (suitable for TF functions);
                     2 - continuous distributions for rel/non-rel docs (TODO).
        :raises RuntimeError: if mode is not 1 or 2

        Fixes from review: mutable default arguments replaced with None
        sentinels, and rel_docs is no longer clamped *in place* (the old
        code silently mutated the caller's list).
        """
        rel_docs = [] if rel_docs is None else rel_docs
        non_reldocs = [] if non_reldocs is None else non_reldocs
        all_docs = [] if all_docs is None else all_docs
        if mode == 1:
            assert len(rel_docs) == len(all_docs)
            # Clamp relevant counts to the bin totals on a copy.
            clamped = [min(rel, total) for rel, total in zip(rel_docs, all_docs)]
            # Materialise the pairs so EMAP can iterate them freely.
            return EMAP().cal_expected_map(list(zip(clamped, all_docs)))
        elif mode == 2:
            # TODO: implement
            return 0.0
        else:
            raise RuntimeError('mode must be in [1,2]')
class EM(object):
    """
    Expectation Maximization algorithm for mixtures of exponentials.
    """
    def __init__(self):
        super(EM, self).__init__()

    def exponential(self, data=(), init_lambdas=(1, 0.75), max_iteration=500):
        """
        Fit a mixture of exponentials (one component per entry of
        `init_lambdas`) to `data` by EM.

        :return: (lambdas, coefficients) after the final iteration.

        Fixes from review: the Python 2 `print` statement is converted to
        print(), the mutable default arguments are replaced by tuples, and
        the unused `xaxis` local is dropped.
        """
        data = np.array(data)
        idx = 1
        lambdas = np.array(init_lambdas)
        while idx < max_iteration:
            # E-step: per-component likelihood and responsibilities.
            y = [lmbda*np.exp(data*(-lmbda)) for lmbda in lambdas]
            weights = y/np.sum(y, axis=0)
            # M-step: update mixing coefficients and rate parameters.
            coefficients = np.mean(weights, axis=1)
            lambdas = np.sum(weights, axis=1)/np.sum(weights*data, axis=1)
            idx += 1
            print(lambdas, coefficients)
        return lambdas, coefficients
class Test(unittest.TestCase):
    """Placeholder test case; no tests defined yet."""
class SD(object):
    """base class of
    score distribution for rankings

    Subclasses (GammaSD, LognormalSD) fit parametric score distributions
    for the relevant (subscript 1) and non-relevant (subscript 0) document
    scores of one query's ranking, then integrate an estimated
    precision-recall curve to approximate average precision (AUPR).
    NOTE(review): debug output uses Python 2 print statements throughout.
    """
    # Python 2 style abstract class marker; a no-op under Python 3.
    __metaclass__ = ABCMeta
    def __init__(self, ranking_list, distribution_method, debug):
        """
        ranking_list for a specific query(one qid): iterable of
        (score, is_relevant) pairs, assumed sorted by descending score.
        """
        self._ranking_list, self._distribution_method, self._debug = \
            ranking_list, distribution_method, debug
        self._m1 = None      # mean of relevant-doc scores
        self._v1 = None      # variance of relevant-doc scores
        self._m0 = None      # mean of non-relevant-doc scores
        self._v0 = None      # variance of non-relevant-doc scores
        self._lambda = None  # fraction of relevant docs in the ranking
        self._aupr = None
        self._non_rel_distribution = None
        self._rel_distribution = None
    def _compute_stat_from_list(self, l):
        # Mean and (population) variance of a list of scores.
        temp = np.array(l)
        mean = np.mean(temp)
        var = np.var(temp)
        return mean,var
    def _estimate_stats_with_rel_info(self):
        """estimate the statistics of relevant/non-relevant
        distributions(mean/variance: m/v). Note that the subscripts
        "1,0" corresponds statistics of relevant/non-relevant
        """
        nonrel_list = []
        rel_list = []
        for ele in self._ranking_list:
            score = ele[0]
            rel = ele[1]
            if rel:
                rel_list.append(score)
            else:
                nonrel_list.append(score)
        m1,v1 = self._compute_stat_from_list(rel_list)
        m0,v0 = self._compute_stat_from_list(nonrel_list)
        estimated_lambda = len(rel_list)*1.0/len(self._ranking_list)
        self._m1 = m1
        self._v1 = v1
        self._m0 = m0
        self._v0 = v0
        self._lambda = estimated_lambda
        if self._debug :
            print "m1: %f, v1: %f, m0: %f, v0: %f" %(self._m1,self._v1,self._m0,self._v0)
            print "lambda: %f" %(estimated_lambda)
    def _compute_rel_likelihood(self, score):
        return self._rel_distribution.pdf(score)
    def _compute_nonrel_likelihood(self, score):
        return self._non_rel_distribution.pdf(score)
    def _compute_recall(self, score):
        # P(rel score > threshold) under the fitted relevant distribution.
        return 1-self._rel_distribution.cdf(score)
    def _compute_fallout(self, score):
        # P(non-rel score > threshold) under the fitted non-rel distribution.
        return 1-self._non_rel_distribution.cdf(score)
    def _compute_aupr(self):
        # Sweep the score threshold from 2*top_score down to 0 in N steps,
        # computing (recall, precision) at each step, and integrate the PR
        # curve with the trapezoid rule.
        lambda_value = self._lambda
        N = len(self._ranking_list)
        ap = .0
        s1 = self._ranking_list[0][0]
        if self._debug:
            print "top score is %f" % (s1)
        score = 2*s1
        recall = 0
        fallout = 0
        prec = [0]*N
        rec = [0]*N
        ds = score/N
        for i in range(N):
            score = score - ds
            #recall += self._compute_re_likelihood(qid,score)*ds
            #fallout += self._compute_non_re_likelihood(qid,score)*ds
            recall = self._compute_recall(score)
            fallout = self._compute_fallout(score)
            #if qid == "429":
            # print "recall %f" %recall
            # print "fallot %f" %fallout
            denominator = lambda_value*recall + (1-lambda_value)*fallout
            if recall == 0:
                prec[i] = 0
            else:
                prec[i] = (lambda_value*recall)/denominator
            rec[i] = recall
            if i>0:
                # Trapezoid-rule increment of the PR integral.
                ap += (rec[i]-rec[i-1]) * (prec[i]+prec[i-1])/2
        if self._debug:
            print "ap = %f" % (ap)
        return ap
    @property
    def aupr(self):
        if not self._aupr:
            raise RuntimeError("Parameters are not estimated!")
        else:
            return self._aupr
class GammaSD(SD):
    """Score-distribution model using Gamma fits for rel/non-rel scores."""

    def __init__(self, run, debug=False):
        super(GammaSD, self).__init__(run, "gamma", debug)

    def _estimate_para(self, qrel=None):
        # Method-of-moments for Gamma: shape k = m^2/v, scale theta = v/m.
        self._estimate_stats_with_rel_info()
        self._k1 = (self._m1)**2 / self._v1
        self._theta1 = self._v1 / self._m1
        self._k0 = (self._m0)**2 / self._v0
        self._theta0 = self._v0 / self._m0
        if self._debug:
            print("k1: %f, theta1: %f, k0: %f, theta0: %f" % (self._k1, self._theta1, self._k0, self._theta0))

    def estimate_distribution(self, qrel=None):
        self._estimate_para()
        # BUG FIX: scipy.stats.gamma(a, x) treats x as `loc`; the original
        # passed 1/theta positionally, silently shifting the distribution
        # instead of scaling it. The scale must be passed as scale=theta.
        self._rel_distribution = scipy.stats.gamma(self._k1, scale=self._theta1)
        self._non_rel_distribution = scipy.stats.gamma(self._k0, scale=self._theta0)
class LognormalSD(SD):
    """Score-distribution model using lognormal fits for rel/non-rel scores."""

    def __init__(self, run, debug=False):
        super(LognormalSD, self).__init__(run, "lognormal", debug)

    def _estimate_para(self, qrel=None):
        # Method-of-moments for lognormal:
        #   sigma^2 = ln(1 + v/m^2),  mu = ln(m) - sigma^2/2.
        # BUG FIX: mu previously used 0.5*(1 + v/m^2) without the log,
        # inconsistent with the sigma line immediately below it.
        self._estimate_stats_with_rel_info()
        var1 = math.log(1 + (self._v1 / (self._m1 ** 2)))
        self._mu1 = math.log(self._m1 + 1e-6) - 0.5 * var1
        self._sigma1 = math.sqrt(var1)
        var0 = math.log(1 + (self._v0 / (self._m0 ** 2)))
        self._mu0 = math.log(self._m0 + 1e-6) - 0.5 * var0
        self._sigma0 = math.sqrt(var0)
        if self._debug:
            print("mu1: %f, sigma1: %f, mu0: %f, sigma0: %f" % (self._mu1, self._sigma1, self._mu0, self._sigma0))

    def estimate_distribution(self, qrel=None):
        self._estimate_para()
        self._rel_distribution = scipy.stats.lognorm(self._sigma1, scale=math.exp(self._mu1))
        self._non_rel_distribution = scipy.stats.lognorm(self._sigma0, scale=math.exp(self._mu0))
if __name__ == '__main__':
    #unittest.main()
    # Ad-hoc demo: fit a two-exponential mixture to a normalised histogram.
    em = EM()
    a = [70,40,20,10,9,8,7,6,5,4,3,2,2,2,2,2,2,1,1,1,1,1]
    em.exponential(np.asarray(a)*1./np.sum(a))
|
986,244 | d0eaaf750257d6e1bfc8515d931b24e9d67a1d01 | # filter_16_T
# Like filter_16 with prescribed duration
# 16 bit/sample
from math import cos, pi
import pyaudio
import struct
# Fs : Sampling frequency (samples/second)
Fs = 8000
# Also try Fs = 16000 and Fs = 32000
T = 2 # T : Duration of audio to play (seconds)
N = T*Fs # N : Number of samples to play
# Pole location
f1 = 400.0 # Frequency
om1 = 2.0*pi * f1/Fs
Ta = 0.8 # Ta : Time for envelope to decay to 1% (in seconds)
# Try different values of Ta like 0.5, 0.2, 1.5
r = 0.01**(1.0/(Ta*Fs))
print('Fs = %f' % Fs)
print('r = %f' % r)
# Difference equation coefficients
b0 = 1
b1 = -r*cos(om1)
a1 = -2*r*cos(om1)
a2 = r**2
print('b0 = %f' % b0)
print('b1 = %f' % b1)
print('a1 = %f' % a1)
print('a2 = %f' % a2)
# Initialization
x1 = 0.0
y1 = 0.0
y2 = 0.0
gain = 5000.0
p = pyaudio.PyAudio()
stream = p.open(format = pyaudio.paInt16,
channels = 1,
rate = Fs,
input = False,
output = True)
for n in range(0, N):
# Use impulse as input signal
if n == 0:
x0 = 1.0
else:
x0 = 0.0
# Difference equation
y0 = b0 * x0 - b1 * x1 - a1 * y1 - a2 * y2
# Delays
x1 = x0
y2 = y1
y1 = y0
# Output
output_value = gain * y0
output_string = struct.pack('h', int(output_value) ) # 'h' for 16 bits
stream.write(output_string)
print("* Finished *")
stream.stop_stream()
stream.close()
p.terminate()
|
986,245 | 93b11a12f50d1ac59220e7a08ec6ef5a2df5dc79 | from tokenization import BasicTokenizer
import spacy
from spacy.tokens import Doc
# Parse one sentence with spaCy but tokenize it with BERT's BasicTokenizer,
# so the dependency parse aligns with BERT tokens.
line='The switches between clarity and intoxication gave me a headache, but at least the silver-haired faery’s explanation of the queens’ “gifts” helped me understand why I could want to wrap my legs around a creature who terrified me.'
nlp=spacy.load('en_core_web_lg')
tokenizer=BasicTokenizer()
def my_tokenizer(text):
    # Replace spaCy's tokenizer: wrap BERT tokens in a spaCy Doc so the
    # downstream pipeline (tagger/parser) runs on them unchanged.
    bert_tokens=tokenizer.tokenize(text)
    return Doc(nlp.vocab,words=bert_tokens)
nlp.tokenizer=my_tokenizer
doc=nlp(line)
# Print (token, dep label, head, head POS, children) for each token.
print([(t.text,t.dep_,t.head.text,t.head.pos_,[c for c in t.children]) for t in doc])
|
986,246 | eae9de8046ff085cbdf537808438415fc72191a3 | import random
import collections
from PIL import Image, ImageFilter, ImageOps
import numpy as np
import cv2
import torch
import scipy.sparse
from sklearn import svm
import torchvision as tv
from torchvision.transforms import functional as F
import albumentations as albu
image_size = (336,336)
class CropFaceParts(object):
    def __init__(self, image_col='data'):
        # Normalized (x, y) positions of five facial landmarks relative to
        # the face box — presumably eyes, nose and mouth corners; confirm
        # against the landmark convention used upstream.
        self.global_x = [0.27,0.7379,0.5193,0.3210,0.6977]
        self.global_y = [0.4034,0.3984,0.6036,0.7952,0.7891]
        # Tuned constants: box width = height * wh_scale_factor; the image
        # is assumed bbox_scale_factor times larger than the face box; the
        # pixel shifts in the crop methods were tuned at base_image_size.
        self.wh_scale_factor = 0.7765
        self.bbox_scale_factor = 1.6
        self.base_image_size = 250
        self.image_col = image_col  # key of the input image in the item dict
def __call__(self, item_dict):
image = item_dict[self.image_col]
image = np.array(image)
bbox = self._get_average_bbox(image)
eye_crop = self._eye_crop(image,bbox)
item_dict['eyes'] = eye_crop
chin_crop = self._chin_crop(image,bbox)
item_dict['chin'] = chin_crop
nose_crop = self._nose_crop(image,bbox)
item_dict['nose'] = nose_crop
ear_l_crop,ear_r_crop = self._ears_crop(image,bbox)
item_dict['ear_l'] = ear_l_crop
item_dict['ear_r'] = ear_r_crop
return item_dict
    def _get_average_bbox(self, img):
        # Derive a fixed, centered face box (x, y, w, h) from the image
        # size alone — no face detector involved.
        # NOTE(review): height comes from img.shape[1] and x from
        # img.shape[0]; for (H, W, C) numpy images these axes look
        # swapped. Harmless if inputs are square — confirm otherwise.
        bbox_h = img.shape[1] / self.bbox_scale_factor
        bbox_w = bbox_h * self.wh_scale_factor
        bbox_y = (img.shape[1] - bbox_h) / 2
        bbox_x = (img.shape[0] - bbox_w) / 2
        return bbox_x,bbox_y,bbox_w,bbox_h
    def _eye_crop(self, img, bbox):
        """Crop a horizontal band centered between the two eye landmarks.

        Falls back to a 1x1 black image if the crop is empty.
        """
        bbox_x,bbox_y,bbox_w,bbox_h = bbox
        # Midpoint of landmarks 0 and 1 (the two eyes) in box coordinates.
        xc = int((self.global_x[1] * bbox_w + self.global_x[0] * bbox_w) / 2)
        yc = int((self.global_y[1] * bbox_h + self.global_y[0] * bbox_h) / 2)
        # Scale the tuned pixel shifts from base_image_size to this box.
        k = bbox_h* 1.3 / self.base_image_size
        shift=[97,40]
        x0_ = xc - shift[0] * k
        x1_ = xc + shift[0] * k
        y0_ = yc - shift[1] * k
        y1_ = yc + shift[1] * k
        cut = img[max(0, int(bbox_y + y0_)):min(int(bbox_y + y1_), img.shape[0]),
              max(0, int(bbox_x + x0_)):min(int(bbox_x + x1_), img.shape[1])]
        if cut.shape[0] == 0 or cut.shape[1] == 0:
            cut = np.zeros((1, 1, 3)).astype(np.uint8)
        return Image.fromarray(cut)
def _chin_crop(self, img, bbox):
bbox_x,bbox_y,bbox_w,bbox_h = bbox
xc = int((self.global_x[4] * bbox_w + self.global_x[3] * bbox_w) / 2)
yc = int((self.global_y[4] * bbox_h + self.global_y[3] * bbox_h) / 2)
k = bbox_h*1.3 / self.base_image_size
shift=[77,20]
x0_ = xc - shift[0] * k
x1_ = xc + shift[0] * k
y0_ = yc - shift[1] * k
y1_ = yc + 3*shift[1] * k
cut = img[max(0, int(bbox_y + y0_)):min(int(bbox_y + y1_), img.shape[0]),
max(0, int(bbox_x + x0_)):min(int(bbox_x + x1_), img.shape[1])]
if cut.shape[0] == 0 or cut.shape[1] == 0:
cut = np.zeros((1, 1, 3)).astype(np.uint8)
return Image.fromarray(cut)
def _nose_crop(self, img, bbox):
bbox_x,bbox_y,bbox_w,bbox_h = bbox
xc = int(self.global_x[2] * bbox_w)
yc = int(self.global_y[2] * bbox_h)
k = bbox_h*1.3 / self.base_image_size
shift = [40,30]
x0_ = xc - shift[0] * k
x1_ = xc + shift[0] * k
y0_ = yc - shift[1] * k
y1_ = yc + shift[1] * k
cut = img[max(0, int(bbox_y + y0_)):min(int(bbox_y + y1_), img.shape[0]),
max(0, int(bbox_x + x0_)):min(int(bbox_x + x1_), img.shape[1])]
if cut.shape[0] == 0 or cut.shape[1] == 0:
cut = np.zeros((1, 1, 3)).astype(np.uint8)
return Image.fromarray(cut)
def _ears_crop(self, img, bbox):
bbox_x,bbox_y,bbox_w,bbox_h = bbox
xc = int((self.global_x[1] * bbox_w + self.global_x[0] * bbox_w) / 2)
yc = int((self.global_y[1] * bbox_h + self.global_y[0] * bbox_h) / 2)
k = bbox_h*1.3 / self.base_image_size
shift_l = [130,40]
shift_r = [50,40]
x0_ = xc - shift_l[0] * k
x01_ = xc - shift_r[0] * k
x10_ = xc + shift_r[0] * k
x1_ = xc + shift_l[0] * k
y0_ = yc - shift_r[1] * k
y1_ = yc + shift_l[1] * k
cut1 = img[max(0, int(bbox_y + y0_)):min(int(bbox_y + y1_), img.shape[0]),
max(0, int(bbox_x + x0_)):min(int(bbox_x + x01_), img.shape[1])]
cut2 = img[max(0, int(bbox_y + y0_)):min(int(bbox_y + y1_), img.shape[0]),
max(0, int(bbox_x + x10_)):min(int(bbox_x + x1_), img.shape[1])]
if cut1.shape[0] == 0 or cut1.shape[1] == 0:
cut1 = np.zeros((1, 1, 3)).astype(np.uint8)
if cut2.shape[0] == 0 or cut2.shape[1] == 0:
cut2 = np.zeros((1, 1, 3)).astype(np.uint8)
return Image.fromarray(cut1),Image.fromarray(cut2)
def __repr__(self):
return self.__class__.__name__
class AA(object):
    """Strong albumentations pipeline for PIL images: flip, colour jitter,
    blur, random resized crop and a small shift/scale/rotate."""

    def __init__(self):
        pipeline = [
            albu.HorizontalFlip(p=0.5),
            albu.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=50, p=0.4),
            albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.4),
            albu.GaussianBlur(p=0.3),
            albu.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.9), int(image_size[0])),
                                 height=image_size[0],
                                 width=image_size[1], p=0.3),
            albu.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=0.4),
        ]
        self.augs = albu.Compose(pipeline)

    def __call__(self, image):
        """Augment a PIL image and return the result as a PIL image."""
        augmented = self.augs(image=np.array(image))['image']
        return Image.fromarray(augmented)
class AA_soft(object):
    """Mild albumentations pipeline: flip, colour jitter and blur only
    (no geometric distortion)."""

    def __init__(self):
        pipeline = [
            albu.HorizontalFlip(p=0.5),
            albu.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=50, p=0.4),
            albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.4),
            albu.GaussianBlur(p=0.3),
        ]
        self.augs = albu.Compose(pipeline)

    def __call__(self, image):
        """Augment a PIL image and return the result as a PIL image."""
        augmented = self.augs(image=np.array(image))['image']
        return Image.fromarray(augmented)
class MakeTrash(object):
    """Replace the image under *data_key* with a tiny random crop blown back
    up to full size (rendering it "trash") and force the label under
    *target_key* to *final_label*."""

    def __init__(self, data_key=[], target_key=[], final_label=1, trash_size=(10, 30), trash_ratio=(0.25, 4)):
        # NOTE(review): the list defaults for *_key look like placeholders —
        # callers presumably pass string keys; confirm at call sites.
        self.data_key = data_key
        self.target_key = target_key
        self.final_label = final_label
        self.trash_size = trash_size
        self.trash_ratio = trash_ratio

    def __call__(self, item_dict):
        item_dict[self.target_key] = self.final_label
        source = item_dict[self.data_key]
        low, high = self.trash_size
        cropper = tv.transforms.RandomResizedCrop(
            source.size,
            scale=(low / source.size[0], high / source.size[0]),
            ratio=self.trash_ratio,
        )
        item_dict[self.data_key] = cropper(source)
        return item_dict

    def __repr__(self):
        return self.__class__.__name__ + '(trash_size={}-{})'.format(self.trash_size[0], self.trash_size[1])
class Transform4EachKey(object):
    """Apply a sequence of torchvision-style transforms, in order, to each
    of the selected keys of a sample dict."""

    def __init__(self, transforms, key_list=['data']):
        # NOTE(review): mutable default for key_list is shared across
        # instances; it is only read here, so this is safe in practice.
        self.transforms = transforms
        self.key_list = key_list

    def __call__(self, input_dict):
        for key in self.key_list:
            value = input_dict[key]
            for transform in self.transforms:
                value = transform(value)
            input_dict[key] = value
        return input_dict

    def __repr__(self):
        pieces = [
            self.__class__.__name__ + '(',
            '\n',
            self.transforms.__repr__(),
            ',\n',
            str(self.key_list),
            '\n)',
        ]
        return ''.join(pieces)
|
986,247 | 1d39e9ef564236927755decd997b899a5feb81de | from flask import render_template
from mysite import app
@app.route('/')
@app.route('/index')
def index():
    """Landing page rendered for a hard-coded demo user."""
    user = {'username': 'foosinn'}
    return render_template('index.html', title='mysite', user=user)
@app.route('/conditionals')
def conditionals():
    """Demo page for Jinja2 conditionals; logged_in selects the template branch."""
    user = {'username': 'foosinn'}
    return render_template('conditionals.html', title='mysite', user=user, logged_in=True)
@app.route('/loop')
def loop():
    """Demo page iterating over a fixed list of items in the template."""
    user = {'username': 'foosinn'}
    things = [
        {'id': 123, 'name': 'the first item', 'price': 100.09},
        {'id': 124, 'name': 'the second item', 'price': 110.09},
        {'id': 125, 'name': 'the third item', 'price': 104.09},
        {'id': 126, 'name': 'the best item', 'price': 9100.09},
        {'id': 127, 'name': 'the worst item', 'price': 999.99},
    ]
    return render_template('loop.html', title='mysite', user=user, things=things)
|
986,248 | 8d07752c569dd372e6f8c2db6bf74494410b9b5d | import sqlite3
books_file = 'books.txt'
def create_book_table():
    """Create the `books` table in data.db if it does not already exist.

    `name` is the primary key; `read` is 0/1.
    """
    connection = sqlite3.connect('data.db')
    try:
        cursor = connection.cursor()
        cursor.execute('CREATE TABLE IF NOT EXISTS books(name text primary key, author text, read integer)')
        connection.commit()
    finally:
        connection.close()  # close even if the DDL raises
def add_book(name, author):
    """Insert a new, unread book.

    Raises sqlite3.IntegrityError if a book with the same name exists
    (name is the primary key).
    """
    connection = sqlite3.connect('data.db')
    try:
        cursor = connection.cursor()
        # plain string (the original used a pointless f-string); values are
        # bound as parameters, never interpolated
        cursor.execute('INSERT INTO books VALUES(?, ?, 0)', (name, author))
        connection.commit()
    finally:
        connection.close()  # close even if the insert raises
def list_book():
    """Return every book as a dict: {'name': str, 'author': str, 'read': int}."""
    connection = sqlite3.connect('data.db')
    try:
        cursor = connection.cursor()
        # plain string (the original used a pointless f-string)
        cursor.execute('SELECT * FROM books')
        books = [
            {'name': row[0], 'author': row[1], 'read': row[2]}
            for row in cursor.fetchall()
        ]
    finally:
        connection.close()  # close even if the query raises
    return books
def read_book(name):
    """Mark the named book as read (read = 1) in the database.

    Bug fixed: the original rewrote the legacy books.txt file via
    _save_all_books(), so the change never reached the sqlite database
    that list_book() actually queries.
    """
    connection = sqlite3.connect('data.db')
    try:
        connection.execute('UPDATE books SET read = 1 WHERE name = ?', (name,))
        connection.commit()
    finally:
        connection.close()
def _save_all_books(books):
    """Overwrite the legacy text file with every book, one CSV line each
    (name,author,read)."""
    lines = [f'{book["name"]},{book["author"]},{book["read"]}\n' for book in books]
    with open(books_file, 'w') as file:
        file.writelines(lines)
def delete_book(name):
    """Delete the named book from the database (no-op if absent).

    Bug fixed: the original filtered list_book()'s result and rewrote the
    legacy books.txt file, so the sqlite database that list_book() reads
    was never updated.
    """
    connection = sqlite3.connect('data.db')
    try:
        connection.execute('DELETE FROM books WHERE name = ?', (name,))
        connection.commit()
    finally:
        connection.close()
|
986,249 | c95d959fa498d389bc389c7bdc12e79a8e4d20b0 | import nmap
from pprint import pprint
import time
try:
    # Poll the subnet and report whenever the scan result set changes.
    nm = nmap.PortScanner()
    result = None
    while True:
        tmp = nm.scan(hosts='192.168.4.0/24', arguments='--exclude 192.168.4.1')
        if result == tmp['scan']:
            print("Nothing new")
            # Bug fixed: the original `continue` skipped time.sleep(1),
            # so the unchanged case busy-looped and hammered the network
            # with back-to-back scans.
            time.sleep(1)
            continue
        print("Network changed!")
        result = tmp['scan']
        pprint(result)
        time.sleep(1)
except KeyboardInterrupt:
    print("Ctrl-C pressed")
except Exception as e:
    print(e)
finally:
    print("Bye!")
|
986,250 | 6c02d1a214efb0d649c1a43dd1affc899e8225dc | #!/usr/bin/env python
import itertools
from pathlib import Path
import spacy
import pandas as pd
import pathlib
from pathlib import Path
from spacy.tokens import Doc, Token
nlp = spacy.load('en')
def safe_list_get(l, idx, default=None):
    """Return l[idx], or a fallback when idx is out of range.

    Bug fixed: the original accepted a `default` parameter but ignored it,
    always returning [0, 0, 0]. Callers in this file pass no default and
    rely on the [0, 0, 0] sentinel, so that remains the behaviour when
    `default` is omitted.
    """
    try:
        return l[idx]
    except IndexError:
        # [0, 0, 0] is the (start, end, technique) "no label" sentinel
        return [0, 0, 0] if default is None else default
def bert_list_test(doc: Doc, ids: list) -> list:
    """Split *doc* into newline-delimited sentences (test-time, no labels).

    Returns (per-sentence id lists, per-sentence token texts, per-sentence
    spaCy tokens); *ids* is appended once per sentence. Tokens after the
    final newline are not flushed, mirroring the training-side splitter.
    """
    sentence_ids = []
    sentence_texts = []
    sentence_tokens = []
    current_texts = []
    current_tokens = []
    for token in doc:
        text = str(token)
        if text[:1] == '\n':
            # a newline token terminates the sentence; empty sentences are dropped
            if current_texts:
                sentence_tokens.append(current_tokens)
                sentence_texts.append(current_texts)
                sentence_ids.append(ids)
                current_texts = []
                current_tokens = []
            continue
        current_texts.append(text)
        current_tokens.append(token)
    return sentence_ids, sentence_texts, sentence_tokens
def bert_list(p2id: dict, doc: Doc, doc_labels: list, ids: list, binary: bool, bio: bool = False) -> list:
    """Align character-level propaganda spans with spaCy tokens, sentence by sentence.

    Walks *doc* and *doc_labels* (sorted (start, end, technique) tuples) in
    lockstep, emitting per-sentence lists split on newline tokens:
    (ids per sentence, token texts, numeric labels, spaCy tokens).
    Labels are p2id[technique] for the first token of a span and
    p2id[technique] + offset for continuation tokens when *bio* is set.
    Tokens after the final newline are not flushed.
    """
    # offset shifts continuation ("I-") labels past the begin ("B-") block:
    # 1 class in binary mode, 18 propaganda techniques otherwise
    if binary:
        offset = 1
    else:
        offset = 18
    token_idx = 0
    labels_idx = 0
    tokensh = []       # per-sentence token texts
    labelsh = []       # per-sentence numeric labels
    tlabel = []        # labels of the sentence being built
    tspacyt = []       # spaCy tokens of the sentence being built
    ttoken=[]          # token texts of the sentence being built
    bertids = []       # ids, repeated once per emitted sentence
    flagger = 0        # counts tokens consumed inside a span (for the disabled backtrack)
    spacytokens = []
    current_token: Token = doc[0]
    while token_idx < len(doc):
        current_token: Token = doc[token_idx]
        start_token_idx = token_idx
        # [0, 0, 0] sentinel once labels are exhausted -> everything becomes "O"
        current_label = safe_list_get(doc_labels, labels_idx)
        # advance token until it is within the label
        if (str(current_token)[:1] == '\n'):
            # newline token: flush the current sentence (if non-empty)
            flagger = 0
            if ttoken:
                spacytokens.append(tspacyt)
                tokensh.append(ttoken)
                labelsh.append(tlabel)
                bertids.append(ids)
                tlabel= []
                tspacyt = []
                ttoken=[]
            token_idx += 1
            continue
        if current_token.idx < current_label[0] or current_label[2]==0:
            # token precedes the next span (or no spans left): label "O"
            # Uncomment to get backtrack
            #if flagger == 0:
            ttoken.append(str(current_token))
            tspacyt.append(current_token)
            tlabel.append(0)
            flagger = flagger - 1
            if flagger < 0:
                flagger = 0
            token_idx += 1
            continue
        flagger = 0
        first = True
        # consume every token whose char offset lies inside the current span
        while current_token.idx < current_label[1]:
            if (str(current_token)[:1] == '\n'):
                # spans may cross sentence breaks: flush mid-span
                if ttoken:
                    spacytokens.append(tspacyt)
                    tokensh.append(ttoken)
                    labelsh.append(tlabel)
                    bertids.append(ids)
                    tlabel= []
                    tspacyt = []
                    ttoken=[]
            else:
                ttoken.append(str(current_token))
                tspacyt.append(current_token)
                if first:
                    tlabel.append(p2id[current_label[2]])
                else:
                    # continuation token: shifted into the "inside" label range
                    tlabel.append(p2id[current_label[2]] + offset)
            token_idx += 1
            if token_idx >= len(doc):
                break
            current_token = doc[token_idx]
            flagger = flagger+1
            if bio:
                first = False
            else:
                first = True
        # advance label
        labels_idx += 1
        # revert token_idx because the labels might be intersecting. Uncomment to get backtrack.
        #token_idx = start_token_idx
    return bertids, tokensh, labelsh, spacytokens
def settings(tech_path: str, label: str = None, bio: bool = False) -> list:
    """Build the label inventory for tagging.

    Returns (bio_label_names_or_None, label_names_with_leading_'O',
    hash_token_id, end_token_id, name->id mapping). When *label* is given,
    it is used as the single technique; otherwise names are loaded from
    *tech_path*. With *bio*, B-/I- variants are also produced.
    """
    if label:
        techniques = [label]
    else:
        techniques = load_technique_names_from_file(tech_path)
    bio_labels = None
    if bio:
        begins = ["B-" + tech for tech in techniques]
        insides = ["I-" + tech for tech in techniques]
        bio_labels = begins + insides
    # TODO(review, kept from original): unclear whether downstream wants
    # the plain or the BIO inventory here.
    count = len(techniques)
    hash_token = count + 1   # id reserved for BERT '##' sub-word pieces
    end_token = count + 2    # id reserved for sequence padding/end
    # "outside" class always occupies id 0
    techniques.insert(0, "O")
    if bio_labels:
        bio_labels.insert(0, "O")
    p2id = {name: idx for idx, name in enumerate(techniques)}
    return bio_labels, techniques, hash_token, end_token, p2id
def load_technique_names_from_file(filename: str) -> list:
    """Read one technique name per line, stripping trailing whitespace/newlines."""
    with open(filename, "r") as handle:
        return [line.rstrip() for line in handle]
def read_data(path: str, isLabels: bool = True, binary: str = None) -> list:
    """Load every article*.txt under *path* and parse it with spaCy.

    Returns [article_ids, spaCy docs, span label lists]; the labels list is
    empty when *isLabels* is False. Label files are expected next to each
    article as <name>.task-FLC.labels.
    """
    article_ids = []
    raw_texts = []
    span_labels = []
    for txt_file in pathlib.Path(path).glob('*.txt'):
        article_ids.append(txt_file.name.replace('article', '').replace('.txt', ''))
        raw_texts.append(txt_file.read_text(encoding='utf-8'))
        if isLabels:
            label_file = txt_file.as_posix().replace('.txt', '.task-FLC.labels')
            span_labels.append(parse_label(label_file, binary=binary))
    parsed_docs = list(nlp.pipe(raw_texts))
    return [article_ids, parsed_docs, span_labels]
def parse_label(label_path: str, binary: str = None) -> list:
    """Parse a tab-separated .labels file into sorted (start, end, technique) tuples.

    Each line is: article_id \t technique \t start \t end.
    With binary == 'Propaganda' every span is relabelled 'Propaganda';
    with any other *binary* value only spans of that technique are kept.
    A missing file yields an empty list.
    """
    spans = []
    if not Path(label_path).exists():
        return spans
    for line in open(label_path):
        parts = line.strip().split('\t')
        if not binary:
            spans.append((int(parts[2]), int(parts[3]), parts[1]))
        elif binary == 'Propaganda':
            spans.append((int(parts[2]), int(parts[3]), 'Propaganda'))
        elif parts[1] == binary:
            spans.append((int(parts[2]), int(parts[3]), parts[1]))
    return sorted(spans)
def corpus2list(p2id: dict, ids: list, texts: list, labels: list, binary: bool = False, bio: bool = False) -> list:
    """Tokenise every document with bert_list() and flatten to sentence level.

    Returns (flat ids, flat token texts, flat numeric labels, flat spaCy tokens).
    """
    def _flatten(nested):
        return [item for group in nested for item in group]

    print (p2id)
    per_doc = [
        bert_list(p2id, doc, doc_labels, doc_id, binary, bio)
        for doc, doc_labels, doc_id in zip(texts, labels, ids)
    ]
    doc_ids, doc_tokens, doc_labels_out, doc_spacy = zip(*per_doc)
    #print (flat_list_text[0])
    return _flatten(doc_ids), _flatten(doc_tokens), _flatten(doc_labels_out), _flatten(doc_spacy)
def test2list(ids: list, texts: list) -> list:
    """Tokenise test documents (no labels) and flatten to sentence level.

    Returns (flat ids, flat token texts, None, flat spaCy tokens); the third
    slot mirrors corpus2list()'s label position.
    """
    def _flatten(nested):
        return [item for group in nested for item in group]

    per_doc = [bert_list_test(doc, doc_id) for doc, doc_id in zip(texts, ids)]
    doc_ids, doc_tokens, doc_spacy = zip(*per_doc)
    return _flatten(doc_ids), _flatten(doc_tokens), None, _flatten(doc_spacy)
def get_char_level(flat_list_i: list, flat_list_s: list, predictions_sample: list, cleaned: list, hash_token: int, end_token: int, prop_tech) -> pd.DataFrame:
    """Project sub-word predictions back to character-level spans.

    For each sentence, one prediction is kept per whole word (the one at the
    word's first sub-word position), hash/end sentinel labels are dropped,
    and get_spans() converts the remaining per-token labels into character
    spans. Returns a DataFrame with columns ID / P (technique) / s (start) /
    liste (end).
    """
    # diagnostic: number of sentences containing at least one positive label
    counter = 0
    for x in predictions_sample:
        for j in x:
            if j == 1:
                counter = counter + 1
                break
    print (counter)
    pred = []
    # pick one prediction per word: `index` advances by the word's sub-word
    # count, so predictions_sample[oindex][index] is the word-initial piece
    for oindex, x in enumerate(cleaned):
        index = 0
        tlist = []
        for iindex, j in enumerate(x):
            #print (j)
            #print(index)
            tlist.append(predictions_sample[oindex][index])
            length = len(j)
            index = index + length
            #print ("Token: ", j, "----- Assigned: ", predictions_sample[oindex][index])
        pred.append(tlist)
    # second pass: drop hash ('##' sub-word) and end (padding) sentinel labels
    tpred = pred
    pred = []
    for x in tpred:
        tlist = []
        for j in x:
            if j in [hash_token, end_token]:
                continue
            tlist.append(j)
        pred.append(tlist)
    # same diagnostic repeated to confirm nothing was lost
    counter = 0
    for x in predictions_sample:
        for j in x:
            if j == 1:
                counter = counter + 1
                break
    print ("Counter check: ", counter)
    lists = []   # span start offsets
    liste = []   # span end offsets
    listp = []   # technique names
    listid = []  # article ids
    for i, x in enumerate(pred):
        a = flat_list_s[i]
        b = flat_list_i[i]
        id_text, spans = get_spans(a, x, i, b, hash_token, end_token, prop_tech)
        if spans:
            for span in spans:
                listid.append(id_text)
                liste.append(span[2])
                lists.append(span[1])
                listp.append(span[0])
    df = {"ID": listid, "P": listp, "s": lists, "liste": liste}
    df = pd.DataFrame(df)
    return df
def get_spans(a: list, labelx: list, i: int, id_text: str, hash_token, end_token, prop_tech):
    """Merge consecutive identical token labels into character spans.

    *a* holds spaCy tokens (with .idx character offsets), *labelx* their
    numeric labels. Returns (id_text, [[technique, start, end], ...]) or
    (0, []) when no span was found. A span closes when the label changes,
    returns to 0, or the sequence ends; `span_f`/`span_e` track the open
    span's character start/end.

    NOTE(review): the i >= len(a)-1 branch duplicates the main-loop logic
    for the case where labels outnumber tokens; the two halves must stay in
    sync if either is touched.
    """
    #if i==35:
    #ipdb.set_trace()
    spans = []
    span_len = 0   # tokens in the currently open span (0 = no open span)
    prev = 0       # previous token's label
    for i, x in enumerate(labelx):
        # End if last index\
        if x == end_token:
            continue
        if i >= len(a)-1:
            # labels extend past the last token: close using the final token
            if x != 0:
                # if prev element isn't equal to current and not O
                if prev != x and prev !=0:
                    span_e= a[i-1].idx + len(a[i-1])
                    span_len = 0
                    spans.append([prop_tech[labelx[i-1]], span_f, span_e])
                    prev = x
                    span_f = a[i].idx
                    span_len = span_len+1
                if span_len == 0:
                    span_f = a[i].idx
                    span_len = span_len+1
                    prev=x
                    if (i >= len(labelx)-1):
                        span_e= a[i].idx + len(a[i])
                        span_len = 0
                        spans.append([prop_tech[labelx[i]], span_f, span_e])
                        continue
                else:
                    span_e= a[i].idx + len(a[i])
                    span_len = 0
                    spans.append([prop_tech[labelx[i]], span_f, span_e])
                    continue
            else:
                prev = x
                if (span_len != 0):
                    span_e= a[i-1].idx+len(a[i-1])
                    span_len = 0
                    spans.append([prop_tech[labelx[i-1]], span_f, span_e])
                continue
        if x == hash_token:
            # '##' sub-word sentinel: ignore entirely
            continue
        if x != 0:
            # Check if prev element was same as current or equal to O
            if prev != x and prev !=0:
                # label switched directly between techniques: close the old span
                span_e= a[i-1].idx + len(a[i-1])
                span_len = 0
                spans.append([prop_tech[labelx[i-1]], span_f, span_e])
                prev = x
                span_f = a[i].idx
                span_len = span_len+1
            if span_len == 0:
                # open a new span at this token's character offset
                span_f = a[i].idx
                span_len = span_len+1
                prev=x
                if (i >= len(labelx)-1):
                    span_e= a[i].idx + len(a[i])
                    span_len = 0
                    spans.append([prop_tech[labelx[i]], span_f, span_e])
                    continue
            else:
                if (i >= len(labelx)-1):
                    # sequence ends inside an open span: close it here
                    span_e= a[i].idx + len(a[i])
                    span_len = 0
                    spans.append([prop_tech[labelx[i]], span_f, span_e])
                    continue
                span_len = span_len+1
        else:
            prev = x
            if (span_len != 0):
                # label dropped back to "O": close the open span at the previous token
                span_e= a[i-1].idx+len(a[i-1])
                span_len = 0
                spans.append([prop_tech[labelx[i-1]], span_f, span_e])
                continue
            if (i >= len(labelx)-1):
                #span_e= a[i].idx + len(a[i])
                #span_len = 0
                #spans.append([span_f, span_e])
                continue
    if spans:
        return id_text, spans
    else:
        return (0, [])
|
986,251 | 2a96320716faa96e2c34f0711d89e542b1765751 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 12 14:48:31 2015
@author: timothy
"""
from __future__ import division
import os, sys
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_almost_equal
import pyideas
# set working directory on super folder
def test_multidim():
    """Run the multidim example script end-to-end as a smoke test.

    Bug fixed: execfile() was removed in Python 3 and would raise
    NameError there; exec(open(...).read()) works on both 2 and 3.
    """
    script = str(os.path.join(pyideas.BASE_DIR, "..", "examples",
                              "multidim_model.py"))
    with open(script) as handle:
        exec(handle.read())
def test_second_order():
    """Smoke-test the second-order example in both its old and new API styles."""
    examples_dir = os.path.join(pyideas.BASE_DIR, "..", "examples")
    sys.path.append(examples_dir)
    import second_order_system
    second_order_system.run_second_order_old()
    second_order_system.run_second_order_new()
986,252 | 706773e9681cfd5d3224d738c69bb80eaebdaaba | """literaturetrackercadlab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include, url
from django.urls import path
import genrestracker.views
# Register each installed app's ModelAdmin classes with the admin site.
admin.autodiscover()
# URL table for the genres tracker app. Commented-out routes are retained
# from earlier iterations (car tracker / alternate login flows).
urlpatterns = [
    #url(r'^db', genrestracker.views.db, name='db'),
    #url(r'^$', genrestracker.views.login, name='login'),
    path('admin/', admin.site.urls),
    url(r'^index/', genrestracker.views.index, name='index'),
    url(r'^result/', genrestracker.views.result, name='result'),
    # url(r'^addCar/', genrestracker.views.addNewCar, name='addCar'),
    # url(r'^chooseCar/', genrestracker.views.chooseCar, name='chooseCar'),
    url(r'^show/', genrestracker.views.show, name='show'),
    # url(r'^addMiles/', genrestracker.views.addMiles, name='addMiles'), #/(.*.{0,})
    #
    # site root serves the login view
    url(r'^$', genrestracker.views.login, name='login'),
    # url(r'^login/', genrestracker.views.login, name='login'),
    url(r'^successfulSignup/', genrestracker.views.successfulSignup, name='successfulSignup'),
    url(r'^signup/', genrestracker.views.signup, name='signup'),
    #url(r'^logout/$', genrestracker.views.logout, name='logout')
]
|
986,253 | 2895e642917af717275b97f6f7971431e611d00f | import os
import random
# Build lexicons and train/validation/test splits for the NLSPARQL slot-
# tagging corpus. Token files pair each word with an IOB tag; feature files
# add POS and lemma. Several token representations (word, lemma+pos,
# word+lemma+pos, word+lemma, word+pos) are produced side by side.
# files
FILE_TRAIN = "dataset/NLSPARQL.train.data"
FILE_TEST = "dataset/NLSPARQL.test.data"
TRAIN_FEATS = "dataset/NLSPARQL.train.feats.txt"
TEST_FEATS = "dataset/NLSPARQL.test.feats.txt"
#variables
#base list
word_list = []
iob_list = []
line_list = []
sentence_list = []
# lemma, pos list
lp_list = []
lp_line_list = []
lp_sentence_list = []
# word, lemma, pos list
wlp_list = []
wlp_line_list = []
wlp_sentence_list = []
# word, lemma list
wl_list = []
wl_line_list = []
wl_sentence_list = []
# word, pos list
wp_list = []
wp_line_list = []
wp_sentence_list = []
#create a file for training
with open(FILE_TRAIN) as file1, open(TRAIN_FEATS) as file2:
    for x, y in zip(file1, file2):
        # w1 in form word , IOB
        w1 = x.strip().split()
        # w2 in form word , POS , lemma
        w2 = y.strip().split()
        #get the value
        print_str = ""
        # prefix and suffix extraction and add the list
        if len(w1)>0 and len(w2)>0:
            word_list.append(w1[0])
            iob_list.append(w1[1])
            line_list.append((w1[0],w1[1]))
            lp_list.append((w2[2], w2[1]))
            lp_line_list.append((w2[2], w2[1], w1[1]))
            wlp_list.append((w1[0],w2[2],w2[1]))
            wlp_line_list.append((w1[0],w2[2],w2[1],w1[1]))
            wl_list.append((w1[0], w2[2]))
            wl_line_list.append((w1[0], w2[2], w1[1]))
            wp_list.append((w1[0], w2[1]))
            wp_line_list.append((w1[0], w2[1],w1[1]))
        else:
            # blank line = sentence boundary: flush every per-sentence buffer
            if len(line_list) > 0:
                sentence_list.append(line_list)
                line_list = []
            if len(lp_line_list) > 0:
                lp_sentence_list.append(lp_line_list)
                lp_line_list = []
            if len(wlp_line_list) > 0:
                wlp_sentence_list.append(wlp_line_list)
                wlp_line_list = []
            if len(wl_line_list) > 0:
                wl_sentence_list.append(wl_line_list)
                wl_line_list = []
            if len(wp_line_list) > 0:
                wp_sentence_list.append(wp_line_list)
                wp_line_list = []
#Create a Lexicon
#delete the duplicate into the list
unique_w_list = list(set(word_list))
unique_iob_list = list(set(iob_list))
unique_lp_list = list(set(lp_list))
unique_wlp_list = list(set(wlp_list))
unique_wl_list = list(set(wl_list))
unique_wp_list = list(set(wp_list))
# NOTE(review): set iteration order is unstable across runs, so lexicon ids
# are not reproducible between invocations — confirm this is acceptable.
#create file to unique lexicon
with open("lexicon/unique_w_lexicon", "w") as wlexicon:
    counter = 0
    for element in unique_w_list:
        wlexicon.write(element + " " + str(counter) + "\n")
        counter += 1
    wlexicon.write("<UNK> " + str(counter) + "\n" )
with open("lexicon/unique_iob_lexicon", "w") as ioblexicon:
    #create file unique iob lexicon
    counter = 0
    for element in unique_iob_list:
        ioblexicon.write(element + " " + str(counter) + "\n")
        counter += 1
    ioblexicon.write("<UNK> " + str(counter) + "\n" )
with open("lexicon/unique_lp_lexicon", "w") as lplexicon:
    #create unique lemma, pos lexicon
    counter = 0
    for element in unique_lp_list:
        # separated by '_' so that can eventually be split
        lplexicon.write(element[0] + "_" + element[1] + " " + str(counter) + "\n")
        counter += 1
    lplexicon.write("<UNK> " + str(counter) + "\n")
with open("lexicon/unique_wlp_lexicon", "w") as wlplexicon:
    #create unique word, lemma, pos list
    counter = 0
    for element in unique_wlp_list:
        # separated by '_' so that can eventually be split
        wlplexicon.write(element[0] + "_" + element[1] + "_" + element[2] + " " + str(counter) + "\n")
        counter += 1
    wlplexicon.write("<UNK> " + str(counter) + "\n")
with open("lexicon/unique_wl_lexicon", "w") as wllexicon:
    #create unique word lemma
    counter = 0
    for element in unique_wl_list:
        # separated by '_' so that can eventually be split
        wllexicon.write(element[0] + "_" + element[1] + " " + str(counter) + "\n")
        counter += 1
    wllexicon.write("<UNK> " + str(counter) + "\n")
with open("lexicon/unique_wp_lexicon", "w") as wplexicon:
    #create unique word pos list
    counter = 0
    for element in unique_wp_list:
        # separated by '_' so that can eventually be split
        wplexicon.write(element[0] + "_" + element[1] + " " + str(counter) + "\n")
        counter += 1
    wplexicon.write("<UNK> " + str(counter) + "\n")
# create file of test
test_lp_line = []
test_lp_set = []
test_wlp_line = []
test_wlp_set = []
test_wl_line = []
test_wl_set = []
test_wp_line = []
test_wp_set = []
# NOTE(review): literal paths here duplicate FILE_TEST / TEST_FEATS above.
with open("dataset/NLSPARQL.test.data", "r") as file1, open("dataset/NLSPARQL.test.feats.txt") as file2:
    for x, y in zip(file1, file2):
        # data_tokens in form < word , IOB >
        w1 = x.strip().split()
        # feature_tokens in form < word , POS , lemma >
        w2 = y.strip().split()
        # prefix and suffix extraction (based on user's parameter)
        if len(w1) > 0 and len(w2) > 0:
            iob_list.append(w1[1])
            test_lp_line.append((w2[2],w2[1],w1[1]))
            test_wlp_line.append((w1[0],w2[2],w2[1],w1[1]))
            test_wl_line.append((w1[0],w2[2],w1[1]))
            test_wp_line.append((w1[0], w2[1], w1[1]))
        else:
            if len(test_lp_line) > 0:
                test_lp_set.append(test_lp_line)
                test_lp_line = []
            if len(test_wlp_line) > 0:
                test_wlp_set.append(test_wlp_line)
                test_wlp_line = []
            if len(test_wl_line) > 0:
                test_wl_set.append(test_wl_line)
                test_wl_line = []
            if len(test_wp_line) > 0:
                test_wp_set.append(test_wp_line)
                test_wp_line = []
# implement the validation
#
# NOTE(review): wlp_sentence_list is NOT shuffled here while all its
# siblings are — likely an omission; confirm.
random.shuffle(sentence_list)
random.shuffle(lp_sentence_list)
random.shuffle(wl_sentence_list)
random.shuffle(wp_sentence_list)
#create the training set and the validation set to alla sentences
# NOTE(review): the first 30% becomes "training" and the remaining 70%
# "validation" — this looks inverted from the usual split; confirm intent.
training_set = sentence_list[:int(len(sentence_list) * 0.30)]
validation_set = sentence_list[int(len(sentence_list) * 0.30):]
training_lp_set = lp_sentence_list[:int(len(lp_sentence_list)*0.30)]
validation_lp_set = lp_sentence_list[int(len(lp_sentence_list)*0.30):]
training_wlp_set = wlp_sentence_list[:int(len(wlp_sentence_list)*0.30)]
validation_wlp_set = wlp_sentence_list[int(len(wlp_sentence_list)*0.30):]
training_wl_set = wl_sentence_list[:int(len(wl_sentence_list)*0.30)]
validation_wl_set = wl_sentence_list[int(len(wl_sentence_list)*0.30):]
training_wp_set = wp_sentence_list[:int(len(wp_sentence_list)*0.30)]
validation_wp_set = wp_sentence_list[int(len(wp_sentence_list)*0.30):]
# save the training and the validation set in a file
# NOTE(review): the loop variable `list` shadows the builtin below.
with open("training/training_set", "w") as training_set_file:
    for list in training_set:
        for element in list:
            training_set_file.write(str(element[0]) + " " + str(element[1]) + "\n")
        training_set_file.write("\n")
with open("validation/validation_set", "w") as validation_set_file:
    for list in validation_set:
        for element in list:
            validation_set_file.write(str(element[0]) + " " + str(element[1]) + "\n")
        validation_set_file.write("\n")
# save the training, the test and the validation set in a file (<lemma,pos>)
with open("training/training_lp_set", "w") as training_lp_set_file:
    for list in training_lp_set:
        for element in list:
            training_lp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        training_lp_set_file.write("\n")
with open("validation/validation_lp_set", "w") as validation_lp_set_file:
    for list in validation_lp_set:
        for element in list:
            validation_lp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        validation_lp_set_file.write("\n")
with open("test/test_lp_set", "w") as test_lp_set_file:
    for list in test_lp_set:
        for element in list:
            test_lp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        test_lp_set_file.write("\n")
# save the training, the test and the validation set in a file (<word,lemma,pos>)
with open("training/training_wlp_set", "w") as training_wlp_set_file:
    for list in training_wlp_set:
        for element in list:
            training_wlp_set_file.write(element[0] + "_" + element[1] + "_" + element[2] + " " + element[3] + "\n")
        training_wlp_set_file.write("\n")
with open("validation/validation_wlp_set", "w") as validation_wlp_set_file:
    for list in validation_wlp_set:
        for element in list:
            validation_wlp_set_file.write(element[0] + "_" + element[1] + "_" + element[2] + " " + element[3] + "\n")
        validation_wlp_set_file.write("\n")
with open("test/test_wlp_set", "w") as test_wlp_set_file:
    for list in test_wlp_set:
        for element in list:
            test_wlp_set_file.write(element[0] + "_" + element[1] + "_" + element[2] + " " + element[3] + "\n")
        test_wlp_set_file.write("\n")
# save the training, the test and the validation set in a file (<word,lemma>)
with open("training/training_wl_set", "w") as training_wl_set_file:
    for list in training_wl_set:
        for element in list:
            training_wl_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        training_wl_set_file.write("\n")
with open("validation/validation_wl_set", "w") as validation_wl_set_file:
    for list in validation_wl_set:
        for element in list:
            validation_wl_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        validation_wl_set_file.write("\n")
with open("test/test_wl_set", "w") as test_wl_set_file:
    for list in test_wl_set:
        for element in list:
            test_wl_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        test_wl_set_file.write("\n")
# save the training, the test and the validation set in a file (<word,pos>)
with open("training/training_wp_set", "w") as training_wp_set_file:
    for list in training_wp_set:
        for element in list:
            training_wp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        training_wp_set_file.write("\n")
with open("validation/validation_wp_set", "w") as validation_wp_set_file:
    for list in validation_wp_set:
        for element in list:
            validation_wp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        validation_wp_set_file.write("\n")
with open("test/test_wp_set", "w") as test_wp_set_file:
    for list in test_wp_set:
        for element in list:
            test_wp_set_file.write(element[0] + "_" + element[1] + " " + element[2] + "\n")
        test_wp_set_file.write("\n")
|
986,254 | c24d02f3b3b389eb62d58e207d219db914060bc2 | """
修改changelog为特定格式
"""
import re
# File extensions grouped by deployment target: web resources, compiled
# Java classes, and configuration artifacts (used by set_type()).
htmlextends = ['html', 'page', 'uientity', 'application']
clazzextends = ['java']
configextends = ['bo', 'svpkg', 'cmpt', 'xml']
def read_changelog(filename):
    """Collect the unique added/modified (A/M) file paths from a changelog.

    Lines are expected as "<status> <path>" where status A or M is kept;
    returns a set of "/…/file.ext" paths.
    Bug fixed: the original opened the file without ever closing it.
    """
    pattern = re.compile(r'\W*[AM]\W*(/.*\.\w*)$')
    paths = set()
    with open(filename, mode='r', encoding='utf-8') as file:
        for line in file:
            match = pattern.match(line)
            if match is not None:
                paths.add(match.group(1))
    return paths
def set_type(sets):
    """Map each changed path to an Ant <include> line based on its extension.

    Raises NameError for an unknown extension (behaviour kept from the
    original implementation).
    """
    includes = []
    for path in sets:
        extension = path[path.rfind('.') + 1:]
        if extension in htmlextends:
            kind = 'html'
        elif extension in clazzextends:
            kind = 'class'
        elif extension in configextends:
            kind = 'config'
        else:
            raise NameError('未知拓展名')
        includes.append(buildxml(path, kind))
    return includes
def buildxml(path, type):
    """Render one tab-indented Ant <include> entry for *path*.

    'html' entries are relative to the html/ root; 'config' and 'class'
    entries are prefixed with WEB-INF/classes, with .java rewritten to
    .class. Unknown types yield only the indentation.
    """
    if type == 'html':
        entry = '<include name="' + path[path.find('html/') + 5:] + '" />\n'
    elif type == 'config':
        entry = '<include name="WEB-INF/classes' + path[path.find('config/') + 6:] + '" />\n'
    elif type == 'class':
        entry = '<include name="WEB-INF/classes' + path[path.find('src/') + 3:][:-4] + 'class" />\n'
    else:
        entry = ''
    return '\t\t\t\t\t' + entry
if __name__ == '__main__':
    # Pipeline: read changelog -> classify paths into <include> entries ->
    # write the Ant build.xml fragment around them.
    print('----->开始读取changeLog。txt')
    rtnset = read_changelog('changeLog.txt')
    print('----->读取changeLog。txt完成')
    print('----->开始对内容分类处理')
    rtnList = set_type(rtnset)
    print('----->处理完成')
    print('----->开始写入build.xml文件')
    # Bug fixed: the original never closed build.xml; use a context manager
    # so the file is flushed and closed even if writing fails.
    with open('build.xml', mode='wt', encoding='utf-8') as xml:
        xml.writelines('''<?xml version="1.0" encoding="utf-8"?>
<project name="rms deploy" default="antwar" basedir=".">
	<property file="build.properties" />
	<target name="antwar" description="打包war" depends="">
		<echo message="删除原dist目录下的${war.present}"/>
		<delete file="${project.dist.dir}/${war.present}" />
		<echo message="拷贝HTML文件..." />
		<copy todir="${project.temp.dir}">
			<fileset dir="${webroot.path}">
''')
        xml.writelines(rtnList)
        xml.writelines('''			</fileset>
		</copy>
		<echo message="生成${war.present}..." />
		<war warfile="${project.dist.dir}/${war.present}.war" webxml="${project.temp.dir}/WEB-INF/web.xml">
			<fileset dir="${project.temp.dir}">
				<include name="**/*"/>
				<exclude name="**/svn/**"/>
				<exclude name="**/_apx/**"/>
			</fileset>
		</war>
	</target>
</project>
''')
    print('----->写入build.xml文件完成')
|
986,255 | 4650a3fe4d9fd3304f4475681b21267f41768fa3 | import skimage
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import model
def readImage(dir = None):
    """Load an image as grayscale.

    Bug fixed: the original accepted a path but the imread call was
    commented out, so *dir* was silently ignored and the skimage sample
    image was always returned. Passing no path keeps that behaviour.
    """
    if dir is not None:
        image = skimage.io.imread(dir)   # read the image at the given path
    else:
        image = skimage.data.chelsea()   # fall back to skimage's sample image
    image = skimage.color.rgb2gray(image)  # convert RGB to grayscale
    return image
# Demo driver: show the input image, then push it through a small
# hand-built CNN defined in model.py (conv -> ReLU -> pooling, three times).
dir = './cat.jpg'
# image = readImage(dir)
img = readImage()
skimage.io.imshow(img)
skimage.io.show()
# First conv layer
#l1_filter = numpy.random.rand(2,7,7)*20 # Preparing the filters randomly.
# Two fixed 3x3 edge detectors: vertical edges and horizontal edges.
l1_filter = np.zeros((2, 3, 3))
l1_filter[0, :, :] = np.array([[[-1, 0, 1],
                                [-1, 0, 1],
                                [-1, 0, 1]]])
l1_filter[1, :, :] = np.array([[[1, 1, 1],
                                [0, 0, 0],
                                [-1, -1, -1]]])
print("\n**Working with conv layer 1**")
l1_feature_map = model.conv(img, l1_filter)
print("\n**ReLU**")
l1_feature_map_relu = model.relu(l1_feature_map)
print("\n**Pooling**")
# 2x2 pooling with stride 2 -- presumably max pooling; confirm in model.py.
l1_feature_map_relu_pool = model.pooling(l1_feature_map_relu, 2, 2)
print("**End of conv layer 1**\n")
# Second conv layer
# Three random 5x5 filters, one input channel per layer-1 output map.
l2_filter = np.random.rand(3, 5, 5, l1_feature_map_relu_pool.shape[-1])
print("\n**Working with conv layer 2**")
l2_feature_map = model.conv(l1_feature_map_relu_pool, l2_filter)
print("\n**ReLU**")
l2_feature_map_relu = model.relu(l2_feature_map)
print("\n**Pooling**")
l2_feature_map_relu_pool = model.pooling(l2_feature_map_relu, 2, 2)
print("**End of conv layer 2**\n")
# Third conv layer
# One random 7x7 filter over the layer-2 output maps.
l3_filter = np.random.rand(1, 7, 7, l2_feature_map_relu_pool.shape[-1])
print("\n**Working with conv layer 3**")
l3_feature_map = model.conv(l2_feature_map_relu_pool, l3_filter)
print("\n**ReLU**")
l3_feature_map_relu = model.relu(l3_feature_map)
print("\n**Pooling**")
l3_feature_map_relu_pool = model.pooling(l3_feature_map_relu, 2, 2)
print("**End of conv layer 3**\n")
fig0, ax0 = matplotlib.pyplot.subplots(nrows=1, ncols=1)
ax0.imshow(img).set_cmap("gray")
ax0.set_title("Input Image")
ax0.get_xaxis().set_ticks([])
ax0.get_yaxis().set_ticks([])
matplotlib.pyplot.savefig("in_img.png", bbox_inches="tight")
matplotlib.pyplot.close(fig0)
# Layer 1
fig1, ax1 = matplotlib.pyplot.subplots(nrows=3, ncols=2)
ax1[0, 0].imshow(l1_feature_map[:, :, 0]).set_cmap("gray")
ax1[0, 0].get_xaxis().set_ticks([])
ax1[0, 0].get_yaxis().set_ticks([])
ax1[0, 0].set_title("L1-Map1")
ax1[0, 1].imshow(l1_feature_map[:, :, 1]).set_cmap("gray")
ax1[0, 1].get_xaxis().set_ticks([])
ax1[0, 1].get_yaxis().set_ticks([])
ax1[0, 1].set_title("L1-Map2")
ax1[1, 0].imshow(l1_feature_map_relu[:, :, 0]).set_cmap("gray")
ax1[1, 0].get_xaxis().set_ticks([])
ax1[1, 0].get_yaxis().set_ticks([])
ax1[1, 0].set_title("L1-Map1ReLU")
ax1[1, 1].imshow(l1_feature_map_relu[:, :, 1]).set_cmap("gray")
ax1[1, 1].get_xaxis().set_ticks([])
ax1[1, 1].get_yaxis().set_ticks([])
ax1[1, 1].set_title("L1-Map2ReLU")
ax1[2, 0].imshow(l1_feature_map_relu_pool[:, :, 0]).set_cmap("gray")
ax1[2, 0].get_xaxis().set_ticks([])
ax1[2, 0].get_yaxis().set_ticks([])
ax1[2, 0].set_title("L1-Map1ReLUPool")
ax1[2, 1].imshow(l1_feature_map_relu_pool[:, :, 1]).set_cmap("gray")
ax1[2, 0].get_xaxis().set_ticks([])
ax1[2, 0].get_yaxis().set_ticks([])
ax1[2, 1].set_title("L1-Map2ReLUPool")
matplotlib.pyplot.savefig("L1.png", bbox_inches="tight")
matplotlib.pyplot.close(fig1)
# Layer 2: 3x3 grid -- rows are (raw, ReLU, pooled), columns the 3 maps.
fig2, ax2 = matplotlib.pyplot.subplots(nrows=3, ncols=3)
for row, (maps, suffix) in enumerate(
        ((l2_feature_map, ""),
         (l2_feature_map_relu, "ReLU"),
         (l2_feature_map_relu_pool, "ReLUPool"))):
    for col in range(3):
        axis = ax2[row, col]
        axis.imshow(maps[:, :, col]).set_cmap("gray")
        axis.get_xaxis().set_ticks([])
        axis.get_yaxis().set_ticks([])
        axis.set_title("L2-Map{0}{1}".format(col + 1, suffix))
matplotlib.pyplot.savefig("L2.png", bbox_inches="tight")
matplotlib.pyplot.close(fig2)
# Layer 3: single row -- the lone map raw, after ReLU, after pooling.
fig3, ax3 = matplotlib.pyplot.subplots(nrows=1, ncols=3)
for idx, (maps, suffix) in enumerate(
        ((l3_feature_map, ""),
         (l3_feature_map_relu, "ReLU"),
         (l3_feature_map_relu_pool, "ReLUPool"))):
    axis = ax3[idx]
    axis.imshow(maps[:, :, 0]).set_cmap("gray")
    axis.get_xaxis().set_ticks([])
    axis.get_yaxis().set_ticks([])
    axis.set_title("L3-Map1" + suffix)
matplotlib.pyplot.savefig("L3.png", bbox_inches="tight")
matplotlib.pyplot.close(fig3)
|
986,256 | e453bfa8824aa7c4362abc3b1a9a946bc018d632 |
import re
from dataclasses import dataclass
#############
# Constants #
#############
# Parses one line of a /proc/<pid>/maps file.
# Groups: start addr, end addr, perms, offset, device, inode, pathname
# (the pathname group may match empty for anonymous mappings).
_RE_MAPS = re.compile(
    r'^([0-9a-f]+)-([0-9a-f]+) ([rwxsp-]{4}) ([0-9a-f]+) ([^ ]+) (\d+)\s*([^ $]*?)$',
    flags=re.MULTILINE
)
###########
# Classes #
###########
@dataclass
class Mapping:
    '''Stores mappings information.

    A mapping corresponds to one line of a /proc/<pid>/maps file.
    '''
    start_address : int  # start of the region (parsed from hex)
    end_address : int    # end of the region (parsed from hex)
    size : int           # end_address - start_address
    perms : str          # four chars matching [rwxsp-], e.g. 'r-xp'
    offset : int         # file offset of the mapping (parsed from hex)
    dev : str            # device field, as captured from the maps line
    inode : str          # inode field; kept as a string, not converted
    pathname : str       # backing path; may be '' for anonymous mappings
#############
# Functions #
#############
def get_maps(pid, filter_=None):
    '''Parse the maps file of a process into Mapping objects.

    Parameters
    ----------
    pid : int
        The pid of the process.
    filter_ : callable, optional
        Predicate applied to each Mapping; only mappings for which it
        returns a truthy value are kept.

    Examples
    --------
    All mappings of a process:
    >>> get_maps(1234)
    Only executable mappings:
    >>> get_maps(1234, lambda m: 'x' in m.perms)
    Writable mappings of at least 4096 bytes:
    >>> get_maps(1234, lambda m: 'w' in m.perms and m.size >= 4096)
    '''
    with open(f'/proc/{pid}/maps', 'r') as handle:
        content = handle.read()
    selected = []
    for fields in _RE_MAPS.findall(content):
        begin = int(fields[0], 16)
        end = int(fields[1], 16)
        mapping = Mapping(
            start_address=begin,
            end_address=end,
            size=end - begin,
            perms=fields[2],
            offset=int(fields[3], 16),
            dev=fields[4],
            inode=fields[5],
            pathname=fields[6],
        )
        if filter_ is None or filter_(mapping):
            selected.append(mapping)
    return selected
|
986,257 | 9935d3878eb536e997c1c1a93cad8e01ad328c24 | from django import forms
# ?from apps.webscrapper.models import data
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db import models
from apps.webscrapper.models import Text,Image,Title,Index_Name,BackgroundImg
class get_Title(forms.ModelForm):
    """ModelForm exposing only the 'Tit' field of the Title model."""
    class Meta:
        model = Title
        fields = ('Tit',)
class get_Image(forms.ModelForm):
    """ModelForm exposing only the 'Img' field of the Image model."""
    class Meta:
        model = Image
        fields = ('Img',)
class get_Text(forms.ModelForm):
    """ModelForm exposing only the 'Txt' field of the Text model."""
    class Meta:
        model = Text
        fields = ('Txt',)
class get_Index(forms.ModelForm):
    """ModelForm exposing only the 'Index' field of Index_Name."""
    class Meta:
        model = Index_Name
        fields = ('Index',)
|
986,258 | c1e3da5e2dd9172f18dd25a83eca8839f2593e73 | from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO as GPIO
# Broadcom (BCM) numbering for the raw RPi.GPIO cleanup used in close().
GPIO.setmode(GPIO.BCM)
# define pins (gpiozero LED objects on BCM pins 17, 18, 27)
redled = LED(17)
blueled = LED(18)
greenled = LED(27)
# define GUI: main window and the shared button font
win = Tk()
win.title("Control LED Lights")
myfont = tkinter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
#function toggle
def toggleRedLight():
    """Flip the red LED and relabel its button with the next action."""
    if not redled.is_lit:
        redled.on()
        RedButton["text"] = "Turn Red Led Off"
    else:
        redled.off()
        RedButton["text"] = "Turn Red Led On"
def toggleBlueLight():
    """Flip the blue LED and relabel its button with the next action."""
    if not blueled.is_lit:
        blueled.on()
        BlueButton["text"] = "Turn Blue Led Off"
    else:
        blueled.off()
        BlueButton["text"] = "Turn Blue Led On"
def toggleGreenLight():
    """Flip the green LED and relabel its button with the next action."""
    if not greenled.is_lit:
        greenled.on()
        GreenButton["text"] = "Turn Green Led Off"
    else:
        greenled.off()
        GreenButton["text"] = "Turn Green Led On"
def close():
    """Release all GPIO pins, then destroy the main window."""
    GPIO.cleanup()
    win.destroy()
# widgets: one toggle button per LED plus an explicit Exit button
RedButton = Button(win, text = 'Turn Red Led On', font = myfont, command = toggleRedLight, bg = 'red', height = 1, width = 24)
RedButton.grid(row = 1, column = 0)
BlueButton = Button(win, text = 'Turn Blue Led On', font = myfont, command = toggleBlueLight, bg = 'sky blue', height = 1, width = 24)
BlueButton.grid(row = 1, column = 1)
GreenButton = Button(win, text = 'Turn Green Led on', font = myfont, command = toggleGreenLight, bg = 'green', height = 1, width = 24)
GreenButton.grid(row = 1, column = 2)
ExitButton = Button(win, text = 'Exit', font = myfont, command = close, bg = 'red', height = 1, width = 5)
ExitButton.grid(row = 2, column = 1)
# Bug fix: the original win.protocol("WM_DELETE_WINDOW") passed no handler,
# which only queries the current binding. Register close() so GPIO pins are
# cleaned up when the window is closed from the title bar as well.
win.protocol("WM_DELETE_WINDOW", close)
win.mainloop()
|
def incrementByOne(arr):
    '''Increment every integer in *arr* by one, in place, and return it.'''
    for position, value in enumerate(arr):
        arr[position] = value + 1
    return arr
|
986,260 | c48776fde49553794c4921f94626173c1780839e | # 安装
# geoip2==2.5.0
# official MaxMind geoip2 package
# https://geoip2.readthedocs.io/en/latest/#database-example
import os
import geoip2.database
# Project root directory (folder containing this script)
Base_DIR = os.path.dirname(os.path.abspath(__file__))
Base_DIR1 = os.path.dirname(__file__)
print(Base_DIR)
print(Base_DIR1)
# Look up a sample IP in the bundled MaxMind city database.
geoip_reader = geoip2.database.Reader(os.path.join(Base_DIR, 'GeoIP2-City.mmdb'))
resp = geoip_reader.city('111.247.233.232')
# Bug fix: the original lines had trailing commas, which made
# reg_country and reg_state accidental 1-tuples instead of strings.
reg_country = resp.country.name
reg_state = resp.subdivisions.most_specific.name
reg_city = resp.city.name
print(reg_country, reg_state, reg_city)
|
def double(lst):
    '''1. a -- iterative: build a new list of doubled values.'''
    doubled = []
    for item in lst:
        doubled.append(item * 2)
    return doubled
def double(lst):
    '''1. b -- recursive: double the head, recurse on the tail.'''
    if lst:
        return [2 * lst[0]] + double(lst[1:])
    return []
def double(lst):
    '''1. c -- expression form: one doubled value per element.'''
    return [element * 2 for element in lst]
def flatten(lst):
    '''2. a -- iterative: append every item of every sub-list in order.'''
    flat = []
    for sub_lst in lst:
        flat.extend(sub_lst)
    return flat
def flatten(lst):
    '''2. b -- recursive: first sub-list plus the flattened rest.'''
    if lst:
        return lst[0] + flatten(lst[1:])
    return []
from functools import reduce
def flatten(lst):
    '''2. c -- fold: concatenate all sub-lists with reduce.'''
    concat = lambda acc, chunk: acc + chunk
    return list(reduce(concat, lst, []))
def lessThan(n, lst):
    '''3. a -- iterative: keep only values strictly below n.'''
    kept = []
    for value in lst:
        if value < n:
            kept.append(value)
    return kept
def lessThan(n, lst):
    '''3. b -- recursive: keep the head if it is below n, recurse on the tail.'''
    if not lst:
        return []
    rest = lessThan(n, lst[1:])
    if lst[0] < n:
        return [lst[0]] + rest
    return rest
def lessThan(n, lst):
    '''3. c -- filter form: select values strictly below n.'''
    below = lambda x: x < n
    return [v for v in filter(below, lst)]
def compose(*args):
    '''4. a -- recursive composition: compose(f, g)(x) == f(g(x)).'''
    if not args:
        return lambda x: x
    head, tail = args[0], args[1:]
    return lambda x: head(compose(*tail)(x))
from functools import reduce
def compose(*args):
    '''4. b -- iterative composition: apply the functions right to left.'''
    def composed(x):
        value = x
        for fn in reversed(args):
            value = fn(value)
        return value
    return composed
|
986,262 | e58f6d9c19b0a47ee9ddc0a5fb42efee12060e8d | #############
# DEVELOPMENT
#############
# where pictures are stored
# DATA_FOLDER = '../data/'
# # where SQL database containing users is
# SNAPCAT_DB = DATA_FOLDER + 'snapcat.db'
#############
# PRODUCTION
#############
# Folder where uploaded pictures are stored in production.
DATA_FOLDER = '/data/'
# SQLite database containing users, kept alongside the pictures.
SNAPCAT_DB = DATA_FOLDER + 'snapcat.db'
|
986,263 | 326736ba5b170f5b61f53b11660c22712fd9a9d1 | from google.appengine.api import users
from ferris import Controller, route_with, messages, add_authorizations, route
from app.models.user.user import User
from app.services.user_svc import UserSvc
from protorpc import protojson
import logging
class Main(Controller):
    """Entry-point controller: serves the Angular app shell, the admin
    shell, and a Google-Accounts login redirect for admins."""

    @route_with(template='/')
    def index(self):
        """Render the public single-page app shell."""
        # NOTE(review): hard-coded user lookup looks like leftover
        # debugging -- its result is printed but never used; confirm.
        active_user = User.get("kenneth.estrella@cloudsherpas.com")
        print "\n\n\n!!!!!!!!-->>>>>>%s\n\n" % active_user
        self.context['active_user'] = 'null'
        self.meta.view.template_name = 'angular/app-index.html'

    @route
    def login_admin(self):
        """Expose a Google login URL when nobody is signed in; otherwise
        respond with 403."""
        if not users.get_current_user():
            self.context['data'] = "%s"%users.create_login_url('/admin')
        else:
            return 403

    @route_with(template='/admin')
    def admin(self):
        """Render the admin shell for App Engine admins; redirect anyone
        else to the admin login."""
        print User.get(users.get_current_user())
        if users.is_current_user_admin():
            self.context['data'] = users.get_current_user()
            self.context['logout_url'] = users.create_logout_url('/')
            self.meta.view.template_name = 'angular/admin-index.html'
        else:
            return self.redirect(self.uri(action="login_admin"))
|
986,264 | 4e5ba40d10352dc39a0321da41419e24cb98cd74 | from acky.api import (
AwsCollection,
AwsApiClient,
make_filters,
)
from itertools import chain
class EC2ApiClient(AwsApiClient):
    """Base for all EC2 wrappers; binds AwsApiClient to the 'ec2' service."""
    service_name = "ec2"
class EC2(EC2ApiClient):
    """Facade over the EC2 service: region helpers plus one accessor
    property per resource-collection wrapper."""

    def regions(self, continent='us', include_gov=False):
        """Return region descriptions from DescribeRegions.

        continent: keep only regions named '<continent>-*'; pass 'all'
        or an empty value to disable the filter.
        include_gov: when False, GovCloud regions (named like
        'us-gov-west-1') are excluded. Bug fix: this flag was previously
        accepted but ignored, so GovCloud regions always slipped through
        the 'us-' prefix filter.
        """
        # returns (string, ...)
        # DescribeRegions
        regions = self.call("DescribeRegions", response_data_key="Regions")
        if continent and continent != "all":
            regions = [r for r in regions
                       if r['RegionName'].startswith("{}-".format(continent))]
        if not include_gov:
            regions = [r for r in regions if '-gov-' not in r['RegionName']]
        return regions

    def zones(self, region):
        """Not implemented: list availability zones of *region*."""
        # returns (string, ...)
        # DescribeAvailabilityZones
        raise NotImplementedError("aws.ec2.zones")

    @property
    def environment(self):
        """Parent environment dict extended with hoster='ec2'."""
        env = super(EC2, self).environment
        env['hoster'] = 'ec2'
        return env

    # One freshly constructed collection wrapper per EC2 resource type.
    @property
    def ACLs(self):
        return ACLCollection(self._aws)

    @property
    def ACEs(self):
        return ACECollection(self._aws)

    @property
    def ElasticIPs(self):
        return ElasticIPCollection(self._aws)

    @property
    def Instances(self):
        return InstanceCollection(self._aws)

    @property
    def SecurityGroups(self):
        return SecurityGroupCollection(self._aws)

    @property
    def IpPermissions(self):
        return IpPermissionsCollection(self._aws)

    @property
    def Volumes(self):
        return VolumeCollection(self._aws)

    @property
    def Snapshots(self):
        return SnapshotCollection(self._aws)

    @property
    def Subnets(self):
        return SubnetCollection(self._aws)

    @property
    def VPCs(self):
        return VPCCollection(self._aws)

    @property
    def PlacementGroups(self):
        return PlacementGroupCollection(self._aws)

    @property
    def KeyPairs(self):
        return KeyPairCollection(self._aws)

    @property
    def Tags(self):
        return TagCollection(self._aws)
class ACLCollection(AwsCollection, EC2ApiClient):
    """Network-ACL operations (all currently unimplemented stubs)."""

    def get(self, filters=None):
        # returns (acl_info, ...)
        # DescribeNetworkAcls
        raise NotImplementedError()

    def create(self, vpc):
        # returns acl_info
        # CreateNetworkAcl
        raise NotImplementedError()

    def destroy(self, acl):
        # returns bool
        # DeleteNetworkAcl
        raise NotImplementedError()
class ACECollection(AwsCollection, EC2ApiClient):
    """Network-ACL entry operations (all currently unimplemented stubs)."""

    def get(self, filters=None):
        # returns (ace_info, ...)
        # DescribeNetworkAcls
        raise NotImplementedError()

    def add(self, acl, ace_list):
        # returns ace_info
        # CreateNetworkAclEntry
        raise NotImplementedError()

    def remove(self, acl, ace_list):
        # returns bool
        # DeleteNetworkAclEntry
        raise NotImplementedError()

    def replace(self, acl, old, new):
        # returns ace_info
        # CreateNetworkAclEntry, DeleteNetworkAclEntry
        raise NotImplementedError()
class ElasticIPCollection(AwsCollection, EC2ApiClient):
    """Elastic-IP operations (all currently unimplemented stubs)."""

    def get(self, filters=None):
        # returns (eip_info, ...)
        # DescribeAddresses
        raise NotImplementedError()

    def create(self, vpc=False):
        # returns eip_info
        # AllocateAddresses
        raise NotImplementedError()

    def destroy(self, eip):
        # returns bool
        # ReleaseAddresses
        raise NotImplementedError()
class InstanceCollection(AwsCollection, EC2ApiClient):
    """Query and (eventually) control EC2 instances."""

    def get(self, filters=None):
        """Return instance descriptions from every matching reservation,
        flattened into a single list."""
        # returns (inst_info, ...)
        # DescribeInstances
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        reservations = self.call("DescribeInstances",
                                 response_data_key="Reservations",
                                 **params)
        return list(chain(*(r["Instances"] for r in reservations)))

    def control(self, inst, state):
        # returns bool
        # valid states: start, stop, terminate, protect, unprotect
        # StartInstances, StopInstances, TerminateInstances,
        # ModifyInstanceAttribute(DisableApiTermination)
        raise NotImplementedError()

    def Launcher(self, config=None):
        """Return a configurable launcher object bound to this client."""
        class _launcher(object):
            # Configurable launcher for EC2 instances. Create the Launcher
            # (passing an optional dict of its attributes), set its attributes
            # (as described in the RunInstances API docs), then launch()
            def __init__(self, aws, config):
                self._aws = aws
                self.config = config

            def launch(self, count, ami, name):
                # returns inst_info
                # RunInstances
                raise NotImplementedError()

        if not config:
            config = {}
        return _launcher(self._aws, config)
class KeyPairCollection(AwsCollection, EC2ApiClient):
    """EC2 key-pair operations; only listing is implemented."""

    def get(self, filters=None):
        """Return key-pair descriptions, optionally narrowed by *filters*."""
        # returns (key_info, ...)
        # DescribeKeyPairs
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeKeyPairs",
                         response_data_key="KeyPairs",
                         **params)

    def create(self, key_name):
        # returns (key_pair, ...)
        # CreateKeyPair
        raise NotImplementedError()

    def destroy(self, key_name):
        # returns bool
        # DeleteKeyPair
        raise NotImplementedError()
class PlacementGroupCollection(AwsCollection, EC2ApiClient):
    """Placement-group operations: list, create, destroy."""

    def get(self, filters=None):
        """Return placement-group descriptions, optionally filtered."""
        # returns (sg_info, ...)
        # DescribePlacementGroups
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribePlacementGroups",
                         response_data_key="PlacementGroups",
                         **params)

    def create(self, group_name, strategy="cluster"):
        """Create a placement group.

        group_name may be a callable taking the environment dict, to
        build names from context.
        NOTE(review): this class passes snake_case parameters while
        SecurityGroupCollection passes CamelCase -- presumably self.call
        normalizes them; confirm.
        """
        # returns sg_info
        params = {
            "strategy": strategy
        }
        # CreatePlacementGroup
        if callable(group_name):
            params['group_name'] = group_name(self.environment)
        else:
            params['group_name'] = group_name
        return self.call("CreatePlacementGroup", **params)

    def destroy(self, pg):
        """Delete placement group *pg*."""
        # returns bool
        # DeletePlacementGroup
        return self.call("DeletePlacementGroup", group_name=pg)
class SecurityGroupCollection(AwsCollection, EC2ApiClient):
    """Security-group operations: list, create, destroy."""

    def get(self, filters=None, exclude_vpc=False):
        """Return security-group descriptions; with exclude_vpc=True,
        drop any group attached to a VPC."""
        # returns (sg_info, ...)
        # DescribeSecurityGroups
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        groups = self.call("DescribeSecurityGroups",
                           response_data_key="SecurityGroups",
                           **params)
        if exclude_vpc:
            # Exclude any group that belongs to a VPC
            return [g for g in groups if not g.get('VpcId')]
        else:
            return groups

    def create(self, name, description, vpc=None):
        """Create a security group.

        name may be a callable taking the environment dict, to build
        names from context; vpc optionally targets a specific VPC.
        """
        # returns sg_info
        params = {
            "Description": description,
        }
        # CreateSecurityGroup
        if callable(name):
            params['GroupName'] = name(self.environment)
        else:
            params['GroupName'] = name
        if vpc:
            params["VpcId"] = vpc
        return self.call("CreateSecurityGroup", **params)

    def destroy(self, sg):
        """Delete security group *sg* (a GroupId)."""
        # returns bool
        # DeleteSecurityGroup
        return self.call("DeleteSecurityGroup", GroupId=sg)
class IpPermissionsCollection(AwsCollection, EC2ApiClient):
    """Security-group rule (IpPermissions) operations."""

    def get(self, filters=None):
        # returns (sgr_info, ...)
        # DescribeSecurityGroups
        raise NotImplementedError()

    def modify(self, api_action, sgid, other, proto_spec):
        """Make a change to a security group. api_action is an EC2 API name.
        Other is one of:
          - a group (sg-nnnnnnnn)
          - a group with account (<user id>/sg-nnnnnnnn)
          - a CIDR block (n.n.n.n/n)
        Proto spec is a triplet (<proto>, low_port, high_port)."""
        params = {'group_id': sgid, 'ip_permissions': []}
        perm = {}
        params['ip_permissions'].append(perm)
        proto, from_port, to_port = proto_spec
        perm['IpProtocol'] = proto
        # Port defaults: missing from_port -> 0; missing to_port falls back
        # to from_port, then to the full range (65535).
        perm['FromPort'] = from_port or 0
        perm['ToPort'] = to_port or from_port or 65535
        if other.startswith("sg-"):
            # Bare group id in the caller's own account.
            perm['UserIdGroupPairs'] = [{'GroupId': other}]
        elif "/sg-" in other:
            # "<account>/sg-..." form: split into owner and group id.
            account, group_id = other.split("/", 1)
            perm['UserIdGroupPairs'] = [{
                'UserId': account,
                'GroupId': group_id,
            }]
        else:
            # Anything else is treated as a CIDR block.
            perm['IpRanges'] = [{'CidrIp': other}]
        return self.call(api_action, **params)

    def add(self, sgid, other, proto_spec, direction="in"):
        """Add a security group rule to group <sgid>.
        Direction is either 'in' (ingress) or 'out' (egress).
        See modify() for other parameters."""
        # returns bool
        # AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress
        if direction == "in":
            api = "AuthorizeSecurityGroupIngress"
        elif direction == "out":
            api = "AuthorizeSecurityGroupEgress"
        else:
            raise ValueError("direction must be one of ('in', 'out')")
        return self.modify(api, sgid, other, proto_spec)

    def remove(self, sgid, other, proto_spec, direction="in"):
        """Remove a security group rule from group <sgid>.
        Direction is either 'in' (ingress) or 'out' (egress).
        See modify() for other parameters."""
        # returns (removed_sgr_info, ...)
        # RevokeSecurityGroupIngress, RevokeSecurityGroupEgress
        if direction == "in":
            api = "RevokeSecurityGroupIngress"
        elif direction == "out":
            api = "RevokeSecurityGroupEgress"
        else:
            raise ValueError("direction must be one of ('in', 'out')")
        return self.modify(api, sgid, other, proto_spec)
class VolumeCollection(AwsCollection, EC2ApiClient):
    """EBS volume operations (all currently unimplemented stubs)."""

    def get(self, filters=None):
        # returns (vol_info, ...)
        # DescribeVolumes
        # key: Volumes
        raise NotImplementedError()

    def create(self, az, size_or_snap, iops=None):
        # returns vol_info
        # CreateVolume
        raise NotImplementedError()

    def destroy(self, vol):
        # returns bool
        # DeleteVolume
        raise NotImplementedError()

    def attach(self, vol, inst, dev=None):
        # returns bool
        # AttachVolume
        raise NotImplementedError()
class SnapshotCollection(AwsCollection, EC2ApiClient):
    """EBS snapshot operations: list, create, destroy."""

    def get(self, filters=None):
        """Return snapshot descriptions, optionally narrowed by *filters*."""
        # returns (snap_info, ...)
        # DescribeSnapshots
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeSnapshots",
                         response_data_key="Snapshots",
                         **params)

    def create(self, volume_id, description=None):
        """Snapshot *volume_id*.

        NOTE(review): Description is forwarded even when None -- confirm
        the underlying call accepts a null description.
        """
        # returns snap_info
        # CreateSnapshot
        return self.call("CreateSnapshot",
                         VolumeId=volume_id,
                         Description=description,
                         response_data_key="Snapshot")

    def destroy(self, snapshot_id):
        """Delete snapshot *snapshot_id*."""
        # returns bool
        # DeleteSnapshot
        return self.call("DeleteSnapshot", SnapshotId=snapshot_id)
class SubnetCollection(AwsCollection, EC2ApiClient):
    """Subnet operations: list, create, destroy."""

    def get(self, filters=None):
        """Return subnet descriptions, optionally narrowed by *filters*."""
        # returns (subnet_info, ...)
        # DescribeSubnets
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeSubnets",
                         response_data_key="Subnets",
                         **params)

    def create(self, vpc_id, cidr, availability_zone):
        """Create a subnet in *vpc_id* covering *cidr*.

        Bug fix: availability_zone was accepted but never forwarded, so
        subnets were always placed in an AZ chosen by AWS. It is now
        passed through when truthy, keeping existing None/'' callers
        behaving exactly as before.
        """
        # returns subnet_info
        # CreateSubnet
        params = {
            "VpcId": vpc_id,
            "CidrBlock": cidr,
        }
        if availability_zone:
            params["AvailabilityZone"] = availability_zone
        return self.call("CreateSubnet",
                         response_data_key="Subnet",
                         **params)

    def destroy(self, subnet_id):
        """Delete subnet *subnet_id*; returns True on success."""
        # returns bool
        # DeleteSubnet
        return bool(self.call("DeleteSubnet", SubnetId=subnet_id,
                              response_data_key="return"))
class VPCCollection(AwsCollection, EC2ApiClient):
    """VPC operations; only listing is implemented."""

    def get(self, filters=None):
        """Return VPC descriptions, optionally narrowed by *filters*."""
        # returns (vpc_info, ...)
        # DescribeVpcs
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeVpcs", response_data_key="Vpcs", **params)

    def create(self, cidr, tenancy="default"):
        # returns vpc_info
        # CreateVpc
        raise NotImplementedError()

    def destroy(self, vpc):
        # returns bool
        # DeleteVpc
        raise NotImplementedError()
class TagCollection(AwsCollection, EC2ApiClient):
    """Resource-tag operations: list, create, destroy."""

    def get(self, filters=None):
        """Return tag descriptions, optionally narrowed by *filters*."""
        # returns (tag_info, ...)
        # DescribeTags
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeTags",
                         response_data_key="Tags",
                         **params)

    def create(self, resource_ids, tags):
        """Apply *tags* to every resource in *resource_ids*."""
        # returns bool
        # CreateTags
        return self.call("CreateTags", resources=resource_ids, tags=tags)

    def destroy(self, resource_ids, tags):
        """Remove *tags* from every resource in *resource_ids*."""
        # returns bool
        # DeleteTags
        return self.call("DeleteTags", resources=resource_ids, tags=tags)
|
986,265 | 4888780c69026df513d1cca0c412166505231bc1 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_FabryPerot.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1036, 636)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.centralWidget)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_Lambda2 = QtWidgets.QLabel(self.centralWidget)
self.label_Lambda2.setObjectName("label_Lambda2")
self.gridLayout_2.addWidget(self.label_Lambda2, 1, 3, 1, 1)
self.SpinBox_Lambda1 = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_Lambda1.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_Lambda1.setDecimals(2)
self.SpinBox_Lambda1.setMinimum(300.0)
self.SpinBox_Lambda1.setMaximum(520.0)
self.SpinBox_Lambda1.setSingleStep(0.01)
self.SpinBox_Lambda1.setProperty("value", 499.7)
self.SpinBox_Lambda1.setObjectName("SpinBox_Lambda1")
self.gridLayout_2.addWidget(self.SpinBox_Lambda1, 0, 4, 1, 1)
self.label_Lambda1 = QtWidgets.QLabel(self.centralWidget)
self.label_Lambda1.setObjectName("label_Lambda1")
self.gridLayout_2.addWidget(self.label_Lambda1, 0, 3, 1, 1)
self.cbg = QtWidgets.QCheckBox(self.centralWidget)
self.cbg.setChecked(True)
self.cbg.setObjectName("cbg")
self.gridLayout_2.addWidget(self.cbg, 1, 5, 1, 1)
self.pushButton_Lambda1 = QtWidgets.QPushButton(self.centralWidget)
self.pushButton_Lambda1.setObjectName("pushButton_Lambda1")
self.gridLayout_2.addWidget(self.pushButton_Lambda1, 0, 8, 1, 1)
self.SpinBox_Lambda2 = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_Lambda2.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_Lambda2.setDecimals(2)
self.SpinBox_Lambda2.setMinimum(480.0)
self.SpinBox_Lambda2.setMaximum(564.0)
self.SpinBox_Lambda2.setSingleStep(0.01)
self.SpinBox_Lambda2.setProperty("value", 499.9)
self.SpinBox_Lambda2.setObjectName("SpinBox_Lambda2")
self.gridLayout_2.addWidget(self.SpinBox_Lambda2, 1, 4, 1, 1)
self.cbb = QtWidgets.QCheckBox(self.centralWidget)
self.cbb.setChecked(True)
self.cbb.setObjectName("cbb")
self.gridLayout_2.addWidget(self.cbb, 0, 5, 1, 1)
self.SpinBox_e = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_e.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_e.setSuffix("")
self.SpinBox_e.setDecimals(2)
self.SpinBox_e.setMinimum(100.0)
self.SpinBox_e.setMaximum(5000.0)
self.SpinBox_e.setSingleStep(0.01)
self.SpinBox_e.setProperty("value", 500.0)
self.SpinBox_e.setObjectName("SpinBox_e")
self.gridLayout_2.addWidget(self.SpinBox_e, 1, 2, 1, 1)
self.SpinBox_angle = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_angle.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_angle.setSuffix("")
self.SpinBox_angle.setDecimals(2)
self.SpinBox_angle.setMaximum(0.5)
self.SpinBox_angle.setSingleStep(0.01)
self.SpinBox_angle.setProperty("value", 0.04)
self.SpinBox_angle.setObjectName("SpinBox_angle")
self.gridLayout_2.addWidget(self.SpinBox_angle, 0, 2, 1, 1)
self.label_angle = QtWidgets.QLabel(self.centralWidget)
self.label_angle.setObjectName("label_angle")
self.gridLayout_2.addWidget(self.label_angle, 0, 1, 1, 1)
self.label_e = QtWidgets.QLabel(self.centralWidget)
self.label_e.setObjectName("label_e")
self.gridLayout_2.addWidget(self.label_e, 1, 1, 1, 1)
self.cbr = QtWidgets.QCheckBox(self.centralWidget)
self.cbr.setChecked(True)
self.cbr.setAutoRepeat(False)
self.cbr.setAutoExclusive(False)
self.cbr.setObjectName("cbr")
self.gridLayout_2.addWidget(self.cbr, 2, 5, 1, 1)
self.pushButton_Lambda2 = QtWidgets.QPushButton(self.centralWidget)
self.pushButton_Lambda2.setObjectName("pushButton_Lambda2")
self.gridLayout_2.addWidget(self.pushButton_Lambda2, 1, 8, 1, 1)
self.pushButton_Lambda3 = QtWidgets.QPushButton(self.centralWidget)
self.pushButton_Lambda3.setObjectName("pushButton_Lambda3")
self.gridLayout_2.addWidget(self.pushButton_Lambda3, 2, 8, 1, 1)
self.label_na = QtWidgets.QLabel(self.centralWidget)
self.label_na.setObjectName("label_na")
self.gridLayout_2.addWidget(self.label_na, 1, 6, 1, 1)
self.label_nt = QtWidgets.QLabel(self.centralWidget)
self.label_nt.setObjectName("label_nt")
self.gridLayout_2.addWidget(self.label_nt, 2, 6, 1, 1)
self.SpinBox_nr = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_nr.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_nr.setMaximum(1.0)
self.SpinBox_nr.setSingleStep(0.01)
self.SpinBox_nr.setProperty("value", 0.9)
self.SpinBox_nr.setObjectName("SpinBox_nr")
self.gridLayout_2.addWidget(self.SpinBox_nr, 0, 7, 1, 1)
self.SpinBox_nt = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_nt.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_nt.setDecimals(5)
self.SpinBox_nt.setMaximum(2.0)
self.SpinBox_nt.setSingleStep(1e-05)
self.SpinBox_nt.setProperty("value", 1.0)
self.SpinBox_nt.setObjectName("SpinBox_nt")
self.gridLayout_2.addWidget(self.SpinBox_nt, 2, 7, 1, 1)
self.label_nr = QtWidgets.QLabel(self.centralWidget)
self.label_nr.setObjectName("label_nr")
self.gridLayout_2.addWidget(self.label_nr, 0, 6, 1, 1)
self.SpinBox_na = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_na.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_na.setDecimals(3)
self.SpinBox_na.setMaximum(1.0)
self.SpinBox_na.setSingleStep(0.001)
self.SpinBox_na.setProperty("value", 0.0)
self.SpinBox_na.setObjectName("SpinBox_na")
self.gridLayout_2.addWidget(self.SpinBox_na, 1, 7, 1, 1)
self.label_Lambda3 = QtWidgets.QLabel(self.centralWidget)
self.label_Lambda3.setObjectName("label_Lambda3")
self.gridLayout_2.addWidget(self.label_Lambda3, 2, 3, 1, 1)
self.SpinBox_Lambda3 = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.SpinBox_Lambda3.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.SpinBox_Lambda3.setSuffix("")
self.SpinBox_Lambda3.setDecimals(2)
self.SpinBox_Lambda3.setMinimum(490.0)
self.SpinBox_Lambda3.setMaximum(800.0)
self.SpinBox_Lambda3.setSingleStep(0.01)
self.SpinBox_Lambda3.setProperty("value", 500.0)
self.SpinBox_Lambda3.setObjectName("SpinBox_Lambda3")
self.gridLayout_2.addWidget(self.SpinBox_Lambda3, 2, 4, 1, 1)
self.pushButton_Lambda123 = QtWidgets.QPushButton(self.centralWidget)
self.pushButton_Lambda123.setObjectName("pushButton_Lambda123")
self.gridLayout_2.addWidget(self.pushButton_Lambda123, 3, 8, 1, 1)
self.Slider_e = QtWidgets.QSlider(self.centralWidget)
self.Slider_e.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.Slider_e.setMaximum(5000)
self.Slider_e.setSingleStep(1)
self.Slider_e.setPageStep(1)
self.Slider_e.setProperty("value", 2000)
self.Slider_e.setOrientation(QtCore.Qt.Horizontal)
self.Slider_e.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.Slider_e.setTickInterval(200)
self.Slider_e.setObjectName("Slider_e")
self.gridLayout_2.addWidget(self.Slider_e, 2, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_2)
self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.tab)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.mplwidget1 = MPL_WIDGET_2D(self.tab)
self.mplwidget1.setObjectName("mplwidget1")
self.horizontalLayout.addWidget(self.mplwidget1)
self.tabWidget.addTab(self.tab, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.tab_4)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.mplwidget2 = MPL_WIDGET_2D(self.tab_4)
self.mplwidget2.setObjectName("mplwidget2")
self.horizontalLayout_3.addWidget(self.mplwidget2)
self.tabWidget.addTab(self.tab_4, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.tab_3)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.mplwidget3 = MPL_WIDGET_2D(self.tab_3)
self.mplwidget3.setObjectName("mplwidget3")
self.horizontalLayout_5.addWidget(self.mplwidget3)
self.tabWidget.addTab(self.tab_3, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.tab_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.mplwidget4 = MPL_WIDGET_2D(self.tab_2)
self.mplwidget4.setObjectName("mplwidget4")
self.horizontalLayout_2.addWidget(self.mplwidget4)
self.tabWidget.addTab(self.tab_2, "")
self.verticalLayout.addWidget(self.tabWidget)
self.horizontalLayout_4.addLayout(self.verticalLayout)
MainWindow.setCentralWidget(self.centralWidget)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install all user-visible strings (Qt Designer generated).

    Every string goes through QCoreApplication.translate so the UI can be
    localised without touching this code.
    NOTE(review): "Wight light" and "Wavenember" look like typos for
    "White light" / "Wavenumber"; left untouched here as they are runtime
    UI strings.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "Fabry Perot"))
    self.label_Lambda2.setText(_translate("MainWindow", "Wavelength Green (nm)"))
    self.label_Lambda1.setText(_translate("MainWindow", "Wavelength Blue (nm)"))
    self.cbg.setText(_translate("MainWindow", "View Green"))
    self.pushButton_Lambda1.setText(_translate("MainWindow", "Blue light"))
    self.cbb.setText(_translate("MainWindow", "View Blue"))
    self.label_angle.setText(_translate("MainWindow", "Angle maximum (rad)"))
    self.label_e.setText(_translate("MainWindow", "<html><head/><body><p>Thickness (µm)</p></body></html>"))
    self.cbr.setText(_translate("MainWindow", "View Red"))
    self.pushButton_Lambda2.setText(_translate("MainWindow", "Green light"))
    self.pushButton_Lambda3.setText(_translate("MainWindow", "Red light"))
    self.label_na.setText(_translate("MainWindow", "Absorption coefficient"))
    self.label_nt.setText(_translate("MainWindow", "Refraction Index"))
    self.label_nr.setText(_translate("MainWindow", "Reflection coefficient"))
    self.label_Lambda3.setText(_translate("MainWindow", "Wavelength Red (nm)"))
    self.pushButton_Lambda123.setText(_translate("MainWindow", "Wight light"))
    # Tab captions for the four plot widgets.
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Theta"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Order"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Phase"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Wavenember"))
from mplwidget import MPL_WIDGET_2D
|
# Count how many of the N people never appear in any of the K recipient
# lists. Each round starts with a count d; when d > 1 the recipients come
# as one space-separated line, otherwise as a single integer.
N, K = map(int, input().split())
unserved = [1 for _ in range(N)]
for _ in range(K):
    d = int(input())
    if d > 1:
        recipients = list(map(int, input().split()))
    else:
        recipients = [int(input())]
    for person in recipients:
        unserved[person - 1] = 0
print(sum(unserved))
|
986,267 | d440925d58112c03b0ea2f84008462a40fbc95ce | # Import custom modules
import ballot
import keys
import geth
# Output a list of all the saved ballots
def allBallots(showIndexes=False, showBallotDetail=True):
    """Print every saved ballot, optionally with a 1-based index and its
    on-chain address. (Python 2 module.)"""
    # Get the saved ballots
    savedBallots = ballot.savedBallots();
    # Check if there are any saved ballots
    if len(savedBallots) < 1:
        # No saved ballots where found
        print "No ballots found."
    else:
        # Loop through each saved ballot
        for index in xrange(len(savedBallots)):
            # Initalise this ballots output
            output = ''
            if showIndexes:
                # Add the 1-based index of the saved ballot
                output += str(index + 1) + ' - '
            # Add the title of the ballot
            output += savedBallots[index].title
            if showBallotDetail:
                # Add the ballots address
                output += ' - Address: ' + savedBallots[index].address
            # Output the ballot
            print output
# Output a list of all the saved keys
def allKeys(showIndexes=False, showKeyDetail=True):
    """Print every saved key, optionally with a 1-based index and whether a
    private key is stored alongside the public one. (Python 2 module.)"""
    # Get the saved keys
    savedKeys = keys.savedKeys();
    # Check if there are any saved keys
    if len(savedKeys) < 1:
        # No saved keys where found
        print "No keys found."
    else:
        # Loop through each saved key
        for index in xrange(len(savedKeys)):
            # Initalise this keys output
            output = ''
            if showIndexes:
                # Add the 1-based index of the saved key
                output += str(index + 1) + ' - '
            # Add the name of the key
            output += savedKeys[index].name
            if showKeyDetail:
                # Check if the key has a private key
                if savedKeys[index].privateKey:
                    # The key has both
                    output += ' - Public and Private'
                else:
                    # The key is public only
                    output += ' - Public Only'
            # Output the key
            print output
# Output a list of all available Ethereum accounts
def allAccounts(showIndexes=False, showAccountDetail=True):
# Get the accounts from the geth instance
accounts = geth.accounts()
# Check if there are any accounts
if len(accounts) < 1:
# No accounts where found
print "No accounts found."
else:
# Loop through each account
for index in xrange(len(accounts)):
# Initalise this accounts output
output = ''
# Check if an index needs to be added
if showIndexes:
# Add the index of the account
output += str(index + 1) + ' - '
# Add the address of the account
output += accounts[index].lstrip('0x')
# Check if details about the account are to be shown
if showAccountDetail:
# Add the balance of the account
output += " | " + str(geth.getBalance(accounts[index])) + " Ether"
# Output the key
print output
# Output the information of a ballot
def ballotInfo(targetBallot, showCandidates=True, showVoters=True):
    """Print a ballot's title and description, and optionally its candidate
    and voter lists. (Python 2 module.)"""
    # Get the ballot title
    title = ballot.getTitle(targetBallot)
    # Get the ballot description
    description = ballot.getDescription(targetBallot)
    # Display the ballot information
    print ''
    print title
    print description
    if (showCandidates):
        # Get and display the ballot candidates
        candidates = ballot.getCandidates(targetBallot)
        print ''
        print 'Candidates:'
        for candidate in candidates: print candidate
    if (showVoters):
        # Get and display the ballot voters
        voters = ballot.getVoters(targetBallot)
        print ''
        print 'Voters:'
        for voter in voters: print voter
# Output the candidates of a ballot
def candidates(targetBallot, showIndexes=True):
    """Print the ballot's candidates, optionally with a 1-based index.
    (Python 2 module.)"""
    # Get the ballot candidates
    candidates = ballot.getCandidates(targetBallot)
    # Loop through each candidate
    for index in xrange(len(candidates)):
        # Initalise this candidates output
        output = ''
        if showIndexes:
            # Add the 1-based index of the candidate
            output += str(index + 1) + ' - '
        # Add the name of the candidate
        output += candidates[index]
        # Output the candidate
        print output
986,268 | fa16c64ee21a4a7702243374b6969dfe44e52c38 | import re
# Advent of Code 2015 day 23: a tiny two-register machine.
# Each input line parses into (opcode, [operands]); Python 2 script.
instructions = [(line.split(" ")[0], line.strip().split(" ", 1)[1].split(", ")) for line in open('input.txt')]
registers = {'a' : 0, 'b' : 0}
i = 0  # instruction pointer (module global, advanced by execute)

def execute(instruction, args, regs):
    """Apply one instruction to regs and advance the global pointer `i`.

    hlf: halve a register (Python 2 integer division); tpl: triple;
    inc: increment; jmp: relative jump; jie/jio: jump-if-even /
    jump-if-one. Signed offsets like '+2'/'-7' are parsed via eval().
    The jump cases add `offset - 1` because `i += 1` always runs below.
    """
    global i
    if instruction == "hlf":
        regs[args[0]] /= 2
    elif instruction == "tpl":
        regs[args[0]] *= 3
    elif instruction == "inc":
        regs[args[0]] += 1
    elif instruction == "jmp":
        i += eval(args[0]) - 1
    elif instruction == "jie" and regs[args[0]] % 2 == 0 or instruction == "jio" and regs[args[0]] == 1:
        i += eval(args[1]) - 1
    i += 1

# Run until the pointer leaves the program, then show the final registers.
while i < len(instructions):
    execute(instructions[i][0], instructions[i][1], registers)
print registers
986,269 | de2f6b9f269a67e605f2be2e3d63851c8349201b | # from user import User
#This is my attempt in inheriting from User
class Customer():
    """A customer identified by an id and a combined first/last display name."""

    def __init__(self, customerid, firstname, lastname):
        self.customerid = customerid
        self.customername = ' '.join((firstname, lastname))

    def getCustomerid(self):
        """Return the customer's id."""
        return self.customerid

    def getCustomername(self):
        """Return the customer's full display name."""
        return self.customername

    def __str__(self):
        return ' '.join((self.customerid, self.customername))
#used w3schools code for reference
# class Person:
# def __init__(self, fname, lname):
# self.firstname = fname
# self.lastname = lname
# def printname(self):
# print(self.firstname, self.lastname)
# class Student(Person):
# def __init__(self, fname, lname, year):
# super().__init__(fname, lname)
# self.graduationyear = year
# def welcome(self):
# print("Welcome", self.firstname, self.lastname, "to the class of", self.graduationyear)
# x = Student("Mike", "Olsen", 2019)
# x.welcome() |
986,270 | 971bc1c8ed92ff2428e071d04615ea4153f8f875 | import os
import sys
import signal
import threading
import argparse
import atexit
from wsgiref.simple_server import make_server
from gitgrapher import make_app
HERE = os.path.dirname(os.path.abspath(__file__))
@atexit.register
def goodbye():
    """Write a farewell banner to stdout when the interpreter exits."""
    stream = sys.stdout
    stream.write('goodbye!\n')
    stream.flush()
# args: host/port to bind and the git repository to serve (defaults to cwd).
parser = argparse.ArgumentParser()
parser.add_argument('--host', default='')
parser.add_argument('--port', type=int, default=8000)
parser.add_argument('repository', default=os.getcwd())
args = parser.parse_args()

# server
def run_server():
    """Build the WSGI app for the repository and serve it forever."""
    static_root = os.path.join(HERE, 'static')
    app = make_app(args.repository, static_root)
    httpd = make_server(args.host, args.port, app)
    httpd.serve_forever()

# Daemon thread so Ctrl-C in the main thread tears the server down.
server_thread = threading.Thread(target=run_server)
server_thread.daemon = True
server_thread.start()
sys.stdout.write('Server running at {}:{}\n'.format(args.host, args.port))
sys.stdout.flush()

# loopty-loop: join with a timeout so KeyboardInterrupt is delivered
# promptly to the main thread instead of blocking in a bare join().
try:
    while True:
        server_thread.join(timeout=1)
except (KeyboardInterrupt, SystemExit):
    pass
986,271 | 9029f614b6903eca2d238278bbac4a211f5fd503 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 21:42:27 2021
@author: xl
"""
# Read two strings and report whether sA occurs inside sB.
sA = input("sA:")
sB = input("sB:")
verdict = "子字串的判斷為:Yes" if sA in sB else "子字串的判斷為:No"
print(verdict)
# Read an upper bound and print the sum of the integers 1..num.
num = int(input('Enter the number upto which the sum is to be found:'))
total = 0
for value in range(1, num + 1):
    total = total + value
print('Sum of n integers')
print(total)
|
986,273 | 4b8769ee8f10c7ed0aad590c741f73f48c59b26b | from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from PIL import Image
import os
import numpy as np
import json
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image
from types import SimpleNamespace
from ArtInspiredFashionHR.analysis.fid_score import load_model
import uuid
# Directory (relative to the server) where the source paintings live and
# where generated images are written for the front-end app to serve.
painting_loc = '../app/public/images/'

@csrf_exempt
def create(request):
    """Generate a clothing image from a placed artwork crop.

    Reads from the POST data:
      - `artwork`: index of the example painting (example<artwork>.jpg),
      - `type`: clothing type label,
      - `coordinates`: JSON with x1/y1/x2/y2 placement box.
    Pastes the resized painting onto a white 1000x1000 canvas, runs it
    through the pre-trained generator 'G_B', overwrites the canvas file
    with the generated image and returns its file name as JSON.
    """
    print(request)
    name = str(uuid.uuid4())  # unique basename for the generated image
    #print(request.data)
    artwork = request.POST.get('artwork')
    clothing_type = request.POST.get('type')
    coordinates = json.loads(request.POST.get('coordinates'))
    img_path = os.path.join(painting_loc)
    gen_path = os.path.join(img_path, 'generated_images')
    painting = Image.open(os.path.join(img_path, 'example' + artwork + '.jpg'))
    painting = painting.resize((1000, 1000)) #TODO check resize for SIFT and HOG
    # Build an all-white 1000x1000 RGB canvas.
    array = np.linspace(1, 1, painting.width * painting.height * 3)
    mat = np.reshape(array, (painting.height, painting.width, 3))
    img = Image.fromarray(np.uint8(mat * 255))
    image_copy = img.copy()
    # Resize the painting to the requested box and paste it at (x1, y1).
    painting = painting.resize((int(abs(coordinates['x2'] - coordinates['x1'])) , int(abs(coordinates['y2'] - coordinates['y1']))))
    position = (int(coordinates['x1']), int(coordinates['y1']))
    image_copy.paste(painting, position)
    if not os.path.exists(gen_path):
        os.mkdir(gen_path)
    new_path = os.path.join(gen_path, name + '.png')
    image_copy.save(new_path)
    ## load model
    # NOTE(review): absolute, machine-specific model paths — move to settings.
    model_param_loc ='/home/kieran/Documents/college/AIF_Paper_Demo/server/ArtInspiredFashionHR/analysis/results/Graphic_t-shirt_dress_high_res_fake_rec/model_params.json'
    model_path ='/home/kieran/Documents/college/AIF_Paper_Demo/server/ArtInspiredFashionHR/analysis/results/Graphic_t-shirt_dress_high_res_fake_rec/model/'
    model_iter = 90000
    model_name = 'G_B'  # NOTE(review): unused; the generator is accessed literally below
    with open(model_param_loc, 'r') as f:
        model_param = json.load(f)
    model_param['continue_train'] = False
    if not torch.cuda.is_available():
        print('Changing')
        model_param['gpu_ids'] = '-1'  # force CPU when no GPU is present
    model_args = SimpleNamespace(**model_param)
    transform = transforms.Compose([transforms.Resize((model_args.img_size, model_args.img_size), ), # Image.BICUBIC), #Temp
                                    transforms.ToTensor(),
                                    transforms.Normalize((model_args.mean, model_args.mean, model_args.mean),
                                                         (model_args.sd, model_args.sd, model_args.sd))
                                    ])
    model, model_args = load_model(model_param_loc, model_path, model_iter)
    model.load_weights(model_path, model_iter)
    ## save generated image
    image_copy = image_copy.convert('RGB')
    image_copy = transform(image_copy)
    image_copy = torch.unsqueeze(image_copy, 0)  # add batch dimension
    # The model's set_input expects a full batch dict; the same tensor is
    # reused for every image slot here.
    batch = {'Painting': image_copy, 'Dress': image_copy, 'Bb': image_copy, 'Bb_reverse': image_copy, 'label': clothing_type}
    model.set_input(batch, model_args)
    fake = model.__dict__['G_B'](model.real_B)
    save_image(model.unorm(fake.data),
               '%s' % (new_path), normalize=False)
    painting = Image.open(new_path)
    print('Fini')
    return JsonResponse({"generated_image": name + '.png'})
986,274 | 21686a3adbe2ce1a019d160607f8fd6aca2bf623 | from sys import argv, exit
import cs50
# Check correct usage: exactly one command-line argument (the house name).
if len(argv) != 2:
    print("Usage: python roaster.py [house]")
    exit(1)

# Open the database
hogwarts_db = cs50.SQL("sqlite:///students.db")

# Fetch the requested house's students, ordered by last then first name.
query = hogwarts_db.execute(
    """SELECT first, middle, last, birth FROM students
    WHERE house = ?
    ORDER BY last ASC, first ASC
    """, argv[1]
)

# Print each student. Identity comparison (`is not None`) is the correct
# idiom (PEP 8 E711); `!= None` goes through __ne__ and can misbehave.
for stdnt in query:
    middle = " " + stdnt["middle"] if stdnt["middle"] is not None else ""
    print(f"{stdnt['first']}{middle} {stdnt['last']}, born {stdnt['birth']}")
986,275 | 5afd0fcbfd5cd90701fd97c19bfe290f93473f14 | """Assumption - find mininum value in the tree
def min(node):
base -> if node is None:
return float('inf') # Largest floating point infinite number
generic->
leftmin = min (node.left)
rightmin = min (node.right)
return min(node.val, leftmin, rightmin)
"""
class BinaryTreeNode:
    """A binary-tree node holding a payload and left/right child links."""

    def __init__(self, data):
        self.data = data   # payload stored at this node
        self.left = None   # left child (None for a leaf)
        self.right = None  # right child (None for a leaf)

    # set data
    def setData(self, data):
        """Replace this node's payload."""
        self.data = data

    # get data
    def getData(self):
        """Return this node's payload."""
        return self.data

    # get left child of a node
    def getLeft(self):
        """Return the left child, or None."""
        return self.left

    # get right child of a node
    def getRight(self):
        """Return the right child, or None."""
        return self.right
def find_min_recursive(root):
    """Return the minimum value stored in the binary tree rooted at `root`.

    An empty tree yields float('inf'). Unlike the original, this no longer
    reads or mutates the module-level `minData` global, whose stale value
    made a second call return the minimum across BOTH trees.
    """
    if root is None:
        return float('inf')
    # Minimum is the smallest of this node's value and both subtree minima.
    return min(root.getData(),
               find_min_recursive(root.getLeft()),
               find_min_recursive(root.getRight()))
def main():
    """Build a small fixed test tree and print its minimum value."""
    root = BinaryTreeNode(5)
    root.left = BinaryTreeNode(19)
    root.right = BinaryTreeNode(21)
    root.left.left = BinaryTreeNode(2)
    root.left.right = BinaryTreeNode(45)
    root.right.left = BinaryTreeNode(1)
    root.right.right = BinaryTreeNode(-1)
    print ("Minimum element in the Binary Tree is : ",find_min_recursive(root))

if __name__ == '__main__':
    # Seed the module-level accumulator used by find_min_recursive.
    minData = float("infinity")
    main()
|
986,276 | 2927a59b6ed06de174f1eaca8f846ff5f8e626f9 | from django.db import models
from api import file_upload_path_for_db
# Create your models here.
class Video(models.Model):
    """A video entry described by a URL plus analysis metadata."""
    videourl = models.CharField(max_length=1000, blank=True)  # source URL (optional)
    title = models.CharField(max_length=200)
    threshold = models.CharField(max_length=20)  # stored as text, not numeric
    tags = models.CharField(max_length=500)
class VideoFile(models.Model):
    """An uploaded video file plus its original name and storage path."""
    # Uploaded file; stored under the path computed by file_upload_path_for_db.
    file_save_name = models.FileField(upload_to=file_upload_path_for_db, blank=False, null=False)
    # Original (client-side) file name.
    file_origin_name = models.CharField(max_length=100)
    # Server-side storage path.
    file_path = models.CharField(max_length=100)

    def __str__(self):
        # Bug fix: the model has no `file` attribute — the FileField is
        # `file_save_name`, so the original raised AttributeError.
        return self.file_save_name.name
|
986,277 | 4aceee264d7aeeebc1087da623a147576c09260d | #!/usr/bin/env python
import rospy
import numpy as np
import cv2 as cv
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class BlobsRellocation:
    """ROS node: consumes 16-bit thermal frames from /lepton_output and
    publishes an arrow-annotated 8-bit visualisation on /image_vector."""

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/lepton_output", Image, self.callback)
        self.image_pub = rospy.Publisher("/image_vector", Image, queue_size=10)
        self.previous_image = None  # last processed frame (set after first callback)
        self.inProcess = False      # re-entrancy guard for overlapping callbacks
        # Per-pixel displacement vectors, flattened to 1-D (size rows*cols).
        self.dx = None
        self.dy = None
        self.precision = None  # NOTE(review): assigned here but never used below
        # Helpers for drawing the arrows.
        self.image_to_view = None
        self.x_pos = None
        self.y_pos = None

    def callback(self, data):
        """Convert the incoming ROS image to OpenCV mono16 and process it."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data,"mono16")
            self.handleNewRellocation(cv_image)
        except CvBridgeError as e:
            print(e)

    def handleNewRellocation(self, current_image):
        """Process one frame: compute vectors, render and publish the overlay."""
        # Skip frames that arrive while a previous one is still being handled.
        if self.inProcess:
            return
        # The very first frame only primes previous_image.
        if self.previous_image is None:
            self.previous_image = current_image
            return
        self.inProcess = True
        self.calculateRellocation(current_image)
        # Render: normalise to 8-bit, upscale, then draw the arrows.
        scale = 8
        recal_image = self.recalculate(current_image)
        resized_image = self.rescale_image(recal_image, scale)
        arrowed_image = self.add_arrows(resized_image, scale)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(arrowed_image,"mono8"))
        # Keep the current frame as the reference for the next callback.
        self.previous_image = current_image
        self.inProcess = False

    def calculateRellocation(self, current_image):
        """Fill dx/dy and x_pos/y_pos with a placeholder vector field.

        Currently a synthetic linear gradient ("its only for help now"), not
        a real inter-frame displacement estimate.
        NOTE(review): numpy `shape` is (rows, cols); the names here are
        swapped relative to that, though the swap is applied consistently.
        """
        cols, rows = current_image.shape
        spread = 10.0
        self.dx = np.array([np.linspace(-spread, spread, rows), ]*cols).astype(int)
        self.dy = np.array([np.linspace(spread, -spread, cols), ]*rows).transpose().astype(int)
        self.dx = self.dx.reshape(self.dx.size)
        self.dy = self.dy.reshape(self.dy.size)
        self.x_pos = np.array([np.arange(rows), ]*cols).astype(int)
        self.y_pos = np.array([np.arange(cols), ]*rows).transpose().astype(int)
        self.x_pos = self.x_pos.reshape(self.x_pos.size)
        self.y_pos = self.y_pos.reshape(self.y_pos.size)

    def recalculate(self, data):
        """
        :param data: numpy array
        :return: array min-max normalised to 0-255 ('uint8')
        """
        minimum = np.min(data)
        maximum = np.max(data)
        difference = maximum - minimum
        data = (((data - minimum) / difference) * 255).astype('uint8')
        return data

    def rescale_image(self, image, scale):
        """Return the image enlarged `scale`x with bicubic interpolation."""
        height, width = image.shape[:2]
        resize_image = cv.resize(image,(scale * width, scale * height),interpolation=cv.INTER_CUBIC)
        return resize_image

    def add_arrows(self, image, scale):
        """Draw one arrow per pixel from (x_pos, y_pos) along (dx, -dy)."""
        # NOTE(review): the start point uses a hard-coded `*8` for y — it
        # only matches because the caller passes scale=8; presumably it
        # should be `self.y_pos[i]*scale`.
        for i in range(self.dx.size):
            cv.arrowedLine(image, (self.x_pos[i]*scale, self.y_pos[i]*8), ((self.x_pos[i]+self.dx[i])*scale, (self.y_pos[i]-self.dy[i])*scale), (0,0,0), 1)
        return image
def main():
    """Create the node object, register it with ROS and block forever."""
    blobs_relo = BlobsRellocation()
    rospy.init_node("blobs_rellocation", anonymous=True)
    rospy.spin()

if __name__ == '__main__':
    main()
|
986,278 | ffd017d1d21b95441f7a1858ac24d1f5417e53e6 | import mysql.connector
from mysql.connector import Error
# Connection/cursor start as None so the finally block can tell whether the
# connection was ever established — the original referenced `myconn` in
# finally and raised NameError whenever connect() itself failed.
myconn = None
mycur = None
try:
    # NOTE(review): hard-coded credentials; move them to config/env vars.
    myconn = mysql.connector.connect(host='localhost',user='root',password='H@ckmeifyoucan1',database='job_portal')
    mycur = myconn.cursor()
    # Create the job-post table; application_id auto-increments as the PK.
    mycur.execute("create table jobpost(application_id mediumint auto_increment not null PRIMARY KEY,role varchar(50),company_name varchar(100),min_salary int,max_salary int,compan_address varchar(250),email_address varchar(250),work_location varchar(2500),about_us varchar(2500),min_requirement varchar(2500),min_education varchar(2500),key_skills varchar(2500))")
except Error as err:
    print(err)
finally:
    if myconn is not None and myconn.is_connected():
        mycur.close()
        myconn.close()
        print("the the table is created")
986,279 | 2cb8efb1a857ee013bec2241ecd9b116a7673e27 |
# Module state for nextDigit(): the integer currently being spelled out,
# one decimal digit per call. The remaining-digit buffer (`stringDig`) is
# managed by nextDigit() itself.
digit = 1
#stringDig = "1"
#count = 0
def nextDigit():
    """Return the next digit (as a one-character string) of the sequence
    formed by concatenating the positive integers: 123456789101112...

    Lazily (re)fills the module-level buffer `stringDig` with the decimal
    form of the current integer `digit`, handing out one character per call.
    The original body was left half-edited (`if digit` was a bare syntax
    error); this restores the logic sketched in its own comments.
    """
    global digit, stringDig
    if 'digit' not in globals():
        digit = 1                  # lazy seed so the function is self-contained
    if 'stringDig' not in globals():
        stringDig = str(digit)     # very first call starts with digit 1 itself
    elif len(stringDig) < 1:
        digit = digit + 1          # buffer exhausted: move to the next integer
        stringDig = str(digit)
    # Hand out the first buffered character and keep the rest.
    toReturn = stringDig[0]
    stringDig = stringDig[1:]
    return toReturn
# Project-Euler-40 style product: multiply together the digits at positions
# 10, 100, 1000, ... 1000000 of the concatenated-integer sequence.
# (Python 2 script: print statements below.)
sumVal = 1   # running product (despite the name)
modVal = 10  # next position of interest
for i in range(1, 1000001):
    if (i % modVal) == 0:
        nextDig = nextDigit()
        print "found NextDigit:"+nextDig
        sumVal = sumVal * int(nextDig)
        modVal = modVal * 10
    else:
        # Still consume a digit so positions stay aligned.
        nextDigit()
print sumVal
|
986,280 | c2d54bee6b051d48b4da31dd85ea91d2693e2642 | # Write a for loop which print "Hello!, " plus each name in the list. i.e.: "Hello!, Sam"
# Greet each name in the list, e.g. "Hello! Sam".
lst = ["Sam", "Lisa", "Micha", "Dave", "Wyatt", "Emma", "Sage"]
for name in lst:
    print(f'Hello! {name}')
986,281 | 5b1cf02236c0b45f7aedb416cbaf8b0b976ebdd9 | from django.urls import path
from PendingWork.views import pendingwork_view,pendingwork_update_view
# URL routes: bare path creates a pending-work entry; /<id>/update edits one.
urlpatterns = [
    path('',pendingwork_view,name='add-pendingwork'),
    path('<int:id>/update',pendingwork_update_view,name='update-pendingwork'),
]
986,282 | d93252b989f756657b2bdcdfa9970ca005028ea1 | # -*- coding: utf-8 -*-
class Masterpiece():
"""Masterpiece is a collection of arts and reports generated by the artists
using information about the model and the dataset
Parameters:
===========
artists: list
list of :class:`Artist` objects
model: :class:`Model`
dataset: :class:`Dataset`
"""
def __init__(self, artists=None, model=None, dataset=None):
self.model = model
self.dataset = dataset
self.artists = [a(model=model, dataset=dataset) for a in artists]
def expose(self):
"""expose collects the arts from all the artists and publish them
in the input media
Paramters:
=========
media: :class:`Media`
media object to render the resulting figures and texts
"""
arts = [a.art() for a in self.artists]
return arts
class IPythonNotebook(Masterpiece):
    """Masterpiece variant that renders the collected arts as the cells of
    an IPython notebook.

    NOTE(review): `nbf` is never imported anywhere in this file (presumably
    IPython's nbformat module), so expose() still raises NameError until
    that import is added.
    """
    def __init__(self, *args, **kwargs):
        super(IPythonNotebook, self).__init__(*args, **kwargs)

    def expose(self):
        """Build a notebook from the arts and return its cell list."""
        arts = super(IPythonNotebook, self).expose()
        self.nb = nbf.new_notebook()
        # Bug fix: `cells` was referenced (`cells += ...`) before ever being
        # assigned, so the original raised UnboundLocalError on first use.
        cells = []
        for art, artist in zip(arts, self.artists):
            cells.append(nbf.new_title_cell(artist.header))
            for viz, text in art:
                # append single cells (the original `+=` would try to
                # iterate the cell object itself)
                cells.append(nbf.new_code_cell(viz))
                cells.append(nbf.new_text_cell('markdown', text))
        # Return the cells instead of silently discarding them.
        return cells
|
986,283 | 7972df29091028119c58c58d030a94b30f84ee33 | from datetime import datetime
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, String, DateTime, Text
from ._base import DeclarativeBase
class Message(DeclarativeBase):
    """A per-world message row persisted in the `messages` table."""
    __tablename__ = 'messages'

    id = Column(Integer, primary_key=True)
    world = Column(String(255))
    message = Column(Text)
    # Pass the callable itself, not its result: `default=datetime.now()`
    # is evaluated once at import time, stamping every row ever inserted
    # with the process start time.
    updated_at = Column(DateTime, default=datetime.now)
    created_at = Column(DateTime, default=datetime.now)

    def as_dict(self):
        """Return the row as a plain {column_name: value} dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
|
986,284 | 2e6ac77e973e79d6735e4da774226aa37812f93f | import random
import collections
import numpy as np
# Move encoding: 0=Rock, 1=Paper, 2=Scissors.
R, P, S = moves = range(3)
move_idx = {"R": R, "P": P, "S": S}
names = "RPS"
# beat[m] is the move that beats m; beaten[m] is the move that m beats.
beat = (P, S, R)
beaten = (S, R, P)
def react(my_loaded, opp_loaded, my_history, opp_history):
    """Assume the opponent repeats their last move: put all weight on its
    counter. With no history yet, play uniformly at random."""
    if not opp_history:
        return random.randrange(0, 3)
    counter = beat[opp_history[-1]]
    return [1 if move == counter else 0 for move in moves]
def random_max(scores):
    """Index of the highest score after adding unit-Gaussian jitter to each
    entry (randomised tie-breaking)."""
    jittered = [score + random.normalvariate(0, 1) for score in scores]
    best = max(jittered)
    return jittered.index(best)
def argmax(scores):
    """Boolean mask that is True wherever `scores` attains its maximum."""
    top = max(scores)
    return [score == top for score in scores]
def greedy_margin(my_loaded, opp_loaded, my_history, opp_history):
    """Favour moves maximising my payout minus the opponent's payout for
    the countering move."""
    margins = [my_loaded[move] - opp_loaded[beat[move]] for move in moves]
    return argmax(margins)
recent_counts = None
def best_move(counts, my_loaded, opp_loaded):
    """Score each move against (Laplace-smoothed) opponent-move counts:
    expected gain from beating minus expected loss from being beaten."""
    expected = [
        (counts[beaten[move]] + 0.5) * my_loaded[move]
        - (counts[beat[move]] + 0.5) * opp_loaded[move]
        for move in moves
    ]
    return argmax(expected)
def recent_stats(my_loaded, opp_loaded, my_history, opp_history):
    """Frequency strategy over a sliding window of the opponent's last 10
    moves.

    Mutates the module-level `recent_counts` Counter in place: the move
    falling out of the 10-move window is decremented before the newest
    move is counted, so the update order matters.
    """
    if len(opp_history) >= 10:
        recent_counts[opp_history[-10]] -= 1
    recent_counts[opp_history[-1]] += 1
    return best_move(recent_counts, my_loaded, opp_loaded)
order2_counts = None
def order2(my_loaded, opp_loaded, my_history, opp_history):
    """Second-order Markov predictor.

    Counts the opponent's reply to each (my_move, opp_move) pair — keys are
    packed as 9*my + 3*opp + reply — and scores moves against the counts
    observed after the latest pair. Mutates the module-level
    `order2_counts` Counter.
    """
    if len(my_history) >= 2:
        # Record what the opponent played after the pair from two turns ago.
        base0 = 9 * my_history[-2] + 3 * opp_history[-2]
        order2_counts[base0 + opp_history[-1]] += 1
    base1 = 9 * my_history[-1] + 3 * opp_history[-1]
    counts = [order2_counts[base1 + move] for move in moves]
    return best_move(counts, my_loaded, opp_loaded)
def nash(my_loaded, opp_loaded, my_history, opp_history):
    """Approximate a Nash-equilibrium mixed strategy for the loaded payoffs
    via ten steps of momentum gradient ascent, clipping negatives and
    renormalising onto the probability simplex after every step. Falls back
    to the uniform mix if a distribution collapses to zero mass."""
    third = 1.0 / 3
    p = np.full(3, third)
    q = np.full(3, third)
    u = np.array(my_loaded)
    v = np.array(opp_loaded)
    mom_p = np.zeros(3)
    mom_q = np.zeros(3)
    step = 0.2
    for _ in range(10):
        grad_p = u * np.roll(q, 1) - np.roll(v * q, 2)
        grad_q = v * np.roll(p, 1) - np.roll(u * p, 2)
        mom_p = 0.9 * mom_p + 0.1 * grad_p
        mom_q = 0.9 * mom_q + 0.1 * grad_q
        p += step * mom_p
        q += step * mom_q
        p[p < 0] = 0
        q[q < 0] = 0
        total_p, total_q = np.sum(p), np.sum(q)
        if total_p == 0 or total_q == 0:
            return np.full(3, third)
        p /= total_p
        q /= total_q
        step *= 0.9
    return p
strategies = [react, greedy_margin, recent_stats, order2, nash]
predictions = strategy_scores = mh = oh = None
def statistician2func(my_points, opp_points, my_loaded, opp_loaded, my_history, opp_history):
    """Ensemble RPS agent.

    Runs every strategy each turn, scores each strategy's PREVIOUS
    prediction against the opponent's actual move, and samples a move from
    the best-scoring strategy's (unnormalised) distribution. Histories
    arrive as letter strings ('R'/'P'/'S'); per-match state lives in module
    globals, reset whenever `opp_history` is empty.

    NOTE(review): `history` is declared global but never assigned or used.
    """
    global strategy_scores, history, recent_counts, mh, oh, predictions, order2_counts
    # First turn of a match: (re)initialise all state and open randomly.
    if not opp_history:
        strategy_scores = [0 for _ in strategies]
        recent_counts = collections.Counter()
        order2_counts = collections.Counter()
        mh, oh = [], []
        predictions = None
        return random.choice(names)
    my_move = move_idx[my_history[-1]]
    opp_move = move_idx[opp_history[-1]]
    # Score the last round: reward probability mass on the winning counter,
    # penalise mass on the losing move, normalised by the total mass.
    if predictions is not None:
        for j, p in enumerate(predictions):
            good = beat[opp_move]
            bad = beaten[opp_move]
            strategy_scores[j] += (my_loaded[good] * p[good] - opp_loaded[opp_move] * p[bad]) / sum(p)
    mh.append(my_move)
    oh.append(opp_move)
    predictions = [strategy(my_loaded, opp_loaded, mh, oh) for strategy in strategies]
    # Pick the best strategy (jittered argmax) and sample from its
    # distribution; if r exceeds the total mass, the loop falls through and
    # the last move is played.
    strategy = random_max(strategy_scores)
    p = predictions[strategy]
    r = random.random()
    for i, pi in enumerate(p):
        r -= pi
        if r <= 0:
            break
    return names[i]
|
# Advent of Code 2017 day 22: virus carrier walking an infinite grid.
directions = [(0, -1), (1, 0), (0, 1), (-1, 0)]  # up, right, down, left (clockwise)
position = (12, 12)  # centre of the 25x25 starting map
facing = 0 # directions index
infected = set()

# Mark every '#' cell of the starting map as infected, keyed by (x, y).
with open('22.input', 'r') as handle:
    for i, line in enumerate(handle):
        for j, character in enumerate(line):
            if character == '#':
                infected.add((j, i))

# 10000 bursts: on an infected node turn right and clean it; otherwise turn
# left, infect it and count the new infection. Then step forward.
count = 0
for i in range(10000):
    if position in infected:
        facing += 1
        facing %= len(directions)
        infected.remove(position)
    else:
        facing -= 1
        facing %= len(directions)
        infected.add(position)
        count += 1
    position = (position[0] + directions[facing][0], position[1] + directions[facing][1])
print(count)
|
986,286 | f72dbb2e4ca703b96eb4b3d502c227a3d0125a0a | import uiautomation as automation
from uiautomation import Win32API
from uiautomation import WaitForExist, WaitForDisappear
from PageObject import Notepad, Calculator
import os
from os.path import join, isfile
import re
current_dir = os.getcwd()
def openProgramViaRunDialog(program):
    """Open Win+R, type `program` and press Enter to launch it."""
    Win32API.SendKeys("{LWIN}{R}")
    WaitForExist(Notepad.dlgRun(), 5)
    Win32API.SendKeys(program)
    Win32API.SendKeys("{ENTER}")

def openNotepad():
    """Launch Notepad via the Run dialog and wait for its window."""
    openProgramViaRunDialog("notepad")
    WaitForExist(Notepad.wndNotepad(), 5)

def closeNotepad():
    """Close Notepad and wait for its window to disappear."""
    Notepad.btnClose().Click()
    WaitForDisappear(Notepad.wndNotepad(), 5)

def typeInNotepad(strText):
    """Focus the Notepad window and type `strText` into it."""
    Notepad.wndNotepad().Click()
    Win32API.SendKeys(strText)

def saveFile(fileName):
    """Save the current document as `fileName` inside current_dir via the
    File > Save dialog."""
    Notepad.mnuitemFile().Click()
    WaitForExist(Notepad.mnuFile(), 5)
    Notepad.mnuitemSave().Click()
    WaitForExist(Notepad.dlgSave(), 5)
    Notepad.editFileName().SetValue(join(current_dir, fileName))
    Notepad.btnSave().Click()

def checkFileIsSaved(fileName):
    """Return True if `fileName` exists inside current_dir."""
    return isfile(join(current_dir, fileName))

def deleteFileNotepad():
    """Delete every .txt file in current_dir (test cleanup)."""
    files = os.listdir(current_dir)
    for file in files:
        if file.endswith(".txt"):
            os.remove(join(current_dir, file))
def openCalculator():
    """Launch Calculator via the Run dialog and wait for its window."""
    openProgramViaRunDialog("calc")
    WaitForExist(Calculator.wndCalculator(), 5)

def closeCalculator():
    """Close Calculator and wait for its window to disappear."""
    Calculator.btnCloseCalc().Click()
    WaitForDisappear(Calculator.wndCalculator(), 5)

def operate_2Plus4():
    """Click out the expression 2 + 4 =."""
    Calculator.btnTwo().Click()
    Calculator.btnPlus().Click()
    Calculator.btnFour().Click()
    Calculator.btnEqual().Click()

def operate_15Plus30():
    """Click out the expression 15 + 30 =."""
    Calculator.btnOne().Click()
    Calculator.btnFive().Click()
    Calculator.btnPlus().Click()
    Calculator.btnThree().Click()
    Calculator.btnZero().Click()
    Calculator.btnEqual().Click()

def operate_99Minus83():
    """Click out the expression 99 - 83 =."""
    Calculator.btnNine().Click()
    Calculator.btnNine().Click()
    Calculator.btnMinus().Click()
    Calculator.btnEight().Click()
    Calculator.btnThree().Click()
    Calculator.btnEqual().Click()

def getActualResult():
    """Extract the first integer from the result display's accessible name."""
    actual_result = int(re.findall(r'\d+', Calculator.txtResults().AccessibleCurrentName())[0])
    return actual_result
986,287 | 924c95a82a7d7830229e4d7e22f222da9c97951e | #Exercício Python 14: Escreva um programa que converta uma temperatura digitando em graus Celsius e converta para graus Fahrenheit.
# Exercise 14: read a temperature in degrees Celsius and print the
# Fahrenheit equivalent.
celsius = float(input('Informe a temperatura em ºC: '))
fahrenheit = 9 * celsius / 5 + 32
print(f'A temperatura de {celsius}ºc corresponde a {fahrenheit}ºf')
986,288 | c5149db7da989a978e309544e02cb99ff035716f | # Jianming Wang aka Yonner Ming
from tkinter import *
import math
import random
import time
from nchoosekvisualization import nChooseKVisualization as vis
# first level functions
def init(data):
    """Initialise app state: input cursor, the two numeric inputs
    (people count, subcommittee size), mouse position, and the visuals."""
    data.cursor = 0
    data.inputNames = ["Number of People", "Size of Subcommittee"]
    data.inputs = [8, 3]
    data.mouse = [0, 0]
    updateNM(data)

def mousePressed(event, data):
    """Mouse clicks have no effect in this app."""
    pass

def keyDown(event, data):
    """Left/Right move the input cursor; Up/Down adjust the selected
    input, then clamp the values and rebuild the visuals."""
    if event.keysym == "Left":
        data.cursor = (data.cursor - 1) % len(data.inputs)
    elif event.keysym == "Right":
        data.cursor = (data.cursor + 1) % len(data.inputs)
    elif event.keysym == "Up":
        data.inputs[data.cursor] += 1
        inputRestrictions(data)
        updateNM(data)
    elif event.keysym == "Down":
        data.inputs[data.cursor] -= 1
        inputRestrictions(data)
        updateNM(data)
def inputRestrictions(data):
    """Clamp the two UI inputs: a non-negative group size, and a
    subcommittee size wrapped into the range [0, group size]."""
    data.inputs[0] = max(data.inputs[0], 0)
    data.inputs[1] = data.inputs[1] % (data.inputs[0] + 1)
def updateNM(data):
    """Rebuild the grid of n-choose-k visual panels.

    Lays out one pair of panels per term of the displayed sum (see
    drawPlusSigns), wrapping after r columns, plus a single summary panel
    for C(n, k) on the left.
    """
    data.visuals = [None] * ((data.inputs[0] - data.inputs[1] + 2) * 2 - 1)
    r = (data.width - 260) // 440  # panel pairs that fit per row
    for i in range(0, data.inputs[0] - data.inputs[1] + 1):
        x = i % r
        y = i // r
        bounds = (300 + x * 440, 200 + y * 220, 200, 200)
        data.visuals[2 * i] = vis(data.inputs[0], data.inputs[1] + i, bounds)
        bounds = (300 + x * 440 + 200, 200 + y * 220, 200, 200)
        data.visuals[2 * i + 1] = vis(data.inputs[1] + i, data.inputs[1], bounds)
    # Summary panel showing C(n, k) itself.
    bounds = (50, 200, 200, 200)
    data.visuals[-1] = vis(data.inputs[0], data.inputs[1], bounds)
def keyUp(event, data):
    """Key releases have no effect in this app."""
    pass

def mouseMoved(event, data):
    """Track the current mouse position for hover-driven animation."""
    data.mouse = [event.x, event.y]
def inBounds(mouse, bounds):
    """Return True iff point `mouse` (x, y) lies inside the rectangle
    `bounds` given as (left, top, width, height); edges count as inside."""
    left, top, width, height = bounds
    x, y = mouse[0], mouse[1]
    return left <= x <= left + width and top <= y <= top + height
def timerFired(data):
    """Advance the animation of any visual the mouse is hovering over."""
    for v in data.visuals:
        b = v.bounds
        if inBounds(data.mouse, b):
            v.update(data.timerDelay / 500)

def redrawAll(canvas, data):
    """Draw the fps counter, every panel (animated when hovered, a text
    description otherwise), the plus/equals signs and the input UI."""
    drawFrameRate(canvas, data)
    for v in data.visuals:
        b = v.bounds
        canvas.create_rectangle(b[0], b[1], b[0] + b[2], b[1] + b[3])
        if inBounds(data.mouse, b):
            v.draw(canvas)
        else:
            text = ("Chooseing " + str(v.k) + " people from a group of " +
                    str(v.n) + " people")
            canvas.create_text(b[0], b[1], text=text, anchor="nw", width=b[2],
                               font = "120")
    drawPlusSigns(canvas, data)
    drawInputUI(canvas, data)

def drawPlusSigns(canvas, data):
    """Draw '=' between the summary panel and the grid, and '+' between
    successive panel pairs (mirrors the layout in updateNM)."""
    # first draw the equal sign
    cY = 300
    cX = (300 + 250) / 2
    canvas.create_text(cX, cY, text="=", font="40")
    # now draw a plus sign after every other box
    r = (data.width - 260) // 440
    for i in range(0, data.inputs[0] - data.inputs[1]):
        x = i % r
        y = i // r
        bounds = (300 + x * 440 + 200, 200 + y * 220, 200, 200)
        cX = 300 + x * 440 + 200 + 200 + 40 / 2
        cY = 200 + y * 220 + 200 / 2
        canvas.create_text(cX, cY, text="+", font="40")

def drawInputUI(canvas, data):
    """Draw the two labelled inputs, highlighting the cursor's selection."""
    for i in range(len(data.inputs)):
        cX = 300 + i * 200
        cY = 50
        canvas.create_text(cX, cY, text=data.inputNames[i], font="40")
        cY = 100
        if (data.cursor == i):
            r = 20
            canvas.create_rectangle(cX-r, cY-r, cX+r, cY+r, fill="yellow")
        canvas.create_text(cX, cY, text=str(data.inputs[i]), font="40")

def drawFrameRate(canvas, data):
    """Show fps: data.t holds the frame times from the last ~1 second, so
    its length is the frame rate (see timerFiredWrapper in run)."""
    frameRate = len(data.t)
    canvas.create_text(data.width-10, 0, anchor=NE, text="fps:"+str(frameRate))
# run function adapted from 15-112 course website
# run function adapted from 15-112 course website
def run(width=900, height=900):
    """Create the fullscreen window, wire up the event handlers and start
    the tkinter main loop.

    NOTE(review): the width/height parameters are unused — the window and
    data dimensions always come from the screen size below.
    """
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()

    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)

    def keyDownWrapper(event, canvas, data):
        keyDown(event, data)

    def keyUpWrapper(event, canvas, data):
        keyUp(event, data)

    def mouseMovedWrapper(event, canvas, data):
        mouseMoved(event, data)

    def timerFiredWrapper(canvas, data):
        # update times: keep the last ~1 second of frame durations in
        # data.t (drawFrameRate uses its length as the fps).
        t0 = time.time()
        data.timerDelay = (t0 - data.lastTime) * 1000
        data.t.append(data.timerDelay)
        while (sum(data.t) > 1000):
            data.t.pop(0)
        data.lastTime = t0
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # call timerFired again
        canvas.after(0, timerFiredWrapper, canvas, data)

    # create the root and the canvas (borderless fullscreen)
    root = Tk()
    root.overrideredirect(True)
    root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(),
                                       root.winfo_screenheight()))
    canvas = Canvas(root, width=root.winfo_screenwidth(),
                    height=root.winfo_screenheight())
    canvas.pack()
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = root.winfo_screenwidth()
    data.height = root.winfo_screenheight()
    data.lastTime = time.time()
    data.timerDelay = 0
    data.t = []
    init(data)
    # set up events
    root.bind("<Button-1>", lambda event:
              mousePressedWrapper(event, canvas, data))
    root.bind("<KeyPress>", lambda event:
              keyDownWrapper(event, canvas, data))
    root.bind("<KeyRelease>", lambda event:
              keyUpWrapper(event, canvas, data))
    root.bind("<Motion>", lambda event:
              mouseMovedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")

if __name__ == "__main__":
    run()
|
986,289 | 6467a0823d4cf6e2745ca448c89ee4d2481b5688 | '''
1. 完成一下步骤:
(1) 在任意位置创建一个目录,如'~/小练习'
(2) 在此目录下创建一个文件Blowing in the wind.txt
将以下内容写入文件
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
(3) 在文件头部插入标题“The Zen of Python”
(4) 在标题后插入作者“Tim Peters”
(5) 在文件末尾加上字符串“你的名字和作业时间”
(6) 在屏幕上打印文件内容
(7) 以上每一个要求封装成小函数
'''
import os
import time
def func():
    """Create ./小练习/Blowing in the wind.txt, write five Zen-of-Python
    lines into it, prepend a title and an author line, append a signature
    with the current timestamp, then print the file line by line."""
    folder = "./小练习"
    target = folder + "/" + "Blowing in the wind.txt"
    if not os.path.exists(folder):
        os.mkdir(folder)
    # (2) write the initial five lines
    poem = [
        "Beautiful is better than ugly.\n",
        "Explicit is better than implicit.\n",
        "Simple is better than complex.\n",
        "Complex is better than complicated.\n",
        "Flat is better than nested.\n",
    ]
    with open(target, "w+") as f:
        f.writelines(poem)
    # (3) insert the title at the top of the file
    with open(target, "r+") as f:
        body = f.read()
        f.seek(0, 0)
        f.write("The Zen of Python\n" + body)
    # (4) insert the author right after the title line
    with open(target, "r+") as f:
        title = f.readline()
        body = f.read()
        f.seek(0, 0)
        f.write(title + "Tim Peters\n " + body)
    # (5) append the author's name and the current timestamp
    with open(target, "a+") as f:
        f.write("caixiya" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
    # (6) echo the file contents line by line
    with open(target, "r+") as f:
        for line in iter(f.readline, ""):
            print(line)
if __name__=="__main__":
func()
|
986,290 | 82b6d0178ab9cfb191bffc98e71a77f1d161376a | from pushbullet import PushBullet, errors
def push_to_iOS(title, body, pb_key):
    """Send a note-style push notification through Pushbullet.

    title/body are the note contents; pb_key is the Pushbullet API token.
    """
    client = PushBullet(pb_key)
    client.push_note(title, body)
986,291 | baa1d7a3730355051b077c44e488927b30897777 | from os import environ
# Banano node API endpoint; override via the environment.
BANANO_HTTP_PROVIDER_URI = environ.get(
    "BANANO_HTTP_PROVIDER_URI", "https://api-beta.banano.cc"
)
# Coerce to int: environ values are always strings, so without the cast
# PORT was an int (7072) by default but a str whenever the variable was set.
PORT = int(environ.get("PORT", 7072))
print(f"Using {BANANO_HTTP_PROVIDER_URI} as API provider on port {PORT}")
|
986,292 | 1ae40b276cad640c4e7bbc1eceb806e08ffee23a | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 23:17:45 2015
@author: Maximus
Pulling data from Youtube
"""
import json
import urllib
# https://www.googleapis.com/youtube/v3/videos?part=statistics&id=Q5mHPo2yDG8&key=YOUR_API_KEY
# NOTE(review): hard-coded API key committed to source — rotate it and
# load it from the environment instead.
api_key = "AIzaSyCPGlBmzySKUuEId0C9GS0jrKZfArvSk6M"
service_url = 'https://www.googleapis.com/youtube/v3/videos?'
# Query: top-2 most-popular US videos, returning only id/title/statistics.
# Fix: "&regionCode" had been mojibake-corrupted to "®ionCode" ("&reg"
# rendered as the ® character), which silently dropped the region filter.
part = ("snippet,statistics&chart=mostPopular"
        "&fields=items(id,snippet(title),statistics)"
        "&maxResults=2&regionCode=US&key=" + api_key)
url = service_url + "part=" + part
topic = json.loads(urllib.urlopen(url).read())
986,293 | e4ad1900c668b74d4777e46b74c060e94c13881a | from . import pos_voucher
|
986,294 | a43635cf4ced232cfd049154642f4ae06c8afbbd | from src.module.deploy_utils import parse_war_path
from commands import getoutput
from log import LOG
import utility
def invoke(fingerengine, fingerprint, deployer):
    """Dispatch --invoke to the platform-specific handler.

    JBoss/Tomcat deployments are invoked as WARs, ColdFusion has its own
    handler, and anything else is reported as unsupported.
    """
    service = fingerengine.service
    if service in ("jboss", "tomcat"):
        return invoke_war(fingerengine, fingerprint)
    if service in ("coldfusion",):
        return invoke_cf(fingerengine, fingerprint, deployer)
    utility.Msg("Platform %s does not support --invoke" %
                fingerengine.options.remote_service, LOG.ERROR)
def invoke_war(fingerengine, fingerprint):
    """ Invoke a deployed WAR file on the remote server.

    This uses unzip because Python's zip module isn't very portable or
    fault tolerant; i.e. it fails to parse msfpayload-generated WARs, though
    this is a fault of metasploit, not the Python module.

    Locates a JSP inside the local WAR, then requests
    http://<ip>:<port>/<war name>/<jsp> to trigger it.
    """
    dfile = fingerengine.options.deploy
    # NOTE(review): dfile is interpolated into a shell pipeline; a path
    # containing shell metacharacters would be executed. Confirm dfile is
    # always operator-supplied before relying on this.
    jsp = getoutput("unzip -l %s | grep jsp" % dfile).split(' ')[-1]
    if jsp == '':
        utility.Msg("Failed to find a JSP in the deployed WAR", LOG.DEBUG)
        return
    else:
        utility.Msg("Using JSP {0} from {1} to invoke".format(jsp, dfile), LOG.DEBUG)
    # context path is the WAR file name without its extension
    url = "http://{0}:{1}/{2}/{3}".format(fingerengine.options.ip,
                                          fingerprint.port,
                                          parse_war_path(dfile),
                                          jsp)
    if _invoke(url):
        utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
    else:
        utility.Msg("Failed to invoke {0}".format(parse_war_path(dfile, True)),
                    LOG.ERROR)
def invoke_cf(fingerengine, fingerprint, deployer):
    """ Invoke a payload deployed to a ColdFusion server.

    The URL depends on the CF version and the deployer used: CF 10
    payloads fire by triggering a 404 handler, CF 8 FCKeditor deployments
    live under /userfiles/file/, everything else sits under /CFIDE/.
    """
    dfile = parse_war_path(fingerengine.options.deploy, True)
    if fingerprint.version in ["10.0"]:
        # deployments to 10 require us to trigger a 404
        url = "http://{0}:{1}/CFIDE/ad123.cfm".format(fingerengine.options.ip,
                                                      fingerprint.port)
    elif fingerprint.version in ["8.0"] and "fck_editor" in deployer.__name__:
        # invoke a shell via FCKeditor deployer
        url = "http://{0}:{1}/userfiles/file/{2}".format(fingerengine.options.ip,
                                                         fingerprint.port,
                                                         dfile)
    else:
        url = "http://{0}:{1}/CFIDE/{2}".format(fingerengine.options.ip,
                                                fingerprint.port,
                                                dfile)
    if _invoke(url):
        utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
    else:
        utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def _invoke(url):
    """ Make the request

    Returns True only when the target answers 200 OK; any other status
    or a request error yields False.
    """
    status = False
    try:
        response = utility.requests_get(url)
        if response.status_code == 200:
            status = True
    except Exception, e:
        # best-effort: log the failure and report False rather than raise
        utility.Msg("Failed to invoke payload: %s" % e, LOG.ERROR)
        status = False
    return status
|
986,295 | 147f4a99e5e5f874ba7303ea159ac0af362e9ba1 | # Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RQMC support."""
from tf_quant_finance.experimental.rqmc import utils
from tf_quant_finance.experimental.rqmc.digital_net import random_scrambling_matrices
from tf_quant_finance.experimental.rqmc.digital_net import sample_digital_net
from tf_quant_finance.experimental.rqmc.digital_net import scramble_generating_matrices
from tf_quant_finance.experimental.rqmc.lattice_rule import random_scrambling_vectors
from tf_quant_finance.experimental.rqmc.lattice_rule import sample_lattice_rule
from tf_quant_finance.experimental.rqmc.sobol import sample_sobol
from tf_quant_finance.experimental.rqmc.sobol import sobol_generating_matrices
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
# Curated public API of the experimental RQMC package.
_allowed_symbols = [
    'random_scrambling_matrices',
    'random_scrambling_vectors',
    'sample_digital_net',
    'sample_lattice_rule',
    'sample_sobol',
    'scramble_generating_matrices',
    'sobol_generating_matrices',
    'utils',
]
# Strip every module attribute not listed above — TF's convention for
# limiting generated docs and dir() to the supported surface.
remove_undocumented(__name__, _allowed_symbols)
|
986,296 | ba75ca97ad8cf44fd36879bd1d44d6a499a0bfbd | class Person:
def __init__(self, name, age, contact, salary):
self.name = name
self.age = age
self.contact = contact
self.salary = salary
def get_name(self):
return self.name
def get_salary(self):
return self.salary
# Demo: construct two Person instances and exercise both the accessor
# methods and direct attribute access.
jon = Person(name="Jon", age=24, contact="Rajshahi", salary=200000)
# print(jon.get_name())
# print(jon.get_salary())
karim = Person(name="Karim", age=30, contact="Dhaka", salary=2000)
print(karim.get_name())
print(karim.get_salary())
print(karim.name)
# Plain-dict alternative to the class above.
person ={
    'name': 'Karim',
    'age': 23
}
# NOTE(review): this lookup's result is discarded — dead expression.
person['name']
|
986,297 | af9415a06e117144255881b818b1123cb7bc3806 | #!/usr/bin/env python
from random import randint
class Tile(object):
    """One board square.

    safe: capturing is forbidden on this square.
    rollagain: landing here grants an extra roll.
    piece: 0 for empty, otherwise the occupying player's number.
    """

    def __init__(self, safe, rollagain):
        self.safe, self.rollagain = safe, rollagain
        self.piece = 0
class Player(object):
    """A player's number, path across the board, score, and the count of
    pieces still waiting to enter play."""

    def __init__(self, player, path):
        self.player, self.path = player, path
        self.score = 0
        # seven pieces start off-board, ready to be moved on
        self.reserve = 7
def board_init():
    """Build the 8x3 board and two players with the traditional paths.

    Assumes the most common path layout; alternative layouts would need
    their own constructor.
    """
    # board is indexed [column][row]; every tile starts plain and empty
    board = [[Tile(False, False) for _row in range(3)] for _col in range(8)]
    # the centre rosette is the only capture-safe square
    board[3][1].safe = True
    # rosette squares grant an extra roll
    for col, row in ((0, 0), (0, 2), (7, 0), (7, 2), (3, 1)):
        board[col][row].rollagain = True
    # each path: down the player's outer row, along the shared middle row,
    # then back out through the last two squares of their own row
    path1 = [board[col][0] for col in range(3, -1, -1)]
    path2 = [board[col][2] for col in range(3, -1, -1)]
    middle = [board[col][1] for col in range(8)]
    path1.extend(middle)
    path2.extend(middle)
    for col in (7, 6):
        path1.append(board[col][0])
        path2.append(board[col][2])
    return board, [Player(1, path1), Player(2, path2)]
def roll():
    """Roll four binary dice and return the number of 1s (0-4)."""
    total = 0
    for _ in range(4):
        total += randint(0, 1)
    return total
def printboard(board):
    """Print the 8x3 board to the terminal using ANSI colour escapes.

    The gap in the top/bottom edge strings matches the squares missing
    from the outer rows (see printline's x == 4 special case).
    """
    horedge = "\033[0;37;40m+---+---+---+---+ +---+---+"
    hormid = "\033[0;37;40m+---+---+---+---+---+---+---+---+"
    print(horedge)
    print(printline(board,0))
    print(hormid)
    print(printline(board,1))
    print(hormid)
    print(printline(board,2))
    print(horedge)
    return
def printline(board, y):
    """Build one coloured row string for board row *y*.

    Empty squares render on the default background, player 1 pieces on a
    red background, player 2 on blue; outer rows (y 0 and 2) get a gap
    after column 4 instead of a divider.
    """
    output = "\033[0;37;40m|"
    for x in range(len(board)):
        # cell colour comes from the occupying piece (0 = empty)
        if board[x][y].piece == 0:
            output += " "
        elif board[x][y].piece == 1:
            output += "\033[0;37;41m "
        elif board[x][y].piece == 2:
            output += "\033[0;37;44m "
        if (((y == 0) or (y == 2)) and (x == 4)):
            # outer rows have no square here: leave a gap, no divider
            output += " "
        else:
            output += "\033[0;37;40m|"
    return output
def turn(board, players, playerturn):
    """Play one turn for players[playerturn] and return the updated
    (board, players, playerturn).

    Rolls the dice, computes the legal moves, prompts for a choice, and
    resolves it (entering from reserve, moving, capturing, bearing off,
    and extra rolls on rosette squares).

    Fixes over the previous revision: tiles were compared directly to the
    player number instead of via .piece; destination indexing could run
    past the end of the path (`<= len`); `choice` was read before
    assignment and compared against the undefined name `false`; input()
    returns a str and was never converted; entering a piece from the
    reserve never set a source square (NameError) nor decremented the
    reserve; bearing off indexed path[len] when checking rollagain; and a
    roll of zero was not treated as a pass despite the skip message.
    """
    me = playerturn + 1              # this player's piece marker
    player = players[playerturn]
    path = player.path
    diceroll = roll()

    # --- legal moves: each entry is a piece number counted from the start
    # of the path; 0 means "enter a new piece from the reserve".
    legalmoves = []
    if diceroll > 0:
        if player.reserve > 0 and path[diceroll].piece != me:
            legalmoves.append(0)
        count = 0
        for pos in range(len(path)):
            if path[pos].piece != me:
                continue
            count += 1
            dest = pos + diceroll
            if dest == len(path):
                # an exact roll bears the piece off the board
                legalmoves.append(count)
            elif dest < len(path) and path[dest].piece != me:
                # a safe square cannot be captured while occupied
                if not (path[dest].safe and path[dest].piece != 0):
                    legalmoves.append(count)

    if not legalmoves:
        print('no legal moves or roll of zero: skipping turn')
        return board, players, (playerturn + 1) % 2

    # --- prompt until a legal choice is entered
    print('legal moves are: {}'.format(legalmoves))
    print('pieces are numbered in order from start to finish')
    print('moving pieces onto the table is 0')
    choice = None
    while choice not in legalmoves:
        print('select which piece to move')
        try:
            choice = int(input())
        except ValueError:
            continue

    # --- resolve the chosen move
    if choice == 0:
        # enter a new piece from the reserve
        player.reserve -= 1
        endspace = diceroll
    else:
        count = 0
        startspace = 0
        for pos in range(len(path)):
            if path[pos].piece == me:
                count += 1
                if count == choice:
                    startspace = pos
                    break
        endspace = startspace + diceroll
        path[startspace].piece = 0

    if endspace == len(path):
        # borne off: score and pass the turn
        player.score += 1
        return board, players, (playerturn + 1) % 2

    if path[endspace].piece != 0:
        # capture: the opponent's piece returns to their reserve
        players[(playerturn + 1) % 2].reserve += 1
    path[endspace].piece = me

    # rosette squares grant another roll; otherwise play passes
    if not path[endspace].rollagain:
        playerturn = (playerturn + 1) % 2
    return board, players, playerturn
def wincon(players):
    """Return True (announcing the winner) once a player has borne off
    all seven pieces; False while the game continues."""
    if players[0].score == 7:
        print("Player 1 Wins!")
        return True
    if players[1].score == 7:
        print("Player 2 Wins!")
        return True
    return False
def main():
    """Game entry point: set up the board, then alternate turns until one
    player has borne off all seven pieces."""
    #initializes the game
    board, players = board_init()
    #takes each player turn
    playerturn = randint(0, 1)  # random starting player
    while True:
        board, players, playerturn = turn(
            board, players, playerturn
        )
        printboard(board)
        #exits if anyone wins
        if wincon(players) == True:
            break
if __name__ == '__main__':
main()
|
986,298 | d6946b142be2b6b452d3693997913f34a06ab6ef | from .rdm_plot import show_rdm, show_rdm_panel, add_descriptor_x_labels, add_descriptor_y_labels
from .mds_plot import mds, rdm_dimension_reduction
from .model_plot import plot_model_comparison
from .icon import Icon
from .icon import icons_from_folder
|
986,299 | f19d2b974904edfd508aa8bbe9bb47e19bb9ddf6 | import RPi.GPIO as GPIO
import io
import logging
import subprocess
import shared_cfg
# Shell script that switches the Pi's USB gadget personality (HID/RNDIS).
MODE_SWITCH_SCRIPT = "/home/pi/switch-mode.sh"
# TFT buttons; button 1 is left-most
# (values are physical header pin numbers — GPIO.BOARD numbering is set
# in setup_gpio below)
TFT_BUTTON_1_PIN = 11 # GPIO17
TFT_BUTTON_2_PIN = 15 # GPIO22
TFT_BUTTON_3_PIN = 16 # GPIO23
TFT_BUTTON_4_PIN = 13 # GPIO27
# Encoder inputs
ENC_A_PIN = 40 # GPIO21
ENC_B_PIN = 38 # GPIO20
ENC_BUTTON_PIN = 35 # GPIO19
#ENC_BIT4_PIN = 36 # GPIO16
ENC_COMMON_PIN = 33 # GPIO13
# Quadrature pins ordered most-significant bit first for get_enc_value().
ENC_QUAD_PINS = [ ENC_B_PIN, ENC_A_PIN ]
log = logging.getLogger(__name__)
def set_device_mode(mode):
    """Switch the Pi's USB gadget personality via the mode-switch script.

    mode: shared_cfg.HID_USB_MODE or shared_cfg.RNDIS_USB_MODE; any other
    value is logged and ignored.
    """
    if mode == shared_cfg.HID_USB_MODE:
        subprocess.call([MODE_SWITCH_SCRIPT, "hid"])
    elif mode == shared_cfg.RNDIS_USB_MODE:
        subprocess.call([MODE_SWITCH_SCRIPT, "rndis"])
    else:
        # fix: Logger.warn is a deprecated alias for Logger.warning
        log.warning("Unknown device mode requested.")
def setup_gpio():
    """Configure every pin this firmware uses and arm edge detection.

    Uses physical (BOARD) pin numbering throughout.
    """
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)  # RPi pin-numbering scheme
    # All four TFT buttons share the same setup: pulled-up input with a
    # debounced rising-edge event.
    for pin in (TFT_BUTTON_1_PIN, TFT_BUTTON_2_PIN,
                TFT_BUTTON_3_PIN, TFT_BUTTON_4_PIN):
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(pin, GPIO.RISING, bouncetime=200)
    # Drive the encoder common line low — presumably so closed contacts
    # pull the pulled-up sense pins to ground (TODO confirm wiring).
    GPIO.setup(ENC_COMMON_PIN, GPIO.OUT)
    GPIO.output(ENC_COMMON_PIN, GPIO.LOW)
    # Quadrature pins: watch both edges, no debounce.
    for pin in (ENC_A_PIN, ENC_B_PIN):
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(pin, GPIO.BOTH)
    GPIO.setup(ENC_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.add_event_detect(ENC_BUTTON_PIN, GPIO.RISING, bouncetime=50)
def get_enc_value():
    """Sample the quadrature pins and pack them into an integer, with the
    first pin in ENC_QUAD_PINS as the most-significant bit."""
    value = 0
    for pin in ENC_QUAD_PINS:
        value = (value << 1) | (1 if GPIO.input(pin) else 0)
    return value
def check_gpio(current_enc_value):
    """Drain pending GPIO edge events and report what happened.

    Returns (new_enc_value, enc_button_pressed, hw_button_pressed) where
    hw_button_pressed is 0 when no TFT button fired, otherwise the button
    number (1-4; the last pending event wins, as in the original chain).
    """
    hw_button_pressed = 0
    tft_buttons = (TFT_BUTTON_1_PIN, TFT_BUTTON_2_PIN,
                   TFT_BUTTON_3_PIN, TFT_BUTTON_4_PIN)
    for number, pin in enumerate(tft_buttons, start=1):
        if GPIO.event_detected(pin):
            hw_button_pressed = number
    enc_button_pressed = bool(GPIO.event_detected(ENC_BUTTON_PIN))
    # Poll every quadrature pin: event_detected clears the pending flag,
    # so no short-circuiting here.
    encoder_changed = False
    for pin in ENC_QUAD_PINS:
        if GPIO.event_detected(pin):
            log.debug("Event for pin {0} detected".format(pin))
            encoder_changed = True
    new_enc_value = get_enc_value() if encoder_changed else current_enc_value
    return new_enc_value, enc_button_pressed, hw_button_pressed
# One HID keyboard report is 8 bytes: modifier, reserved, keycode, then
# five unused key slots (this firmware only ever sends one key at a time).
NULL_CHAR = chr(0)

# Shift modifier byte used by the original table (0x20 = Right Shift bit).
_SHIFT_MOD = chr(32)


def _hid_report(keycode, shifted=False):
    """Build one 8-byte HID report for *keycode*, optionally with Shift."""
    modifier = _SHIFT_MOD if shifted else NULL_CHAR
    return modifier + NULL_CHAR + chr(keycode) + NULL_CHAR * 5


def _build_key_code_dict():
    """Construct the char -> HID report table for a standard US layout.

    Built programmatically instead of as a ~100-line literal so the report
    format lives in one place (_hid_report) and rows cannot drift apart.
    """
    table = {}
    # Letters: HID usage IDs 4..29; uppercase is the same key with Shift.
    for offset, letter in enumerate("abcdefghijklmnopqrstuvwxyz"):
        table[letter] = _hid_report(4 + offset)
        table[letter.upper()] = _hid_report(4 + offset, shifted=True)
    # Digits 1..9,0: usage IDs 30..39; shifted forms are the US symbols.
    for offset, (digit, symbol) in enumerate(zip("1234567890", "!@#$%^&*()")):
        table[digit] = _hid_report(30 + offset)
        table[symbol] = _hid_report(30 + offset, shifted=True)
    # Whitespace keys (no shifted variant).
    table[' '] = _hid_report(44)
    table['\n'] = _hid_report(40)  # Enter — new: lets keyboard_out send newlines
    table['\t'] = _hid_report(43)  # Tab — new
    # Punctuation: usage ID -> (unshifted char, shifted char).
    punctuation = {
        45: "-_", 46: "=+", 47: "[{", 48: "]}", 49: "\\|", 50: "`~",
        51: ";:", 52: "'\"", 54: ",<", 55: ".>", 56: "/?",
    }
    for keycode, (plain, shifted) in punctuation.items():
        table[plain] = _hid_report(keycode)
        table[shifted] = _hid_report(keycode, shifted=True)
    return table


# Standard US keyboard key code mapping
KEY_CODE_DICT = _build_key_code_dict()
def write_report(report):
    """Send one 8-byte HID keyboard report to the USB gadget device.

    report: 8-char string (modifier, reserved, keycode, 5 padding bytes),
    as produced by the KEY_CODE_DICT entries above.
    """
    with io.open("/dev/hidg0","wb") as dev:
        dev.write(report.encode())
def keyboard_out(text):
    """Type *text* on the host by emitting one HID report per character.

    A key-release (all-zero) report is inserted between two identical
    consecutive characters so the host registers a distinct keypress, and
    a final release report ensures no key is left held down.
    """
    previous = None
    for ch in text:
        if ch == previous:
            # release first, otherwise the repeated key would be swallowed
            write_report(NULL_CHAR * 8)
        write_report(KEY_CODE_DICT[ch])
        previous = ch
    write_report(NULL_CHAR * 8)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.