hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3edb25141ff3bac7e8859e484447cd10c09ff20
| 85,475
|
py
|
Python
|
models/networks.py
|
tkuri/CGIntrinsics
|
e84b73aa3784112b389b955258966f827b1f03d9
|
[
"MIT"
] | null | null | null |
models/networks.py
|
tkuri/CGIntrinsics
|
e84b73aa3784112b389b955258966f827b1f03d9
|
[
"MIT"
] | null | null | null |
models/networks.py
|
tkuri/CGIntrinsics
|
e84b73aa3784112b389b955258966f827b1f03d9
|
[
"MIT"
] | 1
|
2021-06-19T16:50:19.000Z
|
2021-06-19T16:50:19.000Z
|
import torch
import torch.nn as nn
import torch.sparse
from torch.autograd import Variable
import numpy as np
import sys
from torch.autograd import Function
import math
import h5py
import json
# from . import resnet1
import matplotlib.pyplot as plt
from skimage.transform import resize
###############################################################################
# Functions
###############################################################################
def weights_init(m):
    """Initialize a layer in-place: Conv* weights ~ N(0, 0.02);
    BatchNorm2d weights ~ N(1, 0.02) with zero bias. Intended for
    use with ``net.apply(weights_init)``."""
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm2d' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type):
    """Return the normalization-layer class for *norm_type*.

    Args:
        norm_type: 'batch' for nn.BatchNorm2d or 'instance' for nn.InstanceNorm2d.

    Returns:
        The nn.Module subclass (a class, not an instance).

    Raises:
        NotImplementedError: if norm_type is not recognized.
    """
    if norm_type == 'batch':
        norm_layer = nn.BatchNorm2d
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d
    else:
        # BUG FIX: the original printed a message referencing the undefined
        # name `norm` (NameError) and then returned an unbound `norm_layer`
        # (UnboundLocalError). Fail fast with a clear error instead.
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=None):
    """Construct, initialize, and (optionally) move a generator network to GPU.

    Args:
        input_nc: number of input channels.
        output_nc: number of output channels. NOTE: ignored for 'unet_256',
            which hard-codes 3 output channels (kept from the original code).
        ngf: number of filters in the first conv layer.
        which_model_netG: one of 'resnet_9blocks', 'resnet_6blocks',
            'unet_128', 'unet_256'.
        norm: 'batch' or 'instance' normalization (see get_norm_layer).
        use_dropout: enable dropout inside the generator.
        gpu_ids: optional list of GPU ids; if non-empty the net is moved to
            gpu_ids[0]. Defaults to no GPUs.

    Returns:
        The generator module with weights_init applied.

    Raises:
        NotImplementedError: for an unrecognized which_model_netG.
    """
    # Avoid the mutable-default-argument pitfall of `gpu_ids=[]`.
    if gpu_ids is None:
        gpu_ids = []
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        # This configuration always predicts 3 output channels regardless of
        # the requested output_nc -- preserved from the original implementation.
        output_nc = 3
        netG = MultiUnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        # BUG FIX: the original only printed here, leaving netG = None and
        # crashing later with AttributeError on netG.cuda()/netG.apply().
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    if use_gpu:
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=None):
    """Construct, initialize, and (optionally) move a discriminator to GPU.

    Args:
        input_nc: number of input channels.
        ndf: number of filters in the first conv layer.
        which_model_netD: 'basic' (fixed 3-layer PatchGAN) or 'n_layers'
            (uses n_layers_D).
        n_layers_D: layer count for the 'n_layers' variant.
        norm: 'batch' or 'instance' normalization (see get_norm_layer).
        use_sigmoid: append a sigmoid to the discriminator output.
        gpu_ids: optional list of GPU ids; if non-empty the net is moved to
            gpu_ids[0]. Defaults to no GPUs.

    Returns:
        The discriminator module with weights_init applied.

    Raises:
        NotImplementedError: for an unrecognized which_model_netD.
    """
    # Avoid the mutable-default-argument pitfall of `gpu_ids=[]`.
    if gpu_ids is None:
        gpu_ids = []
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        # BUG FIX: the original only printed here, leaving netD = None and
        # crashing later on netD.apply(); fail fast with a clear error.
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        # BUG FIX: `netD.cuda(device_id=...)` -- the device_id keyword was
        # removed in PyTorch 0.4; positional form matches define_G above.
        netD.cuda(gpu_ids[0])
    netD.apply(weights_init)
    return netD
def print_network(net):
    """Print the network architecture followed by its total parameter count."""
    total_params = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total_params)
##############################################################################
# Classes
##############################################################################
class Sparse(Function):
    # Sparse matrix for S
    # Autograd op computing `S @ input` for a fixed sparse matrix S; S itself
    # receives no gradient.
    # NOTE(review): this is the legacy (pre-PyTorch-0.4) autograd.Function
    # style with non-static forward/backward called via an instance
    # (`Sparse()(x, S)`); modern PyTorch requires @staticmethod
    # forward/backward and `Sparse.apply(...)` -- confirm the installed
    # torch version still supports this calling convention.
    def forward(self, input, S):
        # Save S so backward can apply its transpose.
        self.save_for_backward(S)
        output = torch.mm(S, input)
        # output = output.cuda()
        return output
    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        # d/d(input) of (S @ input) is S^T @ grad_output; None for S.
        S, = self.saved_tensors
        grad_weight = None
        grad_input = torch.mm(S.t(), grad_output)
        # grad_input = grad_input.cuda()
        return grad_input, grad_weight
class JointLoss(nn.Module):
    def __init__(self):
        """Aggregate loss module for intrinsic image decomposition training;
        stores the per-term weights and the 3x3 neighborhood offsets used by
        the local smoothness losses."""
        super(JointLoss, self).__init__()
        # Loss-term weights. Abbreviations inferred from the method names
        # below (ss = shading smoothness, rs = reflectance smoothness,
        # sp = shading penalty, SAW/IIW = the respective datasets) --
        # TODO confirm against the callers of this module.
        self.w_ss_local = 2.0
        self.w_SAW = 1.0
        self.w_rs_local = 1.0
        self.w_reconstr = 2.0
        self.w_reconstr_real = 2.0
        self.w_rs_dense = 2.0
        self.w_ls = 2.0
        self.w_ss_dense = 4.0
        self.w_sp = 0.25
        self.w_IIW = 4.0
        self.w_feature = 0.75
        self.w_grad = 0.25
        # 5x5 spatial weight template (border cells half weight); presumably
        # used by a local smoothness term -- verify against callers.
        self.local_s_w = np.array([[0.5, 0.5, 0.5, 0.5, 0.5], \
                        [0.5, 1 , 1 , 1, 0.5],\
                        [0.5, 1, 1, 1, 0.5],\
                        [0.5, 1, 1, 1, 0.5],\
                        [0.5, 0.5, 0.5, 0.5, 0.5]])
        # 3x3 neighborhood offsets (-1, 0, 1): self.X/self.Y are used by
        # MaskLocalSmoothenessLoss / LocalAlebdoSmoothenessLoss to index
        # shifted windows.
        x = np.arange(-1, 2)
        y = np.arange(-1, 2)
        self.X, self.Y = np.meshgrid(x, y)
        # self.h_offset = [0,0,0,1,1,2,2,2,1]
        # self.w_offset = [0,1,2,0,2,0,1,2,1]
        self.total_loss = None
        self.running_stage = 0
    def BilateralRefSmoothnessLoss(self, pred_R, targets, att, num_features):
        """Dense bilateral smoothness loss on the predicted reflectance.

        For each image, evaluates the quadratic smoothness energy
        (p'p - p'Ap) / Z where p is the flattened prediction and A is a
        bilateral affinity applied via precomputed sparse matrices from
        `targets` (keys att+'B_list' blur pyramid, att+'S' splat/slice,
        att+'N' bi-stochastic normalization). Requires CUDA tensors.

        Args:
            pred_R: predicted reflectance, shape (B, C, H, W).
            targets: dict carrying the per-image sparse matrices (see above).
            att: key prefix selecting which set of matrices to use.
            num_features: blur-pyramid depth; B_list must have
                num_features + 1 entries.

        Returns:
            1-element Variable: energy averaged over the batch.
        """
        # pred_R = pred_R.cpu()
        total_loss = Variable(torch.cuda.FloatTensor(1))
        total_loss[0] = 0
        N = pred_R.size(2) * pred_R.size(3)   # pixels per channel
        Z = (pred_R.size(1) * N )             # normalizer: elements per image
        # grad_input = torch.FloatTensor(pred_R.size())
        # grad_input = grad_input.zero_()
        for i in range(pred_R.size(0)): # for each image
            B_mat = targets[att+'B_list'][i] # still list of blur sparse matrices
            S_mat = Variable(targets[att + 'S'][i].cuda(), requires_grad = False) # Splat and Slicing matrix
            n_vec = Variable(targets[att + 'N'][i].cuda(), requires_grad = False) # bi-stochatistic vector, which is diagonal matrix
            p = pred_R[i,:,:,:].view(pred_R.size(1),-1).t() # NX3
            # p'p
            # p_norm = torch.mm(p.t(), p)
            # p_norm_sum = torch.trace(p_norm)
            p_norm_sum = torch.sum(torch.mul(p,p))
            # S * N * p
            Snp = torch.mul(n_vec.repeat(1,pred_R.size(1)), p)
            sp_mm = Sparse()
            Snp = sp_mm(Snp, S_mat)
            Snp_1 = Snp.clone()
            Snp_2 = Snp.clone()
            # # blur
            # Apply the blur pyramid in both orders (coarse-to-fine and
            # fine-to-coarse) and sum, approximating the symmetric filter.
            for f in range(num_features+1):
                B_var1 = Variable(B_mat[f].cuda(), requires_grad = False)
                sp_mm1 = Sparse()
                Snp_1 = sp_mm1(Snp_1, B_var1)
                B_var2 = Variable(B_mat[num_features-f].cuda(), requires_grad = False)
                sp_mm2 = Sparse()
                Snp_2 = sp_mm2(Snp_2, B_var2)
            Snp_12 = Snp_1 + Snp_2
            pAp = torch.sum(torch.mul(Snp, Snp_12))
            total_loss = total_loss + ((p_norm_sum - pAp)/Z)
        total_loss = total_loss/pred_R.size(0)
        # average over all images
        return total_loss
def SUNCGReconstLoss(self, R, S, mask, targets):
rgb_img = Variable(targets['rgb_img'].cuda(), requires_grad = False)
S = S.repeat(1,3,1,1)
chromaticity = Variable(targets['chromaticity'].cuda(), requires_grad = False)
R = torch.mul(chromaticity, R.repeat(1,3,1,1))
return torch.mean( torch.pow(torch.mul(mask, rgb_img - torch.mul(R, S)), 2) )
def IIWReconstLoss(self, R, S, targets):
S = S.repeat(1,3,1,1)
rgb_img = Variable(targets['rgb_img'].cuda(), requires_grad = False)
# 1 channel
chromaticity = Variable(targets['chromaticity'].cuda(), requires_grad = False)
p_R = torch.mul(chromaticity, R.repeat(1,3,1,1))
# return torch.mean( torch.mul(L, torch.pow( torch.log(rgb_img) - torch.log(p_R) - torch.log(S), 2)))
return torch.mean( torch.pow( rgb_img - torch.mul(p_R, S), 2))
    def Ranking_Loss(self, prediction_R, judgements, is_flip):
        """Pairwise reflectance ranking loss from IIW-style human judgements.

        For each annotated point pair: if one point was judged darker, a
        squared hinge penalizes predictions that violate the ordering by
        margin tau; if judged equal, a squared difference is accumulated.
        Returns the summed loss averaged over the number of contributing
        comparisons. Requires CUDA.

        Args:
            prediction_R: predicted reflectance, shape (C, H, W).
            judgements: dict with 'intrinsic_points' / 'intrinsic_comparisons'
                (normalized x/y coordinates, darker label, confidence).
            is_flip: unused -- the horizontal-flip indexing is commented out.
        """
        #ranking loss for each prediction feature
        tau = 0.25 #abs(I1 - I2)) ) #1.2 * (1 + math.fabs(math.log(I1) - math.log(I2) ) )
        points = judgements['intrinsic_points']
        comparisons = judgements['intrinsic_comparisons']
        id_to_points = {p['id']: p for p in points}
        rows = prediction_R.size(1)
        cols = prediction_R.size(2)
        num_valid_comparisons = 0
        num_valid_comparisons_ineq =0
        num_valid_comparisons_eq = 0
        total_loss_eq = Variable(torch.cuda.FloatTensor(1))
        total_loss_eq[0] = 0
        total_loss_ineq = Variable(torch.cuda.FloatTensor(1))
        total_loss_ineq[0] = 0
        for c in comparisons:
            # "darker" is "J_i" in our paper
            darker = c['darker']
            if darker not in ('1', '2', 'E'):
                continue
            # "darker_score" is "w_i" in our paper
            # remove unconfident point
            weight = c['darker_score']
            # NOTE(review): the None check is dead -- if weight were None,
            # `weight < 0.5` would already raise TypeError on Python 3.
            if weight < 0.5 or weight is None:
                continue
            point1 = id_to_points[c['point1']]
            point2 = id_to_points[c['point2']]
            if not point1['opaque'] or not point2['opaque']:
                continue
            # if is_flip:
            #     l1 = prediction_R[:, int(point1['y'] * rows), cols - 1 - int( point1['x'] * cols)]
            #     l2 = prediction_R[:, int(point2['y'] * rows), cols - 1 - int( point2['x'] * cols)]
            # else:
            # Sample the per-channel prediction at the two annotated pixels
            # (normalized coordinates scaled to the prediction grid).
            l1 = prediction_R[:, int(point1['y'] * rows), int(point1['x'] * cols)]
            l2 = prediction_R[:, int(point2['y'] * rows), int(point2['x'] * cols)]
            l1_m = l1 #torch.mean(l1)
            l2_m = l2 #torch.mean(l2)
            # print(int(point1['y'] * rows), int(point1['x'] * cols), int(point2['y'] * rows), int(point2['x'] * cols), darker)
            # print(point1['y'], point1['x'], point2['y'], point2['x'], c['point1'], c['point2'])
            # print("===============================================================")
            # l2 > l1, l2 is brighter
            # if darker == '1' and ((l1_m.data[0] / l2_m.data[0]) > 1.0/tau):
            #     # loss =0
            #     loss = weight * torch.mean((tau - (l2_m / l1_m)))
            #     num_valid_comparisons += 1
            # # l1 > l2, l1 is brighter
            # elif darker == '2' and ((l2_m.data[0] / l1_m.data[0]) > 1.0/tau):
            #     # loss =0
            #     loss = weight * torch.mean((tau - (l1_m / l2_m)))
            #     num_valid_comparisons += 1
            # # is equal
            # elif darker == 'E':
            #     loss = weight * torch.mean(torch.abs(l2 - l1))
            #     num_valid_comparisons += 1
            # else:
            #     loss = 0.0
            # l2 is brighter
            # Squared hinge: penalize only when the margin tau is violated.
            if darker == '1' and ((l1_m.data[0] - l2_m.data[0]) > - tau):
                # print("dark 1", l1_m.data[0] - l2_m.data[0])
                total_loss_ineq += weight * torch.mean( torch.pow( tau -  (l2_m - l1_m), 2) )
                num_valid_comparisons_ineq += 1.
                # print("darker 1 loss", l2_m.data[0], l1_m.data[0], loss.data[0])
            # l1 > l2, l1 is brighter
            elif darker == '2' and ((l2_m.data[0] - l1_m.data[0]) > - tau):
                # print("dark 2", l2_m.data[0] - l1_m.data[0])
                total_loss_ineq += weight * torch.mean( torch.pow( tau - (l1_m - l2_m),2) )
                num_valid_comparisons_ineq += 1.
                # print("darker 2 loss", l2_m.data[0], l1_m.data[0], loss.data[0])
            elif darker == 'E':
                total_loss_eq += weight * torch.mean( torch.pow(l2 - l1,2) )
                num_valid_comparisons_eq += 1.
            else:
                # NOTE(review): dead assignment -- `loss` is never read.
                loss = 0.0
        total_loss = total_loss_ineq + total_loss_eq
        num_valid_comparisons = num_valid_comparisons_eq + num_valid_comparisons_ineq
        # print("average eq loss", total_loss_eq.data[0]/(num_valid_comparisons_eq + 1e-6))
        # print("average ineq loss", total_loss_ineq.data[0]/(num_valid_comparisons_ineq + 1e-6))
        # epsilon guards against zero valid comparisons
        return total_loss/(num_valid_comparisons + 1e-6)
    def BatchRankingLoss(self, prediction_R, judgements_eq, judgements_ineq, random_filp):
        """Vectorized ranking loss over batched equality/inequality judgements.

        Equality pairs contribute a weighted squared difference of the
        per-channel predictions; inequality pairs contribute a weighted
        squared hinge (ReLU margin tau) on the channel-mean intensity, where
        point 2 is expected to be darker than point 1. Requires CUDA.

        Args:
            prediction_R: predicted reflectance, shape (C, H, W).
            judgements_eq: tensor of rows (y1, x1, y2, x2, weight) in
                normalized coordinates; fewer than 3 columns means "empty".
            judgements_ineq: same layout for inequality pairs.
            random_filp: if True, mirror x coordinates horizontally
                (parameter name typo kept for caller compatibility).
        """
        eq_loss, ineq_loss = 0, 0
        num_valid_eq = 0
        num_valid_ineq = 0
        tau = 0.425
        rows = prediction_R.size(1)
        cols = prediction_R.size(2)
        num_channel = prediction_R.size(0)
        # evaluate equality annotations densely
        if judgements_eq.size(1) > 2:
            judgements_eq = judgements_eq.cuda()
            R_vec = prediction_R.view(num_channel, -1)
            # R_vec = torch.exp(R_vec)
            # I_vec = I.view(1, -1)
            y_1 = torch.floor(judgements_eq[:,0] * rows).long()
            y_2 = torch.floor(judgements_eq[:,2] * rows).long()
            if random_filp:
                x_1 = cols - 1 - torch.floor(judgements_eq[:,1] * cols).long()
                x_2 = cols - 1 - torch.floor(judgements_eq[:,3] * cols).long()
            else:
                x_1 = torch.floor(judgements_eq[:,1] * cols).long()
                x_2 = torch.floor(judgements_eq[:,3] * cols).long()
            # compute linear index for point 1
            # y_1 = torch.floor(judgements_eq[:,0] * rows).long()
            # x_1 = torch.floor(judgements_eq[:,1] * cols).long()
            point_1_idx_linaer = y_1 * cols + x_1
            # compute linear index for point 2
            # y_2 = torch.floor(judgements_eq[:,2] * rows).long()
            # x_2 = torch.floor(judgements_eq[:,3] * cols).long()
            point_2_idx_linear = y_2 * cols + x_2
            # extract all pairs of comparisions
            points_1_vec = torch.index_select(R_vec, 1, Variable(point_1_idx_linaer, requires_grad = False))
            points_2_vec = torch.index_select(R_vec, 1, Variable(point_2_idx_linear, requires_grad = False))
            # I1_vec = torch.index_select(I_vec, 1, point_1_idx_linaer)
            # I2_vec = torch.index_select(I_vec, 1, point_2_idx_linear)
            weight = Variable(judgements_eq[:,4], requires_grad = False)
            # weight = confidence#* torch.exp(4.0 * torch.abs(I1_vec - I2_vec) )
            # compute loss
            # eq_loss = torch.sum(torch.mul(weight, torch.mean(torch.abs(points_1_vec - points_2_vec),0) ))
            eq_loss = torch.sum(torch.mul(weight, torch.mean(torch.pow(points_1_vec - points_2_vec,2),0)  ))
            num_valid_eq += judgements_eq.size(0)
        # compute inequality annotations
        if judgements_ineq.size(1) > 2:
            judgements_ineq = judgements_ineq.cuda()
            # Inequalities compare channel-mean intensity, not per-channel values.
            R_intensity = torch.mean(prediction_R, 0)
            # R_intensity = torch.log(R_intensity)
            R_vec_mean = R_intensity.view(1, -1)
            y_1 = torch.floor(judgements_ineq[:,0] * rows).long()
            y_2 = torch.floor(judgements_ineq[:,2] * rows).long()
            # x_1 = torch.floor(judgements_ineq[:,1] * cols).long()
            # x_2 = torch.floor(judgements_ineq[:,3] * cols).long()
            if random_filp:
                x_1 = cols - 1 - torch.floor(judgements_ineq[:,1] * cols).long()
                x_2 = cols - 1 - torch.floor(judgements_ineq[:,3] * cols).long()
            else:
                x_1 = torch.floor(judgements_ineq[:,1] * cols).long()
                x_2 = torch.floor(judgements_ineq[:,3] * cols).long()
            # y_1 = torch.floor(judgements_ineq[:,0] * rows).long()
            # x_1 = torch.floor(judgements_ineq[:,1] * cols).long()
            point_1_idx_linaer = y_1 * cols + x_1
            # y_2 = torch.floor(judgements_ineq[:,2] * rows).long()
            # x_2 = torch.floor(judgements_ineq[:,3] * cols).long()
            point_2_idx_linear = y_2 * cols + x_2
            # extract all pairs of comparisions
            points_1_vec = torch.index_select(R_vec_mean, 1, Variable(point_1_idx_linaer, requires_grad = False)).squeeze(0)
            points_2_vec = torch.index_select(R_vec_mean, 1, Variable(point_2_idx_linear, requires_grad = False)).squeeze(0)
            weight = Variable(judgements_ineq[:,4], requires_grad = False)
            # point 2 should be always darker than (<) point 1
            # compute loss
            relu_layer = nn.ReLU(True)
            # ineq_loss = torch.sum(torch.mul(weight, relu_layer(points_2_vec - points_1_vec + tau) ) )
            ineq_loss = torch.sum(torch.mul(weight, torch.pow( relu_layer(points_2_vec - points_1_vec + tau),2) ) )
            # ineq_loss = torch.sum(torch.mul(weight, torch.pow(relu_layer(tau - points_1_vec/points_2_vec),2)))
            # Count only pairs inside the hinge's active region toward the
            # normalizer (matches the pairs that can produce nonzero loss).
            num_included = torch.sum( torch.ge(points_2_vec.data - points_1_vec.data, -tau).float().cuda() )
            # num_included = torch.sum(torch.ge(points_2_vec.data/points_1_vec.data, 1./tau).float().cuda())
            num_valid_ineq += num_included
        # avoid divide by zero
        return eq_loss/(num_valid_eq + 1e-8) + ineq_loss/(num_valid_ineq + 1e-8)
def ShadingPenaltyLoss(self, S):
return torch.mean(torch.pow(S - 0.5,2) )
# return torch.sum( torch.mul(sky_mask, torch.abs(S - np.log(0.5))/num_val_pixels ))
def AngleLoss(self, prediction_n, targets):
mask = Variable(targets['mask'].cuda(), requires_grad = False)
normal = Variable(targets['normal'].cuda(), requires_grad = False)
num_valid = torch.sum(mask[:,0,:,:])
# compute dot product
angle_loss = - torch.sum( torch.mul(mask, torch.mul(prediction_n, normal)), 1)
return 1 + torch.sum(angle_loss)/num_valid
def GradientLoss(self, prediction_n, mask, gt_n):
N = torch.sum(mask)
# horizontal angle difference
h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:])
h_gradient = prediction_n[:,:,:,0:-2] - prediction_n[:,:,:,2:]
h_gradient_gt = gt_n[:,:,:,0:-2] - gt_n[:,:,:,2:]
h_gradient_loss = torch.mul(h_mask, torch.abs(h_gradient - h_gradient_gt))
# Vertical angle difference
v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:])
v_gradient = prediction_n[:,:,0:-2,:] - prediction_n[:,:,2:,:]
v_gradient_gt = gt_n[:,:,0:-2,:] - gt_n[:,:,2:,:]
v_gradient_loss = torch.mul(v_mask, torch.abs(v_gradient - v_gradient_gt))
gradient_loss = torch.sum(h_gradient_loss) + torch.sum(v_gradient_loss)
gradient_loss = gradient_loss/(N*2.0)
return gradient_loss
def SmoothLoss(self, prediction_n, mask):
N = torch.sum(mask[:,0,:,:])
# horizontal angle difference
h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:])
h_gradient = torch.sum( torch.mul(h_mask, torch.mul(prediction_n[:,:,:,0:-2], prediction_n[:,:,:,2:])), 1)
h_gradient_loss = 1 - torch.sum(h_gradient)/N
# Vertical angle difference
v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:])
v_gradient = torch.sum( torch.mul(v_mask, torch.mul(prediction_n[:,:,0:-2,:], prediction_n[:,:,2:,:])), 1)
v_gradient_loss = 1 - torch.sum(v_gradient)/N
gradient_loss = h_gradient_loss + v_gradient_loss
return gradient_loss
def UncertaintyLoss(self, prediction_n, uncertainty, targets):
uncertainty = torch.squeeze(uncertainty, 1)
mask = Variable(targets['mask'].cuda(), requires_grad = False)
normal = Variable(targets['normal'].cuda(), requires_grad = False)
num_valid = torch.sum(mask[:,0,:,:])
angle_diff = ( torch.sum( torch.mul(prediction_n, normal), 1) + 1.0) * 0.5
uncertainty_loss = torch.sum( torch.mul(mask[:,0,:,:], torch.pow(uncertainty - angle_diff, 2) ) )
return uncertainty_loss/num_valid
    def MaskLocalSmoothenessLoss(self, R, M, targets):
        """Masked local smoothness over a 3x3 neighborhood: for each of the 9
        offsets (self.X/self.Y from __init__, range -1..1) accumulate the mean
        squared difference between the center crop of R and its shifted crop,
        gated by the product of the two shifted masks. Requires CUDA.

        Note: the center offset contributes zero, so the result is normalized
        by the 8 real neighbors (and the channel count).
        """
        h = R.size(2)
        w = R.size(3)
        num_c = R.size(1)
        half_window_size = 1
        total_loss = Variable(torch.cuda.FloatTensor(1))
        total_loss[0] = 0
        # Center crop: self.Y[1,1] == self.X[1,1] == 0, so this is [1:h-1, 1:w-1].
        mask_center = M[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \
                    half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size]]
        R_center = R[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \
                     half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ]
        c_idx = 0
        for k in range(0,half_window_size*2+1):
            for l in range(0,half_window_size*2+1):
                # albedo_weights = Variable(targets["r_w_s"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda(), requires_grad = False)
                # Crop shifted by offset (self.Y[k,l], self.X[k,l]).
                R_N = R[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l],
                half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
                mask_N = M[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l],
                half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
                # Only penalize where both the center and shifted masks are on.
                composed_M = torch.mul(mask_N, mask_center)
                # albedo_weights = torch.mul(albedo_weights, composed_M)
                r_diff = torch.mul( composed_M, torch.pow(R_center - R_N,2) )
                total_loss  = total_loss + torch.mean(r_diff)
                c_idx = c_idx + 1
        return total_loss/(8.0 * num_c)
    def LocalAlebdoSmoothenessLoss(self, R, targets, scale_idx):
        """Weighted local albedo smoothness over a 3x3 neighborhood (method
        name typo kept for caller compatibility): for each offset, accumulate
        the mean absolute difference between the center crop of R and its
        shifted crop, weighted per pixel by the precomputed albedo weights
        targets["r_w_s"+scale_idx]. Requires CUDA.

        Note: normalized by 8 (the center offset contributes zero) and the
        channel count.
        """
        h = R.size(2)
        w = R.size(3)
        num_c = R.size(1)
        half_window_size = 1
        total_loss = Variable(torch.cuda.FloatTensor(1))
        total_loss[0] = 0
        # Center crop: self.Y[1,1] == self.X[1,1] == 0, so this is [1:h-1, 1:w-1].
        R_center = R[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \
                     half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ]
        c_idx = 0
        for k in range(0,half_window_size*2+1):
            for l in range(0,half_window_size*2+1):
                # Per-pixel weight for this offset, broadcast to all channels.
                albedo_weights = targets["r_w_s"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda()
                R_N = R[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
                # mask_N = M[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
                # composed_M = torch.mul(mask_N, mask_center)
                # albedo_weights = torch.mul(albedo_weights, composed_M)
                r_diff = torch.mul( Variable(albedo_weights, requires_grad = False), torch.abs(R_center - R_N) )
                total_loss  = total_loss + torch.mean(r_diff)
                c_idx = c_idx + 1
        return total_loss/(8.0 * num_c)
def Data_Loss(self, log_prediction, mask, log_gt):
N = torch.sum(mask)
log_diff = log_prediction - log_gt
log_diff = torch.mul(log_diff, mask)
s1 = torch.sum( torch.pow(log_diff,2) )/N
s2 = torch.pow(torch.sum(log_diff),2)/(N*N)
data_loss = s1 - s2
return data_loss
def L2GradientMatchingLoss(self, log_prediction, mask, log_gt):
N = torch.sum(mask)
log_diff = log_prediction - log_gt
log_diff = torch.mul(log_diff, mask)
v_gradient = torch.pow(log_diff[:,:,0:-2,:] - log_diff[:,:,2:,:],2)
v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:])
v_gradient = torch.mul(v_gradient, v_mask)
h_gradient = torch.pow(log_diff[:,:,:,0:-2] - log_diff[:,:,:,2:],2)
h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:])
h_gradient = torch.mul(h_gradient, h_mask)
gradient_loss = (torch.sum(h_gradient) + torch.sum(v_gradient))
gradient_loss = gradient_loss/N
return gradient_loss
def L1GradientMatchingLoss(self, log_prediction, mask, log_gt):
N = torch.sum( mask )
log_diff = log_prediction - log_gt
log_diff = torch.mul(log_diff, mask)
v_gradient = torch.abs(log_diff[:,:,0:-2,:] - log_diff[:,:,2:,:])
v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:])
v_gradient = torch.mul(v_gradient, v_mask)
h_gradient = torch.abs(log_diff[:,:,:,0:-2] - log_diff[:,:,:,2:])
h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:])
h_gradient = torch.mul(h_gradient, h_mask)
gradient_loss = (torch.sum(h_gradient) + torch.sum(v_gradient))/2.0
gradient_loss = gradient_loss/N
return gradient_loss
def L1Loss(self, prediction_n, mask, gt):
num_valid = torch.sum( mask )
diff = torch.mul(mask, torch.abs(prediction_n - gt))
return torch.sum(diff)/num_valid
def L2Loss(self, prediction_n, mask, gt):
num_valid = torch.sum( mask )
diff = torch.mul(mask, torch.pow(prediction_n - gt,2))
return torch.sum(diff)/num_valid
def HuberLoss(self, prediction, mask, gt):
tau = 1.0
num_valid = torch.sum(mask)
diff_L1 = torch.abs(prediction - gt)
diff_L2 = torch.pow(prediction - gt ,2)
mask_L2 = torch.le(diff_L1, tau).float().cuda()
mask_L1 = 1.0 - mask_L2
L2_loss = 0.5 * torch.sum(torch.mul(mask, torch.mul(mask_L2, diff_L2)))
L1_loss = torch.sum(torch.mul(mask, torch.mul(mask_L1, diff_L1))) - 0.5
final_loss = (L2_loss + L1_loss)/num_valid
return final_loss
# def DirectFramework(self, input_images, prediction_R, prediction_S, targets, epoch):
# # downsample all the images
# prediction_R_1 = prediction_R[:,:,::2,::2]
# prediction_R_2 = prediction_R_1[:,:,::2,::2]
# prediction_R_3 = prediction_R_2[:,:,::2,::2]
# mask_0 = Variable(targets['mask'].cuda(), requires_grad = False)
# mask_0 = mask_0[:,0,:,:].unsqueeze(1)
# mask_1 = mask_0[:,:,::2,::2]
# mask_2 = mask_1[:,:,::2,::2]
# mask_3 = mask_2[:,:,::2,::2]
# R_gt_0 = Variable(targets['gt_R'].cuda(), requires_grad = False)
# R_gt_1 = R_gt_0[:,:,::2,::2]
# R_gt_2 = R_gt_1[:,:,::2,::2]
# R_gt_3 = R_gt_2[:,:,::2,::2]
# S_gt_0 = Variable(targets['gt_S'].cuda(), requires_grad = False)
# S_gt_1 = S_gt_0[:,:,::2,::2]
# S_gt_2 = S_gt_1[:,:,::2,::2]
# S_gt_3 = S_gt_2[:,:,::2,::2]
# # gt_normal = Variable(targets['normal'].cuda(), requires_grad = False)
# prediction_S_1 = prediction_S[:,:,::2,::2]
# prediction_S_2 = prediction_S_1[:,:,::2,::2]
# prediction_S_3 = prediction_S_2[:,:,::2,::2]
# # R L2 loss
# w_data = 1.0
# w_grad = 0.5
# R_loss = w_data * self.L2Loss(prediction_R, mask_0, R_gt_0)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R , mask_0, R_gt_0)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_1, mask_1, R_gt_1)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_2, mask_2, R_gt_2)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_3, mask_3, R_gt_3)
# S_mask_0 = mask_0[:,0,:,:].unsqueeze(1)
# S_mask_1 = mask_1[:,0,:,:].unsqueeze(1)
# S_mask_2 = mask_2[:,0,:,:].unsqueeze(1)
# S_mask_3 = mask_3[:,0,:,:].unsqueeze(1)
# # S Huber Loss
# S_loss = w_data * self.HuberLoss(prediction_S, S_mask_0, S_gt_0)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S , S_mask_0, S_gt_0)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_1, S_mask_1, S_gt_1)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_2, S_mask_2, S_gt_2)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_3, S_mask_3, S_gt_3)
# Reconstr_loss = 2.0 * self.SUNCGReconstLoss(input_images, prediction_R, prediction_S, mask_0, targets)
# # Ls_loss = 8.0 * self.BilateralRefSmoothnessLoss(prediction_L, targets, 'S', 2)
# print("R_loss", R_loss.data[0])
# print("S_loss", S_loss.data[0])
# # print("Reconstr_loss", Reconstr_loss.data[0])
# # print("Lighting Loss", Ls_loss.data[0])
# total_loss = R_loss + S_loss + Reconstr_loss
# return total_loss
# def ScaleInvarianceFramework(self, input_images, prediction_R, prediction_S, targets, epoch):
# prediction_R_1 = prediction_R[:,:,::2,::2]
# prediction_R_2 = prediction_R_1[:,:,::2,::2]
# prediction_R_3 = prediction_R_2[:,:,::2,::2]
# # downsample all the images
# mask_0 = Variable(targets['mask'].cuda(), requires_grad = False)
# mask_0 = mask_0[:,0,:,:].unsqueeze(1)
# mask_1 = mask_0[:,:,::2,::2]
# mask_2 = mask_1[:,:,::2,::2]
# mask_3 = mask_2[:,:,::2,::2]
# R_gt_0 = torch.log(Variable(targets['gt_R'].cuda(), requires_grad = False))
# R_gt_1 = R_gt_0[:,:,::2,::2]
# R_gt_2 = R_gt_1[:,:,::2,::2]
# R_gt_3 = R_gt_2[:,:,::2,::2]
# S_gt_0 = torch.log(Variable(targets['gt_S'].cuda(), requires_grad = False))
# S_gt_1 = S_gt_0[:,:,::2,::2]
# S_gt_2 = S_gt_1[:,:,::2,::2]
# S_gt_3 = S_gt_2[:,:,::2,::2]
# # end of downsample
# w_data = 1.0
# w_grad = 0.5
# R_loss = w_data * self.Data_Loss(prediction_R, mask_0, R_gt_0)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R , mask_0, R_gt_0)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_1, mask_1, R_gt_1)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_2, mask_2, R_gt_2)
# R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_3, mask_3, R_gt_3)
# S_mask_0 = mask_0[:,0,:,:].unsqueeze(1)
# S_mask_1 = mask_1[:,0,:,:].unsqueeze(1)
# S_mask_2 = mask_2[:,0,:,:].unsqueeze(1)
# S_mask_3 = mask_3[:,0,:,:].unsqueeze(1)
# prediction_S_1 = prediction_S[:,:,::2,::2]
# prediction_S_2 = prediction_S_1[:,:,::2,::2]
# prediction_S_3 = prediction_S_2[:,:,::2,::2]
# S_loss = w_data * self.Data_Loss(prediction_S, S_mask_0, S_gt_0)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S , S_mask_0, S_gt_0)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_1, S_mask_1, S_gt_1)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_2, S_mask_2, S_gt_2)
# S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_3, S_mask_3, S_gt_3)
# # S_loss += 2.0 * self. (prediction_S, targets, 'S', 2)
# Reconstr_loss = self.SUNCGReconstLoss(input_images, torch.exp(prediction_R), torch.exp(prediction_S), mask_0, targets)
# # # lighting smoothness loss
# # Ls_loss = 32.0 * self.LocalLightingSmoothenessLoss(prediction_L, targets)
# # Ls_loss = 8.0 * self.BilateralRefSmoothnessLoss(prediction_L, targets, 'S', 2)
# print("Reconstr_loss", Reconstr_loss.data[0])
# print("R_loss", R_loss.data[0])
# print("S_loss", S_loss.data[0])
# # print("Lighting Loss", Ls_loss.data[0])
# total_loss = R_loss + S_loss + Reconstr_loss #+ Ls_loss
# return total_loss
# def NormalShadingSmoothnessLoss(self, S, targets, scale_idx):
# h = S.size(2)
# w = S.size(3)
# num_c = S.size(1)
# half_window_size = 1
# total_loss = Variable(torch.cuda.FloatTensor(1))
# total_loss[0] = 0
# # mask_center = M[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \
# # half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size]]
# S_center = S[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \
# half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ]
# c_idx = 0
# for k in range(0,half_window_size*2+1):
# for l in range(0,half_window_size*2+1):
# normal_weights = targets["s_w_"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda()
# S_N = S[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
# # mask_N = M[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ]
# # composed_M = torch.mul(mask_N, mask_center)
# # normal_weights = torch.mul(normal_weights, composed_M)
# r_diff = torch.mul( Variable(normal_weights, requires_grad = False), torch.pow(S_center - S_N, 2) )
# total_loss = total_loss + torch.mean(r_diff)
# c_idx = c_idx + 1
# return total_loss/(8.0 * num_c)
def CCLoss(self, prediction_S, saw_mask, gts, num_cc):
    """Connected-component consistency loss (SAW supervision).

    For each annotated connected region k of ``saw_mask`` the loss is the
    variance of ``prediction_S - gts`` inside the region (computed as
    E[x^2] - E[x]^2), i.e. the prediction may differ from the target by a
    per-region constant offset.  The result is averaged over all regions.

    Args:
        prediction_S: predicted (log) shading, shape (B, C, H, W).
        saw_mask: per-image label map; pixels of region k hold value k,
            0 is background.
        gts: target in the same space as ``prediction_S`` (may be the
            scalar 0, as used for smooth-region supervision in SAWLoss).
        num_cc: per-image number of connected components in ``saw_mask``.

    Returns:
        Scalar CUDA Variable; 1e-6 guards division when no region exists.
    """
    diff = prediction_S - gts
    # Scalar accumulator kept as a CUDA Variable so autograd tracks it.
    total_loss = Variable(torch.cuda.FloatTensor(1))
    total_loss[0] = 0
    num_regions = 0
    # for each prediction
    for i in range(prediction_S.size(0)):
        log_diff = diff[i,:,:,:]
        mask = saw_mask[i,:,:,:].int()
        # region labels run 1..num_cc[i]
        for k in range(1, num_cc[i]+1):
            new_mask = (mask == k).float().cuda()
            masked_log_diff = torch.mul(new_mask, log_diff)
            N = torch.sum(new_mask)  # pixel count of region k
            # variance of the masked differences: E[x^2] - (E[x])^2
            s1 = torch.sum( torch.pow(masked_log_diff,2) )/N
            s2 = torch.pow(torch.sum(masked_log_diff),2)/(N*N)
            total_loss += (s1 - s2)
            num_regions +=1
    return total_loss/(num_regions + 1e-6)
def SAWLoss(self, prediction_S, targets):
    """Shading supervision from SAW (Shading Annotations in the Wild).

    Uses two kinds of annotated regions:
      * ``saw_mask_1`` (shadow boundaries): shading should match the log
        image intensity up to a per-region constant (CCLoss vs log I).
      * ``saw_mask_2`` (smooth regions): shading should be constant, so
        CCLoss against the scalar 0 penalises per-region variance.

    Args:
        prediction_S: predicted log shading.
        targets: batch dict providing 'saw_mask_1'/'saw_mask_2',
            'num_mask_1'/'num_mask_2' (per-image region counts) and
            'rgb_img' (linear RGB input).

    Returns:
        lambda_1 * boundary loss + lambda_2 * smooth-region loss.
    """
    # Shading smoothness ignore mask region
    lambda_1, lambda_2 = 0.1, 1.
    # saw_mask_0 = Variable(targets['saw_mask_0'].cuda(), requires_grad = False)
    # prediction_S_1 = prediction_S[:,:,::2,::2]
    # prediction_S_2 = prediction_S_1[:,:,::2,::2]
    # prediction_S_3 = prediction_S_2[:,:,::2,::2]
    # mask_0 = saw_mask_0
    # mask_1 = mask_0[:,:,::2,::2]
    # mask_2 = mask_1[:,:,::2,::2]
    # mask_3 = mask_2[:,:,::2,::2]
    # saw_loss_0 = self.w_ss_local * self.MaskLocalSmoothenessLoss(prediction_S, mask_0, targets)
    # saw_loss_0 += self.w_ss_local * 0.5 * self.MaskLocalSmoothenessLoss(prediction_S_1, mask_1, targets)
    # saw_loss_0 += self.w_ss_local * 0.333 * self.MaskLocalSmoothenessLoss(prediction_S_2, mask_2, targets)
    # saw_loss_0 += self.w_ss_local * 0.25 * self.MaskLocalSmoothenessLoss(prediction_S_3, mask_3, targets)
    # shadow boundary
    saw_mask_1 = Variable(targets['saw_mask_1'].cuda(), requires_grad = False)
    # grayscale intensity of the input image
    linear_I = torch.mean( Variable(targets['rgb_img'].cuda(), requires_grad = False),1)
    linear_I = linear_I.unsqueeze(1)
    # clamp so torch.log below stays finite
    linear_I[linear_I < 1e-4] = 1e-4
    saw_loss_1 = lambda_1 * self.CCLoss(prediction_S, saw_mask_1, torch.log(linear_I), targets['num_mask_1'])
    # smooth region
    saw_mask_2 = Variable(targets['saw_mask_2'].cuda(), requires_grad = False)
    saw_loss_2 = lambda_2 * self.CCLoss(prediction_S, saw_mask_2, 0, targets['num_mask_2'])
    return saw_loss_2 + saw_loss_1
def DirectFramework(self, prediction, gt, mask):
    """Direct supervision: L2 data term plus L1 gradient matching over a
    four-level image pyramid (stride-2 slicing at each level)."""
    data_weight = 1.0
    grad_weight = 0.5
    total = data_weight * self.L2Loss(prediction, mask, gt)
    # Walk down the pyramid, adding one gradient term per level.
    pred_lvl, mask_lvl, gt_lvl = prediction, mask, gt
    for _ in range(4):
        total += grad_weight * self.L1GradientMatchingLoss(pred_lvl, mask_lvl, gt_lvl)
        pred_lvl = pred_lvl[:, :, ::2, ::2]
        mask_lvl = mask_lvl[:, :, ::2, ::2]
        gt_lvl = gt_lvl[:, :, ::2, ::2]
    return total
# all parameter in log space, presumption
def ScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Scale-invariant data term plus multi-scale L1 gradient matching.

    Inputs are assumed to be in log space (see note above), so the
    scale-invariant data term absorbs a global multiplicative scale.

    Args:
        prediction, gt, mask: (B, C, H, W) tensors; channel counts must agree.
        w_grad: weight applied to each gradient-matching pyramid level.
    """
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    w_data = 1.0
    loss = w_data * self.Data_Loss(prediction, mask, gt)
    # Gradient matching at full resolution and three 2x-downsampled levels.
    p, m, g = prediction, mask, gt
    for _ in range(4):
        loss += w_grad * self.L1GradientMatchingLoss(p, m, g)
        p = p[:, :, ::2, ::2]
        m = m[:, :, ::2, ::2]
        g = g[:, :, ::2, ::2]
    return loss
def LinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Linear-space scale-invariant loss with multi-scale gradient matching.

    Solves for the single scalar s minimising ||s * prediction - gt||^2 over
    pixels where mask > 0.1 (1-column least squares), rescales the
    prediction, then applies an L2 data term plus L1 gradient matching at
    four pyramid levels.

    Args:
        prediction, gt, mask: (B, C, H, W) tensors in linear space with
            matching channel counts.
        w_grad: weight on each gradient-matching level.
    """
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    w_data = 1.0
    gt_vec = gt[mask > 0.1]
    pred_vec = prediction[mask > 0.1]
    gt_vec = gt_vec.unsqueeze(1).float().cpu()
    pred_vec = pred_vec.unsqueeze(1).float().cpu()
    # Least-squares fit of one scalar: pred_vec * s ~= gt_vec.
    # torch.gels was renamed torch.lstsq (1.2) and later removed in favour
    # of torch.linalg.lstsq — dispatch on whichever API this torch has.
    if hasattr(torch, 'gels'):
        scale, _ = torch.gels(gt_vec.data, pred_vec.data)
        scale = scale[0, 0]
    elif hasattr(torch, 'lstsq'):
        scale, _ = torch.lstsq(gt_vec.data, pred_vec.data)
        scale = scale[0, 0]
    else:
        scale = torch.linalg.lstsq(pred_vec.data, gt_vec.data).solution[0, 0]
    prediction_scaled = prediction * scale
    final_loss = w_data * self.L2Loss(prediction_scaled, mask, gt)
    # Multi-scale gradient matching on the rescaled prediction.
    p, m, g = prediction_scaled, mask, gt
    for _ in range(4):
        final_loss += w_grad * self.L1GradientMatchingLoss(p, m, g)
        p = p[:, :, ::2, ::2]
        m = m[:, :, ::2, ::2]
        g = g[:, :, ::2, ::2]
    return final_loss
def WeightedLinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Like LinearScaleInvarianceFramework, but each pixel is additionally
    weighted by min(1/gt, 1) so bright ground-truth values do not dominate.

    Returns the int 0 when fewer than 10 mask pixels are set, since the
    least-squares scale would be meaningless there.
    """
    w_data = 1.0
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    if torch.sum(mask.data) < 10:
        return 0
    gt_vec = gt[mask > 0.1]
    pred_vec = prediction[mask > 0.1]
    gt_vec = gt_vec.unsqueeze(1).float().cpu()
    pred_vec = pred_vec.unsqueeze(1).float().cpu()
    # torch.gels -> torch.lstsq -> torch.linalg.lstsq across torch versions
    # (kept consistent with LinearScaleInvarianceFramework).
    if hasattr(torch, 'gels'):
        scale, _ = torch.gels(gt_vec.data, pred_vec.data)
        scale = scale[0, 0]
    elif hasattr(torch, 'lstsq'):
        scale, _ = torch.lstsq(gt_vec.data, pred_vec.data)
        scale = scale[0, 0]
    else:
        scale = torch.linalg.lstsq(pred_vec.data, gt_vec.data).solution[0, 0]
    prediction_scaled = prediction * scale
    # Fold the per-pixel weight min(1/gt, 1) into the mask.
    ones_matrix = Variable(torch.zeros(gt.size(0), gt.size(1), gt.size(2), gt.size(3)) + 1, requires_grad = False)
    weight = torch.min(1/gt, ones_matrix.float().cuda())
    weight_mask = torch.mul(weight, mask)
    final_loss = w_data * self.L2Loss(prediction_scaled, weight_mask, gt)
    # Multi-scale gradient matching with the weighted mask.
    p, m, g = prediction_scaled, weight_mask, gt
    for _ in range(4):
        final_loss += w_grad * self.L1GradientMatchingLoss(p, m, g)
        p = p[:, :, ::2, ::2]
        m = m[:, :, ::2, ::2]
        g = g[:, :, ::2, ::2]
    return final_loss
def SUNCGBatchRankingLoss(self, prediction_R, judgements_eq, judgements_ineq):
    """Pairwise reflectance ranking loss on synthetic (CGIntrinsics) data.

    Args:
        prediction_R: predicted reflectance for ONE image, shape (C, H, W)
            (this method is called once per image, see __call__).
        judgements_eq: pairs judged equally bright; columns appear to be
            (y1, x1, y2, x2, ...) integer pixel coordinates — TODO confirm
            column layout against the data loader.
        judgements_ineq: pairs where point 2 is annotated darker than
            point 1, same column layout.

    Returns:
        Squared-difference loss over equality pairs plus a squared hinge
        (margin ``tau``) over inequality pairs, each normalised by its
        (effective) pair count; 1e-8 guards empty annotation sets.
    """
    eq_loss, ineq_loss = 0, 0
    num_valid_eq = 0
    num_valid_ineq = 0
    tau = 0.4
    rows = prediction_R.size(1)
    cols = prediction_R.size(2)
    num_channel = prediction_R.size(0)
    # evaluate equality annotations densely
    if judgements_eq.size(1) > 2:
        judgements_eq = judgements_eq.cuda()
        # flatten spatial dims so pairs can be gathered by linear index
        R_vec = prediction_R.view(num_channel, -1)
        # R_vec = torch.exp(R_vec)
        y_1 = judgements_eq[:,0].long()
        y_2 = judgements_eq[:,2].long()
        # if random_filp:
        #     x_1 = cols - 1 - judgements_eq[:,1].long()
        #     x_2 = cols - 1 - judgements_eq[:,3].long()
        # else:
        x_1 = judgements_eq[:,1].long()
        x_2 = judgements_eq[:,3].long()
        # compute linear index for point 1
        # y_1 = torch.floor(judgements_eq[:,0] * rows).long()
        # x_1 = torch.floor(judgements_eq[:,1] * cols).long()
        point_1_idx_linear = y_1 * cols + x_1
        # compute linear index for point 2
        # y_2 = torch.floor(judgements_eq[:,2] * rows).long()
        # x_2 = torch.floor(judgements_eq[:,3] * cols).long()
        point_2_idx_linear = y_2 * cols + x_2
        # extract all pairs of comparisions
        points_1_vec = torch.index_select(R_vec, 1, Variable(point_1_idx_linear, requires_grad = False))
        points_2_vec = torch.index_select(R_vec, 1, Variable(point_2_idx_linear, requires_grad = False))
        # I1_vec = torch.index_select(I_vec, 1, point_1_idx_linaer)
        # I2_vec = torch.index_select(I_vec, 1, point_2_idx_linear)
        # weight = Variable(judgements_eq[:,4], requires_grad = False)
        # weight = confidence#* torch.exp(4.0 * torch.abs(I1_vec - I2_vec) )
        # compute Loss: squared difference, averaged over channels, summed over pairs
        # eq_loss = torch.sum(torch.mul(weight, torch.mean(torch.abs(points_1_vec - points_2_vec),0) ))
        eq_loss = torch.sum( torch.mean( torch.pow(points_1_vec - points_2_vec,2) ,0) )
        num_valid_eq += judgements_eq.size(0)
    # # compute inequality annotations
    if judgements_ineq.size(1) > 2:
        judgements_ineq = judgements_ineq.cuda()
        # inequalities are judged on channel-mean intensity
        R_intensity = torch.mean(prediction_R, 0)
        # R_intensity = torch.log(R_intensity)
        R_vec_mean = R_intensity.view(1, -1)
        y_1 = judgements_ineq[:,0].long()
        y_2 = judgements_ineq[:,2].long()
        # x_1 = torch.floor(judgements_ineq[:,1] * cols).long()
        # x_2 = torch.floor(judgements_ineq[:,3] * cols).long()
        x_1 = judgements_ineq[:,1].long()
        x_2 = judgements_ineq[:,3].long()
        # y_1 = torch.floor(judgements_ineq[:,0] * rows).long()
        # x_1 = torch.floor(judgements_ineq[:,1] * cols).long()
        point_1_idx_linear = y_1 * cols + x_1
        # y_2 = torch.floor(judgements_ineq[:,2] * rows).long()
        # x_2 = torch.floor(judgements_ineq[:,3] * cols).long()
        point_2_idx_linear = y_2 * cols + x_2
        # extract all pairs of comparisions
        points_1_vec = torch.index_select(R_vec_mean, 1, Variable(point_1_idx_linear, requires_grad = False)).squeeze(0)
        points_2_vec = torch.index_select(R_vec_mean, 1, Variable(point_2_idx_linear, requires_grad = False)).squeeze(0)
        # point 2 should be always darker than (<) point 1
        # compute loss: squared hinge with margin tau
        relu_layer = nn.ReLU(True)
        # ineq_loss = torch.sum(torch.mul(weight, relu_layer(points_2_vec - points_1_vec + tau) ) )
        ineq_loss = torch.sum(torch.pow( relu_layer(points_2_vec - points_1_vec + tau),2) )
        # ineq_loss = torch.sum(torch.mul(weight, torch.pow(relu_layer(tau - points_1_vec/points_2_vec),2)))
        # only pairs inside the margin contribute to the normaliser
        num_included = torch.sum( torch.ge(points_2_vec.data - points_1_vec.data, -tau).float().cuda() )
        # num_included = torch.sum(torch.ge(points_2_vec.data/points_1_vec.data, 1./tau).float().cuda())
        num_valid_ineq += num_included
    # avoid divide by zero
    return (eq_loss)/(num_valid_eq + 1e-8) + ineq_loss/(num_valid_ineq + 1e-8)
def __call__(self, input_images, prediction_R, prediction_S, targets, data_set_name, epoch):
    """Compute the training loss for one batch, dispatched on its dataset.

    Args:
        input_images: input RGB batch (B, 3, H, W).
        prediction_R: predicted log reflectance.
        prediction_S: predicted log shading.
        targets: dataset-specific dict of ground truth / annotations.
        data_set_name: "IIW", "Render", "CGIntrinsics" or "SAW";
            any other value aborts via sys.exit().
        epoch: current epoch (unused here).

    Returns:
        ``.data`` of the total loss; the loss Variable itself is stored in
        ``self.total_loss`` for retrieval via ``get_loss_var``.
    """
    lambda_CG = 0.5
    if data_set_name == "IIW":
        # Real photos with IIW human reflectance judgements.
        print("IIW Loss")
        num_images = prediction_R.size(0)
        # Albedo smoothness term
        # rs_loss = self.w_rs_dense * self.BilateralRefSmoothnessLoss(prediction_R, targets, 'R', 5)
        # multi-scale smoothness term
        prediction_R_1 = prediction_R[:,:,::2,::2]
        prediction_R_2 = prediction_R_1[:,:,::2,::2]
        prediction_R_3 = prediction_R_2[:,:,::2,::2]
        rs_loss = self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R, targets,0)
        rs_loss = rs_loss + 0.5 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_1, targets,1)
        rs_loss = rs_loss + 0.3333 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_2, targets,2)
        rs_loss = rs_loss + 0.25 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_3, targets,3)
        # Lighting smoothness Loss
        ss_loss = self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        # Reconstruction Loss (predictions are in log space, hence exp)
        reconstr_loss = self.w_reconstr_real * self.IIWReconstLoss(torch.exp(prediction_R), \
                        torch.exp(prediction_S), targets)
        # IIW ranking loss, accumulated per image
        total_iiw_loss = Variable(torch.cuda.FloatTensor(1))
        total_iiw_loss[0] = 0
        for i in range(0, num_images):
            # judgements = json.load(open(targets["judgements_path"][i]))
            # total_iiw_loss += self.w_IIW * self.Ranking_Loss(prediction_R[i,:,:,:], judgements, random_filp)
            judgements_eq = targets["eq_mat"][i]
            judgements_ineq = targets["ineq_mat"][i]
            random_filp = targets["random_filp"][i]
            total_iiw_loss += self.w_IIW * self.BatchRankingLoss(prediction_R[i,:,:,:], judgements_eq, judgements_ineq, random_filp)
        total_iiw_loss = (total_iiw_loss)/num_images
        total_loss = total_iiw_loss + reconstr_loss + rs_loss + ss_loss
    elif data_set_name == "Render":
        # Synthetic renders with dense ground-truth R and S.
        print("Render LOSS")
        mask = Variable(targets['mask'].cuda(), requires_grad = False)
        # broadcast the single-channel mask to each tensor's channel count
        mask_R = mask[:,0,:,:].unsqueeze(1).repeat(1,prediction_R.size(1),1,1)
        mask_S = mask[:,0,:,:].unsqueeze(1).repeat(1,prediction_S.size(1),1,1)
        mask_img = mask[:,0,:,:].unsqueeze(1).repeat(1,input_images.size(1),1,1)
        gt_R = Variable(targets['gt_R'].cuda(), requires_grad = False)
        gt_S = Variable(targets['gt_S'].cuda(), requires_grad = False)
        R_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_R), gt_R, mask_R, 0.5)
        # using ScaleInvarianceFramework might achieve better performance if we train on both IIW and SAW,
        # but LinearScaleInvarianceFramework could produce better performance if trained on CGIntrinsics only
        S_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_S), gt_S, mask_S, 0.5)
        # S_loss = lambda_CG * self.ScaleInvarianceFramework(prediction_S, torch.log(gt_S), mask_S, 0.5)
        reconstr_loss = lambda_CG * self.w_reconstr * self.SUNCGReconstLoss(torch.exp(prediction_R), torch.exp(prediction_S), mask_img, targets)
        total_loss = R_loss + S_loss + reconstr_loss
    elif data_set_name == "CGIntrinsics":
        # Synthetic CGIntrinsics data: dense GT plus ranking/smoothness terms.
        print("CGIntrinsics LOSS")
        mask = Variable(targets['mask'].cuda(), requires_grad = False)
        mask_R = mask[:,0,:,:].unsqueeze(1).repeat(1,prediction_R.size(1),1,1)
        mask_S = mask[:,0,:,:].unsqueeze(1).repeat(1,prediction_S.size(1),1,1)
        mask_img = mask[:,0,:,:].unsqueeze(1).repeat(1,input_images.size(1),1,1)
        gt_R = Variable(targets['gt_R'].cuda(), requires_grad = False)
        gt_S = Variable(targets['gt_S'].cuda(), requires_grad = False)
        R_loss = lambda_CG *self.LinearScaleInvarianceFramework(torch.exp(prediction_R), gt_R, mask_R, 0.5)
        # using ScaleInvarianceFramework might achieve better performance if we train on both IIW and SAW,
        # but LinearScaleInvarianceFramework could produce better performance if trained on CGIntrinsics only
        S_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_S), gt_S, mask_S, 0.5)
        # S_loss = lambda_CG * self.ScaleInvarianceFramework(prediction_S, torch.log(gt_S), mask_S, 0.5)
        reconstr_loss = lambda_CG * self.w_reconstr * self.SUNCGReconstLoss(torch.exp(prediction_R), torch.exp(prediction_S), mask_img, targets)
        # Why put this? Because some ground truth shadings are noisy
        Ss_loss = lambda_CG * self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        total_iiw_loss = 0
        for i in range(0, prediction_R.size(0)):
            judgements_eq = targets["eq_mat"][i]
            judgements_ineq = targets["ineq_mat"][i]
            random_filp = targets["random_filp"][i]
            total_iiw_loss += lambda_CG * self.SUNCGBatchRankingLoss(prediction_R[i,:,:,:], judgements_eq, judgements_ineq)
        total_iiw_loss = total_iiw_loss/prediction_R.size(0)
        total_loss = R_loss + S_loss + reconstr_loss + Ss_loss + total_iiw_loss
    elif data_set_name == "SAW":
        # Real photos with SAW shading annotations; no dense GT available.
        print("SAW Loss")
        prediction_R_1 = prediction_R[:,:,::2,::2]
        prediction_R_2 = prediction_R_1[:,:,::2,::2]
        prediction_R_3 = prediction_R_2[:,:,::2,::2]
        rs_loss = self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R, targets,0)
        rs_loss = rs_loss + 0.5 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_1, targets,1)
        rs_loss = rs_loss + 0.3333 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_2, targets,2)
        rs_loss = rs_loss + 0.25 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_3, targets,3)
        reconstr_loss = self.w_reconstr_real * self.IIWReconstLoss(torch.exp(prediction_R), \
                        torch.exp(prediction_S), targets)
        ss_loss = self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        SAW_loss = self.w_SAW * self.SAWLoss(prediction_S, targets)
        total_loss = rs_loss + SAW_loss + reconstr_loss + ss_loss
    else:
        print("NORMAL Loss")
        sys.exit()
    self.total_loss = total_loss
    return total_loss.data
def compute_whdr(self, reflectance, judgements, delta=0.1):
    """Weighted Human Disagreement Rate of a reflectance map vs IIW judgements.

    Args:
        reflectance: H x W x C array in linear space; judgement point
            coordinates are normalised to [0, 1) and scaled by this shape.
        judgements: IIW JSON dict with 'intrinsic_points' and
            'intrinsic_comparisons'.
        delta: relative-difference threshold separating "equal" from
            "darker" (0.1 in the IIW paper).

    Returns:
        (whdr, whdr_equal, whdr_inequal) weighted error rates, or None when
        no comparison carried positive weight.
    """
    points = judgements['intrinsic_points']
    comparisons = judgements['intrinsic_comparisons']
    id_to_points = {p['id']: p for p in points}
    rows, cols = reflectance.shape[0:2]
    error_sum = 0.0
    error_equal_sum = 0.0
    error_inequal_sum = 0.0
    weight_sum = 0.0
    weight_equal_sum = 0.0
    weight_inequal_sum = 0.0
    for c in comparisons:
        # "darker" is "J_i" in our paper
        darker = c['darker']
        if darker not in ('1', '2', 'E'):
            continue
        # "darker_score" is "w_i" in our paper.
        # The None test must come FIRST: the original checked
        # ``weight <= 0.0 or weight is None``, which raises TypeError on
        # Python 3 when the score is None.
        weight = c['darker_score']
        if weight is None or weight <= 0.0:
            continue
        point1 = id_to_points[c['point1']]
        point2 = id_to_points[c['point2']]
        if not point1['opaque'] or not point2['opaque']:
            continue
        # convert to grayscale and threshold (1e-10 guards the ratios below)
        l1 = max(1e-10, np.mean(reflectance[
            int(point1['y'] * rows), int(point1['x'] * cols), ...]))
        l2 = max(1e-10, np.mean(reflectance[
            int(point2['y'] * rows), int(point2['x'] * cols), ...]))
        # convert algorithm value to the same units as human judgements
        if l2 / l1 > 1.0 + delta:
            alg_darker = '1'
        elif l1 / l2 > 1.0 + delta:
            alg_darker = '2'
        else:
            alg_darker = 'E'
        # accumulate the overall rate plus equal / inequal breakdowns
        if darker == 'E':
            if darker != alg_darker:
                error_equal_sum += weight
            weight_equal_sum += weight
        else:
            if darker != alg_darker:
                error_inequal_sum += weight
            weight_inequal_sum += weight
        if darker != alg_darker:
            error_sum += weight
        weight_sum += weight
    if weight_sum:
        return (error_sum / weight_sum), error_equal_sum/( weight_equal_sum + 1e-10), error_inequal_sum/(weight_inequal_sum + 1e-10)
    else:
        return None
def evaluate_WHDR(self, prediction_R, targets):
    """Accumulate WHDR metrics for a batch of predicted log reflectances.

    Returns summed (whdr, whdr_equal, whdr_inequal) and the image count so
    the caller can average across batches.
    """
    total_whdr = float(0)
    total_whdr_eq = float(0)
    total_whdr_ineq = float(0)
    count = float(0)
    for idx in range(prediction_R.size(0)):
        # Undo the log and the gamma compression (0.4545 ~= 1/2.2) to get
        # a linear-space H x W x C reflectance map.
        pred_np = prediction_R.data[idx, :, :, :].cpu().numpy()
        pred_np = np.transpose(np.exp(pred_np * 0.4545), (1, 2, 0))
        o_h = targets['oringinal_shape'][0].numpy()
        o_w = targets['oringinal_shape'][1].numpy()
        # bilinear resize back to the photo's original resolution
        pred_np = resize(pred_np, (o_h[idx], o_w[idx]), order=1, preserve_range=True)
        # load the IIW human judgements for this image
        judgements = json.load(open(targets["judgements_path"][idx]))
        whdr, whdr_eq, whdr_ineq = self.compute_whdr(pred_np, judgements, 0.1)
        total_whdr += whdr
        total_whdr_eq += whdr_eq
        total_whdr_ineq += whdr_ineq
        count += 1.
    return total_whdr, total_whdr_eq, total_whdr_ineq, count
def evaluate_RC_loss(self, prediction_n, targets):
    """Angular error of predicted surface normals after L2-normalisation."""
    # Normalise every per-pixel normal vector to unit length.
    norm = torch.sqrt(torch.sum(torch.pow(prediction_n, 2), 1))
    norm = norm.unsqueeze(1).repeat(1, 3, 1, 1)
    unit_normals = torch.div(prediction_n, norm)
    total_loss = self.AngleLoss(unit_normals, targets)
    return total_loss.data
def evaluate_L0_loss(self, prediction_R, targets):
    """WHDR of a single reflectance map (already a linear H x W x C array).

    Mirrors evaluate_WHDR's (total, count) return convention.
    """
    total_whdr = float(0)
    count = float(0)
    for i in range(0, 1):
        prediction_R_np = prediction_R
        # load Json judgement
        judgements = json.load(open(targets["judgements_path"][i]))
        # compute_whdr returns (whdr, whdr_equal, whdr_inequal); only the
        # overall rate is accumulated here.  The original added the whole
        # tuple to a float, which raises TypeError.
        whdr, _, _ = self.compute_whdr(prediction_R_np, judgements, 0.1)
        total_whdr += whdr
        count += 1
    return total_whdr, count
def get_loss_var(self):
    """Return the total loss Variable stored by the most recent __call__."""
    return self.total_loss
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator assembled recursively from skip-connection blocks.

    ``num_downs`` is the number of downsamplings; e.g. with num_downs == 7
    a 128x128 image becomes 1x1 at the bottleneck.  The outermost block of
    UnetSkipConnectionBlock returns a pair of outputs, so this generator
    does too.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # currently support only input_nc == output_nc
        # assert(input_nc == output_nc)
        # Build from the bottleneck outwards.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, block, norm_layer=norm_layer, use_dropout=use_dropout)
        block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf, ngf * 2, block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        # Spread across GPUs only when configured and the input is on a GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """U-Net submodule with a skip connection.

        X -------------------identity---------------------- X
          |-- downsampling -- |submodule| -- upsampling --|

    Inner blocks concatenate their input onto their output along the
    channel dimension.  The outermost block is special: it owns two
    separate upsampling heads and returns a pair (y_1, y_2) — a 1-channel
    output and a 3-channel output.
    """
    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(False)
        upnorm = norm_layer(outer_nc, affine=True)
        if outermost:
            n_output_dim = 3
            uprelu1 = nn.ReLU(False)
            uprelu2 = nn.ReLU(False)
            upconv_1 = nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                          kernel_size=4, stride=2,
                                          padding=1)
            upconv_2 = nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                          kernel_size=4, stride=2,
                                          padding=1)
            conv_1 = nn.Conv2d(inner_nc, inner_nc, kernel_size=3,
                               stride=1, padding=1)
            conv_2 = nn.Conv2d(inner_nc, inner_nc, kernel_size=3,
                               stride=1, padding=1)
            # Output heads: 1 channel for branch 1, n_output_dim for branch 2.
            # conv_1_o was commented out in the original even though up_1
            # below references it, causing a NameError whenever an
            # outermost block was constructed.
            conv_1_o = nn.Conv2d(inner_nc, 1, kernel_size=3,
                                 stride=1, padding=1)
            conv_2_o = nn.Conv2d(inner_nc, n_output_dim, kernel_size=3,
                                 stride=1, padding=1)
            upnorm_1 = norm_layer(inner_nc, affine=True)
            upnorm_2 = norm_layer(inner_nc, affine=True)
            down = [downconv]
            up_1 = [uprelu1, upconv_1, upnorm_1, nn.ReLU(False), conv_1, nn.ReLU(False), conv_1_o]
            up_2 = [uprelu2, upconv_2, upnorm_2, nn.ReLU(False), conv_2, nn.ReLU(False), conv_2_o]
            self.downconv_model = nn.Sequential(*down)
            self.upconv_model_1 = nn.Sequential(*up_1)
            self.upconv_model_2 = nn.Sequential(*up_2)
            self.submodule = submodule
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
            self.model = nn.Sequential(*model)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
            self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            down_x = self.downconv_model(x)
            y = self.submodule.forward(down_x)
            y_1 = self.upconv_model_1(y)
            y_2 = self.upconv_model_2(y)
            return y_1, y_2
        else:
            # skip connection: concatenate input along the channel dim
            return torch.cat([self.model(x), x], 1)
class SingleUnetGenerator_S(nn.Module):
    """Single-decoder U-Net (S variant) that also emits a 3-vector.

    Built recursively from SingleUnetSkipConnectionBlock_S blocks; the
    innermost block predicts a 3-vector (presumably a global colour —
    confirm against callers) alongside the features, so the network
    returns a (map, vector) pair.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_S, self).__init__()
        self.gpu_ids = gpu_ids
        # currently support only input_nc == output_nc
        # assert(input_nc == output_nc)
        # Assemble from the bottleneck outwards.
        block = SingleUnetSkipConnectionBlock_S(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            block = SingleUnetSkipConnectionBlock_S(ngf * 8, ngf * 8, block, norm_layer=norm_layer, use_dropout=use_dropout)
        block = SingleUnetSkipConnectionBlock_S(ngf * 4, ngf * 8, block, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_S(ngf * 2, ngf * 4, block, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_S(ngf, ngf * 2, block, norm_layer=norm_layer)
        self.model = SingleUnetSkipConnectionBlock_S(output_nc, ngf, block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class SingleUnetSkipConnectionBlock_S(nn.Module):
    """Skip block for SingleUnetGenerator_S.

    Besides the usual U-Net feature path, the innermost block pools its
    bottleneck features and runs them through a small conv + fc head to
    predict a 3-vector (``color_s``) that every enclosing block passes
    through unchanged, so forward always returns (features, color_s)
    except at the outermost level, which returns (output_map, color_s).

    NOTE(review): the fc head is hard-coded to 256 input features, which
    only works when the innermost inner_nc is 512 (ngf=64 * 8) — confirm
    before using other widths.
    """
    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(SingleUnetSkipConnectionBlock_S, self).__init__()
        self.outermost = outermost
        self.innermost = innermost
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(False)
        upnorm = norm_layer(outer_nc, affine=True)
        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, 1,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv]
            model = down + [submodule]
            self.model = nn.Sequential(*model)
            self.up_model = nn.Sequential(*up)
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            # Colour head: pool to 2x2, stride-2 conv down to one spatial
            # cell, then a linear layer to 3 outputs.  The channel count
            # must be an int: the original wrote ``inner_nc/2``, which is
            # a float on Python 3 and makes nn.Conv2d raise.
            int_conv = [nn.AdaptiveAvgPool2d((2,2)), nn.Conv2d(inner_nc, inner_nc // 2, kernel_size=3, stride=2, padding=1), nn.ReLU(False)]
            fc = [nn.Linear(256, 3)]
            self.int_conv = nn.Sequential(* int_conv)
            self.fc = nn.Sequential(* fc)
            self.down_model = nn.Sequential(*down)
            self.up_model = nn.Sequential(*up)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            # Down path and submodule run first; the up path is applied
            # separately in forward so the colour vector can be threaded
            # through.
            if use_dropout:
                model = down + [submodule] #+ up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] # + up
            if use_dropout:
                upconv_model = up + [nn.Dropout(0.5)]
            else:
                upconv_model = up
            self.model = nn.Sequential(*model)
            self.up_model = nn.Sequential(*upconv_model)

    def forward(self, x):
        if self.outermost:
            y_1, color_s = self.model(x)
            y_1 = self.up_model(y_1)
            return y_1, color_s
        elif self.innermost:
            y_1 = self.down_model(x)
            color_s = self.int_conv(y_1)
            color_s = color_s.view(color_s.size(0), -1)
            color_s = self.fc(color_s)
            y_1 = self.up_model(y_1)
            y_1 = torch.cat([y_1, x], 1)
            return y_1, color_s
        else:
            y_1, color_s = self.model(x)
            y_1 = self.up_model(y_1)
            return torch.cat([y_1, x], 1), color_s
class SingleUnetGenerator_R(nn.Module):
    """Single-decoder U-Net (R variant) built from
    SingleUnetSkipConnectionBlock_R blocks; the outermost block consumes a
    3-channel image and produces ``output_nc`` channels."""
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_R, self).__init__()
        self.gpu_ids = gpu_ids
        # currently support only input_nc == output_nc
        # assert(input_nc == output_nc)
        # Assemble from the bottleneck outwards.
        block = SingleUnetSkipConnectionBlock_R(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            block = SingleUnetSkipConnectionBlock_R(ngf * 8, ngf * 8, block, norm_layer=norm_layer, use_dropout=use_dropout)
        block = SingleUnetSkipConnectionBlock_R(ngf * 4, ngf * 8, block, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_R(ngf * 2, ngf * 4, block, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_R(ngf, ngf * 2, block, norm_layer=norm_layer)
        self.model = SingleUnetSkipConnectionBlock_R(output_nc, ngf, block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class SingleUnetSkipConnectionBlock_R(nn.Module):
    """U-Net skip block for SingleUnetGenerator_R.

    The outermost block takes a 3-channel image and finishes with a 1x1
    conv producing ``outer_nc`` channels; all other blocks concatenate
    their input as a skip connection.
    """
    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(SingleUnetSkipConnectionBlock_R, self).__init__()
        self.outermost = outermost
        # The outermost block always reads the raw 3-channel image.
        in_channels = 3 if outermost else outer_nc
        downconv = nn.Conv2d(in_channels, inner_nc, kernel_size=4, stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        num_output = outer_nc
        if outermost:
            layers = [downconv, submodule, uprelu,
                      nn.ConvTranspose2d(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1),
                      nn.ReLU(False),
                      nn.Conv2d(inner_nc, num_output, kernel_size=1)]
        elif innermost:
            layers = [downrelu, downconv, uprelu,
                      nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1),
                      upnorm]
        else:
            layers = [downrelu, downconv, downnorm, submodule, uprelu,
                      nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1),
                      upnorm]
            if use_dropout:
                layers.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        out = self.model(x)
        if self.outermost:
            return out
        # skip connection: stack the block's input onto its output
        return torch.cat([out, x], 1)
class SingleUnetGenerator_L(nn.Module):
    """Single-decoder U-Net (L variant).

    Same recursive construction as the other Single generators, except the
    ngf*4 / ngf*8 level is built with ``gird=True`` (sic — that is the
    callee's parameter spelling) so that block also builds a coarse grid
    prediction head.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_L, self).__init__()
        self.gpu_ids = gpu_ids
        # currently support only input_nc == output_nc
        # assert(input_nc == output_nc)
        # Assemble from the bottleneck outwards.
        block = SingleUnetSkipConnectionBlock_L(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            block = SingleUnetSkipConnectionBlock_L(ngf * 8, ngf * 8, block, norm_layer=norm_layer, use_dropout=use_dropout)
        block = SingleUnetSkipConnectionBlock_L(ngf * 4, ngf * 8, block, gird = True, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_L(ngf * 2, ngf * 4, block, norm_layer=norm_layer)
        block = SingleUnetSkipConnectionBlock_L(ngf, ngf * 2, block, norm_layer=norm_layer)
        self.model = SingleUnetSkipConnectionBlock_L(output_nc, ngf, block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class SingleUnetSkipConnectionBlock_L(nn.Module):
    """U-Net skip-connection block (single-decoder variant) with an optional
    auxiliary "grid" head.

    Wiring by position:
      * outermost: conv -> submodule -> (relu, deconv, relu, 1x1 conv, sigmoid)
                   producing a single sigmoid-activated channel.
      * innermost: (lrelu, conv, norm) down, (relu, deconv, norm) up.
      * middle:    (lrelu, conv, norm) down, submodule, (relu, deconv, norm) up,
                   optional dropout, optional grid head when ``gird=True``.

    Non-outermost blocks concatenate their input onto their output (the skip
    connection), so each level doubles the channel count seen by its parent.
    """

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, gird=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        # NOTE: the keyword stays spelled ``gird`` (sic) for compatibility
        # with existing callers; internally it is stored as ``self.grid``.
        super(SingleUnetSkipConnectionBlock_L, self).__init__()
        self.outermost = outermost
        # BUG FIX: the original did ``self.gird = grid`` where ``grid`` is an
        # undefined name (NameError on every construction), while forward()
        # tested ``self.grid`` (AttributeError). Use one attribute name.
        self.grid = gird
        # BUG FIX: outermost forward() returns ``self.grid_y``, which was
        # never initialised; define it so the attribute always exists.
        # TODO(review): the grid map computed by the inner gird-block is
        # stored on that block, not propagated to the outermost instance, so
        # the outermost return is always None — confirm intended wiring.
        self.grid_y = None
        if outermost:
            # Outermost always consumes a 3-channel (RGB) image.
            downconv = nn.Conv2d(3, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        else:
            downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        num_output = outer_nc
        if outermost:
            upconv = [uprelu,
                      nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                         kernel_size=4, stride=2, padding=1),
                      nn.ReLU(False),
                      nn.Conv2d(inner_nc, 1, kernel_size=1),
                      nn.Sigmoid()]
            down = [downconv]
            up = upconv
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
            if self.grid:
                # Auxiliary head predicting a coarse "grid" map that
                # forward() upsamples x8.
                # BUG FIX: ``inner_nc / 4`` is a float under Python 3 and
                # nn.Conv2d requires int channel counts -> use //.
                # NOTE(review): the first layer expects inner_nc * 2 input
                # channels, but forward() feeds it the block output, which
                # has outer_nc channels — confirm the intended tap point
                # before enabling this path.
                grid_layer = [nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                                 kernel_size=4, stride=2,
                                                 padding=1),
                              norm_layer(inner_nc, affine=True),
                              nn.ReLU(False),
                              nn.Conv2d(inner_nc, inner_nc // 4,
                                        kernel_size=3, padding=1),
                              nn.ReLU(False),
                              nn.Conv2d(inner_nc // 4, num_output,
                                        kernel_size=1)]
                self.grid_layer = nn.Sequential(*grid_layer)
                # Hoisted out of forward(): the upsampler is stateless, so
                # build it once instead of on every call.
                self.upsample = nn.Upsample(scale_factor=8, mode='bilinear')
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            y = self.model(x)
            return y, self.grid_y
        else:
            y = self.model(x)
            if self.grid:
                self.grid_y = self.upsample(self.grid_layer(y))
            return torch.cat([y, x], 1)
class MultiUnetGenerator(nn.Module):
    """Recursive two-output U-Net generator built from
    MultiUnetSkipConnectionBlock levels (each block keeps two decoder
    branches, so forward returns a pair of tensors).

    Args:
        input_nc:  declared input channel count (the blocks derive their conv
                   shapes from outer_nc/inner_nc, not from this value).
        output_nc: channel count handed to the outermost block.
        num_downs: number of downsampling levels (must be >= 5).
        ngf:       base filter count.
        norm_layer: normalization layer class applied at every level.
        use_dropout: enable dropout in the extra ngf*8 middle levels.
        gpu_ids:   GPU ids for data_parallel; None/empty runs single-device.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=None):
        super(MultiUnetGenerator, self).__init__()
        # BUG FIX: the original used a mutable default argument (gpu_ids=[]),
        # which is shared across all instances.
        self.gpu_ids = gpu_ids if gpu_ids is not None else []
        # construct unet structure, innermost block first
        # BUG FIX: forward norm_layer to the innermost block too, so a
        # caller-supplied norm layer is honoured at every level.
        unet_block = MultiUnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True,
                                                  norm_layer=norm_layer)
        for i in range(num_downs - 5):
            unet_block = MultiUnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block,
                                                      norm_layer=norm_layer,
                                                      use_dropout=use_dropout)
        unet_block = MultiUnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block,
                                                  norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block,
                                                  norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(ngf, ngf * 2, unet_block,
                                                  norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(output_nc, ngf, unet_block,
                                                  outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        # Scatter across GPUs only when ids were configured and the input is
        # already a CUDA float tensor; otherwise run on the current device.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
# Defines the submodule with skip connection.
#   X -------------------identity---------------------- X
#     |-- downsampling -- |submodule| -- upsampling --|
class MultiUnetSkipConnectionBlock(nn.Module):
    """U-Net skip-connection block with *two* parallel decoder branches.

    Each level shares one encoder (down) path but keeps two separate decoder
    (up) paths, so the whole generator emits a pair of outputs from a single
    encoding. Non-outermost blocks concatenate the block input onto each
    branch's output (the skip connection), which doubles the channel count
    seen by the parent block — hence the ``inner_nc * 2`` decoder inputs.
    """

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(MultiUnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        self.innermost = innermost
        # Shared encoder layers (one down path for both decoder branches).
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = norm_layer(inner_nc, affine=True)
        # NOTE(review): uprelu/upnorm are constructed but the branch lists
        # below build fresh ReLU/norm modules instead — these two are unused.
        uprelu = nn.ReLU(False)
        upnorm = norm_layer(outer_nc, affine=True)
        if outermost:
            # Retained from a commented-out output head; unused by the active
            # code path.
            n_output_dim = 3
            down = [downconv]
            # Each head: deconv back to inner_nc, norm, relu, then a 1x1 conv
            # down to a single output channel (no final activation).
            upconv_model_1 = [nn.ReLU(False),
                              nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                                 kernel_size=4, stride=2, padding=1),
                              norm_layer(inner_nc, affine=True), nn.ReLU(False),
                              nn.Conv2d(inner_nc, 1, kernel_size=1, bias=True)]
            upconv_model_2 = [nn.ReLU(False),
                              nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                                 kernel_size=4, stride=2, padding=1),
                              norm_layer(inner_nc, affine=True), nn.ReLU(False),
                              nn.Conv2d(inner_nc, 1, kernel_size=1, bias=True)]
        elif innermost:
            # Bottleneck: no norm on the way down, no submodule; the decoder
            # input is inner_nc (not doubled) because there is no inner skip.
            down = [downrelu, downconv]
            upconv_model_1 = [nn.ReLU(False),
                              nn.ConvTranspose2d(inner_nc, outer_nc,
                                                 kernel_size=4, stride=2,
                                                 padding=1),
                              norm_layer(outer_nc, affine=True)]
            upconv_model_2 = [nn.ReLU(False),
                              nn.ConvTranspose2d(inner_nc, outer_nc,
                                                 kernel_size=4, stride=2,
                                                 padding=1),
                              norm_layer(outer_nc, affine=True)]
        else:
            # Middle level: full (lrelu, conv, norm) encoder and two
            # (relu, deconv, norm) decoder branches; dropout optional.
            down = [downrelu, downconv, downnorm]
            up_1 = [nn.ReLU(False),
                    nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                       kernel_size=4, stride=2,
                                       padding=1),
                    norm_layer(outer_nc, affine=True)]
            up_2 = [nn.ReLU(False),
                    nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                       kernel_size=4, stride=2,
                                       padding=1),
                    norm_layer(outer_nc, affine=True)]
            if use_dropout:
                upconv_model_1 = up_1 + [nn.Dropout(0.5)]
                upconv_model_2 = up_2 + [nn.Dropout(0.5)]
            else:
                upconv_model_1 = up_1
                upconv_model_2 = up_2
        # One shared encoder, one recursive submodule, two decoder branches.
        self.downconv_model = nn.Sequential(*down)
        self.submodule = submodule
        self.upconv_model_1 = nn.Sequential(*upconv_model_1)
        self.upconv_model_2 = nn.Sequential(*upconv_model_2)

    def forward(self, x):
        # All positions return a pair (branch 1, branch 2).
        if self.outermost:
            # Encode once, recurse (submodule returns a pair), decode each
            # branch with its own head. No skip concat at the top level.
            down_x = self.downconv_model(x)
            y_1, y_2 = self.submodule.forward(down_x)
            y_1 = self.upconv_model_1(y_1)
            y_2 = self.upconv_model_2(y_2)
            return y_1, y_2
        elif self.innermost:
            # Both branches decode the same bottleneck encoding, then each
            # gets the block input concatenated as the skip connection.
            down_output = self.downconv_model(x)
            y_1 = self.upconv_model_1(down_output)
            y_2 = self.upconv_model_2(down_output)
            y_1 = torch.cat([y_1, x], 1)
            y_2 = torch.cat([y_2, x], 1)
            return y_1, y_2
        else:
            # Middle level: encode, recurse, decode per branch, then append
            # the skip connection to each branch.
            down_x = self.downconv_model(x)
            y_1, y_2 = self.submodule.forward(down_x)
            y_1 = self.upconv_model_1(y_1)
            y_2 = self.upconv_model_2(y_2)
            y_1 = torch.cat([y_1, x], 1)
            y_2 = torch.cat([y_2, x], 1)
            return y_1, y_2
| 43.631955
| 167
| 0.57494
| 11,393
| 85,475
| 4.03704
| 0.046695
| 0.004827
| 0.028308
| 0.01722
| 0.798909
| 0.761969
| 0.73951
| 0.709419
| 0.686024
| 0.663369
| 0
| 0.035266
| 0.292378
| 85,475
| 1,958
| 168
| 43.654239
| 0.725167
| 0.251407
| 0
| 0.555249
| 0
| 0
| 0.012296
| 0.000665
| 0
| 0
| 0
| 0
| 0.007366
| 1
| 0.054328
| false
| 0
| 0.01105
| 0.001842
| 0.131676
| 0.010129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b60403bd375522e254e1a281973d2f31ef65e87d
| 1,239
|
py
|
Python
|
model/operationoutcome.py
|
beda-software/fhir-py-experements
|
363cfb894fa6f971b9be19340cae1b0a3a4377d8
|
[
"MIT"
] | null | null | null |
model/operationoutcome.py
|
beda-software/fhir-py-experements
|
363cfb894fa6f971b9be19340cae1b0a3a4377d8
|
[
"MIT"
] | null | null | null |
model/operationoutcome.py
|
beda-software/fhir-py-experements
|
363cfb894fa6f971b9be19340cae1b0a3a4377d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/OperationOutcome) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
@dataclass
class OperationOutcomeIssue(BackboneElement):
    """ A single issue associated with the action.

    An error, warning, or information message that results from a system
    action.

    Generated FHIR resource model; field order and defaults follow the
    generator's conventions (required fields are typed without Optional but
    default to None so instances can be built incrementally).
    """
    resource_type: ClassVar[str] = "OperationOutcomeIssue"

    # Required per FHIR: fatal | error | warning | information.
    severity: str = None
    # Required per FHIR: issue-type code.
    code: str = None
    # Additional structured details about the error.
    details: Optional[CodeableConcept] = None
    # Additional human-readable diagnostic information.
    diagnostics: Optional[str] = None
    # Deprecated in FHIR in favour of expression; path of the element(s).
    location: Optional[List[str]] = None
    # FHIRPath expression(s) of the element(s) related to the issue.
    expression: Optional[List[str]] = None
@dataclass
class OperationOutcome(DomainResource):
    """ Information about the success/failure of an action.

    A collection of error, warning, or information messages that result from a
    system action.
    """
    resource_type: ClassVar[str] = "OperationOutcome"

    # At least one issue is required by FHIR; default_factory avoids a shared
    # mutable default list.
    issue: List[OperationOutcomeIssue] = field(default_factory=list)
| 30.219512
| 113
| 0.740113
| 143
| 1,239
| 6.391608
| 0.524476
| 0.038293
| 0.030635
| 0.054705
| 0.087527
| 0.087527
| 0.087527
| 0.087527
| 0
| 0
| 0
| 0.023324
| 0.169492
| 1,239
| 41
| 114
| 30.219512
| 0.864917
| 0.356739
| 0
| 0.105263
| 1
| 0
| 0.049007
| 0.027815
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.315789
| 0
| 0.894737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
b611e06647cf25f662795557ff10a0901313ad2e
| 16,736
|
py
|
Python
|
mrcnn/prep_notebook.py
|
kbardool/Mask_RCNN_2
|
dc0c5ef3615cff8ffea162c347aec7a7ab88188b
|
[
"MIT"
] | 7
|
2018-08-07T13:56:32.000Z
|
2021-04-06T11:07:20.000Z
|
mrcnn/prep_notebook.py
|
kbardool/Contextual_Inference
|
dc0c5ef3615cff8ffea162c347aec7a7ab88188b
|
[
"MIT"
] | null | null | null |
mrcnn/prep_notebook.py
|
kbardool/Contextual_Inference
|
dc0c5ef3615cff8ffea162c347aec7a7ab88188b
|
[
"MIT"
] | 1
|
2019-02-01T06:49:58.000Z
|
2019-02-01T06:49:58.000Z
|
'''
prep_dev_notebook:
pred_newshapes_dev: Runs against new_shapes
'''
import os
import sys
import random
import math
import re
import gc
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import keras.backend as KB
import mrcnn.model_mod as modellib
import mrcnn.visualize as visualize
from mrcnn.config import Config
from mrcnn.dataset import Dataset
from mrcnn.utils import stack_tensors, stack_tensors_3d, log
from mrcnn.datagen import data_generator, load_image_gt
import platform
# Resolve machine-specific model/dataset paths from the host OS.
syst = platform.system()
if syst == 'Windows':
    # Root directory of the project
    print(' windows ' , syst)
    # WINDOWS MACHINE ------------------------------------------------------------------
    ROOT_DIR = "E:\\"
    MODEL_PATH = os.path.join(ROOT_DIR, "models")
    DATASET_PATH = os.path.join(ROOT_DIR, 'MLDatasets')
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH, "coco2014")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
elif syst == 'Linux':
    print(' Linx ' , syst)
    # LINUX MACHINE ------------------------------------------------------------------
    ROOT_DIR = os.getcwd()
    MODEL_PATH = os.path.expanduser('~/models')
    DATASET_PATH = os.path.expanduser('~/MLDatasets')
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH, "coco2014")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
else:
    # BUG FIX: the original raised the undefined name ``Error`` (a NameError,
    # not the intended exception) and misspelled the message. Raise a real
    # exception type and include the offending platform name.
    raise RuntimeError('unrecognized system: ' + syst)

print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__, keras.__version__))

import pprint
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=100, precision=4, threshold=1000, suppress=True)
##------------------------------------------------------------------------------------
## Old Shapes TRAINING
##------------------------------------------------------------------------------------
def prep_oldshapes_train(init_with=None, FCN_layers=False, batch_sz=5, epoch_steps=4,
                         folder_name="mrcnn_oldshape_training_logs"):
    """Build model, datasets and generators for training on the old Shapes data.

    Args:
        init_with: weight-init keyword/path forwarded to load_model().
        FCN_layers: whether the model builds its FCN layers.
        batch_sz: batch size (also used for IMAGES_PER_GPU).
        epoch_steps: STEPS_PER_EPOCH for the config.
        folder_name: checkpoint subfolder under MODEL_PATH.

    Returns:
        [model, dataset_train, dataset_val, train_generator, val_generator, config]
    """
    import mrcnn.shapes as shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = shapes.ShapesConfig()
    config.BATCH_SIZE = batch_sz            # Batch size is 2 (# GPUs * images/GPU).
    config.IMAGES_PER_GPU = batch_sz        # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset -----------------------------------------------
    dataset_train = shapes.ShapesDataset()
    dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_train.prepare()

    # Validation dataset
    dataset_val = shapes.ShapesDataset()
    dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_val.prepare()

    # BUG FIX: removed the dead ``try: del model ... except: pass`` block.
    # ``model`` is a local name that is never bound at that point, so the
    # ``del`` always raised and the bare except silently swallowed it (and
    # would have swallowed KeyboardInterrupt as well).
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,
                              FCN_layers=FCN_layers)

    print(' COCO Model Path : ', COCO_MODEL_PATH)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path : ', MODEL_PATH)
    print(' Resent Model Path : ', RESNET_MODEL_PATH)

    load_model(model, init_with=init_with)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment=False)
    val_generator = data_generator(dataset_val, model.config, shuffle=True,
                                   batch_size=model.config.BATCH_SIZE,
                                   augment=False)
    model.config.display()
    return [model, dataset_train, dataset_val, train_generator, val_generator, config]
##------------------------------------------------------------------------------------
## Old Shapes TESTING
##------------------------------------------------------------------------------------
def prep_oldshapes_test(init_with=None, FCN_layers=False, batch_sz=5, epoch_steps=4,
                        folder_name="mrcnn_oldshape_test_logs"):
    """Build model, dataset and generator for inference on the old Shapes data.

    Args:
        init_with: weight-init keyword/path forwarded to load_model().
        FCN_layers: whether the model builds its FCN layers.
        batch_sz: batch size (also used for IMAGES_PER_GPU).
        epoch_steps: STEPS_PER_EPOCH for the config.
        folder_name: checkpoint subfolder under MODEL_PATH.

    Returns:
        [model, dataset_test, test_generator, config]
    """
    import mrcnn.shapes as shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = shapes.ShapesConfig()
    config.BATCH_SIZE = batch_sz            # Batch size is 2 (# GPUs * images/GPU).
    config.IMAGES_PER_GPU = batch_sz        # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset -----------------------------------------------
    dataset_test = shapes.ShapesDataset()
    dataset_test.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_test.prepare()

    # Recreate the model in inference mode.
    # BUG FIX: removed the dead ``try: del model ... except: pass`` block —
    # ``model`` is never bound at this point, so the ``del`` always raised and
    # the bare except swallowed it (including KeyboardInterrupt).
    KB.clear_session()
    model = modellib.MaskRCNN(mode="inference",
                              config=config,
                              model_dir=MODEL_DIR,
                              FCN_layers=FCN_layers)

    print(' COCO Model Path : ', COCO_MODEL_PATH)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path : ', MODEL_PATH)
    print(' Resent Model Path : ', RESNET_MODEL_PATH)

    load_model(model, init_with=init_with)

    test_generator = data_generator(dataset_test, model.config, shuffle=True,
                                    batch_size=model.config.BATCH_SIZE,
                                    augment=False)
    model.config.display()
    return [model, dataset_test, test_generator, config]
##------------------------------------------------------------------------------------
## New Shapes TESTING
##------------------------------------------------------------------------------------
def prep_newshapes_test(init_with='last', FCN_layers=False, batch_sz=5, epoch_steps=4,
                        folder_name="mrcnn_newshape_test_logs"):
    """Build model, dataset and generator for inference on the new Shapes data.

    Args:
        init_with: weight-init keyword/path forwarded to load_model().
        FCN_layers: whether the model builds its FCN layers.
        batch_sz: batch size (also used for IMAGES_PER_GPU).
        epoch_steps: STEPS_PER_EPOCH for the config.
        folder_name: checkpoint subfolder under MODEL_PATH.

    Returns:
        [model, dataset_test, test_generator, config]
    """
    import mrcnn.new_shapes as new_shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE = batch_sz            # Batch size is 2 (# GPUs * images/GPU).
    config.IMAGES_PER_GPU = batch_sz        # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset -----------------------------------------------
    dataset_test = new_shapes.NewShapesDataset()
    dataset_test.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_test.prepare()

    # Recreate the model in inference mode.
    # BUG FIX: removed the dead ``try: del model ... except: pass`` block —
    # ``model`` is never bound at this point, so the ``del`` always raised and
    # the bare except swallowed it (including KeyboardInterrupt).
    KB.clear_session()
    model = modellib.MaskRCNN(mode="inference",
                              config=config,
                              model_dir=MODEL_DIR,
                              FCN_layers=FCN_layers)

    print(' COCO Model Path : ', COCO_MODEL_PATH)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path : ', MODEL_PATH)
    print(' Resent Model Path : ', RESNET_MODEL_PATH)

    load_model(model, init_with=init_with)

    test_generator = data_generator(dataset_test, model.config, shuffle=True,
                                    batch_size=model.config.BATCH_SIZE,
                                    augment=False)
    model.config.display()
    return [model, dataset_test, test_generator, config]
##------------------------------------------------------------------------------------
## New Shapes TRAINING
##------------------------------------------------------------------------------------
def prep_newshapes_train(init_with="last", FCN_layers=False, batch_sz=5, epoch_steps=4,
                         folder_name="mrcnn_newshape_training_logs"):
    """Build model, datasets and generator for training on the new Shapes data.

    Args:
        init_with: weight-init keyword/path forwarded to load_model().
        FCN_layers: whether the model builds its FCN layers.
        batch_sz: batch size (also used for IMAGES_PER_GPU).
        epoch_steps: STEPS_PER_EPOCH for the config.
        folder_name: checkpoint subfolder under MODEL_PATH.

    Returns:
        [model, dataset_train, train_generator, config]
        (a validation dataset is built for parity with the other prep_*
        helpers but is not returned — kept for interface compatibility).
    """
    import mrcnn.new_shapes as new_shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE = batch_sz            # Batch size is 2 (# GPUs * images/GPU).
    config.IMAGES_PER_GPU = batch_sz        # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Training dataset
    dataset_train = new_shapes.NewShapesDataset()
    dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_train.prepare()

    # Validation dataset
    dataset_val = new_shapes.NewShapesDataset()
    dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_val.prepare()

    # BUG FIX: removed the dead ``try: del model ... except: pass`` block —
    # ``model`` is never bound at this point, so the ``del`` always raised and
    # the bare except swallowed it (including KeyboardInterrupt).
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,
                              FCN_layers=FCN_layers)

    print('MODEL_PATH        : ', MODEL_PATH)
    print('COCO_MODEL_PATH   : ', COCO_MODEL_PATH)
    print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH)
    print('MODEL_DIR         : ', MODEL_DIR)
    print('Last Saved Model  : ', model.find_last())

    # BUG FIX: forward the caller's ``init_with`` instead of the hard-coded
    # 'last' the original passed, which silently ignored the parameter.
    load_model(model, init_with=init_with)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment=False)
    config.display()
    return [model, dataset_train, train_generator, config]
##------------------------------------------------------------------------------------
## LOAD MODEL
##------------------------------------------------------------------------------------
def load_model(model, init_with = None):
    '''
    Load weights into *model* according to ``init_with``.

    methods to load weights
        1 - load a specific file
        2 - find a last checkpoint in a specific folder
        3 - use init_with keyword

    Recognised values:
      * "imagenet" : ResNet50 ImageNet weights (RESNET_MODEL_PATH)
      * "coco"     : MS-COCO weights, skipping class-count-dependent heads
      * "last"     : most recent checkpoint returned by model.find_last()
      * any other non-empty string: treated as an explicit weights-file path
    '''
    # Which weights to start with?
    print('-----------------------------------------------')
    print(' Load model with init parm: ', init_with)
    print('-----------------------------------------------')
    ## 3- Use init_with keyword
    if init_with == "imagenet":
        loc=model.load_weights(RESNET_MODEL_PATH, by_name=True)
    elif init_with == "coco":
        # Load weights trained on MS COCO, but skip layers that
        # are different due to the different number of classes
        # See README for instructions to download the COCO weights
        loc=model.load_weights(COCO_MODEL_PATH, by_name=True,
                               exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
    elif init_with == "last":
        # Load the last model you trained and continue training, placing
        # checkpoints in the same folder.
        loc= model.load_weights(model.find_last()[1], by_name=True)
    else:
        # NOTE(review): ``assert`` is stripped under ``python -O`` so this is
        # weak input validation; also init_with=None falls through here and
        # would fail inside load_weights — confirm callers always pass a
        # usable value.
        assert init_with != "", "Provide path to trained weights"
        print("Loading weights from ", init_with)
        loc = model.load_weights(init_with, by_name=True)
    print('Load weights complete', loc)
"""
##------------------------------------------------------------------------------------
## Old Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_oldshapes_dev(init_with = None, FCN_layers = False, batch_sz = 5):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_oldshape_dev_logs")
config = build_config(batch_sz = batch_sz)
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(150, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers)
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
print(' Resent Model Path : ', RESNET_MODEL_PATH)
load_model(model, init_with = init_with)
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_train, train_generator, config]
##------------------------------------------------------------------------------------
## New Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_newshapes_dev(init_with = "last", FCN_layers= False, batch_sz = 5):
import mrcnn.new_shapes as new_shapes
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_newshape_dev_logs")
config = build_config(batch_sz = batch_sz, newshapes=True)
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_train = new_shapes.NewShapesDataset()
dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = new_shapes.NewShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
try :
del model, train_generator, val_generator, mm
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)
print('MODEL_PATH : ', MODEL_PATH)
print('COCO_MODEL_PATH : ', COCO_MODEL_PATH)
print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH)
print('MODEL_DIR : ', MODEL_DIR)
print('Last Saved Model : ', model.find_last())
load_model(model, init_with = 'last')
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
config.display()
return [model, dataset_train, train_generator, config]
"""
| 41.631841
| 139
| 0.571881
| 1,844
| 16,736
| 4.92679
| 0.12961
| 0.059439
| 0.038745
| 0.024766
| 0.744854
| 0.7224
| 0.714695
| 0.710952
| 0.695432
| 0.66087
| 0
| 0.01108
| 0.234226
| 16,736
| 401
| 140
| 41.735661
| 0.6978
| 0.224426
| 0
| 0.628866
| 0
| 0
| 0.127104
| 0.031182
| 0
| 0
| 0
| 0
| 0.005155
| 1
| 0.025773
| false
| 0.020619
| 0.134021
| 0
| 0.180412
| 0.164948
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
37364d93f41797c2d94de64e1a22fc15b1eaf8b9
| 809
|
py
|
Python
|
apply/models.py
|
4yub1k/job-portal-django
|
1e6df39cd56cc4a9b0a810a0257e3f1c5b103c5d
|
[
"MIT"
] | null | null | null |
apply/models.py
|
4yub1k/job-portal-django
|
1e6df39cd56cc4a9b0a810a0257e3f1c5b103c5d
|
[
"MIT"
] | null | null | null |
apply/models.py
|
4yub1k/job-portal-django
|
1e6df39cd56cc4a9b0a810a0257e3f1c5b103c5d
|
[
"MIT"
] | null | null | null |
from django.db import models
from listings.models import PostJob
# Create your models here.
class ApplicantForm(models.Model):
    """One submitted job application.

    NOTE(review): despite the name, this is a Django *model*, not a Form —
    renaming would require updating FKs and migrations, so it is documented
    instead.
    """
    # Job title/identifier stored as free text (not a FK to PostJob,
    # although listings.models.PostJob is imported above).
    job=models.CharField(max_length=100)
    name = models.CharField(max_length=50)
    email = models.EmailField(max_length=200)
    mobile = models.CharField(max_length=15)
    education = models.CharField(max_length=50)
    exp = models.CharField(max_length=50)   # work experience, free text
    # Uploaded resume, stored under resume/<year>/<month>/<day>/.
    resume = models.FileField(upload_to='resume/%Y/%m/%d/')

    def __str__(self):
        return self.name
class Review(models.Model):
    """Review/screening status attached to a single application."""
    # FK to the application; the field name 'name' is misleading, but
    # renaming would require a schema migration.
    name = models.ForeignKey(ApplicantForm, on_delete=models.CASCADE, null=False)
    # (sic) 'reviewd' — renaming the column requires a migration.
    reviewd = models.BooleanField(default=False)
    # NOTE(review): integer default on a CharField — presumably should be the
    # string '0'; confirm before changing (it would alter migrations).
    ratings =models.CharField(max_length=2,default=0)
    remarks = models.TextField(blank=True)

    def __str__(self):
        return self.name.name
| 38.52381
| 81
| 0.733004
| 108
| 809
| 5.333333
| 0.509259
| 0.109375
| 0.1875
| 0.25
| 0.21875
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0.023392
| 0.154512
| 809
| 21
| 82
| 38.52381
| 0.818713
| 0.029666
| 0
| 0.105263
| 0
| 0
| 0.020408
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0.105263
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
376c2ba2e639b495e4597e5d4756a44025763255
| 244
|
py
|
Python
|
modules/xia2/command_line/resolutionizer.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/xia2/command_line/resolutionizer.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/xia2/command_line/resolutionizer.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
from __future__ import absolute_import, division, print_function
import sys
if __name__ == "__main__":
    # Thin CLI shim: delegate to the dials Resolutionizer entry point,
    # forwarding all command-line arguments (minus the program name).
    from dials.util.Resolutionizer import run
    run(sys.argv[1:])
| 24.4
| 69
| 0.79918
| 34
| 244
| 5.117647
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 0.131148
| 244
| 9
| 70
| 27.111111
| 0.811321
| 0.27459
| 0
| 0
| 0
| 0
| 0.045714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3771d8b8b10ada3965f1cd8fc8bc1d0e18b2817c
| 89
|
py
|
Python
|
networks/__init__.py
|
maplel/vehicle-counting
|
c30372a9695fd7b838491461ee787f72c846d0b4
|
[
"MIT"
] | 47
|
2020-11-08T08:14:22.000Z
|
2022-03-26T15:18:04.000Z
|
networks/__init__.py
|
alikaz3mi/vehicle-counting
|
2319714acdc8dcb97b0b7a2c87391b94095bf6fa
|
[
"MIT"
] | 16
|
2020-11-08T09:05:36.000Z
|
2022-03-22T04:24:37.000Z
|
networks/__init__.py
|
alikaz3mi/vehicle-counting
|
2319714acdc8dcb97b0b7a2c87391b94095bf6fa
|
[
"MIT"
] | 25
|
2020-11-08T08:14:24.000Z
|
2022-03-15T07:19:31.000Z
|
from .yolo import get_model
from .detector import Detector
from .deepsort import DeepSort
| 29.666667
| 30
| 0.842697
| 13
| 89
| 5.692308
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123596
| 89
| 3
| 31
| 29.666667
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3775adf96a68d0edf0da11b01b76bbd7517e3cbf
| 68
|
py
|
Python
|
lambda-vpc/lambda.py
|
jeffbrl/terraform-examples
|
c25aeda07d26f0f12b6a233cab28227fa668b8fa
|
[
"MIT"
] | 6
|
2019-03-11T19:07:38.000Z
|
2021-11-08T13:17:55.000Z
|
lambda-simple/lambda.py
|
jeffbrl/terraform-examples
|
c25aeda07d26f0f12b6a233cab28227fa668b8fa
|
[
"MIT"
] | null | null | null |
lambda-simple/lambda.py
|
jeffbrl/terraform-examples
|
c25aeda07d26f0f12b6a233cab28227fa668b8fa
|
[
"MIT"
] | 7
|
2019-07-28T13:25:23.000Z
|
2022-02-21T10:12:14.000Z
|
def lambda_handler(event, context):
    """AWS Lambda entry point: print a greeting; returns None.

    ``event`` and ``context`` are accepted per the Lambda handler contract
    but are not used.
    """
    greeting = "Hello from Lambda"
    print(greeting)
| 17
| 35
| 0.720588
| 9
| 68
| 5.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 68
| 3
| 36
| 22.666667
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.253731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
806a59c3e5ca274ac7ce3f0a467e29b7924a7c78
| 130
|
py
|
Python
|
sandbox/measureIntensities.py
|
soylentdeen/CIAO-commissioning-tools
|
8cb3d7412106d3b18054df2e82796000df0035bb
|
[
"MIT"
] | null | null | null |
sandbox/measureIntensities.py
|
soylentdeen/CIAO-commissioning-tools
|
8cb3d7412106d3b18054df2e82796000df0035bb
|
[
"MIT"
] | null | null | null |
sandbox/measureIntensities.py
|
soylentdeen/CIAO-commissioning-tools
|
8cb3d7412106d3b18054df2e82796000df0035bb
|
[
"MIT"
] | null | null | null |
import scipy
import pyfits
import numpy
import VLTTools
# Open a live (simulate=False) connection to the VLT instrument software and
# trigger an intensity-averaging measurement. Side effects only: no return
# value is captured. NOTE(review): talks to real hardware — presumably run
# interactively at the telescope; confirm before automating.
ciao = VLTTools.VLTConnection(simulate=False)
ciao.averageIntensities()
| 14.444444
| 45
| 0.830769
| 15
| 130
| 7.2
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107692
| 130
| 8
| 46
| 16.25
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8084a4f3321bdd6b0f54cc24dd55849061adcdd0
| 160
|
py
|
Python
|
src/foriforloop.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
src/foriforloop.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
src/foriforloop.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
#Oskar Svedlund
#TEINF-20
#2021-09-20
#For i For loop
for i in range(1,10):
for j in range(1,10):
print(i*j, end="\t")
print()
| 16
| 28
| 0.51875
| 29
| 160
| 2.862069
| 0.586207
| 0.096386
| 0.192771
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 0.325
| 160
| 10
| 29
| 16
| 0.62037
| 0.2875
| 0
| 0
| 0
| 0
| 0.018018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
8097a5a678a48a0f40893c973ba73f456ebd2dc0
| 71
|
py
|
Python
|
mohdAkibUddin.py
|
mohdAkibUddin/Week2
|
f8ad361a3d734dbc32d1bcc571a7fe1abf41b481
|
[
"MIT"
] | null | null | null |
mohdAkibUddin.py
|
mohdAkibUddin/Week2
|
f8ad361a3d734dbc32d1bcc571a7fe1abf41b481
|
[
"MIT"
] | null | null | null |
mohdAkibUddin.py
|
mohdAkibUddin/Week2
|
f8ad361a3d734dbc32d1bcc571a7fe1abf41b481
|
[
"MIT"
] | 11
|
2020-09-21T18:23:21.000Z
|
2020-10-03T18:18:14.000Z
|
while (True):
print("mohammed uddin made changes")
print(":D")
| 17.75
| 40
| 0.619718
| 9
| 71
| 4.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211268
| 71
| 3
| 41
| 23.666667
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.408451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
809bd2a29d3c3b05833188596c8dc99b707ea9c0
| 397
|
py
|
Python
|
tests/plugins/test_stv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 5
|
2017-03-21T19:43:17.000Z
|
2018-10-03T14:04:29.000Z
|
tests/plugins/test_stv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 7
|
2016-10-13T23:29:31.000Z
|
2018-06-28T14:04:32.000Z
|
tests/plugins/test_stv.py
|
bumplzz69/streamlink
|
34abc43875d7663ebafa241573dece272e93d88b
|
[
"BSD-2-Clause"
] | 2
|
2016-11-24T18:37:33.000Z
|
2017-03-21T19:43:49.000Z
|
import unittest
from streamlink.plugins.stv import STV
class TestPluginSTV(unittest.TestCase):
def test_can_handle_url(self):
self.assertTrue(STV.can_handle_url('https://player.stv.tv/live'))
self.assertTrue(STV.can_handle_url('http://player.stv.tv/live'))
def test_can_handle_url_negative(self):
self.assertFalse(STV.can_handle_url('http://example.com/live'))
| 30.538462
| 73
| 0.738035
| 57
| 397
| 4.912281
| 0.438596
| 0.160714
| 0.214286
| 0.160714
| 0.425
| 0.207143
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128463
| 397
| 12
| 74
| 33.083333
| 0.809249
| 0
| 0
| 0
| 0
| 0
| 0.186398
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
80a5b928bb6d1053f9027149c1b0bf7b0a002c9f
| 24
|
py
|
Python
|
butterflow/version.py
|
changhaitravis/butterflow
|
d9f92e31a57e800a56b98fa17786144b50d3e3bb
|
[
"MIT"
] | 1,340
|
2015-01-04T01:43:39.000Z
|
2022-03-29T04:44:41.000Z
|
butterflow/version.py
|
pmorris2012/butterflow
|
08f09a0c5e70cdd24953c638b8785e6deee9022e
|
[
"MIT"
] | 134
|
2015-04-05T10:04:02.000Z
|
2022-02-16T22:34:03.000Z
|
butterflow/version.py
|
pmorris2012/butterflow
|
08f09a0c5e70cdd24953c638b8785e6deee9022e
|
[
"MIT"
] | 116
|
2015-04-14T17:37:33.000Z
|
2022-02-19T21:36:22.000Z
|
__version__ = '0.2.4a4'
| 12
| 23
| 0.666667
| 4
| 24
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.125
| 24
| 1
| 24
| 24
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
80a7758b26ee685bbdd108bed7010842ba5f6ca5
| 191
|
py
|
Python
|
app/commands.py
|
Addovej/simple_catalog
|
3972c57796538958227a924d618df29c456389ab
|
[
"MIT"
] | null | null | null |
app/commands.py
|
Addovej/simple_catalog
|
3972c57796538958227a924d618df29c456389ab
|
[
"MIT"
] | null | null | null |
app/commands.py
|
Addovej/simple_catalog
|
3972c57796538958227a924d618df29c456389ab
|
[
"MIT"
] | null | null | null |
from flask import current_app as app
@app.cli.command('sync-data')
def sync_data_cli():
from app.tasks import sync_data
print('Sync data was launched')
sync_data.apply_async()
| 19.1
| 36
| 0.727749
| 31
| 191
| 4.290323
| 0.548387
| 0.300752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172775
| 191
| 9
| 37
| 21.222222
| 0.841772
| 0
| 0
| 0
| 0
| 0
| 0.162304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
80d33d703d579b6c6f9d8c149124f4f729a68c98
| 89
|
py
|
Python
|
game_core/app/user/__init__.py
|
meseta/advent-of-code-2020
|
a6871b2efa99c38d5d13d872e53a8e9649f8322b
|
[
"MIT"
] | 1
|
2020-12-30T11:25:17.000Z
|
2020-12-30T11:25:17.000Z
|
game_core/app/user/__init__.py
|
meseta/advent-of-code-2020
|
a6871b2efa99c38d5d13d872e53a8e9649f8322b
|
[
"MIT"
] | 13
|
2020-12-29T19:08:20.000Z
|
2021-02-01T04:27:36.000Z
|
game_core/app/user/__init__.py
|
meseta/advent-of-code-2020
|
a6871b2efa99c38d5d13d872e53a8e9649f8322b
|
[
"MIT"
] | 1
|
2020-12-27T19:57:16.000Z
|
2020-12-27T19:57:16.000Z
|
from .user import User
from .models import UserData, Source
from .sentinels import NoUid
| 22.25
| 36
| 0.808989
| 13
| 89
| 5.538462
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 89
| 3
| 37
| 29.666667
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
80f45a78b0c2b125069f786891a8a66588be6f0b
| 53
|
py
|
Python
|
researchutils/chainer/training/__init__.py
|
keio-ytlab/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 1
|
2018-10-25T12:57:38.000Z
|
2018-10-25T12:57:38.000Z
|
researchutils/chainer/training/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 28
|
2018-08-25T03:54:30.000Z
|
2018-10-14T12:09:47.000Z
|
researchutils/chainer/training/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | null | null | null |
from researchutils.chainer.training import extensions
| 53
| 53
| 0.90566
| 6
| 53
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 53
| 1
| 53
| 53
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
03816d13c5acc34622656515ab97d00edc0dbff5
| 30
|
py
|
Python
|
hilbert/discontinu.py
|
kiaderouiche/hilbmetrics
|
c106d86f9cfad5902e0a6aaa0b3e88312910c42a
|
[
"Apache-2.0"
] | null | null | null |
hilbert/discontinu.py
|
kiaderouiche/hilbmetrics
|
c106d86f9cfad5902e0a6aaa0b3e88312910c42a
|
[
"Apache-2.0"
] | 1
|
2020-10-07T14:09:50.000Z
|
2020-10-07T14:09:50.000Z
|
hilbert/discontinu.py
|
kiaderouiche/hilbmetrics
|
c106d86f9cfad5902e0a6aaa0b3e88312910c42a
|
[
"Apache-2.0"
] | null | null | null |
"""
Draw discontinu form
"""
| 6
| 20
| 0.6
| 3
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 30
| 4
| 21
| 7.5
| 0.75
| 0.666667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
03fea0a393e3529279b7c095f4fe579bb316d449
| 254
|
py
|
Python
|
src/pytest_alembic/plugin/__init__.py
|
ZipFile/pytest-alembic
|
f0f761e6ae2cd7764582f8c6e4c15f3623512de9
|
[
"MIT"
] | 47
|
2020-04-16T20:03:04.000Z
|
2022-03-23T09:51:01.000Z
|
src/pytest_alembic/plugin/__init__.py
|
ZipFile/pytest-alembic
|
f0f761e6ae2cd7764582f8c6e4c15f3623512de9
|
[
"MIT"
] | 30
|
2020-06-26T15:46:45.000Z
|
2022-03-12T17:29:38.000Z
|
src/pytest_alembic/plugin/__init__.py
|
ZipFile/pytest-alembic
|
f0f761e6ae2cd7764582f8c6e4c15f3623512de9
|
[
"MIT"
] | 8
|
2021-03-04T16:44:22.000Z
|
2022-01-21T19:16:33.000Z
|
# flake8: noqa
from pytest_alembic.plugin.fixtures import alembic_config, alembic_engine, alembic_runner
from pytest_alembic.plugin.hooks import (
pytest_addoption,
pytest_collection_modifyitems,
pytest_configure,
pytest_itemcollected,
)
| 28.222222
| 89
| 0.811024
| 29
| 254
| 6.758621
| 0.586207
| 0.102041
| 0.173469
| 0.234694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004545
| 0.133858
| 254
| 8
| 90
| 31.75
| 0.886364
| 0.047244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ff16761ac6ac64a735ea2b3c625e7fab066e67b4
| 97
|
py
|
Python
|
Hello-World/Python/hello_world_weiyim.py
|
RoyalTechie/Hacktoberfest
|
5a66f81972a3db15d7c48c8d163d8de4df01d0fe
|
[
"Apache-2.0"
] | 1
|
2021-10-08T17:18:23.000Z
|
2021-10-08T17:18:23.000Z
|
Hello-World/Python/hello_world_weiyim.py
|
RoyalTechie/Hacktoberfest
|
5a66f81972a3db15d7c48c8d163d8de4df01d0fe
|
[
"Apache-2.0"
] | null | null | null |
Hello-World/Python/hello_world_weiyim.py
|
RoyalTechie/Hacktoberfest
|
5a66f81972a3db15d7c48c8d163d8de4df01d0fe
|
[
"Apache-2.0"
] | null | null | null |
'''
LANGUAGE: Python
AUTHOR: Weiyi
GITHUB: https://github.com/weiyi-m
'''
print("Hello World!")
| 12.125
| 34
| 0.680412
| 13
| 97
| 5.076923
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 97
| 7
| 35
| 13.857143
| 0.767442
| 0.670103
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
ff1b51db173b135f17b515974a4212da0d589ff5
| 673
|
py
|
Python
|
music/migrations/0003_auto_20170326_1725.py
|
wilk16/music
|
1fce992fa3d3b0f00202c4e5db7cdd3129794325
|
[
"MIT"
] | 1
|
2017-04-27T19:47:52.000Z
|
2017-04-27T19:47:52.000Z
|
music/migrations/0003_auto_20170326_1725.py
|
wilk16/music
|
1fce992fa3d3b0f00202c4e5db7cdd3129794325
|
[
"MIT"
] | null | null | null |
music/migrations/0003_auto_20170326_1725.py
|
wilk16/music
|
1fce992fa3d3b0f00202c4e5db7cdd3129794325
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-26 17:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('music', '0002_auto_20170325_1658'),
]
operations = [
migrations.AlterModelOptions(
name='ownedrecord',
options={'ordering': ['-purchase_date']},
),
migrations.AlterModelOptions(
name='record',
options={'ordering': ['-release_date']},
),
migrations.AlterModelOptions(
name='track',
options={'ordering': ['number']},
),
]
| 24.035714
| 53
| 0.569094
| 59
| 673
| 6.322034
| 0.711864
| 0.217158
| 0.24933
| 0.187668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069328
| 0.292719
| 673
| 27
| 54
| 24.925926
| 0.714286
| 0.10104
| 0
| 0.3
| 1
| 0
| 0.177741
| 0.038206
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
207e722020a615f43ed5982780d18acdc6149839
| 687
|
py
|
Python
|
httprider/core/util_functions.py
|
iSWORD/http-rider
|
5d9e5cc8c5166ab58f81d30d21b3ce2497bf09b9
|
[
"MIT"
] | 27
|
2019-12-20T00:10:28.000Z
|
2022-03-09T18:04:23.000Z
|
httprider/core/util_functions.py
|
iSWORD/http-rider
|
5d9e5cc8c5166ab58f81d30d21b3ce2497bf09b9
|
[
"MIT"
] | 6
|
2019-10-13T08:50:21.000Z
|
2020-06-05T12:23:08.000Z
|
httprider/core/util_functions.py
|
iSWORD/http-rider
|
5d9e5cc8c5166ab58f81d30d21b3ce2497bf09b9
|
[
"MIT"
] | 7
|
2019-08-10T01:38:31.000Z
|
2021-08-23T05:28:46.000Z
|
import base64
from httprider.core.constants import UTF_8_ENCODING
def str_to_base64e(arg, url_safe=False):
if not arg:
return ""
if url_safe:
return base64.urlsafe_b64encode(bytes(arg, UTF_8_ENCODING)).decode(
UTF_8_ENCODING
)
else:
return base64.b64encode(bytes(arg, UTF_8_ENCODING)).decode(UTF_8_ENCODING)
def str_to_base64d(arg, url_safe=False):
if not arg:
return ""
if url_safe:
return base64.urlsafe_b64decode(arg).decode(UTF_8_ENCODING)
else:
return base64.b64decode(arg).decode(UTF_8_ENCODING)
utility_func_map = {"base64Encode": str_to_base64e, "base64Decode": str_to_base64d}
| 23.689655
| 83
| 0.695779
| 95
| 687
| 4.715789
| 0.336842
| 0.0625
| 0.1875
| 0.160714
| 0.732143
| 0.732143
| 0.575893
| 0.464286
| 0.464286
| 0.464286
| 0
| 0.068519
| 0.213974
| 687
| 28
| 84
| 24.535714
| 0.761111
| 0
| 0
| 0.421053
| 0
| 0
| 0.034935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
20cf61fa63479f73f04541a938e1a69f0899fa45
| 159
|
py
|
Python
|
async_pluct/__init__.py
|
lucasts/async-pluct
|
466e60ed6ed418cd5aa28cae457af61ef6984325
|
[
"MIT"
] | 4
|
2017-11-08T19:43:05.000Z
|
2017-11-10T15:03:46.000Z
|
async_pluct/__init__.py
|
lucasts/async-pluct
|
466e60ed6ed418cd5aa28cae457af61ef6984325
|
[
"MIT"
] | 1
|
2021-06-01T21:20:10.000Z
|
2021-06-01T21:20:10.000Z
|
async_pluct/__init__.py
|
lucasts/async-pluct
|
466e60ed6ed418cd5aa28cae457af61ef6984325
|
[
"MIT"
] | 1
|
2018-05-25T18:02:10.000Z
|
2018-05-25T18:02:10.000Z
|
# Used to mock validate method on tests
try:
from async_pluct import resource
resources = resource
except ImportError:
pass
__version__ = '0.3.4'
| 17.666667
| 39
| 0.72327
| 22
| 159
| 5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024194
| 0.220126
| 159
| 8
| 40
| 19.875
| 0.862903
| 0.232704
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
4549a01687a779349f7bebd7bb8082ab53880b54
| 85
|
py
|
Python
|
fabric_colors/main.py
|
fabric-colors/fabric-colors
|
de43a4e78b87b6fbfecacca1525178dea1827680
|
[
"BSD-2-Clause"
] | 3
|
2015-01-14T06:45:44.000Z
|
2016-11-15T13:37:16.000Z
|
fabric_colors/main.py
|
fabric-colors/fabric-colors
|
de43a4e78b87b6fbfecacca1525178dea1827680
|
[
"BSD-2-Clause"
] | null | null | null |
fabric_colors/main.py
|
fabric-colors/fabric-colors
|
de43a4e78b87b6fbfecacca1525178dea1827680
|
[
"BSD-2-Clause"
] | null | null | null |
def main():
"""
Main command-line execution loop.
"""
print "hello!"
| 14.166667
| 37
| 0.529412
| 9
| 85
| 5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.305882
| 85
| 5
| 38
| 17
| 0.762712
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
45666129dd2cfce487e7cf0c08300367cf69e77e
| 226
|
py
|
Python
|
service_data/store_data/admin.py
|
Djalyarim/Test_task
|
c4f66b8ef50fcde679c4dff62ddee162064f26e0
|
[
"MIT"
] | 1
|
2022-02-01T08:29:39.000Z
|
2022-02-01T08:29:39.000Z
|
service_data/store_data/admin.py
|
Djalyarim/Test_task
|
c4f66b8ef50fcde679c4dff62ddee162064f26e0
|
[
"MIT"
] | null | null | null |
service_data/store_data/admin.py
|
Djalyarim/Test_task
|
c4f66b8ef50fcde679c4dff62ddee162064f26e0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import UserWeight
@admin.register(UserWeight)
class UserweightAdmin(admin.ModelAdmin):
list_display = ('id', 'user_id', 'day', 'weight')
empty_value_display = '-пусто-'
| 22.6
| 53
| 0.734513
| 27
| 226
| 6
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137168
| 226
| 9
| 54
| 25.111111
| 0.830769
| 0
| 0
| 0
| 0
| 0
| 0.110619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
458f340ccbc47b26ed840f1fde9b3e90c4681d97
| 876
|
py
|
Python
|
tests/util.py
|
cook-health/messaging
|
1a827b97d9af6e56d55c362b29dd79a6cb373f88
|
[
"MIT"
] | null | null | null |
tests/util.py
|
cook-health/messaging
|
1a827b97d9af6e56d55c362b29dd79a6cb373f88
|
[
"MIT"
] | 2
|
2018-03-14T10:42:37.000Z
|
2018-03-14T11:01:31.000Z
|
tests/util.py
|
Seliniux777/Nexmo-nexmo-python
|
d1d60e8068b1cb23f12507a6ec1cd500285890b5
|
[
"MIT"
] | 1
|
2020-10-18T09:41:15.000Z
|
2020-10-18T09:41:15.000Z
|
import re
import pytest
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import responses
def request_body():
return responses.calls[0].request.body
def request_query():
return urlparse(responses.calls[0].request.url).query
def request_user_agent():
return responses.calls[0].request.headers['User-Agent']
def request_authorization():
return responses.calls[0].request.headers['Authorization'].decode('utf-8')
def request_content_type():
return responses.calls[0].request.headers['Content-Type']
def stub(method, url):
responses.add(method, url, body='{"key":"value"}', status=200, content_type='application/json')
def assert_re(pattern, string):
__tracebackhide__ = True
if not re.search(pattern, string):
pytest.fail("Cannot find pattern %r in %r" % (pattern, string))
| 21.365854
| 99
| 0.723744
| 115
| 876
| 5.4
| 0.434783
| 0.080515
| 0.120773
| 0.177134
| 0.214171
| 0.169082
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 0.14726
| 876
| 40
| 100
| 21.9
| 0.819277
| 0
| 0
| 0
| 0
| 0
| 0.113014
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.304348
| false
| 0
| 0.26087
| 0.217391
| 0.782609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
45a8ff6d95e1c12c273882b1f0f7453ab2352319
| 1,643
|
py
|
Python
|
test/pandas/1_select.py
|
wull566/tensorflow_demo
|
c2c45050867cb056b8193eb53466d26b80b0ec13
|
[
"MIT"
] | 2
|
2019-03-24T12:58:17.000Z
|
2021-05-18T06:21:21.000Z
|
test/pandas/1_select.py
|
wull566/tensorflow_demo
|
c2c45050867cb056b8193eb53466d26b80b0ec13
|
[
"MIT"
] | null | null | null |
test/pandas/1_select.py
|
wull566/tensorflow_demo
|
c2c45050867cb056b8193eb53466d26b80b0ec13
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pandas 学习
字典形式的numpy
"""
from __future__ import print_function
import numpy as np
import pandas as pd
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates, columns=['A','B','C','D'])
"""
A B C D
2013-01-01 0 1 2 3
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
2013-01-04 12 13 14 15
2013-01-05 16 17 18 19
2013-01-06 20 21 22 23
"""
# print(df['A'])
print(df.A)
print(df[0:3])
"""
A B C D
2013-01-01 0 1 2 3
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
"""
print(df['20130102':'20130104'])
"""
A B C D
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
2013-01-04 12 13 14 15
"""
print(df.loc['20130102'])
"""
A 4
B 5
C 6
D 7
Name: 2013-01-02 00:00:00, dtype: int64
"""
print(df.loc[:,['A','B']])
"""
A B
2013-01-01 0 1
2013-01-02 4 5
2013-01-03 8 9
2013-01-04 12 13
2013-01-05 16 17
2013-01-06 20 21
"""
print(df.loc['20130102',['A','B']])
"""
A 4
B 5
Name: 2013-01-02 00:00:00, dtype: int64
"""
print(df.iloc[3,1])
# 13
print(df.iloc[3:5,1:3])
"""
B C
2013-01-04 13 14
2013-01-05 17 18
"""
print(df.iloc[[1,3,5],1:3])
"""
B C
2013-01-02 5 6
2013-01-04 13 14
2013-01-06 21 22
"""
# 根据混合的这两种 ix
print(df.ix[:3,['A','C']])
"""
A C
2013-01-01 0 2
2013-01-02 4 6
2013-01-03 8 10
"""
print(df[df.A>8])
"""
A B C D
2013-01-04 12 13 14 15
2013-01-05 16 17 18 19
2013-01-06 20 21 22 23
"""
| 14.286957
| 86
| 0.506999
| 338
| 1,643
| 2.446746
| 0.221893
| 0.224909
| 0.077388
| 0.024184
| 0.58162
| 0.428053
| 0.41838
| 0.382104
| 0.353083
| 0.353083
| 0
| 0.416814
| 0.312234
| 1,643
| 114
| 87
| 14.412281
| 0.315044
| 0.057821
| 0
| 0
| 0
| 0
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.75
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
afd9196a7899334abc62e4ac1a6e707be3e9bbb2
| 45
|
py
|
Python
|
pyipma/__init__.py
|
joaocps/pyipma
|
d10e14bed66213c328e5f409d2cd7b0fddffa6e4
|
[
"MIT"
] | null | null | null |
pyipma/__init__.py
|
joaocps/pyipma
|
d10e14bed66213c328e5f409d2cd7b0fddffa6e4
|
[
"MIT"
] | null | null | null |
pyipma/__init__.py
|
joaocps/pyipma
|
d10e14bed66213c328e5f409d2cd7b0fddffa6e4
|
[
"MIT"
] | null | null | null |
from .consts import *
__version__ = '2.0.5'
| 11.25
| 21
| 0.666667
| 7
| 45
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0.177778
| 45
| 3
| 22
| 15
| 0.621622
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
afe0fb0fe777cecce7f8b46ab7a630caecdcd1dd
| 135
|
py
|
Python
|
wmc/__main__.py
|
axju/wmc
|
caa54c3bfe809104c8c65972d116388dcfb066f5
|
[
"MIT"
] | 1
|
2019-09-16T22:24:23.000Z
|
2019-09-16T22:24:23.000Z
|
wmc/__main__.py
|
axju/wmc
|
caa54c3bfe809104c8c65972d116388dcfb066f5
|
[
"MIT"
] | null | null | null |
wmc/__main__.py
|
axju/wmc
|
caa54c3bfe809104c8c65972d116388dcfb066f5
|
[
"MIT"
] | null | null | null |
"""Only the entry point for the python command"""
from wmc.cli import main
if __name__ == '__main__': # pragma: no cover
main()
| 19.285714
| 49
| 0.674074
| 20
| 135
| 4.15
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207407
| 135
| 6
| 50
| 22.5
| 0.775701
| 0.451852
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
afe39d9a773ee94edb0c80681cedc259ff4d532a
| 735
|
py
|
Python
|
dynamicserialize/dstypes/com/raytheon/uf/common/localization/stream/LocalizationStreamGetRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamicserialize/dstypes/com/raytheon/uf/common/localization/stream/LocalizationStreamGetRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamicserialize/dstypes/com/raytheon/uf/common/localization/stream/LocalizationStreamGetRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
import os
from dynamicserialize.dstypes.com.raytheon.uf.common.localization.stream import AbstractLocalizationStreamRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.auth.user import User
class LocalizationStreamGetRequest(AbstractLocalizationStreamRequest):
def __init__(self):
super(LocalizationStreamGetRequest, self).__init__()
self.offset = None
self.numBytes = None
def getOffset(self):
return self.offset
def setOffset(self, offset):
self.offset = offset
def getNumBytes(self):
return self.numBytes
def setNumBytes(self, numBytes):
self.numBytes = numBytes
| 25.344828
| 113
| 0.737415
| 74
| 735
| 7.216216
| 0.459459
| 0.074906
| 0.101124
| 0.11236
| 0.172285
| 0.172285
| 0.172285
| 0
| 0
| 0
| 0
| 0
| 0.186395
| 735
| 28
| 114
| 26.25
| 0.892977
| 0.089796
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3125
| false
| 0
| 0.1875
| 0.125
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
b32fba3b2a1b186843d40ad148d4d4fdef0edbb2
| 189
|
py
|
Python
|
Chap03DataStructures/2-2-3.分数.py
|
royqh1979/programming_with_python
|
7e1e8f88381151b803b6ae6ebda9809d9cc6664a
|
[
"MIT"
] | 5
|
2019-03-06T12:28:47.000Z
|
2022-01-06T14:06:02.000Z
|
Chap03DataStructures/2-2-3.分数.py
|
royqh1979/programming_with_python
|
7e1e8f88381151b803b6ae6ebda9809d9cc6664a
|
[
"MIT"
] | 6
|
2021-02-02T22:40:49.000Z
|
2022-03-12T00:27:54.000Z
|
Chap03DataStructures/2-2-3.分数.py
|
royqh1979/programming_with_python
|
7e1e8f88381151b803b6ae6ebda9809d9cc6664a
|
[
"MIT"
] | 4
|
2019-03-06T14:29:25.000Z
|
2020-06-02T15:16:40.000Z
|
from fractions import Fraction
f1=Fraction(1,2)
f2=f1
print(f1,f2)
print(f1 is f2)
print(f1.numerator ,f1.denominator)
f1.numerator = 3
f3=Fraction(1,3)
f1+=f3
print(f1,f2)
print(f1 is f2)
| 15.75
| 35
| 0.740741
| 38
| 189
| 3.684211
| 0.368421
| 0.25
| 0.192857
| 0.2
| 0.285714
| 0.285714
| 0.285714
| 0
| 0
| 0
| 0
| 0.130952
| 0.111111
| 189
| 12
| 36
| 15.75
| 0.702381
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.454545
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
b33449ad57f994a9b1bcc9f11a29cde1d8020790
| 577
|
py
|
Python
|
loader.py
|
LTS4/TIGraNet
|
22ba11b665e8445f1f759c0d13414429d9a03265
|
[
"MIT"
] | 8
|
2018-08-21T20:58:05.000Z
|
2020-05-15T03:42:06.000Z
|
loader.py
|
LTS4/TIGraNet
|
22ba11b665e8445f1f759c0d13414429d9a03265
|
[
"MIT"
] | 1
|
2020-12-24T05:12:14.000Z
|
2021-03-23T15:04:46.000Z
|
loader.py
|
LTS4/TIGraNet
|
22ba11b665e8445f1f759c0d13414429d9a03265
|
[
"MIT"
] | 3
|
2018-10-14T14:54:07.000Z
|
2021-02-28T21:59:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data loader for the PyTorch framework.
"""
from tqdm import tqdm
import os, re
import torch
import torch.utils.data as data
from utils import select
class MNIST_bis(data.Dataset):
def __init__(self, dataset, size, digits_to_keep, stratified_sampling=True):
self.dataset=dataset
self.indices=select(dataset, size, digits_to_keep, stratified_sampling)
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
| 23.08
| 80
| 0.686308
| 78
| 577
| 4.833333
| 0.525641
| 0.087533
| 0.090186
| 0.100796
| 0.217507
| 0.217507
| 0.217507
| 0
| 0
| 0
| 0
| 0.002188
| 0.207972
| 577
| 25
| 81
| 23.08
| 0.822757
| 0.140381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.384615
| 0.153846
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
b34b3b5f61a158f01b335653ba2d517f0b3bc95c
| 1,015
|
py
|
Python
|
gcompiler/python/delta_infer/subgraphs/__init__.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 1,442
|
2019-07-09T07:34:28.000Z
|
2020-11-15T09:52:09.000Z
|
gcompiler/python/delta_infer/subgraphs/__init__.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 93
|
2019-07-22T09:20:20.000Z
|
2020-11-13T01:59:30.000Z
|
gcompiler/python/delta_infer/subgraphs/__init__.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 296
|
2019-07-09T07:35:28.000Z
|
2020-11-16T02:27:51.000Z
|
from .transformer import *
from .common import *
#tf.compat.v1.disable_eager_execution()
#
#batch_size = 40
#seq_length = 200
#hidden_size = 768
#num_attention_heads =12
#size_per_head = int(hidden_size / num_attention_heads)
#
#layer_input = tf.compat.v1.placeholder(tf.float32, shape=(batch_size*seq_length, hidden_size))
## Tensor of shape [batch_size, from_seq_length, to_seq_length].
#attention_mask = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, seq_length, seq_length))
#
#output_rnn = transformer_cell(input_tensor=layer_input,#tf.reshape(layer_input, [-1, hidden_size]),
# attention_mask=attention_mask,
# hidden_size=hidden_size,
# num_attention_heads=num_attention_heads,
# attention_head_size=size_per_head,
# batch_size = batch_size,
# seq_length = seq_length,
# intermediate_size=1280)
| 42.291667
| 100
| 0.635468
| 121
| 1,015
| 4.933884
| 0.338843
| 0.120603
| 0.113903
| 0.090452
| 0.328308
| 0.237856
| 0.177554
| 0.177554
| 0.177554
| 0.177554
| 0
| 0.02965
| 0.268966
| 1,015
| 23
| 101
| 44.130435
| 0.774933
| 0.90936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2fa394fb34c74e16598d361838124eaea72619af
| 109
|
py
|
Python
|
backend/services/web/api/files/interface.py
|
noasck/EduARd
|
f4a95a92d513b017ff2f0b0c3591207a741b1110
|
[
"MIT"
] | 3
|
2021-04-16T14:37:47.000Z
|
2021-06-28T21:13:50.000Z
|
backend/services/web/api/files/interface.py
|
noasck/EduARd
|
f4a95a92d513b017ff2f0b0c3591207a741b1110
|
[
"MIT"
] | 1
|
2021-04-17T14:45:59.000Z
|
2021-04-17T14:45:59.000Z
|
backend/services/web/api/files/interface.py
|
noasck/EduARd
|
f4a95a92d513b017ff2f0b0c3591207a741b1110
|
[
"MIT"
] | null | null | null |
from typing_extensions import TypedDict
class IFile(TypedDict, total=False):
id: int
filename: str
| 15.571429
| 39
| 0.743119
| 14
| 109
| 5.714286
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192661
| 109
| 6
| 40
| 18.166667
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
2ff558a5f19db2df344154d8a9eff0fb4b842486
| 299
|
py
|
Python
|
Dataset/Leetcode/test/53/589.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/53/589.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/53/589.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, nums: List[int]) -> int:
local_max_sum, global_max_sum = nums[0], nums[0]
for num in nums[1:]:
local_max_sum = max(num, local_max_sum + num)
global_max_sum = max(global_max_sum, local_max_sum)
return global_max_sum
| 33.222222
| 63
| 0.622074
| 47
| 299
| 3.617021
| 0.382979
| 0.282353
| 0.258824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.277592
| 299
| 8
| 64
| 37.375
| 0.773148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ff25b9dab4a541d3a2c0dc1b783f7e334e3ee6df
| 364
|
py
|
Python
|
mmhoidet/core/hoi/__init__.py
|
noobying/mmhoidet
|
138e3fbf34ecbc66f98ad26b10e08a9d49a61c38
|
[
"Apache-2.0"
] | 2
|
2021-09-06T13:09:42.000Z
|
2021-09-15T09:18:00.000Z
|
mmhoidet/core/hoi/__init__.py
|
noobying/mmhoidet
|
138e3fbf34ecbc66f98ad26b10e08a9d49a61c38
|
[
"Apache-2.0"
] | null | null | null |
mmhoidet/core/hoi/__init__.py
|
noobying/mmhoidet
|
138e3fbf34ecbc66f98ad26b10e08a9d49a61c38
|
[
"Apache-2.0"
] | null | null | null |
# Public API of the HOI (human-object interaction) core subpackage.
from .assigners import BaseAssigner, HungarianAssigner
from .builder import build_sampler, build_assigner
from .samplers import BaseSampler, PseudoSampler, SamplingResult
from .transforms import hoi2result
# Explicit export list: wildcard imports of this package expose only these.
__all__ = [
    'BaseAssigner', 'HungarianAssigner', 'build_assigner', 'build_sampler',
    'BaseSampler', 'PseudoSampler', 'SamplingResult', 'hoi2result'
]
| 36.4
| 75
| 0.793956
| 33
| 364
| 8.515152
| 0.484848
| 0.206406
| 0.270463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006192
| 0.112637
| 364
| 9
| 76
| 40.444444
| 0.863777
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ff409c4db87ccb558ceb6212ad3bca445cc63841
| 168
|
py
|
Python
|
Term 2/21/1-file.py
|
theseana/ajisa
|
1c92b00acd3fad7c92b8222b5f6a86fc6db4bcae
|
[
"MIT"
] | null | null | null |
Term 2/21/1-file.py
|
theseana/ajisa
|
1c92b00acd3fad7c92b8222b5f6a86fc6db4bcae
|
[
"MIT"
] | null | null | null |
Term 2/21/1-file.py
|
theseana/ajisa
|
1c92b00acd3fad7c92b8222b5f6a86fc6db4bcae
|
[
"MIT"
] | null | null | null |
# Write a fixed list of names to names.txt, one per line.
# Fix: use a `with` block so the file is closed even if a write raises;
# the original opened the file manually and only closed it on success.
names = ['amirreza', 'setayesh', 'artin', 'iliya', 'mohammadjavad']
with open('names.txt', 'w') as file:
    for name in names:
        file.write(name + '\n')
| 18.666667
| 29
| 0.684524
| 27
| 168
| 4.259259
| 0.481481
| 0.391304
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065476
| 168
| 9
| 30
| 18.666667
| 0.732484
| 0
| 0
| 0
| 0
| 0
| 0.349112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ff5606d8330c5567ad9e9e09631f157af4a919bd
| 201
|
py
|
Python
|
htkupdate.py
|
otherbeast/hackers-tool-kit
|
12991889db1f6843dde82e7da4b4cdfb50740da5
|
[
"Apache-2.0"
] | 393
|
2019-01-21T05:52:54.000Z
|
2022-03-29T06:07:04.000Z
|
htkupdate.py
|
otherbeast/hackers-tool-kit
|
12991889db1f6843dde82e7da4b4cdfb50740da5
|
[
"Apache-2.0"
] | 19
|
2019-02-22T00:49:28.000Z
|
2021-12-30T20:28:59.000Z
|
htkupdate.py
|
otherbeast/hackers-tool-kit
|
12991889db1f6843dde82e7da4b4cdfb50740da5
|
[
"Apache-2.0"
] | 138
|
2019-03-15T23:22:19.000Z
|
2022-03-20T17:19:09.000Z
|
import os

# Update script: wipes and re-clones the hackers-tool-kit repository.
# Fix: `print value` is Python-2-only syntax (SyntaxError under Python 3);
# a parenthesized single argument works identically under both versions.
# NOTE(review): the final command runs `rm -fr` on /root/hackers-tool-kit
# before cloning -- destructive if the subsequent clone fails.
print("UPDATING...")
os.system("cd")  # NOTE(review): no-op -- `cd` runs in a throwaway subshell
os.system('cd /root/ && rm -fr hackers-tool-kit && git clone https://github.com/unkn0wnh4ckr/hackers-tool-kit && echo "[UPDATED]: Restart Your Terminal"')
| 50.25
| 155
| 0.701493
| 30
| 201
| 4.7
| 0.766667
| 0.113475
| 0.141844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.104478
| 201
| 4
| 155
| 50.25
| 0.772222
| 0
| 0
| 0
| 0
| 0.25
| 0.767327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ff57741d06b201b53f9b59f8f8fe670385ec9aec
| 198
|
py
|
Python
|
patreonmanager/apps.py
|
crydotsnake/djangogirls
|
0e764294085d6d7d3c4f61a7fe36f91640abedcd
|
[
"BSD-3-Clause"
] | 446
|
2015-01-04T20:58:26.000Z
|
2022-03-30T23:08:26.000Z
|
patreonmanager/apps.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 649
|
2015-01-09T23:42:14.000Z
|
2022-03-31T17:27:19.000Z
|
patreonmanager/apps.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 319
|
2015-01-06T20:58:42.000Z
|
2022-03-30T06:29:04.000Z
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
# Django application configuration for the "patreonmanager" app.
class PatreonManagerConfig(AppConfig):
    # Dotted app label used by Django's app registry.
    name = 'patreonmanager'
    # Lazily translated so the active locale is applied when rendered.
    verbose_name = _("Patreon Manager")
| 24.75
| 54
| 0.782828
| 22
| 198
| 6.863636
| 0.772727
| 0.13245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146465
| 198
| 7
| 55
| 28.285714
| 0.893491
| 0
| 0
| 0
| 0
| 0
| 0.146465
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ff95cfb4e34bef3bca6c8137953c87f8b4fd0840
| 121
|
py
|
Python
|
006/006.py
|
ridvanaltun/project-euler-solutions
|
2a413d06de1402435d4e5ac8442cd0ab4e465bf4
|
[
"MIT"
] | null | null | null |
006/006.py
|
ridvanaltun/project-euler-solutions
|
2a413d06de1402435d4e5ac8442cd0ab4e465bf4
|
[
"MIT"
] | null | null | null |
006/006.py
|
ridvanaltun/project-euler-solutions
|
2a413d06de1402435d4e5ac8442cd0ab4e465bf4
|
[
"MIT"
] | null | null | null |
# Project Euler #6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
# Fix: the accumulator was named `sum`, shadowing the builtin; renamed.
sq_sum, total = 0, 0
for i in range(1, 101):
    sq_sum = sq_sum + (i * i)
    total = total + i
print((total * total) - sq_sum)
| 15.125
| 29
| 0.528926
| 25
| 121
| 2.4
| 0.4
| 0.333333
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.305785
| 121
| 7
| 30
| 17.285714
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4440f487f379a731540f734b1c318940cea0c7e4
| 29
|
py
|
Python
|
cross_compile/__init__.py
|
aws-ros-dev/cross_compile
|
a0cb03e7ed0d4fa2e637ea94af57c20042bc8bd9
|
[
"Apache-2.0"
] | 2
|
2019-06-18T22:23:37.000Z
|
2019-10-08T18:42:28.000Z
|
cross_compile/__init__.py
|
aws-ros-dev/cross_compile
|
a0cb03e7ed0d4fa2e637ea94af57c20042bc8bd9
|
[
"Apache-2.0"
] | 16
|
2019-08-06T22:11:09.000Z
|
2021-06-02T02:45:19.000Z
|
cross_compile/__init__.py
|
aws-ros-dev/cross_compile
|
a0cb03e7ed0d4fa2e637ea94af57c20042bc8bd9
|
[
"Apache-2.0"
] | null | null | null |
"""Cross Compile package."""
| 14.5
| 28
| 0.655172
| 3
| 29
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.730769
| 0.758621
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
448b9cdfbd54af23caa8644685b2841179fe4d75
| 239
|
py
|
Python
|
firecares/firecares_core/ext/invitations/adapters.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 12
|
2016-01-30T02:28:35.000Z
|
2019-05-29T15:49:56.000Z
|
firecares/firecares_core/ext/invitations/adapters.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 455
|
2015-07-27T20:21:56.000Z
|
2022-03-11T23:26:20.000Z
|
firecares/firecares_core/ext/invitations/adapters.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 14
|
2015-07-29T09:45:53.000Z
|
2020-10-21T20:03:17.000Z
|
from invitations.adapters import BaseInvitationsAdapter
from registration.signals import user_registered
# Bridges django-invitations with django-registration's signal machinery.
class DepartmentInvitationsAdapter(BaseInvitationsAdapter):
    def get_user_signed_up_signal(self):
        # Tell the invitations app which signal marks a completed sign-up.
        return user_registered
| 29.875
| 59
| 0.849372
| 24
| 239
| 8.208333
| 0.75
| 0.142132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117155
| 239
| 7
| 60
| 34.142857
| 0.933649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
4495366c3c6fa97f0940d198cabe5dd5baee3692
| 60
|
py
|
Python
|
dl_in_iot_course/l05_tflite_delegate/__init__.py
|
antmicro/dl-in-iot-course
|
2072b88c97c8f643de6055ee7e3b1506303dab98
|
[
"Apache-2.0"
] | null | null | null |
dl_in_iot_course/l05_tflite_delegate/__init__.py
|
antmicro/dl-in-iot-course
|
2072b88c97c8f643de6055ee7e3b1506303dab98
|
[
"Apache-2.0"
] | 1
|
2021-11-09T08:47:50.000Z
|
2021-11-09T08:47:50.000Z
|
dl_in_iot_course/l05_tflite_delegate/__init__.py
|
antmicro/dl-in-iot-course
|
2072b88c97c8f643de6055ee7e3b1506303dab98
|
[
"Apache-2.0"
] | 2
|
2021-11-04T19:52:21.000Z
|
2021-11-05T18:58:44.000Z
|
"""
Module containing tasks regarding TFLite delegates.
"""
| 15
| 51
| 0.75
| 6
| 60
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 60
| 3
| 52
| 20
| 0.865385
| 0.85
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
923e6e3961f511d62da01bc03cc5bb9d98a31c78
| 80
|
py
|
Python
|
get_parks.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 21
|
2017-11-08T12:56:31.000Z
|
2021-08-19T17:56:35.000Z
|
get_parks.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 5
|
2017-12-16T10:11:35.000Z
|
2018-03-21T09:30:25.000Z
|
get_parks.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 33
|
2017-12-11T12:30:42.000Z
|
2018-04-10T01:48:38.000Z
|
from monocle.parks import *
# Parks is provided by the wildcard import above (monocle.parks).
with Parks() as park:
    # NOTE(review): True presumably forces a full reset of the stored
    # park data -- confirm against monocle.parks.Parks.reset_parks.
    park.reset_parks(True)
| 11.428571
| 27
| 0.7
| 12
| 80
| 4.583333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 80
| 6
| 28
| 13.333333
| 0.859375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
926204a877eb44abcc4ae6601aad47f8065816ac
| 721
|
py
|
Python
|
acrofestival/booking/forms.py
|
ivoruetsche/acrofestival
|
7ec798a577fbd66f9b0503a009e13cebe264afd1
|
[
"MIT"
] | null | null | null |
acrofestival/booking/forms.py
|
ivoruetsche/acrofestival
|
7ec798a577fbd66f9b0503a009e13cebe264afd1
|
[
"MIT"
] | 4
|
2020-06-06T01:28:01.000Z
|
2021-06-10T22:37:34.000Z
|
acrofestival/booking/forms.py
|
ivoruetsche/acrofestival
|
7ec798a577fbd66f9b0503a009e13cebe264afd1
|
[
"MIT"
] | 1
|
2020-03-02T22:12:21.000Z
|
2020-03-02T22:12:21.000Z
|
from django import forms
class UrbanAcroForm(forms.Form):
    """Booking form for the Urban Acro event; contact fields are mandatory."""
    name = forms.CharField(required=True)
    address = forms.CharField(required=True)
    phone = forms.CharField(required=True)
    email = forms.EmailField(required=True)
    option = forms.CharField(required=False)
    comment = forms.CharField(required=False)
class WinterAcroForm(forms.Form):
    """Booking form for the Winter Acro event; every field is optional."""
    name = forms.CharField(required=False)
    address = forms.CharField(required=False)
    phone = forms.CharField(required=False)
    # NOTE(review): plain CharField, unlike UrbanAcroForm's EmailField --
    # no email-format validation is applied here; confirm this is intended.
    email = forms.CharField(required=False)
    option = forms.CharField(required=False)
    allergies = forms.CharField(required=False)
    donation = forms.CharField(required=False)
    date = forms.DateField(required=False)
| 32.772727
| 47
| 0.739251
| 82
| 721
| 6.5
| 0.280488
| 0.315197
| 0.49531
| 0.45591
| 0.255159
| 0.131332
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152566
| 721
| 21
| 48
| 34.333333
| 0.87234
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
927cce0ce2eabfab247e5f0e2c9384845f1ce34a
| 64
|
py
|
Python
|
sequence_field/exceptions.py
|
gnrfan/django-sequence-field
|
466f609a08ef39203336ca912dae57d2e12a4a30
|
[
"BSD-3-Clause"
] | 15
|
2015-06-03T03:50:31.000Z
|
2021-06-14T15:14:02.000Z
|
sequence_field/exceptions.py
|
gnrfan/django-sequence-field
|
466f609a08ef39203336ca912dae57d2e12a4a30
|
[
"BSD-3-Clause"
] | 2
|
2015-03-05T15:25:28.000Z
|
2017-01-01T10:11:22.000Z
|
sequence_field/exceptions.py
|
gnrfan/django-sequence-field
|
466f609a08ef39203336ca912dae57d2e12a4a30
|
[
"BSD-3-Clause"
] | 20
|
2015-01-08T00:33:40.000Z
|
2021-09-30T16:02:22.000Z
|
# Exceptions
class SequenceFieldException(Exception):
pass
| 12.8
| 40
| 0.78125
| 5
| 64
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 64
| 4
| 41
| 16
| 0.925926
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
2baa20c0aa6b23dad4a344a86a36d26e489a3d0c
| 330
|
py
|
Python
|
backend/project404_t8/project404_t8/router.py
|
peterweckend/group-project-cmput404
|
d59912dbe0252868452a2e142e4c20f953792740
|
[
"MIT"
] | 5
|
2019-02-22T21:15:48.000Z
|
2019-03-16T22:59:17.000Z
|
backend/project404_t8/project404_t8/router.py
|
cjlee1/group-project-cmput404
|
791ac00494b1005d5b3792492060806bcddc5cf6
|
[
"MIT"
] | 66
|
2019-03-13T07:03:42.000Z
|
2022-03-11T23:41:00.000Z
|
backend/project404_t8/project404_t8/router.py
|
cjlee1/group-project-cmput404
|
791ac00494b1005d5b3792492060806bcddc5cf6
|
[
"MIT"
] | 7
|
2019-01-25T21:09:23.000Z
|
2019-07-20T16:11:33.000Z
|
from rest_framework import routers
import API.viewsets as Viewsets
# Build the DRF router exposing the public API endpoints (no trailing slash).
api_router = routers.SimpleRouter(trailing_slash=False)
_ROUTES = (
    ('posts', Viewsets.PostsViewSet),
    ('author', Viewsets.AuthorViewSet),
    ('friendrequest', Viewsets.FriendRequestViewSet),
)
for _prefix, _viewset in _ROUTES:
    api_router.register(_prefix, _viewset)
| 33
| 68
| 0.839394
| 43
| 330
| 6.302326
| 0.581395
| 0.132841
| 0.188192
| 0.199262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 330
| 9
| 69
| 36.666667
| 0.888525
| 0.075758
| 0
| 0
| 0
| 0
| 0.07947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2bc3cd8f8392faf88888db3fbe677503fb340c38
| 3,996
|
py
|
Python
|
show_data.py
|
liriqingone/IP_Agent
|
f562dfe1e4096ed8496767cffa2596704e285861
|
[
"Apache-2.0"
] | null | null | null |
show_data.py
|
liriqingone/IP_Agent
|
f562dfe1e4096ed8496767cffa2596704e285861
|
[
"Apache-2.0"
] | null | null | null |
show_data.py
|
liriqingone/IP_Agent
|
f562dfe1e4096ed8496767cffa2596704e285861
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[5]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pymssql
import re
# In[8]:
# NOTE(review): Python 2 syntax (`print` statements) -- will not run on Python 3.
# SECURITY: database host and credentials are hard-coded below; move to config.
try:
    # Fetch download records for the 'USBank' project and plot counts per case.
    con = pymssql.connect(host='welldatadb.c9rukeih98lt.us-west-1.rds.amazonaws.com',user='welltitled',password='Welltitled888', database='WellDataDB',charset="utf8")
    sql = "select d.id,d.date,d.is_download,d.case_id from welldata_project p left join welldata_case c on p.id=c.project_id left join dbo.welldata_downloadfile d on c.name_en=d.case_id where p.name_en='USBank'"
    dfGSE = pd.read_sql(con=con, sql=sql)
    print dfGSE.head()
    sns.catplot(x="case_id", kind="count", palette="ch:.25", data=dfGSE)
    print(plt.show())  # plt.show() returns None, so this prints "None"
except pymssql.Error as e:
    print e
print 0.392157*2.750408e+08  # ad-hoc calculation left in the script
# # `OrigYear` and `wac`
# In[40]:
# get_ipython().run_cell_magic(u'time', u'', u"\ndfSumm_wac = \\\ndfGSE[['origyear', 'histdate', 'wac', 'fico', 'vsmm', 'curbal']] \\\n.assign(wac = lambda row: 0.5 * np.round(row.wac / 0.5) \\\n ,fico = lambda row: 100 * np.round(row.fico / 100) \\\n ,vsmm = lambda row: row.vsmm * row.curbal \\\n ) \\\n.groupby(['origyear', 'histdate', 'wac'], as_index = False) \\\n.agg({'vsmm': 'sum' \\\n , 'curbal': 'sum'}) \\\n.assign(vsmm = lambda row: row.vsmm / row.curbal \\\n ,histdate = lambda row: pd.to_datetime(row.histdate)) \\")
#
#
# # In[44]:
#
#
# sns.FacetGrid(data = dfSumm_wac.query('origyear > 2009 & wac > 3 & wac < 6.5'), col = 'origyear'
# , row = 'wac',
# despine = False, height = 3) \
# .map_dataframe(plt.plot, 'histdate', 'vsmm')
#
#
# # # `Origyear` and `wac` and `fico`
# #
# # 下边 `arrWAC`和 `arrFICO` 是用户的选择。格式是:
# #
# # `arrWAC = np.array([最小值,最大值,宽度值])
# #
# # 这个的目的是尽量保留数据, 把筛选留到最后
#
# # In[153]:
#
#
# get_ipython().run_cell_magic(u'time', u'', u"\narrWAC = np.array([3.5, 6.5, 0.5])\narrFICO = np.array([500, 800, 100])\n\ndfSumm_wac_fico = \\\ndfGSE[['origyear', 'histdate', 'wac', 'fico', 'vsmm', 'curbal', 'cnt']] \\\n.assign(wac = lambda row: arrWAC[2] * \\\n np.round( \\\n np.where(row.wac < arrWAC[0] \\\n , arrWAC[0]\n ,np.where(row.wac > arrWAC[1]\n , arrWAC[1]\n , row.wac) \\\n ) \\\n / arrWAC[2]) \\\n ,fico = lambda row: arrFICO[2] * \\\n np.round( \\\n np.where(row.fico < arrFICO[0] \\\n , arrFICO[0] \\\n , np.where(row.fico > arrFICO[1] \\\n , arrFICO[1] \\\n , row.fico))\\\n / arrFICO[2]) \\\n ,vsmm = lambda row: row.vsmm * row.curbal \\\n ) \\\n.groupby(['origyear', 'histdate', 'wac', 'fico'], as_index = False) \\\n.agg({'vsmm': 'sum' \\\n , 'curbal': 'sum'\\\n , 'cnt': 'sum'}) \\\n.assign(vsmm = lambda row: row.vsmm / row.curbal \\\n ,histdate = lambda row: pd.to_datetime(row.histdate)) \\")
#
#
# # In[154]:
#
#
# dfSumm_wac_fico_orig = dfSumm_wac_fico[['origyear', 'histdate', 'wac', 'fico', 'curbal', 'cnt']] .groupby(['origyear', 'histdate', 'wac', 'fico'], as_index = False) .agg('sum') .groupby(['origyear', 'wac', 'fico'], as_index = False) .agg('max') .rename(index = str, columns = {'curbal': 'curbal_orig', 'cnt': 'cnt_orig'}) .drop(['histdate'], axis = 1)
#
#
#
# # In[155]:
#
#
# dfSumm_wac_fico = dfSumm_wac_fico .merge(dfSumm_wac_fico_orig
# , left_on = ['origyear', 'wac', 'fico']
# , right_on = ['origyear', 'wac', 'fico']) \
#
#
# # In[161]:
#
#
# sns.FacetGrid(data = dfSumm_wac_fico .query('origyear > 2009 & origyear < 2014 & cnt_orig > 20000')
# , col = 'origyear'
# , row = 'wac', hue = 'fico'
# , sharey = False
# ,despine = False, height = 3) \
# .map_dataframe(sns.lineplot, 'histdate', 'vsmm', lw = 3)
| 48.731707
| 1,289
| 0.539289
| 532
| 3,996
| 3.960526
| 0.298872
| 0.049834
| 0.054105
| 0.05458
| 0.38206
| 0.339345
| 0.268628
| 0.231609
| 0.190793
| 0.163265
| 0
| 0.035535
| 0.253504
| 3,996
| 81
| 1,290
| 49.333333
| 0.670801
| 0.783534
| 0
| 0
| 0
| 0.0625
| 0.385876
| 0.139975
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.0625
| 0.375
| null | null | 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
2bdc73fe69b3ab66102d99083cd1e9e02dbbcd5d
| 188
|
py
|
Python
|
yambopy/plot/spectra.py
|
QU-XIAO/yambopy
|
ff65a4f90c1bfefe642ebc61e490efe781709ff9
|
[
"BSD-3-Clause"
] | 21
|
2016-04-07T20:53:29.000Z
|
2021-05-14T08:06:02.000Z
|
yambopy/plot/spectra.py
|
alexmoratalla/yambopy
|
8ec0e1e18868ccaadb3eab36c55e6a47021e257d
|
[
"BSD-3-Clause"
] | 22
|
2016-06-14T22:29:47.000Z
|
2021-09-16T15:36:26.000Z
|
yambopy/plot/spectra.py
|
alexmoratalla/yambopy
|
8ec0e1e18868ccaadb3eab36c55e6a47021e257d
|
[
"BSD-3-Clause"
] | 15
|
2016-06-14T18:40:57.000Z
|
2021-08-07T13:17:43.000Z
|
class YamboSpectra():
    """Hold an energy grid and its optical absorption spectra for display."""
    def __init__(self, energies, data):
        # Keep both arrays as given; no copying or validation is performed.
        self.data = data
        self.energies = energies
| 20.888889
| 44
| 0.590426
| 19
| 188
| 5.631579
| 0.631579
| 0.224299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.31383
| 188
| 8
| 45
| 23.5
| 0.829457
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2bed312f5331da546ffb3e61097ef926eb9510a2
| 240
|
py
|
Python
|
tests/config.py
|
null-none/fhir-py
|
f5e7e13f88188c944696c146954f823be922cbeb
|
[
"MIT"
] | null | null | null |
tests/config.py
|
null-none/fhir-py
|
f5e7e13f88188c944696c146954f823be922cbeb
|
[
"MIT"
] | null | null | null |
tests/config.py
|
null-none/fhir-py
|
f5e7e13f88188c944696c146954f823be922cbeb
|
[
"MIT"
] | null | null | null |
import os
from aiohttp import BasicAuth
# Test configuration: FHIR server endpoint, overridable via the environment.
FHIR_SERVER_URL = os.environ.get("FHIR_SERVER_URL", "http://localhost:8080/fhir")
# HTTP Basic auth header value; defaults to the local dev root/secret pair.
FHIR_SERVER_AUTHORIZATION = os.environ.get(
    "FHIR_SERVER_AUTHORIZATION", BasicAuth("root", "secret").encode()
)
| 26.666667
| 81
| 0.766667
| 32
| 240
| 5.5
| 0.53125
| 0.227273
| 0.147727
| 0.181818
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018433
| 0.095833
| 240
| 8
| 82
| 30
| 0.792627
| 0
| 0
| 0
| 0
| 0
| 0.316667
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2bf5a286e05214b62beae5fa0cc265910bdbdbbc
| 548
|
py
|
Python
|
lib/pylint/test/input/func_e0101.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 3
|
2018-11-25T01:09:55.000Z
|
2021-08-24T01:56:36.000Z
|
lib/pylint/test/input/func_e0101.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 7
|
2021-02-08T20:22:15.000Z
|
2022-03-11T23:19:41.000Z
|
lib/pylint/test/input/func_e0101.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 3
|
2018-11-09T03:38:09.000Z
|
2020-02-24T06:26:10.000Z
|
# pylint: disable=R0903
"""test __init__ return
"""
__revision__ = 'yo'
# NOTE: this is a pylint functional-test fixture. The __init__ bodies below
# intentionally exercise E0101 (explicit return / yield in __init__).
# Do not "fix" them -- the defects ARE the test data.
class MyClass(object):
    """dummy class"""
    def __init__(self):
        return 1
class MyClass2(object):
    """dummy class"""
    def __init__(self):
        return
class MyClass3(object):
    """dummy class"""
    def __init__(self):
        return None
class MyClass4(object):
    """dummy class"""
    def __init__(self):
        # 'yield' turns __init__ itself into a generator function
        yield None
class MyClass5(object):
    """dummy class"""
    def __init__(self):
        # here the generator is the lambda, not __init__ itself
        self.callable = lambda: (yield None)
| 15.222222
| 44
| 0.59854
| 60
| 548
| 5
| 0.383333
| 0.183333
| 0.266667
| 0.316667
| 0.51
| 0.51
| 0.33
| 0
| 0
| 0
| 0
| 0.022222
| 0.260949
| 548
| 35
| 45
| 15.657143
| 0.718519
| 0.187956
| 0
| 0.3125
| 0
| 0
| 0.004843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3125
| false
| 0
| 0
| 0.1875
| 0.8125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
9200bad3f6b2369900c4fd8dcf5ca12a1bb52381
| 297
|
py
|
Python
|
erri/python/lesson_47/pokedex.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
erri/python/lesson_47/pokedex.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | 16
|
2020-05-30T12:38:13.000Z
|
2022-02-19T09:23:31.000Z
|
erri/python/lesson_47/pokedex.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
from pokemon import Pokemon
class Pokedex:
    """Container for a collection of pokemons with a printable listing."""

    def __init__(self, pokemons=None):
        # Fix: the original default `pokemons=[]` is a mutable default
        # argument -- every Pokedex() created without arguments shared ONE
        # list, so adding to one pokedex mutated all of them. Use a None
        # sentinel and create a fresh list per instance instead.
        self.pokemons = [] if pokemons is None else pokemons

    def __str__(self):
        # Header line, then one pokemon per tab-indented line.
        return "pokemons=\n\t" + '\n\t'.join([str(pokemon) for pokemon in self.pokemons])

    def add(self, pokemon):
        """Append *pokemon* to this pokedex's collection."""
        self.pokemons.append(pokemon)
| 21.214286
| 89
| 0.643098
| 38
| 297
| 4.815789
| 0.473684
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225589
| 297
| 13
| 90
| 22.846154
| 0.795652
| 0
| 0
| 0
| 0
| 0
| 0.057239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
a609c292f61df1cc25f65cc25bd963f12412cf06
| 38
|
py
|
Python
|
venv/lib/python3.6/encodings/kz1048.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-04-17T13:35:50.000Z
|
2021-12-21T00:11:36.000Z
|
venv/lib/python3.6/encodings/kz1048.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2021-03-31T19:51:24.000Z
|
2021-06-10T23:05:09.000Z
|
venv/lib/python3.6/encodings/kz1048.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-10-01T08:47:35.000Z
|
2020-07-11T06:32:16.000Z
|
/usr/lib/python3.6/encodings/kz1048.py
| 38
| 38
| 0.815789
| 7
| 38
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 0
| 38
| 1
| 38
| 38
| 0.657895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a613753a0f678d9a92eded4c22220a7eb2bde121
| 1,111
|
py
|
Python
|
tests/test_read_hdf5.py
|
kyledecker/Heart_Rate_Monitor
|
858eff6a48097327ba92d516ad94632c3ad66843
|
[
"MIT"
] | null | null | null |
tests/test_read_hdf5.py
|
kyledecker/Heart_Rate_Monitor
|
858eff6a48097327ba92d516ad94632c3ad66843
|
[
"MIT"
] | null | null | null |
tests/test_read_hdf5.py
|
kyledecker/Heart_Rate_Monitor
|
858eff6a48097327ba92d516ad94632c3ad66843
|
[
"MIT"
] | null | null | null |
def test_read_hdf5():
    """Exercise read_hdf5 in both data mode and info mode.

    Improvements over the original: the duplicated fixture-creation code is
    extracted into a helper, and the non-portable ``os.system('rm tmp.h5')``
    shell-out is replaced with ``os.remove``.
    """
    import os.path
    import sys
    # Make the package root (parent of tests/) importable.
    sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
    from read_hdf5 import read_hdf5
    import os
    import numpy as np
    import h5py

    def _make_tmp_h5(path='tmp.h5'):
        # Create a temporary HDF5 file with fs/ecg/pp uint16 datasets.
        fs = np.uint16(np.array([100]))
        ecg = np.uint16(np.array([2, 3]))
        pp = np.uint16(np.array([4, 5]))
        f = h5py.File(path, 'w')
        f.create_dataset('fs', data=fs)
        f.create_dataset('ecg', data=ecg)
        f.create_dataset('pp', data=pp)
        f.close()

    # Data mode (init_flag=0): samples come back interleaved ecg/pp.
    _make_tmp_h5()
    data = read_hdf5('tmp.h5', offset=0, count_read=4, init_flag=0)
    os.remove('tmp.h5')
    assert np.array_equal(data, [2, 4, 3, 5])

    # Info mode (init_flag=1): [total interleaved sample count, fs].
    _make_tmp_h5()
    data_info = read_hdf5('tmp.h5', offset=0, count_read=2, init_flag=1)
    os.remove('tmp.h5')
    assert np.array_equal(data_info, [5*2, 100])
| 31.742857
| 89
| 0.626463
| 197
| 1,111
| 3.411168
| 0.263959
| 0.083333
| 0.089286
| 0.133929
| 0.71131
| 0.71131
| 0.71131
| 0.71131
| 0.625
| 0.625
| 0
| 0.061179
| 0.190819
| 1,111
| 34
| 90
| 32.676471
| 0.686318
| 0.047705
| 0
| 0.6
| 0
| 0
| 0.054976
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.033333
| false
| 0
| 0.2
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a69084691ead154f1294eda4aa284570b2d2adbf
| 131
|
py
|
Python
|
joplin/pages/official_documents_collection/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/pages/official_documents_collection/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/pages/official_documents_collection/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
from django.apps import AppConfig
# Django app configuration for the official documents collection pages app.
class OfficialDocumentsCollectionConfig(AppConfig):
    # Dotted label Django's app registry uses to identify this application.
    name = 'official_documents_collection'
| 21.833333
| 51
| 0.832061
| 12
| 131
| 8.916667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114504
| 131
| 5
| 52
| 26.2
| 0.922414
| 0
| 0
| 0
| 0
| 0
| 0.221374
| 0.221374
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
a6bdd1e0026132efc3537733ac817e4914b9e1ef
| 254
|
py
|
Python
|
NSE/indices.py
|
GamesBond008/NSE-India-Scrapper
|
9963d1b99ee5557e61d6be329bf9f50444d2820a
|
[
"MIT"
] | 1
|
2021-06-01T19:36:42.000Z
|
2021-06-01T19:36:42.000Z
|
NSE/indices.py
|
GamesBond008/NSE-India-Scrapper
|
9963d1b99ee5557e61d6be329bf9f50444d2820a
|
[
"MIT"
] | null | null | null |
NSE/indices.py
|
GamesBond008/NSE-India-Scrapper
|
9963d1b99ee5557e61d6be329bf9f50444d2820a
|
[
"MIT"
] | null | null | null |
from ._MarketData import MarketData
# NSE "all indices" endpoint wrapper built on the shared MarketData machinery.
class Indices(MarketData):
    def __init__(self,timeout: int=5):
        # timeout: request timeout in seconds, forwarded to MarketData.
        super().__init__(timeout)
        self._BaseURL="https://www.nseindia.com/api/allIndices"
    def IndicesMarketWatch(self):
        # Fetch the full indices market-watch payload from the NSE API.
        return self._GrabData(self._BaseURL)
| 36.285714
| 57
| 0.783465
| 32
| 254
| 5.84375
| 0.6875
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00431
| 0.086614
| 254
| 7
| 58
| 36.285714
| 0.801724
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
a6e6e31b01c2e6112f8d5f7ef425dbbf969157e4
| 42
|
py
|
Python
|
dingtalk_python_sdk/__init__.py
|
jiasir/dingtalk-python-sdk
|
913188347bee8c3537aacefc64f28a0e5ce8f383
|
[
"MIT"
] | 2
|
2019-05-13T09:41:39.000Z
|
2021-11-16T11:21:59.000Z
|
dingtalk_python_sdk/__init__.py
|
jiasir/dingtalk-python-sdk
|
913188347bee8c3537aacefc64f28a0e5ce8f383
|
[
"MIT"
] | null | null | null |
dingtalk_python_sdk/__init__.py
|
jiasir/dingtalk-python-sdk
|
913188347bee8c3537aacefc64f28a0e5ce8f383
|
[
"MIT"
] | 1
|
2019-03-06T09:42:01.000Z
|
2019-03-06T09:42:01.000Z
|
__all__ = ["robot"]
__version__ = '0.0.1'
| 14
| 21
| 0.619048
| 6
| 42
| 3
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.142857
| 42
| 2
| 22
| 21
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a6e9550e9909c69bce0c6efb10f4faea541c4361
| 190
|
py
|
Python
|
src/utils.py
|
eklitzke/icfp08
|
d0bbd1a0900a744286f8a1efec3ada8dec275431
|
[
"0BSD"
] | 1
|
2016-05-08T10:38:12.000Z
|
2016-05-08T10:38:12.000Z
|
src/utils.py
|
eklitzke/icfp08
|
d0bbd1a0900a744286f8a1efec3ada8dec275431
|
[
"0BSD"
] | null | null | null |
src/utils.py
|
eklitzke/icfp08
|
d0bbd1a0900a744286f8a1efec3ada8dec275431
|
[
"0BSD"
] | null | null | null |
import sys
import logging
# Configure root logging at import time: DEBUG level, timestamped single-line
# records, emitted to stderr so program output on stdout stays clean.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M', stream=sys.stderr)
| 23.75
| 69
| 0.647368
| 26
| 190
| 4.730769
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.163158
| 190
| 7
| 70
| 27.142857
| 0.754717
| 0
| 0
| 0
| 0
| 0
| 0.328042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a6f9e948f059d84524b4285af4dee39e818ba9d7
| 184
|
py
|
Python
|
python_polar_coding/polar_codes/fast_scan/codec.py
|
MingxuZhang/python-polar-coding
|
bfab8e1cdcffaefea8e6d0209b13465fbd7fa936
|
[
"MIT"
] | 2
|
2021-12-07T09:52:15.000Z
|
2022-01-06T14:35:37.000Z
|
python_polar_coding/polar_codes/fast_scan/codec.py
|
manhduc1811/python-polar-coding
|
bfab8e1cdcffaefea8e6d0209b13465fbd7fa936
|
[
"MIT"
] | null | null | null |
python_polar_coding/polar_codes/fast_scan/codec.py
|
manhduc1811/python-polar-coding
|
bfab8e1cdcffaefea8e6d0209b13465fbd7fa936
|
[
"MIT"
] | 4
|
2020-07-03T14:20:04.000Z
|
2021-07-04T13:20:40.000Z
|
from python_polar_coding.polar_codes.rc_scan import RCSCANPolarCodec
from .decoder import FastSCANDecoder
# Fast-SCAN codec: reuses the RC-SCAN codec, swapping in the Fast-SCAN decoder.
class FastSCANCodec(RCSCANPolarCodec):
    # Class attribute consumed by the base codec to instantiate its decoder.
    decoder_class = FastSCANDecoder
| 23
| 68
| 0.853261
| 20
| 184
| 7.6
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 184
| 7
| 69
| 26.285714
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5b25ee0b235628c7ded5d4b5d1800b04d349a836
| 175
|
py
|
Python
|
python/django/python-hello-world/src/helloapp/views.py
|
davidponder/cloud-code-samples
|
9d7f08e48fd8355c31ac428ff660a24bf1bef742
|
[
"0BSD"
] | 319
|
2019-03-29T02:21:27.000Z
|
2022-03-12T00:03:32.000Z
|
python/django/python-hello-world/src/helloapp/views.py
|
davidponder/cloud-code-samples
|
9d7f08e48fd8355c31ac428ff660a24bf1bef742
|
[
"0BSD"
] | 779
|
2019-03-29T16:53:09.000Z
|
2022-03-31T18:48:08.000Z
|
python/django/python-hello-world/src/helloapp/views.py
|
davidponder/cloud-code-samples
|
9d7f08e48fd8355c31ac428ff660a24bf1bef742
|
[
"0BSD"
] | 182
|
2019-03-29T14:17:33.000Z
|
2022-03-14T22:31:12.000Z
|
from django.shortcuts import render
import os
def homePageView(request):
return render(request, 'homepage.html', context={
"message": "It's running!"
})
| 19.444444
| 53
| 0.662857
| 20
| 175
| 5.8
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217143
| 175
| 9
| 54
| 19.444444
| 0.846715
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
5b38e79a377ccb487655e34396ae67ca6d0baf77
| 312
|
py
|
Python
|
cnn/struct/layer/relu_layer_module.py
|
hslee1539/GIS_GANs
|
6901c830b924e59fd06247247db3f925bab26583
|
[
"MIT"
] | null | null | null |
cnn/struct/layer/relu_layer_module.py
|
hslee1539/GIS_GANs
|
6901c830b924e59fd06247247db3f925bab26583
|
[
"MIT"
] | null | null | null |
cnn/struct/layer/relu_layer_module.py
|
hslee1539/GIS_GANs
|
6901c830b924e59fd06247247db3f925bab26583
|
[
"MIT"
] | null | null | null |
from import_lib import lib
from tensor.main_module import Tensor
from cnn.struct.layer_module import Layer
from ctypes import Structure, c_int, POINTER
def createReluLayer():
return lib.cnn_create_relu_layer()
#lib.cnn_create_relu_layer.argtypes = (Layer, Layer)
lib.cnn_create_relu_layer.restype = Layer
| 26
| 52
| 0.817308
| 48
| 312
| 5.041667
| 0.4375
| 0.07438
| 0.14876
| 0.198347
| 0.280992
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 312
| 11
| 53
| 28.363636
| 0.876812
| 0.163462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.571429
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
5b4947c1df2c2c11a9c526533e38a8c276e1a76d
| 253
|
py
|
Python
|
nikola/packages/tzlocal/__init__.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | 1,901
|
2015-01-02T02:49:51.000Z
|
2022-03-30T23:31:35.000Z
|
nikola/packages/tzlocal/__init__.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | 1,755
|
2015-01-01T08:17:16.000Z
|
2022-03-24T18:02:22.000Z
|
nikola/packages/tzlocal/__init__.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | 421
|
2015-01-02T18:06:37.000Z
|
2022-03-28T23:18:54.000Z
|
"""Try to figure out what your local timezone is."""
import sys
__version__ = "2.0.0-nikola"
if sys.platform == "win32":
from .win32 import get_localzone, reload_localzone # NOQA
else:
from .unix import get_localzone, reload_localzone # NOQA
| 28.111111
| 62
| 0.72332
| 37
| 253
| 4.72973
| 0.675676
| 0.102857
| 0.205714
| 0.274286
| 0.422857
| 0.422857
| 0
| 0
| 0
| 0
| 0
| 0.033493
| 0.173913
| 253
| 8
| 63
| 31.625
| 0.803828
| 0.225296
| 0
| 0
| 0
| 0
| 0.089947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5b641bd2268abc885c0f727b04d7a8e03e2bc714
| 190
|
py
|
Python
|
src/com/ssafy/test/DevMatching2022_2/pro4.py
|
ehddn5252/AlgorithmStorage
|
c9b3464029181767c73f7607725cb47d99b7b7f2
|
[
"MIT"
] | null | null | null |
src/com/ssafy/test/DevMatching2022_2/pro4.py
|
ehddn5252/AlgorithmStorage
|
c9b3464029181767c73f7607725cb47d99b7b7f2
|
[
"MIT"
] | null | null | null |
src/com/ssafy/test/DevMatching2022_2/pro4.py
|
ehddn5252/AlgorithmStorage
|
c9b3464029181767c73f7607725cb47d99b7b7f2
|
[
"MIT"
] | null | null | null |
-- 코드를 입력하세요
SELECT c_p.cart_id as CART_ID ,if(sum(price)>minimum_requirement,0,1) as abused
from Cart_products as c_p join coupons as c
where c_p.cart_id = c.cart_id
group by c_p.cart_id;
| 27.142857
| 79
| 0.778947
| 43
| 190
| 3.186047
| 0.534884
| 0.218978
| 0.131387
| 0.175182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.136842
| 190
| 6
| 80
| 31.666667
| 0.823171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5b94a5735d213c448e683218e28b77df98453cc1
| 135
|
py
|
Python
|
email_validation/urls.py
|
rezaramadhan/ValidationSystem
|
1e36f37ee79bd3a8e618dec492e5cfbe83791ebe
|
[
"BSD-3-Clause"
] | null | null | null |
email_validation/urls.py
|
rezaramadhan/ValidationSystem
|
1e36f37ee79bd3a8e618dec492e5cfbe83791ebe
|
[
"BSD-3-Clause"
] | null | null | null |
email_validation/urls.py
|
rezaramadhan/ValidationSystem
|
1e36f37ee79bd3a8e618dec492e5cfbe83791ebe
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.validation_form, name='validation_form'),
]
| 19.285714
| 62
| 0.711111
| 18
| 135
| 5.222222
| 0.666667
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 135
| 6
| 63
| 22.5
| 0.817391
| 0
| 0
| 0
| 0
| 0
| 0.125926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5b9f5359a99fc21561af13513dcf810a58d7c4ab
| 243
|
py
|
Python
|
app/app/monitoring/__init__.py
|
ARgorithm/Server
|
b25b593721bab87263c49dddf52066288e45c272
|
[
"MIT"
] | 2
|
2021-02-24T17:23:46.000Z
|
2021-03-07T12:43:31.000Z
|
app/app/monitoring/__init__.py
|
ARgorithm/Server
|
b25b593721bab87263c49dddf52066288e45c272
|
[
"MIT"
] | 14
|
2020-10-18T14:50:43.000Z
|
2021-06-18T07:35:13.000Z
|
app/app/monitoring/__init__.py
|
ARgorithm/Server
|
b25b593721bab87263c49dddf52066288e45c272
|
[
"MIT"
] | null | null | null |
"""The monitoring module deals with prometheus monitoring for server performance as well as logs
"""
from .logging import logger
from .middleware import MonitoringMiddleware
from .view import metrics
from .performance import PerformanceMonitor
| 40.5
| 96
| 0.835391
| 30
| 243
| 6.766667
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 243
| 6
| 97
| 40.5
| 0.953052
| 0.382716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5bae1409b1600cfbb9380afb8dd3c2261008ab22
| 158
|
py
|
Python
|
bitcoin_client/__init__.py
|
ryan-shaw/app-bitcoin-new
|
aa0a27703f8dbe24c6825c86383a2689e7c4c126
|
[
"Apache-2.0"
] | 16
|
2021-09-25T11:46:17.000Z
|
2022-03-10T15:47:14.000Z
|
bitcoin_client/__init__.py
|
ryan-shaw/app-bitcoin-new
|
aa0a27703f8dbe24c6825c86383a2689e7c4c126
|
[
"Apache-2.0"
] | 20
|
2021-09-24T08:51:48.000Z
|
2022-03-28T20:00:00.000Z
|
bitcoin_client/__init__.py
|
ryan-shaw/app-bitcoin-new
|
aa0a27703f8dbe24c6825c86383a2689e7c4c126
|
[
"Apache-2.0"
] | 26
|
2021-09-21T07:03:00.000Z
|
2022-03-26T04:18:49.000Z
|
# this folder is not meant to be a python package, but adding this file allows pytest tests
# to import the test_utils module in the repository's root folder
| 52.666667
| 91
| 0.791139
| 29
| 158
| 4.275862
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183544
| 158
| 2
| 92
| 79
| 0.96124
| 0.968354
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5bd1b15e65873e8009faaac31e7f6a24996fae29
| 230
|
py
|
Python
|
tests/core/rules/test_collection_rule.py
|
manoadamro/flapi
|
e5ed4ebbb49ac88ce842c04ce73d0a97ce3fe00d
|
[
"MIT"
] | 3
|
2019-01-07T20:20:30.000Z
|
2019-01-11T11:15:19.000Z
|
tests/core/rules/test_collection_rule.py
|
manoadamro/flapi
|
e5ed4ebbb49ac88ce842c04ce73d0a97ce3fe00d
|
[
"MIT"
] | null | null | null |
tests/core/rules/test_collection_rule.py
|
manoadamro/flapi
|
e5ed4ebbb49ac88ce842c04ce73d0a97ce3fe00d
|
[
"MIT"
] | 1
|
2019-01-11T11:15:27.000Z
|
2019-01-11T11:15:27.000Z
|
import unittest
from flapi.core.rules import _CollectionRule
class CollectionRuleTest(unittest.TestCase):
def test_fails(self):
rule = _CollectionRule()
self.assertRaises(NotImplementedError, rule, "token")
| 23
| 61
| 0.747826
| 23
| 230
| 7.347826
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169565
| 230
| 9
| 62
| 25.555556
| 0.884817
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5be3df800845edec650c50ee6175a3b1022a9f9a
| 543
|
py
|
Python
|
trac/trac/web/tests/wikisyntax.py
|
HelionDevPlatform/bloodhound
|
206b0d9898159fa8297ad1e407d38484fa378354
|
[
"Apache-2.0"
] | 84
|
2015-01-07T03:42:53.000Z
|
2022-01-10T11:57:30.000Z
|
trac/trac/web/tests/wikisyntax.py
|
HelionDevPlatform/bloodhound
|
206b0d9898159fa8297ad1e407d38484fa378354
|
[
"Apache-2.0"
] | 1
|
2021-11-04T12:52:03.000Z
|
2021-11-04T12:52:03.000Z
|
trac/trac/web/tests/wikisyntax.py
|
HelionDevPlatform/bloodhound
|
206b0d9898159fa8297ad1e407d38484fa378354
|
[
"Apache-2.0"
] | 35
|
2015-01-06T11:30:27.000Z
|
2021-11-10T16:34:52.000Z
|
import unittest
from trac.wiki.tests import formatter
TEST_CASES = """
============================== htdocs: links resolver
htdocs:release-1.0.tar.gz
[htdocs:release-1.0.tar.gz Release 1.0]
------------------------------
<p>
<a href="/chrome/site/release-1.0.tar.gz">htdocs:release-1.0.tar.gz</a>
</p>
<p>
<a href="/chrome/site/release-1.0.tar.gz">Release 1.0</a>
</p>
------------------------------
"""
def suite():
return formatter.suite(TEST_CASES, file=__file__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 21.72
| 71
| 0.576427
| 77
| 543
| 3.883117
| 0.38961
| 0.187291
| 0.210702
| 0.200669
| 0.461538
| 0.461538
| 0.441472
| 0.441472
| 0.381271
| 0.381271
| 0
| 0.028513
| 0.095764
| 543
| 24
| 72
| 22.625
| 0.580448
| 0
| 0
| 0.315789
| 0
| 0.105263
| 0.635359
| 0.469613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0.052632
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5bf20cefee7f45afee4dc39c0038a1484863aee3
| 46
|
py
|
Python
|
04_datacamp/solutions/16_solutions.py
|
HirahTang/datascience_starter_course
|
a4429db9ae1795eaf52b795d16897466d769c40c
|
[
"CC0-1.0"
] | 3
|
2020-09-06T06:01:41.000Z
|
2020-09-23T19:03:04.000Z
|
02_pandas/solutions/16_solutions.py
|
glemaitre/smob_paristech_12_2018
|
b669206f204a3e57e71efb3dd22e2ffbc4e0a309
|
[
"CC0-1.0"
] | 4
|
2019-02-22T21:37:20.000Z
|
2019-03-12T13:20:29.000Z
|
02_pandas/solutions/16_solutions.py
|
glemaitre/smob_paristech_12_2018
|
b669206f204a3e57e71efb3dd22e2ffbc4e0a309
|
[
"CC0-1.0"
] | 5
|
2020-10-26T05:03:09.000Z
|
2022-03-24T04:22:09.000Z
|
df.groupby('Pclass')['Fare'].hist(alpha=0.4);
| 23
| 45
| 0.652174
| 8
| 46
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0.021739
| 46
| 1
| 46
| 46
| 0.622222
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
752c81ba27125f6ffa835642cb7d13b228b30ad9
| 54,220
|
py
|
Python
|
Survival.py
|
SimplyNate/PySurvival
|
c49d52b5e3c60d1063d75b0eb7d43a118647d4fc
|
[
"MIT"
] | null | null | null |
Survival.py
|
SimplyNate/PySurvival
|
c49d52b5e3c60d1063d75b0eb7d43a118647d4fc
|
[
"MIT"
] | null | null | null |
Survival.py
|
SimplyNate/PySurvival
|
c49d52b5e3c60d1063d75b0eb7d43a118647d4fc
|
[
"MIT"
] | null | null | null |
########################################################################################################################
"""
Inspired by http://usingpython.com/programs/ 'Crafting Challenge' Game
Created by SimplyNate
Coding Module - Python Lab
Craft the items indicated in the Quests panel to win the game.
Hunger ticks down after each input by the amount indicated below.
When hunger reaches 0, Health will begin ticking down instead after each input.
When health reaches 0, the game ends.
Made for the GenCyber Hawaii SecurityX Camp 2018
"""
########################################################################################################################
# Imports dependencies required for drawing the GUI and other functions
import tkinter
from tkinter import *
from tkinter import ttk
# Class that initiates data for use in the GUI class
class Game:
# Function that initializes variables with data
def __init__(self):
# EDIT VARIABLES BELOW #########################################################################################
# List of commands - Gets displayed in the "Help" menu
self.commands = {
"i": "see inventory",
"c": "see crafting options",
"h": "see help",
"q": "see quests",
"craft [item] [amount]": "craft something from inventory items",
"eat [item]": "Eat something from inventory to restore hunger",
"gather [item]": "Increase resources in your inventory"
}
# an inventory of items - Gets listed in the "Inventory" menu
# Edit the number values to change your starting amount
self.items = {
"flint": 50,
"grass": 100,
"hay": 0,
"tree": 100,
"log": 0,
"sapling": 100,
"twig": 0,
"boulder": 30,
"rock": 0,
"pickaxe": 0,
"axe": 0,
"firepit": 0,
"tent": 0,
"torch": 0,
}
# List of Gatherable items
# Add items from the items list below to be able to gather different items
self.gatherable = [
"flint",
"grass",
"tree",
"sapling",
"boulder",
]
# Inventory of Food items
# Edit the "amount" numbers to change how much you start with.
# Edit the "restores" number to change how much each food restores hunger by.
self.foods = {
"potato": {
"amount": 10,
"restores": 5
},
"bread": {
"amount": 5,
"restores": 10
},
"apple": {
"amount": 20,
"restores": 2
},
"porkchop": {
"amount": 5,
"restores": 20
}
}
# rules to make new objects
# Change the number values to change how much resources required to craft the item
self.craft = {
"hay": {"grass": 1},
"twig": {"sapling": 1},
"log": {"axe": 1, "tree": 1},
"axe": {"twig": 3, "flint": 1},
"tent": {"twig": 10, "hay": 15},
"firepit": {"boulder": 5, "log": 3, "twig": 1, "torch": 1},
"torch": {"flint": 1, "grass": 1, "twig": 1},
"pickaxe": {"flint": 2, "twig": 1}
}
# List of Quests
# Add more quests by adding a new entry under here
self.quests = [
"Craft a Hay",
"Craft a Tent",
"Craft a Firepit",
]
# Hero Statistics
# Change the hunger value to change how much hunger you start with
# Change the hungerDecay value to change how quickly or slowly the hunger goes down by
# Change the health value to change how much health you start with
# Change the healthDecay value to change how quickly or slowly your health goes down by or regenerates by
# Change the gatherRate value to change how much resources you get when using the gather command
self.hero = {
"hunger": 100,
"hungerDecay": 5,
"health": 20,
"healthDecay": 2,
"gatherRate": 2,
}
########################################################################################################################
# End Recommended Editable Area #
########################################################################################################################
# Class that draws the GUI and runs the game logic and functions
class Gui:
argument = ""
history = []
index = -1
qtimes = 0
itimes = 0
ctimes = 0
htimes = 0
game = Game()
g = Game() # Reference variable
def __init__(self, master):
# Window itself
self.master = master
master.title("Python Game")
master.geometry("960x480")
master.resizable(False, False)
master.configure(background='black')
# Health and Hunger bar themes
self.s = ttk.Style()
self.s.theme_use('clam')
self.s.configure("red.Horizontal.TProgressbar", foreground="red", background="red")
self.s.configure("green.Horizontal.TProgressbar", foreground="green", background="green")
self.s.configure("yellow.Horizontal.TProgressbar", foreground="yellow", background="yellow")
self.s.configure("brown.Horizontal.TProgressbar", foreground="brown", background="brown")
# Title Label
self.title = Label(master, text="Generic Survival Game", bg="black", fg="white", font=("Impact", 48))
self.title.place(x=180, y=13)
# Subtitle Label
self.subtitle = Label(master, text="Written entirely in Python", bg="black", fg="white", font=("Georgia", 16))
self.subtitle.place(x=350, y=90)
# Another Label below the Subtitle
self.instruction = Label(master, text="Survive the Night!", bg="black", fg="white", font=("Georgia", 24))
self.instruction.place(x=340, y=200)
# Button that starts or restarts the game
self.start = Button(master, text="Start", command=self.startgame, font=("Impact", 28))
self.start.place(x=230, y=350, width=200, height=100)
# Button that quits the game
self.quit = Button(master, text="Quit", command=quit, font=("Impact", 28))
self.quit.place(x=530, y=350, width=200, height=100)
# Label telling where to put command
self.command = Label(master, text="Enter Your Command:", bg="black", fg='white')
self.command.configure(highlightbackground='white')
# Top divider between user entry and rest of game
self.div = Label(master, text="", bg="white")
# Divider between Label and User entry box
self.div2 = Label(master, text="", bg="white")
# Where the user types their arguments
self.userCommand = Entry(master, bg="black", fg="white", font=("Georgia", 14), borderwidth=0)
# Output box that gives user more info
self.outbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
# Quests "button"
self.quests = Label(master, text="[Q]uests", bg="black", fg="white", borderwidth=2, relief="groove")
# Inventory "button"
self.inventory = Label(master, text="[I]nventory", bg="black", fg="white", borderwidth=2, relief="groove")
# Crafting "button"
self.crafting = Label(master, text="[C]rafting", bg="black", fg="white", borderwidth=2, relief="groove")
# Help "Button"
self.help = Label(master, text="[H]elp", bg="black", fg="white", borderwidth=2, relief="groove")
# Boxes
self.questbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
self.inventorybox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
self.craftbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
self.helpbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
# Health display
self.health = Label(master, text=("Health: " + str(Gui.game.hero["health"])), bg="black", fg="white",
font=("Georgia", 16))
# Hunger display
self.hunger = Label(master, text="Hunger: " + str(Gui.game.hero["hunger"]), bg="black", fg="white",
font=("Georgia", 16))
# Popup notificatin
self.popup = Label(master, text="You survived!", bg="black", fg="white", borderwidth=2, relief="groove")
# Health Bar
self.hbar = ttk.Progressbar(master, orient="horizontal", length=200, mode="determinate")
# Hunger Bar
self.hungerbar = ttk.Progressbar(master, orient="horizontal", length=200, mode="determinate")
def startgame(self):
# Get new instance of Game
Gui.game = Game()
# Place UI Elements
self.command.place(x=10, y=448, height=32)
self.div.place(y=447, height=1, width=960)
self.div2.place(y=448, x=140, height=32)
self.userCommand.place(x=150, y=448, height=32, width=820)
self.userCommand.focus()
# Binds key to perform a specific function
self.master.bind('<Return>', self.parse) # Sends data
self.master.bind('<Up>', self.get_history_up) # Gets previous args
self.master.bind('<Down>', self.get_history_down) # Gets previous args
# Places rest of UI
self.outbox.place(y=340, width=960, height=106)
self.quests.place(x=0, y=300, width=240, height=40)
self.inventory.place(x=240, y=300, width=240, height=40)
self.crafting.place(x=480, y=300, width=240, height=40)
self.help.place(x=720, y=300, width=240, height=40)
self.health.place(x=200, y=130)
self.hunger.place(x=200, y=160)
self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
self.health.config(text="Health: " + str(Gui.game.hero["health"]))
# Forget buttons and instruction
self.start.place_forget()
self.quit.place_forget()
self.instruction.place_forget()
self.hbar.place(x=340, y=137)
self.hbar.config(style="green.Horizontal.TProgressbar")
self.hbar["value"] = Gui.game.hero["health"]
self.hbar["maximum"] = Gui.game.hero["health"]
self.hungerbar.place(x=340, y=167)
self.hungerbar.config(style="brown.Horizontal.TProgressbar")
self.hungerbar["value"] = Gui.game.hero["hunger"]
self.hungerbar["maximum"] = Gui.game.hero["hunger"]
def endgame(self, endtext):
Gui.argument = ""
Gui.history = []
Gui.index = -1
Gui.qtimes = 0
Gui.itimes = 0
Gui.ctimes = 0
Gui.htimes = 0
Gui.game = Game() # Needed
self.start.config(text="Restart")
self.start.place(x=230, y=350, width=200, height=100)
self.quit.place(x=530, y=350, width=200, height=100)
if endtext == "lose":
self.instruction.config(text="You have Died!")
self.instruction.place(x=370, y=200)
elif endtext == "win":
self.instruction.config(text="You have Survived!")
self.instruction.place(x=330, y=200)
self.command.place_forget()
self.div.place_forget()
self.div2.place_forget()
self.userCommand.place_forget()
self.outbox.config(state="normal")
self.outbox.delete("1.0", END)
self.outbox.config(state="disabled")
self.outbox.place_forget()
self.quests.config(relief="groove", bg="black", fg="white")
self.quests.place_forget()
self.inventory.config(relief="groove", bg="black", fg="white")
self.inventory.place_forget()
self.crafting.config(relief="groove", bg="black", fg="white")
self.crafting.place_forget()
self.help.config(relief="groove", bg="black", fg="white")
self.help.place_forget()
self.health.place_forget()
self.hunger.place_forget()
self.inventorybox.place_forget()
self.questbox.place_forget()
self.helpbox.place_forget()
self.craftbox.place_forget()
self.hungerbar.place_forget()
self.hbar.place_forget()
# Gets the text inputted by the user and parses accordingly
def parse(self, event):
keypress = event # Stores data about keypress, not necessary
Gui.index = -1
Gui.argument = self.userCommand.get()
self.userCommand.delete(0, 'end')
if Gui.argument is not "" and Gui.argument is not " ":
Gui.history.insert(0, Gui.argument)
if len(Gui.history) > 100:
try:
Gui.history.remove(-1)
except ValueError:
pass
Gui.argument = Gui.argument.strip().lower() # Normalizes input
if "craft" in Gui.argument and len(Gui.argument.split(" ")) > 1:
Gui.craft(self, Gui.argument) # Runs rest of game logic
elif "eat " in Gui.argument:
tokens = Gui.argument.split(" ")
self.write_to_outbox("Eating " + tokens[1])
Gui.eat(self, tokens[1])
elif "gather " in Gui.argument:
tokens = Gui.argument.split(" ")
self.write_to_outbox("Gathering " + tokens[1])
Gui.gather(self, tokens[1])
elif "quests" in Gui.argument or "q" in Gui.argument and len(Gui.argument) == 1:
Gui.qtimes += 1
if Gui.qtimes is 1:
self.quests.config(relief="sunken", bg="white", fg="black")
self.questbox.config(state="normal")
self.questbox.delete("1.0", END)
a = get_everything(Gui.game.quests) # Different way to do it
self.questbox.insert(INSERT, a)
self.questbox.place(x=0, y=210, height=100, width=240)
self.questbox.config(state="disabled")
# Output Box
self.write_to_outbox("Opened Quests menu")
else:
Gui.qtimes = 0
self.quests.config(relief="groove", bg="black", fg="white")
self.questbox.place_forget()
# Output Box
self.write_to_outbox("Closed Quests menu")
elif "inventory" in Gui.argument or "i" in Gui.argument and len(Gui.argument) == 1:
Gui.itimes += 1
if Gui.itimes is 1:
self.inventory.config(relief="sunken", bg="white", fg="black")
self.inventorybox.config(state="normal")
self.inventorybox.delete("1.0", END)
self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
self.inventorybox.place(x=240, y=210, height=100, width=240)
self.inventorybox.config(state="disabled")
self.write_to_outbox("Opened Inventory menu")
else:
Gui.itimes = 0
self.inventory.config(relief="groove", bg="black", fg="white")
self.inventorybox.place_forget()
self.write_to_outbox("Closed Inventory menu")
elif "crafting" == Gui.argument or "c" in Gui.argument and len(Gui.argument) == 1:
Gui.ctimes += 1
if Gui.ctimes is 1:
self.crafting.config(relief="sunken", bg="white", fg="black")
self.craftbox.config(state="normal")
self.craftbox.delete("1.0", END)
self.craftbox.insert(INSERT, get_everything(Gui.game.craft))
self.craftbox.place(x=480, y=210, height=100, width=240)
self.craftbox.config(state="disabled")
self.write_to_outbox("Opened Crafting menu")
else:
Gui.ctimes = 0
self.crafting.config(relief="groove", bg="black", fg="white")
self.craftbox.place_forget()
self.write_to_outbox("Closed Crafting menu")
elif "help" in Gui.argument or "h" in Gui.argument and len(Gui.argument) == 1:
Gui.htimes += 1
if Gui.htimes is 1:
self.help.config(relief="sunken", bg="white", fg="black")
self.helpbox.config(state="normal")
self.helpbox.delete("1.0", END)
self.helpbox.insert(INSERT, get_everything(Gui.game.commands))
self.helpbox.place(x=720, y=210, height=100, width=240)
self.helpbox.config(state="disabled")
self.write_to_outbox("Opened Help menu")
else:
Gui.htimes = 0
self.help.config(relief="groove", bg="black", fg="white")
self.helpbox.place_forget()
self.write_to_outbox("Closed Help menu")
else:
self.write_to_outbox(Gui.argument + " is not a valid argument")
# if the command is not blank
if Gui.argument is not "" and Gui.argument is not " ":
# Hunger and Health
# If hunger is greather than 70% of max
if Gui.game.hero["hunger"] >= int(Gui.g.hero["hunger"] * 0.7):
# If health is lower than maximum
if Gui.game.hero["health"] < Gui.g.hero["health"]:
# Regeneration of health
Gui.game.hero["health"] += Gui.game.hero["healthDecay"]
# If health is greater than maximum
if Gui.game.hero["health"] > Gui.g.hero["health"]:
# Set health to maximum
Gui.game.hero["health"] = Gui.g.hero["health"]
if Gui.game.hero["hunger"] > 0 and "eat" not in Gui.argument.lower():
Gui.game.hero["hunger"] -= Gui.game.hero["hungerDecay"]
if Gui.game.hero["hunger"] < 0:
Gui.game.hero["hunger"] = 0
if Gui.game.hero["hunger"] == 0 and "eat" not in Gui.argument.lower():
if Gui.game.hero["health"] != 0:
Gui.game.hero["health"] -= Gui.game.hero["healthDecay"]
if Gui.game.hero["health"] < 0:
Gui.game.hero["health"] = 0
if Gui.game.hero["health"] == 0:
self.endgame("lose")
self.hbar["value"] = Gui.game.hero["health"]
self.hungerbar["value"] = Gui.game.hero["hunger"]
self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
self.health.config(text="Health: " + str(Gui.game.hero["health"]))
if Gui.game.hero["health"] > int(Gui.g.hero["health"] * 0.5):
self.hbar.configure(style="green.Horizontal.TProgressbar")
elif Gui.game.hero["health"] > int(Gui.g.hero["health"] * 0.25):
self.hbar.configure(style="yellow.Horizontal.TProgressbar")
else:
self.hbar.configure(style="red.Horizontal.TProgressbar")
# Function that "returns" previous commands
def get_history_up(self, event):
keypress = event
amt = len(Gui.history)
if amt > 0:
Gui.index += 1
if Gui.index < amt:
self.userCommand.delete(0, 'end')
self.userCommand.insert(0, Gui.history[Gui.index])
else:
Gui.index = amt-1
self.userCommand.delete(0, 'end')
self.userCommand.insert(0, Gui.history[Gui.index])
# Function that "returns" previous commands (backwards)
def get_history_down(self, event):
keypress = event
if len(Gui.history) > 0:
Gui.index -= 1
if Gui.index >= 0:
self.userCommand.delete(0, 'end')
self.userCommand.insert(0, Gui.history[Gui.index])
if Gui.index <= -1:
Gui.index = -1
self.userCommand.delete(0, 'end')
def write_to_outbox(self, text):
text = text + "\n"
self.outbox.config(state="normal")
self.outbox.insert(END, text)
self.outbox.config(state="disabled")
self.outbox.see(tkinter.END)
def eat(self, item):
if item in Gui.game.foods.keys() and Gui.game.foods[item]["amount"] > 0:
self.write_to_outbox("Restored " + str(Gui.game.foods[item]["restores"]) + " hunger")
if Gui.game.hero["hunger"] != 100:
Gui.game.hero["hunger"] += Gui.game.foods[item]["restores"]
if Gui.game.hero["hunger"] > 100:
Gui.game.hero["hunger"] = 100
self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
Gui.game.foods[item]["amount"] -= 1
self.inventorybox.config(state="normal")
self.inventorybox.delete("1.0", END)
self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
self.inventorybox.config(state="disabled")
else:
self.write_to_outbox(item + " is not an edible item")
if Gui.game.hero["hunger"] > 0:
Gui.game.hero["hunger"] -= Gui.game.hero["hungerDecay"]
if Gui.game.hero["hunger"] < 0:
Gui.game.hero["hunger"] = 0
self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
if Gui.game.hero["hunger"] == 0:
if Gui.game.hero["health"] != 0:
Gui.game.hero["health"] -= Gui.game.hero["healthDecay"]
if Gui.game.hero["health"] <= 0:
self.endgame("lose")
else:
self.endgame("lose")
self.hungerbar["value"] = Gui.game.hero["hunger"]
def gather(self, item):
if item in Gui.game.gatherable:
self.write_to_outbox("Gathered " + str(Gui.game.hero["gatherRate"]) + " " + item)
Gui.game.items[item] += Gui.game.hero["gatherRate"]
self.inventorybox.config(state="normal")
self.inventorybox.delete("1.0", END)
self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
self.inventorybox.config(state="disabled")
elif item in Gui.game.foods:
self.write_to_outbox("Gathered " + item)
Gui.game.foods[item]["amount"] += Gui.game.hero["gatherRate"]
self.inventorybox.config(state="normal")
self.inventorybox.delete("1.0", END)
self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
self.inventorybox.config(state="disabled")
else:
self.write_to_outbox(item + " is not gatherable")
# Method for Crafting items
def craft(self, arg):
command = arg.split(" ")
if len(command) > 1:
item = command[1].lower()
else:
Gui.write_to_outbox(self, "Error: No item specified.")
return
# If a quantity is defined, try to extract it
if len(command) > 2:
try:
quantity = int(command[2].lower())
except ValueError:
Gui.write_to_outbox(self, "Error: Please switch position of item and quantity")
return
else:
quantity = 1
Gui.write_to_outbox(self, "Crafting " + item + ":")
if item in Gui.game.craft:
# Print item requirements and check if all items are present
for i in Gui.game.craft[item]:
Gui.write_to_outbox(self, f"{item} requires: {str(Gui.game.craft[item][i] * quantity)} {i}. You have: {str(Gui.game.items[i])}")
if (Gui.game.craft[item][i] * quantity) > Gui.game.items[i]:
Gui.write_to_outbox(self, "Item cannot be crafted.")
return
# Remove the items from the inventory
for i in Gui.game.craft[item]:
Gui.game.items[i] -= Gui.game.craft[item][i] * quantity
# Add the new item
Gui.game.items[item] += 1 * quantity
Gui.remove_quest(self, item)
Gui.write_to_outbox(self, f"{item} crafted.\n")
self.inventorybox.config(state="normal")
self.inventorybox.delete("1.0", END)
self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
self.inventorybox.config(state="disabled")
if len(Gui.game.quests) == 0:
Gui.write_to_outbox(self, "\n**YOU HAVE MANAGED TO SURVIVE!\nWELL DONE!")
self.endgame("win")
else:
Gui.write_to_outbox(self, "Error: That item does not exist in the crafting table.")
def remove_quest(self, arg):
arg = arg.capitalize()
# for i in range(len(Gui.game.quests)):
for item in Gui.game.quests[:]:
if arg in item:
try:
Gui.game.quests.remove(item)
except ValueError:
pass
self.questbox.config(state="normal")
self.questbox.delete("1.0", END)
a = get_everything(Gui.game.quests) # Different way to do it
self.questbox.insert(INSERT, a)
self.questbox.config(state="disabled")
def get_everything(objects):
    """Render a game collection as the multi-line string shown in the GUI.

    Dispatches on object *identity*: the quest list, the crafting table,
    the item inventory, or (fallback) any generic mapping.
    """
    if objects is Gui.game.quests:
        # One quest per line
        return "".join(quest + "\n" for quest in objects)
    if objects is Gui.game.craft:
        # Each recipe: a header line, one ingredient per line, blank separator
        recipe_text = ""
        for recipe, ingredients in objects.items():
            recipe_text += recipe + " can be made with:\n"
            for ingredient in ingredients:
                recipe_text += str(ingredients[ingredient]) + " " + ingredient + "\n"
            recipe_text += "\n"
        return recipe_text
    if objects is Gui.game.items:
        # Inventory counts followed by food counts
        inventory_text = ""
        for name in objects:
            inventory_text += name + "\t: " + str(objects[name]) + "\n"
        for food in Gui.game.foods:
            inventory_text += food + "\t: " + str(Gui.game.foods[food]["amount"]) + "\n"
        return inventory_text
    # Fallback: generic "key : value" listing
    return "".join(key + " : " + str(objects[key]) + "\n" for key in objects)
########################################################################################################################
# END LAB 2 ############################################################################################################
########################################################################################################################
########################################################################################################################
# START LAB 3 ##########################################################################################################
########################################################################################################################
"""
Warning: Do not alter anything below until instructed to do so.
Instructions: Attempt to bypass or disable the login box
Remember: Undo [CTRL]+[Z] is your friend
"""
def qeydfsfgdstreygfd(hytedy, ghytjh, fewqe):
    # Obfuscated helper used by the lab-3 puzzle below:
    # it simply concatenates (or adds) its three arguments in order.
    combined = hytedy + ghytjh
    return combined + fewqe
from base64 import b64decode as безопасность
pdftagrthrsae = "ianm_girstn<8ab6>_u-ecxmeaoplvfybh."
лояльность = qeydfsfgdstreygfd(pdftagrthrsae[4],pdftagrthrsae[17],pdftagrthrsae[3])+"ai"+qeydfsfgdstreygfd(pdftagrthrsae[2],pdftagrthrsae[17],"_")
Кремль = qeydfsfgdstreygfd(pdftagrthrsae[11],pdftagrthrsae[8],pdftagrthrsae[9])+qeydfsfgdstreygfd(pdftagrthrsae[7]+"i",pdftagrthrsae[10],pdftagrthrsae[5]+">")
технологии = qeydfsfgdstreygfd(pdftagrthrsae[20],"",pdftagrthrsae[29])
компьютер = qeydfsfgdstreygfd("",pdftagrthrsae[1],pdftagrthrsae[28])
кодирование = qeydfsfgdstreygfd(технологии,"",компьютер)
нарушения = eval(compile(qeydfsfgdstreygfd("c",pdftagrthrsae[26]+"m",pdftagrthrsae[27]+"i")+qeydfsfgdstreygfd(pdftagrthrsae[28],pdftagrthrsae[20],""), Кремль, кодирование))
оценивать = eval(compile(кодирование, Кремль, кодирование))
сила = qeydfsfgdstreygfd(pdftagrthrsae[32]+"y","t",pdftagrthrsae[20]+pdftagrthrsae[8]+".")
информационная = оценивать(нарушения(сила+qeydfsfgdstreygfd(pdftagrthrsae[-5]+"ro","m",pdftagrthrsae[-2]+pdftagrthrsae[20])+pdftagrthrsae[22], Кремль, кодирование))
большевик = pdftagrthrsae[18]+pdftagrthrsae[9]+qeydfsfgdstreygfd(pdftagrthrsae[30],pdftagrthrsae[19],pdftagrthrsae[12])
взлом = оценивать(нарушения(qeydfsfgdstreygfd(pdftagrthrsae[(9-1)],pdftagrthrsae[(27-9*(2-(-2)))*-1],pdftagrthrsae[int((2/(1/4))-1)]), Кремль, кодирование))
советский = qeydfsfgdstreygfd(pdftagrthrsae[20],pdftagrthrsae[22],pdftagrthrsae[(17+3)])+pdftagrthrsae[(7*3)]
выигрыш = оценивать(нарушения(pdftagrthrsae[4]+qeydfsfgdstreygfd("_"+pdftagrthrsae[2]+pdftagrthrsae[1],pdftagrthrsae[3]+pdftagrthrsae[20],pdftagrthrsae[17]+pdftagrthrsae[4]), Кремль, кодирование))
"""
So you want to know what goes on below?
The first step is to decode the message in tow.
There are many unnecessary marks under the score
But only one aligns different than the rest.
Once you find the correct mark,
Move not more than two forward and not more than three backward,
For these nefarious characters
Are plotting against you.
Fuse all the pieces together
And you will find a secret message,
Cast in base64
A firey tool will help
Lead the way
SGlkZGVuIG9wZ__ XJhdGlvbnMgYmUgYXdhaXRpbmcgYmVsb3c6DQpPbmUgdGhh__ dCBhcHBlYXJzIGxpa2UgdGhpcyBkb2VzDQooQnV0IGEgZmFpciB3YX
JuaW5nIHRvIHlvdSwNCkRlY29kaW__ 5nIGRvZXMgbm90IGxpa2UgdGhhdA0KSW4gd2hpY2ggcXVvdGVzIHRoZ__ WUpLA0KDQpBbmQgb25lIHdobydzIGNv
bXBsZXhpdHkNCklzIG5vdCB3aGF0IGl0IHNlZW1zLg0KSGUgd2hvIHd__ hbnRzIHRvIGJyZ__ WFrIHRoZSBjb2RlDQpVc2VzIHRoZSAxNnRoIGJhc2UNCg
0K__ Q2x1ZXMgYmUgaGlkZGVuIGludmFyaWFibHkgaW4gdGhpcyBwcm9ncmFtLA0KQWJvdmUgYWxsIif __naHRoYXQgaXMgcnVubmluZw0KVG8gaGVscCBk
ZWNvZGUNClRoaXMgQ3l__ yaWxsaWMgbWVzcw0KDQpUcmVhZCBjYXJlZnVsbHkgdGhvdWdoLA0KQSB3YXRjaGZ1bCB3YXJkZW4gYXdhaXRzDQpBbnkgd__ H
Jlc3Bhc3NlcnMgd2hvIGRhcmUNClRvIGRpc2NvdmVyIHR__ oZSBzZWNyZXRzIGJlbG93Lg==
"""
if выигрыш == лояльность:
оценивать(нарушения(взлом(безопасность(
'ZnJvbSB0a2ludGVyIGltcG9ydCBUayBhcyBhc2Y2NWVzZGhpODcNCmZyb20gYmFzZTY0IGltcG9ydCBiNjRkZWNvZGUgYXMgbWxzdzR0ajc2M3'
'cwOWhncw0KaW1wb3J0IGhhc2hsaWIgYXMgaHl0ZWRzZGdqaGdydGRzDQppbXBvcnQgb3MucGF0aCBhcyB5dXlnZmZzZA0KDQpsMWxsMTFsMSA9'
'IG9wZW4NCmxsMWwxMWwxID0gY29tcGlsZQ0KbDFsMWxsMWwgPSB5dXlnZmZzZC5leHBhbmR1c2VyDQpsMTFsbDFsMSA9IHN0cg0KbGwxMWwxbG'
'wgPSB5dXlnZmZzZC5pc2ZpbGUNCmwxMTFsbDFsID0gcHJpbnQNCmwxbDFsMWwxID0gZXZhbA0KbDExbGxsbGwgPSBieXRlcy5mcm9taGV4DQpk'
'WFJtTFRnID0gJ3V0Zi04Jw0KUEhOMGNtbHVaejQgPSAnPHN0cmluZz4nDQpaWGhsWXcgPSAnZXhlYycNCmFXWWdYMTl1WVEgPSAnaWYgX19uYS'
'c='),
большевик),
Кремль,
советский)
)
оценивать(
информационная(
'6C316C316C316C31280D0A096C6C316C31316C31280D0A09096C31316C6C316C31280D0A0909096D6C737734746A37363377303968'
'6773280D0A0909090927624446734D5777786244456F62444578624778736247776F44516F4A4A7A5A444D7A4532517A4D784E6B4D'
'7A4D545A444D7A45794F445A444E6B4D7A4D545A444D7A457A4D545A444D7A45794F445A444D7A457A4D545A444E6B4D7A4D545A44'
'4D7A270D0A090909092745794F445A454E6B4D334D7A63334D7A51334E445A424D7A637A4E6A4D7A4E7A637A4D444D354E6A67324E'
'7A637A4D6A6777524442424D446B794E7A56424D7A497A4F545A454E6A49304F4452464A77304B43536332516A59794E6B51324F44'
'6378270D0A09090909274E446B304E444D774E6A63324D6A51304E4459334D7A59794E4451304E5463344E6A49304E4451314E6B59'
'324D6A55334E7A6733515459304E3045314D6A4D774E6A45325154597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A46'
'4E6A270D0A09090909274D334F5459334E6A6B314F5464424E4555314E7A59314E5463314D6A63304E6A49304E43634E43676B6E4E'
'454531515455324D7A4D324E444D784E546B7A4D4459344E7A49314D44557A4E446B334D4452444E444D304D6A5A444E6A49325244'
'5246270D0A09090909274E7A5931515451334E6B4D334E5456424E30457A4D545A434E5463304E6A52424E7A51314E4451324E5449'
'32525452434E544D7A4E5463354E5545314E7A51324E6B49324D6A51334D6A6377524442424D446B794E7A5A444E7A556E44516F4A'
'4A7A270D0A090909092756424E54673052445A474E4549314D544D774E4549324E444D794E4551324E7A55774E544D304D5459354E'
'446B324E7A4D774E4549314F5463354E44457A4F5451354E4451304D5452464E444D32524456424E7A59324D7A59354E4449334E7A'
'597A270D0A09090909274E4467304E6A4D7A4E5545314F4452424E7A41304F5451334E6B4D334E5451354E4463324E4463324E5545'
'32524463344A77304B43536333515456424E44637A4E545A474E6A453251545A474E4555304D7A59334E6B4D334D4456424E6A6B30'
'4D6A270D0A090909092759344E54597A4D545A444E6B55314E7A51304E44557A4E5459304E545932517A55794E446B304E7A5A444E'
'7A55304F5451344E4449334E7A597A4E5467324E445A444E6A4D3252445A434D7A59304E4455784E6B59305154517A4E5463305244'
'5933270D0A09090909274D6A63775243634E43676B6E4D4545774F5449334E45493351544D774E6A6330524455784D7A4130516A51'
'7A4E546332517A5A454E446B304E7A52454E6A63314D4459354E4445334E7A52474E6A637A4D4452434E444D314D545A444D7A4D31'
'4F54270D0A090909092763354E44457A4F5451354E4467324E445A424E446B304D7A637A4E6A63324D7A51344E4449334F4459304D'
'7A49314E6A63354E6A45314D7A4D314E30456E44516F4A4A7A59304E446730515463774E6A4D304D7A59334E7A41304E4455784E7A'
'4133270D0A09090909274D4456424E6A6B304D6A5A424E446B304E4463334E6A6330524455304E6B59305254517A4E6A6332516A52'
'424E6A49304E4451314E7A6730524455334E7A67334D7A52454E5463334E7A5A474E6A49304E4451314E7A67324D6A51334E7A6333'
'4F44270D0A090909092759794E4451304E545A474E6A49314E7A63344A77304B43536333515459304E3045794E7A42454D4545774F'
'5449334E54497A4D4459784E6B45324D7A4D794E45517A4D7A597A4E7A6330526A55334E6A67325254597A4E7A6B324E7A5A464E54'
'5932270D0A0909090927516A59304E6B59324D6A51314E6B4D304E7A56424E4463324F445A424E6A49314E6A52424E7A4D314F545A'
'454E6B4D304D7A59784D7A45334D4455354E545532524463344E5545305243634E43676B6E4D7A45305154637A4E546332516A5246'
'4E44270D0A09090909274D324D5451334E4545334D4455784E6B55304D6A59354E6A4932517A52424D7A5531515451324E6A51314D'
'7A59794E446330525463774E5449314E7A59304E5459314D6A4D774E55457A4E6A55354E304530515451334E6A4932517A63774E54'
'5131270D0A09090909274D545A454D7A6B31515455334E44557A4D545A464E5463314E7A4D784E54636E44516F4A4A7A59794E4463'
'30515463774E54453252445A444E6A6B794E7A42454D4545774F5449334E54493351545A444E7A45314F5455304E4545314E7A5978'
'4D7A270D0A090909092741334F4463774E5445314E544D314E445131515464424E444930517A55314E6B4D324F445A474E6A4D304E'
'7A55794E4467324D6A51344E5459324D5459314E5451314D6A4D784E5451304E7A597A4E7A63314D7A63354A77304B435363324D7A'
'6377270D0A09090909274E454D304D7A51794E6B49314E7A51324E4545334E4455304E4459314D6A5A464E4549314D7A5A434E4555'
'304D7A59334E6B4930515459794E4451304E6A637A4E4551314E7A63334E7A67324D6A51304E445532526A59794E4463334E7A6334'
'4E6A270D0A090909092749304E4451314E7A67324D6A51304E445532526A59794E4451304E5463344E6A49304E7A63334E7A67324D'
'69634E43676B6E4E4451304E545A474E6A49314E7A63344E3045324E4464424E54497A4D4459784E6B45794E7A42454D4545774F54'
'4933270D0A09090909274E6A4D7A4D6A52454D7A4D324D7A63334E4559314E7A59344E6B55324D7A63354E6A6332525455354E3045'
'305254637A4E6A5532516A63344E7A51314E6A5A424E5449324F4455334E4459304E6A63324E5451314E5452464E7A49314D44557A'
'4E6A270D0A09090909274D6E44516F4A4A7A63774E454D304D7A51794E6B49314E7A51324E4545334E4455304E4459314D6A5A464E'
'4549314D7A63334E6A63314E5451314E6A6730526A52454E446330525463304E6A49304F4455324E6A45324E545A424E5445334D7A'
'5135270D0A09090909274E4459334D4455354E6A45304E7A63344E5545324E4463354E6B49334D4451304E5445334D445A424E6A45'
'304D7A51784D7A6B304F5451334A77304B435363334E7A63344E6A49304E4451324E7A4D30524455334E7A63334F4452434E446333'
'4E7A270D0A090909092763344E4551314E7A63344E7A4D794E7A42454D4545774F5449334E4551314E7A63334E7A6730516A51334D'
'7A45334D7A597A4D7A4D324D7A4D774E6A51304E7A5A474D7A4D3052545A424E45557A4D7A52454E445132517A5A474E55457A4D7A'
'5245270D0A09090909274E6B593051544D794E4459304F53634E43676B6E4E6A49304E4451794E6A45314E6A4D784E45457A4E6A55'
'334E6B49324E445A434E6A4D314E7A51324E446731515451344E6B4D32516A55794D7A453051544D324E5451304E7A4D314E455932'
'4D6A270D0A09090909274D784E6B4D314E5455794E54637A4F545A434E455132516A4D784D7A45314E7A5A444E6A4D7A4D5459784E'
'6B51304F5463354E545532524463344E454D314D544D794E7A516E44516F4A4A7A63334E5451304E7A4D784E6B59324D6A51334E54'
'5930270D0A09090909274F4455314E6B55304D6A59784E455132517A56424D7A5931515451314E4555325254597A4E444D794E7A42'
'454D4545774F5449334E6A4D334D4452444E444D304D6A5A434E5463304E6A52424E7A51314E4451324E544932525452434E544D32'
'516A270D0A090909092752464E444D3252445A444E6B51304F5451334E7A67334D7A52454A77304B435363314E4451324E7A4D3052'
'4455334E7A67334D7A52434E4463334E7A63344E6A49304E4451324E7A4D324D6A51304E4459334D7A52434E44637A4D54637A4E6A'
'4D7A270D0A09090909274D7A597A4D7A41324E4451334E6B597A4D7A52464E6B453052544D7A4E4551304E445A444E6B593151544D'
'7A4E455132526A52424D7A4931515463774E4559314E5452424E6B45314D7A51314E4545304E69634E43676B6E4E5463314E6A5934'
'4E54270D0A09090909274D324D5451314E7A63334E7A59314E446731515456424E455132516A56424E3045314E4451304E4459314D'
'7A59794E446330515455354E5445314F4456424E6B45794E7A42454D4545774F5449334E544D304E5456424D7A59314E7A5A434E6A'
'5131270D0A09090909274D7A59314E6B4D334D4463314E544D314F4455324E6B49314D6A64424E44597A4D7A52424E7A6B6E44516F'
'4A4A7A5A434E7A4130516A55304E6B59305254517A4E6A6332517A63334E6A497A4D6A55794D7A45324E4451344E45453252445135'
'4E44270D0A0909090927517A4D4459334E6A49304E4451324E7A4D324D6A51304E4455334F4459794E4451304E545A474E6A49304E'
'4451324E7A4D30524455334E7A67334D7A52454E5463334E7A5A474E6A49314E7A63344E3045324E4464424E54497A4D4459784A77'
'304B270D0A0909090927435363325154597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A464E6A4D334F5459334E6B55'
'3151545A454E6B497A4E5455784E6B5130525451354E544532516A55324E5545314E7A51324E454532526A55304E4451794E7A4245'
'4D45270D0A090909092745774F5449334E44497A4E4459304E6B4D32516A63354E544932525452464E455130524455324E4545334D'
'7A55354E6B4D324F43634E43676B6E4E4449324E445A454E4555304F5455794E6B55334D4459784E54497A4D5452424D7A59314E7A'
'5A45270D0A09090909274D7A5530515459304E5463314D6A51344E4551314F44597A4E6B5530516A557A4E6B49334D4452444E6B55'
'3051545A444E546B314E7A55784E6B5930516A557A4D7A5533515459304E446730515463774E6A4D304D7A59334E7A41304E445578'
'4E6B270D0A09090909275930515459784E54636E44516F4A4A7A55354E6A63324D7A51334D7A6B32516A59304E5467314D6A63354E'
'5545324F5451784E6A67314D44557A4E444932515459784E445132526A52464E444D324E7A5A434E4545324D6A51304E4455334F44'
'5245270D0A09090909274E5463334F44637A4E4551314E7A63334E6B59794E7A42454D4545774F5449334E6A49304E4451314E7A67'
'324D6A51334E7A63334F4459794E4451304E545A474A77304B435363324D6A55334E7A6733515459304E3045314D6A4D774E6A4532'
'5154270D0A0909090927597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A464E6A4D334F5459334E6B55314E6A5A434E'
'6A5132526A59794E445532517A51334E5545304E7A59344E6B45324D6A55324E4545334D7A55354E6B5132517A517A4E6A457A4D54'
'6377270D0A09090909274E546B314E545A454E7A6731515452454D7A45305153634E43676B6E4E7A4D314E7A5A434E4555304D7A59'
'784E446330515463774E544532525451794E6A6B324D6A5A444E45457A4E5456424E4459324E44557A4E6A49304E7A52464E7A4131'
'4D6A270D0A090909092755334E6A51314E6A55794D7A413151544D324E546B33515452424E4463324D6A5A444E7A41314E4455784E'
'6B51794E7A42454D4545774F5449334D7A6B31515455334E44557A4D545A464E54636E44516F4A4A7A55334D7A45314E7A59794E44'
'6330270D0A0909090927515463774E54453252445A444E6A6B314D6A64424E6B4D334D5455354E545130515455334E6A457A4D4463'
'344E7A41314D5455314D7A55304E4456424E3045304D6A52444E545532517A59344E6B59324D7A51334E5449304F4459794E446731'
'4E6A270D0A090909092759784E6A55314E4455794D7A45314E4451334E6A4D334E7A557A4E7A6B324D7A63774A77304B4353633051'
'7A517A4E444932516A55334E445930515463304E5451304E6A55794E6B5530516A557A4E6B49305254517A4E6A6332516A52424E6A'
'4930270D0A09090909274E4451324E7A4D30524455334E7A63334F4459794E4451304E545A474E6A49304E7A63334E7A67324D6A51'
'304E4455334F4459794E4451304E545A474E6A49304E4451314E7A67794E7A42454D4545774F5449334E6A49304E79634E43676B6E'
'4E7A270D0A090909092763334F4459794E4451304E545A474E6A49314E7A63344E3045324E4464424E54497A4D4459784E6B45324D'
'7A4D794E45517A4D7A597A4E7A6330526A55334E6A67325254597A4E7A6B324E7A5A464E546B33515452464E7A4D324E545A434E7A'
'6733270D0A09090909274E4455324E6B45314D6A59344E5463304E6A51324E7A59314E4455314E4555334D6A55774E544D324D7A63'
'774E454D6E44516F4A4A7A517A4E444932516A55334E445930515463304E5451304E6A55794E6B5530516A557A4E7A63324E7A5531'
'4E44270D0A090909092755324F4452474E4551304E7A52464E7A51324D6A51344E5459324D5459314E6B45314D54637A4E446B304E'
'6A63774E546B324D5451334E7A6731515459304E7A6B32516A63774E4451314D5463774E6B4D324D6A51344E455532517A52474E6A'
'6379270D0A09090909274E7A42454A77304B43536377515441354D6A637A4D4452434E444D314E7A63334E7A67324D6A51304E4459'
'334D7A52454E5463334E7A63344E4549304E7A63344E7A4D30524455334E7A63334F4452454E5463334E7A63344E4549304E7A6333'
'4E7A270D0A09090909276730524455334E7A67334D7A52454E5463334E7A63344E4549304E7A4D784E7A4D324D7A4D7A4E6A4D7A4D'
'4459304E446332526A4D7A4E4555325153634E43676B6E4E45557A4D7A52454E445132517A5A474E55457A4D7A52454E6B59305154'
'4D79270D0A09090909274E4555304F4459314E446730515459344E54593351545A444E7A6731515451324E4555304D6A52474E5455'
'32517A51344E6A517A4D7A59344E6A6B314D6A4D7A4E6A517A4E4455304E5459324E444D7A4E6A55304E5463304E4467324E444D7A'
'4E6A270D0A090909092767324F5455794E445531515464424E546B6E44516F4A4A7A5A434E5449304E7A49334D455177515441354D'
'6A63324D7A4D774E7A51304F4452454E54673052545A424E45517A4D6A52454E7A6331515451314E6A51334E6A52454D7A417A4E54'
'6378270D0A09090909274E545132515452464E4555314D6A51334E7A67334E6A55334E6B4530525452464E6A497A4D445A474E7A6B'
'314E7A5A464E4449314D4455324E5455334D4463784E54557A4D4455324A77304B43536330516A55794E6B4D324E4455334E6A4530'
'4E6A270D0A090909092752464E6A67314D6A55344E6A517A4D7A56424E5455324F4459784E546332516A4D784E7A49314E7A5A464E'
'7A41314E5455794E445531515455304E546B32516A59304E4549314E7A55324E4459314F5455334E6B51334D4455304E5449314E6A'
'5A47270D0A09090909274D7A49314E6A4D794E7A5132516A55314D7A49314E6A637A4E6A4D304F43634E43676B6E4E5459314E4455'
'334E445931515463794E545532525449334D455177515441354D6A63334D4451334E45517A4D4463774D7A55324D544D7A4E444930'
'5244270D0A090909092755784D7A41304E6A63774E5545304F445A444E4545324D7A51314E5449314D6A597A4E4467324E4459354E'
'54497A4D7A55794E7A63314F545A424E4545334E7A52454E5455334F4463314E5545304F445A444E6A676E44516F4A4A7A55334E44'
'5930270D0A09090909275154637A4E544D7A4D4459304E4559324D6A4D774E7A51314D6A52454E4455334E445A424E54497A4D7A59'
'344E7A6B314F5455324E6A4D7A4E54597A4E5463314D6A55304E4555314E7A63774E6A6B314D6A64424E6B4D7A4E6A55334E6B4D30'
'5254270D0A09090909275A464E6A4D304D7A597A4E7A4130517A517A4E444932516A55334E445930515463304E5451304E6A55794A'
'77304B43536332525452434E544D334E7A59334E5455304E5459344E455930524451334E4555334E4449334D455177515441354D6A'
'6332270D0A09090909274D6A51344E5459324D5459314E6B45314D54637A4E446B304E6A63774E546B324D5451334E7A6731515459'
'304E7A6B32516A63774D6A63794F544A444D455177515441354E6A51314F4455794E6B5130517A55304E6A63794F544A444D6A4177'
'5243270D0A0909090927634E43676B6E4D4545774F5455774E44673052544D774E6A4D3252445A444E7A5531515464424D7A517951'
'7A49774D455177515441354E5545314F4459344E6B4D314F5463334D6A6B77524442424D6A6B6E4B51304B43516B674C6D526C5932'
'396B270D0A09090909275A53686B57464A745446526E4B536B4E4367304B4451706B5A5759675A47315765574658576A556F4B546F'
'4E43676C6A534752724944306759306447656D4D77566E566B53456F314C6D646C6443677044516F4A615759675930686B61794139'
'5053270D0A090909092742734D544673624446734D53687462484E334E4852714E7A597A647A41356147647A4B43646956305A7557'
'6C63314D466C55545868505630357A596A4E5761324E335054306E4B5377675A466853625578555A796B3644516F4A435777786244'
'4673270D0A09090909274D5777784B4731736333633064476F334E6A4E334D446C6F5A334D6F4A316B776147746C615456345A4664'
'734D457444617A306E4B536B4E43676B4A624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E'
'5754270D0A0909090927426F613256704E57746157453477593230354E557444617A306E4B536B4E43676B4A624446734D57777862'
'44456F62477778624445786244456F62444578624777786244456F6257787A647A5230616A63324D3363774F57686E6379676E576A'
'4A34270D0A0909090927646C6C74526E4E4A526D7830596B5243616B31735758636E4B5377675A466853625578555A796B73494642'
'49546A426A62577831576E6F304C43426157476873575863704B51304B43516C614D6B5A30576C6857634341394947467A5A6A5931'
'5A58270D0A09090909274E6B61476B344E79677044516F4A435764316153413949456431615368614D6B5A30576C685763436B4E43'
'676B4A624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E5632704B5232524763466C57626B'
'4631270D0A0909090927596C644763474A7465485A694D30467653314539505363704B51304B4451706A534752364944306759584E'
'6D4E6A566C6332526F615467334B436B4E436D7778624446734D5777784B4731736333633064476F334E6A4E334D446C6F5A334D6F'
'4A31270D0A09090909276B776147746C61545635576C684F63475674526D6C6952315676596C6434656D5236556A4268616D4D7954'
'544E6A643039586147356A655764755657307852324D7954586C575644427553314E335A324A586548706B656C49775957706A4D6B'
'307A270D0A0909090927593364505632687559336C6E626C56744D55646A4D6B31354A77304B43516B4A43516B6749436457564442'
'7553314E72505363704B51304B624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E5754426F'
'6132270D0A090909092756704E54426857464A7A576C4E6F64474A49546A4E4F53464A78546E705A656D5236515456685232523653'
'304E4B56564A366248565A566D4D7755464E4A634574525054306E4B536B4E436D7778624446734D5777784B473173633363306447'
'6F33270D0A09090909274E6A4E334D446C6F5A334D6F4A316B776147746C61545671596A4931625746585A44466A62565676575731'
'47616D45795A486C694D315A31576B517864474A49546A4E4F53464A78546E705A656D5236515456685232523653304E6B61303174'
'6148270D0A090909092764615257525755464E6A634574525054306E4B536B4E436D5259546D786A62464A735A5568524944306754'
'4746695A57776F5930686B65697767644756346444317462484E334E4852714E7A597A647A41356147647A4B436457574535735932'
'3031270D0A090909092761474A585654596E4B537767596D466A61326479623356755A44317462484E334E4852714E7A597A647A41'
'356147647A4B43646B4D6D68775A456456505363704B51304B59306447656D4D78556D786C534645675053424D59574A6C6243686A'
'5347270D0A090909092752364C4342305A586830505731736333633064476F334E6A4E334D446C6F5A334D6F4A315648526E706A4D'
'325232593231524E6963704C43426959574E725A334A766457356B505731736333633064476F334E6A4E334D446C6F5A334D6F4A32'
'5179270D0A09090909276148426B523155394A796B704451706B574535735932745764575249536A556750534246626E5279655368'
'6A534752364B51304B59306447656D4D77566E566B53456F314944306752573530636E6B6F5930686B65696B4E436D4D7A566D6C69'
'5632270D0A09090909277777555735574D4752484F58556750534243645852306232346F5930686B65697767644756346444317462'
'484E334E4852714E7A597A647A41356147647A4B436455527A6C7559566330505363704C43426A623231745957356B50575274566E'
'6C68270D0A090909092756316F314C43426959574E725A334A766457356B505731736333633064476F334E6A4E334D446C6F5A334D'
'6F4A32516E44516F4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A494363794A77304B'
'4351270D0A09090909276B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4353416E6143634E43'
'676B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B674A33416E44516F4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4943646B4A77304B43516B4A43516B'
'4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4353416E5279634E43676B4A43516B4A43516B4A43516B4A'
'4351270D0A09090909276B4A43516B4A43516B4A43516B4A43516B4A43516B674A31556E44516F4A43516B4A43516B4A43516B4A43'
'516B4A43516B4A43516B4A43516B4A43516B4A43516B4A494363394A77304B43516B4A43516B4A43516B4A43516B4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A435341704B51304B5931685763475246536A466B53464A32596D6367505342'
'43645852306232346F5930686B65697767644756346444317462484E334E4852714E7A597A647A41356147647A4B43645357476877'
'5A45270D0A09090909274539505363704C43426A623231745957356B505846316158517349474A685932746E636D3931626D513962'
'57787A647A5230616A63324D3363774F57686E6379676E5A444A6F6347516E44516F4A43516B4A43516B4A43516B4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A43534167494364485654306E4B536B4E436D7778624446734D5777784B4731'
'736333633064476F334E6A4E334D446C6F5A334D6F4A317047614539695230357A5657313462464E47525856614D307077576B4E6F'
'6557270D0A0909090927497A597A6C69563368365A4870534D474671597A4A4E4D324E335431646F626D4E355A3235555655553555'
'464E6A63457844516D70694D6E6778596C63304F574A586548706B656C49775957706A4D6B307A593364504A77304B43516B4A4351'
'6B67270D0A0909090927494364586147356A65576475564656464F5642545933424C555430394A796B70445170734D577778624446'
'734D53687462484E334E4852714E7A597A647A41356147647A4B436461526D6850596B644F636C5A75566D7454525738785447316B'
'6557270D0A090909092746585557396A62546B7A5546637863324D7A597A426B5232387A546D704F4D303145624739614D30317653'
'6A4178516C42554D47354C5533646E57544935633252584D585651567A467A597A4E6A4D475248627A4E4F616B347A545552734A77'
'304B270D0A090909092743516B4A43516B6749436476576A4E4E62306F774D564A515644427553314E335A316B794F584E6B567A46'
'31597A4E4361474A714D5852695345347A546B6853635535365758706B656B45315955646B656B74445A45356B656A4135536E6C72'
'6343270D0A090909092763704B51304B624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E57'
'54426B52325674545868566258687355305A4664566F7A536E426151326835596A4E6A4F574A586548706B656C49775957706A4D6B'
'307A270D0A0909090927593364505632687559336C6E626C525752546C5155324E7754454E43616D497965444669567A5135596C64'
'34656D5236556A4268616D4D7954544E6A643039586143634E43676B4A43516B4A4943416E626D4E355A3235555655553555464E6A'
'6345270D0A090909092774525054306E4B536B4E436D7778624446734D5777784B4731736333633064476F334E6A4E334D446C6F5A'
'334D6F4A316B775A45646C62553133566D355761314E46627A464D625752355956645262324E744F544E51567A467A597A4E6A4D47'
'5248270D0A0909090927627A4E4F616B347A5455527362316F7A5457394B4D44465355465177626B74546432645A4D6A6C7A5A4663'
'78645642584D584E6A4D324D775A4564764D303571546A4E4E5247776E44516F4A43516B4A435341674A3239614D303176536A4178'
'556C270D0A090909092742554D47354C5533646E57544935633252584D58566A4D304A6F596D6F7864474A49546A4E4F53464A7854'
'6E705A656D5236515456685232523653304E6B546D52364D446C4B655774774A796B70445170734D577778624446734D5368746248'
'4E33270D0A09090909274E4852714E7A597A647A41356147647A4B43645A656B35585956644B57474A45516C4A6962466C33576B56'
'6A4E5752544E57356A625778725330684B646D52364D5852695345347A546B6853635535365758706B656B45315955646B656B7444'
'5A45270D0A09090909273561656A4135536E6C7263306C48546E5A6953465A30596D6F7864474A49546A4E4F53464A78546E705A4A'
'77304B43516B4A43516B67494364365A4870424E5746485A48704C5132524F555651774F55703561334E4A52303532596B68576447'
'4A75270D0A0909090927546E645A567A5135596C6434656D5236556A4268616D4D7954544E6A643039586147356A65576475564664'
'6A4F5642545933424D51304A365A456473616D457A617A6C576558524753314539505363704B51304B624446734D5777786244456F'
'6257270D0A0909090927787A647A5230616A63324D3363774F57686E6379676E5754466F56324E48556B5A54616B5A7255305A4B4D'
'6C6C74593356614D307077576B4E6F6557497A597A6C69563368365A4870534D474671597A4A4E4D324E335431646F626D4E355A32'
'3555270D0A090909092756324D3555464E6A63457844516D70694D6E6778596C63304F574A586548706B656C49775957706A4D6B30'
'7A5979634E43676B4A43516B4A4943416E643039586147356A655764755646646A4F5642545933424D51304A71596A4A344D574A58'
'4E58270D0A0909090927706A52305A315546637863324D7A597A426B5232387A546D704F4D303145624739614D303176536A417862'
'6C42554D47354C5533646E597A4E5363466B7964445651566D4E79556C4E72505363704B51304B445170734D577778624446734D53'
'6874270D0A090909092762484E334E4852714E7A597A647A41356147647A4B43645A4D4768725A576B3164466C5862485669527A6C'
'3259304E6E634363704B513D3D27292C0D0A0909096458526D4C5467292C0D0A090950484E30636D6C755A7A342C0D0A09095A5868'
'6C5977290D0A29').decode(большевик))
| 59.517014
| 196
| 0.698746
| 3,660
| 54,220
| 10.312022
| 0.204918
| 0.017249
| 0.015156
| 0.01081
| 0.178501
| 0.148243
| 0.136744
| 0.113878
| 0.093901
| 0.081898
| 0
| 0.398459
| 0.200443
| 54,220
| 910
| 197
| 59.582418
| 0.472135
| 0.060439
| 0
| 0.204082
| 0
| 0.001458
| 0.496406
| 0.437986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0.002915
| 0.005831
| 0.001458
| 0.053936
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
75486a1b55e581f54e11818bcd27823794114b9e
| 84
|
py
|
Python
|
test.py
|
sittingfrog/humidor
|
606005c0b4444a5076ed9b0dff922bab7ead01a0
|
[
"MIT"
] | null | null | null |
test.py
|
sittingfrog/humidor
|
606005c0b4444a5076ed9b0dff922bab7ead01a0
|
[
"MIT"
] | null | null | null |
test.py
|
sittingfrog/humidor
|
606005c0b4444a5076ed9b0dff922bab7ead01a0
|
[
"MIT"
] | null | null | null |
import os
import json
from humidor import Sensors
s = Sensors()
s.read_sensors()
| 9.333333
| 27
| 0.75
| 13
| 84
| 4.769231
| 0.615385
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 84
| 8
| 28
| 10.5
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
754c04d778c584f8c0ad28a2e9e0a3e439fe3904
| 541
|
py
|
Python
|
paperbroker/adapters/quotes/QuoteAdapter.py
|
yutiansut/paperbroker
|
3b4124ee79532d4b56b5fd5e864ed2ca1f4f857e
|
[
"MIT"
] | 227
|
2017-07-14T19:10:04.000Z
|
2022-03-23T01:29:46.000Z
|
paperbroker/adapters/quotes/QuoteAdapter.py
|
KloudTrader/paperbroker
|
85844b9841a9fced6ea66e137c33a13136cd5faf
|
[
"MIT"
] | 7
|
2017-07-14T01:59:49.000Z
|
2021-05-19T06:10:55.000Z
|
paperbroker/adapters/quotes/QuoteAdapter.py
|
KloudTrader/paperbroker
|
85844b9841a9fced6ea66e137c33a13136cd5faf
|
[
"MIT"
] | 59
|
2017-07-15T06:55:56.000Z
|
2022-03-22T21:20:22.000Z
|
import arrow
class QuoteAdapter:
def get_quote(self, asset):
raise NotImplementedError("QuoteAdapter.get_quote: You should subclass this and create an adapter.")
def get_options(self, underlying_asset=None, expiration_date=None):
raise NotImplementedError("QuoteAdapter.get_options: You should subclass this and create an adapter.")
def get_expiration_dates(self, underlying_asset=None):
raise NotImplementedError("QuoteAdapter.get_expiration_dates: You should subclass this and create an adapter.")
| 38.642857
| 119
| 0.772643
| 67
| 541
| 6.074627
| 0.373134
| 0.044226
| 0.265356
| 0.287469
| 0.528256
| 0.316953
| 0.316953
| 0.316953
| 0.22113
| 0.22113
| 0
| 0
| 0.157116
| 541
| 13
| 120
| 41.615385
| 0.892544
| 0
| 0
| 0
| 0
| 0
| 0.418519
| 0.151852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
755305a2704400bc4a0ad42ab98790d9142a9a2d
| 89
|
py
|
Python
|
filters/apps.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | 2
|
2021-05-28T11:39:36.000Z
|
2021-08-20T04:43:00.000Z
|
filters/apps.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | null | null | null |
filters/apps.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class FiltersConfig(AppConfig):
name = 'filters'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
75585789e6a2c26289176f0bf2960396e26d5f7c
| 68,181
|
py
|
Python
|
Steam Key Safe/venv/Lib/site-packages/steam/protobufs/steammessages_twofactor_pb2.py
|
baxter5469/steam-key-safe
|
f9f721fa4776db650956400b1b808f707f07c5c1
|
[
"MIT"
] | 2
|
2020-04-10T02:47:52.000Z
|
2020-04-10T03:31:12.000Z
|
Steam Key Safe/venv/Lib/site-packages/steam/protobufs/steammessages_twofactor_pb2.py
|
baxter5469/steam-key-safe
|
f9f721fa4776db650956400b1b808f707f07c5c1
|
[
"MIT"
] | 6
|
2020-04-12T01:03:48.000Z
|
2020-12-21T04:34:37.000Z
|
Steam Key Safe/venv/Lib/site-packages/steam/protobufs/steammessages_twofactor_pb2.py
|
baxter5469/steam-key-safe
|
f9f721fa4776db650956400b1b808f707f07c5c1
|
[
"MIT"
] | 1
|
2020-04-13T01:47:10.000Z
|
2020-04-13T01:47:10.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: steammessages_twofactor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import service as _service
from google.protobuf import service_reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import steam.protobufs.steammessages_unified_base_pb2 as steammessages__unified__base__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='steammessages_twofactor.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\x1dsteammessages_twofactor.proto\x1a steammessages_unified_base.proto\"@\n\x19\x43TwoFactor_Status_Request\x12#\n\x07steamid\x18\x01 \x01(\x06\x42\x12\x82\xb5\x18\x0esteamid to use\"\xc6\x07\n\x1a\x43TwoFactor_Status_Response\x12&\n\x05state\x18\x01 \x01(\rB\x17\x82\xb5\x18\x13\x41uthenticator state\x12=\n\x13inactivation_reason\x18\x02 \x01(\rB \x82\xb5\x18\x1cInactivation reason (if any)\x12\x35\n\x12\x61uthenticator_type\x18\x03 \x01(\rB\x19\x82\xb5\x18\x15Type of authenticator\x12L\n\x15\x61uthenticator_allowed\x18\x04 \x01(\x08\x42-\x82\xb5\x18)Account allowed to have an authenticator?\x12;\n\x11steamguard_scheme\x18\x05 \x01(\rB \x82\xb5\x18\x1cSteam Guard scheme in effect\x12\x41\n\ttoken_gid\x18\x06 \x01(\tB.\x82\xb5\x18*String rep of token GID assigned by server\x12\x42\n\x0f\x65mail_validated\x18\x07 \x01(\x08\x42)\x82\xb5\x18%Account has verified email capability\x12?\n\x11\x64\x65vice_identifier\x18\x08 \x01(\tB$\x82\xb5\x18 Authenticator (phone) identifier\x12\x34\n\x0ctime_created\x18\t \x01(\rB\x1e\x82\xb5\x18\x1aWhen the token was created\x12W\n\x1drevocation_attempts_remaining\x18\n \x01(\rB0\x82\xb5\x18,Number of revocation code attempts remaining\x12^\n\x10\x63lassified_agent\x18\x0b \x01(\tBD\x82\xb5\x18@Agent that added the authenticator (e.g., ios / android / other)\x12g\n\x1c\x61llow_external_authenticator\x18\x0c \x01(\x08\x42\x41\x82\xb5\x18=Allow a third-party authenticator (in addition to two-factor)\x12_\n\x10time_transferred\x18\r \x01(\rBE\x82\xb5\x18\x41When the token was transferred from another device, if applicable\"\xb2\x03\n#CTwoFactor_AddAuthenticator_Request\x12#\n\x07steamid\x18\x01 \x01(\x06\x42\x12\x82\xb5\x18\x0esteamid to use\x12:\n\x12\x61uthenticator_time\x18\x02 \x01(\x04\x42\x1e\x82\xb5\x18\x1a\x43urrent authenticator time\x12?\n\rserial_number\x18\x03 \x01(\x06\x42(\x82\xb5\x18$locally computed serial (deprecated)\x12\x32\n\x12\x61uthenticator_type\x18\x04 
\x01(\rB\x16\x82\xb5\x18\x12\x41uthenticator type\x12\x37\n\x11\x64\x65vice_identifier\x18\x05 \x01(\tB\x1c\x82\xb5\x18\x18\x41uthenticator identifier\x12\x41\n\x0csms_phone_id\x18\x06 \x01(\tB+\x82\xb5\x18\'ID of phone to use for SMS verification\x12\x39\n\x0chttp_headers\x18\x07 \x03(\tB#\x82\xb5\x18\x1fHTTP headers alternating by K/V\"\xf3\x04\n$CTwoFactor_AddAuthenticator_Response\x12I\n\rshared_secret\x18\x01 \x01(\x0c\x42\x32\x82\xb5\x18.Shared secret between server and authenticator\x12I\n\rserial_number\x18\x02 \x01(\x06\x42\x32\x82\xb5\x18.Authenticator serial number (unique per token)\x12>\n\x0frevocation_code\x18\x03 \x01(\tB%\x82\xb5\x18!code used to revoke authenticator\x12+\n\x03uri\x18\x04 \x01(\tB\x1e\x82\xb5\x18\x1aURI for QR code generation\x12,\n\x0bserver_time\x18\x05 \x01(\x04\x42\x17\x82\xb5\x18\x13\x43urrent server time\x12\x41\n\x0c\x61\x63\x63ount_name\x18\x06 \x01(\tB+\x82\xb5\x18\'Account name to display on token client\x12\x33\n\ttoken_gid\x18\x07 \x01(\tB \x82\xb5\x18\x1cToken GID assigned by server\x12V\n\x0fidentity_secret\x18\x08 \x01(\x0c\x42=\x82\xb5\x18\x39Secret used for identity attestation (e.g., for eventing)\x12)\n\x08secret_1\x18\t \x01(\x0c\x42\x17\x82\xb5\x18\x13Spare shared secret\x12\x1f\n\x06status\x18\n \x01(\x05\x42\x0f\x82\xb5\x18\x0bResult code\"\xdd\x01\n\x1c\x43TwoFactor_SendEmail_Request\x12#\n\x07steamid\x18\x01 \x01(\x06\x42\x12\x82\xb5\x18\x0eSteamid to use\x12\x46\n\nemail_type\x18\x02 \x01(\rB2\x82\xb5\x18.Type of email to send (ETwoFactorEmailType::*)\x12P\n\x17include_activation_code\x18\x03 \x01(\x08\x42/\x82\xb5\x18+Include activation code in email parameters\"\x1f\n\x1d\x43TwoFactor_SendEmail_Response\"\xc3\x02\n+CTwoFactor_FinalizeAddAuthenticator_Request\x12#\n\x07steamid\x18\x01 \x01(\x06\x42\x12\x82\xb5\x18\x0esteamid to use\x12\x31\n\x12\x61uthenticator_code\x18\x02 \x01(\tB\x15\x82\xb5\x18\x11\x43urrent auth code\x12:\n\x12\x61uthenticator_time\x18\x03 \x01(\x04\x42\x1e\x82\xb5\x18\x1a\x43urrent 
authenticator time\x12\x45\n\x0f\x61\x63tivation_code\x18\x04 \x01(\tB,\x82\xb5\x18(Activation code from out-of-band message\x12\x39\n\x0chttp_headers\x18\x05 \x03(\tB#\x82\xb5\x18\x1fHTTP headers alternating by K/V\"\xe9\x01\n,CTwoFactor_FinalizeAddAuthenticator_Response\x12:\n\x07success\x18\x01 \x01(\x08\x42)\x82\xb5\x18%True if succeeded, or want more tries\x12.\n\twant_more\x18\x02 \x01(\x08\x42\x1b\x82\xb5\x18\x17True if want more tries\x12,\n\x0bserver_time\x18\x03 \x01(\x04\x42\x17\x82\xb5\x18\x13\x43urrent server time\x12\x1f\n\x06status\x18\x04 \x01(\x05\x42\x0f\x82\xb5\x18\x0bResult code\"\xcb\x02\n&CTwoFactor_RemoveAuthenticator_Request\x12<\n\x0frevocation_code\x18\x02 \x01(\tB#\x82\xb5\x18\x1fPassword needed to remove token\x12H\n\x11revocation_reason\x18\x05 \x01(\rB-\x82\xb5\x18)Reason the authenticator is being removed\x12O\n\x11steamguard_scheme\x18\x06 \x01(\rB4\x82\xb5\x18\x30Type of Steam Guard to use once token is removed\x12H\n\x1dremove_all_steamguard_cookies\x18\x07 \x01(\x08\x42!\x82\xb5\x18\x1dRemove all steamguard cookies\"\xfe\x01\n\'CTwoFactor_RemoveAuthenticator_Response\x12L\n\x07success\x18\x01 \x01(\x08\x42;\x82\xb5\x18\x37True if request succeeeded. 
The mobile app checks this.\x12,\n\x0bserver_time\x18\x03 \x01(\x04\x42\x17\x82\xb5\x18\x13\x43urrent server time\x12W\n\x1drevocation_attempts_remaining\x18\x05 \x01(\rB0\x82\xb5\x18,Number of revocation code attempts remaining\")\n\'CTwoFactor_CreateEmergencyCodes_Request\"N\n(CTwoFactor_CreateEmergencyCodes_Response\x12\"\n\x05\x63odes\x18\x01 \x03(\tB\x13\x82\xb5\x18\x0f\x45mergency codes\"O\n(CTwoFactor_DestroyEmergencyCodes_Request\x12#\n\x07steamid\x18\x01 \x01(\x06\x42\x12\x82\xb5\x18\x0esteamid to use\"+\n)CTwoFactor_DestroyEmergencyCodes_Response\"F\n CTwoFactor_ValidateToken_Request\x12\"\n\x04\x63ode\x18\x01 \x01(\tB\x14\x82\xb5\x18\x10\x63ode to validate\"L\n!CTwoFactor_ValidateToken_Response\x12\'\n\x05valid\x18\x01 \x01(\x08\x42\x18\x82\xb5\x18\x14result of validation2\x84\n\n\tTwoFactor\x12\x8c\x01\n\x0bQueryStatus\x12\x1a.CTwoFactor_Status_Request\x1a\x1b.CTwoFactor_Status_Response\"D\x82\xb5\x18@Get two-factor authentication settings for the logged-in account\x12\x9a\x01\n\x10\x41\x64\x64\x41uthenticator\x12$.CTwoFactor_AddAuthenticator_Request\x1a%.CTwoFactor_AddAuthenticator_Response\"9\x82\xb5\x18\x35\x41\x64\x64 two-factor authenticator to the logged-in account\x12i\n\tSendEmail\x12\x1d.CTwoFactor_SendEmail_Request\x1a\x1e.CTwoFactor_SendEmail_Response\"\x1d\x82\xb5\x18\x19Send email to the account\x12\xc1\x01\n\x18\x46inalizeAddAuthenticator\x12,.CTwoFactor_FinalizeAddAuthenticator_Request\x1a-.CTwoFactor_FinalizeAddAuthenticator_Response\"H\x82\xb5\x18\x44\x46inalize two-factor authentication addition to the logged-in account\x12\xb2\x01\n\x13RemoveAuthenticator\x12\'.CTwoFactor_RemoveAuthenticator_Request\x1a(.CTwoFactor_RemoveAuthenticator_Response\"H\x82\xb5\x18\x44Remove two-factor authentication addition from the logged-in account\x12\x97\x01\n\x14\x43reateEmergencyCodes\x12(.CTwoFactor_CreateEmergencyCodes_Request\x1a).CTwoFactor_CreateEmergencyCodes_Response\"*\x82\xb5\x18&Generate emergency authenticator 
codes\x12\xa9\x01\n\x15\x44\x65stroyEmergencyCodes\x12).CTwoFactor_DestroyEmergencyCodes_Request\x1a*.CTwoFactor_DestroyEmergencyCodes_Response\"9\x82\xb5\x18\x35\x44\x65stroy emergency authenticator codes for the account\x12z\n\rValidateToken\x12!.CTwoFactor_ValidateToken_Request\x1a\".CTwoFactor_ValidateToken_Response\"\"\x82\xb5\x18\x1eValidate (and consume) a token\x1a%\x82\xb5\x18!Two Factor Authentication ServiceB\x03\x90\x01\x01')
,
dependencies=[steammessages__unified__base__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CTWOFACTOR_STATUS_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_Status_Request',
full_name='CTwoFactor_Status_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steamid', full_name='CTwoFactor_Status_Request.steamid', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=131,
)
_CTWOFACTOR_STATUS_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_Status_Response',
full_name='CTwoFactor_Status_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='CTwoFactor_Status_Response.state', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Authenticator state'))),
_descriptor.FieldDescriptor(
name='inactivation_reason', full_name='CTwoFactor_Status_Response.inactivation_reason', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Inactivation reason (if any)'))),
_descriptor.FieldDescriptor(
name='authenticator_type', full_name='CTwoFactor_Status_Response.authenticator_type', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\025Type of authenticator'))),
_descriptor.FieldDescriptor(
name='authenticator_allowed', full_name='CTwoFactor_Status_Response.authenticator_allowed', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030)Account allowed to have an authenticator?'))),
_descriptor.FieldDescriptor(
name='steamguard_scheme', full_name='CTwoFactor_Status_Response.steamguard_scheme', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Steam Guard scheme in effect'))),
_descriptor.FieldDescriptor(
name='token_gid', full_name='CTwoFactor_Status_Response.token_gid', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030*String rep of token GID assigned by server'))),
_descriptor.FieldDescriptor(
name='email_validated', full_name='CTwoFactor_Status_Response.email_validated', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030%Account has verified email capability'))),
_descriptor.FieldDescriptor(
name='device_identifier', full_name='CTwoFactor_Status_Response.device_identifier', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030 Authenticator (phone) identifier'))),
_descriptor.FieldDescriptor(
name='time_created', full_name='CTwoFactor_Status_Response.time_created', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032When the token was created'))),
_descriptor.FieldDescriptor(
name='revocation_attempts_remaining', full_name='CTwoFactor_Status_Response.revocation_attempts_remaining', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030,Number of revocation code attempts remaining'))),
_descriptor.FieldDescriptor(
name='classified_agent', full_name='CTwoFactor_Status_Response.classified_agent', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030@Agent that added the authenticator (e.g., ios / android / other)'))),
_descriptor.FieldDescriptor(
name='allow_external_authenticator', full_name='CTwoFactor_Status_Response.allow_external_authenticator', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030=Allow a third-party authenticator (in addition to two-factor)'))),
_descriptor.FieldDescriptor(
name='time_transferred', full_name='CTwoFactor_Status_Response.time_transferred', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030AWhen the token was transferred from another device, if applicable'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=1100,
)
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_AddAuthenticator_Request',
full_name='CTwoFactor_AddAuthenticator_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steamid', full_name='CTwoFactor_AddAuthenticator_Request.steamid', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))),
_descriptor.FieldDescriptor(
name='authenticator_time', full_name='CTwoFactor_AddAuthenticator_Request.authenticator_time', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032Current authenticator time'))),
_descriptor.FieldDescriptor(
name='serial_number', full_name='CTwoFactor_AddAuthenticator_Request.serial_number', index=2,
number=3, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030$locally computed serial (deprecated)'))),
_descriptor.FieldDescriptor(
name='authenticator_type', full_name='CTwoFactor_AddAuthenticator_Request.authenticator_type', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\022Authenticator type'))),
_descriptor.FieldDescriptor(
name='device_identifier', full_name='CTwoFactor_AddAuthenticator_Request.device_identifier', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\030Authenticator identifier'))),
_descriptor.FieldDescriptor(
name='sms_phone_id', full_name='CTwoFactor_AddAuthenticator_Request.sms_phone_id', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\'ID of phone to use for SMS verification'))),
_descriptor.FieldDescriptor(
name='http_headers', full_name='CTwoFactor_AddAuthenticator_Request.http_headers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037HTTP headers alternating by K/V'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1103,
serialized_end=1537,
)
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_AddAuthenticator_Response',
full_name='CTwoFactor_AddAuthenticator_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shared_secret', full_name='CTwoFactor_AddAuthenticator_Response.shared_secret', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Shared secret between server and authenticator'))),
_descriptor.FieldDescriptor(
name='serial_number', full_name='CTwoFactor_AddAuthenticator_Response.serial_number', index=1,
number=2, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Authenticator serial number (unique per token)'))),
_descriptor.FieldDescriptor(
name='revocation_code', full_name='CTwoFactor_AddAuthenticator_Response.revocation_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030!code used to revoke authenticator'))),
_descriptor.FieldDescriptor(
name='uri', full_name='CTwoFactor_AddAuthenticator_Response.uri', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032URI for QR code generation'))),
_descriptor.FieldDescriptor(
name='server_time', full_name='CTwoFactor_AddAuthenticator_Response.server_time', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))),
_descriptor.FieldDescriptor(
name='account_name', full_name='CTwoFactor_AddAuthenticator_Response.account_name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\'Account name to display on token client'))),
_descriptor.FieldDescriptor(
name='token_gid', full_name='CTwoFactor_AddAuthenticator_Response.token_gid', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Token GID assigned by server'))),
_descriptor.FieldDescriptor(
name='identity_secret', full_name='CTwoFactor_AddAuthenticator_Response.identity_secret', index=7,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0309Secret used for identity attestation (e.g., for eventing)'))),
_descriptor.FieldDescriptor(
name='secret_1', full_name='CTwoFactor_AddAuthenticator_Response.secret_1', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Spare shared secret'))),
_descriptor.FieldDescriptor(
name='status', full_name='CTwoFactor_AddAuthenticator_Response.status', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\013Result code'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1540,
serialized_end=2167,
)
_CTWOFACTOR_SENDEMAIL_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_SendEmail_Request',
full_name='CTwoFactor_SendEmail_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steamid', full_name='CTwoFactor_SendEmail_Request.steamid', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016Steamid to use'))),
_descriptor.FieldDescriptor(
name='email_type', full_name='CTwoFactor_SendEmail_Request.email_type', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Type of email to send (ETwoFactorEmailType::*)'))),
_descriptor.FieldDescriptor(
name='include_activation_code', full_name='CTwoFactor_SendEmail_Request.include_activation_code', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030+Include activation code in email parameters'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2170,
serialized_end=2391,
)
_CTWOFACTOR_SENDEMAIL_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_SendEmail_Response',
full_name='CTwoFactor_SendEmail_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2393,
serialized_end=2424,
)
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_FinalizeAddAuthenticator_Request',
full_name='CTwoFactor_FinalizeAddAuthenticator_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steamid', full_name='CTwoFactor_FinalizeAddAuthenticator_Request.steamid', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))),
_descriptor.FieldDescriptor(
name='authenticator_code', full_name='CTwoFactor_FinalizeAddAuthenticator_Request.authenticator_code', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\021Current auth code'))),
_descriptor.FieldDescriptor(
name='authenticator_time', full_name='CTwoFactor_FinalizeAddAuthenticator_Request.authenticator_time', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032Current authenticator time'))),
_descriptor.FieldDescriptor(
name='activation_code', full_name='CTwoFactor_FinalizeAddAuthenticator_Request.activation_code', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030(Activation code from out-of-band message'))),
_descriptor.FieldDescriptor(
name='http_headers', full_name='CTwoFactor_FinalizeAddAuthenticator_Request.http_headers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037HTTP headers alternating by K/V'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2427,
serialized_end=2750,
)
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_FinalizeAddAuthenticator_Response',
full_name='CTwoFactor_FinalizeAddAuthenticator_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='CTwoFactor_FinalizeAddAuthenticator_Response.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030%True if succeeded, or want more tries'))),
_descriptor.FieldDescriptor(
name='want_more', full_name='CTwoFactor_FinalizeAddAuthenticator_Response.want_more', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\027True if want more tries'))),
_descriptor.FieldDescriptor(
name='server_time', full_name='CTwoFactor_FinalizeAddAuthenticator_Response.server_time', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))),
_descriptor.FieldDescriptor(
name='status', full_name='CTwoFactor_FinalizeAddAuthenticator_Response.status', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\013Result code'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2753,
serialized_end=2986,
)
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_RemoveAuthenticator_Request',
full_name='CTwoFactor_RemoveAuthenticator_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='revocation_code', full_name='CTwoFactor_RemoveAuthenticator_Request.revocation_code', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037Password needed to remove token'))),
_descriptor.FieldDescriptor(
name='revocation_reason', full_name='CTwoFactor_RemoveAuthenticator_Request.revocation_reason', index=1,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030)Reason the authenticator is being removed'))),
_descriptor.FieldDescriptor(
name='steamguard_scheme', full_name='CTwoFactor_RemoveAuthenticator_Request.steamguard_scheme', index=2,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0300Type of Steam Guard to use once token is removed'))),
_descriptor.FieldDescriptor(
name='remove_all_steamguard_cookies', full_name='CTwoFactor_RemoveAuthenticator_Request.remove_all_steamguard_cookies', index=3,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\035Remove all steamguard cookies'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2989,
serialized_end=3320,
)
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_RemoveAuthenticator_Response',
full_name='CTwoFactor_RemoveAuthenticator_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='CTwoFactor_RemoveAuthenticator_Response.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0307True if request succeeeded. The mobile app checks this.'))),
_descriptor.FieldDescriptor(
name='server_time', full_name='CTwoFactor_RemoveAuthenticator_Response.server_time', index=1,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))),
_descriptor.FieldDescriptor(
name='revocation_attempts_remaining', full_name='CTwoFactor_RemoveAuthenticator_Response.revocation_attempts_remaining', index=2,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030,Number of revocation code attempts remaining'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3323,
serialized_end=3577,
)
_CTWOFACTOR_CREATEEMERGENCYCODES_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_CreateEmergencyCodes_Request',
full_name='CTwoFactor_CreateEmergencyCodes_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3579,
serialized_end=3620,
)
_CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_CreateEmergencyCodes_Response',
full_name='CTwoFactor_CreateEmergencyCodes_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='codes', full_name='CTwoFactor_CreateEmergencyCodes_Response.codes', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\017Emergency codes'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3622,
serialized_end=3700,
)
_CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_DestroyEmergencyCodes_Request',
full_name='CTwoFactor_DestroyEmergencyCodes_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steamid', full_name='CTwoFactor_DestroyEmergencyCodes_Request.steamid', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3702,
serialized_end=3781,
)
_CTWOFACTOR_DESTROYEMERGENCYCODES_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_DestroyEmergencyCodes_Response',
full_name='CTwoFactor_DestroyEmergencyCodes_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3783,
serialized_end=3826,
)
_CTWOFACTOR_VALIDATETOKEN_REQUEST = _descriptor.Descriptor(
name='CTwoFactor_ValidateToken_Request',
full_name='CTwoFactor_ValidateToken_Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='CTwoFactor_ValidateToken_Request.code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\020code to validate'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3828,
serialized_end=3898,
)
_CTWOFACTOR_VALIDATETOKEN_RESPONSE = _descriptor.Descriptor(
name='CTwoFactor_ValidateToken_Response',
full_name='CTwoFactor_ValidateToken_Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='valid', full_name='CTwoFactor_ValidateToken_Response.valid', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\024result of validation'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3900,
serialized_end=3976,
)
DESCRIPTOR.message_types_by_name['CTwoFactor_Status_Request'] = _CTWOFACTOR_STATUS_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_Status_Response'] = _CTWOFACTOR_STATUS_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_AddAuthenticator_Request'] = _CTWOFACTOR_ADDAUTHENTICATOR_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_AddAuthenticator_Response'] = _CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_SendEmail_Request'] = _CTWOFACTOR_SENDEMAIL_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_SendEmail_Response'] = _CTWOFACTOR_SENDEMAIL_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_FinalizeAddAuthenticator_Request'] = _CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_FinalizeAddAuthenticator_Response'] = _CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_RemoveAuthenticator_Request'] = _CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_RemoveAuthenticator_Response'] = _CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_CreateEmergencyCodes_Request'] = _CTWOFACTOR_CREATEEMERGENCYCODES_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_CreateEmergencyCodes_Response'] = _CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_DestroyEmergencyCodes_Request'] = _CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_DestroyEmergencyCodes_Response'] = _CTWOFACTOR_DESTROYEMERGENCYCODES_RESPONSE
DESCRIPTOR.message_types_by_name['CTwoFactor_ValidateToken_Request'] = _CTWOFACTOR_VALIDATETOKEN_REQUEST
DESCRIPTOR.message_types_by_name['CTwoFactor_ValidateToken_Response'] = _CTWOFACTOR_VALIDATETOKEN_RESPONSE
CTwoFactor_Status_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_Status_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_STATUS_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_Status_Request)
))
_sym_db.RegisterMessage(CTwoFactor_Status_Request)
CTwoFactor_Status_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_Status_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_STATUS_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_Status_Response)
))
_sym_db.RegisterMessage(CTwoFactor_Status_Response)
CTwoFactor_AddAuthenticator_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_AddAuthenticator_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_ADDAUTHENTICATOR_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_AddAuthenticator_Request)
))
_sym_db.RegisterMessage(CTwoFactor_AddAuthenticator_Request)
CTwoFactor_AddAuthenticator_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_AddAuthenticator_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_AddAuthenticator_Response)
))
_sym_db.RegisterMessage(CTwoFactor_AddAuthenticator_Response)
CTwoFactor_SendEmail_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_SendEmail_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_SENDEMAIL_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_SendEmail_Request)
))
_sym_db.RegisterMessage(CTwoFactor_SendEmail_Request)
CTwoFactor_SendEmail_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_SendEmail_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_SENDEMAIL_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_SendEmail_Response)
))
_sym_db.RegisterMessage(CTwoFactor_SendEmail_Response)
CTwoFactor_FinalizeAddAuthenticator_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_FinalizeAddAuthenticator_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_FinalizeAddAuthenticator_Request)
))
_sym_db.RegisterMessage(CTwoFactor_FinalizeAddAuthenticator_Request)
CTwoFactor_FinalizeAddAuthenticator_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_FinalizeAddAuthenticator_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_FinalizeAddAuthenticator_Response)
))
_sym_db.RegisterMessage(CTwoFactor_FinalizeAddAuthenticator_Response)
CTwoFactor_RemoveAuthenticator_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_RemoveAuthenticator_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_RemoveAuthenticator_Request)
))
_sym_db.RegisterMessage(CTwoFactor_RemoveAuthenticator_Request)
CTwoFactor_RemoveAuthenticator_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_RemoveAuthenticator_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_RemoveAuthenticator_Response)
))
_sym_db.RegisterMessage(CTwoFactor_RemoveAuthenticator_Response)
CTwoFactor_CreateEmergencyCodes_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_CreateEmergencyCodes_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_CREATEEMERGENCYCODES_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_CreateEmergencyCodes_Request)
))
_sym_db.RegisterMessage(CTwoFactor_CreateEmergencyCodes_Request)
CTwoFactor_CreateEmergencyCodes_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_CreateEmergencyCodes_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_CreateEmergencyCodes_Response)
))
_sym_db.RegisterMessage(CTwoFactor_CreateEmergencyCodes_Response)
CTwoFactor_DestroyEmergencyCodes_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_DestroyEmergencyCodes_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_DestroyEmergencyCodes_Request)
))
_sym_db.RegisterMessage(CTwoFactor_DestroyEmergencyCodes_Request)
CTwoFactor_DestroyEmergencyCodes_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_DestroyEmergencyCodes_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_DESTROYEMERGENCYCODES_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_DestroyEmergencyCodes_Response)
))
_sym_db.RegisterMessage(CTwoFactor_DestroyEmergencyCodes_Response)
CTwoFactor_ValidateToken_Request = _reflection.GeneratedProtocolMessageType('CTwoFactor_ValidateToken_Request', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_VALIDATETOKEN_REQUEST,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_ValidateToken_Request)
))
_sym_db.RegisterMessage(CTwoFactor_ValidateToken_Request)
CTwoFactor_ValidateToken_Response = _reflection.GeneratedProtocolMessageType('CTwoFactor_ValidateToken_Response', (_message.Message,), dict(
DESCRIPTOR = _CTWOFACTOR_VALIDATETOKEN_RESPONSE,
__module__ = 'steammessages_twofactor_pb2'
# @@protoc_insertion_point(class_scope:CTwoFactor_ValidateToken_Response)
))
_sym_db.RegisterMessage(CTwoFactor_ValidateToken_Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\220\001\001'))
_CTWOFACTOR_STATUS_REQUEST.fields_by_name['steamid'].has_options = True
_CTWOFACTOR_STATUS_REQUEST.fields_by_name['steamid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['state'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['state']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Authenticator state'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['inactivation_reason'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['inactivation_reason']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Inactivation reason (if any)'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['authenticator_type'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['authenticator_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\025Type of authenticator'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['authenticator_allowed'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['authenticator_allowed']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030)Account allowed to have an authenticator?'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['steamguard_scheme'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['steamguard_scheme']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Steam Guard scheme in effect'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['token_gid'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['token_gid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030*String rep of token GID assigned by server'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['email_validated'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['email_validated']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030%Account has verified email capability'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['device_identifier'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['device_identifier']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030 Authenticator (phone) identifier'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['time_created'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['time_created']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032When the token was created'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['revocation_attempts_remaining'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['revocation_attempts_remaining']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030,Number of revocation code attempts remaining'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['classified_agent'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['classified_agent']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030@Agent that added the authenticator (e.g., ios / android / other)'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['allow_external_authenticator'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['allow_external_authenticator']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030=Allow a third-party authenticator (in addition to two-factor)'))
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['time_transferred'].has_options = True
_CTWOFACTOR_STATUS_RESPONSE.fields_by_name['time_transferred']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030AWhen the token was transferred from another device, if applicable'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['steamid'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['steamid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_time'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_time']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032Current authenticator time'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['serial_number'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['serial_number']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030$locally computed serial (deprecated)'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_type'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\022Authenticator type'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['device_identifier'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['device_identifier']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\030Authenticator identifier'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['sms_phone_id'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['sms_phone_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\'ID of phone to use for SMS verification'))
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['http_headers'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST.fields_by_name['http_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037HTTP headers alternating by K/V'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['shared_secret'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['shared_secret']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Shared secret between server and authenticator'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['serial_number'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['serial_number']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Authenticator serial number (unique per token)'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['revocation_code'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['revocation_code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030!code used to revoke authenticator'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['uri'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['uri']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032URI for QR code generation'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['server_time'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['server_time']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['account_name'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['account_name']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\'Account name to display on token client'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['token_gid'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['token_gid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\034Token GID assigned by server'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['identity_secret'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['identity_secret']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0309Secret used for identity attestation (e.g., for eventing)'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['secret_1'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['secret_1']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Spare shared secret'))
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['status'].has_options = True
_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE.fields_by_name['status']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\013Result code'))
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['steamid'].has_options = True
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['steamid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016Steamid to use'))
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['email_type'].has_options = True
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['email_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030.Type of email to send (ETwoFactorEmailType::*)'))
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['include_activation_code'].has_options = True
_CTWOFACTOR_SENDEMAIL_REQUEST.fields_by_name['include_activation_code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030+Include activation code in email parameters'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['steamid'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['steamid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_code'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\021Current auth code'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_time'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['authenticator_time']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\032Current authenticator time'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['activation_code'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['activation_code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030(Activation code from out-of-band message'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['http_headers'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST.fields_by_name['http_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037HTTP headers alternating by K/V'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['success'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['success']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030%True if succeeded, or want more tries'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['want_more'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['want_more']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\027True if want more tries'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['server_time'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['server_time']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['status'].has_options = True
_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE.fields_by_name['status']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\013Result code'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['revocation_code'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['revocation_code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\037Password needed to remove token'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['revocation_reason'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['revocation_reason']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030)Reason the authenticator is being removed'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['steamguard_scheme'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['steamguard_scheme']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0300Type of Steam Guard to use once token is removed'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['remove_all_steamguard_cookies'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST.fields_by_name['remove_all_steamguard_cookies']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\035Remove all steamguard cookies'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['success'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['success']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\0307True if request succeeeded. The mobile app checks this.'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['server_time'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['server_time']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\023Current server time'))
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['revocation_attempts_remaining'].has_options = True
_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE.fields_by_name['revocation_attempts_remaining']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030,Number of revocation code attempts remaining'))
_CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE.fields_by_name['codes'].has_options = True
_CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE.fields_by_name['codes']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\017Emergency codes'))
_CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST.fields_by_name['steamid'].has_options = True
_CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST.fields_by_name['steamid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\016steamid to use'))
_CTWOFACTOR_VALIDATETOKEN_REQUEST.fields_by_name['code'].has_options = True
_CTWOFACTOR_VALIDATETOKEN_REQUEST.fields_by_name['code']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\020code to validate'))
_CTWOFACTOR_VALIDATETOKEN_RESPONSE.fields_by_name['valid'].has_options = True
_CTWOFACTOR_VALIDATETOKEN_RESPONSE.fields_by_name['valid']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\202\265\030\024result of validation'))
_TWOFACTOR = _descriptor.ServiceDescriptor(
name='TwoFactor',
full_name='TwoFactor',
file=DESCRIPTOR,
index=0,
options=_descriptor._ParseOptions(descriptor_pb2.ServiceOptions(), _b('\202\265\030!Two Factor Authentication Service')),
serialized_start=3979,
serialized_end=5263,
methods=[
_descriptor.MethodDescriptor(
name='QueryStatus',
full_name='TwoFactor.QueryStatus',
index=0,
containing_service=None,
input_type=_CTWOFACTOR_STATUS_REQUEST,
output_type=_CTWOFACTOR_STATUS_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030@Get two-factor authentication settings for the logged-in account')),
),
_descriptor.MethodDescriptor(
name='AddAuthenticator',
full_name='TwoFactor.AddAuthenticator',
index=1,
containing_service=None,
input_type=_CTWOFACTOR_ADDAUTHENTICATOR_REQUEST,
output_type=_CTWOFACTOR_ADDAUTHENTICATOR_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\0305Add two-factor authenticator to the logged-in account')),
),
_descriptor.MethodDescriptor(
name='SendEmail',
full_name='TwoFactor.SendEmail',
index=2,
containing_service=None,
input_type=_CTWOFACTOR_SENDEMAIL_REQUEST,
output_type=_CTWOFACTOR_SENDEMAIL_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030\031Send email to the account')),
),
_descriptor.MethodDescriptor(
name='FinalizeAddAuthenticator',
full_name='TwoFactor.FinalizeAddAuthenticator',
index=3,
containing_service=None,
input_type=_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_REQUEST,
output_type=_CTWOFACTOR_FINALIZEADDAUTHENTICATOR_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030DFinalize two-factor authentication addition to the logged-in account')),
),
_descriptor.MethodDescriptor(
name='RemoveAuthenticator',
full_name='TwoFactor.RemoveAuthenticator',
index=4,
containing_service=None,
input_type=_CTWOFACTOR_REMOVEAUTHENTICATOR_REQUEST,
output_type=_CTWOFACTOR_REMOVEAUTHENTICATOR_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030DRemove two-factor authentication addition from the logged-in account')),
),
_descriptor.MethodDescriptor(
name='CreateEmergencyCodes',
full_name='TwoFactor.CreateEmergencyCodes',
index=5,
containing_service=None,
input_type=_CTWOFACTOR_CREATEEMERGENCYCODES_REQUEST,
output_type=_CTWOFACTOR_CREATEEMERGENCYCODES_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030&Generate emergency authenticator codes')),
),
_descriptor.MethodDescriptor(
name='DestroyEmergencyCodes',
full_name='TwoFactor.DestroyEmergencyCodes',
index=6,
containing_service=None,
input_type=_CTWOFACTOR_DESTROYEMERGENCYCODES_REQUEST,
output_type=_CTWOFACTOR_DESTROYEMERGENCYCODES_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\0305Destroy emergency authenticator codes for the account')),
),
_descriptor.MethodDescriptor(
name='ValidateToken',
full_name='TwoFactor.ValidateToken',
index=7,
containing_service=None,
input_type=_CTWOFACTOR_VALIDATETOKEN_REQUEST,
output_type=_CTWOFACTOR_VALIDATETOKEN_RESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\265\030\036Validate (and consume) a token')),
),
])
TwoFactor = service_reflection.GeneratedServiceType('TwoFactor', (_service.Service,), dict(
DESCRIPTOR = _TWOFACTOR,
__module__ = 'steammessages_twofactor_pb2'
))
TwoFactor_Stub = service_reflection.GeneratedServiceStubType('TwoFactor_Stub', (TwoFactor,), dict(
DESCRIPTOR = _TWOFACTOR,
__module__ = 'steammessages_twofactor_pb2'
))
# @@protoc_insertion_point(module_scope)
| 60.337168
| 7,572
| 0.796468
| 8,264
| 68,181
| 6.200266
| 0.057962
| 0.027791
| 0.066785
| 0.089814
| 0.824314
| 0.724097
| 0.674798
| 0.633112
| 0.582037
| 0.527118
| 0
| 0.051193
| 0.091506
| 68,181
| 1,129
| 7,573
| 60.390611
| 0.776016
| 0.019932
| 0
| 0.55311
| 1
| 0.011483
| 0.285999
| 0.180401
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002871
| 0.008612
| 0
| 0.008612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
755c918e2b3983c8a538846e6f8956c9a8b29af1
| 245
|
py
|
Python
|
rpython/translator/cli/test/test_range.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
rpython/translator/cli/test/test_range.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
rpython/translator/cli/test/test_range.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
import py
from rpython.translator.cli.test.runtest import CliTest
from rpython.rtyper.test.test_rrange import BaseTestRrange
class TestCliRange(CliTest, BaseTestRrange):
def test_rlist_range(self):
pass # it doesn't make sense here
| 30.625
| 58
| 0.791837
| 34
| 245
| 5.617647
| 0.735294
| 0.115183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 245
| 7
| 59
| 35
| 0.909524
| 0.106122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.5
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
f329fde281d55efd7c63f2af1252431e6f963b07
| 50
|
py
|
Python
|
tests/__init__.py
|
Informasjonsforvaltning/jsonschematordf
|
dfeb039411b5a9797ad3b7769e0dd3489abc5502
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Informasjonsforvaltning/jsonschematordf
|
dfeb039411b5a9797ad3b7769e0dd3489abc5502
|
[
"Apache-2.0"
] | 24
|
2021-08-19T08:33:39.000Z
|
2021-10-06T07:43:28.000Z
|
tests/__init__.py
|
Informasjonsforvaltning/jsonschematordf
|
dfeb039411b5a9797ad3b7769e0dd3489abc5502
|
[
"Apache-2.0"
] | null | null | null |
"""Test suite for the jsonschematordf package."""
| 25
| 49
| 0.74
| 6
| 50
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 50
| 1
| 50
| 50
| 0.840909
| 0.86
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f3336e010473b7a299957a7c7773a12a5ea46d38
| 157
|
py
|
Python
|
tests/conftest.py
|
mt3o/injectable
|
0ffc5c758b63d9391134cd822158e1846999b404
|
[
"MIT"
] | 71
|
2018-02-05T04:12:27.000Z
|
2022-02-15T23:08:16.000Z
|
tests/conftest.py
|
Euraxluo/injectable
|
74e640f0911480fb06fa97c1a468c3863541c0fd
|
[
"MIT"
] | 104
|
2018-02-06T23:37:36.000Z
|
2021-08-25T04:50:15.000Z
|
tests/conftest.py
|
Euraxluo/injectable
|
74e640f0911480fb06fa97c1a468c3863541c0fd
|
[
"MIT"
] | 13
|
2019-02-10T18:52:50.000Z
|
2022-01-26T17:12:35.000Z
|
import pytest
from testfixtures import LogCapture
@pytest.fixture(autouse=True)
def log_capture():
with LogCapture() as capture:
yield capture
| 17.444444
| 35
| 0.745223
| 19
| 157
| 6.105263
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184713
| 157
| 8
| 36
| 19.625
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f354a99befa71d5bc60e342f2839f7c5d0fd0ed2
| 401
|
py
|
Python
|
g.py
|
HakimdarC/Dice-Game
|
96377891e43df68911fa841204c8d1854d068088
|
[
"MIT"
] | null | null | null |
g.py
|
HakimdarC/Dice-Game
|
96377891e43df68911fa841204c8d1854d068088
|
[
"MIT"
] | null | null | null |
g.py
|
HakimdarC/Dice-Game
|
96377891e43df68911fa841204c8d1854d068088
|
[
"MIT"
] | null | null | null |
import random
m=1
n=6
roll_again="yes"
while roll_again=="yes" or roll_again=="y":
print("Rolling the dice....")
print("the values are..")
print(random.randint(m,n))
print(random.randint(m,n))
print(random.randint(m,n))
break
roll_again = input("enter yes or y to continue...Roll the dices again?")
print(random.randint(m,n))
print(random.randint(m,n))
print(random.randint(m,n))
| 25.0625
| 73
| 0.683292
| 69
| 401
| 3.913043
| 0.362319
| 0.244444
| 0.4
| 0.422222
| 0.444444
| 0.444444
| 0.444444
| 0.444444
| 0.444444
| 0.444444
| 0
| 0.005764
| 0.134663
| 401
| 15
| 74
| 26.733333
| 0.772334
| 0
| 0
| 0.4
| 0
| 0
| 0.240933
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.533333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
f36bbf06a2fee123d5868f458818a488d5cdfe22
| 32
|
py
|
Python
|
pysedm/dask/__init__.py
|
MickaelRigault/pysedm
|
5d34d3a6b48eb3bbb7ba9d89b88b4b5b1ff09624
|
[
"Apache-2.0"
] | 5
|
2018-03-16T14:58:09.000Z
|
2019-11-25T15:57:14.000Z
|
pysedm/dask/__init__.py
|
MickaelRigault/pysedm
|
5d34d3a6b48eb3bbb7ba9d89b88b4b5b1ff09624
|
[
"Apache-2.0"
] | 9
|
2018-02-13T17:02:17.000Z
|
2020-09-15T11:43:37.000Z
|
pysedm/dask/__init__.py
|
MickaelRigault/pysedm
|
5d34d3a6b48eb3bbb7ba9d89b88b4b5b1ff09624
|
[
"Apache-2.0"
] | 4
|
2018-03-16T14:58:14.000Z
|
2022-02-07T20:02:58.000Z
|
""" Dask scripts for pysedm """
| 16
| 31
| 0.625
| 4
| 32
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 32
| 1
| 32
| 32
| 0.769231
| 0.71875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f3ad39a1e9e6605b2b9a4db24488edb7e7bc809b
| 87
|
py
|
Python
|
boleto/apps.py
|
feliperuhland/home
|
cfe84ec7243faec8c0cf5cc4afb1db76aebb86f9
|
[
"MIT"
] | null | null | null |
boleto/apps.py
|
feliperuhland/home
|
cfe84ec7243faec8c0cf5cc4afb1db76aebb86f9
|
[
"MIT"
] | null | null | null |
boleto/apps.py
|
feliperuhland/home
|
cfe84ec7243faec8c0cf5cc4afb1db76aebb86f9
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BoletoConfig(AppConfig):
name = 'boleto'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f3c4cd31cade4e1b3f0dbf54c546f501767d472f
| 198
|
py
|
Python
|
academic_helper/management/commands/dev_init.py
|
AviH0/coursist
|
3db05e8a168be33d2f03b9e082ee4779d80be7c7
|
[
"MIT"
] | 6
|
2020-06-26T12:09:10.000Z
|
2021-12-18T11:44:55.000Z
|
academic_helper/management/commands/dev_init.py
|
AviH0/coursist
|
3db05e8a168be33d2f03b9e082ee4779d80be7c7
|
[
"MIT"
] | 89
|
2020-06-02T11:42:57.000Z
|
2021-06-10T19:09:09.000Z
|
academic_helper/management/commands/dev_init.py
|
AviH0/coursist
|
3db05e8a168be33d2f03b9e082ee4779d80be7c7
|
[
"MIT"
] | 14
|
2020-06-26T12:08:34.000Z
|
2021-04-20T10:59:45.000Z
|
from django.core.management import BaseCommand
from academic_helper.management.init_data import create_all
class Command(BaseCommand):
def handle(self, *args, **options):
create_all()
| 24.75
| 59
| 0.767677
| 25
| 198
| 5.92
| 0.76
| 0.121622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146465
| 198
| 7
| 60
| 28.285714
| 0.87574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f3c5ec66234aca2dce7301f5e29b899eff21bf61
| 76
|
py
|
Python
|
src/dna-center/dna-center-authenticate.py
|
fernando28024/git-clone-https-github.com-CiscoDevNet-devasc-code-examples
|
589dbd5d34f67f9b823159b731844432977b6490
|
[
"BSD-3-Clause"
] | 43
|
2020-08-01T03:01:53.000Z
|
2022-02-17T12:43:27.000Z
|
src/dna-center/dna-center-authenticate.py
|
fernando28024/git-clone-https-github.com-CiscoDevNet-devasc-code-examples
|
589dbd5d34f67f9b823159b731844432977b6490
|
[
"BSD-3-Clause"
] | 2
|
2021-04-20T17:13:39.000Z
|
2021-09-23T23:35:12.000Z
|
src/dna-center/dna-center-authenticate.py
|
grelleum/devasc-code-examples
|
589dbd5d34f67f9b823159b731844432977b6490
|
[
"BSD-3-Clause"
] | 14
|
2020-08-02T00:07:43.000Z
|
2022-03-15T22:25:39.000Z
|
# Fill in this file with the code from the DNA Center authenticate exercise
| 38
| 75
| 0.802632
| 13
| 76
| 4.692308
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 76
| 1
| 76
| 76
| 0.983871
| 0.960526
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
45ebdac3524066e7f27b37c7714af6daef248dfc
| 240
|
py
|
Python
|
ToxicSense/ToxicSense/urls.py
|
adamwespiser/toxic-twitter
|
d9d804ffa231ae1f7b5b188d4beaff1a521d9f27
|
[
"MIT"
] | 7
|
2018-12-16T07:14:35.000Z
|
2022-02-27T03:58:51.000Z
|
ToxicSense/ToxicSense/urls.py
|
adamwespiser/toxic-twitter
|
d9d804ffa231ae1f7b5b188d4beaff1a521d9f27
|
[
"MIT"
] | 1
|
2019-12-17T19:26:46.000Z
|
2019-12-17T19:26:46.000Z
|
ToxicSense/ToxicSense/urls.py
|
adamwespiser/toxic-twitter
|
d9d804ffa231ae1f7b5b188d4beaff1a521d9f27
|
[
"MIT"
] | 3
|
2019-07-15T05:11:21.000Z
|
2022-02-27T04:02:25.000Z
|
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
urlpatterns = [
path("", include("clientapp.urls")),
]
urlpatterns += staticfiles_urlpatterns()
| 26.666667
| 67
| 0.7875
| 27
| 240
| 6.925926
| 0.407407
| 0.160428
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1125
| 240
| 9
| 68
| 26.666667
| 0.877934
| 0
| 0
| 0
| 0
| 0
| 0.058091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
45f365799338ddd03fef83273cefbaad5a6921c9
| 94
|
py
|
Python
|
categories/settings/settings.py
|
visuable/vktoolspython
|
8e30ef1ad175c721e4eedf37ec32a77a5af984d0
|
[
"MIT"
] | null | null | null |
categories/settings/settings.py
|
visuable/vktoolspython
|
8e30ef1ad175c721e4eedf37ec32a77a5af984d0
|
[
"MIT"
] | null | null | null |
categories/settings/settings.py
|
visuable/vktoolspython
|
8e30ef1ad175c721e4eedf37ec32a77a5af984d0
|
[
"MIT"
] | null | null | null |
class Settings:
params = ()
def __init__(self, params):
self.params = params
| 15.666667
| 31
| 0.595745
| 10
| 94
| 5.2
| 0.6
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297872
| 94
| 5
| 32
| 18.8
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3455b1955d2acc2dab4b57985cd45af0fbcdd700
| 27
|
py
|
Python
|
dockerspawner/_version.py
|
JocelynDelalande/dockerspawner
|
d1f27e2855d2cefbdb25b29cc069b9ca69d564e3
|
[
"BSD-3-Clause"
] | 1
|
2021-01-28T17:22:25.000Z
|
2021-01-28T17:22:25.000Z
|
dockerspawner/_version.py
|
JocelynDelalande/dockerspawner
|
d1f27e2855d2cefbdb25b29cc069b9ca69d564e3
|
[
"BSD-3-Clause"
] | null | null | null |
dockerspawner/_version.py
|
JocelynDelalande/dockerspawner
|
d1f27e2855d2cefbdb25b29cc069b9ca69d564e3
|
[
"BSD-3-Clause"
] | 1
|
2018-07-25T16:11:06.000Z
|
2018-07-25T16:11:06.000Z
|
__version__ = '0.12.0.dev'
| 13.5
| 26
| 0.666667
| 5
| 27
| 2.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.111111
| 27
| 1
| 27
| 27
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
347c81c8c4f4daeb6c84c94cebb5501201c3d014
| 126
|
py
|
Python
|
src/generate-random-int.py
|
MRN-Code/coinstac-example-computation-bisect-converge
|
761515c865ed635946620968da8e3bfbab632b7a
|
[
"MIT"
] | null | null | null |
src/generate-random-int.py
|
MRN-Code/coinstac-example-computation-bisect-converge
|
761515c865ed635946620968da8e3bfbab632b7a
|
[
"MIT"
] | 2
|
2016-06-08T02:03:27.000Z
|
2016-10-03T23:11:00.000Z
|
src/generate-random-int.py
|
MRN-Code/coinstac-example-computation-bisect-converge
|
761515c865ed635946620968da8e3bfbab632b7a
|
[
"MIT"
] | 1
|
2021-02-08T03:00:52.000Z
|
2021-02-08T03:00:52.000Z
|
import sys
from random import randint
myint = randint(1, 100)
print myint
sys.stderr.write('My random # was ' + str(myint))
| 15.75
| 49
| 0.722222
| 20
| 126
| 4.55
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038095
| 0.166667
| 126
| 7
| 50
| 18
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ca9ef425f203639ff23bebc4718d840ebe0dc154
| 388
|
py
|
Python
|
pyfilter/src/filters/__init__.py
|
zkscpqm/pyfilter
|
39c284681ec6f377059907b75346028d99cbdd4c
|
[
"MIT"
] | null | null | null |
pyfilter/src/filters/__init__.py
|
zkscpqm/pyfilter
|
39c284681ec6f377059907b75346028d99cbdd4c
|
[
"MIT"
] | 1
|
2021-04-28T18:40:13.000Z
|
2021-04-28T18:40:13.000Z
|
pyfilter/src/filters/__init__.py
|
zkscpqm/pyfilter
|
39c284681ec6f377059907b75346028d99cbdd4c
|
[
"MIT"
] | null | null | null |
from pyfilter.src.filters.base_filter import _BaseFilter
from pyfilter.src.filters.any_match_filter import _AnyMatchFilter
from pyfilter.src.filters.all_match_filter import _AllMatchFilter
from pyfilter.src.filters.regex_match_filter import _RegexMatchFilter
BaseFilter = _BaseFilter
AnyMatchFilter = _AnyMatchFilter
AllMatchFilter = _AllMatchFilter
RegexMatchFilter = _RegexMatchFilter
| 38.8
| 69
| 0.884021
| 43
| 388
| 7.627907
| 0.348837
| 0.146341
| 0.182927
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074742
| 388
| 9
| 70
| 43.111111
| 0.913649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
cab6f3d40e01507ff20d8b05edf9989c7da0065c
| 906
|
py
|
Python
|
cli/psym/graphql/input/service_type_create_data.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | null | null | null |
cli/psym/graphql/input/service_type_create_data.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | 12
|
2022-02-14T04:20:30.000Z
|
2022-03-28T04:20:17.000Z
|
cli/psym/graphql/input/service_type_create_data.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | 1
|
2022-02-24T21:47:51.000Z
|
2022-02-24T21:47:51.000Z
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from functools import partial
from ...config import custom_scalars, datetime
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from dataclasses_json import DataClassJsonMixin, config
from gql_client.runtime.enum_utils import enum_field_metadata
from ..enum.discovery_method import DiscoveryMethod
from ..input.property_type_input import PropertyTypeInput
from ..input.service_endpoint_definition_input import ServiceEndpointDefinitionInput
@dataclass(frozen=True)
class ServiceTypeCreateData(DataClassJsonMixin):
name: str
hasCustomer: bool
properties: Optional[List[PropertyTypeInput]] = None
endpoints: Optional[List[ServiceEndpointDefinitionInput]] = None
discoveryMethod: Optional[DiscoveryMethod] = None
| 34.846154
| 84
| 0.8234
| 101
| 906
| 7.257426
| 0.60396
| 0.040928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001247
| 0.11479
| 906
| 25
| 85
| 36.24
| 0.912718
| 0.073951
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.588235
| 0
| 0.941176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
cad352c0ee2da88f233720dd9035312793238d9b
| 128
|
py
|
Python
|
tests/druid_import_test.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
tests/druid_import_test.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
tests/druid_import_test.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
from service.druid import Druid
class TestImport:
def test_it(self):
druid = Druid(start=None, end=None)
assert True
| 18.285714
| 39
| 0.71875
| 19
| 128
| 4.789474
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195313
| 128
| 7
| 40
| 18.285714
| 0.883495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
cade8ff7daa98befdd0853d4ec61c4994673256a
| 184
|
py
|
Python
|
alwahaj/core/views/solutions.py
|
haidarzxc/alwahaj
|
113ece9c713a6e6d04b5f9804885ed6a5337e404
|
[
"MIT"
] | null | null | null |
alwahaj/core/views/solutions.py
|
haidarzxc/alwahaj
|
113ece9c713a6e6d04b5f9804885ed6a5337e404
|
[
"MIT"
] | 12
|
2020-02-12T00:30:39.000Z
|
2022-03-11T23:49:30.000Z
|
alwahaj/core/views/solutions.py
|
haidarzxc/alwahaj
|
113ece9c713a6e6d04b5f9804885ed6a5337e404
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def solutionsView(request):
context=dict(x=1)
return render(request,"pages/solutions.html",context)
| 20.444444
| 57
| 0.777174
| 24
| 184
| 5.958333
| 0.75
| 0.13986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00625
| 0.130435
| 184
| 9
| 57
| 20.444444
| 0.8875
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
caf87ffe18c8dd2b3ed1b12b61911c042ab343c5
| 34
|
py
|
Python
|
api/tests.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
api/tests.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | 96
|
2021-08-28T12:37:02.000Z
|
2022-03-23T04:25:12.000Z
|
api/tests.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
"""Create your api tests here."""
| 17
| 33
| 0.647059
| 5
| 34
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.758621
| 0.794118
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cafc41228d764fbb4c8e04bd84332419504fc329
| 150
|
py
|
Python
|
Level 1/Math with Python/7) Geometry/Question 4.py
|
aaravdave/YoungWonks
|
92f1de88f5c46744bc7229153af4392afa8f6353
|
[
"MIT"
] | null | null | null |
Level 1/Math with Python/7) Geometry/Question 4.py
|
aaravdave/YoungWonks
|
92f1de88f5c46744bc7229153af4392afa8f6353
|
[
"MIT"
] | null | null | null |
Level 1/Math with Python/7) Geometry/Question 4.py
|
aaravdave/YoungWonks
|
92f1de88f5c46744bc7229153af4392afa8f6353
|
[
"MIT"
] | null | null | null |
from math import sqrt
x, y, a, b = list(map(int, input('Enter \'x, y, a, b\': ').split(', ')))
print(round(sqrt(((x - a) ** 2) + ((y - b) ** 2)), 2))
| 37.5
| 72
| 0.48
| 28
| 150
| 2.571429
| 0.607143
| 0.138889
| 0.083333
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.193333
| 150
| 3
| 73
| 50
| 0.570248
| 0
| 0
| 0
| 0
| 0
| 0.073333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1b0aaadcb1eb6855d01a9e5bbebdbbb9748a0d63
| 217
|
py
|
Python
|
main/frontend.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | null | null | null |
main/frontend.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | null | null | null |
main/frontend.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | null | null | null |
from django.conf import settings
def get_project_url(id):
return f'https://{settings.FRONTEND_URL}/three-w/{id}/'
def get_flash_update_url(id):
return f'https://{settings.FRONTEND_URL}/flash-update/{id}/'
| 21.7
| 64
| 0.728111
| 34
| 217
| 4.441176
| 0.529412
| 0.07947
| 0.145695
| 0.15894
| 0.476821
| 0.476821
| 0.476821
| 0.476821
| 0
| 0
| 0
| 0
| 0.110599
| 217
| 9
| 65
| 24.111111
| 0.782383
| 0
| 0
| 0
| 0
| 0
| 0.437788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
1b41e9945c6c839c2b6547f47be6d98b19247e76
| 314
|
py
|
Python
|
tests/image/test_util.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 10
|
2020-04-01T16:46:25.000Z
|
2021-02-09T15:56:42.000Z
|
tests/image/test_util.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 14
|
2020-03-23T13:32:35.000Z
|
2021-12-07T19:30:23.000Z
|
tests/image/test_util.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 2
|
2020-08-14T07:23:06.000Z
|
2021-12-06T18:20:42.000Z
|
from kaishi.image.util import validate_image_header
def test_validate_image_header():
invalid_file = "tests/data/image/empty_unsupported_extension.gif"
valid_file = "tests/data/image/sample.jpg"
assert validate_image_header(invalid_file) is False
assert validate_image_header(valid_file) is True
| 34.888889
| 69
| 0.805732
| 45
| 314
| 5.288889
| 0.533333
| 0.218487
| 0.319328
| 0.218487
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121019
| 314
| 8
| 70
| 39.25
| 0.862319
| 0
| 0
| 0
| 0
| 0
| 0.238854
| 0.238854
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1b425500770d391ac96c7cb6e196a4ccf6b4b2b5
| 106
|
py
|
Python
|
codesignal/arcade/python/intro_51_delete_digit.py
|
tinesife94/random
|
b802924dce4635ae074d30dc03962d4301bd6d8b
|
[
"MIT"
] | null | null | null |
codesignal/arcade/python/intro_51_delete_digit.py
|
tinesife94/random
|
b802924dce4635ae074d30dc03962d4301bd6d8b
|
[
"MIT"
] | null | null | null |
codesignal/arcade/python/intro_51_delete_digit.py
|
tinesife94/random
|
b802924dce4635ae074d30dc03962d4301bd6d8b
|
[
"MIT"
] | null | null | null |
def solution(n):
s = str(n)
return max(int('{}{}'.format(s[:i], s[i+1:])) for i in range(len(s)))
| 26.5
| 73
| 0.518868
| 21
| 106
| 2.619048
| 0.714286
| 0.072727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.188679
| 106
| 3
| 74
| 35.333333
| 0.627907
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
1b56962e5a6d5a63085f2158e015e7d133280d2e
| 82
|
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# 14.5 Denormalization
# What is denormalization?
# Explain the pros and cons.
| 16.4
| 28
| 0.731707
| 11
| 82
| 5.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.195122
| 82
| 4
| 29
| 20.5
| 0.863636
| 0.902439
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1b7b2c0fc016dac1b8ba6644993ed164d22d1e22
| 156
|
py
|
Python
|
Chapter07/function_with_variable_length.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 12
|
2018-07-09T16:20:31.000Z
|
2022-03-21T22:52:15.000Z
|
Chapter07/function_with_variable_length.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | null | null | null |
Chapter07/function_with_variable_length.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 19
|
2018-01-09T12:49:06.000Z
|
2021-11-23T08:05:55.000Z
|
def variable_argument( var1, *vari):
print "Out-put is",var1
for var in vari:
print var
variable_argument(60)
variable_argument(100,90,40,50,60)
| 26
| 38
| 0.717949
| 26
| 156
| 4.192308
| 0.653846
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0.166667
| 156
| 6
| 39
| 26
| 0.723077
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1b8169f6eb4ed6fdca2017473451e62866f4dddf
| 72
|
py
|
Python
|
osmaxx/clipping_area/__init__.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 27
|
2015-03-30T14:17:26.000Z
|
2022-02-19T17:30:44.000Z
|
osmaxx/clipping_area/__init__.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 483
|
2015-03-09T16:58:03.000Z
|
2022-03-14T09:29:06.000Z
|
osmaxx/clipping_area/__init__.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 6
|
2015-04-07T07:38:30.000Z
|
2020-04-01T12:45:53.000Z
|
default_app_config = 'osmaxx.clipping_area.apps.ClippingGeometryConfig'
| 36
| 71
| 0.875
| 8
| 72
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 72
| 1
| 72
| 72
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1bad40a6c2690a5a87273e36adcd88ebfffca7a8
| 357
|
py
|
Python
|
investment_dashboard/portfolio/models.py
|
mjenrungrot/investment-dashboard
|
89b296a635ee3c29171f7bf88cc8e49250981637
|
[
"MIT"
] | null | null | null |
investment_dashboard/portfolio/models.py
|
mjenrungrot/investment-dashboard
|
89b296a635ee3c29171f7bf88cc8e49250981637
|
[
"MIT"
] | 4
|
2017-12-19T08:39:10.000Z
|
2017-12-20T10:59:38.000Z
|
investment_dashboard/portfolio/models.py
|
mjenrungrot/investment-dashboard
|
89b296a635ee3c29171f7bf88cc8e49250981637
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class PortfolioTransaction(models.Model):
datetime = models.DateTimeField()
equityType = models.CharField(max_length=10)
equityName = models.CharField(max_length=30)
units = models.DecimalField(max_digits=20, decimal_places=10)
currency = models.CharField(max_length=10)
| 35.7
| 66
| 0.747899
| 43
| 357
| 6.093023
| 0.627907
| 0.171756
| 0.206107
| 0.274809
| 0.198473
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.159664
| 357
| 9
| 67
| 39.666667
| 0.84
| 0.067227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
9415bfc2b8cf89dc33ab8191d5c33c608b5083d7
| 717
|
py
|
Python
|
database/users.py
|
obeyurfate/GitWithMe
|
d316814eac17ce9b704b4dd98b8af59ab931b131
|
[
"Unlicense"
] | 1
|
2021-04-02T14:41:51.000Z
|
2021-04-02T14:41:51.000Z
|
database/users.py
|
obeyurfate/GitWithMe
|
d316814eac17ce9b704b4dd98b8af59ab931b131
|
[
"Unlicense"
] | null | null | null |
database/users.py
|
obeyurfate/GitWithMe
|
d316814eac17ce9b704b4dd98b8af59ab931b131
|
[
"Unlicense"
] | null | null | null |
import sqlalchemy
from flask_login import UserMixin
from sqlalchemy import orm
from sqlalchemy_serializer import SerializerMixin
from .db_sess import SqlAlchemyBase
class User(SqlAlchemyBase, UserMixin, SerializerMixin):
__tablename__ = 'users'
id = sqlalchemy.Column(sqlalchemy.Integer,
primary_key=True, autoincrement=True)
nickname = sqlalchemy.Column(sqlalchemy.String)
description = sqlalchemy.Column(sqlalchemy.String, nullable=True)
groups = orm.relation("Groups",
secondary="groups_to_users",
backref="user")
github = sqlalchemy.Column(sqlalchemy.String)
icon = sqlalchemy.Column(sqlalchemy.String)
| 35.85
| 69
| 0.704324
| 70
| 717
| 7.071429
| 0.485714
| 0.161616
| 0.262626
| 0.258586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220363
| 717
| 20
| 70
| 35.85
| 0.88551
| 0
| 0
| 0
| 0
| 0
| 0.041783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3125
| 0
| 0.8125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9422f13be98f523048aa61ab11428eaf41ad9a8c
| 87
|
py
|
Python
|
apps/codigo/apps.py
|
IngMachine/compiladores
|
d8cd2cde29af09188037e7627fc63403a322f5c7
|
[
"Apache-2.0"
] | null | null | null |
apps/codigo/apps.py
|
IngMachine/compiladores
|
d8cd2cde29af09188037e7627fc63403a322f5c7
|
[
"Apache-2.0"
] | null | null | null |
apps/codigo/apps.py
|
IngMachine/compiladores
|
d8cd2cde29af09188037e7627fc63403a322f5c7
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class CodigoConfig(AppConfig):
name = 'codigo'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
943728d83ca7e0f84d7f541a3c187e7872c2261b
| 200
|
py
|
Python
|
backend/apps/sampleapp/serializers.py
|
domasx2/django-angular-docker-seed
|
5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb
|
[
"Unlicense"
] | 32
|
2015-04-27T02:01:59.000Z
|
2021-04-06T10:19:42.000Z
|
backend/apps/sampleapp/serializers.py
|
domasx2/django-angular-docker-seed
|
5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb
|
[
"Unlicense"
] | 14
|
2015-03-21T08:20:34.000Z
|
2016-02-15T07:07:39.000Z
|
backend/apps/sampleapp/serializers.py
|
domasx2/django-angular-docker-seed
|
5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb
|
[
"Unlicense"
] | 21
|
2015-03-18T18:40:12.000Z
|
2021-03-16T22:12:44.000Z
|
from rest_framework import serializers
from .models import Task
class TaskSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Task
read_only_fields = ('slug',)
| 28.571429
| 61
| 0.745
| 21
| 200
| 6.952381
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19
| 200
| 7
| 62
| 28.571429
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0.019901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.