| content (string, lengths 1 to 1.04M) | input_ids (list, lengths 1 to 774k) | ratio_char_token (float64, 0.38 to 22.9) | token_count (int64, 1 to 774k) |
|---|---|---|---|
from flask import Flask
from werkzeug.middleware.proxy_fix import ProxyFix
from apis import api
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
api.init_app(app)
app.run(debug=True)
| [input_ids truncated] | 2.402299 | 87 |
import my_data_file
d = my_data_file.my_dict
print "Hello, my name is %s I am %a years old" % (d['naam'], d['age']) | [
11748,
616,
62,
7890,
62,
7753,
198,
198,
67,
796,
616,
62,
7890,
62,
7753,
13,
1820,
62,
11600,
198,
198,
4798,
366,
15496,
11,
616,
1438,
318,
4064,
82,
314,
716,
4064,
64,
812,
1468,
1,
4064,
357,
67,
17816,
2616,
321,
6,
435... | 2.34 | 50 |
import sys
from io import StringIO
test_input_one = """food, water, materials, metal
5
food - pizza - quantity:10;quality:5
water - mineral - quantity:5;quality:10
materials - wood - quantity:2;quality:5
metal - copper - quantity:3;quality:10
food - burgers - quantity:5;quality:2
"""
# test_input_two = """"""
sys.stdin = StringIO(test_input_one)
# sys.stdin = StringIO(test_input_two)
categories = input().split(', ')
lines_num = int(input())
category_dict = {}
count_items = 0
count_quality = 0
for row in range(lines_num):
category, element, *others = input().split(' - ')
    quantity, quality = [x.split(':') for x in others[0].split(';')]
count_items += int(quantity[1])
count_quality += int(quality[1])
    # todo: handle the case where elements must be unique
    if category not in category_dict:
        category_dict[category] = []
    category_dict[category].append(element)
print(f'Count of items: {count_items}')
print(f'Average quality: {count_quality/len(category_dict):.2f}')
print(*[f'{key} -> {", ".join(v)}' for key, v in category_dict.items()], sep='\n') | [input_ids truncated] | 2.701099 | 455 |
from rofi_menu import Menu, BackItem, NestedMenu, Operation, constants
from rofify.src.TrackMenu import TrackMenu, TrackItem
from rofify.src.SpotifyAPI import spotify
from rofify.src.config import config
class RecentlyPlayedMenu(TrackMenu):
"""
    Display a list of the user's recently played tracks that they can
    listen to by selecting one. Should be accessible from the top menu.
"""
async def pre_render(self, meta):
"""
The playback label contains info about the current playback.
"""
self.prompt = await config.header_playback_label(spotify.playback)
await super().pre_render(meta)
| [input_ids truncated] | 3.061611 | 211 |
# with Iterator
# with Generator function
def vowels(text):
    vowels = {'a', 'e', 'y', 'u', 'o', 'i',
              'A', 'E', 'Y', 'U', 'O', 'I'}

    for char in text:
        if char in vowels:
            yield char
# with generator comprehension
# def vowels(text):
# vowels = {'a', 'e', 'y', 'u', 'o', 'i',
# 'A', 'E', 'Y', 'U', 'O', 'I'}
#
# return (ch for ch in text if ch in vowels)
my_string = vowels('Abcedifuty0o')
for char in my_string:
print(char)
| [input_ids truncated] | 1.958015 | 262 |
import os
import cv2
import optics
import argparse
import tfrecord
import numpy as np
import tensorflow as tf
from util import *
if __name__ == '__main__':
cur_dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
# evaluation parameters
parser.add_argument('--eval-mode', action='store_true', help='Run in evaluation mode')
parser.add_argument('--eval-res-h', default=1080, type=int, help='Input image height in evaluation mode')
parser.add_argument('--eval-res-w', default=1920, type=int, help='Input image width in evaluation mode')
parser.add_argument('--eval-rgb-path', default=os.path.join(cur_dir, "data", "example_input", "couch_rgb.png"), help='Input rgb image path in evaluation mode')
parser.add_argument('--eval-depth-path', default=os.path.join(cur_dir, "data", "example_input", "couch_depth.png"), help='Input depth image path in evaluation mode')
parser.add_argument('--eval-output-path', default=os.path.join(cur_dir, "data", "example_input"), help='Output directory for results')
    parser.add_argument('--eval-depth-shift', default=0, type=float, help='Depth shift (in mm) from the predicted midpoint hologram to the target hologram plane')
    parser.add_argument('--gaussian-sigma', default=0.7, type=float, help='Sigma of Gaussian kernel used by AA-DPM')
    parser.add_argument('--gaussian-width', default=3, type=int, help='Width of Gaussian kernel used by AA-DPM')
    parser.add_argument('--phs-max', default=3.0, type=float, help='Maximum phase modulation of SLM in unit of pi')
parser.add_argument('--use-maimone-dpm', action='store_true', help='Use DPM of Maimone et al. 2017')
# dataset parameters
    parser.add_argument('--dataset-res', default=384, type=int, help='dataset image resolution')
    parser.add_argument('--pitch', default=0.008, type=float, help='pixel pitch in mm')
# model parameters
    parser.add_argument('--num-filters-per-layer', default=24, type=int, help='Number of filters per layer')
    parser.add_argument('--num-layers', default=30, type=int, help='Number of layers')
# validation parameters
parser.add_argument('--validate-mode', action='store_true', help='Run in validation mode')
opt = parser.parse_args()
# fix random seed
tf.compat.v1.set_random_seed(0)
# hologram parameters, units in mm
hologram_params = {
"wavelengths" : np.array([0.000450, 0.000520, 0.000638]), # laser wavelengths in BGR order
"pitch" : opt.pitch, # hologram pitch
"res_h" : opt.dataset_res, # dataset image height
"res_w" : opt.dataset_res, # dataset image width
"depth_base" : -3, # input hologram plane (midpoint hologram)
"depth_scale" : 6, # 3D volume depth
"double_pad": True # double padding for propagation
}
# training parameters
training_params = {
"restore_trained_model": True, # flag to restore pre-trained model
"batch" : 2, # training batch
"num_epochs": 1000, # training epochs
"decay_type": None, # learning rate decay
"decay_params": None, # learning decay parameters
"learning_rate" : 1e-4, # learning rate
"optimizer_type" : "adam", # optimizer type
"optimizer_params" :
{"beta1":0.9, "beta2":0.99, "epsilon":1e-8}, # optimizer parameters
"num_iter_per_model_save": 1000, # number of iterations per model save
"num_iter_per_test": 1000, # number of iterations per validation
"num_top_depth_for_img_loss": 15, # number of top-k depths for computing focal stack loss
"num_random_depth_for_img_loss": 5, # number of random depths selected from rest of the bins
"depth_dependent_weight_scale": 0.35, # attention weight
"num_hist_bins": 200 # number of focal stack bins
}
# model parameters
model_params = {
"name": "full_loss", # model name
"input_dim": 4, # RGBD
"output_dim": 6, # amplitude+phase
"num_layers": opt.num_layers, # number of convolution layers
"interleave_rate": 1, # interleaving rate (default to no interleaving)
"num_filters_per_layer": opt.num_filters_per_layer, # number of filters per convolution layer
"filter_width": 3, # filter width
"bias_stddev": 0.01, # bias standard deviation (for model initialization)
"weight_var_scale": 0.25, # weight variance (for model initialization)
"renormalize_input": True, # normalize input to [-0.5, 0.5]
"activation_func": tf.nn.relu, # activation function for intermediate layers
"output_activation_func": tf.nn.tanh, # activation function for the output layer
}
# loss function parameters
use_l2_loss = False
num_imgs_in_fs = training_params["num_top_depth_for_img_loss"] + training_params["num_random_depth_for_img_loss"]
loss_params = {
"use_l2_loss": use_l2_loss, # activation function for the output layer
"loss_op": tf.losses.mean_squared_error if use_l2_loss else tf.compat.v1.losses.absolute_difference,
"weight_holo": 1.0, # hologram loss weight
"weight_fs": num_imgs_in_fs, # focal stack loss weight
"weight_fs_tv": num_imgs_in_fs, # focal stack tv loss weight
}
# path parameters
labels = ["amp", "phs", "img", "depth"]
train_base_path = os.path.join(cur_dir, "data", "train_%d" % opt.dataset_res)
test_base_path = os.path.join(cur_dir, "data", "test_%d" % opt.dataset_res)
validate_base_path = os.path.join(cur_dir, "data", "validate_%d" % opt.dataset_res)
checkpoint_base_path = os.path.join(cur_dir, 'model', "ckpt_%s_pitch_%d_layers_%d_filters_%d" %
(model_params["name"], hologram_params["pitch"]*1000, model_params["num_layers"], model_params["num_filters_per_layer"]))
path_params = {
"gen_record" : False, # generate tf record
"labels" : labels, # image labels in the tfrecord
"train_output_path" : os.path.join(train_base_path, "train.tfrecord"), # path to training set tfrecord
"train_source_paths" : [os.path.join(train_base_path, x) for x in labels], # path to training set raw images
"test_output_path" : os.path.join(test_base_path, "test.tfrecord"), # path to test set tfrecord
"test_source_paths" : [os.path.join(test_base_path, x) for x in labels], # path to test set raw images
"validate_output_path" : os.path.join(validate_base_path, "validate.tfrecord"), # path to validate set tfrecord
"validate_source_paths": [os.path.join(validate_base_path, x) for x in labels], # path to validation set raw images
"ckpt_path" : os.path.join(checkpoint_base_path, "ckpt"), # checkpoint path
"ckpt_parent_path" : checkpoint_base_path, # checkpoint parent path
"epoch_record_file" : "epoch_record.txt", # txt file that records how many epochs have been trained
"log_path" : "logs", # log file
}
# training dataset parameters
train_dataset_params = {
"repeat": True,
"sample_count": 3800,
"batch": training_params["batch"],
"res_h": hologram_params["res_h"],
"res_w": hologram_params["res_w"],
"prefetch_buffer_size": 2,
"num_parallel_calls": 4,
"shuffle_buffer_size": 2,
"num_epochs": training_params["num_epochs"]
}
# test dataset parameters
test_dataset_params = {
"repeat": True,
"sample_count": 100,
"batch": training_params["batch"],
"res_h": hologram_params["res_h"],
"res_w": hologram_params["res_w"],
"num_parallel_calls": 2,
"prefetch_buffer_size": 4,
"shuffle_buffer_size": 2,
"num_epochs": training_params["num_epochs"]
}
# validation dataset parameters
validate_dataset_params = {
"repeat": True,
"sample_count": 100,
"batch": training_params["batch"],
"res_h": hologram_params["res_h"],
"res_w": hologram_params["res_w"],
"num_parallel_calls": 2,
"prefetch_buffer_size": 4,
"shuffle_buffer_size": 2,
"num_epochs": training_params["num_epochs"]
}
# build model
tensor_holo_model = TensorHolographyModel(hologram_params=hologram_params,
training_params=training_params,
model_params=model_params,
loss_params=loss_params,
path_params=path_params,
train_dataset_params=train_dataset_params,
test_dataset_params=test_dataset_params,
validate_dataset_params=validate_dataset_params)
# train model from scratch (will be available in the second release)
# tensor_holo_model.train()
# validate pre-trained model
if opt.validate_mode:
tensor_holo_model.validate()
# evaluate pre-trained model
if opt.eval_mode:
eval_params = {
"res_h": opt.eval_res_h,
"res_w": opt.eval_res_w,
"rgb_path": opt.eval_rgb_path,
"depth_path": opt.eval_depth_path,
"output_path": opt.eval_output_path,
"double_pad": True,
"use_maimone_dpm": opt.use_maimone_dpm,
"adaptive_phs_shift": False,
"depth_shift": opt.eval_depth_shift,
"gaussian_sigma": opt.gaussian_sigma,
"gaussian_width": opt.gaussian_width,
"phs_max": [opt.phs_max*np.pi] * 3,
"amp_max": None,
}
tensor_holo_model.evaluate(eval_params)
| [input_ids truncated] | 1.982149 | 5,658 |
"""
PDF stream objects - Reference p. 60
"""
from functools import partial, reduce
from .common import PdfType
from ..filters import StreamFilter
from ..misc import ensure_list
class PdfStream(PdfType):
"""PDF stream type"""
@property
def header(self):
"""Stream header"""
return self._header
    @property
def decode(self):
"""Decode the data in the stream by sequentially applying the
filters with their parameters"""
if self._decoded:
return self._decoded_data
# Need to use self._filter_key because, for some reason beyond my
# grasp, the key changes when the stream data is external
# Also, since these may be lists, let's make that happen
filters = ensure_list(self._header.get(self._filter_key, []))
params = ensure_list(self._header.get(self._params_key, []))
if not params:
params = [{} for f in filters]
composed_filters = chain_funcs((partial(StreamFilter[f].decode, **p)
for f, p in zip(filters, params)))
decoded_data = composed_filters(self._data)
self._decoded = True
self._decoded_data = decoded_data
return self._decoded_data
def chain_funcs(funcs):
"""Compose the functions in iterable funcs"""
return lambda x: reduce(lambda f1, f2: f2(f1), funcs, x)
| [input_ids truncated] | 2.486957 | 575 |
import json
import re
import sys
import pandas as pd
import benepar
from progress.bar import Bar
from nltk.tree import *
# Would not match:
# Quoted text, e.g. A in "The first letter in the alphabet is 'A'."
# Partial text, e.g. alpha in "The first letter in the alphabet is 'A'."
# Case-insentitive text, e.g. First in "The first letter in the alphabet is 'A'."
# Unicode text, e.g. \u2013 (long hypen 1716-1718) in "It lasted between 1716\u20131718."
# x = get_sentence(
# "Dorsey was born and raised in St. Louis, Missouri the son of Tim and Marcia (née Smith) Dorsey.", "Dorsey", 34)
# print(x)
# exit()
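# NOTE: `get_sentence` is not defined in this sample. A minimal sketch of the
# assumed helper: return the sentence of `context` containing the answer span that
# starts at `answer_start`, or None when the span does not match `text`. The naive
# punctuation split is an assumption (it mishandles abbreviations such as "St.").
def get_sentence(context, text, answer_start):
    if context[answer_start:answer_start + len(text)] != text:
        return None
    start = 0
    for match in re.finditer(r'(?<=[.!?])\s+', context):
        if start <= answer_start < match.start():
            return context[start:match.start()].strip()
        start = match.end()
    return context[start:].strip()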
with open("data/squad-train-tiny-v2.0.json", "r") as read_file:
data = json.load(read_file)['data']
# get sentences
print("generating sentences dense with answers...")
bar = Bar('Gen top sentences', max=70000)
sents = []
for section in data:
for paragraph in section['paragraphs']:
for question in paragraph['qas']:
for answer in question['answers']:
x = get_sentence(
paragraph['context'], answer['text'], answer['answer_start'])
if x is not None:
sents.append(x)
bar.next()
bar.finish()
print("# sentences", len(sents))
df = pd.DataFrame(data=sents, columns=['sentence'])
answer_count = len(df)
df = df['sentence'].value_counts().reset_index(name='count')
df.columns = ['sentence', 'count']
df = df.sort_values('count', ascending=False)
# remove sentences that have 8 or fewer associated answers.
df = df[df['count'] > 8]
top_sents = df['sentence'].values.tolist()
print("top sents length: ", len(top_sents))
# Create the map context -> answer text -> answer_start
# Avoids too many answer duplicates.
print("extracting answer, sentence pairs...")
bar = Bar('Extract answers+sentences', max=answer_count)
whole_map = {}
for section in data:
for paragraph in section['paragraphs']:
context = paragraph['context']
whole_map[context] = {}
questions = paragraph['qas']
for question in questions:
for answer in question['answers']:
s = get_sentence(
context, answer['text'], answer['answer_start'])
if s is None:
continue
# Temporary: Do not include non-popular sentences.
if s not in top_sents:
continue
if s not in whole_map.keys():
whole_map[s] = {}
print("adding answer: ", answer['text'])
whole_map[s][answer['text']] = answer['answer_start']
bar.next()
bar.finish()
# Go through the map and for each answer, trim down the context
answers_with_context = []
print("loading model...")
parser = benepar.Parser("benepar_en2")
bar = Bar('Processing', max=len(whole_map))
for context_str, answers in whole_map.items():
# get constituency parsing of text
try:
tmp_tree = parser.parse(context_str)
except ValueError:
# TODO: figure out a way to parse long sentences.
continue
context_tree = ParentedTree.convert(tmp_tree)
for answer_text, start in answers.items():
# if answer occurs multiple time in context, skip to avoid confusion
if context_str.count(answer_text) != 1:
print("skipping answer: ", answer_text)
continue
# find a node in context_tree that contains answer wholly.
for st in context_tree.subtrees(lambda t: " ".join(t.leaves()) == answer_text):
labels = {"text": answer_text, "is_answer": True,
"label": st.label(), "root_label": st.root().label()}
if st.parent() is not None:
labels['parent_label'] = st.parent().label()
if st.parent().parent() is not None:
labels['grand_parent_label'] = st.parent().parent().label()
else:
labels['grand_parent_label'] = None
else:
labels['parent_label'] = None
if st.left_sibling() is not None:
labels['left_sibling_label'] = st.left_sibling().label()
if st.left_sibling().left_sibling() is not None:
labels['left_left_sibling_label'] = st.left_sibling(
).left_sibling().label()
else:
labels['left_left_sibling_label'] = None
else:
labels['left_sibling_label'] = None
if st.right_sibling() is not None:
labels['right_sibling_label'] = st.right_sibling().label()
if st.right_sibling().right_sibling() is not None:
labels['right_right_sibling_label'] = st.right_sibling(
).right_sibling().label()
else:
labels['right_right_sibling_label'] = None
else:
labels['right_sibling_label'] = None
print("r adding answer: ", answer_text[:20])
answers_with_context.append(labels)
# Add other words in sentence.
for st in context_tree.subtrees(lambda t: " ".join(t.leaves()) not in answers):
answer_text = " ".join(st.leaves())
labels = {"text": answer_text, "is_answer": False,
"label": st.label(), "root_label": st.root().label()}
if st.parent() is not None:
labels['parent_label'] = st.parent().label()
if st.parent().parent() is not None:
labels['grand_parent_label'] = st.parent().parent().label()
else:
labels['grand_parent_label'] = None
else:
labels['parent_label'] = None
if st.left_sibling() is not None:
labels['left_sibling_label'] = st.left_sibling().label()
if st.left_sibling().left_sibling() is not None:
labels['left_left_sibling_label'] = st.left_sibling(
).left_sibling().label()
else:
labels['left_left_sibling_label'] = None
else:
labels['left_sibling_label'] = None
if st.right_sibling() is not None:
labels['right_sibling_label'] = st.right_sibling().label()
if st.right_sibling().right_sibling() is not None:
labels['right_right_sibling_label'] = st.right_sibling(
).right_sibling().label()
else:
labels['right_right_sibling_label'] = None
else:
labels['right_sibling_label'] = None
answers_with_context.append(labels)
bar.next()
bar.finish()
print(answers_with_context[:1])
print(len(answers_with_context))
with open("data/squad-train-trimmed-v2.1.json", "w") as out:
json.dump(answers_with_context, out)
print("done.") | [
11748,
33918,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1888,
538,
283,
198,
6738,
4371,
13,
5657,
1330,
2409,
198,
6738,
299,
2528,
74,
13,
21048,
1330,
1635,
198,
198,
2,
10928,
407,
2872,... | 2.214286 | 3,094 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AuthenticationType',
'AutoProvision',
'MinimalSeverity',
'Roles',
'State',
]
class AuthenticationType(str, Enum):
"""
Connect to your cloud account, for AWS use either account credentials or role-based authentication. For GCP use account organization credentials.
"""
AWS_CREDS = "awsCreds"
AWS_ASSUME_ROLE = "awsAssumeRole"
GCP_CREDENTIALS = "gcpCredentials"
class AutoProvision(str, Enum):
"""
Whether or not to automatically install Azure Arc (hybrid compute) agents on machines
"""
ON = "On"
OFF = "Off"
class MinimalSeverity(str, Enum):
"""
Defines the minimal alert severity which will be sent as email notifications
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class Roles(str, Enum):
"""
A possible role to configure sending security notification alerts to
"""
ACCOUNT_ADMIN = "AccountAdmin"
SERVICE_ADMIN = "ServiceAdmin"
OWNER = "Owner"
CONTRIBUTOR = "Contributor"
class State(str, Enum):
"""
Defines whether to send email notifications from Azure Security Center to persons with specific RBAC roles on the subscription.
"""
ON = "On"
OFF = "Off"
| [input_ids truncated] | 2.897331 | 487 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup
~~~~~
    tintri_http_api setup script.
:copyright: 2010-2015 Schaefer & Tobies SuC GmbH.
:author: Markus Grimm <mgr@schaefer-tobies.de>;
Uwe W. Schaefer <uws@schaefer-tobies.de>
:license: LGPL, see LICENSE for details.
"""
import distutils.core
import os
import shutil
import sys
import tempfile
def main():
""" run distutils.setup """
# Exclude local_settings.py from being added to package/installation
local_settings_py = os.path.join('src', 'schtob', 'tintri_http_api',
'local_settings.py')
temp_file_path = None
if os.path.exists(local_settings_py):
temp_file_path = os.path.join(tempfile.gettempdir(),
'tintri_http_api_local_settings.py')
shutil.move(local_settings_py, temp_file_path)
handle = open(local_settings_py, 'w')
handle.write('')
handle.close()
sys.path.append('src')
from schtob.tintri_http_api import VERSION
distutils.core.setup(
name='tintri_http_api',
version=VERSION,
description='Python Tintri HTTP API Implementation',
long_description=('tintri_http_api is a Python implementation '
'for the Tintri HTTP-Rest API. '
'It is a wrapper around the Tintri HTTP-Rest-API.'),
author='Uwe W. Schäfer',
author_email='uws@schaefer-tobies.de',
url='http://www.schaefer-tobies.de',
download_url='http://www.schaefer-tobies.de/dist/%s/'
'tintri_http_api-%s.tar.gz' % (VERSION, VERSION),
license='LGPL',
platforms=['POSIX', 'Windows'],
packages=['schtob', 'schtob.tintri_http_api'],
package_dir = {'': 'src'},
scripts=[os.path.join('bin', 'tintri_flr.py'),
os.path.join('bin', 'tintri_snap.py'),
],
classifiers=[
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Operating System :: POSIX',
'Operating System :: Microsoft',
'Operating System :: MacOS',
]
)
# restore local_settings.py
if temp_file_path:
shutil.move(temp_file_path, local_settings_py)
if __name__ == '__main__':
main()
| [input_ids truncated] | 2.103917 | 1,251 |
# Open the file in read mode
text = open("sample.txt", "r")
# Create an empty dictionary
d = dict()
# Loop through each line of the file
for line in text:
line = line.strip()
line = line.lower()
words = line.split(" ")
for word in words:
if word in d:
d[word] = d[word] + 1
else:
d[word] = 1
for key in list(d.keys()):
print(key, ":", d[key])
| [input_ids truncated] | 2.212291 | 179 |
__all__ = ['bar']
| [input_ids truncated] | 2.25 | 8 |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class NetworkVswitchStandardPortgroup(Base):
'''
Commands to list and manipulate Port Groups on an ESX host.
'''
moid = 'ha-cli-handler-network-vswitch-standard-portgroup'
def add(self, portgroupname, vswitchname):
'''
Allows the addition of a standard port group to a virtual switch.
:param portgroupname: string, The name of the port group to add
:param vswitchname: string, The virtual switch to add the port group to.
:returns: boolean
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.network.vswitch.standard.portgroup.Add',
portgroupname=portgroupname,
vswitchname=vswitchname,
)
def set(self, portgroupname, vlanid=None):
'''
Set the vlan id for the given port group
:param portgroupname: string, The name of the port group to set vlan id for.
:param vlanid: long, The vlan id for this port group. This value is in the range (0 - 4095)
:returns: boolean
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.network.vswitch.standard.portgroup.Set',
portgroupname=portgroupname,
vlanid=vlanid,
)
def list(self):
'''
List all of the port groups currently on the system.
:returns: vim.EsxCLI.network.vswitch.standard.portgroup.list.PortGroup[]
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.network.vswitch.standard.portgroup.List',
)
def remove(self, portgroupname, vswitchname):
'''
Remove a port group from the given virtual switch
:param portgroupname: string,
:param vswitchname: string,
:returns: boolean
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.network.vswitch.standard.portgroup.Remove',
portgroupname=portgroupname,
vswitchname=vswitchname,
        ) | [input_ids truncated] | 2.110595 | 1,076 |
import numpy as np
from sklearn import *
import matplotlib.pyplot as plt
# Generate a dataset and plot it
np.random.seed(0)
X, y = datasets.make_gaussian_quantiles(cov=3.,
n_samples=400, n_features=2,
n_classes=3, random_state=0)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
num_examples = len(X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 3 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
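# NOTE: the scalar activation helpers are not defined in this sample; minimal
# sketches are assumed here so the np.vectorize calls below have functions to wrap.
def sig(x):
    # logistic sigmoid of a scalar
    return 1.0 / (1.0 + np.exp(-x))

def relu(x):
    # rectified linear unit of a scalar
    return max(0.0, x)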
sig = np.vectorize(sig)
relu = np.vectorize(relu)
# Helper function to evaluate the total loss on the dataset
# Helper function to predict an output (0 or 1)
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
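# NOTE: calculate_loss, predict and build_model are not defined in this sample.
# Below is a minimal sketch of the assumed two-layer tanh/softmax network (in the
# style of the classic "neural network from scratch" tutorial this snippet follows);
# X, y, num_examples and the hyperparameters above are used as globals.
def calculate_loss(model):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    a1 = np.tanh(X.dot(W1) + b1)
    scores = np.exp(a1.dot(W2) + b2)
    probs = scores / np.sum(scores, axis=1, keepdims=True)
    # average cross-entropy plus L2 regularization on the weights
    data_loss = -np.sum(np.log(probs[range(num_examples), y]))
    data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return data_loss / num_examples

def predict(model, x):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    a1 = np.tanh(x.dot(W1) + b1)
    scores = np.exp(a1.dot(W2) + b2)
    probs = scores / np.sum(scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)

def build_model(nn_hdim, num_passes=20000, print_loss=False):
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))
    model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
    for i in range(num_passes):
        # forward pass
        a1 = np.tanh(X.dot(W1) + b1)
        scores = np.exp(a1.dot(W2) + b2)
        probs = scores / np.sum(scores, axis=1, keepdims=True)
        # backpropagate the softmax cross-entropy loss
        delta3 = probs.copy()
        delta3[range(num_examples), y] -= 1
        dW2 = a1.T.dot(delta3) + reg_lambda * W2
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - a1 ** 2)
        dW1 = X.T.dot(delta2) + reg_lambda * W1
        db1 = np.sum(delta2, axis=0, keepdims=True)
        # gradient descent update (in place, so `model` stays current)
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model)))
    return model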
model = build_model(3, 2, print_loss=True)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
Z = predict(model, np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, edgecolor='k')
plt.contourf(xx, yy, Z, alpha=0.5)
plt.show()
| [input_ids truncated] | 2.330532 | 714 |
#!/usr/bin/python3
'''
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
'''
#Additional implementation
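# NOTE: no implementation is included in this sample; a minimal sketch matching
# the docstring above (the function name `near_hundred` is assumed):
def near_hundred(n):
    # within 10 of 100 or within 10 of 200
    return abs(100 - n) <= 10 or abs(200 - n) <= 10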
| [input_ids truncated] | 3.346154 | 52 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#sess = tf.Session(config=config)
#K.set_session(sess)
import itertools as it
import joblib as jl
from keras import backend as K
from keras.callbacks import Callback
from keras.constraints import maxnorm, nonneg
import keras.layers as kl
import keras.models as km
from keras.optimizers import Adam
import numpy as np
import numpy.random as npr
from rl import graph_includes as graph_inc
from keras.callbacks import TensorBoard
import time
# customize your behavior
| [input_ids truncated] | 2.29085 | 306 |
from mpi4py import MPI
import numpy as np
from numpy.lib.shape_base import column_stack
import sys, time
comm = MPI.Comm.Get_parent()
size = comm.Get_size()
rank = comm.Get_rank()
dimensions = np.array((0,0))
comm.Bcast(dimensions, root=0)
M, N = dimensions[0], dimensions[1]
complete = False
reqs = []
counter = 0
if rank == 0:
limit = N
if rank == 1:
limit = M
complete_tog = np.array(0, dtype=np.int64)
# need the length of the previous data before we can set up buffers
previous_data_size = np.array(0, dtype=np.int64)
prev_data_req = comm.Irecv(previous_data_size, source=0, tag=1)
complete_flag_req = comm.Irecv(complete_tog, source=0, tag=7)
new_data_send = None
while complete_tog < 1:
    if new_data_send is not None:
        if MPI.Request.Test(new_data_send):
            prev_data_req = comm.Irecv(previous_data_size, source=0, tag=1)
            new_data_send = None
    if prev_data_req is not None:
        if MPI.Request.Test(prev_data_req):
# set up buffers
previous_data = np.zeros(previous_data_size, dtype=np.int64)
cost_array = np.zeros(previous_data_size - 1, dtype=np.int64)
neighbor = np.array(0, dtype=np.int64)
# receive essential data
reqs.append(comm.Irecv(previous_data, source=0, tag=2))
reqs.append(comm.Irecv(neighbor, source=0, tag=3))
reqs.append(comm.Irecv(cost_array, source=0, tag=4))
# synchronize
prev_data_req = None
    if len(reqs) > 0 and MPI.Request.Testall(reqs):
# create a buffer for our calculation
new_data = np.zeros(int(previous_data_size) - 1, dtype=np.int64)
for i in range(len(new_data)):
new_data[i] = min(previous_data[i], neighbor, previous_data[i + 1]) + cost_array[i]
neighbor = new_data[i]
print(str(rank) + " returning: " + str(new_data))
new_data_send = comm.Isend(new_data, dest=0, tag=5 + rank)
previous_data_size = np.array(0, dtype=np.int64)
reqs.clear()
    if MPI.Request.Test(complete_flag_req) or complete_tog == 1:
break
print(f"Agent {rank} exited.")
# comm.Disconnect()
sys.exit(0) | [input_ids truncated] | 2.180473 | 1,014 |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
from __future__ import absolute_import
import numpy
import uproot4.interpretation
| [input_ids truncated] | 2.9 | 60 |
from typing import List
from django.db.models.query import EmptyQuerySet, QuerySet
from django.utils.functional import empty
from apps.oob.forms.project_forms import ProjectForm
from django.http import response
from django.test import TestCase, RequestFactory
from django.urls.base import reverse
from http import HTTPStatus
from apps.oob.models import Task, Project
from apps.core.models import CoreUser
| [input_ids truncated] | 3.370079 | 127 |
#!/usr/bin/env python3
'''Global constants for testing.'''
import datetime
# Credentials
API_TOKEN = '92d8c5a0eceef3c05f4149fc04b62bb2cd50d9c6'
OMEGAUP_API_ENDPOINT = 'http://localhost:8001'
# Date limits
DATE_LOWER_LIMIT = datetime.datetime(2005, 1, 1)
DATE_UPPER_LIMIT = datetime.datetime.now()
| [input_ids truncated] | 2.246269 | 134 |
import gym
import numpy as np
import argparse
import pickle
import sys
"""
Download D4RL dataset and store in lifelong_rl format
- Note this script requires having D4RL installed
- See: https://github.com/rail-berkeley/d4rl
"""
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str,
help='Which task to download dataset of (ex. halfcheetah-random-v0)')
parser.add_argument('--output', type=str, default='$$$',
help='What to name the output file of transitions (default: same as task)')
args = parser.parse_args(sys.argv[1:])
print('Getting dataset for %s' % args.task)
env = gym.make(args.task)
dataset = env.get_dataset()
dataset_len = len(dataset['observations'])
print('%d transitions found with average reward %.4f' % (dataset_len, dataset['rewards'].mean()))
# Note we store data as (obs, act, r, d, next_obs)
np_dataset = np.concatenate([
dataset['observations'][:dataset_len-1],
dataset['actions'][:dataset_len-1],
dataset['rewards'][:dataset_len-1].reshape(dataset_len-1, 1),
dataset['terminals'][:dataset_len-1].reshape(dataset_len-1, 1),
dataset['observations'][1:],
], axis=-1)
output_file = args.output
if output_file == '$$$':
output_file = args.task
with open('agent_data/%s.pkl' % output_file, 'wb') as f:
pickle.dump(np_dataset, f)
print('Stored output in agent_data/%s.pkl' % output_file)
| [input_ids truncated] | 2.540687 | 553 |
import os
import numpy as np
import argparse
import yaml
import chainer
import source.yaml_utils as yaml_utils
from evaluation import get_batch, save_images
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dir_nir', type=str)
parser.add_argument('--dir_rgb', type=str)
parser.add_argument('--results_dir', type=str, required=True)
parser.add_argument('--imlist_nir', type=str)
parser.add_argument('--imlist_rgb', type=str)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--gen_model', type=str, required=True)
args = parser.parse_args()
test(args)
| [input_ids truncated] | 2.752066 | 242 |
# importing the random package, used to create pseudo-random numbers for testing purposes
import random
#allows additional operations such as the itemgetter function
import operator
# allows the presentation of plots, graphs etc.; will be used to display agents
import matplotlib.pyplot
# time package will be used to help check the efficiency of the code; time.clock()
# raised deprecation warnings, so time.perf_counter() is used instead
import time
"""we enter in agents row a and row b because this reduces our code for when the function is called, we now no longer need to express an agent
as ([0][1],[0][0]) and we can now express in the more logical [0] and [1]"""
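# NOTE: distance_between is not defined in this sample; a minimal sketch matching
# the docstring above and the calls further down.
def distance_between(agents_row_a, agents_row_b):
    # calculates the euclidean distance between two agents
    return (((agents_row_a[0] - agents_row_b[0])**2) +
            ((agents_row_a[1] - agents_row_b[1])**2))**0.5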
# Starts timer to assess efficiency of code.
start = time.perf_counter()
num_of_agents = 10
num_of_iterations = 100
agents = []
# Make the agents.
for i in range(num_of_agents):
agents.append([random.randint(0,99),random.randint(0,99)])
# Move the agents.
for j in range(num_of_iterations):
for i in range(num_of_agents):
if random.random() < 0.5:
agents[i][0] = (agents[i][0] + 1) % 100
else:
agents[i][0] = (agents[i][0] - 1) % 100
if random.random() < 0.5:
agents[i][1] = (agents[i][1] + 1) % 100
else:
agents[i][1] = (agents[i][1] - 1) % 100
# calculates euclidean distance between agents
answer = (((agents[0][0] - agents[1][0])**2) + ((agents[0][1] - agents[1][1])**2))**0.5
print(answer)
"""answer = (((agents[2][0] - agents[3][0])**2) + ((agents[0][1] - agents[1][1])**2))**0.5
print(answer) could get distance between different agents individually but obviously this would be very inefficent"""
answer = distance_between(agents[0], agents[1])
# below is a test to check the code is running smoothly; print statements are important to help identify where problems may be occurring
print(answer)
"""so below here we are saying for everything i and j in our agents, do not calculate distance between the same agent, then calculate the distance
in positions 0 and 1 using the function specified at the start of our code in the distance between function."""
for i in range(num_of_agents):
for j in range(num_of_agents):
        #!= means not equal to and omits distance between agents which are the same
if agents[i] != agents[j]:
distance = distance_between(agents[0], agents[1])
answer = distance_between(agents[i], agents[j])
# the two lines below print the time it takes for the code to execute.
end = time.perf_counter()
print("time = " + str(end - start))
"""Time for 10 agents 0.15 seconds
Time for 100 agents 1.52 seconds
Time for 1000 agents 114 seconds """
"""Creates plots for agents"""
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i][1], agents[i][0])
matplotlib.pyplot.show()
| [input_ids truncated] | 2.757263 | 1,067 |
# 1. Two Sum
"""
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Input: [2, 7, 11, 15], target = 9
Output: [0, 1]
""" | [
| [input_ids truncated] | 3.530864 | 81 |
import turtle
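# NOTE: `setup_screen` is not defined in this sample. A minimal sketch of the
# assumed wrapper: it decorates the screen factory so that title, size (assumed to
# be a (width, height) tuple) and tracer settings are applied to the new screen.
def setup_screen(screen_factory):
    def wrapper(title="Title", size=None, tracer=0):
        screen = screen_factory()
        screen.title(str(title))
        if size is not None:
            screen.setup(*size)
        screen.tracer(tracer)
        return screen
    return wrapper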
def Screen(title="Title", size=None, tracer=0):
"""Returns a new turtle screen"""
return turtle.Screen(title, size, tracer)
class Turtle(turtle.Turtle):
"""A Turtle"""
turtle.Screen = setup_screen(turtle.Screen)
if __name__ == "__main__":
s = Screen(0)
t = Turtle()
t.dot()
t.circle(50)
s.mainloop()
| [input_ids truncated] | 2.5 | 140 |
from abc import ABCMeta, abstractmethod
from typing import Any, Union, List, Dict
from core.database.backend import AbstractBackend
| [input_ids truncated] | 3.828571 | 35 |
"""Module for conveniently managing a [Hyperopt](https://github.com/hyperopt/hyperopt) cluster."""
import os
import time
from typing import List, Optional, Union
from lazycluster import Runtime, RuntimeGroup, RuntimeTask, _utils
from lazycluster.cluster import MasterLauncher, MasterWorkerCluster, WorkerLauncher
from lazycluster.cluster.exceptions import MasterStartError
from lazycluster.exceptions import PortInUseError
from lazycluster.utils import Environment
class MongoLauncher(MasterLauncher):
"""Abstract implementation of the `MasterLauncher` interface used to implement a concrete launch strategy for mongodb instance used in hyperopt.
This class implements the logic for starting a MongoDB instance on localhost. Hence, we simply treat the MongoDB
instance as master node.
"""
def __init__(self, runtime_group: RuntimeGroup):
"""Initialization method.
Args:
runtime_group: The group where the workers will be started.
"""
super().__init__(runtime_group)
self.dbpath: Optional[str] = None
class LocalMongoLauncher(MongoLauncher):
"""Concrete implementation of the `MasterLauncher` interface. See its documentation to get a list of the inherited methods and attributes.
This class implements the logic for starting a MongoDB instance on localhost. Hence, we simply treat the MongoDB
instance as master node.
"""
def start(
self, ports: Union[List[int], int], timeout: int = 0, debug: bool = False
) -> List[int]:
"""Launch a master instance.
Note:
If you create a custom subclass of MasterLauncher which will not start the master instance on localhost
then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you
can benefit from the debug feature of `RuntimeTask.execute()`.
Args:
ports: Port where the DB should be started. If a list is given then the first port that is free in the
`RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.
            timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet.
                Defaults to 0.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If, `False` then
the stdout/stderr will be added to python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.
MasterStartError: If master was not started after the specified `timeout`.
"""
if debug:
self.log.debug("The debug flag has no effect in LocalMongoLauncher.")
_ports: Union[List[int], int] = (
ports.copy() if isinstance(ports, list) else ports
)
if not isinstance(_ports, list):
if _utils.localhost_has_free_port(_ports) and self._group.has_free_port(
_ports, exclude_hosts=Runtime.LOCALHOST
):
self._port = master_port = _ports
else:
raise PortInUseError(_ports, self._group)
else:
self._port = master_port = self._group.get_free_port(
_ports
) # Raises NoPortsLeftError
_ports = _utils.get_remaining_ports(_ports, master_port)
self.log.debug(
f"Starting MongoDB on localhost on port {str(master_port)} with dbpath `{self.dbpath}` and "
f"logfile `{self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME}`."
)
# Start the mongod deamon process
return_code = os.system(self.get_mongod_start_cmd())
if return_code != 0:
cause = (
f"Please verify that (1) MongoDB is installed, (2) the dbpath `{self.dbpath}` exists with the "
f"rights required by mongod and (3) that no other MongoDB instance is using and consequently "
f"locking the respective files (=> Init HyperoptCluster with another dbpath "
f"or manually stop the mongod process). See hyperopt docs in README for further details."
)
raise MasterStartError("localhost", master_port, cause)
time.sleep(timeout) # Needed for being able to check the port
if not _utils.localhost_has_free_port(master_port):
self.log.info("MongoDB started on localhost on port " + str(self._port))
else:
self.log.debug(
"MongoDB could NOT be started successfully on port " + str(self._port)
)
cause = f"The master port {master_port} is still free when checking after the timeout of {timeout} seconds."
raise MasterStartError("localhost", master_port, cause)
        # Sets up an ssh tunnel for the MongoDB port so that all communication is routed over the
        # local machine and all entities can talk to the MongoDB instance on localhost.
self.log.info("Expose the MongoDB port in the RuntimeGroup.")
self._group.expose_port_to_runtimes(self._port)
return _ports if isinstance(_ports, list) else []
def get_mongod_start_cmd(self) -> str:
"""Get the shell command for starting mongod as a deamon process.
Returns:
str: The shell command.
"""
return (
f"mongod --fork --logpath={self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME} --dbpath={self.dbpath} "
f"--port={self._port}"
)
def get_mongod_stop_cmd(self) -> str:
"""Get the shell command for stopping the currently running mongod process.
Returns:
str: The shell command.
"""
return f"mongod --shutdown --dbpath={self.dbpath}"
def cleanup(self) -> None:
"""Release all resources."""
self.log.info("Stop the MongoDB ...")
self.log.debug("Cleaning up the LocalMasterLauncher ...")
return_code = os.system(self.get_mongod_stop_cmd())
if return_code == 0:
self.log.info("MongoDB successfully stopped.")
else:
self.log.warning("MongoDB daemon could NOT be stopped.")
super().cleanup()
class RoundRobinLauncher(WorkerLauncher):
"""Concrete WorkerLauncher implementation for launching hyperopt workers in a round robin manner.
See the `WorkerLauncher` documentation to get a list of the inherited methods and attributes.
"""
def __init__(self, runtime_group: RuntimeGroup, dbname: str, poll_interval: float):
"""Initialization method.
Args:
runtime_group: The group where the workers will be started.
dbname: The name of the mongodb instance.
poll_interval: The poll interval of the hyperopt worker.
        Raises:
ValueError: In case dbname is empty.
"""
super().__init__(runtime_group)
self._ports = None
if not dbname:
raise ValueError("dbname must not be empty")
self._dbname = dbname
if not poll_interval:
raise ValueError("poll_interval must not be empty")
self._poll_interval = poll_interval
self.log.debug("RoundRobinLauncher initialized.")
def start(
self,
worker_count: int,
master_port: int,
ports: List[int] = None,
debug: bool = True,
) -> List[int]:
"""Launches the worker instances in the `RuntimeGroup`.
Args:
worker_count: The number of worker instances to be started in the group.
master_port: The port of the master instance.
            ports: Not used here; only present because we need to adhere to the interface defined by the
                WorkerLauncher class.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If, `False` then
the stdout/stderr will be added to python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: The updated port list after starting the workers, i.e. the used ones were removed.
"""
hosts = self._group.hosts
runtimes = self._group.runtimes
# Launch each desired worker one by one
for worker_index in range(worker_count):
# Determine the runtime where the next worker will be started in
runtime_index = (
self._group.runtime_count + worker_index
) % self._group.runtime_count
# Get the actual host corresponding to the index
host = hosts[runtime_index]
# working_dir = runtimes[runtime_index].working_dir
assert host == runtimes[runtime_index].host
self.log.debug(
f"Launch Hyperopt worker with index {worker_index} on Runtime {host}"
)
self._launch_single_worker(host, worker_index, master_port, debug)
return ports if isinstance(ports, list) else []
def _launch_single_worker(
self, host: str, worker_index: int, master_port: int, debug: bool
) -> None:
"""Launch a single worker instance in a `Runtime` in the `RuntimeGroup`."""
        # Start the worker via a RuntimeTask
task = RuntimeTask("launch-hyperopt-worker-" + str(worker_index))
task.run_command(
self._get_launch_command(master_port, self._dbname, self._poll_interval)
)
self._group.execute_task(task, host, omit_on_join=True, debug=debug)
@classmethod
def _get_launch_command(
cls, master_port: int, dbname: str, poll_interval: float = 0.1
) -> str:
"""Get the shell command for starting a worker instance.
Returns:
str: The launch command.
"""
return (
f"hyperopt-mongo-worker --mongo=localhost:{str(master_port)}/{dbname} "
f"--poll-interval={str(poll_interval)}"
)
def cleanup(self) -> None:
"""Release all resources."""
self.log.info("Cleanup the RoundRobinLauncher ...")
super().cleanup()
class HyperoptCluster(MasterWorkerCluster):
"""Convenient class for launching a Hyperopt cluster in a `RuntimeGroup`.
HyperoptCluster inherits from MasterWorkerCluster. See its documentation to get a list of the inherited methods
and attributes.
The number of hyperopt workers defaults to the number of `Runtimes` in the used `RuntimeGroup`. This number can be
adjusted so that more or less workers than available `Runtimes` can be used. Per default the desired number of
workers is started in a round robin way as implemented in `RoundRobinLauncher`. Consequently, this leads to an
equal distribution of hyperopt workers in the `RuntimeGroup`. You can provide a custom implementation inheriting
from the `WorkerLauncher` class in order to execute a different strategy how workers should be started. The
    master instance (i.e. the MongoDB) will always be started on localhost as implemented in `LocalMongoLauncher`. This
behavior can also be changed by providing a custom implementation inheriting from the `MasterLauncher`.
"""
MONGO_LOG_FILENAME = "hyperopt_mongo.log"
DEFAULT_MASTER_PORT = 27017
ENV_NAME_MONGO_URL = "MONGO_CONNECTION_URL"
def __init__(
self,
runtime_group: RuntimeGroup,
mongo_launcher: Optional[MongoLauncher] = None,
worker_launcher: Optional[WorkerLauncher] = None,
dbpath: Optional[str] = None,
dbname: str = "hyperopt",
worker_poll_intervall: float = 0.1,
):
"""Initialization method.
Args:
runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.
mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which
implements the strategy for launching the master instances in the cluster. If None, then
                `LocalMongoLauncher` is used.
worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which
implements the strategy for launching the worker instances. If None, then
`RoundRobinLauncher` is used.
dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the
`utils.Environment.main_directory`.
            dbname: The name of the database to be used for experiments. See the MongoTrials url scheme in the hyperopt
                documentation for more details. Defaults to `hyperopt`.
worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.
Raises:
            PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions.
"""
super().__init__(runtime_group)
self._master_launcher = mongo_launcher or LocalMongoLauncher(runtime_group)
        if not dbpath:
self._master_launcher.dbpath = os.path.join(
Environment.main_directory, "mongodb"
)
assert self._master_launcher.dbpath
try:
os.makedirs(self._master_launcher.dbpath) # Raises PermissionError
except FileExistsError:
# All good because the dir already exists
pass
else:
self._master_launcher.dbpath = dbpath
self._dbname = dbname
self._worker_launcher = (
worker_launcher
if worker_launcher
else RoundRobinLauncher(runtime_group, dbname, worker_poll_intervall)
)
self.log.debug("HyperoptCluster initialized.")
@property
def mongo_trial_url(self) -> str:
"""The MongoDB url indicating what mongod process and which database to use.
Note:
The format is the format required by the hyperopt MongoTrials object.
Returns:
str: URL string.
"""
if not self.master_port:
self.log.warning(
"HyperoptCluster.mongo_trial_url was requested although the master_port is not yet set."
)
return f"mongo://localhost:{self.master_port}/{self.dbname}/jobs"
@property
def mongo_url(self) -> str:
"""The MongoDB url indicating what mongod process and which database to use.
Note:
The format is `mongo://host:port/dbname`.
Returns:
str: URL string.
"""
if not self.master_port:
self.log.warning(
"HyperoptCluster.mongo_trial_url was requested although the master_port is not yet set."
)
return f"mongo://localhost:{self.master_port}/{self.dbname}"
@property
def dbname(self) -> str:
"""The name of the MongoDB database to be used for experiments."""
return self._dbname
def start_master(
self, master_port: Optional[int] = None, timeout: int = 3, debug: bool = False
) -> None:
"""Start the master instance.
Note:
            How the master is actually started is determined by the actual `MasterLauncher` implementation. Another
implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster
class.
Args:
master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if
the port is not free within the group. The actual chosen port can be requested via
self.master_port.
timeout: Timeout (s) after which an MasterStartError is raised if master instance not started yet.
            debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect
                if the master instance is started locally, which is what default MasterLauncher implementations usually do.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.
MasterStartError: If master was not started after the specified `timeout`.
"""
super().start_master(master_port, timeout)
self._group.add_env_variables({self.ENV_NAME_MONGO_URL: self.mongo_trial_url})
def cleanup(self) -> None:
"""Release all resources."""
self.log.info("Shutting down the HyperoptCluster...")
super().cleanup()
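# NOTE: a hypothetical usage sketch (host names, search space and objective are
# illustrative only and not part of this module):
#
#   group = RuntimeGroup([Runtime('host-1'), Runtime('host-2')])
#   cluster = HyperoptCluster(group)
#   cluster.start()  # starts the MongoDB master and one hyperopt worker per Runtime
#
#   from hyperopt import fmin, hp, tpe
#   from hyperopt.mongoexp import MongoTrials
#   trials = MongoTrials(cluster.mongo_trial_url, exp_key='demo')
#   best = fmin(fn=lambda x: x ** 2, space=hp.uniform('x', -10, 10),
#               algo=tpe.suggest, max_evals=50, trials=trials)
#   cluster.cleanup()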
| [input_ids truncated] | 2.543356 | 6,758 |
#
# This file is part of Brazil Data Cube Database module.
# Copyright (C) 2019 INPE.
#
# Brazil Data Cube Database module is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Model for table ``bdc.providers``."""
from sqlalchemy import Column, Index, Integer, String, Text
from sqlalchemy.dialects.postgresql import JSONB
from .base_sql import BaseModel
class Provider(BaseModel):
"""Model for table ``bdc.providers``."""
__tablename__ = 'providers'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64))
description = Column(Text)
uri = Column(String(255))
credentials = Column(JSONB, comment='Follow the JSONSchema @jsonschemas/provider-credentials.json')
__table_args__ = (
Index(None, name),
)
| [input_ids truncated] | 3.045775 | 284 |
import os
import random
import numpy as np
import torch
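# NOTE: `set_seed` is not defined in this sample; a minimal sketch of the assumed
# helper, seeding every relevant RNG for reproducibility.
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)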
seed_value = 6977170
set_seed(seed_value)
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
| [input_ids truncated] | 2.765766 | 111 |
# Important constants about the project are here, mainly paths of important
# files/folders.
import os
import csv
from functools import cached_property
from collections import namedtuple
from math import inf
import textwrap
import numpy as np
import pandas as pd
from .common_utils import get_image, crop_image
from .common_utils import np_to_pil, pil_to_np, np_to_torch, torch_to_np
from .basic_utils import load_obj, save_obj, npy_shape
# important project constants
NUM_RANDOM_OUTPUTS = 100
PROJECT_FOLDER = \
'/srv/beegfs02/scratch/biwismrschool21/data/NAS-DIP Summer Research'
IMG_SIZE = 512
IMG_EXT = '.png'
# to be used in metric calculations
HIST_BINS = 100
HIST_DENSITY = True
HIST_RANGE = None
indent = ' '*3
# We represent the project folder with a Folder object
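# NOTE: the Folder/CSVFile helpers and print_not_exists are not defined in this
# sample. Below is a minimal sketch of the assumed interface: add() registers a
# child node, children are reachable as attributes ('.' replaced by '_') or via
# [] indexing, exists() checks the filesystem, iterating a folder yields its
# children, and iterating a CSV file yields its rows.
class Folder:
    def __init__(self, name, parent=None):
        self.path = name if parent is None else os.path.join(parent.path, name)
        self._children = {}

    def add(self, name):
        child = CSVFile(name, self) if name.endswith('.csv') else Folder(name, self)
        self._children[name.replace('.', '_')] = child
        return child

    def __getattr__(self, name):
        try:
            return self.__dict__['_children'][name]
        except KeyError:
            raise AttributeError(name)

    def __getitem__(self, key):
        return self._children[key.replace('.', '_')]

    def __iter__(self):
        return iter(self._children.values())

    def __repr__(self):
        return self.path

    def exists(self):
        return os.path.exists(self.path)


class CSVFile(Folder):
    def __iter__(self):
        # yields the rows of the file as lists of strings
        with open(self.path) as handle:
            yield from csv.reader(handle)


def print_not_exists(node):
    print('There is no {}.'.format(node.path))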
root = Folder(PROJECT_FOLDER)
root.add('benchmark')
root.add('images')
root.benchmark.add('denoising')
root.benchmark.add('inpainting')
# maybe, you can add field names for this CSVFile object
root.benchmark.add('lowpass_metrics.csv')
root.benchmark.add('models.csv')
if not root.benchmark.models_csv.exists():
raise FileNotFoundError(
'There is no {} file.'.format(root.benchmark.models_csv.path())
)
model_names = list(map(
lambda t: '0_{}_iteration_4000_sigma_25_skip_{}'.format(*t),
root.benchmark.models_csv
))
model_names_hashed = {name: False for name in model_names}
num_models = len(model_names)
root.benchmark.add('random_outputs')
for model_name in model_names:
root.benchmark.random_outputs.add(model_name)
root.benchmark.random_outputs[model_name].add('random_output.npy')
# may be delete this for loop in the future
for i in range(NUM_RANDOM_OUTPUTS):
root.benchmark.random_outputs[model_name].add(
'random_output_{:04}.npy'.format(i)
)
# create the denoising images folder
root.images.add('denoising')
if not root.images.denoising.exists():
raise FileNotFoundError(
'There is no {} folder.'.format(root.images.denoising.path())
)
for fname in os.listdir(root.images.denoising.path):
stem, ext = os.path.splitext(fname)
if ext != '.png':
continue
# fill the denoising folder inside benchmark
root.images.denoising.add(fname)
root.benchmark.denoising.add(stem)
root.benchmark.denoising[stem].add('data')
for model_name in model_names:
root.benchmark.denoising[stem].data.add(model_name)
root.benchmark.denoising[stem].data[model_name].add('res.pkl')
root.benchmark.denoising[stem].data[model_name].add('grid.png')
# maybe, you can add field names for this CSVFile object
root.benchmark.denoising[stem].add('psnr.csv')
root.benchmark.denoising[stem].add('similarity_metrics.csv')
# create the inpainting images folder
root.images.add('inpainting')
if not root.images.inpainting.exists():
raise FileNotFoundError(
'There is no {} folder.'.format(root.images.inpainting.path())
)
for fname in os.listdir(root.images.inpainting.path):
stem, ext = os.path.splitext(fname)
if ext != '.png':
continue
# fill the inpainting folder inside benchmark
root.images.inpainting.add(fname)
root.benchmark.inpainting.add(stem)
root.benchmark.inpainting[stem].add('data')
for model_name in model_names:
root.benchmark.inpainting[stem].data.add(model_name)
root.benchmark.inpainting[stem].data[model_name].add('res.pkl')
root.benchmark.inpainting[stem].data[model_name].add('grid.png')
# maybe, you can add field names for this CSVFile object
root.benchmark.inpainting[stem].add('psnr.csv')
root.benchmark.inpainting[stem].add('similarity_metrics.csv')
if __name__ == '__main__':
print('Checking the integrity of the project directory...\n')
if not root.exists():
print_not_exists(root)
exit()
if not root.images.exists():
print_not_exists(root.images)
exit()
if not root.images.denoising.exists():
print_not_exists(root.images.denoising)
exit()
else:
print('Denoising Images: ')
for file in root.images.denoising:
print(file.path)
print()
if not root.benchmark.exists():
print_not_exists(root.benchmark)
exit()
if not root.benchmark.models_csv.exists():
print_not_exists(root.benchmark.models_csv)
exit()
else:
print('There are {} models inside {}\n'.format(
num_models, root.benchmark.models_csv.path
))
if not root.benchmark.lowpass_metrics_csv.exists():
print_not_exists(root.benchmark.lowpass_metrics_csv)
print()
else:
for row in root.benchmark.lowpass_metrics_csv:
model_name = row[0]
# we are assuming that the field of the first column will
# always be model_name
if model_name == 'model_name':
continue
if not model_name in model_names_hashed:
print('{} exists in {} but it should NOT.'.format(
model_name, root.benchmark.lowpass_metrics_csv
))
continue
model_names_hashed[model_name] = True
print()
for model_name, val in model_names_hashed.items():
if not val:
print('{} does NOT exists in {} but it should.'.format(
model_name, root.benchmark.lowpass_metrics_csv
))
model_names_hashed[model_name] = False
print()
if not root.benchmark.random_outputs.exists():
print_not_exists(root.benchmark.random_outputs)
else:
for folder in os.listdir(root.benchmark.random_outputs.path):
if not folder in model_names_hashed:
print('{} exists in {} but it should NOT.'.format(
os.path.join(root.benchmark.random_outputs.path, folder),
root.benchmark.random_outputs.path
))
print()
for child in root.benchmark.random_outputs:
if not child.exists():
print('{} does NOT exists in {} but it should.'.format(
child.path, root.benchmark.random_outputs.path
))
print()
continue
if not child.random_output_npy.exists():
print('{} does NOT exists in {} but it should.'.format(
child.path, root.benchmark.random_outputs.path
))
print()
continue
shape = child.random_output_npy.shape
true_shape = (NUM_RANDOM_OUTPUTS, 1, IMG_SIZE, IMG_SIZE)
if shape != true_shape:
print('Shape of {} is {} which FALSE. It should be {}.'.format(
child.random_output_npy.path, shape, true_shape
))
print()
print()
if not root.benchmark.denoising.exists():
print_not_exists(root.benchmark.denoising)
print()
else:
for img_folder in root.benchmark.denoising:
if not img_folder.exists():
print_not_exists(img_folder)
print()
continue
if not img_folder.data.exists():
print_not_exists(img_folder.data)
print()
else:
for folder in os.listdir(img_folder.data.path):
if not folder in model_names_hashed:
print('{} exists in {} but it should NOT.'.format(
os.path.join(img_folder.data.path, folder),
img_folder.data.path
))
print()
for child in img_folder.data:
if not child.exists():
print('{} does NOT exists in {} but it should.'.format(
child.path, img_folder.data.path
))
print()
continue
if not child.grid_png.exists():
print('{} does NOT exists in {} but it should.'.format(
child.grid_png.path, child.path
))
if not child.res_pkl.exists():
print('{} does NOT exists in {} but it should.'.format(
child.res_pkl.path, child.path
))
print()
continue
# check whether res.pkl contains necessary information
if not img_folder.psnr_csv.exists():
print_not_exists(img_folder.psnr_csv)
print()
else:
for row in list(img_folder.psnr_csv):
model_name = row[0]
# we are assuming that the field of the first column will
# always be model_name
if model_name == 'model_name':
continue
if not model_name in model_names_hashed:
print('{} exists in {} but it should NOT.'.format(
model_name, img_folder.psnr_csv
))
print()
continue
model_names_hashed[model_name] = True
for model_name, val in model_names_hashed.items():
if not val:
print('{} does NOT exists in {} but it should.'.format(
model_name, img_folder.psnr_csv
))
model_names_hashed[model_name] = False
if not img_folder.similarity_metrics_csv.exists():
print_not_exists(img_folder.similarity_metrics_csv)
print()
else:
for row in img_folder.similarity_metrics_csv:
model_name = row[0]
# we are assuming that the field of the first column will
# always be model_name
if model_name == 'model_name':
continue
if not model_name in model_names_hashed:
print('{} exists in {} but it should NOT.'.format(
model_name, img_folder.similarity_metrics_csv
))
print()
continue
model_names_hashed[model_name] = True
for model_name, val in model_names_hashed.items():
if not val:
print('{} does NOT exists in {} but it should.'.format(
model_name, img_folder.similarity_metrics_csv
))
model_names_hashed[model_name] = False
print('\n'*5)
print('This the ideal structure of the project directory:\n')
print(root.content())
| [
2,
28511,
38491,
546,
262,
1628,
389,
994,
11,
8384,
13532,
286,
1593,
198,
2,
3696,
14,
11379,
364,
13,
198,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
1257,
310,
10141,
1330,
39986,
62,
26745,
198,
6738,
17268,
1330,
3706... | 1.977178 | 5,740 |
# --- Common libraries
import os
import unittest
import numpy as np
from welib.BEM.steadyBEM import *
MyDir = os.path.dirname(__file__)
class Test(unittest.TestCase):
""" See examples/ for more examples """
if __name__ == '__main__':
unittest.main()
| [
2,
11420,
8070,
12782,
220,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
5029,
571,
13,
33,
3620,
13,
28044,
88,
33,
3620,
1330,
1635,
198,
198,
3666,
35277,
28,
418,
13,
6978,
13,
159... | 2.708333 | 96 |
import settings
import tweepy
import dataset
from textblob import TextBlob
from sqlalchemy.exc import ProgrammingError
import json
import time
import smtplib
import ssl
import threading
import sys
from urllib3.exceptions import ProtocolError as urllib3_protocolError
if (sys.version_info > (3, 0)):
print("Python 3")
from http.client import IncompleteRead as http_incompleteRead
else:
print("Python 2")
from httplib import IncompleteRead as http_incompleteRead
db = dataset.connect(settings.CONNECTION_DATABASE)
tweetNo = 0
done = 0
elapsed = 0
names = []
tweetRateCount = 0
try:
auth = tweepy.OAuthHandler(settings.TWITTER_KEY, settings.TWITTER_SECRET)
auth.set_access_token(settings.TWITTER_APP_KEY, settings.TWITTER_APP_SECRET)
api = tweepy.API(auth)
stream_listener = StreamListener()
stream = tweepy.Stream(auth=api.auth, listener=stream_listener)
except Exception as e:
sendMail(sub=("Tweepy Authorization Error " + settings.EMAIL_SUBJECT), text=str(e))
pass
try:
if settings.TRENDDATA_UPDATE:
data = api.trends_place(settings.PLACE_CODE)[0]
trends_list = data['trends']
names = [trend['name'] for trend in trends_list]
names = names[:settings.TREND_AMOUNT]
except Exception as e:
sendMail(sub=("Tweepy Trend Update Error " + settings.EMAIL_SUBJECT), text=str(e))
pass
track_list_trends = settings.TRACK_TERMS + names
print("Starting Capturing:")
print(track_list_trends)
str_track_list = ' \n '.join(track_list_trends)
sendMail(sub=("Tweet Capture Starting " + settings.EMAIL_SUBJECT), text=("!!STARTING!! Currently capturing " + str_track_list))
start = time.time()
startStream()
while (elapsed > settings.REFRESH_TIME) and settings.TRENDDATA_UPDATE:
data = api.trends_place(settings.PLACE_CODE)[0]
trends_list = data['trends']
names = [trend['name'] for trend in trends_list]
names = names[:settings.TREND_AMOUNT]
track_list_trends = settings.TRACK_TERMS + names
str_track_list = ' \n '.join(track_list_trends)
print("-----------List aquired-------------")
print(track_list_trends)
sendMail(sub=("Tweepy Trend List Renew " + settings.EMAIL_SUBJECT), text=("TweetRate(per min) : " + str(tweetRateCount / (elapsed / 60)) + "\n\n Currently capturing " + str_track_list))
print("-----------Rolling back the streaming--------------")
print("starting streaming now!")
start = done
tweetRateCount = 0
startStream()
done = time.time()
elapsed = done - start
| [
11748,
6460,
198,
11748,
4184,
538,
88,
198,
11748,
27039,
198,
6738,
2420,
2436,
672,
1330,
8255,
3629,
672,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
30297,
12331,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
895,
83,
489,
571... | 2.741304 | 920 |
definition = {
'node_batch_size': 5,
'fields': {
'ad_breaks': {'omit': True},
'auto_generated_captions': {'omit': True},
'backdated_time': {},
'backdated_time_granularity': {},
'captions': {'omit': True},
'comments': {'edge_type': 'comment'},
'content_category': {},
'content_tags': {},
'created_time': {'default': True},
'custom_labels': {'omit': True},
'description': {'default': True},
'embed_html': {},
'embeddable': {},
'event': {'edge_type': 'event', 'follow_edge': False},
'format': {},
'from': {},
'icon': {},
'is_crosspost_video': {'omit': True},
'is_crossposting_eligible': {'omit': True},
'is_instagram_eligible': {'omit': True},
'is_reference_only': {'omit': True},
'length': {},
'likes': {},
'live_status': {},
'monetization_status': {'omit': True},
'permalink_url': {'default': True},
'picture': {},
'place': {},
'privacy': {},
'published': {},
'reactions': {},
'scheduled_publish_time': {'omit': True},
'sharedposts': {'omit': True},
'source': {},
'sponsor_tags': {'omit': True},
'status': {},
'tags': {'omit': True},
'thumbnails': {'omit': True},
'title': {'default': True},
'universal_video_id': {},
'updated_time': {'default': True},
'video_insights': {'omit': True},
},
'csv_fields': [
'created_time',
'updated_time',
'title',
'description',
'permalink_url',
['from', 'id'],
['from', 'name'],
'content_category',
['privacy', 'description'],
'icon'
]
}
| [
46758,
796,
1391,
198,
220,
220,
220,
705,
17440,
62,
43501,
62,
7857,
10354,
642,
11,
198,
220,
220,
220,
705,
25747,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
324,
62,
30058,
10354,
1391,
6,
296,
270,
10354,
6407,
... | 1.955628 | 924 |
import racemap
import copy
import sys
import random
import time
import os.path
| [
11748,
3444,
368,
499,
198,
11748,
4866,
198,
11748,
25064,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
28686,
13,
6978,
198
] | 3.590909 | 22 |
# -*- coding: utf-8 -*-
import logging
from django.db import models
from owncloud import Client as NextcloudClient
from addons.base import exceptions
from addons.base import institutions_utils as inst_utils
from addons.base.institutions_utils import (
InstitutionsNodeSettings,
InstitutionsStorageAddon
)
from addons.nextcloud.models import NextcloudProvider
from addons.nextcloudinstitutions import settings, apps
from osf.models.files import File, Folder, BaseFileNode
from osf.utils.permissions import ADMIN, READ, WRITE
logger = logging.getLogger(__name__)
FULL_NAME = apps.FULL_NAME
SHORT_NAME = apps.SHORT_NAME
ENABLE_DEBUG = False
inst_utils.register(NodeSettings)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
898,
17721,
1330,
20985,
355,
7406,
17721,
11792,
198,
198,
6738,
751,
684,
13,
8692,
1330,
... | 3.244131 | 213 |
input = """
c num blocks = 1
c num vars = 150
c minblockids[0] = 1
c maxblockids[0] = 150
p cnf 150 667
-11 -87 28 0
-91 18 -5 0
-9 50 -72 0
-15 -93 -2 0
127 -81 39 0
-107 -49 25 0
132 42 -4 0
63 -103 42 0
95 85 -133 0
-57 -74 -15 0
-150 -76 71 0
7 -141 -104 0
107 -19 -150 0
-69 12 -122 0
72 87 -70 0
31 71 -79 0
42 -99 48 0
-59 -111 -13 0
62 -101 54 0
-135 -54 1 0
54 -75 130 0
115 36 -46 0
-53 -9 76 0
-8 -105 40 0
149 29 -74 0
-132 -46 128 0
-36 137 92 0
90 70 20 0
-120 100 25 0
25 13 -77 0
46 59 74 0
-146 98 -129 0
121 -70 17 0
-10 -22 29 0
-7 -18 40 0
-24 41 150 0
-134 -35 47 0
113 135 -4 0
139 -4 91 0
104 -10 -8 0
-11 -142 -65 0
-87 -42 18 0
76 -83 100 0
57 109 78 0
-83 -90 65 0
48 141 -93 0
123 113 -79 0
25 85 -32 0
-109 -122 -16 0
123 -15 89 0
97 -23 -9 0
136 -101 -74 0
-116 132 10 0
95 -15 11 0
27 -43 33 0
-135 -80 43 0
45 115 -114 0
30 -28 6 0
-135 13 63 0
125 -35 107 0
88 95 30 0
93 27 50 0
-102 -15 27 0
-146 25 31 0
-5 63 116 0
-138 85 117 0
-109 -132 -114 0
134 73 10 0
-82 64 -54 0
81 16 -97 0
-74 -11 -131 0
-97 -70 -18 0
-126 43 -85 0
-101 -97 2 0
67 -35 -68 0
74 138 150 0
23 138 -141 0
138 -47 -62 0
78 -135 -2 0
-100 -38 12 0
146 75 106 0
81 -11 -4 0
-129 -116 44 0
-79 144 -76 0
23 94 -3 0
-72 65 -109 0
-15 -110 -83 0
-103 47 -108 0
66 14 -65 0
137 134 144 0
46 -17 139 0
-139 4 -147 0
135 -73 38 0
74 132 120 0
-148 22 -95 0
106 107 -26 0
62 28 26 0
-123 141 89 0
-129 -81 -36 0
89 56 48 0
-122 -127 -85 0
48 -79 -71 0
117 -20 109 0
-137 87 -109 0
57 147 -93 0
-7 -26 -100 0
-143 112 108 0
40 -60 -96 0
-103 113 41 0
-34 -119 -104 0
104 63 -4 0
-110 -40 -141 0
-24 120 -84 0
52 -49 123 0
114 -62 71 0
44 -50 147 0
-77 49 105 0
112 -23 -80 0
76 -24 121 0
-82 140 69 0
-118 -57 -13 0
-12 81 147 0
92 -61 108 0
-71 -88 -146 0
75 83 -19 0
-135 -37 -74 0
-148 100 -34 0
124 100 -119 0
-111 82 -142 0
49 -107 -126 0
-68 -38 124 0
55 -25 -110 0
54 46 -133 0
-110 70 112 0
-87 -33 111 0
-39 -43 71 0
-29 -45 -125 0
147 -66 126 0
70 -2 98 0
65 -59 -134 0
88 -62 83 0
-84 -79 -121 0
79 146 122 0
-142 -88 116 0
-93 -33 -103 0
98 -24 -150 0
-89 17 -18 0
-82 -25 1 0
-92 149 36 0
67 84 -72 0
46 -30 54 0
-100 -52 85 0
133 89 -109 0
41 110 -66 0
-144 -122 -68 0
-97 -87 -63 0
-46 -31 -10 0
134 73 -32 0
-141 89 -55 0
83 -104 135 0
19 -98 -90 0
-29 -39 -134 0
64 132 -74 0
-142 -117 138 0
142 149 -87 0
-77 38 -6 0
111 22 47 0
-74 -71 -88 0
5 6 -92 0
-142 -107 -32 0
19 -61 71 0
102 -47 -62 0
73 13 -113 0
-72 9 51 0
11 -123 -13 0
-134 116 -48 0
138 -83 85 0
116 44 90 0
7 -68 101 0
-56 -97 37 0
44 43 -128 0
-36 -84 73 0
-26 -81 65 0
129 121 -114 0
-83 -148 103 0
-64 -127 11 0
-3 124 -20 0
129 71 -114 0
-39 108 -7 0
-85 130 49 0
42 -105 -76 0
128 149 -85 0
-84 72 141 0
68 -119 61 0
-63 118 49 0
16 145 -76 0
22 -59 119 0
22 -76 -6 0
122 -41 139 0
-83 -124 -133 0
99 -88 -78 0
-89 54 31 0
102 46 5 0
4 -126 9 0
-19 106 -38 0
-94 86 67 0
58 -100 90 0
113 53 74 0
-8 60 47 0
7 -14 -15 0
-102 124 -66 0
-20 107 -134 0
77 -76 -91 0
133 -5 75 0
-80 -130 -123 0
-49 27 -136 0
32 -114 -147 0
106 -56 -53 0
-117 -123 -14 0
-137 43 132 0
9 -95 -32 0
70 81 4 0
-31 -91 -32 0
-124 -14 -115 0
-14 59 125 0
20 -23 -115 0
-68 26 -57 0
137 6 110 0
-65 146 -42 0
45 30 137 0
-9 52 46 0
-120 -75 105 0
-85 135 -8 0
-71 11 -79 0
15 -106 -85 0
147 105 106 0
-118 66 -12 0
-25 -102 -85 0
108 66 10 0
-128 150 72 0
-42 -144 -131 0
23 45 -86 0
5 45 -71 0
-47 115 150 0
-69 -131 -82 0
-22 101 35 0
73 -102 23 0
136 150 -112 0
-33 -54 -24 0
88 -143 -67 0
-5 -118 -14 0
-38 142 -103 0
-138 64 -26 0
-45 -89 4 0
-120 -112 -45 0
-3 110 -128 0
129 -74 -123 0
49 102 73 0
11 -33 -6 0
98 -2 -13 0
89 -63 2 0
139 4 -89 0
-147 2 -95 0
-37 44 110 0
-129 -130 75 0
106 -92 -60 0
-122 -133 95 0
-53 82 -12 0
-100 9 29 0
-5 31 -2 0
-102 33 66 0
44 -21 -91 0
-117 93 145 0
49 26 -102 0
119 -125 -130 0
14 -112 69 0
25 -66 -67 0
-6 -64 92 0
128 14 51 0
-81 19 -147 0
61 148 -103 0
147 -57 -26 0
-149 109 -2 0
112 132 -120 0
-87 144 106 0
-144 -55 135 0
69 122 27 0
87 -62 127 0
-60 1 106 0
34 96 13 0
135 -82 87 0
70 50 51 0
-117 90 148 0
-146 -19 -134 0
-1 -82 99 0
-88 -70 90 0
109 49 62 0
-49 4 34 0
37 80 72 0
-44 -75 -66 0
-69 128 25 0
24 -33 89 0
-40 64 -65 0
-96 140 -119 0
-127 -124 12 0
-51 -99 -102 0
140 118 83 0
-52 37 87 0
97 -20 107 0
-32 -39 13 0
142 -100 -9 0
133 -34 9 0
-66 67 -7 0
-51 123 64 0
74 -68 -109 0
-26 -7 -92 0
105 41 70 0
-134 7 -104 0
-48 -150 60 0
3 143 -30 0
-78 -7 -65 0
147 34 19 0
-113 -94 -29 0
128 -116 74 0
-104 -27 94 0
-116 -92 -120 0
147 -47 69 0
-54 -91 17 0
116 36 -65 0
-11 93 -51 0
-21 106 10 0
4 -129 -30 0
46 -31 131 0
100 25 139 0
131 120 56 0
64 -25 -110 0
28 139 -122 0
85 -83 107 0
93 -63 84 0
69 -68 -72 0
-97 -86 -122 0
-54 -8 24 0
-145 17 -62 0
-94 -112 -99 0
54 83 -7 0
-140 -52 44 0
-147 121 -74 0
10 133 -88 0
-42 -142 -5 0
133 -61 -106 0
54 -134 -119 0
-132 131 65 0
60 -57 37 0
67 -33 109 0
-51 99 6 0
-46 -104 69 0
-137 -138 -18 0
81 33 49 0
-142 26 124 0
-82 77 24 0
96 -132 110 0
24 129 -148 0
13 53 73 0
-105 -64 -127 0
-46 62 97 0
-80 -23 67 0
-21 -19 100 0
60 -45 58 0
35 -100 -107 0
-90 -147 125 0
127 -148 4 0
-121 -93 -10 0
-38 16 57 0
-66 35 -129 0
-102 -147 -120 0
108 64 6 0
105 138 -100 0
103 -70 -108 0
1 -2 -106 0
-132 -125 -74 0
1 -15 140 0
132 29 -45 0
-134 61 -101 0
13 9 -45 0
101 -70 -30 0
106 -68 26 0
-100 -40 -148 0
-2 14 -57 0
77 30 62 0
43 96 -79 0
-119 75 -52 0
93 -88 -10 0
134 25 75 0
86 90 -37 0
48 -12 -47 0
-115 1 -107 0
61 -31 -49 0
47 -87 120 0
26 -34 -79 0
-23 148 -88 0
-83 137 133 0
-117 -133 13 0
6 -55 -130 0
131 58 -115 0
-145 -54 -149 0
-111 -78 62 0
112 97 134 0
96 132 -57 0
-104 107 -135 0
100 -41 21 0
24 134 -48 0
59 -109 -131 0
-98 -148 110 0
12 -51 -116 0
-112 7 -27 0
-110 57 -130 0
-50 -48 130 0
18 66 -9 0
-87 20 -90 0
-106 -53 138 0
114 -108 -132 0
1 37 99 0
-61 -29 150 0
98 -18 95 0
-103 -143 -113 0
-53 36 -13 0
119 68 -82 0
20 141 148 0
-150 -138 124 0
-42 -72 2 0
109 26 -124 0
-136 80 73 0
75 145 10 0
-55 -138 -99 0
20 -28 26 0
48 114 -45 0
-41 5 87 0
20 -129 88 0
53 84 -23 0
88 -131 54 0
39 109 -147 0
-18 -95 -15 0
110 -9 -12 0
-20 -6 28 0
137 -149 142 0
-147 -50 -17 0
71 10 -21 0
94 37 38 0
-65 19 -117 0
144 -63 114 0
37 -28 25 0
104 142 51 0
16 -38 -73 0
122 -77 48 0
130 54 96 0
-76 49 -62 0
-9 -24 -128 0
-61 57 128 0
140 145 -80 0
-62 121 -105 0
-96 12 -45 0
-127 6 75 0
108 95 -122 0
83 -124 34 0
-44 37 55 0
59 37 149 0
-148 -136 150 0
60 -26 53 0
140 -85 139 0
6 132 -34 0
-107 -50 25 0
-87 115 -124 0
-36 41 51 0
-49 -109 113 0
74 -58 -82 0
23 -32 -150 0
-2 121 111 0
-133 -122 -56 0
90 -36 -94 0
-53 90 76 0
130 -113 -78 0
79 76 -17 0
38 140 131 0
44 148 92 0
-115 135 113 0
124 -116 -106 0
-50 106 84 0
123 110 -136 0
-136 -119 -76 0
-133 -147 -2 0
-141 -18 94 0
-21 43 -141 0
-124 -120 -85 0
-47 17 -120 0
-116 147 40 0
-45 71 -34 0
-119 -123 -11 0
-118 -48 16 0
-148 121 -56 0
-11 47 -78 0
-27 50 -133 0
-73 144 -92 0
-129 -51 87 0
-2 128 -69 0
135 -61 -133 0
-53 -33 115 0
-132 109 -51 0
-32 -131 65 0
-130 38 139 0
51 -103 -97 0
-135 -20 -128 0
146 143 -122 0
-146 -116 93 0
138 131 73 0
50 48 110 0
-147 -16 71 0
-23 -76 69 0
-97 -132 -76 0
73 -34 132 0
118 -137 -41 0
-141 -37 -139 0
42 -119 57 0
-56 2 -29 0
-43 36 -132 0
130 -143 126 0
-145 144 64 0
67 -135 -56 0
149 145 -61 0
-145 31 -114 0
-39 -139 33 0
140 114 75 0
96 146 113 0
59 137 84 0
16 88 -61 0
-11 100 -120 0
-124 125 -119 0
107 -87 -120 0
-113 -123 -6 0
-56 9 83 0
59 -132 -98 0
-125 45 -128 0
76 118 -75 0
-27 -33 -143 0
105 -78 93 0
-76 -121 7 0
-124 -83 134 0
107 -144 -51 0
-2 -32 39 0
-63 12 -59 0
120 -138 123 0
-54 12 -109 0
-68 -21 129 0
-109 -144 44 0
133 -37 -13 0
78 -77 -135 0
-62 -88 84 0
97 -53 -73 0
30 25 84 0
-98 -5 49 0
-91 -137 117 0
54 139 34 0
-57 53 116 0
-45 126 -80 0
39 -48 -108 0
4 -74 -122 0
120 8 -92 0
129 -133 -31 0
102 80 -92 0
48 81 73 0
-33 -39 -47 0
69 -130 76 0
115 33 25 0
148 -137 -20 0
41 122 55 0
-135 -67 -13 0
-91 82 -114 0
-37 87 -56 0
-84 -45 130 0
-109 -137 -87 0
-52 -78 -55 0
-77 -150 7 0
131 -28 150 0
59 -138 -20 0
131 -119 -6 0
148 -8 -93 0
97 60 -6 0
-100 26 149 0
-79 5 -19 0
104 -62 -96 0
96 -95 137 0
-122 86 -92 0
132 98 78 0
-132 -104 83 0
3 -46 -70 0
40 122 105 0
115 98 102 0
136 7 -29 0
63 -21 -102 0
133 -107 82 0
2 57 -35 0
36 74 -80 0
-75 130 23 0
77 -122 5 0
95 122 -16 0
61 43 -114 0
17 53 133 0
52 -2 -82 0
-145 -31 -115 0
-51 40 -106 0
93 -4 -51 0
126 115 -84 0
46 57 118 0
-34 16 8 0
-144 -70 -22 0
-34 135 -125 0
-32 83 -128 0
117 -85 -94 0
97 -142 81 0
-140 84 -88 0
117 112 54 0
104 149 143 0
-135 35 -67 0
-58 25 -105 0
119 54 70 0
-121 49 82 0
44 -37 140 0
-77 139 -93 0
-150 -142 106 0
116 -51 97 0
50 -57 135 0
-25 -78 120 0
-105 39 48 0
142 -93 -84 0
75 -32 21 0
30 -125 -77 0
133 -35 -44 0
92 13 -34 0
18 133 31 0
129 43 -55 0
-6 29 32 0
124 -93 -33 0
62 -99 -67 0
126 105 96 0
-76 -120 134 0
113 -125 110 0
-94 128 41 0
22 -87 20 0
-56 -50 125 0
-117 73 -142 0
-98 7 -31 0
-133 100 -8 0
-131 -12 -132 0
-109 -134 -119 0
-126 -94 -95 0
-69 6 57 0
-119 82 -2 0
37 40 77 0
118 27 -102 0
9 -125 -79 0
11 -53 -27 0
-97 -65 114 0
-3 134 -57 0
-80 -13 -95 0
78 43 -32 0
79 -22 -150 0
-125 96 -148 0
114 48 -129 0
"""
output = "UNSAT"
| [
15414,
796,
37227,
198,
66,
997,
7021,
796,
352,
198,
66,
997,
410,
945,
796,
6640,
198,
66,
949,
9967,
2340,
58,
15,
60,
796,
352,
198,
66,
3509,
9967,
2340,
58,
15,
60,
796,
6640,
198,
79,
269,
77,
69,
6640,
718,
3134,
198,
... | 2.056536 | 4,422 |
# -*- coding: utf-8 -*-
from os import path
from setuptools import find_packages, setup
setup_dependencies = [
"wheel",
"setuptools-scm",
]
install_dependencies = [
"importlib-metadata;python_version<'3.8'",
"redis",
]
try:
docs_file = path.join(path.abspath(path.dirname(__file__)), "README.md")
with open(docs_file, encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = __doc__
setup(
name="redis-light",
url="https://github.com/zobayer1/redis-light",
license="MIT",
author="Zobayer Hasan",
author_email="zobayer1@gmail.com",
description="Lightweight Redis ORM",
keywords="python redis orm",
long_description=long_description,
use_scm_version=True,
setup_requires=setup_dependencies,
packages=find_packages(exclude=["docs", "tests"]),
include_package_data=False,
zip_safe=True,
platforms="any",
install_requires=install_dependencies,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
28686,
1330,
3108,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
62,
45841,
3976,
796,
685,
198,
220,
220,
220,
366,
22001,
1600,... | 2.562092 | 612 |
"""Module unittests.test_metadata_algorithms.py
This module contains methods to test the metadata_algorithms module via pytest.
It uses good_mock_server to validate the positive test cases
and bad_mock_server for the negative test cases.
"""
import pytest
import json
import click
from click.testing import CliRunner
from compliance_suite.metadata_algorithms import *
from compliance_suite.test_runner import TestRunner
from compliance_suite.tests import Test
from unittests.constants import GOOD_SERVER_URL as good_mock_server, BAD_SERVER_URL as bad_mock_server
good_runner = TestRunner(good_mock_server)
good_runner.session_params = {
"limit": 400000,
"trunc512": True,
"circular_supported": None,
"redirection": None
}
bad_runner = TestRunner(bad_mock_server)
bad_runner.session_params = {
"limit": 400000,
"trunc512": True,
"circular_supported": None,
"redirection": None
}
test = Test(testing_metadata_algorithms) | [
37811,
26796,
555,
715,
3558,
13,
9288,
62,
38993,
62,
282,
7727,
907,
13,
9078,
198,
1212,
8265,
4909,
5050,
284,
1332,
262,
20150,
62,
282,
7727,
907,
8265,
2884,
12972,
9288,
13,
198,
1026,
3544,
922,
62,
76,
735,
62,
15388,
284,... | 2.996904 | 323 |
#!/usr/bin/env python3
import os
import sys
import traceback
base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, base_path)
from junit.constants import JUNIT_TEST_CMD_JUNIT, JUNIT_TEST_CMD_TRACE, TESTFILES_PREFIX, REPO_TRACE_DIR
from junit.utils import parse_command_line_args
from junit.test_junit import TestJUnit
from util.constants import LOG, ErrorCode
from test_case_junit import TestCaseJUnit
# DEF
if __name__ == "__main__":
args = parse_command_line_args()
all_exit_codes = []
exit_code = ErrorCode.SUCCESS
junit_test_runner = TestJUnit(args)
# Step 1: Run the regular JUnit tests.
LOG.info(section_header("JUNIT TESTS"))
try:
test_case_junit = TestCaseJUnit(args, test_command=JUNIT_TEST_CMD_JUNIT)
exit_code = junit_test_runner.run(test_case_junit)
except:
LOG.error(f'Exception trying to run {JUNIT_TEST_CMD_JUNIT}')
LOG.error("================ Python Error Output ==================")
traceback.print_exc(file=sys.stdout)
exit_code = ErrorCode.ERROR
finally:
all_exit_codes.append(exit_code)
# Step 2: Run the trace test for each file that we find
# Each directory represents another set of SQL traces to test.
LOG.info(section_header("TRACEFILE TESTS"))
noise_trace_dir = os.path.join(base_path, REPO_TRACE_DIR)
for item in os.listdir(noise_trace_dir):
        # Look for all of the .test files in each directory
if item.endswith(TESTFILES_PREFIX):
os.environ["NOISEPAGE_TRACE_FILE"] = os.path.join(noise_trace_dir, item)
LOG.info(section_header("TRACEFILE TEST: " + os.environ["NOISEPAGE_TRACE_FILE"]))
exit_code = ErrorCode.ERROR
try:
test_case_junit = TestCaseJUnit(
args, test_command=JUNIT_TEST_CMD_TRACE)
exit_code = junit_test_runner.run(test_case_junit)
except KeyboardInterrupt:
exit_code = ErrorCode.ERROR
raise
except Exception as err:
LOG.error(f'Exception trying to run {JUNIT_TEST_CMD_TRACE}')
LOG.error(err)
LOG.error("================ Python Error Output ==================")
traceback.print_exc(file=sys.stdout)
exit_code = ErrorCode.ERROR
finally:
all_exit_codes.append(exit_code)
## FOR (files)
## FOR (dirs)
# Compute final exit code. If any test failed, then the entire program has to fail
final_code = 0
for c in all_exit_codes:
final_code = final_code or c
LOG.info("Final Status => {}".format("FAIL" if final_code else "SUCCESS"))
sys.exit(final_code)
# MAIN
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
198,
8692,
62,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
697... | 2.260586 | 1,228 |
# Generated by Django 3.1.5 on 2021-02-02 07:59
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2999,
12,
2999,
8753,
25,
3270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-18 00:01
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
16,
319,
1584,
12,
486,
12,
1507,
3571,
25,
486,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198... | 2.672727 | 55 |
'''
Used to compare pair predictions to other results when they have different contact map criteria
'''
import os, sys
import pandas as pd
if __name__=="__main__":
inPath, cmapPath, resultsPath = sys.argv[1:]
main( inPath, cmapPath, resultsPath )
| [
7061,
6,
198,
38052,
284,
8996,
284,
584,
2482,
5166,
16277,
340,
484,
423,
1180,
2800,
3975,
1955,
263,
1505,
198,
7061,
6,
198,
198,
11748,
28686,
11,
25064,
198,
11748,
19798,
292,
355,
279,
67,
628,
220,
220,
220,
220,
198,
361,... | 2.921348 | 89 |
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pickle
import pprint
import datefinder
# What the program can access within Calendar
# See more at https://developers.google.com/calendar/auth
scopes = ["https://www.googleapis.com/auth/calendar"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
# Use this to pull the users credentials into a pickle file
#credentials = flow.run_console()
#pickle.dump(credentials, open("token.pkl", "wb"))
# Read the credentials from a saved pickle file
credentials = pickle.load(open("token.pkl", "rb"))
# Build the calendar resource
service = build("calendar", "v3", credentials=credentials)
# Store a list of Calendars on the account
result = service.calendarList().list().execute()
calendar_id = result["items"][0]["id"]
result = service.events().list(calendarId=calendar_id).execute()
def create_event(my_event):
"""
Create a Google Calendar Event
Args:
my_event: CalendarEvent object
"""
print("Created Event for " + str(my_event.date))
event = {
"summary": my_event.summary,
"location": my_event.location,
"description": my_event.description,
"start": {
"dateTime": my_event.start_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"end": {
"dateTime": my_event.end_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"reminders": {
"useDefault": False,
},
}
return service.events().insert(calendarId=calendar_id, body=event, sendNotifications=True).execute()
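# `CalendarEvent` is referenced in the docstring above but not defined in
# this snippet; a minimal, hypothetical stand-in (an assumption, not the
# project's real class):
from dataclasses import dataclass
import datetime

@dataclass
class CalendarEvent:
    summary: str
    location: str
    description: str
    start_date_time: datetime.datetime
    end_date_time: datetime.datetime

    @property
    def date(self):
        return self.start_date_time.date()

# Example call (values are illustrative only):
# create_event(CalendarEvent("Stand-up", "Office", "Daily sync",
#                            datetime.datetime(2021, 1, 4, 9, 0),
#                            datetime.datetime(2021, 1, 4, 9, 15)))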
| [
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
6738,
23645,
62,
18439,
62,
12162,
1071,
8019,
13,
11125,
1330,
2262,
4262,
4677,
37535,
198,
11748,
2298,
293,
198,
11748,
279,
4798,
198,
11748,
3128,
22805,
198,
198,
... | 2.564636 | 673 |
from flask import Flask
app = Flask(__name__)
from Webapp import views
| [
6738,
42903,
1330,
46947,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
6738,
5313,
1324,
1330,
5009,
198
] | 3.428571 | 21 |
# https://leetcode.com/problems/open-the-lock/
# By Jiapeng
# Runtime: 544 ms, faster than 86.92% of Python3 online submissions for Open the Lock.
# Memory Usage: 15 MB, less than 37.13% of Python3 online submissions for Open the Lock.
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
9654,
12,
1169,
12,
5354,
14,
198,
198,
2,
2750,
29380,
499,
1516,
198,
2,
43160,
25,
642,
2598,
13845,
11,
5443,
621,
9849,
13,
5892,
4,
286,
11361,
18,
2691,
22129,
... | 3.202703 | 74 |
from django.shortcuts import render
from .forms import PostingPost, FormJawab
from .models import Penanya
from django.http import HttpResponse
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
201,
198,
6738,
764,
23914,
1330,
2947,
278,
6307,
11,
5178,
41,
707,
397,
201,
198,
6738,
764,
27530,
1330,
7507,
34183,
201,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,... | 3.46 | 50 |
import webtech
os_list = ["CentOS","Ubuntu","Debian","Microsoft","Windows Server","Red Hat"]
#Filter report from webtech and only return web technologies
| [
11748,
3992,
13670,
628,
198,
418,
62,
4868,
796,
14631,
19085,
2640,
2430,
36609,
11157,
2430,
16587,
666,
2430,
15905,
2430,
11209,
9652,
2430,
7738,
10983,
8973,
198,
198,
2,
22417,
989,
422,
3992,
13670,
290,
691,
1441,
3992,
8514,
... | 3.829268 | 41 |
# -*- coding: utf-8 -*-
class Log:
"""
    Describes a single log object that is embedded in a Meta object.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
4871,
5972,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
39373,
22090,
2060,
2604,
2134,
326,
14553,
284,
257,
30277,
2134,
13,
198,
220,
220,
220,
37227,
198
] | 2.613636 | 44 |
a = 0
while a < 200:
a = a + 1
print(a)
| [
64,
796,
657,
201,
198,
201,
198,
4514,
257,
1279,
939,
25,
201,
198,
220,
220,
220,
257,
796,
257,
220,
1343,
352,
201,
198,
220,
220,
220,
3601,
7,
64,
8,
201,
198,
201,
198
] | 1.583333 | 36 |
# simulate prio techniques on ctest dataset
import time
import threading
from concurrent.futures import ThreadPoolExecutor
import ordering, parsing_utils, peer, utils
from constant import *
from pinput import *
from metric import Metric
TCPS = pinput["tcps"][1]
if __name__ == '__main__':
main() | [
2,
29308,
1293,
78,
7605,
319,
269,
9288,
27039,
198,
11748,
640,
198,
11748,
4704,
278,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
198,
198,
11748,
16216,
11,
32096,
62,
26791,
11,
12720,
11,
3384,
4487,
19... | 3.326087 | 92 |
import srcomapi
import srcomapi.datatypes as dt
from Levenshtein._levenshtein import distance
api = srcomapi.SpeedrunCom()
api.debug = 1
game = api.search(srcomapi.datatypes.Game, {"name": "azure dreams"})[0]
| [
11748,
19677,
785,
15042,
198,
11748,
19677,
785,
15042,
13,
19608,
265,
9497,
355,
288,
83,
198,
6738,
1004,
574,
1477,
22006,
13557,
293,
574,
1477,
22006,
1330,
5253,
198,
198,
15042,
796,
19677,
785,
15042,
13,
22785,
5143,
5377,
34... | 2.776316 | 76 |
ConsumerKey = "EzDbhlelyQmUFnPvPOJTaIeNu"
ConsumerSecret = "nn2Aj8PXAtEGiacux9kwIKd38pMa7OQBCVShYht1AK7zxlXHSU" | [
49106,
9218,
28,
366,
36,
89,
35,
34369,
293,
306,
48,
76,
36820,
77,
47,
85,
16402,
41,
38586,
40,
68,
45,
84,
1,
198,
49106,
23725,
796,
366,
20471,
17,
32,
73,
23,
47,
55,
2953,
7156,
9607,
2821,
24,
46265,
18694,
67,
2548,
... | 1.71875 | 64 |
from base64 import b64encode
from zope.interface.verify import verifyObject
from twisted.cred.error import LoginFailed
from twisted.web.iweb import ICredentialFactory
from twisted.web.http_headers import Headers
from fluiddb.testing.basic import FluidinfoTestCase
from fluiddb.testing.doubles import FakeRequest
from fluiddb.util.oauth2_credentials import (
OAuth2Credentials, OAuth2CredentialFactory, IOAuth2Credentials)
| [
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
198,
6738,
1976,
3008,
13,
39994,
13,
332,
1958,
1330,
11767,
10267,
198,
198,
6738,
19074,
13,
66,
445,
13,
18224,
1330,
23093,
37,
6255,
198,
6738,
19074,
13,
12384,
13,
72,
12384,
1330,... | 3.208955 | 134 |
#!/usr/bin/env python3
from altai.gui.main import main
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
5988,
1872,
13,
48317,
13,
12417,
1330,
1388,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.473684 | 38 |
import numpy as np
a = np.random.randint(100, size=(10,))
b = np.zeros(10)
try:
test_no_return(a, b)
except AssertionError as error:
print(error)
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.zeros(10)
try:
test_with_return(a, b)
except AssertionError as error:
print(error)
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.zeros(10)
try:
test_cond_return_if_only(a, b)
except AssertionError as error:
print(error)
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.zeros(10)
try:
test_assert_module_cond_return_if_else(a, b)
except AssertionError as error:
print(error)
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.zeros(10)
try:
test_assert_module_cond_return_multi_if_else(a, b)
except AssertionError as error:
print(error)
a = np.array([12, 2, 3, 1, 6, 5, 2, 8, 3, 0])
b = np.zeros(10)
try:
test_assert_module_cond_return_for(a, b)
except AssertionError as error:
print(error)
a = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
b = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
c = np.zeros(10)
try:
test_assert_module_multi_calls(a, b, c)
except AssertionError as error:
print(error)
a = np.random.randint(100, size=(10,))
b = np.random.randint(100, size=(10,))
c = np.zeros(10)
try:
test_assert_module_declarative(a, b, c)
except AssertionError as error:
print(error)
a = np.random.randint(100, size=(10,))
b = np.random.randint(100, size=(10,))
c = np.zeros(10)
try:
test_assert_module_declarative_internal_allocate(a, b, c)
except AssertionError as error:
print(error)
a = np.random.randint(100, size=(10,))
b = np.random.randint(100, size=(10,))
c = np.zeros(10)
try:
test_assert_module_declarative_compute_at(a, b, c)
except AssertionError as error:
print(error)
| [
11748,
299,
32152,
355,
45941,
198,
198,
64,
796,
45941,
13,
25120,
13,
25192,
600,
7,
3064,
11,
2546,
16193,
940,
11,
4008,
198,
65,
796,
45941,
13,
9107,
418,
7,
940,
8,
198,
198,
28311,
25,
198,
220,
220,
220,
1332,
62,
3919,
... | 2.133253 | 833 |
# Generated by Django 3.1.13 on 2021-11-02 19:54
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1485,
319,
33448,
12,
1157,
12,
2999,
678,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
panellayout = html.Div([
dcc.Location(id='panel-url', pathname='/panel'),
dbc.Container(
[
dbc.Row(
dbc.Col(
[
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=False
),
],
),
justify='center'
),
dbc.Row(
dbc.Col(
[
html.Div(id='output-data-upload'),
]
)
),
html.Hr(),
dbc.Row(
dbc.Col(
[
html.H5("all data"),
html.Div(id='all-data-table'),
html.Div(id='admin-table-trigger')
]
)
),
],
)
])
| [
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
628,
198,
6839,
695,
323,
448,
796,
27711,
13,
24095... | 1.304768 | 1,657 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, Wenbin Fei, Email: wenbinfei@gmail.com
# All rights reserved.
"""
Create a logger
===============
To create a logger in other files, use the following code:
import logger_ini
logger = logger_ini.logger_run()
"""
import logging.config
def logger_run():
"""
Create a logger to be used in other files.
    The reason to create the logger in a separate file is because
    Sphinx may change the current working directory to one of its subdirectories,
    which makes a relative path incorrect.
    Use an absolute path when using Sphinx.
    To make this portable for other users, change it to "logging.ini".
"""
logging.config.fileConfig(fname="C:/Wenbin/GitHub/revamp/src/logging.ini") # To provide for other users, change it to "logging.ini"
logger = logging.getLogger(__name__)
return logger
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
11,
31164,
8800,
39575,
11,
9570,
25,
266,
268,
8800,
5036,
72,
31,
14816,... | 3.134545 | 275 |
import re
import pytest
@pytest.mark.parametrize('table, expected', [
pytest.param('__dataset__', re.compile(r'CREATE TABLE __dataset__ \(\n'
r'.+\n'
r'0', re.DOTALL),
id='table=__dataset__'),
])
| [
11748,
302,
198,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
11487,
11,
2938,
3256,
685,
198,
220,
220,
220,
12972,
9288,
13,
17143,
10786,
834,
19608,
292,
316,
834,
3256,
302,
13,
... | 1.612565 | 191 |
from setuptools import setup
setup(
name='require.py',
version='2.0.1',
description="Path-based imports in Python, inspired by `require.js`",
url='https://github.com/pcattori/require.py',
author='Pedro Cattori',
author_email='pcattori@gmail.com',
license='MIT',
packages=['require'],
install_requires=[
'six>=1.10.0'
],
test_suite='tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5'
]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
46115,
13,
9078,
3256,
198,
220,
220,
220,
2196,
11639,
17,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
6764,
2625,
15235,
12,
3106,
17944,
287,... | 2.4947 | 283 |
import requests
from cloudbot import hook
url = 'http://www.discuvver.com/jump2.php'
headers = {'Referer': 'http://www.discuvver.com'}
@hook.command('randomusefulsite', 'randomwebsite', 'randomsite', 'discuvver')
| [
11748,
7007,
198,
198,
6738,
6279,
13645,
1330,
8011,
198,
198,
6371,
796,
705,
4023,
1378,
2503,
13,
15410,
14795,
332,
13,
785,
14,
43327,
17,
13,
10121,
6,
198,
50145,
796,
1391,
6,
8134,
11882,
10354,
705,
4023,
1378,
2503,
13,
... | 2.818182 | 77 |
"""
Tools for building pagination.
"""
from math import ceil
from itertools import islice, chain
from lettersmith.util import chunk, decorate_group_matching_id_path
from lettersmith import doc as Doc
TEMPLATES = ("list.html", "default.html")
OUTPUT_PATH_TEMPLATE = "page/{n}/index.html"
def count_pages(length, per_page):
"""
Find the number of pages in a list of `length`.
"""
# Note it's important that we cast to float before division, otherwise
# we lose floating point precision and get a rounded int.
return int(ceil(float(length) / float(per_page)))
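# Worked example (values chosen for illustration):
# count_pages(25, per_page=10) == 3 and count_pages(20, 10) == 2.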
def slice_page(iterable, page_i, per_page):
"""Slice a page for an iterable"""
page_start = page_i * per_page
page_end = page_start + per_page
return islice(iterable, page_start, page_end)
@decorate_group_matching_id_path
def gen_paging(stubs,
template=None,
output_path_template=None,
per_page=10):
"""
Generate paging docs from stubs
"""
paged = tuple(chunk(stubs, per_page))
page_count = len(paged)
templates = (template,) + TEMPLATES if template is not None else TEMPLATES
n = 0
for stubs in paged:
n = n + 1
output_path = (output_path_template or OUTPUT_PATH_TEMPLATE).format(n=n)
page_list = tuple(stub for stub in stubs)
meta = {
"page_n": n,
"per_page": per_page,
"page_count": page_count,
"page_list": page_list
}
yield Doc.doc(
id_path=output_path,
output_path=output_path,
title="Page {}".format(n),
meta=meta,
templates=templates
)
| [
37811,
198,
33637,
329,
2615,
42208,
1883,
13,
198,
37811,
198,
198,
6738,
10688,
1330,
2906,
346,
198,
6738,
340,
861,
10141,
1330,
318,
75,
501,
11,
6333,
198,
6738,
7475,
22947,
13,
22602,
1330,
16058,
11,
11705,
378,
62,
8094,
62,... | 2.338028 | 710 |
import threading
import StellarPlayer
import os
import shutil
import traceback
import sys
import importlib
import json
from . import douyu
#from . import iqiyi
#from . import sports_iqiyi
#from . import fengbolive
#from . import look
#from . import ppsport
#from . import yangshipin
from . import zhibotv
from . import acfun
from . import bilibili
from . import cc
from . import changyou
from . import douyin
from . import egame
from . import hongle
from . import huajiao
from . import huomao
from . import huya
from . import imifun
from . import immomo
from . import inke
from . import ixigua
from . import jd
from . import kbs
from . import kk
from . import kuaishou
from . import kugou
from . import kuwo
from . import laifeng
from . import lehai
from . import longzhu
from . import maoer
from . import now
from . import pps
from . import qf
from . import qie
from . import renren
from . import showself
from . import tiktok
from . import tuho
from . import twitch
from . import v6cn
from . import wali
from . import woxiu
from . import xunlei
from . import yizhibo
from . import youku
from . import yuanbobo
from . import yy
from . import zhanqi
m2cq = importlib.import_module('zhibo.2cq')
m9xiu = importlib.import_module('zhibo.9xiu')
m17live = importlib.import_module('zhibo.17live')
m51lm = importlib.import_module('zhibo.51lm')
m95xiu = importlib.import_module('zhibo.95xiu')
m173 = importlib.import_module('zhibo.173')
| [
11748,
4704,
278,
198,
11748,
39336,
14140,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
12854,
1891,
198,
11748,
25064,
220,
198,
11748,
1330,
8019,
198,
11748,
33918,
198,
198,
6738,
764,
1330,
2255,
24767,
198,
2,
6738,
764,
... | 2.805769 | 520 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# novault
# Copyright (c) 2016 Avner Herskovits
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# stdlib
from argparse import ArgumentParser, ArgumentTypeError, REMAINDER
from base64 import b85encode
from binascii import hexlify
from datetime import date, timedelta
from getpass import getpass
from hashlib import new as hashlib_new
from itertools import takewhile
from os import urandom
from re import match
from sys import exit
# pypi
from ecdsa import SigningKey, SECP256k1
from pyperclip import copy as pyperclip_copy
from pyscrypt import hash as pyscrypt_hash
# local modules
from novault.ed25519 import B as BASE, encodepoint, l as GROUPGEN, scalarmult
from novault.Keccak import Keccak
def _b58( value ):
'''Convert big-endian bytes to a Base58 string'''
v, z, r = int. from_bytes( value, 'big' ), sum( 1 for _ in takewhile( int( 0 ). __eq__, value )), ''
while v:
v, c = divmod( v, 58 )
r = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'[ c ] + r
return '1' * z + r
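# Worked example: _b58( b'\x00\x01' ) == '12' -- the leading zero byte maps
# to a leading '1' and the remaining value 1 maps to '2' in the alphabet.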
def novault( action, description, master ):
'''Generate a password or wallet from given description & master password'''
return action( mk_seed( description, master, action() ))
def mk_seed( description, master, length ):
'''Generate pseudorandom seed of desired length from description & master password'''
S0, S1, S2 = b'%<6>0Mk$ziGdz@:z-O-', b'Jea`_uH6.ji4R$VM1ZB', b'C!#1P4zJLB2O=no06[1'
return pyscrypt_hash(
description + S0 + master,
pyscrypt_hash( description + S1 + master, master + S2 + description, 1024, 1, 1, 32 ),
1024, 1, 1, length )
def placebo( seed = None ):
'''Passthrough action'''
if not seed: return 0
return { None: hexlify( seed ). decode( 'latin_1' )}
def password( seed = None ):
'''Generate a 128 bit password from seed'''
if not seed: return 16
password = list( b85encode( seed ). decode( 'latin_1' ))
pass_len = len( password ) # always 20
ornament = int. from_bytes( seed[ 12: ], 'little')
for chr_class in ( '0123456789', 'abcdefghijklmnopqrstuvwxyz', 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', '.,:[]/' ):
pass_len += 1
ornament, pos = divmod( ornament, pass_len )
ornament, char = divmod( ornament, len( chr_class ))
password. insert( pos, chr_class[ char ])
result = { 'password': ''. join( password )}
result[ None ] = result[ 'password' ]
return result
# btc
_sha256 = lambda v: _hash( v, 'sha256' )
_ripemd160 = lambda v: _hash( v, 'ripemd160' )
def btc( seed = None ):
'''Generate BTC address and key from seed'''
if not seed: return 32
private_key = b'\x80' + seed + b'\x01'
private_key += _sha256( _sha256( private_key ))[ :4 ]
public_key_uncomp = bytes( SigningKey. from_string( seed, curve = SECP256k1 ). get_verifying_key(). to_string() )
public_key = b'\x03' if public_key_uncomp[ -1 ] & 1 else b'\x02'
public_key += public_key_uncomp[ :32 ]
address = b'\x00' + _ripemd160( _sha256( public_key ))
address += _sha256( _sha256( address ))[ :4 ]
result = { 'key': _b58( private_key ), 'address': _b58( address )}
result[ None ] = result[ 'address' ]
return result
# xmr
_hex = lambda v: hexlify( v. to_bytes( 32, 'little' )). decode( 'latin_1' )
_int = lambda v: int. from_bytes( bytes. fromhex( v ), 'little' )
_keccak_256 = lambda v: Keccak(). Keccak(( len( v ) * 4, v ), 1088, 512, 0x01, 256, False ). lower()
_sc_reduce32 = lambda v: _hex( _int( v ) % GROUPGEN )
_derive_key = lambda v: _sc_reduce32( _keccak_256( v ))
_secret2pub = lambda v: hexlify( encodepoint( scalarmult( BASE, _int( v ))). encode( 'latin_1' )). decode( 'latin_1' )
_funny_b58 = lambda v: ''. join( _b58( bytes. fromhex( v[ i: i + 16 ])).rjust( 11 if i < 128 else 7, '1' ) for i in range( 0, 138, 16 ))
_monero_addr = lambda v: _funny_b58( v + _keccak_256( v )[ :8 ] )
def xmr( seed = None ):
'''Generate XMR address and private spend and view keys from seed'''
if not seed: return 32
ssk = _sc_reduce32( hexlify( seed ). decode( 'latin_1' ))
svk = _derive_key( ssk )
address = _monero_addr( '12' + _secret2pub( ssk ) + _secret2pub( svk ))
return { 'address': address, 'spend': ssk, 'view': svk, None: address }
COINS = ( 'btc', 'xmr' )
consonants = ( 'b', 'bj', 'bl', 'br', 'c', 'ch', 'cj', 'ck', 'cl', 'cr', 'd', 'dj', 'dl', 'dr', 'dv', 'dw', 'f', 'fj', 'fl', 'fn', 'fr',
'g', 'gd', 'gl', 'gn', 'gr', 'gw', 'h', 'j', 'jl', 'jr', 'k', 'kd', 'kf', 'kj', 'kl', 'kn', 'kp', 'kr', 'ks', 'kv',
'l', 'lj', 'm', 'mj', 'ml' ,'mr', 'n', 'nj', 'nl', 'nr', 'p', 'pch', 'pf', 'ph', 'pj', 'pk', 'pl', 'pn', 'pqu', 'pr', 'ps', 'pw',
'qu', 'r', 'rh', 'rn', 's', 'sc', 'sch', 'sd', 'sg', 'sh', 'shm', 'shn', 'shp', 'shr', 'sht', 'shv', 'sj', 'sk', 'sl', 'sm', 'sn',
'sp', 'sr', 'ss', 'st', 'sv', 'sw', 't', 'tf', 'th', 'thl', 'thn', 'thr', 'tl', 'tn', 'tr', 'ts', 'tsh', 'tv', 'tw', 'tz',
'v', 'vj', 'vl', 'vn', 'vr', 'w', 'y', 'z', 'zd', 'zg', 'zm', 'zn', 'zr', 'zv', 'zw' )
vowels = ( 'a', 'e', 'i', 'o', 'u', 'ea', 'ai', 'au', 'ei', 'ie', 'oi', 'io', 'ou', 'eau', 'oe', 'ue', 'ee', 'oo' )
lc, lv = len( consonants ), len( vowels )
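# `sillyname` and `birthdate` are referenced by the cli below but are not
# shown in this snippet; the following are hypothetical reconstructions that
# follow the same action protocol (no seed -> seed length in bytes,
# seed -> result dict) -- assumptions, not the original code.
def sillyname( seed = None ):
    '''Generate a silly name from seed'''
    if not seed: return 8
    v, name = int. from_bytes( seed, 'little' ), ''
    for _ in range( 3 ):
        v, c = divmod( v, lc )
        v, w = divmod( v, lv )
        name += consonants[ c ] + vowels[ w ]
    result = { 'name': name. capitalize() }
    result[ None ] = result[ 'name' ]
    return result

def birthdate( seed = None ):
    '''Generate a birth date from seed'''
    if not seed: return 4
    days = int. from_bytes( seed, 'little' ) % ( 365 * 80 )
    born = date. today() - timedelta( days = days )
    result = { 'date': born. isoformat() }
    result[ None ] = result[ 'date' ]
    return result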
def main():
'''cli utility'''
is_hex = lambda v: match( '^(?:[0-9a-fA-F][0-9a-fA-F])+$', v ) and v or arg_err( '%s is not a valid hexadecimal number' % v )
is_positive = lambda v: match( '^0*[1-9]+[0-9]*$', v ) and int( v ) or arg_err( 'expecting a positive integer, got %s' % v )
p = ArgumentParser( description = 'Stateless password manager and brain wallet' )
p. add_argument( 'what', nargs = REMAINDER, choices = ( 'seed', 'password', 'address', 'key', 'spend', 'view', 'name', 'date' ), help = 'What information to return' )
p. add_argument( '-w', action = 'append', nargs = '?', choices = COINS, help = 'Generate wallet (default: btc)' )
p. add_argument( '-s', action = 'append', nargs = '?', type = is_positive, help = 'Generate raw seed only with given number of bytes (default: 16)' )
p. add_argument( '-n', action = 'store_const', const = True, help = 'Generate a silly name' )
p. add_argument( '-b', action = 'store_const', const = True, help = 'Generate a birth date' )
p. add_argument( '-D', action = 'store', help = 'Description' )
p. add_argument( '-M', action = 'store', help = 'Master password' )
p. add_argument( '-S', action = 'store', type = is_hex, help = 'Use this seed instead of description/master' )
p. add_argument( '-R', action = 'store_const', const = True, help = 'Use a random seed, don\'t ask for inputs' )
p. add_argument( '-c', action = 'store_const', const = True, help = 'Input master password as clear text' )
p. add_argument( '-d', action = 'store_const', const = True, help = 'Display result instead of copy to clipboard' )
cli = p. parse_args()
if 1 < sum( _ and 1 or 0 for _ in ( cli. w, cli. s, cli. n, cli. b )):
p. error( '-w, -s, -n, -b options are mutually exclusive' )
elif cli. S and cli. R:
p. error( '-S, -R options are mutually exclusive' )
elif ( cli. S or cli. R ) and ( cli. D or cli. M ):
p. error( 'The -D/-M options are mutually exclusive with -S/-R' )
action = btc if cli. w and cli. w[ -1 ] is None else globals()[ cli. w[ -1 ]] if cli. w else placebo if cli. s else sillyname if cli. n else birthdate if cli. b else password
seed_len = 16 if cli. s and cli. s[ -1 ] is None else cli. s[ -1 ] if cli. s else action()
if cli. R:
seed = mk_seed( urandom( 32 ), urandom( 32 ), seed_len )
result = action( seed )
elif cli. S:
seed = bytes. fromhex( cli. S )
if len( seed ) != seed_len:
p. error( 'Wrong seed length, expecting %s bytes' % seed_len )
result = action( seed )
else:
try:
description = bytes( input( 'Enter description: ' ), 'utf-8' ) if not cli. D else bytes( cli. D, 'utf-8' )
if cli. M:
master = bytes( cli. M, 'utf-8' )
elif cli. c:
master = bytes( input( 'Enter password: ' ), 'utf-8' )
else:
master = bytes( getpass( 'Enter password: ' ), 'utf-8' )
master0 = bytes( getpass( 'Enter password: ' ), 'utf-8' )
if master != master0:
print( 'ERROR: Password mismatch' )
exit( 1 )
except ( EOFError, KeyboardInterrupt ):
exit( 0 )
seed = mk_seed( description, master, seed_len )
result = action( seed )
result[ 'seed' ] = hexlify( seed ). decode( 'latin_1' )
if not cli. what:
output = result[ None ]
else:
try:
output = ' '. join( result[ what ] for what in cli. what )
except KeyError as e:
p. error( 'Unexpected output specifier %s' % e )
if cli. d:
print( output )
else:
pyperclip_copy( output )
print( 'Result placed in clipboard.' )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
220,
220,
645,
85,
1721,
198,
2,
220,
220,
220,
15069,
357,
66,
8,
1584,
5184,
1008,
28492,
2... | 2.396788 | 4,234 |
from sqlalchemy import Column, String, Integer, ForeignKey, Text
from sqlalchemy.orm import relationship
from infrastructor.IocManager import IocManager
from models.dao.Entity import Entity
| [
6738,
44161,
282,
26599,
1330,
29201,
11,
10903,
11,
34142,
11,
8708,
9218,
11,
8255,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
6738,
1167,
5685,
1356,
273,
13,
40,
420,
13511,
1330,
314,
420,
13511,
198,
6738,
4981,
13,... | 3.897959 | 49 |
"""
This module can be used without django
"""
from hashlib import md5
import urllib2
import logging
from urllib import urlencode
POSTBACK_URL = '/eng/query/validate'
POSTBACK_SERVER = 'https://www.payfast.co.za'
def signature(data):
"""
Calculates PayFast signature.
'data' should be a SortedDict or an OrderedDict instance.
"""
text = _signature_string(data)
return md5(text).hexdigest()
def data_is_valid(post_data, postback_server=POSTBACK_SERVER):
"""
Validates data via the postback. Returns True if data is valid,
False if data is invalid and None if the request failed.
"""
post_str = urlencode(_values_to_encode(post_data))
postback_url = postback_server.rstrip('/') + POSTBACK_URL
try:
response = urllib2.urlopen(postback_url, post_str).read()
except urllib2.HTTPError:
return None
if response == 'VALID':
return True
if response == 'INVALID':
return False
return None
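# Usage sketch for the tri-state return value (names are illustrative):
#
#   result = data_is_valid(post_data)
#   if result:             # PayFast answered 'VALID'
#       process_payment(post_data)
#   elif result is False:  # PayFast answered 'INVALID'
#       reject_payment(post_data)
#   else:                  # result is None: the postback request itself failed
#       retry_later(post_data)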
| [
37811,
198,
1212,
8265,
460,
307,
973,
1231,
42625,
14208,
198,
37811,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
11748,
2956,
297,
571,
17,
198,
11748,
18931,
198,
6738,
2956,
297,
571,
1330,
2956,
11925,
8189,
198,
198,
32782,
3109... | 2.639785 | 372 |
from regex.lexer import tokens
from regex.parser import parse
from regex.generator import *
from regex.regex import Regex
| [
6738,
40364,
13,
2588,
263,
1330,
16326,
198,
6738,
40364,
13,
48610,
1330,
21136,
198,
6738,
40364,
13,
8612,
1352,
1330,
1635,
198,
6738,
40364,
13,
260,
25636,
1330,
797,
25636,
198
] | 3.8125 | 32 |
import requests,json,sys,re,os
from datetime import datetime
from VideoIDHelper import *
#if you intend to extend this module
#simply call
#saveYouTubeAnnotations.retrieveAnnotation
#
#arg is either an ID, link or shortened link
#and location is the location the XML file is to be saved in
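#e.g. (the link and save location below are illustrative only):
#   retrieveAnnotation('https://youtu.be/dQw4w9WgXcQ', '/tmp/annotations')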
if __name__ == "__main__":
main() | [
11748,
7007,
11,
17752,
11,
17597,
11,
260,
11,
418,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
7623,
2389,
47429,
1330,
1635,
198,
198,
2,
361,
345,
14765,
284,
9117,
428,
8265,
198,
2,
14323,
2145,
869,
198,
2,
21928... | 3.391753 | 97 |
# -*- coding: utf-8 -*-
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchElementException
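
# No test case survives in this snippet; a minimal hedged skeleton of the kind
# of Selenium test these imports suggest (the URL and the element check are
# assumptions):
class SmokeTest(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()

    def test_page_has_body(self):
        self.driver.get("http://example.com/")
        try:
            self.driver.find_element_by_tag_name("body")
        except NoSuchElementException:
            self.fail("page body not found")

    def tearDown(self):
        self.driver.quit()
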
if __name__ == "__main__":
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
555,
715,
395,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
11321,
13,
1069,
11755,
1330,
1400,
36420,
34695,
16922,
198,
... | 3.049383 | 81 |
from .mlp import *
from .resnet import *
from .densenet import *
| [
6738,
764,
4029,
79,
1330,
1635,
198,
6738,
764,
411,
3262,
1330,
1635,
198,
6738,
764,
67,
18756,
316,
1330,
1635,
198
] | 2.954545 | 22 |
'''
Created on Jun 12, 2012
@author: kristof
'''
| [
7061,
6,
198,
41972,
319,
7653,
1105,
11,
2321,
198,
198,
31,
9800,
25,
479,
1585,
1659,
198,
7061,
6,
628,
198
] | 2.363636 | 22 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import xadmin
from open_loan.models import Loan, LoanCategory, LoanScraper, LoanWebsite, Legend
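
# The admin option classes used below are elided in this snippet; xadmin
# registers plain classes, so minimal hedged stubs look like this (the
# list_display fields are assumptions):
class LoanAdmin(object):
    list_display = ['id']


class LoanCategoryAdmin(object):
    list_display = ['id']


class LoanScraperAdmin(object):
    list_display = ['id']


class LoanWebsiteAdmin(object):
    list_display = ['id']


class LegendAdmin(object):
    list_display = ['id']
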
xadmin.site.register(Loan, LoanAdmin)
xadmin.site.register(LoanCategory, LoanCategoryAdmin)
xadmin.site.register(LoanScraper, LoanScraperAdmin)
xadmin.site.register(LoanWebsite, LoanWebsiteAdmin)
xadmin.site.register(Legend, LegendAdmin) | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
2124,
28482,
198,
6738,
1280,
62,
5439,
272,
13,
27530,
1330,
32314,
11,
32314,
27313,
11,
32314,
3351,
38545,
11,... | 2.969231 | 130 |
'''Test code.
'''
# pylint: disable=import-error
import unittest
from VectorMizuno import Vectors
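
# The test cases are elided in this snippet; a minimal hedged example of the
# shape they plausibly take (the no-argument constructor is an assumption):
class TestVectors(unittest.TestCase):
    def test_can_construct(self):
        self.assertIsNotNone(Vectors())
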
if __name__ == "__main__":
unittest.main()
| [
7061,
6,
14402,
2438,
13,
198,
7061,
6,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
198,
11748,
555,
715,
395,
198,
198,
6738,
20650,
44,
528,
36909,
1330,
569,
478,
669,
628,
198,
198,
361,
11593,
3672,
834,
6624,
36... | 2.525424 | 59 |
#!/usr/bin/env python3
# imports go here
#
# Free Coding session for 2015-02-24
# Written by Matt Warren
#
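
# The two functions exercised below are elided in this snippet; a plausible
# reconstruction based on the behaviour the main block expects (bad_outter
# raises UnboundLocalError because the inner assignment makes `count` local;
# good_outter fixes the scoping with `nonlocal`):
def bad_outter():
    count = 0
    def inner():
        count += 1  # assignment makes `count` local to inner -> UnboundLocalError
    inner()
    return count


def good_outter():
    count = 0
    def inner():
        nonlocal count  # rebind the enclosing variable instead
        count += 1
    inner()
    return count
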
if __name__ == '__main__':
try:
bad_outter()
except UnboundLocalError as e:
print("bad_outter failed because variable was not scoped correctly")
print(good_outter())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
17944,
467,
994,
198,
198,
2,
198,
2,
3232,
327,
7656,
6246,
329,
1853,
12,
2999,
12,
1731,
198,
2,
22503,
416,
4705,
11328,
198,
2,
628,
628,
198,
361,
11593,
3672,
834,
... | 2.646552 | 116 |
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from fds.sdk.AxiomaEquityOptimizer.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.AxiomaEquityOptimizer.model.axioma_equity_optimization_parameters import AxiomaEquityOptimizationParameters
from fds.sdk.AxiomaEquityOptimizer.model.document_directories import DocumentDirectories
from fds.sdk.AxiomaEquityOptimizer.model.optimal_portfolio import OptimalPortfolio
from fds.sdk.AxiomaEquityOptimizer.model.optimization import Optimization
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_account import OptimizerAccount
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_account_overrides import OptimizerAccountOverrides
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_optimal_holdings import OptimizerOptimalHoldings
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_output_types import OptimizerOutputTypes
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_strategy import OptimizerStrategy
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_strategy_overrides import OptimizerStrategyOverrides
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_trades_list import OptimizerTradesList
| [
2,
781,
539,
23,
25,
645,
20402,
198,
198,
2,
1330,
477,
4981,
656,
428,
5301,
198,
2,
611,
345,
423,
867,
4981,
994,
351,
867,
10288,
422,
530,
2746,
284,
1194,
428,
743,
198,
2,
5298,
257,
3311,
24197,
12331,
198,
2,
284,
3368... | 3.107692 | 455 |
# In this file we will check if each company's grant of stocks is part of a regularly scheduled event
# (incentive bonus, 10b5-1 plan, performance award, etc)
# Import libraries
import bs4 as bs
import requests
import re
# A helper function that checks for key words that identify a transaction as being scheduled
# A helper function to get the transaction details
# This function checks if any of the form 4 filings are a part of incentive
# or performance bonuses or part of a 10b5-1 plan
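
# The helper bodies are elided in this snippet; a hedged sketch of the keyword
# check described above (the keyword list itself is an assumption):
SCHEDULED_PATTERN = re.compile(
    r'10b5-1|incentive|bonus|performance award|scheduled', re.IGNORECASE)


def is_scheduled_transaction(remarks):
    """Return True if the filing's remarks mention a scheduled-event keyword."""
    return bool(SCHEDULED_PATTERN.search(remarks or ''))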
| [
2,
554,
428,
2393,
356,
481,
2198,
611,
1123,
1664,
338,
7264,
286,
14420,
318,
636,
286,
257,
7987,
7530,
1785,
198,
2,
220,
357,
42816,
425,
7202,
11,
838,
65,
20,
12,
16,
1410,
11,
2854,
5764,
11,
3503,
8,
198,
198,
2,
17267,... | 3.870229 | 131 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="skilletlib",
version="1.2.0",
author="Nathan Embery",
author_email="nembery@paloaltonetworks.com",
description="Tools for working with PAN-OS Skillets in Python 3",
license='Apache 2.0',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/paloaltonetworks/skilletlib",
packages=setuptools.find_packages(),
include_package_data=True,
package_data={'assets': ['skilletlib/assets/**/*.yaml', 'skilletlib/assets/**/*.xml']},
install_requires=[
"oyaml",
"docker",
"pan-python",
"pathlib",
"jinja2",
"pyyaml",
"xmldiff",
"xmltodict",
"requests-toolbelt",
"requests",
"jsonpath_ng",
"passlib",
"GitPython",
"jinja2-ansible-filters",
"jmespath"
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
python_requires='>=3.6',
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.201479 | 541 |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
from fastapi.requests import Request
engine = create_engine("sqlite:///./sql_app.db")
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
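

# Not part of the original snippet: the usual session-per-request dependency
# that goes with this setup; FastAPI handlers would receive a session via
# Depends(get_db).
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()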
| [
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
11,
23575,
198,
6738,
3049,
15042,
... | 3.27451 | 102 |
import string
import numpy as np
from scipy.spatial.distance import pdist, squareform
from Bio import Alphabet
from Bio.Alphabet import Reduced
import random
# %%
class MSA:
''' Copies trRosetta's methods'''
    # (several @staticmethod helper methods follow here in the original file;
    # their bodies did not survive into this snippet)
# weighted covariance
# https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/weigcorr.htm
    # (two more elided @staticmethod helpers)
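    # A hedged sketch of the weighted covariance from the NIST reference above
    # (the method name and the use of normalized weights are assumptions):
    @staticmethod
    def weighted_cov(x, w):
        w = w / w.sum()                    # normalize the sequence weights
        xc = x - (w[:, None] * x).sum(0)   # weighted-mean-centred columns
        return (w[:, None] * xc).T @ xc / (1.0 - (w ** 2).sum())
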
#msa = MSA('./data/3k0bA02a')
#import matplotlib.pyplot as plt
#plt.imshow(msa.apc*(1-np.eye(55)))
#plt.show()
#V = msa.hodca(0,16,0,16,0,16)
#Vp = np.linalg.norm(V[3],axis=3)
#Vp = msa.calc_apc(Vp)
#plt.imshow(Vp)
#plt.show()
| [
11748,
4731,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
30246,
1330,
279,
17080,
11,
6616,
687,
198,
198,
6738,
16024,
1330,
45695,
198,
6738,
16024,
13,
2348,
19557,
1330,
39963,
198,
198,
11748,
... | 2.271875 | 320 |
# -*- coding: utf-8 -*-
import sys
import os
from workflow import Workflow3
import leancloud
import util
log = None
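
# The original main() is elided in this snippet; Workflow3.run expects a
# callable taking the workflow object, roughly like this (the item shown is
# an assumption):
def main(wf):
    wf.add_item('Hello from the leancloud workflow')
    wf.send_feedback()
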
if __name__ == '__main__':
wf = Workflow3()
log = wf.logger
sys.exit(wf.run(main))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
30798,
1330,
5521,
11125,
18,
198,
11748,
10904,
17721,
198,
11748,
7736,
198,
198,
6404,
796,
6045,
628,
628,
628,
198,
198,
... | 2.406593 | 91 |
from selenium import webdriver
import pytest
from application import Application
@pytest.fixture(scope='module')
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
12972,
9288,
198,
6738,
3586,
1330,
15678,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
21412,
11537,
198
] | 3.833333 | 30 |
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
import youtube_dl
from .forms import DownloadForm
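

# Not in the original snippet: a minimal hedged sketch of the download view
# these imports point at (the form field name `url` and the template name are
# assumptions):
def download_video(request):
    form = DownloadForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        with youtube_dl.YoutubeDL({'format': 'best'}) as ydl:
            ydl.download([form.cleaned_data['url']])
        return HttpResponse('Download started.')
    return render(request, 'download.html', {'form': form})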
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
11748,
35116,
62,
25404,
1... | 3.895833 | 48 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import os.path as osp
import cv2
from PIL import Image
import pickle
import threading
import multiprocessing as mp
import paddlex.utils.logging as logging
from paddlex.utils import path_normalization
from paddlex.cv.transforms.seg_transforms import Compose
from .dataset import get_encoding
| [
2,
15069,
357,
66,
8,
12131,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845... | 3.681992 | 261 |
from app.api.v1.models.base_model import BaseModel
# model class for products | [
6738,
598,
13,
15042,
13,
85,
16,
13,
27530,
13,
8692,
62,
19849,
1330,
7308,
17633,
628,
220,
220,
220,
1303,
2746,
1398,
329,
3186
] | 3.28 | 25 |
# Copyright 2021 PCL & PKU
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.train.callback import Callback
from mindspore.common.tensor import Tensor
import time
import numpy as np
import os
from mlperf_logging import mllog
| [
2,
15069,
33448,
4217,
43,
1222,
29673,
52,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
74... | 4.055 | 200 |
import random
from unittest import TestCase
from website import WebsiteMock
| [
11748,
4738,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
3052,
1330,
15887,
44,
735,
628
] | 4.105263 | 19 |
import time
from six import string_types
from .LambdaBackedCustomResource import LambdaBackedCustomResource
class Tags(LambdaBackedCustomResource):
"""
Custom Resource to extract tags from the CloudFormation Stack, and
expose them via GetAtt() to other resources that don't automatically
inherit the tags from the stack (e.g. custom resources).
Caveat: Some resources fail when no tags are present. It is advisable to
always configure a tag to be added (via Set={"foo":"bar"}) to avoid this
case.
"""
props = {
'Omit': ([string_types], False), # Keys to remove from list
'Set': (dict, False), # Keys to set/override/add, with the new values
'Dummy': (string_types, False), # Dummy parameter to trigger updates
}
@classmethod
| [
11748,
640,
198,
198,
6738,
2237,
1330,
4731,
62,
19199,
198,
198,
6738,
764,
43,
4131,
6814,
33,
6021,
15022,
26198,
1330,
21114,
6814,
33,
6021,
15022,
26198,
628,
198,
4871,
44789,
7,
43,
4131,
6814,
33,
6021,
15022,
26198,
2599,
1... | 3.120623 | 257 |
import os
import pytest
from pymongo import MongoClient
from starlette.testclient import TestClient
@pytest.fixture
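def client():
    # the fixture body is elided in this snippet; a plausible completion --
    # the application import path is hypothetical:
    from main import app
    return TestClient(app)
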
@pytest.yield_fixture(scope='function')
def db():
    # elided body; a plausible completion: yield a throwaway test database and
    # drop it afterwards (the env var and database name are assumptions)
    mongo = MongoClient(os.environ.get('MONGO_URL', 'mongodb://localhost:27017'))
    yield mongo['test_db']
    mongo.drop_database('test_db')
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
6738,
3491,
21348,
13,
9288,
16366,
1330,
6208,
11792,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
88,
1164,
6... | 3.22 | 50 |
"""Base core views."""
from django.contrib.auth import mixins as auth_mixins
from django.urls import reverse
from django.utils.http import is_safe_url
from django.views import generic
from ..extensions import exts_pool
def find_nextlocation(request, user):
"""Find next location for given user after login."""
if not user.last_login:
# Redirect to profile on first login
return reverse("core:user_index")
nextlocation = request.POST.get("next", request.GET.get("next"))
condition = (
nextlocation and
is_safe_url(nextlocation, request.get_host())
)
if condition:
return nextlocation
if request.user.role == "SimpleUsers":
topredir = request.localconfig.parameters.get_value(
"default_top_redirection")
if topredir != "user":
infos = exts_pool.get_extension_infos(topredir)
nextlocation = infos["topredirection_url"]
else:
nextlocation = reverse("core:user_index")
else:
nextlocation = reverse("core:dashboard")
return nextlocation
class RootDispatchView(auth_mixins.LoginRequiredMixin, generic.RedirectView):
"""Handle root dispatching based on role."""
def get_redirect_url(self):
"""Find proper next hop."""
return find_nextlocation(self.request, self.request.user)
| [
37811,
14881,
4755,
5009,
526,
15931,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5022,
1040,
355,
6284,
62,
19816,
1040,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
4023,... | 2.651663 | 511 |
#!/usr/bin/env python
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2016, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
"""
Usage:
ait-bsc-stop-handler [options] <name>
--service-host=<host> The host for the BSC REST service connection
[default: localhost]
--service-port=<port> The port for the BSC REST service connection
[default: 8080]
"""
import requests
import argparse
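

# The original main() is elided here; a plausible reconstruction from the
# usage string above (the REST endpoint path and the HTTP verb are
# assumptions):
def main():
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument('name', help='name of the capture handler to stop')
    ap.add_argument('--service-host', default='localhost',
                    help='host for the BSC REST service connection')
    ap.add_argument('--service-port', type=int, default=8080,
                    help='port for the BSC REST service connection')
    args = ap.parse_args()
    url = 'http://{}:{}/bsc/handler/{}'.format(
        args.service_host, args.service_port, args.name)
    requests.delete(url)
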
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
13435,
15237,
12,
37057,
16205,
4482,
357,
2390,
44,
2640,
8,
42410,
16984,
15813,
357,
32,
2043,
8,
198,
2,
347,
9774,
2088,
7502,
284,
43953,
290,
10452,
311,
7528,
2737,
3... | 3.486239 | 327 |
import os
import jinja2
tilenames = [
'DES0544-2249',
'DES2122+0001',
'DES2122-0041',
'DES2122+0043',
'DES2122-0124',
'DES2122+0126',
'DES2122-0207',
'DES2122+0209',
'DES2125+0001',
'DES2125-0041',
'DES2125+0043',
'DES2125-0124',
'DES2125+0126',
'DES2125-0207',
'DES2125+0209',
'DES2128+0001',
'DES2128-0041',
'DES2128+0043',
'DES2128-0124',
'DES2128+0126',
'DES2128-0207',
'DES2128+0209',
'DES2131+0001',
'DES2131-0041',
'DES2131+0043',
'DES2131-0124',
'DES2131+0126',
'DES2131-0207',
'DES2131+0209',
'DES2134+0001',
'DES2134-0041',
'DES2134+0043',
'DES2134-0124',
'DES2134+0126',
'DES2134-0207',
'DES2134+0209',
'DES2137+0001',
'DES2137-0041',
'DES2137+0043',
'DES2137-0124',
'DES2137+0126',
'DES2137+0209']
with open('job.sh.temp', 'r') as fp:
tmp = jinja2.Template(fp.read())
for i, tilename in enumerate(tilenames):
with open('job_%s.sh' % tilename, 'w') as fp:
fp.write(tmp.render(
tilename=tilename, seed_galsim=i+1, seed_mcal=i+1000))
os.system('chmod u+x job_%s.sh' % tilename)
| [
11748,
28686,
198,
11748,
474,
259,
6592,
17,
198,
198,
47163,
268,
1047,
796,
685,
198,
220,
220,
220,
705,
30910,
2713,
2598,
12,
17,
21626,
3256,
198,
220,
220,
220,
705,
30910,
17,
18376,
10,
18005,
3256,
198,
220,
220,
220,
705... | 1.784108 | 667 |
"""Exception classes
"""
| [
37811,
16922,
6097,
198,
37811,
628
] | 4.333333 | 6 |
"""
Модуль для генерация отчёта со статистикой по урлам
"""
import sys
import asyncio
import csv
import aiohttp
from typing import Tuple, List
from argparse import ArgumentParser
from aiohttp import ClientSession
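

# The GeneratorReportStatUrls class is elided in this snippet; a minimal
# hedged sketch of what it plausibly does (file names, CSV columns and method
# shapes are assumptions): fetch each URL concurrently and write per-URL
# status statistics to a CSV report.
class GeneratorReportStatUrls:
    def __init__(self, urls_file: str = 'urls.txt', report_file: str = 'report.csv'):
        self.urls_file = urls_file
        self.report_file = report_file

    async def _fetch(self, session: ClientSession, url: str) -> Tuple[str, int]:
        async with session.get(url) as resp:
            return url, resp.status

    async def _collect(self) -> List[Tuple[str, int]]:
        with open(self.urls_file) as f:
            urls = [line.strip() for line in f if line.strip()]
        async with aiohttp.ClientSession() as session:
            return await asyncio.gather(*(self._fetch(session, u) for u in urls))

    def get_report(self) -> None:
        rows = asyncio.run(self._collect())
        with open(self.report_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(('url', 'status'))
            writer.writerows(rows)
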
if __name__ == "__main__":
generator_report_stat_nodes = GeneratorReportStatUrls()
generator_report_stat_nodes.get_report()
| [
37811,
198,
140,
250,
25443,
112,
35072,
30143,
45367,
12466,
112,
30143,
40623,
12466,
111,
16843,
22177,
16843,
21169,
16142,
141,
228,
18849,
40623,
12466,
122,
20375,
141,
229,
141,
239,
20375,
16142,
220,
21727,
15166,
220,
21727,
2037... | 2.328947 | 152 |
from os import listdir
from os.path import isfile, join
import argparse
import cv2 # pip install opencv-python
from pyzbar.pyzbar import decode
# decode barcode
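

# The barcode() helper used below is elided in this snippet; a plausible
# sketch (the Otsu-threshold preprocessing step is an assumption):
def barcode(image, preprocess=False):
    if preprocess:
        image = cv2.threshold(image, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    for symbol in decode(image):
        print(symbol.type, symbol.data.decode('utf-8'))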
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False, help="path to the image file")
ap.add_argument("-d", "--dir", required=False, help="path to the images folder")
ap.add_argument("-pre", "--preprocess", required=False, help="pre process images")
args = vars(ap.parse_args())
image_dir = args["dir"]
image_file_name = args["image"]
is_preprocess = args["preprocess"]
if is_preprocess:
    print('note: preprocessing does not always work reliably')

if not image_dir and not image_file_name:
    raise SystemExit("required: a path to an image file (--image) or an images dir (--dir)")
if image_dir:
images = [f for f in listdir(image_dir) if isfile(join(image_dir, f))]
for image_in_folder in images:
image_read_in_folder = cv2.imread(image_dir+'/'+image_in_folder, 0)
barcode(image_read_in_folder, is_preprocess)
else:
image_read = cv2.imread(image_file_name, 0)
barcode(image_read, is_preprocess)
| [
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
11748,
1822,
29572,
198,
11748,
269,
85,
17,
220,
220,
1303,
7347,
2721,
1280,
33967,
12,
29412,
198,
6738,
12972,
89,
5657,
13,
9078,
89,
565... | 2.851064 | 376 |
# Backwards compatibility
video_captioner = captioner | [
2,
5157,
2017,
8330,
571,
6267,
198,
15588,
62,
6888,
1159,
263,
796,
8305,
263
] | 3.466667 | 15 |
#
# Copyright Michael Groys, 2012-2014
#
#
# This file contains definition of different commands used in mining statement
# Each command is responsible to create code text for its generator function
#
import sys
from base import *
from async_command import *
from accumulate import *
from db_command import *
from expand import *
from for_command import *
from if_command import *
from io_command import *
from limit_command import *
from match import *
if sys.platform != "win32":
from map_reduce import *
from merge_command import *
from parse_command import *
from pareto import *
from pass_command import *
from reorder import *
from pie_command import *
from select import *
from sortby import *
from tail_command import *
from top_command import *
__all__ = ['createTupleString', 'createNamedParameters', 'CommandBase', 'TypicalCommand']
all_p_commands = filter(lambda var: var.startswith("p_"), dir())
__all__ += all_p_commands
| [
2,
201,
198,
2,
15069,
3899,
10299,
893,
11,
2321,
12,
4967,
201,
198,
2,
201,
198,
201,
198,
2,
201,
198,
2,
770,
2393,
4909,
6770,
286,
1180,
9729,
973,
287,
9691,
2643,
201,
198,
2,
5501,
3141,
318,
4497,
284,
2251,
2438,
242... | 3.203883 | 309 |
"""
Copyright (c) 2018 iCyP
Released under the MIT license
https://opensource.org/licenses/mit-license.php
"""
import bpy, bmesh
from mathutils import Vector,Matrix
from .. import V_Types as VRM_Types
from ..V_Types import nested_json_value_getter as json_get
from math import sqrt,pow,radians
import numpy
import os.path
import json,copy
#region material
#region material_util func
#endregion material_util func
#endregion material
| [
37811,
198,
15269,
357,
66,
8,
2864,
1312,
20418,
47,
198,
45037,
739,
262,
17168,
5964,
198,
5450,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,
14,
2781,
12,
43085,
13,
10121,
198,
198,
37811,
628,
198,
11748,
275,
9078,
11,
275,
76... | 3.039735 | 151 |
"""
Contains models for the export app. These are mainly just to provide
some way to track your exports.
"""
import importlib
from django.db import models
from django.contrib.auth.models import User
class ExportType(models.Model):
"""
Describes 'Type' of export: Bib Records (as MARC), etc.
"""
code = models.CharField(max_length=255, primary_key=True)
path = models.CharField(max_length=255)
label = models.CharField(max_length=255)
description = models.TextField()
order = models.IntegerField()
class ExportFilter(models.Model):
"""
Describes the filter used to limit what entities were exported:
Full export, date-range export, etc.
"""
code = models.CharField(max_length=255, primary_key=True)
label = models.CharField(max_length=255)
order = models.IntegerField()
description = models.TextField()
class Status(models.Model):
"""
Used by ExportInstance to describe the status or state of the job.
"""
code = models.CharField(max_length=255, primary_key=True)
label = models.CharField(max_length=255)
description = models.TextField()
class ExportInstance(models.Model):
"""
Instances of exports that have actually been run, including date
and user that ran them.
"""
user = models.ForeignKey(User)
export_type = models.ForeignKey(ExportType)
export_filter = models.ForeignKey(ExportFilter)
filter_params = models.CharField(max_length=255)
status = models.ForeignKey(Status)
timestamp = models.DateTimeField()
errors = models.IntegerField(default=0)
warnings = models.IntegerField(default=0)
| [
37811,
198,
4264,
1299,
4981,
329,
262,
10784,
598,
13,
2312,
389,
8384,
655,
284,
2148,
198,
11246,
835,
284,
2610,
534,
15319,
13,
198,
37811,
198,
11748,
1330,
8019,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42... | 3.042751 | 538 |
# -*- coding: utf-8 -*-
import time
import re
import threading
from contextlib import contextmanager
from ..exceptions import CleoException
from ..outputs import Output
from .helper import Helper
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
11748,
302,
198,
11748,
4704,
278,
198,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
6738,
11485,
1069,
11755,
1330,
3779,
78,
16922,
198,
6... | 3.448276 | 58 |
import numpy as np
import pandas as pd
import aljpy.download
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, ForeignKey, create_engine
from pavlov import runs, storage
import ast
from tqdm.auto import tqdm
from pathlib import Path
from contextlib import contextmanager
from logging import getLogger
log = getLogger(__name__)
# First modern run
FIRST_RUN = pd.Timestamp('2021-02-03 12:47:26.557749+00:00')
DATABASE = Path('output/experiments/eval/database.sql')
URL = 'https://f002.backblazeb2.com/file/boardlaw/output/experiments/eval/database.sql'
Base = declarative_base()
class Run(Base):
"""One row per training run. A training run corresponds to one folder in `output/pavlov`. They
are usually generated by `main.run`."""
__tablename__ = 'runs'
run = Column(String, primary_key=True)
"""Name of the training run, typically in the format `date - nickname`. This corresponds to
the folder name that the run is stored in by `pavlov`."""
description = Column(String)
"""Description of the group the training run is in, typically in the format `groupname/boardsize`.
    All the data worth looking at has the description `bee/`; the other descriptions are for abandoned
experimental sets.
"""
boardsize = Column(Integer)
"""Size of the board the run was trained on."""
width = Column(Integer)
"""Width of the network that was trained."""
depth = Column(Integer)
"""Depth of the network that was trained."""
nodes = Column(Integer)
"""Number of nodes in the MCTS during training."""
class Snap(Base):
"""One row per snapshot. A snapshot is a version of the model from a specific point in a training run. They
are usually generated by `main` and `storage.LogarithmicStorer`."""
__tablename__ = 'snaps'
id = Column(Integer, primary_key=True)
"""Auto-incremented snapshot ID."""
run = Column(String, ForeignKey('runs.run'))
"""The training run the snapshot was taken during."""
idx = Column(Integer)
"""The index of the snapshot, ranging from 0 for the first snapshot in a run to (typically) 20 for the last one."""
samples = Column(Float)
"""The number of training samples seen by the time the snapshot was taken. See the `main` module and
`storage.LogarithmicStorer.step` method for details. """
flops = Column(Float)
"""The number of floating point operations done by the time the snapshot was taken. See the `main` module
and `storage.LogarithmicStorer.step` method for details. """
class Agent(Base):
"""One row per agent. An agent is a snapshot plus a few parameters describing the test-time search."""
__tablename__ = 'agents'
id = Column(Integer, primary_key=True)
"""Auto-incremented agent ID."""
snap = Column(Integer, ForeignKey('snaps.id'))
"""ID of the snapshot this agent is based on."""
nodes = Column(Integer)
"""Number of nodes in the test-time search tree."""
c = Column(Float)
"""The policy-vs-value test-time search coefficient, c_puct."""
class Trial(Base):
"""One row per trial. A trial is the outcome of a set of games between two agents.
These are usually generated by `arena.best` and `arena.neural`. Be aware that a pair of agents
usually have more than one row here, since trials were conducted piecemeal! In particular,
every pair of agents has at least one row where one agent plays black, and another row where they're
swapped and that agent instead plays white."""
__tablename__ = 'trials'
id = Column(Integer, primary_key=True)
"""Auto-incremented trial ID."""
black_agent = Column(Integer, ForeignKey('agents.id'))
"""ID of the agent playing black."""
white_agent = Column(Integer, ForeignKey('agents.id'))
"""ID of the agent playing black."""
black_wins = Column(Integer)
"""Number of wins by black. Sum of this and white wins gives you the number of games."""
white_wins = Column(Integer)
"""Number of wins by white. Sum of this and black wins gives you the number of games."""
moves = Column(Integer)
"""Number of moves in the games, total"""
times = Column(Integer)
"""Amount of time used, total. See `arena.common.gather` for details on how this is calculated."""
class MohexTrial(Base):
"""One row per mohex trial. A mohex trial is the outcome of a set of games between an agent and MoHex, and
is used to calibrate the top-ranking agents. These are usually generated by `arena.mohex`."""
__tablename__ = 'mohex_trials'
id = Column(Integer, primary_key=True)
"""Auto-incremented trial ID."""
black_agent = Column(Integer, ForeignKey('agents.id'), nullable=True)
"""ID of the agent playing black. Null if the agent was MoHex."""
white_agent = Column(Integer, ForeignKey('agents.id'), nullable=True)
"""ID of the agent playing white. Null if the agent was MoHex."""
black_wins = Column(Integer)
"""Number of wins by black. Sum of this and white wins gives you the number of games."""
white_wins = Column(Integer)
"""Number of wins by white. Sum of this and black wins gives you the number of games."""
moves = Column(Integer)
"""Number of moves in the games, total"""
times = Column(Integer)
"""Amount of time used, total. See `arena.common.gather` for details on how this is calculated."""
_engine = None
@contextmanager
def connection():
    # the decorated function is elided here; a hedged completion (name and
    # body are assumptions): lazily create the sqlite engine and yield
    # connections to it
    global _engine
    if _engine is None:
        _engine = create_engine('sqlite:///' + str(DATABASE))
    with _engine.connect() as conn:
        yield conn
 | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
435,
73,
9078,
13,
15002,
198,
11748,
44161,
282,
26599,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198... | 3.199071 | 1,723 |