index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,000 | 538360299596b49893be605d6fb08f3a41a5ed88 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 23 15:26:26 2018
@author: NicholasWolczynski
"""
import setup
from sklearn import preprocessing
# Create label encoder over the two target classes.
class_y = ['Unreliable', 'Reliable']
le = preprocessing.LabelEncoder()
le.fit(class_y)
# Encode labels for both train and testing data.
# NOTE(review): assumes setup.train / setup.test expose a 'label' column
# containing only the two classes above -- confirm in setup.py.
train_y = le.transform(setup.train['label'])
test_y = le.transform(setup.test['label'])
# Check to see if it worked.  LabelEncoder orders classes alphabetically,
# so 'Reliable' -> 0 and 'Unreliable' -> 1.
print(le.transform(['Unreliable', 'Reliable', 'Reliable']))
print(train_y)
print(test_y)
|
13,001 | b506b9887527c396d0828e182e58d0b231ddb58d | # Uses python3
import sys
def get_optimal_value(capacity, weights, values):
    """Greedy fractional knapsack.

    Repeatedly takes as much as possible of the remaining item with the
    highest value/weight ratio until capacity is exhausted.

    Note: `weights` is reduced in place as items are consumed, matching
    the original behavior.

    :param capacity: non-negative total weight capacity
    :param weights: list of item weights (mutated)
    :param values: list of item values, parallel to `weights`
    :return: maximum attainable value as a float
    """
    value = 0.
    for _ in range(len(weights)):
        if capacity == 0:
            # Knapsack full -- the original kept looping, adding zero each
            # pass; stop early instead.
            break
        best_idx = -1
        best_ratio = 0  # renamed from `max`, which shadowed the builtin
        for j in range(len(weights)):
            if weights[j] > 0 and values[j] / weights[j] > best_ratio:
                best_ratio = values[j] / weights[j]
                best_idx = j
        if best_idx == -1:
            break  # all items consumed
        take = min(weights[best_idx], capacity)
        capacity -= take
        value += take * best_ratio
        weights[best_idx] -= take
    return value
if __name__ == "__main__":
    # stdin format: n capacity, followed by n (value, weight) pairs.
    data = list(map(int, sys.stdin.read().split()))
    n, capacity = data[0:2]
    # Values sit at even offsets, weights at odd offsets of the
    # interleaved pairs.
    values = data[2:(2 * n + 2):2]
    weights = data[3:(2 * n + 2):2]
    opt_value = get_optimal_value(capacity, weights, values)
    print("{:.10f}".format(opt_value))
|
13,002 | 38f460115d5313d5c060cfb68e2a635f247efdac | class Solution:
def totalNQueens(self, n: int) -> int:
ans = 0
def dfs(i,col,dig1,dig2):
if i == n:
nonlocal ans
ans += 1
return
for j in range(n):
if j not in col and i+j not in dig1 and j-i not in dig2:
dfs(i+1,col+[j],dig1+[i+j],dig2+[j-i])
dfs(0,[],[],[])
return ans |
13,003 | cf70648c2711e42e5cf51df7d89c9b1eed38f024 | #!/usr/bin/env python
import json
import os
from optparse import OptionParser
import random
import sys
import wave
try:
from tqdm import tqdm
except ImportError:
def tqdm(x):
return x
from longwave import Wav, OutputWav, ListLevelBackingStore
from reprowave import dual_carpet
OUTPUT_RATE = None
WAVS = {}
def load_wav(filename):
    """Unpickle a longwave.Wav from `filename`, with memoization.

    Results are cached in the module-level WAVS dict so each file is
    loaded only once per run.  Asserts the sample format matches the
    requested output (16-bit samples at OUTPUT_RATE frames/s).
    NOTE: Python 2 module (print statements).
    """
    global WAVS
    if filename in WAVS:
        return WAVS[filename]
    print "loading", filename, "..."
    assert filename.endswith('.pickle')
    w = Wav.unpickle(filename)
    assert w.sampwidth == 2   # 16-bit samples only
    assert w.framerate == OUTPUT_RATE
    w.dump()
    WAVS[filename] = w
    return w
def main(argv):
    """Render one scene from a JSON scheme file to <scene_name>.mp3.

    argv: [prog, scheme.json, scene_number] plus the options below.
    Each layer is rendered to a temporary layerN.wav via
    reprowave.dual_carpet; multiple layers are mixed with sox, the result
    is encoded with lame, and intermediates are deleted.
    Shells out to sox/lame/mv/ls, so those must be on PATH.
    """
    global OUTPUT_RATE
    optparser = OptionParser(__doc__)
    optparser.add_option("--minutes", default=8)
    optparser.add_option("--output-rate", default=16000)
    optparser.add_option("--master-dir", default='pickle')
    #optparser.add_option("--snippet-duration", default=2.0)
    optparser.add_option("--output-name", "-o", default=None)
    (options, args) = optparser.parse_args(argv[1:])
    OUTPUT_RATE = int(options.output_rate)
    scheme = json.loads(open(args[0], 'r').read())
    scene_num = int(args[1])
    assert scene_num >= 1   # scenes are numbered from 1 on the command line
    scene = scheme[scene_num-1]
    output_name = options.output_name
    if output_name is None:
        # Default output name: the scene's name with spaces underscored.
        output_name = scene['name'].replace(' ', '_')
    layer_names = []
    for (layer_num, layer) in enumerate(scene['layers']):
        print 'LAYER', layer_num
        wavename = layer['sample']
        wav = load_wav(wavename)
        pattern = layer['pattern']
        print wavename, '-> "%s"' % pattern
        #snippet_dur = float(options.snippet_duration)
        snippet_dur = 2.0   # fixed snippet length in seconds
        repetitions = int(options.minutes) * int(60.0 / snippet_dur)
        layer_name = 'layer%s.wav' % layer_num
        dual_carpet(
            [wav], layer_name,
            repetitions, snippet_dur, pattern,
            framerate=OUTPUT_RATE
        )
        layer_names.append(layer_name)
    if len(layer_names) > 1:
        # Mix all layers into one normalized wav, then drop the temp files.
        os.system('sox --norm -m %s %s.wav' % (
            ' '.join(layer_names), output_name
        ))
        for layer_name in layer_names:
            os.unlink(layer_name)
    else:
        os.system('mv %s %s.wav' % (layer_names[0], output_name))
    lame_name = output_name + '.mp3'
    os.system('lame %s.wav %s' % (output_name, lame_name))
    os.system('ls -lh %s' % lame_name)
    os.unlink('%s.wav' % output_name)
    if False:
        # debugging convenience: play the result in totem
        os.system('totem %s' % lame_name)
if __name__ == '__main__':
    import sys   # NOTE(review): redundant -- sys is already imported at module level
    main(sys.argv)
|
x = input()
# Count the characters that survive repeatedly deleting "ST" substrings:
# each 'T' cancels the nearest pending 'S' (stack-style scan), and the
# script prints how many characters remain.
# Fix: removed the unused local `t` from the original.
visited = [0] * len(x)
ans = 0   # number of deleted characters
s = 0     # pending, not-yet-cancelled 'S' characters
for i in range(len(x)):
    if not visited[i]:
        if x[i:i+2] == "ST":
            # adjacent pair -- delete both, skip over the 'T'
            ans += 2
            visited[i+1] = 1
        elif x[i] == "S":
            s += 1
        elif x[i] == "T" and s:
            ans += 2
            s -= 1
print(len(x) - ans)
13,005 | 351c7a4f48300e0e518b44cfa16a605e694f2d0d | """
Homework target model
AlexNet
GoogLeNet
vgg16
resnet18
Must need ILSVRC2012 Validation set for evaluation
"""
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import scipy
from matplotlib import pyplot as plt
# Standard ImageNet evaluation preprocessing: resize, center-crop to the
# 224x224 input torchvision classification models expect, then normalize
# with the ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
])
# https://csm-kr.tistory.com/m/6
def compare_model(dev):
    """Evaluate AlexNet, VGG16, GoogLeNet and ResNet18 on the ImageNet
    validation set.

    :param dev: torch.device to run inference on
    :return: flat list [top1, top5] * 4 (percentages), in model order
        alexnet, vgg16, googlenet, resnet18 -- the order draw_result()
        labels expect.
    """
    valid_set = torchvision.datasets.ImageNet(root='./validset', transform=transform, split='val')
    valid_loader = DataLoader(valid_set, batch_size=128, shuffle=True, num_workers=4)
    result = []
    for i in range(4):
        if i == 0:
            model = torchvision.models.alexnet(pretrained=True).to(dev)
        if i == 1:
            model = torchvision.models.vgg16(pretrained=True).to(dev)
        if i == 2:
            model = torchvision.models.googlenet(pretrained=True).to(dev)
        if i == 3:
            model = torchvision.models.resnet18(pretrained=True).to(dev)
        model.eval()
        acc_top1 = 0
        acc_top5 = 0
        total = 0
        '''Not need back prop'''
        with torch.no_grad():
            for j, (img, label) in enumerate(valid_loader):
                img = img.to(dev)
                label = label.to(dev)
                # Evaluate by batch size
                output = model(img)
                """rank 1"""
                _, pred = torch.max(output, 1)
                total += label.size(0)
                acc_top1 += (pred == label).sum().item()
                """rank 5"""
                _, rank5 = output.topk(5, 1, True, True)
                rank5 = rank5.t()
                correct5 = rank5.eq(label.view(1, -1).expand_as(rank5))
                correct5 = correct5.contiguous()
                # BUG FIX: the original accumulated correct5[:k] for
                # k in range(6), so a hit at rank r was counted (5 - r)
                # times and top-5 accuracy was inflated.  A sample is a
                # top-5 hit iff any of the 5 prediction rows matches;
                # count each hit exactly once.
                acc_top5 += correct5[:5].reshape(-1).float().sum(0, keepdim=True).item()
                print("step : {} / {}".format(j + 1, len(valid_set) / int(label.size(0))))
                print("Top-1 Accuracy : {0:0.2f}%".format(acc_top1 / total * 100))
                print("Top-5 Accuracy : {0:0.2f}%".format(acc_top5 / total * 100))
        print("Final result")
        print("Top-1 Accuracy : {0:0.2f}%".format(acc_top1 / total * 100))
        print("Top-5 Accuracy: {0:0.2f}%".format(acc_top5 / total * 100))
        result.append(acc_top1 / total * 100)
        result.append(acc_top5 / total * 100)
    return result
def draw_result(eval_output):
    """Render a horizontal bar chart of the per-model accuracies.

    `eval_output` must be ordered as produced by compare_model():
    top-1/top-5 pairs for alexnet, vgg16, googlenet, resnet18.
    """
    model_label = [
        'alexNet Top-1',
        'alexNet Top-5',
        'VGG16 Top-1',
        'VGG16 Top-5',
        'googLeNet Top-1',
        'googLeNet Top-5',
        'resnet18 Top-1',
        'resnet18 Top-5'
    ]
    figure, axis = plt.subplots()
    axis.barh(model_label, eval_output, height=0.6, color='orange', alpha=0.8)
    plt.xlabel('Accuracy (%)')
    plt.title('Compare 4 models')
    # Annotate each bar with its numeric value, just past the bar end.
    for bar_idx, bar_val in enumerate(eval_output):
        axis.text(bar_val + 3, bar_idx + .25, str(bar_val), color='black')
    plt.show()
if __name__ == '__main__':
    # Prefer the first CUDA device when available; CPU works but is slow.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    evaluation_result = compare_model(device)
    draw_result(evaluation_result)
|
13,006 | 2e14cd1d3d415bf4562cc12273f20e9343a09839 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-25 12:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the `published_date` field from the
    `book` model in the fullstackbooks app."""

    dependencies = [
        ('fullstackbooks', '0002_auto_20170725_0819'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='published_date',
        ),
    ]
|
13,007 | 0dba45e27d81cd77f98085fdb8ad445f74653186 | from pefile import PE
from pefile import *
import sys
def getPE(file):
    """Parse a PE file and print selected header fields as a list of ints.

    Fixes two defects in the original:
    - it guarded on `PE(file) == True`, which is never true (pefile.PE
      returns a PE instance), so the body never executed;
    - it passed `data='module.dll'`, which makes pefile parse that literal
      string as the PE image instead of reading `file`.
    A malformed input now surfaces as pefile's own PEFormatError.
    """
    PEFileInstance = PE(file)
    dump_lines = PEFileInstance.dump_info().strip().split('\n')
    pedata = []
    keys = ['SizeOfOptionalHeader','Characteristics','MajorLinkerVersion','MinorLinkerVersion','SizeOfCode','SizeOfInitializedData','SizeOfUninitializedData','AddressOfEntryPoint','BaseOfCode','BaseOfData','ImageBase','SectionAlignment','FileAlignment','MajorOperatingSystemVersion','MinorOperatingSystemVersion','MajorImageVersion','MinorImageVersion','MajorSubsystemVersion','MinorSubsystemVersion','SizeOfImage','SizeOfHeaders','CheckSum','Subsystem','DllCharacteristics','SizeOfStackReserve','SizeOfStackCommit','SizeOfHeapReserve','SizeOfHeapCommit','LoaderFlags','NumberOfRvaAndSizes','SectionsMeanVirtualsize','SectionsMinVirtualsize','SectionMaxVirtualsize']
    for key in keys:
        # First dump line mentioning the key wins; the hex value is
        # assumed to start at column 48 of dump_info() output -- TODO
        # confirm against the pefile version in use.
        for x in range(len(dump_lines)):
            if dump_lines[x].find(key) != -1:
                pedata.append(int(str(dump_lines[x][48:].strip()), 16))
                break
    print(pedata)
if __name__ == '__main__':
    # usage: python script.py <path-to-pe-file>
    getPE(sys.argv[1])
|
13,008 | bcf111792c32fc63b77972b6b415aaecabd33702 | # -*- coding: cp1252 -*-
#UNIVERSIDADE DO ESTADO DO RIO DE JANEIRO - UERJ
#BRUNO BANDEIRA BRANDÃO
#LISTA 3: FUNDAMENTOS DA COMPURAÇÃO 2018/2
#EXERCÍCIO 5
soma = 0
for i in range (1, 26):
p = float (input ('Digite o Peso da caixa: ' ))
soma += p
print ' O peso total da carga é:',soma
|
13,009 | 188eac26d2384b8077acbb9767c5078c41083d56 | # -*- coding: utf-8 -*-
'''
https://leetcode.com/problems/evaluate-reverse-polish-notation/
'''
class Solution(object):
    def evalRPN(self, tokens):
        """
        Evaluate an expression in Reverse Polish Notation.

        :type tokens: List[str]
        :rtype: int (None for empty input)

        Division truncates toward zero (the LeetCode contract): floor
        division of the magnitudes with the sign restored afterwards.
        Fix: the original used `/`, which is float division on Python 3
        and broke the integer result; `//` behaves identically on
        Python 2 and 3 here.
        """
        if not tokens:
            return
        stack = []
        for val in tokens:
            if val in ['+', '-', '*', '/']:
                num1 = stack.pop()   # right operand
                num2 = stack.pop()   # left operand
                if val == '+':
                    stack.append( num2+num1 )
                elif val == '-':
                    stack.append( num2-num1 )
                elif val == '*':
                    stack.append( num2*num1 )
                else:
                    # truncate toward zero: floor-divide magnitudes,
                    # then restore the sign
                    if num1*num2 < 0:
                        stack.append( -(abs(num2)//abs(num1)) )
                    else:
                        stack.append( abs(num2)//abs(num1) )
            else:
                stack.append( int(val) )
        return stack[0]
# Solution 2
# Time: O(n); Space: O(1) [no extra space needed]
# However, during practice, it's much slower than solution 1 because the size changes over time
# (list.pop(i) shifts every later element).
class Solution(object):
    def evalRPN(self, tokens):
        """
        :type tokens: List[str]
        :rtype: int

        In-place variant: replaces each operator and its two operands
        with the partial result.  Note: mutates `tokens`.

        Fix: the original divided with `/`; on Python 3 that yields a
        float, `str()` produces e.g. "2.0", which then fails both the
        digit test and the final int() round-trip.  `//` on magnitudes
        (sign restored separately) truncates toward zero on both
        Python 2 and 3.
        """
        i = 0
        while i < len(tokens):
            # skip operands (allow a leading '-' on negative numbers)
            if tokens[i].isdigit() or tokens[i][1:].isdigit():
                i += 1
                continue
            # tokens[i] is an operator; its operands sit just before it
            i -= 2
            num1 = int( tokens.pop(i) )   # left operand
            num2 = int( tokens.pop(i) )   # right operand
            if tokens[i] == '+':
                tokens[i] = str( num1 + num2 )
            elif tokens[i] == '-':
                tokens[i] = str( num1 - num2 )
            elif tokens[i] == '*':
                tokens[i] = str( num1*num2 )
            else:
                # division truncating toward zero
                sign = 1
                if num1 * num2 < 0:
                    sign = - 1
                num1, num2 = abs(num1), abs(num2)
                tokens[i] = str( sign * (num1//num2) )
            # print tokens
        return int( tokens[0] )
13,010 | b9d7de226634b706f73913f502778a5369e086ac | '''
NowplayingMovieCrawler.py
功能:获取正在上映电影的id和名字
'''
import requests, random
from lxml import etree
from settings import USER_AGENTS, NOW_PLAYING_URL
# 从settings中获取正在上映电影的url
def UrlManager():
    """Return the now-playing movies URL configured in settings."""
    return NOW_PLAYING_URL
# 解析网页为HTML源代码
def HtmlDownloader(url):
    """Fetch `url` and return its HTML body, or None on any failure.

    A random User-Agent from the settings pool is sent with each request;
    anything other than HTTP 200 (or a raised exception, which is
    printed) yields None.
    """
    try:
        response = requests.get(url, headers={
            'User-Agent': random.choice(USER_AGENTS)
        })
        if response.status_code == 200:
            return response.text
        return None
    except Exception as e:
        print(e)
        return None
def Htmlparser(html):
    """Parse the now-playing page and return [{'id': ..., 'name': ...}, ...].

    Fix: the original executed `raise(e)` *before* its diagnostic prints
    and exit(0), leaving that error-handling code unreachable.  The
    messages are now printed first and the exception is re-raised, so
    callers still see the failure.
    """
    movie_list = []
    try:
        # Build an XPath-able document; lxml auto-repairs the HTML.
        html = etree.HTML(html)
    except Exception as e:
        print('can\'t get nowplaying page')
        print(e)
        raise
    # One <li> per movie in the #nowplaying list.
    NowplayingPart_id = html.xpath('//div[@id="nowplaying"]//div[@class="mod-bd"]//ul[@class="lists"]/li')
    # Each <li> carries the movie id and title as attributes.
    for i in NowplayingPart_id:
        movie_id = i.xpath('./@id')
        movie_name = i.xpath('./@data-title')
        movie_entity = {
            'id': movie_id[0],
            'name' : movie_name[0]
        }
        movie_list.append(movie_entity)
    return movie_list
# 获取正在上映电影爬虫调度器
def NowplayingMovieCrawler_Schedular():
    """Driver: fetch and parse the now-playing page, print every movie
    name, and return the parsed movie list."""
    page_html = HtmlDownloader(UrlManager())
    NowPlayingList = Htmlparser(page_html)
    print('近期正在上映的电影有')
    for movie in NowPlayingList:
        print(movie['name'])
    print('\n')
    return NowPlayingList
|
13,011 | aab09158f7fb2164f55a44210ab43eeb16ab2685 | from django.contrib import admin
from django.urls import path
from solicitacao.views import *
# URL routing table; the view callables come from solicitacao.views
# (imported with * above).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', index, name="pagina_inicial"),
    path('autenticar/', do_login, name="autenticar"),
    path('veiculos/', lista_veiculos, name="lista_veiculos"),
    path('veiculos/add', criar_veiculo, name='criar_veiculo'),
    path('motoristas/', lista_motoristas, name="lista_motoristas"),
    path('motoristas/add', criar_motorista, name='criar_motorista'),
]
|
13,012 | b087eafb86c1227eed85f6cd02c2f7341aac3e69 | import sys
import argparse
import networkx as nx
import json
import urllib2
from matplotlib import pyplot as plt
from matplotlib.colors import ColorConverter
from collections import defaultdict
def as_network(d):
    """Build a networkx Graph from a Cytoscape-style JSON dict.

    Each entry of d['nodes'] supplies id, orf (used as name/label),
    classes and an x/y position; nodes whose classes include 'tf' are
    drawn as rectangles, others as circles.  d['edges'] (optional)
    supplies source/target and an integer weight stored as line width.

    NOTE(review): uses the networkx 1.x positional attr_dict signature of
    add_node/add_edge; this signature was removed in networkx 2.x.
    """
    G = nx.Graph()
    for n in d['nodes']:
        G.add_node(n['data']['id'],
            {
                'name': n['data']['orf'],
                'label': n['data']['orf'],
                'classes': n['classes'],
                'graphics': {
                    'x': n['position']['x'],
                    'y': n['position']['y'],
                    # transcription factors ('tf' class) render as rectangles
                    'type': 'rectangle' if 'tf' in n['classes'].split() else 'circle',
                }
            }
        )
    if 'edges' in d:
        for e in d['edges']:
            G.add_edge(e['data']['source'], e['data']['target'],
                {
                    'graphics': {
                        'width': int(e['data']['weight'])
                    }
                }
            )
    return G
def get_positions(G):
    """Map node id -> (x, y') where y' = |y - max(y)| mirrors the vertical
    axis (presumably because the JSON uses screen coordinates with y
    growing downward, while matplotlib's y grows upward)."""
    node_ids = []
    xs = []
    ys = []
    for node_id, attrs in G.nodes(data=True):
        node_ids.append(node_id)
        xs.append(attrs['graphics']['x'])
        ys.append(attrs['graphics']['y'])
    top = max(ys)
    flipped = [abs(yv - top) for yv in ys]
    return {nid: (xv, yv) for nid, xv, yv in zip(node_ids, xs, flipped)}
def get_edge_widths(G):
    """Scale raw edge weights into drawing line widths.

    Widths are scaled by (2 - 0.1) / (wmax - wmin), as in the original.
    Fix: when every edge has the same weight (or there is only one edge)
    wmax == wmin and the original raised ZeroDivisionError; fall back to
    a uniform width of 1.0 in that case.
    """
    w = []
    for e in G.edges(data=True):
        w.append(e[2]['graphics']['width'])
    wmax = max(w)
    wmin = min(w)
    if wmax == wmin:
        # no spread to normalize over -- draw everything the same
        return [1.0 for _ in w]
    return [x * (2 - 0.1) / (wmax - wmin) for x in w]
def get_node_types(G):
    """Group node ids by (marker_shape, color) for draw_networkx_nodes.

    Shape: 's' (square) for rectangle-typed nodes, 'o' for the rest.
    Color: green #219D1A for nodes whose classes include 'basket',
    grey #AAAAAA otherwise.  Returns a defaultdict(list).
    """
    default_color = '#AAAAAA'
    basket_color = '#219D1A'
    groups = defaultdict(list)
    for node_id, attrs in G.nodes(data=True):
        shape = 's' if attrs['graphics']['type'] == 'rectangle' else 'o'
        is_basket = 'basket' in attrs['classes'].split()
        color = basket_color if is_basket else default_color
        groups[(shape, color)].append(node_id)
    return groups
def parse_args():
    """Command line: one positional JSON network string, plus --type
    choosing the output format (gml | png | pdf, default gml)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('json', help='JSON string of the network')
    parser.add_argument('--type', help='filetype of output',
                        choices=('gml', 'png', 'pdf'), default='gml')
    args = parser.parse_args()
    return args
def main():
    """Decode the URL-quoted JSON network from argv and write it to stdout
    as GML, PNG or PDF.

    NOTE: Python 2 only (urllib2, dict.iteritems).
    """
    args = parse_args()
    network = json.loads(urllib2.unquote(args.json.encode('utf8')))
    G = as_network(network)
    if args.type == 'gml':
        nx.write_gml(G, sys.stdout)
    elif args.type == 'png' or args.type == 'pdf':
        # Bare canvas: no frame, no axes.
        ax = plt.axes(frameon=False)
        ax.get_yaxis().set_visible(False)
        ax.get_xaxis().set_visible(False)
        nodepos = get_positions(G)
        if 'edges' in network:
            nx.draw_networkx_edges(G,
                pos=nodepos,
                edge_color='0.6',
                width=get_edge_widths(G))
        # One draw call per (shape, color) group -- networkx can only
        # draw a single marker shape per call.
        for (shape, color), nodes in get_node_types(G).iteritems():
            nx.draw_networkx_nodes(G,
                pos=nodepos,
                nodelist=nodes,
                node_color=color,
                ax=ax,
                linewidths=0.5,
                node_size=100,
                node_shape=shape)
        # Labels are drawn slightly above each node (y + 17).
        nx.draw_networkx_labels(G,
            pos={n: (x, y + 17) for n, (x, y) in nodepos.iteritems()},
            labels=nx.get_node_attributes(G, 'label'),
            font_size=6)
        # NOTE(review): 'tight' is skipped for a single-node graph --
        # presumably it misbehaves in that case; confirm before changing.
        bbox = None if G.number_of_nodes() == 1 else 'tight'
        plt.savefig(sys.stdout, dpi=300, bbox_inches=bbox, format=args.type)
if __name__ == '__main__':
    main()
|
13,013 | 1ce9f3da51759af71d012798bdfe68f65ac2c351 | from rest_framework import routers
from .api import LeaveViewSet
# Expose the Leave API under /api/leaves via DRF's default router
# (basename 'leaves'); the generated routes become this module's urlpatterns.
router = routers.DefaultRouter()
router.register('api/leaves', LeaveViewSet, 'leaves')
urlpatterns = router.urls
13,014 | 07e1b03589c61cecf64b39df6003090d481c073d | import requests,re,pymysql,time
date = time.strftime('%Y-%m-%d')  # today's date, stamped onto every inserted row
def insert_mysql(sql):
    """Execute `sql` against the daxiangzhanshi database, commit, and
    print any rows the statement left on the cursor.

    Fix: the original leaked the connection and cursor whenever
    execute()/commit() raised; try/finally now guarantees both are
    closed.

    SECURITY NOTE(review): credentials are hard-coded here and callers
    build SQL with % formatting (injection-prone) -- move credentials to
    configuration and switch to parameterized queries.
    """
    conn = pymysql.connect(host='121.201.68.21', port=3307, user='jiang', passwd='jiangwenhui', db='daxiangzhanshi',
                           charset='UTF8')
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            conn.commit()
            for i in cur:
                print(i)
        finally:
            cur.close()
    finally:
        conn.close()
# Scrape click counts and source IPs from the stats endpoint, then rebuild
# the two app01 tables from scratch (delete everything, re-insert).
ret = requests.get('http://120.76.65.147:5000/ip')
# The page holds two identically-styled <div id="menu"> blocks:
# data[0] = click counts, data[1] = source IPs.
data=re.findall('''<div id="menu" style="height:750px;width:200px;float:left;">(.*?)</div>''',ret.text)
djcs=re.findall(">(.*?),(.*?)<",data[0])
print(data[1])
# SECURITY NOTE(review): SQL is built with % formatting from scraped
# values -- injection-prone; prefer parameterized queries.
insert_mysql("delete from app01_the_number_of_clicks")
for i in djcs:
    sql="insert into app01_the_number_of_clicks values (null,'%s','%s','%s')"%(i[0],i[1],date)
    insert_mysql(sql)
    print(sql)
source_ip=re.findall(">(.*?),(.*?)<",data[1])
insert_mysql("delete from app01_source_ip")
for i in source_ip:
    sql="insert into app01_source_ip values (null,'%s','%s','%s')"%(i[0],i[1],date)
    insert_mysql(sql)
    print(sql)
13,015 | 818b792c7b0c688e646d16ecc60f4382a4e9c4f8 | import blockchain_client as client
def main():
    """Parse command-line arguments, then set up and launch the
    blockchain client app."""
    cli_args = client.arg_parse()
    app = client.setup(cli_args)
    client.run_app(app)


if __name__ == "__main__":
    main()
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving its sign.

        Returns 0 when the reversed value falls outside the signed
        32-bit range [-2**31, 2**31 - 1].
        """
        sign = -1 if x < 0 else 1
        magnitude = abs(x)
        reversed_value = 0
        while magnitude:
            magnitude, digit = divmod(magnitude, 10)
            reversed_value = reversed_value * 10 + digit
        reversed_value *= sign
        # 32-bit overflow rule: out-of-range results collapse to 0.
        if -2147483648 <= reversed_value <= 2147483647:
            return reversed_value
        return 0
a, b, c = map(int, input().split())
# Given three integers A, B, C, print the four modular values below.
# FIX: the original ended the last line with a C-style "// ..." comment;
# Python has no such syntax, so the whole file was a SyntaxError.  It is
# now a proper '#' comment (translated from Korean).
print((a+b)%c)
print((a%c+b%c)%c)
print((a*b)%c)
print((a%c*b%c)%c)  # given three numbers A, B, C, compute the four values above
13,018 | e76bd71b66a783c7e80dbd9eccb69cc20280efe8 | from os.path import exists, join
from os import makedirs
from sklearn.metrics import confusion_matrix
from helper_tool import DataProcessing as DP
import tensorflow as tf
import numpy as np
import helper_tf_util
import time
import math
from utils.sampling import tf_sampling
def log_out(out_str, f_out):
    """Append `out_str` (newline-terminated) to the log file handle,
    flushing immediately so the line survives a crash, and echo the same
    text to stdout."""
    line = out_str + '\n'
    f_out.write(line)
    f_out.flush()
    print(out_str)
def sampling(batch_size, npoint, pts, feature=None):
    '''
    Farthest-point-sample npoint points from each cloud in the batch.
    inputs:
        batch_size: scalar, number of clouds in the batch
        npoint: scalar, number of points to sample
        pts: B * N * D, input point cloud
        feature: optional B * N * F per-point features, gathered with the
            same sampled indices
    output:
        sub_pts: B * npoint * D, sub-sampled point cloud
        (plus the gathered features when `feature` is given)
    '''
    fps_idx = tf_sampling.farthest_point_sample(npoint, pts)
    # Pair each sampled point index with its batch index so gather_nd can
    # address [batch, point] positions.
    batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1)), (1, npoint,1))
    idx = tf.concat([batch_indices, tf.expand_dims(fps_idx, axis=2)], axis=2)
    idx.set_shape([batch_size, npoint, 2])
    if feature is None:
        return tf.gather_nd(pts, idx)
    else:
        return tf.gather_nd(pts, idx), tf.gather_nd(feature, idx)
class Network:
def __init__(self, dataset, config):
flat_inputs = dataset.flat_inputs
self.config = config
# Path of the result folder
if self.config.saving:
if self.config.saving_path is None:
self.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
else:
self.saving_path = self.config.saving_path
makedirs(self.saving_path) if not exists(self.saving_path) else None
with tf.variable_scope('inputs'):
self.inputs = dict()
num_layers = self.config.num_layers
self.inputs['features'] = flat_inputs[0]
self.inputs['labels'] = flat_inputs[1]
self.inputs['input_inds'] = flat_inputs[2]
self.inputs['cloud_inds'] = flat_inputs[3]
self.labels = self.inputs['labels']
self.is_training = tf.placeholder(tf.bool, shape=())
self.training_step = 1
self.training_epoch = 0
self.correct_prediction = 0
self.accuracy = 0
self.mIou_list = [0]
self.class_weights = DP.get_class_weights(dataset.name)
self.time_stamp = time.strftime('_%Y-%m-%d_%H-%M-%S', time.gmtime())
self.Log_file = open('log_train_' + dataset.name + str(dataset.val_split) + self.time_stamp + '.txt', 'a')
with tf.variable_scope('layers'):
self.logits, self.new_xyz, self.xyz = self.inference(self.inputs, self.is_training)
#####################################################################
# Ignore the invalid point (unlabeled) when calculating the loss #
#####################################################################
with tf.variable_scope('loss'):
self.logits = tf.reshape(self.logits, [-1, config.num_classes])
self.labels = tf.reshape(self.labels, [-1])
# Boolean mask of points that should be ignored
ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
for ign_label in self.config.ignored_label_inds:
ignored_bool = tf.logical_or(ignored_bool, tf.equal(self.labels, ign_label))
# Collect logits and labels that are not ignored
valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
valid_logits = tf.gather(self.logits, valid_idx, axis=0)
valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)
# Reduce label values in the range of logit shape
reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
inserted_value = tf.zeros((1,), dtype=tf.int32)
for ign_label in self.config.ignored_label_inds:
reducing_list = tf.concat([reducing_list[:ign_label], inserted_value, reducing_list[ign_label:]], 0)
valid_labels = tf.gather(reducing_list, valid_labels_init)
aug_loss_weights = tf.constant([0.1, 0.1, 0.3, 0.5, 0.5])
aug_loss = 0
for i in range(self.config.num_layers):
centroids = tf.reduce_mean(self.new_xyz[i], axis=2)
relative_dis = tf.sqrt(tf.reduce_sum(tf.square(centroids-self.xyz[i]), axis=-1) + 1e-12)
aug_loss = aug_loss + aug_loss_weights[i] * tf.reduce_mean(tf.reduce_mean(relative_dis, axis=-1), axis=-1)
self.loss = self.get_loss(valid_logits, valid_labels, self.class_weights) + aug_loss
with tf.variable_scope('optimizer'):
self.learning_rate = tf.Variable(config.learning_rate, trainable=False, name='learning_rate')
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.variable_scope('results'):
self.correct_prediction = tf.nn.in_top_k(valid_logits, valid_labels, 1)
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.prob_logits = tf.nn.softmax(self.logits)
tf.summary.scalar('learning_rate', self.learning_rate)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.saver = tf.train.Saver(my_vars, max_to_keep=100)
c_proto = tf.ConfigProto()
c_proto.gpu_options.allow_growth = True
self.sess = tf.Session(config=c_proto)
self.merged = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(config.train_sum_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
def inference(self, inputs, is_training):
d_out = self.config.d_out
ratio = self.config.sub_sampling_ratio
k_n = self.config.k_n
feature = inputs['features']
og_xyz = feature[:, :, :3]
feature = tf.layers.dense(feature, 8, activation=None, name='fc0')
feature = tf.nn.leaky_relu(tf.layers.batch_normalization(feature, -1, 0.99, 1e-6, training=is_training))
feature = tf.expand_dims(feature, axis=2)
# ###########################Encoder############################
f_encoder_list = []
input_xyz = og_xyz
input_up_samples = []
new_xyz_list = []
xyz_list = []
n_pts = self.config.num_points
for i in range(self.config.num_layers):
# Farthest Point Sampling:
input_neigh_idx = tf.py_func(DP.knn_search, [input_xyz, input_xyz, k_n], tf.int32)
n_pts = n_pts // ratio[i]
sub_xyz, inputs_sub_idx = tf.cond(tf.equal(is_training, tf.constant(True)), lambda: sampling(self.config.batch_size, n_pts, input_xyz, input_neigh_idx), lambda: sampling(self.config.val_batch_size, n_pts, input_xyz, input_neigh_idx))
inputs_interp_idx = tf.py_func(DP.knn_search, [sub_xyz, input_xyz, 1], tf.int32)
input_up_samples.append(inputs_interp_idx)
# Bilateral Context Encoding
f_encoder_i, new_xyz = self.bilateral_context_block(feature, input_xyz, input_neigh_idx, d_out[i],
'Encoder_layer_' + str(i), is_training)
f_sampled_i = self.random_sample(f_encoder_i, inputs_sub_idx)
feature = f_sampled_i
if i == 0:
f_encoder_list.append(f_encoder_i)
f_encoder_list.append(f_sampled_i)
xyz_list.append(input_xyz)
new_xyz_list.append(new_xyz)
input_xyz = sub_xyz
# ###########################Encoder############################
# ###########################Decoder############################
# Adaptive Fusion Module
f_multi_decoder = [] # full-sized feature maps
f_weights_decoders = [] # point-wise adaptive fusion weights
for n in range(self.config.num_layers):
feature = f_encoder_list[-1-n]
feature = helper_tf_util.conv2d(feature, feature.get_shape()[3].value, [1, 1],
'decoder_0' + str(n),
[1, 1], 'VALID', True, is_training)
f_decoder_list = []
for j in range(self.config.num_layers-n):
f_interp_i = self.nearest_interpolation(feature, input_up_samples[-j - 1 -n])
f_decoder_i = helper_tf_util.conv2d_transpose(tf.concat([f_encoder_list[-j - 2 -n], f_interp_i], axis=3),
f_encoder_list[-j - 2 -n].get_shape()[-1].value, [1, 1],
'Decoder_layer_' + str(n) + '_' + str(j), [1, 1], 'VALID', bn=True,
is_training=is_training)
feature = f_decoder_i
f_decoder_list.append(f_decoder_i)
# collect full-sized feature maps which are upsampled from multiple resolutions
f_multi_decoder.append(f_decoder_list[-1])
# summarize point-level information
curr_weight = helper_tf_util.conv2d(f_decoder_list[-1], 1, [1, 1], 'Decoder_weight_' + str(n), [1, 1], 'VALID', bn=False, activation_fn=None)
f_weights_decoders.append(curr_weight)
# regress the fusion parameters
f_weights = tf.concat(f_weights_decoders, axis=-1)
f_weights = tf.nn.softmax(f_weights, axis=-1)
# adptively fuse them by calculating a weighted sum
f_decoder_final = tf.zeros_like(f_multi_decoder[-1])
for i in range(len(f_multi_decoder)):
f_decoder_final = f_decoder_final + tf.tile(tf.expand_dims(f_weights[:,:,:,i], axis=-1), [1, 1, 1, f_multi_decoder[i].get_shape()[-1].value]) * f_multi_decoder[i]
# ###########################Decoder############################
f_layer_fc1 = helper_tf_util.conv2d(f_decoder_final, 64, [1, 1], 'fc1', [1, 1], 'VALID', True, is_training)
f_layer_fc2 = helper_tf_util.conv2d(f_layer_fc1, 32, [1, 1], 'fc2', [1, 1], 'VALID', True, is_training)
f_layer_drop = helper_tf_util.dropout(f_layer_fc2, keep_prob=0.5, is_training=is_training, scope='dp1')
f_layer_fc3 = helper_tf_util.conv2d(f_layer_drop, self.config.num_classes, [1, 1], 'fc', [1, 1], 'VALID', False,
is_training, activation_fn=None)
f_out = tf.squeeze(f_layer_fc3, [2])
return f_out, new_xyz_list, xyz_list
def train(self, dataset):
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
self.sess.run(dataset.train_init_op)
while self.training_epoch < self.config.max_epoch:
t_start = time.time()
try:
ops = [self.train_op,
self.extra_update_ops,
self.merged,
self.loss,
self.logits,
self.labels,
self.accuracy]
_, _, summary, l_out, probs, labels, acc = self.sess.run(ops, {self.is_training: True})
self.train_writer.add_summary(summary, self.training_step)
t_end = time.time()
if self.training_step % 50 == 0:
message = 'Step {:08d} L_out={:5.3f} Acc={:4.2f} ''---{:8.2f} ms/batch'
log_out(message.format(self.training_step, l_out, acc, 1000 * (t_end - t_start)), self.Log_file)
self.training_step += 1
except tf.errors.OutOfRangeError:
m_iou = self.evaluate(dataset)
if m_iou > np.max(self.mIou_list):
# Save the best model
snapshot_directory = join(self.saving_path, 'snapshots')
makedirs(snapshot_directory) if not exists(snapshot_directory) else None
self.saver.save(self.sess, snapshot_directory + '/snap', global_step=self.training_step)
self.mIou_list.append(m_iou)
log_out('Best m_IoU is: {:5.3f}'.format(max(self.mIou_list)), self.Log_file)
self.training_epoch += 1
self.sess.run(dataset.train_init_op)
# Update learning rate
op = self.learning_rate.assign(tf.multiply(self.learning_rate,
self.config.lr_decays[self.training_epoch]))
self.sess.run(op)
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
except tf.errors.InvalidArgumentError as e:
print('Caught a NaN error :')
print(e.error_code)
print(e.message)
print(e.op)
print(e.op.name)
print([t.name for t in e.op.inputs])
print([t.name for t in e.op.outputs])
a = 1 / 0
print('finished')
self.sess.close()
def evaluate(self, dataset):
# Initialise iterator with validation data
self.sess.run(dataset.val_init_op)
gt_classes = [0 for _ in range(self.config.num_classes)]
positive_classes = [0 for _ in range(self.config.num_classes)]
true_positive_classes = [0 for _ in range(self.config.num_classes)]
val_total_correct = 0
val_total_seen = 0
for step_id in range(self.config.val_steps):
if step_id % 50 == 0:
print(str(step_id) + ' / ' + str(self.config.val_steps))
try:
ops = (self.prob_logits, self.labels, self.accuracy)
stacked_prob, labels, acc = self.sess.run(ops, {self.is_training: False})
pred = np.argmax(stacked_prob, 1)
if not self.config.ignored_label_inds:
pred_valid = pred
labels_valid = labels
else:
invalid_idx = np.where(labels == self.config.ignored_label_inds)[0]
labels_valid = np.delete(labels, invalid_idx)
labels_valid = labels_valid - 1
pred_valid = np.delete(pred, invalid_idx)
correct = np.sum(pred_valid == labels_valid)
val_total_correct += correct
val_total_seen += len(labels_valid)
conf_matrix = confusion_matrix(labels_valid, pred_valid, np.arange(0, self.config.num_classes, 1))
gt_classes += np.sum(conf_matrix, axis=1)
positive_classes += np.sum(conf_matrix, axis=0)
true_positive_classes += np.diagonal(conf_matrix)
except tf.errors.OutOfRangeError:
break
iou_list = []
for n in range(0, self.config.num_classes, 1):
iou = true_positive_classes[n] / float(gt_classes[n] + positive_classes[n] - true_positive_classes[n])
iou_list.append(iou)
mean_iou = sum(iou_list) / float(self.config.num_classes)
log_out('eval accuracy: {}'.format(val_total_correct / float(val_total_seen)), self.Log_file)
log_out('mean IOU:{}'.format(mean_iou), self.Log_file)
mean_iou = 100 * mean_iou
log_out('Mean IoU = {:.1f}%'.format(mean_iou), self.Log_file)
s = '{:5.2f} | '.format(mean_iou)
for IoU in iou_list:
s += '{:5.2f} '.format(100 * IoU)
log_out('-' * len(s), self.Log_file)
log_out(s, self.Log_file)
log_out('-' * len(s) + '\n', self.Log_file)
return mean_iou
def get_loss(self, logits, labels, pre_cal_weights):
# calculate the weighted cross entropy according to the inverse frequency
class_weights = tf.convert_to_tensor(pre_cal_weights, dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=self.config.num_classes)
weights = tf.reduce_sum(class_weights * one_hot_labels, axis=1)
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
weighted_losses = unweighted_losses * weights
output_loss = tf.reduce_mean(weighted_losses)
return output_loss
def bilateral_context_block(self, feature, xyz, neigh_idx, d_out, name, is_training):
"""
Inputs:
feature: [B, N, 1, c] input features
xyz: [B, N, 3] input coordinates
neigh_idx: [B, N, k] indices of k neighbors
Output:
output_feat: [B, N, 1, 2*d_out] encoded (output) features
shifted_neigh_xyz: [B, N, k, 3] shifted neighbor coordinates, for augmentation loss
"""
batch_size = tf.shape(xyz)[0]
num_points = tf.shape(xyz)[1]
# Input Encoding
feature = helper_tf_util.conv2d(feature, d_out // 2, [1, 1], name + 'mlp1', [1, 1], 'VALID', True, is_training)
# Bilateral Augmentation
neigh_feat = self.gather_neighbour(tf.squeeze(feature, axis=2), neigh_idx) # B, N, k, d_out/2
neigh_xyz = self.gather_neighbour(xyz, neigh_idx) # B, N, k, 3
tile_feat = tf.tile(feature, [1, 1, self.config.k_n, 1]) # B, N, k, d_out/2
tile_xyz = tf.tile(tf.expand_dims(xyz, axis=2), [1, 1, self.config.k_n, 1]) # B, N, k, 3
feat_info = tf.concat([neigh_feat - tile_feat, tile_feat], axis=-1) # B, N, k, d_out
neigh_xyz_offsets = helper_tf_util.conv2d(feat_info, xyz.get_shape()[-1].value, [1, 1], name + 'mlp5', [1, 1], 'VALID', True, is_training) # B, N, k, 3
shifted_neigh_xyz = neigh_xyz + neigh_xyz_offsets # B, N, k, 3
xyz_info = tf.concat([neigh_xyz - tile_xyz, shifted_neigh_xyz, tile_xyz], axis=-1) # B, N, k, 9
neigh_feat_offsets = helper_tf_util.conv2d(xyz_info, feature.get_shape()[-1].value, [1, 1], name + 'mlp6', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
shifted_neigh_feat = neigh_feat + neigh_feat_offsets # B, N, k, d_out/2
xyz_encoding = helper_tf_util.conv2d(xyz_info, d_out//2, [1, 1], name + 'mlp7', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
feat_info = tf.concat([shifted_neigh_feat, feat_info], axis=-1) # B, N, k, 3/2*d_out
feat_encoding = helper_tf_util.conv2d(feat_info, d_out//2, [1, 1], name + 'mlp8', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
# Mixed Local Aggregation
overall_info = tf.concat([xyz_encoding, feat_encoding], axis=-1) # B, N, k, d_out
k_weights = helper_tf_util.conv2d(overall_info, overall_info.get_shape()[-1].value, [1, 1], name + 'mlp9', [1, 1], 'VALID', bn=False, activation_fn=None) # B, N, k, d_out
k_weights = tf.nn.softmax(k_weights, axis=2) # B, N, k, d_out
overall_info_weighted_sum = tf.reduce_sum(overall_info * k_weights, axis=2, keepdims=True) # B, N, 1, d_out
overall_info_max = tf.reduce_max(overall_info, axis=2, keepdims=True) # B, N, 1, d_out
overall_encoding = tf.concat([overall_info_max, overall_info_weighted_sum], axis=-1) # B, N, 1, 2*d_out
# Output Encoding
overall_encoding = helper_tf_util.conv2d(overall_encoding, d_out, [1, 1], name + 'mlp10', [1, 1], 'VALID', True, is_training) # B, N, 1, d_out
output_feat = helper_tf_util.conv2d(overall_encoding, d_out * 2, [1, 1], name + 'mlp11', [1, 1], 'VALID', True, is_training, activation_fn=tf.nn.leaky_relu) # B, N, 1, 2*d_out
return output_feat, shifted_neigh_xyz
@staticmethod
def random_sample(feature, pool_idx):
    """
    Max-pool point features over the neighbourhoods of the sub-sampled points.

    :param feature: [B, N, 1, d] input features matrix (singleton axis 2 is squeezed below)
    :param pool_idx: [B, N', max_num] N' < N, N' is the selected position after pooling
    :return: pool_features = [B, N', 1, d] pooled features matrix (keepdims preserves axis 2)
    """
    feature = tf.squeeze(feature, axis=2)
    # Neighbourhood size (max_num); feature depth d must be statically known.
    num_neigh = tf.shape(pool_idx)[-1]
    d = feature.get_shape()[-1]
    batch_size = tf.shape(pool_idx)[0]
    # Flatten indices so a single batch_gather collects all neighbours at once.
    pool_idx = tf.reshape(pool_idx, [batch_size, -1])
    pool_features = tf.batch_gather(feature, pool_idx)
    pool_features = tf.reshape(pool_features, [batch_size, -1, num_neigh, d])
    # Max over each neighbourhood; keepdims keeps the singleton axis that the
    # downstream conv2d layers expect.
    pool_features = tf.reduce_max(pool_features, axis=2, keepdims=True)
    return pool_features
@staticmethod
def nearest_interpolation(feature, interp_idx):
    """
    Upsample features by copying each up-sampled point's nearest neighbour feature.

    :param feature: [B, N, 1, d] input features matrix (singleton axis 2 is squeezed below)
    :param interp_idx: [B, up_num_points, 1] nearest neighbour index
    :return: [B, up_num_points, 1, d] interpolated features matrix
    """
    feature = tf.squeeze(feature, axis=2)
    batch_size = tf.shape(interp_idx)[0]
    up_num_points = tf.shape(interp_idx)[1]
    # Drop the trailing singleton axis so batch_gather sees [B, up_num_points].
    interp_idx = tf.reshape(interp_idx, [batch_size, up_num_points])
    interpolated_features = tf.batch_gather(feature, interp_idx)
    # Restore the singleton axis expected by the conv2d decoder layers.
    interpolated_features = tf.expand_dims(interpolated_features, axis=2)
    return interpolated_features
@staticmethod
def gather_neighbour(pc, neighbor_idx):
    # gather the coordinates or features of neighboring points
    # pc: [B, N, d]; neighbor_idx: [B, N, k] -> returns [B, N, k, d]
    batch_size = tf.shape(pc)[0]
    num_points = tf.shape(pc)[1]
    # Feature depth must be static (get_shape().value), unlike B and N.
    d = pc.get_shape()[2].value
    # Flatten neighbour indices so one batch_gather fetches everything.
    index_input = tf.reshape(neighbor_idx, shape=[batch_size, -1])
    features = tf.batch_gather(pc, index_input)
    features = tf.reshape(features, [batch_size, num_points, tf.shape(neighbor_idx)[-1], d])
    return features
13,019 | f78ee81141022d44754a8c2d03cddba9f428bcb5 | from rest_framework import serializers
from core.structures.company.models import Company
from enterprise.libs.base62 import base62_decode, base62_encode
from enterprise.libs.rest_module.exception import ErrorValidationException
from enterprise.structures.common.models import File
from api.company.serializers.address import (
AddressWriteSerializer,
AddressReadSerializer,
)
class CompanyListSerializer(serializers.ModelSerializer):
    """Read-only serializer for listing companies with nested address and logo."""

    logo = serializers.SerializerMethodField()
    address = serializers.SerializerMethodField()

    def get_address(self, instance):
        # Nested read representation; None when the company has no address.
        if instance.address:
            return AddressReadSerializer(instance.address).data
        return None

    def get_logo(self, instance):
        # id62 is the base62-encoded primary key of the logo File.
        if instance.logo:
            return {
                "id62": base62_encode(instance.logo_id),
                "url": instance.logo.get_safe_url(),
            }
        # Fix: explicit None for consistency with get_address (was an
        # implicit None fall-through).
        return None

    class Meta:
        model = Company
        fields = (
            "display_name",
            "business_type",
            "address",
            "description",
            "website",
            "logo",
        )
class CompanyWriteSerializer(serializers.ModelSerializer):
    """Create/update serializer; resolves a base62 logo id into a File instance."""

    # The client sends the logo as an id62 string, not a raw PK.
    logo = serializers.CharField()
    address = AddressWriteSerializer()

    def validate(self, attrs):
        validated_data = super().validate(attrs)
        logo = validated_data.get("logo")
        # Only non-deleted files are valid logo targets.
        logo_file = File.objects.filter(id62=logo, deleted_at__isnull=True).last()
        if not logo_file:
            # Bilingual (en/id) error payload consumed by the API error format.
            raise ErrorValidationException(
                "422",
                {"en": "Failed create company", "id": "Gagal membuat perusahaan"},
                {"logo": "Invalid file id62"},
                422,
            )
        # Replace the id62 string with the resolved model instance for save().
        validated_data["logo"] = logo_file
        return validated_data

    class Meta:
        model = Company
        fields = (
            "display_name",
            "business_type",
            "address",
            "description",
            "website",
            "logo",
        )
|
13,020 | 35b0096138bc2062071c66af505824d1c1e185cd | import discord.ext.commands as dec
import database.song
from commands.common import *
class Song:
    """Song insertion, querying and manipulation"""

    def __init__(self, bot):
        self._bot = bot
        # Async DB facade bound to the bot's event loop.
        self._db = database.song.SongInterface(bot.loop)

    # Long-form help texts per subcommand; entries starting with '*' are
    # privileged (operator-only) commands.
    _help_messages = {
        'group': 'Song information, querying and manipulation',

        'blacklist': '* Puts the specified song to the blacklist\n\n'
        'Song ID can be located in the square brackets just before the title. It is included in the status message '
        'and all the listings.\nThis does not prevent users from including blacklisted song in their playlist, song '
        'is skipped just before playing.',

        'deduplicate': '* Marks a song as a duplicate of another song\n\n'
        'This is a destructive operation. The duplicate is replaced by the "original" just before playing. All tests '
        '(blacklist, length, overplay) are performed on the "original" song.\nThis function is useful for replacing '
        'songs with a bad quality and is necessary for overplay protection to work correctly.\nSong IDs can be located '
        'in the square brackets just before the title. It is included in the status message and all the listings. You '
        'can also use \'search\' command to obtain the IDs.',

        'failed_clear': '* Removes the songs from the failed list\n\n'
        'Songs marked as duplicates are not affected. Individual songs can be removed by specifying their ID. You can '
        'use the command to fix the automatic playlist after a service outage or bot connection problems.',

        'failed_list': 'Lists all the songs that have failed to download\n\n'
        'Up to 20 songs are returned. Songs marked as a duplicate are considered resolved and are excluded from the '
        'list. Songs are automatically removed from this list after a successful download, or manually by using '
        '\'clear\' subcommand.\n\nSongs that are marked as failed to download are excluded from the automatic '
        'playlist. Bot operators are expected to investigate download issues and provide an alternative source for '
        'the songs if necessary.',

        'info': 'Displays information about the song stored in the database\n\n'
        'Mainly for debugging purposes, as an aid for the bot operators.',

        'permit': '* Removes the specified song from the blacklist\n\n'
        'Song ID can be located in the square brackets just before the title. It is included in the status message '
        'and all the listings.',

        'rename': '* Changes the title of a specified song\n\n'
        'This command can be used to rename the song stored in the database. It does not update the status message; '
        'the new name is used next time the song is played.\nSong ID can be located in the square brackets just before '
        'the title. It is included in the status message and all the listings.',

        'search': 'Queries the database for songs\n\n'
        'Title and UURI are matched against the specified keywords. All the keywords must match either the title or '
        'UURI. Up to 20 results are returned.\nThis command can be used to lookup song IDs.',

        'split': '* Marks a given song as an original\n\n'
        'This command can be used to fix duplication status of the song. After this command is issued, the song '
        'specified won\'t be marked as a duplicate anymore.\nThis is the inverse command to the \'deduplicate\'. '
        'Just like the \'deduplicate\', this command does not manipulate with timestamps nor credit counts.\nSong ID '
        'can be located in the square brackets just before the song title. It is included in the status message and '
        'all the listings.'
    }

    # Group entry point: only reached when no known subcommand matched.
    @dec.group(invoke_without_command=True, aliases=['s'], help=_help_messages['group'])
    async def song(self, subcommand: str, *arguments: str):
        raise dec.UserInputError('Command *song* has no subcommand named {}. Please use `{}help song` to list all '
                                 'the available subcommands.'
                                 .format(subcommand, self._bot.config['ddmbot']['delimiter']))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['blacklist'])
    async def blacklist(self, song_id: int):
        await self._db.blacklist(song_id)
        await self._bot.message('Song [{}] has been blacklisted'.format(song_id))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['deduplicate'])
    async def deduplicate(self, which_id: int, target_id: int):
        await self._db.merge(which_id, target_id)
        await self._bot.message('Song [{}] has been marked as a duplicate of the song [{}]'.format(which_id, target_id))

    @song.group(ignore_extra=False, invoke_without_command=True)
    async def failed(self):
        raise dec.UserInputError('You need to provide a subcommand to the *song failed* command')

    @privileged
    @failed.command(name='clear', ignore_extra=False, help=_help_messages['failed_clear'])
    async def failed_clear(self, song_id: int = None):
        # NOTE(review): this body duplicates the 'failed' group stub above and
        # never clears anything — looks like a copy-paste bug. Presumably it
        # should call the matching clear operation on self._db; TODO confirm
        # against database.song.SongInterface.
        raise dec.UserInputError('You need to provide a subcommand to the *song failed* command')

    @failed.command(name='list', ignore_extra=False, aliases=['l'], help=_help_messages['failed_list'])
    async def failed_list(self):
        items, total = await self._db.list_failed(20)
        if not items:
            await self._bot.whisper('There are no songs flagged because of a download failure')
            return
        reply = '**{} songs (out of {}) flagged because of a download failure:**\n **>** '.format(len(items), total) + \
                '\n **>** '.join(['[{}] {}'.format(*item) for item in items])
        await self._bot.whisper(reply)

    @song.command(ignore_extra=False, aliases=['i'], help=_help_messages['info'])
    async def info(self, song_id: int):
        info = await self._db.get_info(song_id)
        reply = '**Song [{id}] information:**\n' \
                '    **Source URL:** [{url}]\n' \
                '    **Title:** {title}\n' \
                '    **Last played:** {last_played!s}\n' \
                '    **Listener count:** {total_listener_count} ({listener_count})\n' \
                '    **Skip vote count:** {total_skip_vote_count} ({skip_vote_count})\n' \
                '    **Duration:** {duration}s\n' \
                '    **Credits remaining:** {credit_count}\n\n' \
                '    **Blacklisted:** {is_blacklisted}\n' \
                '    **Has failed to download:** {has_failed}\n\n' \
                '    **Marked as a duplicate of:** {duplicates}\n' \
                '    **Is duplicated by:** {duplicated_by}'.format_map(info)
        await self._bot.whisper(reply)

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['permit'])
    async def permit(self, song_id: int):
        await self._db.permit(song_id)
        await self._bot.message('Song [{}] has been removed from blacklist'.format(song_id))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['rename'])
    async def rename(self, song_id: int, new_title: str):
        await self._db.rename(song_id, new_title)
        await self._bot.message('Song [{}] has been renamed to "{}"'.format(song_id, new_title))

    @song.command(ignore_extra=False, aliases=['s'], help=_help_messages['search'])
    async def search(self, *keywords: str):
        items, total = await self._db.search(keywords, 20)
        if not items:
            await self._bot.whisper('Search for songs with keywords {} has not returned any result'.format(keywords))
            return
        reply = '**{} songs (out of {}) matching the keywords {}:**\n **>** '.format(len(items), total, keywords) + \
                '\n **>** '.join(['[{}] {}'.format(*item) for item in items])
        await self._bot.whisper(reply)

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['split'])
    async def split(self, song_id: int):
        # Merging a song into itself resets its duplicate status.
        await self._db.merge(song_id, song_id)
        await self._bot.message('Song [{}] has been marked as unique'.format(song_id))
|
13,021 | b87c156d420f357b740703541ad2a2dcff63c3d6 | from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from collections import Counter
from sklearn import metrics
import numpy as np
def KMeansTraining(X_train, Y_train, X_test, Y_test, folds=10):
    """K-fold evaluation of KMeans used as a classifier.

    For each fold, fits KMeans with one cluster per distinct training label,
    maps every cluster to the majority true label among its training members,
    then scores the mapped predictions on the fold's test split.

    :param X_train, Y_train: per-fold lists of training features / labels
    :param X_test, Y_test: per-fold lists of test features / labels
    :param folds: number of folds to evaluate
    :return: [per-fold accuracies, mean accuracy rounded to whole percent]
    """
    results = []
    # BUG FIX: the original reused `i` as both the fold index and the index of
    # several inner loops, so after the first inner loop every Y_train[i],
    # X_test[i] and Y_test[i] lookup addressed the wrong fold.
    for fold in range(folds):
        y_train = Y_train[fold]
        n_clusters = len(set(y_train))  # one cluster per distinct class label
        model = KMeans(n_clusters=n_clusters).fit(X_train[fold])
        # Collect the true labels of the training samples in each cluster.
        cluster_members = [[] for _ in range(n_clusters)]
        for sample_idx, cluster_id in enumerate(model.labels_):
            cluster_members[cluster_id].append(y_train[sample_idx])
        # Map each cluster to its most frequent true label.
        mapping = {c: Counter(cluster_members[c]).most_common(1)[0][0]
                   for c in range(n_clusters)}
        predicted = [mapping[c] for c in model.predict(X_test[fold])]
        results.append(metrics.accuracy_score(predicted, Y_test[fold]))
    accuracy = round(np.mean(results) * 100)
    return [results, accuracy]
13,022 | 7a56eb78010bcdcc416a6bd161d76e6b4ee21a8e | import random
import pickle
class Graph:
    """Adjacency-set graph; edges are mirrored when bidirectional."""

    def load(self, path):
        """Restore graph state from a pickled Graph stored at *path*."""
        with open(path, 'rb') as fh:
            other = pickle.load(fh)
        self.graph = other.graph
        self.bidirectional = other.bidirectional

    def save(self, path):
        """Pickle this graph to *path*."""
        with open(path, 'wb') as fh:
            pickle.dump(self, fh)

    def __init__(self, bidirectional=True):
        self.graph = {}
        self.bidirectional = bidirectional

    def vertices(self):
        """View of all vertex labels."""
        return self.graph.keys()

    def add_vertex(self, *vertex):
        """Add each given vertex unless it is already present."""
        for label in vertex:
            self.graph.setdefault(label, set())

    def add_edge(self, source, sink):
        """Connect source->sink (and the reverse when bidirectional).

        Self-loops and edges touching unknown vertices are ignored.
        """
        if source == sink:
            return
        if source in self.graph and sink in self.graph:
            self.graph[source].add(sink)
            if self.bidirectional:
                self.graph[sink].add(source)

    def assign_random_edges(self, p):
        """Add each possible edge with probability ~p.

        For bidirectional graphs the per-direction probability is halved,
        since every unordered pair is drawn twice.
        """
        if self.bidirectional:
            p /= 2
        for src in self.graph:
            for dst in self.graph:
                if random.random() < p:
                    self.add_edge(src, dst)

    def __str__(self):
        out = ''
        for label in self.graph:
            out += '\n{} {}'.format(label, self.graph[label] or '_')
        return out
if __name__ == '__main__':
    # Demo driver: flip generate to True to build and pickle a fresh random
    # graph; otherwise load the previously saved one. Either way, print it.
    generate = False
    graph = Graph()
    if generate:
        graph.add_vertex('a', 'b', 'c', 'd', 'e', 'f')
        graph.assign_random_edges(0.5)
        graph.save('saved_graph.pickle')
    else:
        graph.load('saved_graph.pickle')
    print(graph)
|
13,023 | 8eef1bba5ef99a23b705220a7f8a1bd1e7c8bb89 | #!/usr/bin/python3
import os
import time
import multiprocessing as mp
import cv2
import mss
import numpy as np
import pykitml as pk
# Values shared between processess
A_val = mp.Value('d', 0)
left_val = mp.Value('d', 0)
def on_frame(server, frame, A_val, left_val):
    """Per-frame emulator callback.

    Pulses Start (5 frames on, 5 off) so the game's menus register presses,
    and applies the shared A/left flags (multiprocessing.Value set by the
    inference process) to the joypad before advancing one frame.
    """
    # Toggle start button to start rounds
    if(frame%10 < 5): start = True
    else: start = False
    # Set joypad
    server.set_joypad(A=A_val.value==1, left=left_val.value==1, start=start)
    # Continue emulation
    server.frame_advance()
# Initialize and start server
def start_server(A_val, left_val):
    """Run the FCEUX bridge server (blocking) in the current process.

    A_val/left_val are multiprocessing.Value flags shared with the parent,
    which writes them from its inference loop.
    """
    server = pk.FCEUXServer(lambda server, frame: on_frame(server, frame, A_val, left_val))
    print(server.info)
    server.start()
if __name__ == '__main__':
    # Emulator bridge runs in a child process; this process does inference
    # from live screen captures and writes button flags into shared memory.
    p = mp.Process(target=start_server, args=(A_val, left_val))
    p.start()
    # Load models
    from sklearn.decomposition import PCA  # NOTE(review): PCA import appears unused (pca is loaded via joblib)
    from joblib import load
    pca = load('../lstm/pca.joblib')
    import torch
    import torch.nn as nn
    model = nn.LSTM(64, 3, 2).float()
    model.load_state_dict(torch.load('actor.pt'))
    last_render = time.time()  # NOTE(review): last_render/dps look unused below
    dps = 0
    with mss.mss() as sct:
        # Hard-coded capture region for the emulator window — TODO confirm
        # these screen coordinates match the target setup.
        monitor = {"top": 224, "left": 256-116, "width":256, "height":256}
        running = True
        while running:
            last_time = time.time()
            # Get raw pixels from the screen, save it to a Numpy array
            img = np.array(sct.grab(monitor))
            # Convert to gray scale
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
            # Resize image
            img = cv2.resize(src=img, dsize=(64, 64))
            # Reshape
            img = img.reshape(4096)
            # Normalize
            img = img/255
            # PCA
            img = pca.transform([img])
            outputs, _ = model(torch.from_numpy(np.array([img])).float())
            # Greedy one-hot over the three output logits; presumably
            # (A, left, no-op) — TODO confirm against training code.
            a, left, _ = torch.where(outputs.flatten() == outputs.flatten().max(), 1, 0)
            A_val.value = a.item()
            left_val.value = left.item()
            if A_val.value:
                print("Punch!")
            if left_val.value:
                print("Dodge!")
|
13,024 | fffdb5eb1408f984e0e2498b21214aeb44f9bb84 | # -*- coding: utf-8 -*-
# from utils import FileUtil
# fileNameList = FileUtil.GetFileListFromFolderWithExtension("/Users/SeekMac/Code_Projects/study_python/python3_projects/stock_analyse/data",
# False, [".csv"])
# for fileName in fileNameList.values():
# print(fileName)
import sys
import os
# Make the sibling ./utils directory importable regardless of the
# current working directory the script is launched from.
current_folder_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_folder_path+'/utils')
|
13,025 | 739622466fe39055f9a9f0141bb303dc0510ab9f | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from servicecatalog_puppet import constants
from servicecatalog_puppet.workflow.stack import stack_for_task
class StackTask(stack_for_task.StackForTask):
    """Fan-out task for one manifest stack.

    For every account/region the stack targets in the manifest, requires one
    provisioning task of the class chosen by get_klass_for_provisioning();
    this task itself only records completion.
    """

    def params_for_results_display(self):
        # Identifies this task in result displays / cache keys.
        return {
            "puppet_account_id": self.puppet_account_id,
            "stack_name": self.stack_name,
            "cache_invalidator": self.cache_invalidator,
        }

    def requires(self):
        requirements = list()
        klass = self.get_klass_for_provisioning()
        # One sub-task per (account, region, manifest task) combination.
        for (
            account_id,
            regions,
        ) in self.manifest.get_account_ids_and_regions_used_for_section_item(
            self.puppet_account_id, constants.STACKS, self.stack_name
        ).items():
            for region in regions:
                for task in self.manifest.get_tasks_for_launch_and_account_and_region(
                    self.puppet_account_id,
                    self.section_name,
                    self.stack_name,
                    account_id,
                    region,
                    single_account=self.single_account,
                ):
                    requirements.append(
                        klass(**task, manifest_file_path=self.manifest_file_path)
                    )
        return requirements

    def run(self):
        # No provisioning here: writing the output marks that all required
        # sub-tasks completed successfully.
        self.write_output(self.params_for_results_display())
|
13,026 | 75b909849dac671cd0f597b9bc55936b50afb0bf | from app import app
from app.models import *
from flask import render_template, make_response, url_for, abort, request, flash, redirect
import openslide
from openslide import ImageSlide, open_slide
from openslide.deepzoom import DeepZoomGenerator
import re
from unicodedata import normalize
from io import BytesIO
import json
import ast
@app.route('/')
def index():
    """Dashboard: every dataset and study, plus a stub user record."""
    user = {'name': 'Rebecca Stone'}
    # Show all the datasets and studies on the dashboard
    datasets = Dataset.query.all()
    studies = Study.query.all()
    return render_template(
        'index.html',
        datasets=datasets,
        studies=studies,
        user=user,
    )
@app.route('/new_dataset', methods=['GET', 'POST'])
def new_dataset():
    """Render the dataset-creation form; on a valid POST, flash and redirect."""
    form = DatasetForm(request.form)
    if request.method == 'POST' and form.validate():
        print(form.name.data)
        print(form.directory.data)
        print(form.images.data)
        # dataset = Dataset(form.name.data, form.images.data)
        # db_session.add(dataset)
        flash('New dataset created:' + str(form.name.data))
        # BUG FIX: url_for() takes the endpoint (function) name, not a URL
        # path — url_for('/new_dataset') raised werkzeug BuildError at runtime.
        return redirect(url_for('new_dataset'))
    return render_template('new_dataset.html', form=form)
# @app.route('/view/<int:study_id>/<int:image_id>')
@app.route('/run_study/<int:study_id>')
def run_study(study_id):
    """Start a study run by rendering its first image (index 0) directly."""
    study = Study.query.get(study_id)
    dataset = study.dataset
    image_num = 0  # NOTE(review): unused — view_single is called with a literal 0 below
    # return redirect(url_for('/view_single',
    #                 study_id=study.id,
    #                 dataset_id=dataset.id,
    #                 image_num=0))
    # Calls the view function directly instead of redirecting.
    return view_single(study.id, dataset.id, 0)
@app.route('/view_study/<int:study_id>')
def view_study(study_id):
    """Show the study overview page with its dataset and image count."""
    study = Study.query.get(study_id)
    dataset = study.dataset
    return render_template(
        'view_study.html',
        study=study,
        dataset=dataset,
        image_count=dataset.images.count(),
    )
# Temporary single page for viewing a single WSI - testing only.
@app.route('/view_single/<int:study_id>/<int:image_num>')
def view_single(study_id, image_num):
    """Open one whole-slide image in the Deep Zoom viewer.

    Side effects: caches DeepZoomGenerator instances and slide metadata on
    the global `app` object, so only one slide is "open" at a time.
    NOTE(review): two latent bugs — (1) in the study_id == -1 test branch,
    `study`, `dataset`, `image_num`-related template vars are never bound;
    (2) when open_slide() fails, `x`/`y` are unbound but still passed to the
    template, and slide.properties is dereferenced on a falsy slide.
    """
    # Test case only
    if study_id == -1:
        image_id = 102
        image_dir = "/Users/ysbecca/ysbecca-projects/iciar-2018/data/WSI_xml/Case_0001/"
        file_name = "A01.svs"
    else:
        study = Study.query.get(study_id)
        dataset = study.dataset
        image_count = dataset.images.count()
        if image_num >= image_count or image_num < 0:
            print("At end or beginning of study.")
            # Redirect to study page - study completed, or back to beginning.
            return render_template('view_study.html',
                                   study=study,
                                   dataset=dataset,
                                   image_count=image_count)
        else:
            image = dataset.images[image_num]
            image_id = image.id
            image_dir = image.file_dir
            file_name = image.file_name
    # Set single WSI options
    config_map = {
        'DEEPZOOM_TILE_SIZE': 'tile_size',
        'DEEPZOOM_OVERLAP': 'overlap',
        'DEEPZOOM_LIMIT_BOUNDS': 'limit_bounds',
    }
    opts = dict((v, app.config[k]) for k, v in config_map.items())
    print("========= " + image_dir + file_name + " ==========")
    slide = open_slide(image_dir + file_name)
    # Fetch the x and y dimensions of the original WSI
    if not slide:
        associated_urls = {}
        file_name = "ERROR: unable to load image " + file_name + " at " + image_dir
    else:
        x, y = slide.dimensions
        print("X, Y: " + str(x) + ", " + str(y))
        # Microns-per-pixel, averaged over both axes; 0 when unavailable.
        try:
            mpp_x = slide.properties[openslide.PROPERTY_NAME_MPP_X]
            mpp_y = slide.properties[openslide.PROPERTY_NAME_MPP_Y]
            app.slide_mpp = (float(mpp_x) + float(mpp_y)) / 2
        except (KeyError, ValueError):
            app.slide_mpp = 0
    # Save globally in app config variables
    slide_slug = slugify(file_name)
    app.slides = {
        slide_slug: DeepZoomGenerator(slide, **opts)
    }
    app.config['DEEPZOOM_SLIDE'] = image_dir + file_name
    app.associated_images = []
    app.slide_properties = slide.properties
    # Associated images (label, macro, ...) get their own Deep Zoom sources.
    for name, image in slide.associated_images.items():
        app.associated_images.append(name)
        slug = slugify(name)
        app.slides[slug] = DeepZoomGenerator(ImageSlide(image), **opts)
    slide_url = url_for('dzi', slug=slide_slug)
    associated_urls = dict((file_name, url_for('dzi', slug=slugify(file_name)))
                           for file_name in app.associated_images)
    return render_template('view_single.html', slide_url=slide_url,
                           associated=associated_urls, #, properties=app.slide_properties,
                           slide_mpp=app.slide_mpp,
                           image_id=image_id,
                           image_dir=image_dir,
                           file_name=file_name,
                           x=x, y=y,
                           study=study,
                           dataset_id=dataset.id,
                           image_num=image_num,
                           image_count=image_count)
@app.route('/save_annotations', methods=['POST'])
def save_annotations():
    """Parse annotation paths POSTed by the viewer and persist them to a file."""
    print("Received: " + str(request))
    data = request.values.to_dict(flat=False)
    # This is a hack because of the way the data is stored in a stringified string.
    # The whole payload arrives as a single dict *key*; pop it and parse with
    # ast.literal_eval (safe for literals, unlike eval).
    key, value = data.popitem()
    parsed_data = ast.literal_eval(key)
    paths = parsed_data["paths"]
    image_id = parsed_data["image_id"]  # NOTE(review): unused below — TODO confirm intent
    wsi_x, wsi_y = parsed_data["wsi_x"], parsed_data["wsi_y"]
    code = parsed_data["code"]
    file_name = parsed_data["file_name"]
    # Parse and save annotations; file_name[:-4] strips the extension.
    state = save_new_annotations_file(paths, wsi_x, wsi_y, code, file_name[:-4])
    return json.dumps({'status': state, 'data': str(paths) });
@app.route('/<slug>.dzi')
def dzi(slug):
    """Serve the Deep Zoom descriptor XML for a cached slide slug."""
    fmt = app.config['DEEPZOOM_FORMAT']
    try:
        descriptor = app.slides[slug].get_dzi(fmt)
    except KeyError:
        # Unknown slug
        abort(404)
    resp = make_response(descriptor)
    resp.mimetype = 'application/xml'
    return resp
@app.route('/<slug>_files/<int:level>/<int:col>_<int:row>.<format>')
def tile(slug, level, col, row, format):
    """Serve one Deep Zoom tile as a JPEG or PNG response."""
    format = format.lower()
    if format not in ('jpeg', 'png'):
        # Not supported by Deep Zoom
        abort(404)
    try:
        tile_image = app.slides[slug].get_tile(level, (col, row))
    except KeyError:
        # Unknown slug
        abort(404)
    except ValueError:
        # Invalid level or coordinates
        abort(404)
    buf = PILBytesIO()
    tile_image.save(buf, format, quality=app.config['DEEPZOOM_TILE_QUALITY'])
    resp = make_response(buf.getvalue())
    resp.mimetype = 'image/%s' % format
    return resp
return resp
def slugify(text):
    """Lower-case *text*, strip accents, and collapse every run of
    non-alphanumeric characters into a single '-'."""
    ascii_text = normalize('NFKD', text.lower()).encode('ascii', 'ignore').decode()
    return re.sub('[^a-z0-9]+', '-', ascii_text)
class PILBytesIO(BytesIO):
    """BytesIO variant whose fileno() raises AttributeError, because classic
    PIL probes fileno() and only handles AttributeError (not
    io.UnsupportedOperation)."""

    def fileno(self):
        raise AttributeError('Not supported')
|
13,027 | 04cacfbae4a44add236f25a52273badb91ca3008 |
#~POUTINE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# #
# It's a Web nano-framework in Python #
# #
#~Enjoy!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import inspect
import sys
import os
import wsgiref.util as wsgi
from urlparse import urlparse, urlunparse, urlsplit
class Poutine:
    """Minimal WSGI request handler.

    Parses the query string on construction, dispatches ?action=<name> to the
    method of the same name, and accumulates a response dict with 'output',
    'status' and 'location' keys.
    """

    # Class-level defaults kept for backward compatibility; __init__ always
    # rebinds both on the instance.
    environ = {}
    retval = {}

    def __init__(self, environ):
        self.environ = environ
        self.retval = {  # or : 303 See Other , or 404 Not Found
            'output': '',
            'status': '200 OK',
            'location': ''
        }
        self.qs = self.getquerystring()

    def dispatch(self):
        """Invoke the method named by ?action=... and return the response dict."""
        self.retval['output'] = getattr(self, self.qs['action'])()  # override
        return self.retval  # Return the response Dictionary

    def getpath(self):
        """Debug helper: the interpreter's sys.path, one entry per line."""
        return "\n".join(sys.path)

    def respond404(self):
        self.retval['status'] = '404 Not Found'
        self.retval['output'] = ''
        return self.retval

    def redirect(self, address):
        """303-redirect the client to ?action=<address>."""
        self.retval['output'] = ''
        self.retval['status'] = '303 See Other'
        self.retval['location'] = '?action=' + address
        return self.retval

    def getquerystring(self):
        """Parsed query-string parameters as a dict."""
        return self.parse_qs(self.environ['QUERY_STRING'])

    def getkey(self, key):
        """Value of a single query-string parameter (KeyError if absent)."""
        return self.getquerystring()[key]

    def getpost(self):
        """Parse a form-urlencoded POST body from wsgi.input."""
        try:
            request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
        except ValueError:
            request_body_size = 0
        request_body = self.environ['wsgi.input'].read(request_body_size)
        return self.parse_qs(request_body)

    def parse_qs(self, qs):
        """Parse 'a=1&b=2' into {'a': '1', 'b': '2'}.

        Fixes over the original: the dead `len(querylist) < 1` branch is
        removed (str.split never returns an empty list), and a parameter
        without '=' now maps to '' instead of raising IndexError.
        """
        if qs == '':
            return {}
        querydict = {}
        for item in qs.split('&'):
            name, _, value = item.partition('=')
            querydict[name] = value
        return querydict
|
13,028 | 5de1dc8338b33c8e409c7533c148dc36596ca27d | import glob
import os
from commands import *
user_folders = glob.glob('/Users/mihirkelkar/code/Text_Mining_Enron/Length_data/*')

# Normalise each Enron user's sent-mail folder name to 'sent_items'.
# The corpus uses 'sent', '_sent_mail' or 'sent_items' depending on the user.
# NOTE(review): Python 2 script ('print' statements; the 'commands' module
# was removed in Python 3 — use subprocess there).
for user in user_folders:
    print user
    user_sub_dirs = glob.glob(user + '/*')
    print len(user_sub_dirs)
    if user + '/sent_items' not in user_sub_dirs:
        # Run the rename from inside the user's directory.
        os.chdir(user)
        if user + '/sent' in user_sub_dirs:
            command = "mv sent sent_items"
            print getoutput(command)
        elif user + '/_sent_mail' in user_sub_dirs:
            command = "mv _sent_mail sent_items"
            print getoutput(command)
|
13,029 | 4e32d24bb04c450426f2a4e802261454f8261ae3 | from flask import render_template
from app import app, db
@app.errorhandler(404)
def not_found_error(error):
    """!
    Handler for HTTP 404 "File not found" errors.
    @param error - the error object/code
    @return the rendered error page (404.html) with status 404
    """
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """!
    Handler for HTTP 500 internal server errors.
    @param error - the error object/code
    @return the rendered error page (500.html) with status 500
    """
    # Roll back the session: a failed DB transaction may be what caused the 500.
    db.session.rollback()
    return render_template('500.html'), 500
|
13,030 | 735f713eed63df62a4f2a5630d6bfdaaa976cb52 | import time
class node:
    """Singly-linked-list cell: a payload plus a reference to the next cell."""

    def __init__(self, item, next):
        self.item = item
        self.next = next
class linkedlist:
    """Singly linked list of unique items with front insertion."""

    def __init__(self):
        self.first = None

    def traverse(self, callback):
        """Call *callback* on every stored item, front to back."""
        cur = self.first
        while cur:
            callback(cur.item)
            cur = cur.next

    def count(self):
        """Number of stored items."""
        total = 0
        cur = self.first
        while cur:
            total += 1
            cur = cur.next
        return total

    def retrieve(self, item):
        """Return the stored item equal to *item*, or None if absent."""
        cur = self.first
        while cur:
            if cur.item == item:
                return cur.item
            cur = cur.next
        return None

    def insert(self, item):
        """Prepend *item* unless an equal item exists; report success."""
        if self.exists(item):
            return False
        self.first = node(item, self.first)
        return True

    def exists(self, item):
        """True if an item equal to *item* is stored."""
        cur = self.first
        while cur:
            if cur.item == item:
                return True
            cur = cur.next
        return False

    def delete(self, item):
        """Remove the item equal to *item*; report whether it was found."""
        if not self.exists(item):
            return False
        if self.first.item == item:
            self.first = self.first.next
            return True
        prev = self.first
        while prev.next.item != item:
            prev = prev.next
        prev.next = prev.next.next
        return True
class student:
    """Student record.

    Equality and ordering compare the normalized SSN (dashes stripped) only,
    so identity is per-person rather than per-field.
    Idiom fix: every comparison returned True/False through an if/else; each
    now returns the comparison expression directly (behavior unchanged).
    """

    def __init__(self, last, first, ssn, email, age):
        self.last = last
        self.first = first
        # Normalize the SSN to digits only so '123-45-6789' == '123456789'.
        self.ssn = ssn.replace("-", "")
        self.email = email
        self.age = float(age)

    def __eq__(self, rhs):
        return self.ssn == rhs.ssn

    def __ne__(self, rhs):
        return self.ssn != rhs.ssn

    def __le__(self, rhs):
        return self.ssn <= rhs.ssn

    def __lt__(self, rhs):
        return self.ssn < rhs.ssn

    def __gt__(self, rhs):
        return self.ssn > rhs.ssn

    def __ge__(self, rhs):
        return self.ssn >= rhs.ssn
# Module-level accumulators intended for use as linkedlist.traverse callbacks.
gtotalage=0.0
def addage(student):
    # Accumulate every student's age into the global running total.
    global gtotalage
    gtotalage+=student.age
gretage=0.0
def addretage(student):
    # Accumulate ages for the "retrieve" pass only.
    global gretage
    gretage+=student.age
def main():
    """Timing driver scaffold: meant to load students from a file into a
    linked list, then time traverse / delete / retrieve passes.

    NOTE(review): references a list object `l` that is never created here —
    the '#create listobject' TODO must be completed before this can run.
    """
    global gtotalage
    global gretage
    ti=time.time()
    #open files
    #create listobject
    # go through input file and strip and split
    ####### Traverse #######
    aveage=0
    gtotalage=0.0
    l.traverse(addage)
    #average age
    ######## Delete ########
    #delete students
    ####### Retrieve #######
    #go through your list object and get age of selected students then get the average
main()
|
13,031 | ca038afa62c88640ee16bb7391081d0bc84c6d7e | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-25 17:21
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see file header); alters
    # several UserProfile fields. Avoid hand-editing beyond noted defaults.

    dependencies = [
        ('users', '0008_auto_20160925_1705'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='address',
            field=models.CharField(max_length=256),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='dateofbirth',
            # One-off default (the makemigrations timestamp) used only to
            # back-fill existing rows; preserve_default=False drops it after.
            field=models.DateField(default=datetime.datetime(2016, 9, 25, 17, 21, 34, 805270, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='facebook',
            field=models.URLField(),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='field_of_work',
            field=models.CharField(max_length=128),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='gender',
            field=models.CharField(max_length=10),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='image',
            field=models.ImageField(upload_to='profile_images'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='linkedin',
            field=models.URLField(),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='team_members',
            field=models.ManyToManyField(to='users.UserProfile'),
        ),
    ]
|
13,032 | 4e2f6b10bb2c0b2c272c31ab518714d4c5ee9223 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
from django.contrib.auth.hashers import make_password
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import LiveServerTestCase
from django.urls import reverse
from django.core import management
from django.test.utils import override_settings
import time
from headLine.models import *
import os
class TestingRegistration(StaticLiveServerTestCase):
    """Browser-driven registration test against a live Django test server."""

    #Set up Test
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One Chrome instance shared by the whole class; implicit waits make
        # element lookups retry for up to 15s before failing.
        cls.chrome = webdriver.Chrome(ChromeDriverManager().install())
        cls.chrome.implicitly_wait(15)

    @classmethod
    def tearDownClass(cls):
        cls.chrome.quit()
        super().tearDownClass()

    def setUp(self):
        # Reset the DB before each test, then re-seed the fixtures below.
        management.call_command('flush', verbosity=0, interactive=False)
        self._database_data()

    #Test Registration
    @override_settings(DEBUG=True)
    def testing_Register(self):
        # NOTE(review): the time.sleep() calls make this test slow and are a
        # flakiness smell — explicit WebDriverWait conditions would be safer.
        #Go to register page
        self.chrome.get(self.live_server_url+"/register/")
        time.sleep(2)
        #Fill in Registration form
        user_field = self.chrome.find_element_by_name("username")
        user_password = self.chrome.find_element_by_name("password")
        user_email = self.chrome.find_element_by_name("email")
        user_fname = self.chrome.find_element_by_name("first_name")
        user_lname = self.chrome.find_element_by_name("last_name")
        dob_field = self.chrome.find_element_by_name("date_of_birth")
        image_field = self.chrome.find_element_by_name("profile_photo")
        user_field.send_keys("test_new_username")
        user_password.send_keys("testpassword")
        user_email.send_keys("ibby@yahoo.com")
        user_fname.send_keys("Bob")
        user_lname.send_keys("Smith")
        dob_field.clear()
        dob_field.send_keys("01012020")
        # File inputs need an absolute path to an existing fixture image.
        image_field.send_keys(os.path.abspath("media/uploads/cattest2.jpg"))
        time.sleep(2)
        #Submit registration form
        login_button = self.chrome.find_element_by_id("login-button")
        login_button.click()
        time.sleep(4)

    #Load test data into test database
    def _database_data(self):
        user = User.objects.create(
            id=1,
            username='testusername',
            password=make_password("testpassword")
        )
        category = Category.objects.create(
            id=1,
            category_name="Category Temp",
            category_description="Testing Category"
        )
        # Link the seeded user (pk=1) to the category.
        category.selected_by.add(1)
        article = Article.objects.create(
            id=1,
            article_title="Article Temp",
            article_body="Once upon a time there was a Covid Aladeen.",
            article_summary="COVID FLASHNEWS",
            category=category
        )
|
13,033 | e01c1fdc20f2b908d2a9193d70024ae515909dee | import datetime
import unittest
import pandas as pd
from QUANTAXIS import QUANTAXIS as QA
from QUANTAXIS.QAData import (QA_DataStruct_Index_day, QA_DataStruct_Index_min,
QA_DataStruct_Stock_block,
QA_DataStruct_Stock_day, QA_DataStruct_Stock_min,
QA_DataStruct_Stock_transaction)
from QUANTAXIS.QAData.base_datastruct import _quotation_base
from QUANTAXIS.QAFetch.QAQuery import (QA_fetch_index_day, QA_fetch_index_min,
QA_fetch_stock_day, QA_fetch_stock_full,
QA_fetch_stock_min)
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_day_adv
from QUANTAXIS.QAUtil.QAParameter import (DATABASE_TABLE, DATASOURCE,
FREQUENCE, MARKET_TYPE,
OUTPUT_FORMAT)
class quotation_base_test(unittest.TestCase):
    """Tests for _quotation_base-backed data structures: iteration, the
    iterator protocol, reversed() (unsupported), __add__/__sub__ and
    __getitem__."""

    def test_quotation_base_class(self):
        """Smoke-test fetching a daily quotation as a DataFrame via QA_quotation."""
        df_from_Tdx = QA.QA_quotation('300439', '2018-04-01', '2018-04-10', frequence=FREQUENCE.DAY,
                                      market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME)

    def test_quotation_base_class_iter_(self):
        """Iterate a DataStruct with a for-loop and via the raw iterator protocol."""
        qaDAStruct = QA_fetch_stock_day_adv('300439')
        for iRow in qaDAStruct:
            print(iRow)
        iterObj = qaDAStruct.__iter__()
        a = type(iterObj)
        print(a)
        i = iterObj.__next__()
        print(i)
        i = iterObj.__next__()
        print(i)
        i = iterObj.__next__()
        print(i)

    def do0_ReverseAttributes_test(self):
        """Helper (name avoids pytest auto-discovery): calling reversed() on a
        DataStruct — expected to raise NotImplementedError."""
        qaDAStruct = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-10")
        rev_qaDAStruct = reversed(qaDAStruct)
        list1 = []
        for iRow in qaDAStruct:
            print(iRow)
            list1.append(iRow)
        print('--------------')
        list2 = []
        for iRowRev in rev_qaDAStruct:
            print(iRowRev)
            list2.append(iRowRev)
        print('--------------')

    def test_quotation_base_class_reverse_(self):
        # reversed() is not implemented on the base datastruct.
        self.assertRaises(NotImplementedError, self.do0_ReverseAttributes_test)

    def test_quotation_base_class_add_(self):
        """Adding two adjacent date ranges should reproduce the full range."""
        qaDAStruct0 = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-10")
        qaDAStruct0.show()
        qaDAStruct1 = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-05")
        qaDAStruct2 = QA_fetch_stock_day_adv('300439', start="2018-01-06", end="2018-01-10")
        qaDAStruct3 = qaDAStruct1 + qaDAStruct2
        qaDAStruct3.show()
        # TODO: investigate why the underlying DataFrames do not compare equal
        # via DataFrame.equals (possibly index ordering / dtype differences).
        b = qaDAStruct0().equals(qaDAStruct3())
        #self.assertEqual(b, True)
        list1 = []
        for iRow1 in qaDAStruct0:
            list1.append(iRow1)
        list2 = []
        # BUG FIX: the original iterated qaDAStruct0 here a second time, so the
        # row-by-row comparison below compared the struct with itself and could
        # never fail. The summed struct qaDAStruct3 is what must be compared.
        for iRow2 in qaDAStruct3:
            list2.append(iRow2)
        len1 = len(list1)
        for iIndex in range(len1):
            aRow = list1[iIndex]
            bRow = list2[iIndex]
            # Corresponding rows of the original and the summed struct must match.
            v = aRow.equals(bRow)
            self.assertEqual(v, True)

    def test_quotation_base_class_sub_(self):
        """Subtracting one range from a sum should leave exactly the other range."""
        qaDAStruct0 = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-10")
        qaDAStruct0.show()
        qaDAStruct1 = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-05")
        qaDAStruct2 = QA_fetch_stock_day_adv('300439', start="2018-01-06", end="2018-01-10")
        qaDAStruct3 = qaDAStruct1 + qaDAStruct2
        qaDAStruct4 = qaDAStruct3 - qaDAStruct1
        list1 = []
        for iRow1 in qaDAStruct4:
            list1.append(iRow1)
        list2 = []
        for iRow2 in qaDAStruct2:
            list2.append(iRow2)
        len1 = len(list1)
        for iIndex in range(len1):
            aRow = list1[iIndex]
            bRow = list2[iIndex]
            # Corresponding rows of (sum - part1) and part2 must be equal.
            v = aRow.equals(bRow)
            self.assertEqual(v, True)

    def test_GetItem(self):
        """__getitem__ with a column name returns that column."""
        print("ok get item")
        qaDAStruct0 = QA_fetch_stock_day_adv('300439', start="2018-01-01", end="2018-01-10")
        closePrices = qaDAStruct0.__getitem__('close')
        print(closePrices)

    def test_00(self):
        """Smoke-test fetching daily index data."""
        res = QA.QA_fetch_index_day('000300', '2017-01-01', '2018-01-05')
        print(res)

    # TODO: add tests for __getattr__, ix, iloc and loc accessors.
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
13,034 | ae2e56bc2c72a4d0c6d36084bfa83a565aae938f | #Reverse the elements of the list
def freverse(seq):
    """Return *seq* with its elements in reverse order.

    Works for any sliceable sequence (tuple, list, str, ...); the result has
    the same type as the input.
    """
    return seq[::-1]


# Demo: reverse a 16-element tuple and show before/after.
x = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
print(x)
print(" After the reverse ")
k = freverse(x)
print(k)
|
13,035 | fe85f628343f61afdefc13f45240bbd16b31a06c | import numpy as np
import pandas as pd
from tqdm.auto import tqdm as tqdm
import os, sys, time, pickle, argparse
sys.path.append('..')
import torch as torch
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.decomposition import PCA
from feature_extraction import *
from model_options import *
def get_prepped_model(model_string):
    """Instantiate the model registered under *model_string*, switch it to
    eval mode, and move it to the GPU when one is available."""
    options = get_model_options()
    # NOTE: the constructor is stored as source text in the options table and
    # eval'd here — only trusted, locally-defined option strings reach this point.
    net = eval(options[model_string]['call'])
    net = net.eval()
    if torch.cuda.is_available():
        net = net.cuda()
    return net
def check_model(model_string, model = None):
    """Raise ValueError when *model_string* is not a known prepped model and no
    explicit *model* object was supplied to use instead."""
    model_options = get_model_options()
    # BUG FIX: compare to None with `is`, not `==` — equality can be overridden
    # by model objects (e.g. tensor-like __eq__) and is non-idiomatic (PEP 8).
    if model_string not in model_options and model is None:
        raise ValueError('model_string not available in prepped models. Please supply model object.')
def check_reduction_inputs(feature_maps = None, model_inputs = None):
    """Validate that a data source was provided for a reduction routine.

    Raises:
        ValueError: if neither *feature_maps* nor *model_inputs* is given, or
            if *model_inputs* is neither a DataLoader nor a torch.Tensor.
    """
    # BUG FIX: use `is None` instead of `== None` — `==` dispatches to the
    # object's __eq__ (elementwise for array-likes) and is non-idiomatic.
    if feature_maps is None and model_inputs is None:
        raise ValueError('Neither feature_maps nor model_inputs are defined.')
    if model_inputs is not None and not isinstance(model_inputs, (DataLoader, torch.Tensor)):
        raise ValueError('model_inputs not supplied in recognizable format.')
def get_feature_map_filepaths(model_string, feature_map_names, output_dir):
    """Map each feature-map name to its cached .npy path under *output_dir*.

    Note: *model_string* is accepted for interface symmetry with the callers
    but is not used in the path itself (output_dir already encodes the model).
    """
    filepaths = {}
    for name in feature_map_names:
        filepaths[name] = os.path.join(output_dir, name + '.npy')
    return filepaths
def torch_corrcoef(m):
    """Row-wise Pearson correlation matrix of a 2-D tensor.

    Torch analogue of np.corrcoef: rows are variables, columns observations.
    The result is clamped to [-1, 1] to absorb floating-point drift.
    """
    # Center each row, then form the sample covariance matrix.
    centered = m - torch.mean(m, dim=1)[:, None]
    cov = centered.mm(centered.t()) / (centered.size(1) - 1)
    # Normalize by the per-row standard deviations (sqrt of the diagonal).
    std = torch.pow(torch.diag(cov), 0.5)
    corr = cov.div(std.expand_as(cov))
    corr = corr.div(std.expand_as(corr).t())
    return torch.clamp(corr, -1.0, 1.0)
def srp_extraction(model_string, model = None, feature_maps = None, model_inputs=None, output_dir='./srp_arrays',
    n_projections=None, eps=0.1, seed = 0, keep_feature_maps = True):
    """Reduce each of a model's feature maps with sparse random projections (SRP).

    Projected arrays are cached as .npy files under
    output_dir/<n_projections>_<seed>/<model_string>/ and reloaded on later calls.

    Args:
        model_string: key naming the model (also names the cache folder).
        model: optional pre-built model; built via get_prepped_model if omitted.
        feature_maps: optional {layer_name: 2-D array} dict; skips extraction.
        model_inputs: DataLoader or Tensor used to extract maps when needed.
        output_dir: cache root directory.
        n_projections: target dimensionality; defaults to the Johnson-Lindenstrauss
            minimum dimension for the sample count at distortion *eps*.
        eps: JL distortion tolerance, used only when n_projections is None.
        seed: random_state for SparseRandomProjection (part of the cache path).
        keep_feature_maps: if False, drop each source map after projecting it.

    Returns:
        dict mapping layer name -> projected (or passed-through) numpy array.
    """
    check_model(model_string, model)
    check_reduction_inputs(feature_maps, model_inputs)
    device_name = 'CPU' if not torch.cuda.is_available() else torch.cuda.get_device_name()
    if n_projections is None:
        # Infer the sample count so the JL lemma can size the projection.
        if feature_maps is None:
            if isinstance(model_inputs, torch.Tensor):
                n_samples = len(model_inputs)
            if isinstance(model_inputs, DataLoader):
                n_samples = len(model_inputs.dataset)
        if feature_maps is not None:
            n_samples = next(iter(feature_maps.values())).shape[0]
        n_projections = johnson_lindenstrauss_min_dim(n_samples, eps=eps)
    print('Computing {} SRPs for {} on {}...'.format(n_projections, model_string, device_name))
    # Cache path encodes both the projection count and the seed.
    output_dir = os.path.join(output_dir, str(n_projections) + '_' + str(seed), model_string)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if feature_maps is None:
        if model == None:
            model = get_prepped_model(model_string)
        model = prep_model_for_extraction(model)
        feature_map_names = get_empty_feature_maps(model, names_only = True)
        output_filepaths = get_feature_map_filepaths(model_string, feature_map_names, output_dir)
        # Only run the (expensive) forward passes if some layer is missing from cache.
        if not all([os.path.exists(file) for file in output_filepaths.values()]):
            feature_maps = get_all_feature_maps(model, model_inputs)
    if feature_maps is not None:
        feature_map_names = list(feature_maps.keys())
        output_filepaths = get_feature_map_filepaths(model_string, feature_map_names, output_dir)
    srp_feature_maps = {}
    for feature_map_name in tqdm(feature_map_names):
        output_filepath = output_filepaths[feature_map_name]
        if not os.path.exists(output_filepath):
            feature_map = feature_maps[feature_map_name]
            if feature_map.shape[1] >= n_projections:
                srp = SparseRandomProjection(n_projections, random_state=seed)
                srp_feature_maps[feature_map_name] = srp.fit_transform(feature_map)
            # NOTE(review): when shape[1] == n_projections BOTH branches run and
            # the raw map overwrites the projection just computed — presumably a
            # deliberate pass-through for "not more features than projections",
            # but the equality case looks accidental; confirm.
            if feature_map.shape[1] <= n_projections:
                srp_feature_maps[feature_map_name] = feature_map
            np.save(output_filepath, srp_feature_maps[feature_map_name])
        if os.path.exists(output_filepath):
            # Always reload from disk so the returned arrays match the cache.
            srp_feature_maps[feature_map_name] = np.load(output_filepath, allow_pickle=True)
        if not keep_feature_maps:
            feature_maps.pop(feature_map_name)
    return(srp_feature_maps)
def rdm_extraction(model_string, model = None, feature_maps = None, model_inputs = None, output_dir='./rdm_arrays',
    use_torch_corr = False, append_suffix = False, verbose=False):
    """Compute (and cache) per-layer representational dissimilarity matrices.

    Each layer's feature map is reduced to its row-wise correlation matrix; the
    full {layer: matrix} dict is pickled under *output_dir* and reloaded on
    subsequent calls.

    Args:
        use_torch_corr: correlate with torch_corrcoef (GPU if available)
            instead of np.corrcoef.
        append_suffix: write "<model>_rdms.pkl" instead of "<model>.pkl".
        verbose: print a progress banner.

    Returns:
        dict mapping layer name -> numpy correlation matrix.
    """
    check_model(model_string, model)
    check_reduction_inputs(feature_maps, model_inputs)
    device_name = 'CPU' if not torch.cuda.is_available() else torch.cuda.get_device_name()
    if verbose:
        print('Computing RDMS for {} on {}...'.format(model_string, device_name))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_file = os.path.join(output_dir, model_string + '_rdms.pkl'
                               if append_suffix else model_string + '.pkl')
    if os.path.exists(output_file):
        # Cache hit: trust the pickled matrices from a previous run.
        model_rdms = pickle.load(open(output_file,'rb'))
    if not os.path.exists(output_file):
        if feature_maps is None:
            if model == None:
                model = get_prepped_model(model_string)
            model = prep_model_for_extraction(model)
            # Keep maps as torch tensors when correlating on the GPU.
            feature_maps = get_all_feature_maps(model, model_inputs, numpy = not use_torch_corr)
        model_rdms = {}
        for model_layer in tqdm(feature_maps, leave=False):
            if use_torch_corr:
                feature_map = feature_maps[model_layer]
                if torch.cuda.is_available():
                    feature_map = feature_map.cuda()
                model_rdm = torch_corrcoef(feature_map).cpu()
                model_rdms[model_layer] = model_rdm.numpy()
            if not use_torch_corr:
                model_rdms[model_layer] = np.corrcoef(feature_maps[model_layer])
        with open(output_file, 'wb') as file:
            pickle.dump(model_rdms, file)
    return(model_rdms)
def pca_extraction(model_string, model = None, feature_maps = None, model_inputs=None, output_dir='./pca_arrays',
    n_components=None, use_imagenet_pca = True, imagenet_sample_path = None, keep_feature_maps = True):
    """Reduce each feature map with PCA, fit either on an ImageNet sample or
    directly on the stimulus feature maps, caching results as .npy files.

    Args:
        n_components: number of PCs; defaults to 1000 (ImageNet mode) or to the
            stimulus sample count (direct mode).
        use_imagenet_pca: fit the PCA basis on a 1000-image ImageNet sample
            (requires *imagenet_sample_path*) rather than on the stimuli.
        imagenet_sample_path: .npy file of ImageNet images for basis fitting.
        keep_feature_maps: if False, drop source maps after transforming them.

    Returns:
        dict mapping layer name -> PCA-transformed numpy array.
    """
    check_model(model_string, model)
    check_reduction_inputs(feature_maps, model_inputs)
    if use_imagenet_pca == True and imagenet_sample_path is None:
        raise ValueError('use_imagenet_pca selected, but imagenet_sample_path not specified.')
    # Infer the stimulus sample count (bounds n_components in direct mode).
    if feature_maps is None:
        if isinstance(model_inputs, torch.Tensor):
            n_samples = len(model_inputs)
        if isinstance(model_inputs, DataLoader):
            n_samples = len(model_inputs.dataset)
    if feature_maps is not None:
        n_samples = next(iter(feature_maps.values())).shape[0]
    if n_components is not None:
        if n_components > 1000 and use_imagenet_pca:
            raise ValueError('Requesting more components than are available with PCs from imagenet sample.')
        if n_components > n_samples:
            raise ValueError('Requesting more components than are available with stimulus set sample size.')
    if n_components is None:
        if use_imagenet_pca:
            n_components = 1000
        if not use_imagenet_pca:
            n_components = n_samples
    device_name = 'CPU' if not torch.cuda.is_available() else torch.cuda.get_device_name()
    pca_type = 'imagenet_1000' if use_imagenet_pca else 'stimulus_direct'
    pca_printout = '1000 ImageNet PCs' if use_imagenet_pca else 'up to {} Stimulus PCs'.format(n_components)
    print('Computing {} for {} on {}...'.format(pca_printout, model_string, device_name))
    output_dir = os.path.join(output_dir, pca_type, model_string)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if model == None:
        model = get_prepped_model(model_string)
    model = prep_model_for_extraction(model)
    if feature_maps is None:
        feature_map_names = get_empty_feature_maps(model, names_only = True)
        output_filepaths = get_feature_map_filepaths(model_string, feature_map_names, output_dir)
        # Only run the expensive forward passes when some layer is uncached.
        if not all([os.path.exists(file) for file in output_filepaths.values()]):
            print('Now extracting feature maps for stimulus set...')
            feature_maps = get_all_feature_maps(model, model_inputs)
    if feature_maps is not None:
        feature_map_names = list(feature_maps.keys())
        output_filepaths = get_feature_map_filepaths(model_string, feature_map_names, output_dir)
    if not all([os.path.exists(file) for file in output_filepaths.values()]) and use_imagenet_pca:
        # Extract the ImageNet-sample feature maps used to fit the PCA basis.
        imagenet_images, imagenet_transforms = np.load(imagenet_sample_path), get_image_transforms()['imagenet']
        imagenet_loader = DataLoader(Array2DataSet(imagenet_images, imagenet_transforms), batch_size=64)
        print('Now extracting feature maps for imagenet_sample...')
        imagenet_feature_maps = get_all_feature_maps(model, imagenet_loader)
    print('Computing PCA transforms...')
    pca_feature_maps = {}
    for feature_map_name in tqdm(feature_map_names):
        output_filepath = output_filepaths[feature_map_name]
        if not os.path.exists(output_filepath):
            feature_map = feature_maps[feature_map_name]
            n_features = feature_map.shape[1]
            # NOTE(review): this rebinding of n_components persists across loop
            # iterations, so one low-dimensional layer permanently caps the
            # component count for every later layer — looks unintended; confirm.
            if n_components > n_features:
                n_components = n_features
            if use_imagenet_pca:
                imagenet_feature_map = imagenet_feature_maps[feature_map_name]
                pca = PCA(n_components, random_state=0).fit(imagenet_feature_map)
                pca_feature_maps[feature_map_name] = pca.transform(feature_map)
            if not use_imagenet_pca:
                pca = PCA(n_components, random_state=0).fit(feature_map)
                pca_feature_maps[feature_map_name] = pca.transform(feature_map)
            np.save(output_filepath, pca_feature_maps[feature_map_name])
        if os.path.exists(output_filepath):
            # Always reload from disk so the returned arrays match the cache.
            pca_feature_maps[feature_map_name] = np.load(output_filepath, allow_pickle=True)
        if not keep_feature_maps:
            feature_maps.pop(feature_map_name)
            if use_imagenet_pca:
                imagenet_feature_maps.pop(feature_map_name)
    return(pca_feature_maps)
|
13,036 | d7630aa28d678316f061fd37b49e9747eda361e7 |
def read_data(filename="data/input1.data"):
    """Return the full text contents of *filename* (puzzle input)."""
    with open(filename) as handle:
        return handle.read()
def rot(l, i):
    """Return l[i] if the element halfway around the circular list equals it,
    otherwise 0 (Advent of Code 2017 day 1, part 2 pairing rule)."""
    half = len(l) // 2
    opposite = (i + half) % len(l)
    if l[opposite] == l[i]:
        return l[opposite]
    return 0
if __name__ == "__main__":
    # Part 1: sum digits that match the NEXT digit, circularly. Appending the
    # first digit to the end turns the circular comparison into a linear one.
    # ROBUSTNESS FIX: strip surrounding whitespace — a trailing newline in the
    # input file would otherwise crash int() with a ValueError.
    captcha = [int(x) for x in read_data().strip()]
    captcha.append(captcha[0])
    print(sum([captcha[i] for i in range(1, len(captcha)) if captcha[i-1] == captcha[i]]))
    # Part 2: re-read the digits (without the appended sentinel) and sum those
    # that match the digit halfway around the circular list.
    captcha = [int(x) for x in read_data().strip()]
    total = 0
    for i in range(len(captcha)):
        total += rot(captcha, i)
    print(total)
|
13,037 | b63bd2d80f409d077fb2905202fe1d4d78a99132 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 11:30:49 2019
@author: Aditya Tanwar
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the t-shirt measurements; the first column (an id/label) is dropped.
dataset = pd.read_csv('tshirts.csv')
features = dataset.iloc[:,1: ].values
# Quick look at the raw (height, weight) points before clustering.
plt.scatter(features[:,0], features[:,1])
plt.show()
from sklearn.cluster import KMeans
# Cluster into 3 shirt sizes with seeded k-means++ init for reproducibility.
kmeans =KMeans(n_clusters = 3, init = 'k-means++', random_state = 0)
pred_cluster1 = kmeans.fit_predict(features)
# Plot each cluster in its own color, plus the fitted centroids.
plt.scatter(features[pred_cluster1 == 0, 0], features[pred_cluster1 == 0, 1], c = 'blue', label = 'small')
plt.scatter(features[pred_cluster1 == 1, 0], features[pred_cluster1 == 1, 1], c = 'red', label = 'medium')
plt.scatter(features[pred_cluster1 == 2, 0], features[pred_cluster1 == 2, 1], c = 'green', label = 'large')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c = 'yellow', label = 'Centroids')
plt.title('Clusters of datapoints')
plt.xlabel('height')
plt.ylabel('weight')
plt.legend()
plt.show()
# Reference centroids for each size.
# NOTE(review): k-means cluster indices are arbitrary. The plot above labels
# clusters 0/1/2 as small/medium/large, but the mapping below uses 2/0/1 —
# the two are inconsistent, so at least one is likely wrong; confirm against
# the actual centroid values before relying on these names.
small=kmeans.cluster_centers_[2]
medium=kmeans.cluster_centers_[0]
large=kmeans.cluster_centers_[1]
|
13,038 | 2046a74bb0d09852f40672d3f0eac7f19a31b9be | # Generated by Django 3.1.2 on 2020-12-04 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine User.picture as a CharField with
    max_length=400 (verbose_name is Chinese for "user avatar")."""

    dependencies = [
        ('users', '0002_user_role'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='picture',
            field=models.CharField(max_length=400, verbose_name='用户头像'),
        ),
    ]
|
13,039 | 2fb8f5f57f57d974093a803ea8ed3035d4a84b47 | import _surface
# Chimera marker-placement script: drops one colored 3-D marker per bead of
# each exocyst subunit (Sec3/5/6/8/10/15, Exo70, Exo84).
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath API exposes Marker_Set directly.
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older API: create marker sets through the volume path dialog instead.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}

# REFACTOR: the original script repeated the same five-line create-and-place
# pattern ~46 times; the data now lives in one table and a single loop issues
# exactly the same sequence of calls. (name, xyz, rgb) — colors are constant
# within a subunit.
_BEADS = [
    ('Sec3_0', (474.046, 555.799, 479.581), (0.21, 0.49, 0.72)),
    ('Sec3_1', (490.666, 530.714, 470.398), (0.21, 0.49, 0.72)),
    ('Sec3_2', (500.271, 502.168, 461.364), (0.21, 0.49, 0.72)),
    ('Sec3_3', (496.385, 474.496, 464.462), (0.21, 0.49, 0.72)),
    ('Sec3_4', (487.241, 450.459, 475.842), (0.21, 0.49, 0.72)),
    ('Sec3_5', (473.676, 431.074, 491.053), (0.21, 0.49, 0.72)),
    ('Sec3_6', (459.694, 412.389, 506.749), (0.21, 0.49, 0.72)),
    ('Sec5_0', (475.41, 534.626, 506.486), (0.6, 0.31, 0.64)),
    ('Sec5_1', (449.662, 523.48, 504.695), (0.6, 0.31, 0.64)),
    ('Sec5_2', (426.052, 508.552, 507.895), (0.6, 0.31, 0.64)),
    ('Sec5_3', (398.177, 510.407, 511.036), (0.6, 0.31, 0.64)),
    ('Sec5_4', (377.566, 525.714, 522.461), (0.6, 0.31, 0.64)),
    ('Sec5_5', (356.168, 507.806, 519.161), (0.6, 0.31, 0.64)),
    ('Sec6_0', (473.961, 501.611, 515.516), (1, 1, 0.2)),
    ('Sec6_1', (471.649, 503.737, 480.324), (1, 1, 0.2)),
    ('Sec6_2', (469.302, 505.855, 445.14), (1, 1, 0.2)),
    ('Sec6_3', (465.882, 510.512, 410.925), (1, 1, 0.2)),
    ('Sec6_4', (457.818, 515, 376.894), (1, 1, 0.2)),
    ('Sec6_5', (448.124, 524.272, 344.615), (1, 1, 0.2)),
    ('Sec8_0', (444.513, 461.479, 440.294), (0.65, 0.34, 0.16)),
    ('Sec8_1', (456.549, 477.552, 459.953), (0.65, 0.34, 0.16)),
    ('Sec8_2', (447.385, 454.852, 473.808), (0.65, 0.34, 0.16)),
    ('Sec8_3', (437.717, 428.935, 478.897), (0.65, 0.34, 0.16)),
    ('Sec8_4', (431.178, 401.603, 479.881), (0.65, 0.34, 0.16)),
    ('Sec8_5', (427.643, 373.77, 478.007), (0.65, 0.34, 0.16)),
    ('Sec10_0', (458.342, 457.086, 342.709), (0.3, 0.69, 0.29)),
    ('Sec10_1', (446.83, 434.867, 355.488), (0.3, 0.69, 0.29)),
    ('Sec10_2', (424.771, 437.253, 372.727), (0.3, 0.69, 0.29)),
    ('Sec10_3', (414.514, 466.487, 384.37), (0.3, 0.69, 0.29)),
    ('Sec10_4', (432.148, 486.057, 394.133), (0.3, 0.69, 0.29)),
    ('Sec10_5', (432.944, 509.817, 409.093), (0.3, 0.69, 0.29)),
    ('Sec15_0', (406.964, 510.736, 430.86), (0.97, 0.51, 0.75)),
    ('Sec15_1', (380.972, 509.65, 420.244), (0.97, 0.51, 0.75)),
    ('Sec15_2', (364.35, 500.431, 399.554), (0.97, 0.51, 0.75)),
    ('Sec15_3', (355.443, 475.787, 389.417), (0.97, 0.51, 0.75)),
    ('Sec15_4', (332.662, 466.768, 375.672), (0.97, 0.51, 0.75)),
    ('Sec15_5', (312.232, 484.267, 367.555), (0.97, 0.51, 0.75)),
    ('Exo70_0', (421.173, 538.451, 491.826), (0.89, 0.1, 0.1)),
    ('Exo70_1', (434.737, 555.268, 472.9), (0.89, 0.1, 0.1)),
    ('Exo70_2', (441.294, 555.584, 444.866), (0.89, 0.1, 0.1)),
    ('Exo70_3', (444.698, 549.843, 416.918), (0.89, 0.1, 0.1)),
    ('Exo70_4', (448.241, 543.932, 389.028), (0.89, 0.1, 0.1)),
    ('Exo84_0', (456.859, 531.155, 463.133), (1, 0.5, 0)),
    ('Exo84_1', (419.042, 531.736, 454.654), (1, 0.5, 0)),
    ('Exo84_2', (382.007, 534.825, 446.191), (1, 0.5, 0)),
    ('Exo84_3', (350.7, 537.437, 439.006), (1, 0.5, 0)),
]

# Same effect as the original fully-unrolled script: create each named marker
# set on first sight, then place a radius-2 marker at the bead position.
for _bead_name, _xyz, _rgb in _BEADS:
    if _bead_name not in marker_sets:
        s = new_marker_set(_bead_name)
        marker_sets[_bead_name] = s
    s = marker_sets[_bead_name]
    mark = s.place_marker(_xyz, _rgb, 2)
13,040 | 0586713288864647fe1c71e9bc75079f540dffbe | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 16:56:19 2019
@author: madhu
"""
#importing the dataset
import keras
import theano
import os
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import cifar10
from keras.utils import np_utils
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Point the keras CIFAR-10 loader at the local copy of the batches.
cifar10.data_path = r"C:\Users\madhu\OneDrive\Desktop\ML_Folder\cifar-10-batches-py"
import numpy as np
import matplotlib.pyplot as plt
import pickle
# Load the train/test split from keras.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# convert array of labeled data(from 0 to nb_classes-1) to one-hot vector
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
num_classes = Y_test.shape[1]
# Sanity check: (N, 10) one-hot label matrix and one example row.
print(Y_train.shape)
print(Y_train[0])
# Helpers for loading the raw python-pickled CIFAR-10 batches follow.
def unpickle(file):
    """Load one raw CIFAR-10 batch file (pickled with latin-1 encoding)."""
    with open(file, 'rb') as handle:
        return pickle.load(handle, encoding='latin-1')
def load_cifar10_data(data_dir):
    """Load the raw CIFAR-10 python batches from *data_dir*.

    Returns:
        (train_data, train_labels, test_data, test_labels) with the image
        arrays reshaped to (N, 32, 32, 3), i.e. channels-last.
    """
    train_data = None
    train_labels = []
    # The training set ships as five batch files of 10,000 images each.
    for i in range(1, 6):
        data_dic = unpickle(data_dir + "/data_batch_{}".format(i))
        if i == 1:
            train_data = data_dic['data']
        else:
            train_data = np.vstack((train_data, data_dic['data']))
        train_labels += data_dic['labels']
    test_data_dic = unpickle(data_dir + "/test_batch")
    test_data = test_data_dic['data']
    test_labels = test_data_dic['labels']
    # Raw rows are flat 3072-vectors (3 channels x 32 x 32): reshape to NCHW,
    # then roll the channel axis to the end to get NHWC.
    train_data = train_data.reshape((len(train_data), 3, 32, 32))
    train_data = np.rollaxis(train_data, 1, 4)
    train_labels = np.array(train_labels)
    test_data = test_data.reshape((len(test_data), 3, 32, 32))
    test_data = np.rollaxis(test_data, 1, 4)
    test_labels = np.array(test_labels)
    return train_data, train_labels, test_data, test_labels
data_dir = r'C:\Users\madhu\OneDrive\Desktop\ML_Folder\cifar-10-batches-py'
train_data, train_labels, test_data, test_labels = load_cifar10_data(data_dir)
# Sanity-check the loaded array shapes.
print(train_data.shape)
print(train_labels.shape)
print(test_data.shape)
print(test_labels.shape)
# Flatten each image to a single feature row for the classical models.
x_train = train_data.reshape(train_data.shape[0],-1)
x_test = test_data.reshape(test_data.shape[0], -1)
# Standardize features (fit on train, transform test).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train_scaled = sc.fit_transform(x_train)
x_test_scaled = sc.transform(x_test)
# NOTE(review): x_train_scaled / x_test_scaled are computed but never used —
# the PCA and classifiers below operate on the UNscaled x_train/x_test.
# Presumably the scaled arrays were meant to be fed in instead; confirm.
y_train = train_labels
y_test = test_labels
# Fit a full PCA first just to read off the explained-variance spectrum.
pca = PCA()
pca.fit_transform(x_train)
pca.explained_variance_.shape
k = 0
total = sum(pca.explained_variance_)
current_sum = 0
# Find the smallest k retaining 99% of the variance.
while(current_sum / total < 0.99):
    current_sum += pca.explained_variance_[k]
    k += 1
# NOTE(review): bare expressions like the `k` below (and the *_score lines
# further down) are notebook leftovers — they are no-ops in a script.
k
# Refit PCA with exactly k whitened components and project both splits.
pca = PCA(n_components=k, whiten=True)
x_train_pca = pca.fit_transform(x_train)
x_test_pca = pca.transform(x_test)
# Random forest on the PCA features.
rf = RandomForestClassifier()
rf.fit(x_train_pca, y_train)
y_pred_rf = rf.predict(x_test_pca)
random_forest_score = accuracy_score(y_test, y_pred_rf)
random_forest_score
# k-nearest neighbours on the PCA features.
knn = KNeighborsClassifier()
knn.fit(x_train_pca, y_train)
y_pred_knn = knn.predict(x_test_pca)
knn_score = accuracy_score(y_test, y_pred_knn)
knn_score
# Support vector machine on the PCA features.
svc = svm.SVC()
svc.fit(x_train_pca, y_train)
y_pred_svm = svc.predict(x_test_pca)
svc_score = accuracy_score(y_test, y_pred_svm)
svc_score
# Compare the three classical models' test accuracies.
print("RandomForestClassifier : ", random_forest_score)
print("K Nearest Neighbors : ", knn_score)
print("Support Vector Classifier : ", svc_score)
# Neural-network baseline: import the layers needed for the All-CNN model.
from keras.models import Sequential
from keras.layers import Dropout, Activation, Conv2D, GlobalAveragePooling2D
from keras.optimizers import SGD
def allcnn(weights=None):
    """Build the All-CNN architecture (conv-only, no dense layers) for CIFAR-10.

    Args:
        weights: optional path to an .hdf5 weights file to load into the model.

    Returns:
        An uncompiled keras Sequential model with a 10-way softmax head.

    NOTE(review): input_shape=(3,32,32) is channels-first (Theano ordering),
    while load_cifar10_data elsewhere in this file produces channels-last
    (N,32,32,3) arrays — confirm the backend's image_data_format matches.
    """
    # define model type - Sequential
    model = Sequential()
    # Block 1: two 96-filter 3x3 convs, then a strided conv as learned pooling.
    model.add(Conv2D(96, (3, 3), padding = 'same', input_shape=(3,32,32)))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), padding = 'same'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), padding = 'same', strides = (2,2)))
    model.add(Dropout(0.5))
    # Block 2: 192-filter convs with another strided "pooling" conv.
    model.add(Conv2D(192, (3, 3), padding = 'same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), padding = 'same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), padding = 'same', strides = (2,2)))
    model.add(Dropout(0.5))
    # Head: 1x1 convs down to 10 class maps.
    model.add(Conv2D(192, (3, 3), padding = 'same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (1, 1), padding = 'valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(10, (1, 1), padding = 'valid'))
    # Global average pooling + softmax replaces fully-connected layers.
    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax'))
    # Optionally restore pretrained weights.
    if weights:
        model.load_weights(weights)
    # return model
    return model
# Theano setup: disable the graph optimizer (workaround for local BLAS issues;
# `conda install blas` was the suggested alternative fix).
import os
os.environ['THEANO_FLAGS'] = 'optimizer=None'
import theano
theano.config.optimizer="None"
# SGD hyper-parameters for training from scratch.
learning_rate = 0.01
weight_decay = 1e-6
momentum = 0.9
# Build the All-CNN model with randomly initialized weights.
model = allcnn()
# Nesterov-momentum SGD + categorical cross-entropy for 10-way classification.
sgd = SGD(lr=learning_rate, decay=weight_decay, momentum=momentum, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# print the model summary
print (model.summary())
# Training parameters: a single epoch only (demonstration run).
epochs = 1
batch_size = 32
# Train on the keras-loaded split, validating on the test set each epoch.
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=epochs, batch_size=batch_size, verbose = 1)
# Second pass: rebuild the model with pretrained weights and evaluate only.
learning_rate = 0.01
weight_decay = 1e-6
momentum = 0.9
# NOTE(review): this .hdf5 file must exist next to the script; load_weights
# raises if it is missing — confirm it is checked in / downloaded beforehand.
weights = 'all_cnn_weights_0.9088_0.4994.hdf5'
model = allcnn(weights)
# Recompile with the same optimizer settings (required before evaluate()).
sgd = SGD(lr=learning_rate, decay=weight_decay, momentum=momentum, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# print the model summary
print (model.summary())
# Evaluate the pretrained model; scores = [loss, accuracy].
scores = model.evaluate(X_test, Y_test, verbose=1)
print("Accuracy: %.2f%%" % (scores[1]*100))
13,041 | e3d11b1642ffa53ace20620b453192ff7467e219 | #!/usr/bin/env pytest -vs
"""Tests for foundry container."""
# Standard Python Libraries
import os
import time
# Third-Party Libraries
import pytest
# Log line that signals the Foundry server finished starting (see test_wait_for_ready).
READY_MESSAGE = "Server started and listening on port"
# Set by CI for tagged releases; None/empty otherwise (gates test_release_version).
RELEASE_TAG = os.getenv("RELEASE_TAG")
# Module whose __version__ is the single source of truth for the project version.
VERSION_FILE = "src/_version.py"
def test_container_count(dockerc):
    """Verify the docker composition started exactly the expected containers."""
    # stopped=True includes containers that have already exited in the count.
    running_and_stopped = dockerc.containers(stopped=True)
    assert len(running_and_stopped) == 2, "Wrong number of containers were started."
@pytest.mark.xfail
def test_environment_credentials(main_container):
    """Verify the Foundry credential variables are present in the environment.

    Expected to fail (xfail) for pre-built images, where credentials are not
    required.
    """
    for variable in ("FOUNDRY_USERNAME", "FOUNDRY_PASSWORD"):
        assert variable in os.environ, f"{variable} was not in the environment"
@pytest.mark.slow
def test_wait_for_ready(main_container):
    """Poll the container log until the server announces it is listening."""
    # This could take a while, as we download the application.
    TIMEOUT = 180
    for i in range(TIMEOUT):
        logs = main_container.logs().decode("utf-8")
        if READY_MESSAGE in logs:
            break
        time.sleep(1)
    else:
        # for/else: this branch runs only if the loop never hit `break`,
        # i.e. the ready message never appeared within TIMEOUT seconds.
        raise Exception(
            f"Container does not seem ready. "
            f'Expected "{READY_MESSAGE}" in the log within {TIMEOUT} seconds.'
            f"\nLog output follows:\n{logs}"
        )
@pytest.mark.slow
def test_wait_for_healthy(main_container):
    """Poll the Docker healthcheck until the container reports healthy.

    Fails immediately if the status ever becomes "unhealthy", and raises if
    "healthy" is not reached within the timeout.
    """
    # This could take a while
    TIMEOUT = 180
    for i in range(TIMEOUT):
        inspect = main_container.inspect()
        status = inspect["State"]["Health"]["Status"]
        assert status != "unhealthy", "The container became unhealthy."
        if status == "healthy":
            break
        time.sleep(1)
    else:
        # for/else: runs only if the loop never broke (never became healthy).
        # BUG FIX: the original message read "did transition", inverting the
        # meaning of the failure; it must read "did not transition".
        raise Exception(
            f"Container status did not transition to 'healthy' within {TIMEOUT} seconds."
        )
def test_wait_for_exits(main_container, version_container):
    """Wait for containers to exit."""
    exit_code = version_container.wait()
    assert exit_code == 0, "Container service (version) did not exit cleanly"
@pytest.mark.skipif(
    RELEASE_TAG in [None, ""], reason="this is not a release (RELEASE_TAG not set)"
)
def test_release_version():
    """Verify that release tag version agrees with the module version."""
    namespace = {}
    with open(VERSION_FILE) as handle:
        exec(handle.read(), namespace)  # nosec
    expected_tag = f"v{namespace['__version__']}"
    assert RELEASE_TAG == expected_tag, "RELEASE_TAG does not match the project version"
def test_log_version(version_container):
    """Verify the container outputs the correct version to the logs."""
    version_container.wait()  # make sure container exited if running test isolated
    log_output = version_container.logs().decode("utf-8").strip()
    namespace = {}
    with open(VERSION_FILE) as handle:
        exec(handle.read(), namespace)  # nosec
    assert (
        log_output == namespace["__version__"]
    ), f"Container version output to log does not match project version file {VERSION_FILE}"
def test_container_version_label_matches(version_container):
    """Verify the container version label is the correct version."""
    namespace = {}
    with open(VERSION_FILE) as handle:
        exec(handle.read(), namespace)  # nosec
    label = version_container.labels["org.opencontainers.image.version"]
    assert (
        label == namespace["__version__"]
    ), "Dockerfile version label does not match project version"
|
13,042 | 47ec26bc18bcb4a47a1f401fe66d1272f33a9b42 | # Generated by Django 2.2.5 on 2019-10-15 04:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``garlic`` app: renames
    Order.item to Order.purchase (a pure rename, no data transformation)."""

    dependencies = [
        ('garlic', '0008_auto_20191014_2234'),
    ]

    operations = [
        migrations.RenameField(
            model_name='order',
            old_name='item',
            new_name='purchase',
        ),
    ]
|
13,043 | 60a7f61cf1eb9e4fa1d7274572a534a186541b6f | num1 = 1000
# Plain module-level constants.
num2 = 200
num3 = 300
num4 = 400
num5 = 500
num6 = 666
# The original file ended with a bare name ``over``, which raised NameError
# at import time; that stray token has been removed.
|
13,044 | 897d89f7c20f69272426caba857e37c096e3b5d6 | from collections import defaultdict
import operator
from documents.scoretype import ScoreType
class TopDoc:
    """Wraps a document together with per-ScoreType partial scores and a
    combined relevance score."""

    def __init__(self, doc):
        self.doc = doc
        self.scores = defaultdict(float)  # score type -> partial score
        self.score = 0

    def get_doc(self):
        """Return the wrapped document."""
        return self.doc

    def update_score(self, score_type, score):
        """Record (or overwrite) the partial score for *score_type*."""
        self.scores[score_type] = score

    def calculate_score(self):
        """Recompute the combined score as a weighted sum of the partials."""
        weighted = (
            self.scores[ScoreType.tf_idf] * 0.8
            + self.scores[ScoreType.proximity] * 0.2
        )
        self.score = weighted

    def __str__(self):
        parts = [f"(doc {self.doc.get_id()}, score {self.score})"]
        for score_type in self.scores.keys():
            parts.append(f"\t{score_type} score: {self.scores[score_type]}")
        return "".join(parts)

    def __repr__(self):
        return f"\n{self}"

    def display(self):
        """Return (score_type, score) pairs sorted by score, highest first."""
        return sorted(self.scores.items(), key=lambda item: item[1], reverse=True)
|
13,045 | c7592d2037e0cfd62f341aceae235ec104192b67 | import random
import sys
import timeit
import os
# Module-level ``global`` has no effect in Python; kept as-is from the original.
global arr, arrLen, ascending
# Cumulative comparison/transposition counters plus the per-run history lists
# consumed by writeToFile().
compares, transpositions, timeSpent, comp, trans, arLen = 0, 0, [], [], [], []
# Algorithm name from the command line (argv[2]); also used as the base name
# for trace (.txt) and result (.xls) files.
filename = sys.argv[2]
def timer(func):
    """Decorator: run *func*, store the elapsed seconds in the module-global
    ``time``, and return the elapsed time as a trimmed decimal string."""
    def wrapped(*args, **kwargs):
        global time
        t0 = timeit.default_timer()
        func(*args, **kwargs)
        time = timeit.default_timer() - t0
        return '{:.10f}'.format(time).rstrip('0')
    return wrapped
@timer
def insertion(array, arrLen, ascending):
    # Thin @timer wrapper: sorts *array* in place via insertionSort and lets
    # the decorator record the elapsed time in the module-global ``time``.
    insertionSort(array, arrLen, ascending)
def insertionSort(array, arrLen, ascending=True):
    """Sort the first *arrLen* elements of *array* in place by straight
    insertion, updating the module-global ``compares``/``transpositions``
    counters, and return the (mutated) array.

    Fixes two defects in the previous version:
    * it recursively called itself inside the outer loop with no base case,
      which blew the running time up exponentially and always re-inserted
      the last element instead of element ``i``;
    * the descending branch returned ``list.reverse()``, which is ``None``.
    """
    global compares, transpositions
    for i in range(1, arrLen):
        current = array[i]
        j = i - 1
        compares += 1
        while j >= 0 and array[j] > current:
            transpositions += 1
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = current
    if not ascending:
        array.reverse()
    return array
@timer
def merge(array, ascending):
    # Thin @timer wrapper: sorts *array* in place via mergeSort and lets the
    # decorator record the elapsed time in the module-global ``time``.
    mergeSort(array, ascending)
def mergeSort(array, ascending=True):
    """Top-down recursive merge sort, in place via slice copies.

    Updates the module-global ``compares``/``transpositions`` counters (the
    counting scheme is inherited unchanged from the original implementation).
    Returns the mutated array; when ``ascending`` is False the fully sorted
    array is reversed before returning.  Fixed defect: the old descending
    branch returned ``list.reverse()``, i.e. ``None``.
    """
    global compares, transpositions
    compares += 1
    if len(array) > 1:
        mid = len(array) // 2
        left = array[:mid]
        right = array[mid:]
        # Recursive calls always sort ascending; direction is applied once
        # at the top level.
        mergeSort(left)
        mergeSort(right)
        i = j = k = 0
        compares += 1
        while i < len(left) and j < len(right):
            compares += 1
            if left[i] < right[j]:
                transpositions += 1
                array[k] = left[i]
                i += 1
            else:
                transpositions += 1
                array[k] = right[j]
                j += 1
            k += 1
        compares += 1
        while i < len(left):
            transpositions += 1
            array[k] = left[i]
            i += 1
            k += 1
        compares += 1
        while j < len(right):
            transpositions += 1
            array[k] = right[j]
            j += 1
            k += 1
    if not ascending:
        array.reverse()
    return array
def partition(array, low, high):
    """Lomuto partition of array[low:high+1] around ``array[high]``; returns
    the pivot's final index.  Updates the module-global counters."""
    global compares, transpositions
    i = low - 1
    center = array[high]
    transpositions += 1
    for j in range(low, high):
        compares += 1
        if array[j] <= center:
            transpositions += 1
            i += 1
            array[i], array[j] = array[j], array[i]
    array[i + 1], array[high] = array[high], array[i + 1]
    return i + 1


@timer
def quick(array, low, high, ascending):
    # Thin @timer wrapper around quickSort.
    quickSort(array, low, high, ascending)


def quickSort(array, low, high, ascending=True):
    """Recursive quicksort of array[low:high+1] in place.

    Returns the mutated array; when ``ascending`` is False the sorted array
    is reversed first (recursive calls use the ascending default, so the
    reversal only happens at the caller's level).  Fixed defect: the old
    descending branch returned ``list.reverse()``, i.e. ``None``.
    """
    global compares, transpositions
    compares += 1
    if low < high:
        index = partition(array, low, high)
        quickSort(array, low, index - 1)
        quickSort(array, index + 1, high)
    if not ascending:
        array.reverse()
    return array
@timer
def mqs(array, low, high, ascending):
    # Thin @timer wrapper around the merge/quick hybrid sort.
    mergeQuickSort(array, low, high, ascending)


def mergeQuickSort(array, low, high, ascending=True):
    # Hybrid sort: quicksort-style partitioning that recurses into the
    # smaller half and loops on the larger one, switching to mergeSort for
    # small ranges (< 8 elements).
    # NOTE(review): mergeSort is invoked on the *whole* array rather than
    # just array[low:high+1]; the result is still sorted but work is redone
    # -- confirm this is intended.
    # NOTE(review): the descending branch returns list.reverse(), which is
    # None; callers currently ignore the return value.
    global compares, transpositions
    while low < high:
        compares += 1
        if high - low < 8:
            mergeSort(array, ascending)
            break
        else:
            p = partition(array, low, high)
            if p - low < high - p:
                compares += 1
                mergeQuickSort(array, low, p - 1)
                transpositions += 1
                low = p + 1
            else:
                compares += 1
                mergeQuickSort(array, p + 1, high)
                transpositions += 1
                high = p - 1
    if ascending:
        return array
    else:
        return array.reverse()
@timer
def qsdp(array, low, high, ascending):
    # Thin @timer wrapper around the dual-pivot quicksort.
    quickSortDualPivot(array, low, high, ascending)


def quickSortDualPivot(array, low, high, ascending=True):
    # Dual-pivot quicksort: array[low] and array[high] become the two
    # pivots; the range splits into (< p1), (p1..p2) and (> p2), each
    # sorted recursively.
    # NOTE(review): the descending branch returns list.reverse(), i.e.
    # None; callers currently ignore the return value.
    global compares, transpositions
    compares += 1
    if high <= low:
        return
    l = low + 1
    k = l
    h = high - 1
    # Ensure the left pivot is the smaller of the two.
    if array[low] > array[high]:
        transpositions += 1
        array[low], array[high] = array[high], array[low]
    while l <= h:
        compares += 1
        if array[l] < array[low]:
            transpositions += 1
            array[k], array[l] = array[l], array[k]
            k += 1
            l += 1
        elif array[l] > array[high]:
            transpositions += 1
            array[l], array[h] = array[h], array[l]
            h -= 1
        else:
            l += 1
    k -= 1
    h += 1
    # Move the two pivots into their final positions.
    transpositions += 1
    array[low], array[k] = array[k], array[low]
    transpositions += 1
    array[high], array[h] = array[h], array[high]
    quickSortDualPivot(array, low, k - 1)
    quickSortDualPivot(array, k + 1, h - 1)
    quickSortDualPivot(array, h + 1, high)
    if ascending:
        return array
    else:
        return array.reverse()
def countingSort(array, exp):
    """Stable counting sort of *array* by the decimal digit selected by
    *exp* (1 = ones, 10 = tens, ...).  Helper for radixSort; assumes
    non-negative integers.  Updates the module-global ``transpositions``.
    """
    global compares, transpositions
    lenArr = len(array)
    output = [0] * lenArr
    count = [0] * 10
    # Histogram of the current digit.  Integer division (//) replaces the
    # old float division, which loses precision for very large ints.
    for value in array:
        count[(value // exp) % 10] += 1
    # Prefix sums turn the histogram into bucket end positions.
    for d in range(1, 10):
        count[d] += count[d - 1]
    # Walk backwards so equal digits keep their relative order (stability).
    i = lenArr - 1
    while i >= 0:
        transpositions += 1
        digit = (array[i] // exp) % 10
        output[count[digit] - 1] = array[i]
        count[digit] -= 1
        i -= 1
    array[:] = output


@timer
def radixSort(array, ascending=True):
    """LSD radix sort for non-negative integers: one countingSort pass per
    decimal digit of the maximum element.

    Bug fixed: the loop condition used float division (``arrayMax / exp``),
    which stays > 0 until the quotient underflows -- hundreds of useless
    passes.  Integer division stops after the most significant digit.
    Also guards against an empty input (``max`` would raise) and returns
    the mutated array (the old descending branch returned ``None``).
    """
    global compares, transpositions
    if not array:
        return array
    arrayMax = max(array)
    exp = 1
    while arrayMax // exp > 0:
        compares += 1
        countingSort(array, exp)
        exp *= 10
    if not ascending:
        array.reverse()
    return array
@timer
def selectionSort(array, ascending=True):
    """Selection sort of *array* in place, writing a step-by-step trace of
    the array to "<filename>.txt" and updating the module-global counters."""
    global compares, transpositions
    trace = open(filename + ".txt", "w+")
    n = len(array)
    for i in range(n):
        smallest = i
        trace.write("\nIndex is: %d. Array is: " % smallest)
        for element in array:
            trace.write("%d " % element)
        for j in range(i + 1, n):
            compares += 1
            if array[smallest] > array[j]:
                transpositions += 1
                smallest = j
        array[i], array[smallest] = array[smallest], array[i]
    trace.close()
    if ascending:
        return array
    return array.reverse()
@timer
def rSS(array, pivot, ascending=True):
    """Timed wrapper around randomizedSelectSort: select the *pivot*-th
    smallest element, logging partition steps to "<filename>.txt".

    Fixes over the previous version:
    * ``ascending`` is accepted (currently unused) because one call site in
      main() passes it; the old two-argument signature made that call raise
      TypeError;
    * the trace file is closed via a context manager (it used to leak).
    """
    with open(filename + ".txt", "w+") as trace:
        randomizedSelectSort(array, pivot, trace)
def partitionRS(array, fl, pivot_index=0):
    """Partition *array* in place around the element at *pivot_index*,
    logging the state to the open file *fl*.

    Returns ``(array, final pivot position)``.
    """
    global compares, transpositions
    fl.write("\nIndex is: %d. Array is: " % pivot_index)
    for value in array:
        fl.write("%d " % value)
    # Move the chosen pivot to the front first.
    if pivot_index != 0:
        transpositions += 2
        array[0], array[pivot_index] = array[pivot_index], array[0]
    boundary = 0
    for j in range(1, len(array)):
        if array[j] < array[0]:
            transpositions += 1
            array[j], array[boundary + 1] = array[boundary + 1], array[j]
            boundary += 1
    array[0], array[boundary] = array[boundary], array[0]
    return array, boundary
def randomizedSelectSort(array, pivot, file):
    """Randomized quickselect: return the element that would occupy index
    *pivot* in sorted order, partitioning around random pivots and logging
    each partition to *file*."""
    global compares, transpositions
    compares += 1
    if len(array) == 1:
        return array[0]
    partitioned, pos = partitionRS(array, file, random.randrange(len(array)))
    if pos == pivot:
        return partitioned[pos]
    if pos > pivot:
        # Target lies in the left part.
        return randomizedSelectSort(partitioned[:pos], pivot, file)
    # Target lies in the right part; re-index relative to it.
    return randomizedSelectSort(partitioned[(pos + 1):], pivot - pos - 1, file)
def binarySearch(array, left, right, element):
    """Classic binary search over the sorted slice array[left:right+1].

    Returns the index of *element*, or -1 when it is absent.  (Iterative
    rewrite of the recursive original; same results for every input.)
    """
    global compares, transpositions
    while left <= right:
        mid = left + (right - left) // 2
        if array[mid] == element:
            return mid
        if array[mid] > element:
            right = mid - 1
        else:
            left = mid + 1
    return -1
@timer
def qSS(array, low, high, ascending):
    # Thin @timer wrapper around the quick/selection hybrid sort.
    quickSelectSort(array, low, high, ascending)


def quickSelectSort(array, low, high, ascending=True):
    # Hybrid sort: quicksort partitioning that recurses into the smaller
    # side and loops on the larger, falling back to selectionSort for
    # ranges under 1000 elements.
    # NOTE(review): selectionSort is applied to the *whole* array, not just
    # array[low:high+1] -- the result is still sorted, but work is redone.
    # NOTE(review): the descending branch returns list.reverse() (None);
    # callers currently ignore the return value.
    global compares, transpositions
    while low < high:
        compares += 1
        if high - low < 1000:
            selectionSort(array, ascending)
            break
        else:
            p = partition(array, low, high)
            if p - low < high - p:
                compares += 1
                quickSelectSort(array, low, p - 1)
                transpositions += 1
                low = p + 1
            else:
                compares += 1
                quickSelectSort(array, p + 1, high)
                transpositions += 1
                high = p - 1
    if ascending:
        return array
    else:
        return array.reverse()
# @timer
# def qRSS(array, low, high, ascending):
# quickRandomizedSelectSort(array, low, high, ascending)
#
#
# def quickDualRandomizedSelectSort(array, low, high, ascending=True):
# global compares, transpositions
# while low < high:
# compares += 1
# if high - low < 1000:
# for pivot in range(len(array)):
# randomizedSelectSort(array, pivot, ascending)
# break
# else:
# p = partition(array, low, high)
# compares += 1
# if p - low < high - p:
# quickRandomizedSelectSort(array, low, p - 1)
# low = p + 1
# else:
# quickRandomizedSelectSort(array, p + 1, high)
# high = p - 1
# if ascending:
# return array
# else:
# return array.reverse()
def writeToFile():
    """Benchmark the algorithm named by the module-global ``filename`` over
    random arrays of 100..10000 elements and save the statistics to
    "<filename>.xls" (columns: algorithm, length, time, compares,
    transpositions).

    Each measurement is repeated ``int(sys.argv[5])`` times, appending the
    cumulative module-global counters after every run.

    Fixes over the previous version:
    * the unused ``import psutil`` was removed;
    * the if/elif chain contained an unreachable duplicate "selection"
      branch, dropped here;
    * dispatch is table-driven instead of one elif per algorithm.
    """
    from xlwt import Workbook

    global timeSpent, trans, comp, arLen
    sys.setrecursionlimit(10100)
    wb = Workbook()
    ws = wb.add_sheet("AiSD")
    header = ["Algorithm", "Array Length", "Time Spent", "Compares", "Transpositions"]

    def _run_randselection(arrays, length):
        # Randomized selection "sort": select every order statistic in turn.
        for k in range(len(arrays) - 1):
            rSS(arrays, k)

    # One entry per supported algorithm: (arrays, length) -> run one sort.
    runners = {
        "insertion": lambda arrays, length: insertion(arrays, length, ascending),
        "merge": lambda arrays, length: merge(arrays, ascending),
        "quick": lambda arrays, length: quick(arrays, 0, length - 1, ascending),
        "mergequick": lambda arrays, length: mqs(arrays, 0, length - 1, ascending),
        "dual-pivot": lambda arrays, length: qsdp(arrays, 0, length - 1, ascending),
        "radix": lambda arrays, length: radixSort(arrays, ascending),
        "selection": lambda arrays, length: selectionSort(arrays, ascending),
        "randselection": _run_randselection,
        "quickselect": lambda arrays, length: qSS(arrays, 0, length - 1, ascending),
    }

    for i in range(100, 10100, 100):
        arrays = random.sample(range(i), i)
        length = len(arrays)
        runner = runners.get(filename)
        if runner is None:
            print("To proper use of program add arguments --type "
                  "insertion|quick|merge|mergequick|radix|selection|randselection|binarysearch|quickselect "
                  "--comp >=|<=")
        else:
            for j in range(int(sys.argv[5])):
                runner(arrays, length)
                timeSpent.append('{:.10f}'.format(time).rstrip('0'))
                trans.append(transpositions)
                comp.append(compares)
                arLen.append(length)

    # Header row, then one column per statistic.
    for col, title in enumerate(header):
        ws.write(0, col, title)
    columns = [[filename] * len(arLen), arLen, timeSpent, comp, trans]
    for col, values in enumerate(columns):
        for row, value in enumerate(values, start=1):
            ws.write(row, col, value)
    wb.save(filename + ".xls")
def readFromFile():
    """Load the previously saved "<filename>.xls" workbook as a DataFrame."""
    import pandas as pd
    return pd.read_excel(filename + ".xls")
def main():
    """Interactive driver for the sorting/search benchmarks.

    Menu: (1) sort a manually entered array, (2) sort a randomly generated
    array, (3) benchmark to an .xls file, (4) plot saved results.  The
    algorithm comes from the module-global ``filename`` (argv[2]); the sort
    direction from argv[4] (">=" ascending, "<=" descending).

    Fixes over the previous version:
    * "quickselect" was misspelled "qucikselect" in both comparisons, so
      that algorithm was unreachable interactively;
    * the manual-input randselection branch called rSS with three
      arguments (its signature takes two), raising TypeError;
    * the binarysearch branch printed ``arr.sort()`` (always None);
    * the descending random-array randselection branch printed
      ``arr.reverse()`` (always None);
    * the final "Enter 1, 2, 3 or 4" message was a bare string statement
      and was never shown.
    """
    global ascending
    chosen = int(input("Choose option:\n"
                       "1:To enter an array manually\n"
                       "2:To randomly generate numbers and sort it\n"
                       "3:To save data to file\n"
                       "4:To load data from file and build chosen graph\n"))
    if sys.argv[4] == ">=":
        ascending = True
    elif sys.argv[4] == "<=":
        ascending = False
    else:
        print("Enter correct value please")
    if chosen == 1:
        arr = [int(x) for x in input("Give an array: ").split()]
        arrLen = len(arr) - 1
        if filename == "binarysearch":
            # Binary search needs sorted input; the old inline
            # ``arr.sort() if ...`` expression printed None here.
            arr.sort()
        print("Given array: ", arr, "\nArray length:", arrLen + 1)
        if filename == "insertion":
            insertion(arr, arrLen + 1, ascending)
            print("Sorted with insertion sort:" if ascending else "Reversed insertion sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "merge":
            merge(arr, ascending)
            print("Sorted with merge sort:" if ascending else "Reversed merge sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "quick":
            quick(arr, 0, arrLen, ascending)
            print("Sorted with quick sort:" if ascending else "Reversed quick sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "dual-pivot":
            qsdp(arr, 0, arrLen, ascending)
            print("Sorted with dual pivot quick sort:" if ascending else "Reversed dual pivot quick sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares, "compares and",
                  transpositions, "transpositions")
        elif filename == "mergequick":
            mqs(arr, 0, arrLen, ascending)
            print("Sorted with merge and quick sort:" if ascending else "Reversed merge and quick sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "radix":
            radixSort(arr, ascending)
            print("Sorted with radix sort:" if ascending else "Reversed radix sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "selection":
            selectionSort(arr, ascending)
            print("Sorted with selection sort:" if ascending else "Reversed selection sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "randselection":
            for i in range(len(arr) - 1):
                # Fixed: rSS takes (array, pivot); the old 3-argument call
                # raised TypeError.
                rSS(arr, i)
            print("Sorted with randomized selection sort:" if ascending else "Reversed randomized selection sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "binarysearch":
            element = int(input("Enter element you want to find: "))
            result = binarySearch(arr, 0, arrLen, element)
            if result != -1:
                print("Element is present at index % d" % result)
            else:
                print("Element is not present in array")
        elif filename == "quickselect":  # fixed typo: was "qucikselect"
            qSS(arr, 0, arrLen, ascending)
            print("Sorted with quick selection sort:" if ascending else "Reversed quick selection sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        else:
            print("To proper use of program add arguments --type "
                  "insertion|quick|merge|mergequick|radix|selection|randselection|binarysearch|quickselect "
                  "--comp >=|<=")
    elif chosen == 2:
        ran = int(input("Enter the range of array(recommended to 10000): "))
        r = int(input("Enter the number of elements to be generated from 0 to %d: " % ran))
        rnd = random.sample(range(ran), r)
        arr = [int(x) for x in rnd]
        arrLen = len(arr) - 1
        print("Given array: ", arr, "\nArray length:", arrLen + 1)
        if filename == "insertion":
            insertion(arr, arrLen + 1, ascending)
            print("Sorted with insertion sort:" if ascending else "Reversed insertion sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "merge":
            merge(arr, ascending)
            print("Sorted with merge sort:" if ascending else "Reversed merge sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "quick":
            quick(arr, 0, arrLen, ascending)
            print("Sorted with quick sort:" if ascending else "Reversed quick sort:", arr, "\nDone in",
                  '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "dual-pivot":
            qsdp(arr, 0, arrLen, ascending)
            print("Sorted with dual pivot quick sort:" if ascending else "Reversed dual pivot quick sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares,
                  "compares and", transpositions, "transpositions")
        elif filename == "mergequick":
            mqs(arr, 0, arrLen, ascending)
            print("Sorted with merge and quick sort:" if ascending else "Reversed merge and quick sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with", compares, "compares and",
                  transpositions, "transpositions")
        elif filename == "radix":
            radixSort(arr, ascending)
            print("Sorted with radix sort:" if ascending else "Reversed radix sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "selection":
            selectionSort(arr, ascending)
            print("Sorted with selection sort:" if ascending else "Reversed selection sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "randselection":
            for i in range(len(arr) - 1):
                rSS(arr, i)
            if not ascending:
                # Fixed: the old code printed ``arr.reverse()`` (None) inline.
                arr.reverse()
            print("Sorted with randomized selection sort:" if ascending else "Reversed randomized selection sort:",
                  arr, "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        elif filename == "binarysearch":
            element = int(input("Enter element you want to find: "))
            result = binarySearch(arr, 0, arrLen, element)
            if result != -1:
                print("Element is present at index % d" % result)
            else:
                print("Element is not present in array")
        elif filename == "quickselect":  # fixed typo: was "qucikselect"
            qSS(arr, 0, arrLen, ascending)
            print("Sorted with quick selection sort:" if ascending else "Reversed quick selection sort:", arr,
                  "\nDone in", '{:.10f}'.format(time).rstrip('0'), "seconds, with",
                  compares, "compares and", transpositions, "transpositions")
        else:
            print("To proper use of program add arguments --type "
                  "insertion|quick|merge|mergequick|radix|selection|randselection|binarysearch|quickselect "
                  "--comp >=|<=")
    elif chosen == 3:
        writeToFile()
    elif chosen == 4:
        import matplotlib.pyplot as plot
        graphNum = int(input(
            "1:To build graph which shows the dependence of transpositions divided by array length on array length\n"
            "2:To build graph which shows the dependence of compares divided by array length on array length\n"
            "3:To build graph which shows the dependence of time on array length\n"
            "4:To build graph which shows the dependence of transpositions on array length\n"
            "5:To build graph which shows the dependence of compares on array length\n"))
        file = readFromFile()
        if graphNum == 1:
            graph = file[['Transpositions', 'Array Length']]
            graph['Transpositions'] = file['Transpositions'].div(file['Array Length'])
            graph.plot(x='Array Length', y='Transpositions')
            plot.show()
        elif graphNum == 2:
            graph = file[['Compares', 'Array Length']]
            graph['Compares'] = file['Compares'].div(file['Array Length'])
            graph.plot(x='Array Length', y='Compares')
            plot.show()
        elif graphNum == 3:
            graph = file[['Time Spent', 'Array Length']]
            graph.plot(x='Array Length', y='Time Spent')
            plot.show()
        elif graphNum == 4:
            graph = file[['Transpositions', 'Array Length']]
            graph.plot(x='Array Length', y='Transpositions')
            plot.show()
        elif graphNum == 5:
            graph = file[['Compares', 'Array Length']]
            graph.plot(x='Array Length', y='Compares')
            plot.show()
        else:
            print("Enter correct value please")
    else:
        # Fixed: this message used to be a bare string statement (a no-op).
        print("Enter 1, 2, 3 or 4 please")
# Script entry point; expects CLI args: argv[2] = algorithm name,
# argv[4] = sort direction (">=" or "<="), argv[5] = repetition count.
if __name__ == "__main__":
    main()
|
13,046 | c166d48bf01490c72bfae353cc162b85f2869e88 | """
To check the password strength
It should contain at least
1 character,
1 digit
1 special character
Length should be greater than 8
"""
import re
import getpass
def checkstrength(pswd):
    """Classify *pswd* as "correct password" or "incorrect password".

    A correct password is at least 8 characters long and contains at least
    one letter, one digit and one non-alphanumeric character.
    """
    if len(pswd) < 8:
        return "incorrect password"
    required = ('[a-zA-Z]', '[0-9]', '[^a-zA-Z0-9]')
    if all(re.search(pattern, pswd) for pattern in required):
        return "correct password"
    return "incorrect password"
# Prompt without echo; only leading/trailing spaces are stripped before the
# strength check.
pswd = getpass.getpass("Please enter a password without spaces:- ")
result = checkstrength(pswd.strip(' '))
print(result)
|
13,047 | 2eb19f0016cb7b558dd9ab5c4122202ad02dc777 | """
Simple client to the MetaLayer API.
Usage:
import metalayer
client = metalayer.Client()
# Run sentiment analysis over a string
client.data.sentiment("I love kittens")
# Retrieve keywords from a string
client.data.tagging("Kittens and puppies and bears")
# Extract geographic information from a string
client.data.locations("Puppies in Bozeman are the best")
# Run the three preceding endpoints over a string in one shot
client.data.bundle("The best kittens are from MT")
# Extract color information from an image
with open('/path/to/file', 'rb') as f:
client.data.color(f)
# Generate a histogram of color distribution from an image
with open('/path/to/file', 'rb') as f:
client.data.histogram(f)
"""
import json
import requests
class _Client(object):
"""Base client class, responsible for sending the HTTP request."""
_layer = None
def _send_request(self, endpoint, data=None, files=None):
"""Fire off the HTTP request and do some minimal processing to the
response.
"""
if files:
[f.seek(0) for f in files.values()]
url = "%s/%s/%d/%s" % (Client.HOST, self._layer, Client.VERSION,
endpoint)
response = requests.post(url, data=data, files=files)
response = json.loads(response.text)
if response['status'] != "success":
raise Exception(". ".join(response['response']['errors']))
return response['response']
class _Data(_Client):
    """Client for the data layer API (text-analysis endpoints)."""

    _layer = "datalayer"

    def sentiment(self, text):
        """Return a sentiment score for *text* between -5.0 (very negative)
        and 5.0 (very positive); 0 means neutral or undetectable."""
        result = self._send_request("sentiment", dict(text=text))
        return result[self._layer]['sentiment']

    def tagging(self, text):
        """Return the strongest keywords ("tags") extracted from *text* via
        natural language processing."""
        result = self._send_request("tagging", dict(text=text))
        return result[self._layer]['tags']

    def locations(self, text):
        """Return places that *text* appears to refer to (or where its
        author seems located), disambiguated from textual clues."""
        result = self._send_request("locations", dict(text=text))
        return result[self._layer]['locations']

    def bundle(self, text):
        """Run sentiment, tagging and locations in one request and return
        the combined payload."""
        result = self._send_request("bundle", dict(text=text))
        return result[self._layer]
class _Image(_Client):
    """Client for the image layer API (image-analysis endpoints)."""

    _layer = "imglayer"

    def color(self, image):
        """Return all colors found in *image* as RGB values."""
        result = self._send_request("color", files=dict(image=image))
        return result[self._layer]['colors']

    def histogram(self, image):
        """Return the color distribution of *image*, including sample
        positioning (unlike color(), which returns colors only)."""
        result = self._send_request("histogram", files=dict(image=image))
        return result[self._layer]['histogram']

    def ocr(self, image):
        """Attempt to parse readable text out of *image*.

        Note: the service reports OCR results under the data layer's key.
        """
        result = self._send_request("ocr", files=dict(image=image))
        return result[_Data._layer]

    def faces(self, image):
        """Detect objects in *image* (by default, human faces) and return
        their positions relative to the document."""
        result = self._send_request("faces", files=dict(image=image))
        return result['objectdetection']

    def bundle(self, image):
        """Run color, histogram, OCR and object detection in one request."""
        result = self._send_request("bundle", files=dict(image=image))
        return result[self._layer]
class Client(object):
    """Convenience facade over the MetaLayer API.

    Instantiating it attaches one sub-client per registered layer::

        client = Client()
        client.data.sentiment("I love kittens")        # text endpoints
        with open('/path/to/file', 'rb') as f:
            client.image.color(f)                      # image endpoints
    """

    HOST = "http://api.metalayer.com/s"
    VERSION = 1

    _layer_class_map = {
        "data": _Data,
        "image": _Image
    }

    def __init__(self):
        """Attach an instance of each registered layer client."""
        for name, layer_cls in self._layer_class_map.items():
            setattr(self, name, layer_cls())
|
13,048 | 2b01d0c58c660c21691601baaf7f4e981b950513 | from .subject import SubjectViewSet, TopicViewSet
from .level import LevelViewSet |
13,049 | 029e18c2b231fd6c836f80831e3e0b1c8c5336f3 | from django.db import connection
def sample_sql_query(params):
    # Fetch a single row from numerico.track through a raw Django cursor.
    # NOTE(review): ``params`` is accepted but never used -- presumably the
    # query was meant to be parameterized; confirm against callers.
    with connection.cursor() as cursor:
        cursor.execute("SELECT * FROM numerico.track limit 1;")
        rows = cursor.fetchall()
        return rows
13,050 | 8af7d268d3a0156bb713016e771d984850842514 | import logging.handlers
import os
import sys
from logging.handlers import RotatingFileHandler
# Root-logger defaults: INFO level, timestamped "path[line] - level" format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
def init_logger():
    """Attach a rotating-file handler and a stdout handler to 'eslogger'.

    Both handlers log at INFO and use a format that references a custom
    ``appname`` field, so callers must pass ``extra={'appname': ...}``.
    Calling this more than once adds duplicate handlers (unchanged from
    the original behavior).
    """
    if not os.path.exists("logs"):
        os.mkdir("logs")

    # File output: 200 MB per file, 10 rotated backups, UTF-8.
    file_handler = RotatingFileHandler(filename="logs/cookie_factory.log",
                                       maxBytes=200 * 1024 * 1024,
                                       backupCount=10,
                                       encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(appname)s: %(message)s'))

    # Console output to stdout with the same format.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(appname)s: %(message)s'))

    eslogger = logging.getLogger('eslogger')
    eslogger.addHandler(file_handler)
    eslogger.addHandler(console_handler)
if __name__ == '__main__':
    init_logger()
    logger = logging.getLogger('eslogger')
    # logger.setLevel(logging.INFO)
    # Smoke test: "ceshi" means "test"; ``appname`` must be supplied via
    # ``extra`` because the handler format string references %(appname)s.
    logger.info("ceshi", extra={'appname': '自定义变量'})
13,051 | 0a36d8be551aaed8fa2d7bdd41f64b2b920c2fe8 | from flask import jsonify
from flask import Blueprint
from flask import request
upgrade = Blueprint("upgrade", __name__)


@upgrade.route('/check', methods=['GET'])
def check_upgrade():
    """GET /check: report the latest available version to the client.

    The ``version`` query parameter is read but not yet used to vary the
    response (unchanged from the original behavior).
    """
    version = request.args.get('version')
    payload = {
        "data": {
            "version": 1111,
            "url": "aaaaaa"
        },
        "code": 200,
        "message": "success"
    }
    return jsonify(payload)
|
13,052 | ec2672ca22f75368a0e399cd58186bfb3369d9e9 | import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from pyfirmata import Arduino, util
from tkinter import *
from PIL import Image
from PIL import ImageTk
import time
# Shared accumulators and last-entered values used by the callbacks below.
cont=0
content=0
num=0
prom1=0
prom2=0
prom3=0
# Arduino on COM7, sampled through pyfirmata's background iterator thread.
placa = Arduino ('COM7')
it = util.Iterator(placa)
it.start()
# Three analog inputs (A0-A2) and three PWM outputs (D3, D5, D6).
a_0 = placa.get_pin('a:0:i')
a_1 = placa.get_pin('a:1:i')
a_2 = placa.get_pin('a:2:i')
led = placa.get_pin('d:3:p')
led1 = placa.get_pin('d:5:p')
led2 = placa.get_pin('d:6:p')
time.sleep(0.5)
# Main Tk window.
ventana = Tk()
ventana.geometry('1090x545')
ventana.title("UI para sistemas de control")
texto = Label(ventana, text="entry", bg='cadet blue1', font=("Arial Bold", 14), fg="white")
texto.place(x=20, y=20)
# Fetch the service account key JSON file contents
cred = credentials.Certificate('key/key.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://parcial-8f814.firebaseio.com/'
})
marco1 = Frame(ventana, bg="gray", highlightthickness=1, width=1280, height=800, bd= 5)
marco1.place(x = 0,y = 0)
b=Label(marco1,text="")
# Labels + StringVars that display the three ADC readings.
valor2= Label(marco1, bg='cadet blue1', font=("Arial Bold", 15), fg="white", width=5)
adc_data=StringVar()
valor3= Label(marco1, bg='cadet blue1', font=("Arial Bold", 15), fg="white", width=5)
adc_data2=StringVar()
valor4= Label(marco1, bg='cadet blue1', font=("Arial Bold", 15), fg="white", width=5)
adc_data3=StringVar()
Label(ventana, text="Input: ").place(x=20, y=60)
dato = Entry(ventana)
dato.place(x=90, y=60)
def adc_read():
    """Sample analog pin A0 fifteen times, mirroring each reading to the UI.

    Each raw reading is printed, shown in the Tk label via ``adc_data`` and
    accumulated into the global ``prom1``; after the burst the last reading
    is pushed to Firebase under ``sensor1/adc``.
    """
    global prom1
    reading = None
    for _ in range(15):
        reading = a_0.read()
        print(reading)
        adc_data.set(reading)
        prom1 = reading + prom1
        ventana.update()
        time.sleep(0.1)
    # Only the final sample of the burst is persisted.
    sensor_ref = db.reference('sensor')
    sensor_ref.update({
        'sensor1/adc': reading
    })
def adc_read1():
    """Sample analog pin A1 fifteen times, mirroring each reading to the UI.

    Each raw reading is printed, shown via ``adc_data2`` and accumulated
    into the global ``prom2``; after the burst the last reading is pushed
    to Firebase under ``sensor2/adc``.
    """
    global prom2
    i = 0
    # (removed an unused local ``prom = 0`` left over from refactoring)
    while i < 15:
        i = i + 1
        y = a_1.read()
        print(y)
        adc_data2.set(y)
        prom2 = y + prom2
        ventana.update()
        time.sleep(0.1)
    ref = db.reference('sensor')
    ref.update({
        'sensor2/adc': y
    })
def adc_read2():
    """Sample analog pin A2 fifteen times, mirroring each reading to the UI.

    Each raw reading is printed, shown via ``adc_data3`` and accumulated
    into the global ``prom3``; after the burst the last reading is pushed
    to Firebase under ``sensor3/adc``.
    """
    global prom3
    i = 0
    while i < 15:
        # Bug fix: the counter was advanced with ``i = i + prom1`` (another
        # sensor's running total), so the iteration count was unpredictable
        # and the loop never terminated while prom1 == 0.
        i = i + 1
        z = a_2.read()
        print(z)
        adc_data3.set(z)
        prom3 = z + prom3
        ventana.update()
        # NOTE(review): unlike adc_read/adc_read1 there is no
        # time.sleep(0.1) between samples here -- confirm if intended.
    ref = db.reference('sensor')
    ref.update({
        'sensor3/adc': z
    })
def update():
    """Drive the three PWM LED pins from the accumulated sensor totals.

    NOTE(review): ref1/ref2/ref3 are created but never used here, and the
    prom* globals are sums of up to 15 raw readings, so the duty cycle
    written may exceed the 0..1 PWM range -- confirm intended scaling.
    """
    ref1=db.reference("sensor1/adc")
    ref2=db.reference("sensor2/adc")
    ref3=db.reference("sensor3/adc")
    led.write(prom1)
    led1.write(prom2)
    led2.write(prom3)
def entrada(input):
    """Handle <Return> in the first entry box: validate and persist the value.

    Accepts integers 8..13 ("correcto"); anything else asks for a new
    number.  The raw text is pushed to Firebase under ``sensor4/adc``
    either way (original behavior kept).  The parameter is the Tk event
    object; its name shadowing the ``input`` builtin is kept for
    compatibility with the existing bind() call.
    """
    global content
    content = dato.get()
    dato.delete(0, END)
    # Bug fix: the original used six independent ``if`` statements with the
    # ``else`` bound only to the ``== 13`` test, so valid inputs 8..12
    # printed both "correcto" and the error message.
    if 8 <= int(content) <= 13:
        print("correcto")
    else:
        print("ingrese un numero nuevo")
    print(content)
    ref = db.reference('sensor')
    ref.update({
        'sensor4/adc': content
    })
def guardar(input):
    """Handle <Return> in the second entry box: switch the LEDs and persist.

    0 turns both LEDs fully on, 1 turns them off; the raw text is stored
    in Firebase under ``sensor5/adc``.  The parameter is the Tk event
    object delivered by bind().
    """
    global num
    num = dato1.get()
    dato1.delete(0, END)
    value = int(num)
    if value == 0:
        led1.write(1)
        led2.write(1)
    if value == 1:
        led1.write(0)
        led2.write(0)
    print(num)
    sensor_ref = db.reference('sensor')
    sensor_ref.update({
        'sensor5/adc': num
    })
valor2.configure(textvariable=adc_data)
valor2.place(x=130, y=160)
valor3.configure(textvariable=adc_data2)
valor3.place(x=130, y=200)
valor4.configure(textvariable=adc_data3)
valor4.place(x=130, y=240)
prom_1=Button(marco1,text="adc1_update",command=adc_read)
prom_1.place(x=10, y=160)
prom_2=Button(marco1,text="adc2_update",command=adc_read1)
prom_2.place(x=10, y=200)
prom_3=Button(marco1,text="adc3_update",command=adc_read2)
prom_3.place(x=10, y=240)
prom_4=Button(marco1,text="adc3_update",command=update)
prom_4.place(x=10, y=280)
Label(ventana, text="Input: ").place(x=20, y=60)
dato = Entry(ventana)
dato.place(x=90, y=60)
dato.bind('<Return>', entrada)
Label(ventana, text="Input: ").place(x=20, y=100)
dato1 = Entry(ventana)
dato1.place(x=90, y=100)
dato1.bind('<Return>', guardar)
texto.configure(textvariable=content)
texto.place(x=130, y=160)
ventana.mainloop()
|
13,053 | 42377ed48c1f58a52338bbeafca4bcfdb4936827 | import timeit
import random
from collections import Counter
def my_func(elems):
    """Count occurrences of each element, like ``collections.Counter``.

    Returns a plain dict mapping element -> count, keyed in first-seen
    order.
    """
    counts = dict()
    for item in elems:
        counts[item] = counts.get(item, 0) + 1
    return counts
def my_top(elems, n):
    """Return the ``n`` most frequent (element, count) pairs, most common first.

    Counting is done inline (same first-seen key order as my_func); sorted()
    is stable, so ties keep insertion order, matching
    ``collections.Counter.most_common``.
    """
    tally = {}
    for item in elems:
        tally[item] = tally.get(item, 0) + 1
    return sorted(tally.items(), key=lambda pair: -pair[1])[:n]
def main():
    """Benchmark the hand-rolled counter against ``collections.Counter``.

    First sanity-checks that both implementations agree on a random sample,
    then times one run of each over 1,000,000 ints in [0, 100].
    """
    elems = [random.randint(0, 100) for i in range(1000000)]
    assert my_func(elems) == dict(Counter(elems))
    assert my_top(elems, 10) == Counter(elems).most_common(10)
    # Each timed statement regenerates its data via this setup string;
    # random.seed(42) makes the timed runs deterministic.
    setup = """
import random
from collections import Counter
from __main__ import my_func, my_top
random.seed(42)
elems = [random.randint(0, 100) for i in range(1000000)]
"""
    time_1 = timeit.timeit(setup=setup, stmt='my_func(elems)', number=1)
    time_2 = timeit.timeit(setup=setup, stmt='Counter(elems)', number=1)
    time_3 = timeit.timeit(setup=setup, stmt='my_top(elems, 10)', number=1)
    time_4 = timeit.timeit(
        setup=setup, stmt='Counter(elems).most_common(10)', number=1
    )
    print(f"my function: {time_1}")
    print(f"Counter: {time_2}\n")
    print(f"my top: {time_3}")
    print(f"Counter's top: {time_4}")
if __name__ == '__main__':
main()
|
13,054 | e2084649c101995c4ee7b0f0c6b84e281e6d1f3b | import unittest
from datetime import datetime, timedelta
import dateTime
class MyTestCase(unittest.TestCase):
    # Unit test for the local dateTime module's half_birthday helper.
    def test_half_birthday(self):
        """half_birthday(2020, 8, 5) should fall exactly 24 weeks later."""
        date = datetime(2020, 8, 5)
        self.assertEqual(dateTime.half_birthday(2020, 8, 5), date + timedelta(weeks=24))
if __name__ == '__main__':
unittest.main()
|
13,055 | 5422cf7c9fd6f213a886639dcc79912af305aeb1 | def func0(): return 0
print(func0())
# Single positional argument.
def func1(arg): return arg
print(func1('A'))
# Default argument value.
def func2(arg='B'): return arg
print(func2())
# Two positional arguments.
def func3(a, b): return a + b
print(func3('A', 'B'))
# Mix of positional and keyword arguments.
def func4(a, b='b'): return a + b
print(func4('A', b='B'))
# A function without a return statement returns None.
def func5(): print('func5')
r = func5()
print('func5 return is {}'.format(r))
# *args collects extra positionals; **argv collects extra keywords.
def func(*args, **argv):
    print(args, argv)
func(1)
func(1, 2, 3)
func(name='A')
func(name='A', age=4)
func(1, 2, name='A', age=4)
# * and ** also unpack a sequence/mapping at the call site.
l = [1, 2, 3]
d = {'name': 'A', 'age': 4}
func(*l, **d)
func(*[1, 2, 3], **{'name': 'A', 'age': 4})
l = list((1, 2, 3))
d = dict(name='A', age=4)
func(*l, **d)
|
13,056 | 2d684640e0a14b912af0cd6230af235c4af4e167 | #This File is the old file that I actually used to improve on telnetid
#with this file I would insert a telnet session using the parameters on a function
#This will be fixed when I start building the real package
def respond(cmd,t,p):
    """Send *cmd* over telnet session *t* and return the output lines.

    Blocks (via wait) until the prompt string *p* appears in the device
    output.  Python 2 code.
    """
    t.write(cmd)
    return wait(t,p)
def wait(t,p):
    """Drain output from telnet session *t* until prompt *p* appears.

    Busy-polls read_very_eager() (Python 2: note the print statement).
    Pager prompts ("Press any key to continue", "--More--") are answered
    with a space so long output keeps flowing.  Returns the accumulated
    output normalized to '\n' and split into a list of lines.
    """
    output_list = []
    c = ''
    d = ''
    while p not in d:
        c = t.read_very_eager()
        if len(c) > 0:
            d += c
            print c
            output_list.append(c)
        # answer device pagers so output is not stuck mid-page
        if "Press any key to continue" in c or "--More--" in c:
            t.write(" ")
    output_list = ((''.join(output_list)).replace('\r\n','\n')).split('\n')
    return output_list
|
13,057 | 6188fb83c41bbc5edaac94eb47a5f7e4d76aa6d1 | import threading
from trade_engine import TradeEngine
class CommandCenter:
    """Console command loop that drives a TradeEngine from a worker thread."""

    def __init__(self, trade_engine):
        self._trade_engine = trade_engine
        # Worker thread reading console commands forever.
        self._thread = threading.Thread(target=self.run)
        self._thread.start()

    def run(self):
        """Read commands from the console until the process exits."""
        while True:
            input_console = input("> ")
            self.set_if_list_mode(input_console)

    def set_if_list_mode(self, input_console):
        """Handle ``list <gainers|losers>`` by switching the engine's mode."""
        command = self.get_command(input_console)
        if command == "list":
            option = self.get_argument(input_console, 1)
            list_mode = None
            if option == TradeEngine.GAINERS:
                list_mode = TradeEngine.GAINERS
            elif option == TradeEngine.LOSERS:
                list_mode = TradeEngine.LOSERS
            else:
                # Bug fix: previously fell through with list_mode=None and
                # crashed on the string concatenation below.
                self.print_incorrect_input(input_console)
                return
            print("list mode set to: " + list_mode)
            self._trade_engine.change_list_mode(list_mode)

    @classmethod
    def get_command(cls, input_console):
        """Return the first space-separated token of the input line."""
        return input_console.split(" ")[0]

    @classmethod
    def get_argument(cls, input_console, argument_number):
        """Return token *argument_number* of the line (IndexError if absent)."""
        return input_console.split(" ")[argument_number]

    @classmethod
    def print_incorrect_input(cls, input_console):
        # Bug fix: the original printed input() -- prompting the user
        # again -- instead of echoing the offending line.
        print("Error in input: " + input_console)

    def join(self):
        """Block until the console thread finishes (it loops forever)."""
        self._thread.join()
|
13,058 | 345d88093c151b38b9576c0e26df73951a755759 | distancia=float(input('digite a distancia'))
# Bug fix: the original used a decimal comma (0,5 and 0,45), which in
# Python builds a tuple -- e.g. (0, 5*distancia) -- instead of multiplying
# by 0.5/0.45, and then crashed in the '{0:.2f}'.format() call below.
if (distancia<200):
    preco=0.5*(distancia)
    print ('o preco é R${0:.2f}'.format(preco))
else:
    preco=(distancia)*0.45
    print('o preco é R${0:.2f}'.format(preco))
13,059 | f505ed47435d8e8af70feabd9c6a76235332c458 | from flask import (
Blueprint, make_response, send_from_directory
)
bp = Blueprint('pwa', __name__, url_prefix='')
@bp.route('/manifest.json')
def manifest():
    """Serve the PWA manifest from the static folder."""
    return send_from_directory('static', 'manifest.json')
@bp.route('/sw.js')
def service_worker():
    """Serve the service-worker script.

    Cache-Control: no-cache makes browsers revalidate the worker on every
    load so updates roll out promptly.
    """
    response = make_response(send_from_directory('static', 'sw.js'))
    response.headers['Cache-Control'] = 'no-cache'
    return response
|
13,060 | 32bf9cffbebad7f3f530cc128bf3ad90d818aa14 | from ui.ui import Ui
def main():
    """Entry point: construct the Ui and run its main event loop."""
    main_view = Ui()
    main_view.main_loop()
if __name__ == "__main__":
main()
|
13,061 | 824b51fadfb6d3d7b39ac184aa7ba2682fec3789 | #!/usr/bin/env python3
import argparse
NULLDEV='/dev/null'
parser = argparse.ArgumentParser(description='Perform the fast unfolding algorithm over a bivariate distribution.')
parser.add_argument('input_file', metavar='INPUT', type=str,
help='the input file that contains the bivariate distribution')
parser.add_argument('vcount', metavar='N', type=int,
help='the number of vertices in the graph')
parser.add_argument('output_file', metavar='OUTPUT', type=str,
help='the output file to store the detected communities')
parser.add_argument('-t', '--time', metavar='FILE', dest='time_file', type=str, default=NULLDEV,
help='record the consumption time to FILE')
parser.add_argument('-s', '--size', metavar='FILE', dest='size_file', type=str, default=NULLDEV,
help='record the number of vertices in each round to FILE')
parser.add_argument('-i', '--iter', metavar='FILE', dest='iter_file', type=str, default=NULLDEV,
help='record the number of iterations in each round to FILE')
parser.add_argument('-3c', '--three_column', dest='three_column', action='store_true', default=False,
help='use three-column format for the bivariate distribution')
args = parser.parse_args()
target_execute_file = './fastunfolding_3c' if args.three_column else './fastunfolding'
cmd = '{} {} {} {} {} {} > {}'.format(target_execute_file, args.input_file, args.vcount, args.time_file, args.size_file, args.iter_file, args.output_file)
import subprocess
# subprocess.Popen(cmd, shell=True)
print(cmd)
|
13,062 | caa0ad641c5a58397c68432ac4706e186ba8f3d0 |
#用参数做图法画出f(x) anti_f(x) 和 y = x
#f(x) = arcsin(x), x = t, y = arcsin(t)
#
#anti_f(x) = sin(x), x = t, y = sin(t)
def fx(t):
return t
def fy(t):
return arcsin(t)
def anti_fx(t):
return t
def anti_fy(t):
return sin(t)
def cx(t):
return t
def cy(t):
return t
t = linspace(0, 2*pi, 100)
x1 = fx(t)
y1 = fy(t)
x2 = anti_fx(t)
y2 = anti_fy(t)
x3 = cx(t)
y3 = cy(t)
xlabel('x')
ylabel('y')
plot(x1, y1, 'r-', x2, y2, 'g-', x3, y3, 'b-')
|
13,063 | dd3c090b77033b1143b1d1ab1ca4b3f972668f68 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "studservice.settings")
from django.contrib import admin
# Register your models here.
from studserviceapp.models import IzbornaGrupa
admin.site.register(IzbornaGrupa)
|
13,064 | a5f1566ae5713c17ee110e4e0a41837f2041c633 | #partner was Logan Zipp
fname = "mbox-short.txt"
handle = open(fname)
# Histogram of sender domains: count the text after '@' on each
# "From:" header line of the mbox file.
counter = dict()
for line in handle :
    if line.startswith("From:") :
        line = line.rstrip()
        print (line)
        words = line.split()
        # words[1] is the sender address on a "From:" line
        sender = words[1]
        # everything after the '@' is the sender's domain
        pos = sender.find("@")
        email = sender[pos+1:]
        print (email)
        counter[email] = counter.get(email,0) + 1
print (counter)
#print(word,max)
#print (words)
#print (sender)
#print (counter)
13,065 | 2e2d0505ae17e6071175c3ac15259122254d4fa1 | # Generated by Django 3.0.8 on 2020-08-28 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the boolean ``receive`` flag to account.Order."""

    dependencies = [
        ('account', '0005_auto_20200828_0150'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='receive',
            field=models.BooleanField(default=False),
        ),
    ]
|
13,066 | c126150924ffffe63fec267ecc2d805fa00fd9c7 | from random import randint
from Endereco import Endereco
class Pessoa:
    """A person with talk/eat state, hobbies (aggregation) and addresses
    (composition), used to demonstrate OO relationships."""

    # Class-level default; shadowed by the instance attribute set in __init__.
    fala = False

    # __init__ declares the attributes used throughout the class.
    def __init__ (self, nome, idade, sexo, fala=False, come=False, anda=True):
        self.nome = nome
        # NOTE: this assignment goes through the ``idade`` property setter
        # below, which stores valor + 1.
        self.idade = idade
        self.sexo = sexo
        self.come = come
        self.fala = fala
        self.anda = anda
        self.assunto = None  # association
        self.hobby = []  # aggregation
        self.endereco = []  # composition

    # Aggregation method: store the hobby's theme.
    def AdicionaHobby (self, Modalidade):
        self.hobby.append(Modalidade.tema)
        return self.hobby

    # Composition method: build an Endereco and store it as a dict.
    def AdicionaEndereco (self, cidade, estado):
        endCompleto = Endereco(cidade, estado)
        novoEndereco = {'cidade': endCompleto.cidade, 'estado': endCompleto.estado}
        # NOTE(review): list.append returns None, so this method returns None.
        return self.endereco.append(novoEndereco)

    # Behavior methods.
    def Falar(self):
        # Cannot talk with a full mouth.
        if self.come:
            print(f'{self.nome} não pode falar com a boca cheia.')
            return
        self.fala = True
        print(f'O/A {self.nome} está falando sobre {self.assunto}.')

    def PararFalar(self):
        print(f'{self.nome} parou de falar.')
        self.fala = False

    def PararComer(self):
        print(f'{self.nome} parou de comer.')
        self.come = False

    def Comer(self, alimento):
        # Cannot eat while talking.
        if self.fala:
            print(f'{self.nome} não pode comer falando.')
            return
        self.come = True
        print(f'O/A {self.nome} está comendo {alimento}.')

    # classmethod accesses class attributes declared outside __init__;
    # by convention the first parameter is ``cls``.
    @classmethod
    def Falar2(cls):
        if cls.fala == False:
            print('Pegou o False do cls.')
        else:
            print('Pegou o True do Self')

    # staticmethod behaves like a plain function: no access to cls or self.
    @staticmethod
    def GeraID(nome):
        id = randint(0, 10000)
        print(f'O ID do {nome} é {id}.')

    # property getter/setter pair for structured access to ``idade``.
    @property
    def idade(self):
        return self._idade

    @idade.setter
    def idade(self, valor):
        # Stores the given value plus one (original behavior).
        self._idade = valor + 1
|
13,067 | 319941ae42478a93b969099baff3999f77daf384 | from expungeservice.expunger import Expunger
from expungeservice.models.record import Record
class ExpungerFactory:
    """Test helper: builds an Expunger over an empty Record."""
    @staticmethod
    def create():
        # Expunger seeded with a Record that contains no cases.
        return Expunger(Record([]))
|
13,068 | 508b862e874e1899dfa2d0c41f2236a2fd6938e8 | from Animal import Animal
class Dog(Animal):
    """An Animal subclass whose sound is "woof"."""
    @staticmethod
    def speak():
        # NOTE(review): declared @staticmethod -- confirm the Animal base
        # class expects a static (not instance) method here.
        print("woof")
# self = the current instance
# an instance = an object of that type
# i.e. an object stored in a variable
|
13,069 | beccc97df880a57cce6bc65b4f59a7e1decd23de | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/11/9 23:57
# @Author : Liu jinxin
# @Github : liujinxinhen6
# @File : 异常处理.py
# @Function: 异常处理
# URLError是HTTPError的父类
# URLRrror出现的原因:
# 1 连不上服务器
# 2 远程url不存在
# 3 无网络
# 4 触发HTTPError
# ...
import urllib.request
import urllib.error
# Demonstrate catching URLError: print the HTTP status code and/or the
# failure reason, depending on which attributes the error carries
# (HTTPError has .code; plain URLError only has .reason).
try:
    data = urllib.request.urlopen('http://blog.csdn.net').read().decode('utf-8')
    print(data)
except urllib.error.URLError as e:
    if hasattr(e, 'code'):
        print(e.code)
    if hasattr(e, 'reason'):
        print(e.reason)
13,070 | bada0f1bda1b65e58b6b4aa7d769aac2ac261f4e | import area
# NOTE(review): the density constant below is never used -- the printed
# figure is just the field's area, not area * density.  Confirm intent.
PEOPLE_AT_CONCERT_PER_SQUARE_METER = 2
FIELD_LENGTH = 240
FIELD_WIDTH = 45
PEOPLE_AT_CONCERT = area.rectangle(FIELD_LENGTH, FIELD_WIDTH)
print("Estão presentes no show aproximadamente", PEOPLE_AT_CONCERT, "pessoas")
|
def SubArraySum(arr, n):
    """Return the sum of all contiguous-subarray sums of arr[0:n].

    Each element arr[i] appears in (i + 1) * (n - i) subarrays, which
    collapses the original O(n^2) double loop into a single O(n) pass.
    The per-iteration debug prints of the original were removed.
    """
    result = 0
    for i in range(n):
        result += arr[i] * (i + 1) * (n - i)
    return result
arr = [1, 2, 3]
n = len(arr)
print ("Sum of SubArray :" ,SubArraySum(arr, n))
|
13,072 | 2b5ee46cf68f08a9e25d46d76295bc9da82dd4aa | from __future__ import annotations
from typing import Iterable
from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
from rich.segment import Segment
from rich.style import Style
# def add_vertical_bar(lines:list[list[Segment]], size:float, window_size:float, position:float) -> None
# bar = render_bar(len(lines), size, window_size, po)
class VerticalBar:
    """Rich renderable pairing pre-rendered lines with a vertical scrollbar.

    Each row from *lines* is emitted followed by one scrollbar cell, so the
    bar occupies the right-most column of the rendered block.
    """

    def __init__(
        self,
        lines: list[list[Segment]],
        height: int,
        virtual_height: int,
        position: int,
        overlay: bool = False,
    ) -> None:
        # lines: one list of Segments per visible row
        self.lines = lines
        # height: visible rows; virtual_height: total scrollable rows
        self.height = height
        self.virtual_height = virtual_height
        # position: top of the visible window within the virtual space
        self.position = position
        # NOTE(review): overlay is stored but never read here -- confirm.
        self.overlay = overlay

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Yield each content line followed by its scrollbar segment."""
        # NOTE(review): render_bar is called with its default window_size
        # (20) rather than this bar's height -- confirm intended.
        bar = render_bar(
            size=self.height,
            virtual_size=self.virtual_height,
            position=self.position,
        )
        new_line = Segment.line()
        # zip truncates to the shorter of lines/bar
        for line, bar_segment in zip(self.lines, bar):
            yield from line
            yield bar_segment
            yield new_line
def render_bar(
    size: int = 25,
    virtual_size: float = 50,
    window_size: float = 20,
    position: float = 0,
    bar_style: Style | None = None,
    back_style: Style | None = None,
    ascii_only: bool = False,
    vertical: bool = True,
) -> list[Segment]:
    """Render a scrollbar as a list of *size* single-character Segments.

    The thumb covers the track cells corresponding to the window
    [position, position + window_size] within *virtual_size*; half-block
    glyphs at the thumb's ends give sub-cell precision.

    NOTE(review): start_back_segment is built from ``half_end`` with the
    *bar* style, while end_back_segment uses ``half_end`` with the back
    style -- the naming suggests a copy/paste slip; confirm intended
    glyph/style before relying on the edge rendering.
    """
    # Pick the glyph set: solid body plus half-cell start/end caps.
    if vertical:
        if ascii_only:
            solid = "|"
            half_start = "|"
            half_end = "|"
        else:
            solid = "┃"
            half_start = "╻"
            half_end = "╹"
    else:
        if ascii_only:
            solid = "-"
            half_start = "-"
            half_end = "-"
        else:
            solid = "━"
            half_start = "╺"
            half_end = "╸"
    _bar_style = bar_style or Style.parse("bright_magenta")
    _back_style = back_style or Style.parse("#555555")
    _Segment = Segment
    start_bar_segment = _Segment(half_start, _bar_style)
    end_bar_segment = _Segment(half_end, _bar_style)
    bar_segment = _Segment(solid, _bar_style)
    start_back_segment = _Segment(half_end, _bar_style)
    end_back_segment = _Segment(half_end, _back_style)
    back_segment = _Segment(solid, _back_style)
    # Start with an all-background track, then paint the thumb over it.
    segments = [back_segment] * size
    # Each track cell represents step_size virtual rows.
    step_size = virtual_size / size
    start = position / step_size
    end = (position + window_size) / step_size
    start_index = int(start)
    end_index = int(end)
    bar_height = (end_index - start_index) + 1
    segments[start_index:end_index] = [bar_segment] * bar_height
    # Fractional start: use a half glyph on whichever side is closer.
    sub_position = start % 1.0
    if sub_position >= 0.5:
        segments[start_index] = start_bar_segment
    elif start_index:
        segments[start_index - 1] = end_back_segment
    # Fractional end: same treatment at the bottom of the thumb.
    sub_position = end % 1.0
    if sub_position < 0.5:
        segments[end_index] = end_bar_segment
    elif end_index + 1 < len(segments):
        segments[end_index + 1] = start_back_segment
    return segments
if __name__ == "__main__":
from rich.console import Console
from rich.segment import Segments
console = Console()
bar = render_bar(
size=20,
virtual_size=100,
window_size=50,
position=0,
vertical=False,
ascii_only=False,
)
console.print(Segments(bar, new_lines=False))
|
# For each test case, count split points where the string decomposes as
# X X Y Y: a prefix repeated twice (length l each) followed by the
# remaining tail split into two halves that must also be equal.
for t in range(int(input())):
    s = input()
    lens = len(s)
    # need at least four characters to form two repeated pairs
    if lens < 4:
        print(0)
        continue
    ways = 0
    for i in range(int((lens - 2) / 2)):
        l = i + 1
        i1 = 0
        i2 = l
        i3 = 2 * l
        # split the tail [i3:] into two halves (floor split; an odd tail
        # can never satisfy the equality below)
        i4 = i3 + int((lens - i3) / 2)
        i5 = lens
        # print(i1, i2, i3, i4, i5)
        if s[i1:i2] == s[i2:i3] and s[i3:i4] == s[i4:i5]:
            ways += 1
    print(ways)
13,074 | 9be4268e280fac6458cb9cdcfa33bf71ba2861ce | # 导入系统第三方模块
import datetime
import json
# 导入django自带的功能
from django.views.decorators.http import require_http_methods
from django.shortcuts import HttpResponse, render, redirect
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
# 导入自定义的功能
from Lib.Utils import *
from Lib.Model import *
from Business.BllApproveInfo import *
from Business.BllUser import *
from Business.BllMedicament import *
from Business.BllMedicamentApprove import *
from Business.BllMedicamentTemplate import *
from Business.BllWarning import *
from Business.BllClient import *
from Business.BllHumitureRecord import *
from Business.BllLog import *
from Business.BllModule import *
from DataEntity.EntityMedicamentApprove import *
def get_user_ip(request):
    """Return the visiting user's IP address.

    Prefers the X-Forwarded-For header (set by proxies); falls back to
    the direct socket address in REMOTE_ADDR.
    """
    meta = request.META
    if 'HTTP_X_FORWARDED_FOR' in meta:
        return meta['HTTP_X_FORWARDED_FOR']
    return meta['REMOTE_ADDR']
@Utils.log_exception
@require_http_methods(['GET'])
def home(request):
if request.method == 'GET':
# 获取预警数量
visitType=''
try:
user_ = request.session['login_user']
user_id=user_["UserId"]
user = BllUser().findEntity(user_id)
role_id = user.RoleId
SQL = """
SELECT c.ModuleId, c.ModuleCode, c.SortIndex, c.ModuleName from(SELECT a.ModuleId, b.ModuleCode,
b.SortIndex, b.ModuleName FROM `RMS_ModuleRelation` as a LEFT JOIN RMS_Module as b on a.ModuleId =
b.ModuleId WHERE ObjectId = :user_id and ObjectType = 2 and a.ModuleType=2 UNION
SELECT a.ModuleId, b.ModuleCode, b.SortIndex, b.ModuleName FROM `RMS_ModuleRelation` as a
LEFT JOIN RMS_Module as b on a.ModuleId = b.ModuleId WHERE ObjectId = :role_id and ObjectType = 1 and a.ModuleType=2)
as c ORDER BY c.SortIndex asc;
"""
module_relation_obj_list = BllUser().execute(SQL, {'user_id': user_id, 'role_id': role_id}).fetchall()
module_relation_obj_list = Utils.mysqlTable2Model(module_relation_obj_list)
# 用列表推导式生成一个ModuleCode列表
object_id_list = [x['ModuleCode'] for x in module_relation_obj_list]
print('**********',object_id_list)
#SQL = 'SELECT count(*) as number_ FROM `RMS_Warning` WHERE IsSolve = 0 and now() > WarningDate;'
SQL = 'SELECT count(*) as number_ FROM `RMS_Warning` WHERE IsSolve = 0'
warning_obj = BllWarning().execute(SQL).fetchone()
warning_nb = warning_obj.number_
try:
user = request.session['login_user']
roleName = user['RoleName']
userMD5Pwd = user['Password'].lower()
visitType = request.GET.get('visitType')
request.session['visitType']=visitType
# if(((visitType=='1') or (visitType=='2'))):
# request.session['visitType']='1'
# else:
# request.session['visitType']=''
request.session.set_expiry(0)
except Exception as e:
print(e)
roleName = ''
return render(request, 'home.html', locals())
except Exception as e:
BllWarning().session.rollback()
logger.debug('数据为空', e)
return render(request, 'home.html', locals())
finally:
BllWarning().session.close()
# 获取主页今日入库、今日领用、今日归还 库存预警、过期试剂、保质期预警
@Utils.log_exception
@require_http_methods(['GET'])
def main(request):
if request.method == 'GET':
try:
# 今日入库数量
SQL = """
SELECT sum(CASE WHEN RecordType=1 then 1 else 0 end) as 'putIn',
sum(CASE WHEN RecordType=2 then 1 else 0 end) as 'useCount',
sum(CASE WHEN RecordType=3 then 1 else 0 end) as 'returnCount'
FROM `RMS_MedicamentRecord` as a RIGHT JOIN RMS_Medicament as b on a.MedicamentId = b.MedicamentId
WHERE DATE_FORMAT(CreateDate,'%Y-%m-%d')= DATE_FORMAT(NOW(),'%Y-%m-%d');
"""
HumitureRecord_obj = BllHumitureRecord().execute(SQL).fetchone()
# 今日入库
putInCount = HumitureRecord_obj['putIn']
# 今日领用
useCount = HumitureRecord_obj['useCount']
# 今日归还
returnCount = HumitureRecord_obj['returnCount']
SQL = """
SELECT SUM(CASE when ObjectType=3 then 1 else 0 end) as DurgSurplusEarlyWarning,
SUM(CASE when ObjectType=2 then 1 else 0 end) as expireWarning,
SUM(CASE when ObjectType=1 then 1 else 0 end) as shelflifeWarning FROM `RMS_Warning` where IsSolve = 0
"""
# 获取全部预警
warning_obj = BllWarning().execute(SQL).fetchone()
# 保质期预警数量
shelflifeWarning = warning_obj['shelflifeWarning']
# 库存预警数量
DurgSurplusEarlyWarning = warning_obj['DurgSurplusEarlyWarning']
# 过期预警数量
expireWarning = warning_obj['expireWarning']
return render(request, 'main.html', locals())
except Exception as e:
logger.debug('数据异常, ' + str(e))
return render(request, 'main.html')
# 条码登录
def account_barcode(request):
barcode = request.GET.get('barCode')
user_obj = BllUser().findEntity(EntityUser.BarCode == barcode)
if user_obj:
if user_obj.IsEnabled == 1:
request.session['login_user'] = json.loads(Utils.resultAlchemyData(user_obj))
return JsonResponse( Utils.resultData(1, '登录成功'))
else:
logger.info(user_obj.RoleName + '正在尝试登陆后台管理')
return JsonResponse(Utils.resultData(0, '该账户已被禁用, 暂时无法登陆, 请联系管理员'))
else:
return JsonResponse(Utils.resultData(0, '该条码用户不存在!'))
# post请求csrf失效
@require_http_methods(['GET', 'POST'])
@csrf_exempt
def account_login(request):
if request.method == 'GET':
try:
del request.session['login_user']
except Exception as e:
pass
return render(request, 'account/login.html', locals())
elif request.method == 'POST':
try:
userAccount = request.POST['userAccount']
userPassword = request.POST['userPassword']
print(userAccount,'-'+userPassword)
user = BllUser().login(userAccount, userPassword)
print(user)
if user:
if user.IsEnabled == 1:
request.session['login_user'] = json.loads(Utils.resultAlchemyData(user))
visitType = request.GET.get('visitType')
#print('fffffffffffff',visitType)
request.session['visitType']=visitType
# if(((visitType=='1') or (visitType=='2'))):
# request.session['visitType']='1'
# else:
# request.session['visitType']=''
request.session.set_expiry(0)
logger.info('登录成功')
user.LastVisitDate = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
BllUser().update(user)
ip_ = get_user_ip(request)
if ip_ is None:
ip_ = ''
log_obj = EntityLog(LogId=str(Utils.UUID()), CustomerId=user.CustomerId, LogType=1,
OperateUserId=user.UserId, OperateAccount=user.Account, OperateUserName=user.RealName,
OperateTypeCode='登录后台成功', OperateType='登录成功', IPAddress=ip_,
ExecuteResult='用户登录后台成功', OperateDate=datetime.datetime.now(),
IsAdd=1)
BllLog().insert(log_obj)
return JsonResponse(Utils.resultData(1, '登录成功'))
else:
logger.info(user.RoleName + '正在尝试登陆后台管理')
return JsonResponse(Utils.resultData(0, '您不是管理员或该账户已被禁用, 暂时无法登陆, 请联系管理员'))
else:
logger.info('账号或密码不正确, 登录失败')
return JsonResponse(Utils.resultData(0, '账号或密码不正确, 登录失败'))
except Exception as e:
return JsonResponse(Utils.resultData(0, '数据异常, 登录失败'))
@require_http_methods(['GET'])
def account_logout(request):
    """Log the current user out and redirect to the login page.

    Preserves the session's ``visitType`` in the redirect query string so
    the login page keeps the same entry mode.  Raises KeyError if no user
    is logged in (original behavior kept).
    """
    if request.method == 'GET':
        # (removed an unused HTTP_REFERER lookup and dead commented code)
        del request.session['login_user']
        visitType = request.session.get('visitType')
        if visitType:
            return redirect('/account/login?visitType='+visitType)
        else:
            return redirect('/account/login')
@require_http_methods(['GET'])
def drug_scanBarCode(request):
if request.method == 'GET':
return render(request, 'drug/scanBarCode.html', locals())
# 获取试剂展示页面
@require_http_methods(['GET'])
def drug_index(request):
    """Render the drug list page.

    Falls back to an empty role name when no user is logged in; the
    optional ``searchValue`` query parameter is exposed to the template
    via locals() only when present (original behavior kept).
    """
    if request.method == 'GET':
        try:
            user = request.session['login_user']
            roleName = user['RoleName']
        except Exception as e:
            # no logged-in session (or malformed user dict)
            print(e)
            roleName = ''
        # Bug fix: the original used a bare ``except:`` with an identical
        # render call duplicated in both branches; catch only the KeyError
        # of a missing parameter and render once.
        try:
            searchValue = request.GET['searchValue']
        except KeyError:
            pass
        return render(request, 'drug/index.html', locals())
# 获取试剂展示处理视图函数, 返回JSON数据
@require_http_methods(['GET'])
def drug_GetDrugListJson(request):
if request.method == 'GET':
name = request.GET.get("searchValue", '')
FlowNo_type = int(request.GET.get("FlowNo", 1))
clientId = request.GET.get("clientId", '')
status = request.GET.get("status", '')
isMyUse = request.GET.get("isMyUse", '')
user = request.session['login_user']
allSearch = request.GET.get("allSearch", '1')
drug_list = BllMedicament().getAllDrugList(name, PageParam(1, 0),clientId)
if(status!=''):
drug_list=[x for x in drug_list if x.get("Status")==int(status)]
if(isMyUse=='1'):
drug_list=[x for x in drug_list if x.get("ByUserId")==user["UserId"]]
return JsonResponse({'data': drug_list,'code' : 0,'msg':'','count':len(drug_list)})
#根据试剂ID get方式查询返回试剂信息,post修改试剂信息
@require_http_methods(['GET', 'POST'])
@csrf_exempt
def drug_form(request):
if request.method == 'GET':
drug_id=request.GET.get("drug_id","")
drug_obj = BllMedicament().findEntity(drug_id)
return render(request, 'drug/form.html', locals())
elif request.method == 'POST':
drug_id=request.POST.get("MedicamentId","")
CASNumber = request.POST['CASNumber']
BarCode = request.POST.get('BarCode','')
EnglishName = request.POST['EnglishName']
ProductionDate = request.POST['ProductionDate']
ExpirationDate = request.POST['ExpirationDate']
ShelfLife = request.POST.get("ShelfLife",'0')
Manufacturer = request.POST['Manufacturer']
Distributor = request.POST['Distributor']
Description = request.POST.get('Description','')
ApproveContent = request.POST.get('approveContent','')
ImageUrl = request.POST.get('ImageUrl','')
Price = request.POST.get('Price','') or 0
Remark1 = request.POST['Remark1']
Remark2 = request.POST['Remark2']
Remark3 = request.POST['Remark3']
drug_obj = BllMedicament().findEntity(drug_id)
drug_obj_approve = EntityMedicamentApprove()
drug_obj.__dict__["_sa_instance_state"]=drug_obj_approve.__dict__["_sa_instance_state"]
drug_obj.CASNumber = CASNumber
drug_obj.EnglishName = EnglishName
drug_obj.ProductionDate = ProductionDate
drug_obj.ExpirationDate = ExpirationDate
drug_obj.ImageUrl = ImageUrl
drug_obj.ShelfLife = ShelfLife
drug_obj.Manufacturer = Manufacturer
drug_obj.Distributor = Distributor
drug_obj.Price = Price
drug_obj.Remark1 = Remark1
drug_obj.Remark2 = Remark2
drug_obj.Remark3 = Remark3
user_ = request.session['login_user']
if(BllMedicamentApprove().findEntity(drug_id)):
return JsonResponse(Utils.resultData('0', '此药剂信息存在未处理的修改申请!', 'drug_obj'))
BllMedicamentApprove().update(drug_obj)
BllApproveInfo().addApproveInfo(ApproveTypeAllCode.DrugEdit,ApproveContent,drug_id,BarCode,Description,user_['UserId'])
return JsonResponse(Utils.resultData('1', '提交申请成功', 'drug_obj'))
# 获取试剂类型的JSON数据
@require_http_methods(['GET'])
def drug_GetDrugTypeListJson(request):
if request.method == 'GET':
return render(request, 'drug/drugTypeIndex.html', locals())
# 访问入库模板视图处理函数
@require_http_methods(['GET'])
def drugTemplate_index(request):
    """Render the stock-in template list page.

    ``searchValue`` is an optional query parameter; when present it is
    exposed to the template via locals() (original behavior kept).
    """
    if request.method == 'GET':
        # Bug fix: replaced a bare ``except:`` (which silently swallowed
        # every error) with the specific KeyError raised by a missing
        # query parameter; the render call is written once.
        try:
            searchValue = request.GET['searchValue']
        except KeyError:
            pass
        return render(request, 'drugTemplate/index.html', locals())
# 新增单次模块视图处理函数
@require_http_methods(['GET'])
def drugTemplate_itemForm(request):
if request.method == 'GET':
return render(request, 'drugTemplate/itemForm.html', locals())
# 新增单次模块视图处理函数
@require_http_methods(['GET'])
def drugTemplate_update_form(request, template_id):
if request.method == 'GET':
template_obj = BllMedicamentTemplate().findEntity(template_id)
# 将template_obj中的字符串类型转化为列表取第一个值为字典类型
template_content = eval(template_obj.TemplateContent)[0]
return render(request, 'drugTemplate/form.html', locals())
# 新增试剂模板视图处理函数
@require_http_methods(['GET'])
def drugTemplate_form(request):
if request.method == 'GET':
NewDrugTemplateOrderCode='40'+Utils.createOrderCode()
return render(request, 'drugTemplate/form.html', locals())
# 删除选中药品模板视图处理函数
@require_http_methods(['POST'])
@csrf_exempt
def drugTemplate_deleteTemplate(request):
if request.method == 'POST':
templateId = request.POST['templateId']
entity_tem_obj = BllMedicamentTemplate().findEntity(templateId)
entity_tem_obj.IsWaitExport = 0
BllMedicamentTemplate().update(entity_tem_obj)
return JsonResponse(Utils.resultData('1', '删除成功', ''))
# 返回获取ClientListJson数据
@require_http_methods(['GET'])
def drugTemplate_clientListJson(request):
if request.method == 'GET':
template_cls_list = BllClient().findList().order_by(asc(EntityClient.ClientCode)).all()
# 将获取到的对象转化成字符串
template_cls_list = Utils.resultAlchemyData(template_cls_list)
return JsonResponse({'data': json.loads(template_cls_list)})
# 返回获取ClientListJson数据
@require_http_methods(['POST'])
@csrf_exempt
def drugTemplate_saveTemplateData(request):
if request.method == 'POST':
TemplateId = request.POST['TemplateId']
TemplateName = request.POST['TemplateName']
ClientId = request.POST['ClientId']
TemplateContent = request.POST['TemplateContent']
itemTemplateCount = request.POST['itemTemplateCount']
# 获取client对象 用来保存ClientName的值
client_obj = BllClient().findEntity(ClientId)
if not TemplateId:
# 随机生成uuid字符串作为主键
str_uuid = str(Utils.UUID())
# 获取当前系统时间
CreateDate = (datetime.datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# 获取创建用户姓名
CreateUserName = request.session['login_user']['RealName']
CreateUserId = request.session['login_user']['UserId']
SQL = """
SELECT BarCodeCount, StartBarCode FROM `RMS_MedicamentTemplate` where StartBarCode = (SELECT MAX(StartBarCode) from RMS_MedicamentTemplate)
"""
tem_obj = BllMedicament().execute(SQL).fetchone()
if tem_obj is None:
max_barcode = 100001
else:
max_barcode = tem_obj.StartBarCode
BarCodeCount = tem_obj.BarCodeCount
# 获取最大的开始barcode + 数量
max_barcode = int(max_barcode) + int(BarCodeCount)
StartBarCode = max_barcode
# 新增试剂模板记录
account = EntityMedicamentTemplate(TemplateId=str_uuid, TemplateName=TemplateName, ClientId=ClientId,
ClientName=client_obj.ClientName, TemplateContent=TemplateContent,
CreateDate=CreateDate, CreateUserId=CreateUserId, CustomerId=client_obj.CustomerId,
CreateUserName=CreateUserName, IsWaitExport=1, BarCodeCount=int(itemTemplateCount),
StartBarCode=str(StartBarCode))
BllMedicamentTemplate().insert(account)
return JsonResponse(Utils.resultData('1', '添加成功', ''))
else:
# 修改模板内容
template_obj = BllMedicamentTemplate().findEntity(TemplateId)
template_obj.TemplateName = TemplateName
template_obj.CustomerId = client_obj.CustomerId
template_obj.ClientId = ClientId
template_obj.ClientName = client_obj.ClientName
template_obj.TemplateContent = TemplateContent
BllMedicamentTemplate().update(template_obj)
return JsonResponse(Utils.resultData('1', '修改成功', ''))
# 获得药品入库模板的JSON数据
@require_http_methods(['GET'])
def drugTemplate_getTemplateListJson(request):
if request.method == 'GET':
TemplateName = request.GET.get('TemplateName', '')
CreateUserName = request.GET.get('CreateUserName', '')
TemplateContent = request.GET.get('TemplateContent', '')
ClientId = request.GET.get('ClientId', '')
curPage = int(request.GET.get('page', '1'))
pageSize = int(request.GET.get('limit', '10'))
pageParam = PageParam(curPage,pageSize)
try:
data = BllMedicamentTemplate().getAllTemplateList(pageParam,TemplateName,CreateUserName,TemplateContent,ClientId)
return JsonResponse({'data': json.loads(Utils.resultAlchemyData(data)),'code' : 0,'msg':'','count':pageParam.totalRecords})
except Exception as e:
print(e)
return JsonResponse({'data': [],'code' : 0,'msg':'','count':1})
@require_http_methods(['GET'])
def home_homeDrugRecord(request, drug_id):
    """Render the per-drug record page for `drug_id` (GET only)."""
    if request.method != 'GET':
        return None
    # Explicit template context instead of locals(): same two names.
    context = {'request': request, 'drug_id': drug_id}
    return render(request, 'home/homeDrugRecord.html', context)
@require_http_methods(['GET'])
def log_index(request):
    """Render the log index page (GET only)."""
    if request.method != 'GET':
        return None
    # Explicit template context instead of locals(): same single name.
    return render(request, 'log/index.html', {'request': request})
@require_http_methods(['GET'])
def drug_drugTypeForm(request):
    """Render the drug-type form page (GET only)."""
    if request.method != 'GET':
        return None
    # Explicit template context instead of locals(): same single name.
    return render(request, 'drug/drugTypeForm.html', {'request': request})
# 404页面处理方案 (custom 404 handler)
def page_not_found(request,exception):
    """Project-wide 404 handler: log the miss and return a plain-text page."""
    logger.debug('页面没有找到')
    # Fix: a not-found handler must return HTTP 404, not the implicit 200.
    return HttpResponse('地址错误', status=404)
#
# # 500服务器错误处理方案
# def server_error(request):
# logger.debug('服务器错误')
# return redirect('home')
|
13,075 | 2d735aa0ea8cc0df96443b56e323eb1f6d7e11e4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'vladimirg'
__version__ = '1.2'
from zabbix_api import ZabbixAPI
import time
import json
import re
import sys
import getpass
import argparse
import os
execfile(os.path.dirname(os.path.realpath(__file__)) + '/../git_update.py')
#Try to get update from git
if git_check_update(os.path.dirname(os.path.realpath(__file__))) == 1:
# if not up to day update and exit
exit (0)
#Define zabbix config file
zbx_conf_file =os.path.dirname(os.path.realpath(__file__)) + '/../conf/zabbix.conf'
# Get zabbix server connect credentials
for tmp_line in open(zbx_conf_file):
if "server" in tmp_line: zbx_server = str(tmp_line.split("=")[1]).rstrip()
if "user" in tmp_line: zbx_user = str(tmp_line.split("=")[1]).rstrip()
if "pass" in tmp_line: zbx_pass = str(tmp_line.split("=")[1]).rstrip()
# Connect to server
zapi = ZabbixAPI(zbx_server)
zapi.login(zbx_user,zbx_pass)
parser = argparse.ArgumentParser(description='Arguments to add issue to redmine')
parser.add_argument('--hosts', required=True, action='store', default=None, dest='Hosts', help='Hosts to add in maintenance. To split hosts use , ')
parser.add_argument('--period', required=True, action='store', default=None, dest='Period', help='Period of maintenance. Can use D(day) or H(hourse)')
parser.add_argument('--desc', required=False, action='store', default=None, dest='Desc', help='Description of maintenance period')
args = parser.parse_args()
## Define variable
zbx_maintenance_hosts = []
zbx_hostname = args.Hosts
zbx_maintenance_duration = args.Period
if args.Desc:
zbx_maintenance_desc = args.Desc
else:
zbx_maintenance_desc = " "
active_since = int(time.time())
#Get maintenance in unix time
# Parse the --period argument: "<n>H" means n hours, "<n>D" means n days;
# anything else aborts the script with exit code 1.
if re.search('H',zbx_maintenance_duration):
    #active_till = int(re.findall('(\d+)',Period)[0])*3600 + active_since
    maintenance_duration = int(re.findall('(\d+)',zbx_maintenance_duration)[0])*3600
elif re.search('D',zbx_maintenance_duration):
    #active_till = int(re.findall('(\d+)',Period)[0])*3600*24 + active_since
    maintenance_duration = int(re.findall('(\d+)',zbx_maintenance_duration)[0])*3600*24
else:
    print ("Wrong format of period")
    exit(1)
## Make connect to zabbix server
zbx_conf_file =os.path.dirname(os.path.realpath(__file__)) + '/../conf/zabbix.conf'
# Get zabbix server connect credentials
zapi = ZabbixAPI(zbx_server)
zapi.login(zbx_user,zbx_pass)
# Get list of exists maintenance
zbx_maintaince = zapi.maintenance.get({"output": "extend",
"selectGroups": "extend",
"selectTimeperiods": "extend"})
#Delete old maintenance
for item in zbx_maintaince:
if int(time.time()) > int(item['active_till']):
zbx_maintaince = zapi.maintenance.delete([item['maintenanceid']])
## Get list with maintenance hosts
zbx_hostname = zbx_hostname.split(",")
# Generate maintenance name
zbx_maintenance_name = getpass.getuser()+"_"+ zbx_hostname[0].split(".")[1]+"_maintenance"
# Get hosts id
for tmp in zbx_hostname:
tmp = re.sub("^\s+|\n|\r|\s+$", '',tmp)
host = zapi.host.get({"output": "extend",
"filter": { "name":[tmp]}})
zbx_maintenance_hosts.append(str(host[0]['hostid']))
## test exists zabbix maintanace plane
zbx_maintaince = zapi.maintenance.get({"output": "extend",
"selectGroups": "extend",
"selectTimeperiods": "extend",
"filter":{"name":[zbx_maintenance_name]}})
if zbx_maintaince:
zbx_maintaince = zapi.maintenance.get({"output": "extend", "selectHosts":"extend", "filter": { "name":[zbx_maintenance_name]},
"selectGroups": "extend",
"selectTimeperiods": "extend"})
zbx_maintenanceid = zbx_maintaince[0]['maintenanceid']
for item in zbx_maintaince[0]['hosts']:
if item['hostid'] not in zbx_maintenance_hosts:
zbx_maintenance_hosts.append(item['hostid'])
zbx_maintaince = zapi.maintenance.update({"description":zbx_maintenance_desc,
'maintenanceid': zbx_maintenanceid,
"active_since":active_since,
"active_till":maintenance_duration + active_since ,
"hostids":json.loads(json.dumps(zbx_maintenance_hosts)),
"timeperiods":[
{ "start_date":active_since,
"period":maintenance_duration
} ]
})
if zbx_maintaince:
print ("Maintenance period update [Ok]")
else:
zbx_maintaince = zapi.maintenance.create({"description":zbx_maintenance_desc,
"name":zbx_maintenance_name,
"active_since":active_since,
"active_till":maintenance_duration + active_since ,
"hostids":json.loads(json.dumps(zbx_maintenance_hosts)),
"timeperiods":[
{ "period":maintenance_duration} ]
})
if zbx_maintaince:
print ("Maintenance period create [Ok]")
|
13,076 | b1fe461dde4037f14699d13f0d4a9d7e6d066674 | import sys
from common import readFasta
from sss import shortestSuperString
if __name__ == '__main__':
    # Python 2 script (statement-form print): read FASTA records from the
    # file given on the command line and print their shortest superstring.
    dnas = readFasta(sys.argv[1])
    print shortestSuperString(dnas)
|
13,077 | d1126bf28f8f45f966d82df3a863a84aadaf7151 | # import start
import numpy as np
### customer code start
def main(input_data, context):
    """One scan of an incremental (velocity-form) PID controller.

    Reads the pin bundle from input_data['IN1'], keeps setpoint / feed-forward
    / error history in the mutable `context` dict across calls, and returns a
    dict keyed by the pin name found in input_data['OUT1'] carrying the new
    manipulated value plus the three PID contribution terms.
    """
    pins = input_data["IN1"]

    def pin(name):
        return pins[name]['value']

    mv = pin('mv')
    pv = pin('pv')
    sp = pin('sp')
    mv_hi = pin('mvup')
    mv_lo = pin('mvdown')
    # Feed-forward input and gain are optional pins; default to 0 when absent.
    ff = pin('ff') if 'ff' in pins else 0
    kf = pin('kf') if 'kf' in pins else 0
    kp = pin('kp')
    ki = pin('ki')
    kd = pin('kd')
    dead_zone = pin('deadZone')
    dmv_high = pin('dmvHigh')
    dmv_low = pin('dmvLow')

    # Seed persistent state on the first call.
    context.setdefault('sp', sp)
    context.setdefault('ff', ff)

    err = sp - pv
    if context['sp'] != sp:
        # Setpoint changed: restart the error history.
        context['sp'] = sp
        context['residualList'] = [err]
    if 'residualList' not in context:
        context['residualList'] = [err]
    else:
        context['residualList'].append(err)
        if len(context['residualList']) >= 3:
            # Keep only the three most recent errors.
            context['residualList'] = context['residualList'][-3:]

    errors = context['residualList']
    partkp = partki = partkd = 0
    if len(errors) == 3:
        e0, e1, e2 = errors  # oldest .. newest
        partkp = kp * (e2 - e1)
        partki = ki * e2
        partkd = kd * (e2 - 2 * e1 + e0)
        delta_u = partkp + partki + partkd
    else:
        # Not enough history yet for the incremental formula.
        delta_u = 0

    # Feed-forward acts on the *change* in ff between scans.
    delta_u += (ff - context['ff']) * kf
    context['ff'] = ff

    # Rate-limit the move to +/- dmv_high.
    if delta_u > dmv_high:
        delta_u = dmv_high
    elif delta_u < -dmv_high:
        delta_u = -dmv_high
    # Suppress moves smaller than dmv_low.
    if abs(delta_u) < dmv_low:
        delta_u = 0
    # No action while pv sits inside the setpoint dead zone.
    if sp - abs(dead_zone) < pv < sp + abs(dead_zone):
        delta_u = 0

    new_mv = mv + delta_u
    # Clamp the output to its operating range.
    if new_mv >= mv_hi:
        new_mv = mv_hi
    elif new_mv <= mv_lo:
        new_mv = mv_lo

    # OUT1 maps an arbitrary key to a pin descriptor; the last pinName wins
    # (matches the original key loop; '' if OUT1 is empty).
    out_name = ''
    for descriptor in input_data['OUT1'].values():
        out_name = descriptor['pinName']
    return {
        out_name: {
            'value': new_mv,
            "partkp": partkp,
            "partki": partki,
            "partkd": partkd,
        }
    }
13,078 | c4cb9ee48a56ba4b1c9a15164dfe91a353a081b3 | import pygame
pygame.joystick.init()
def goo():
    # Report joystick subsystem state, then instantiate every attached
    # joystick so pygame starts delivering their events.
    print(pygame.joystick.get_init())
    print(pygame.joystick.get_count())
    # NOTE(review): the list is never used; instantiation is the side effect.
    joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
goo()
pygame.init()
# Main event loop: poll joystick events at ~27 FPS forever.
while True:
    pygame.time.Clock().tick(27)
    for event in pygame.event.get():
        if event.type == pygame.JOYBUTTONUP:
            # Fix: JOYBUTTONUP events carry `button`, not `value`;
            # `event.value` raised AttributeError on every button release.
            print(event.button)
        if event.type == pygame.JOYHATMOTION:
            # JOYHATMOTION does carry `value` (the (x, y) hat position).
            print(event.value)
13,079 | 5cbefb1fc83d909bc5b79bdad39603f8c09899bd | class Queue(object):
    def __init__(self):
        # Backing store: index 0 is the front of the queue (FIFO).
        self.queue = []
def is_empty(self):
return self.queue == []
    def enqueue(self, data):
        # Append at the back; dequeue pops from the front.
        self.queue.append(data)
def dequeue(self):
if self.queue:
item = self.queue.pop(0)
print(item)
return item
else:
return False
    def peek(self):
        # Print and return the oldest item without removing it.
        # NOTE(review): raises IndexError when the queue is empty.
        print(self.queue[0])
        return self.queue[0]
    def print(self):
        # Dump the whole backing list; the method name shadows the builtin
        # `print` only as a class attribute, so the builtin call below works.
        print(self.queue)
13,080 | 9608e780e2d672c059d4b3fbb8fd27328579700a | for n in range(2, 10):
if n % 2 == 0:
print("found an even number ", n)
continue
else:
print("found a number", n) |
13,081 | 11b9bb2c2ebac97e7614190ff525fe91598f99a2 | # -*- coding: utf-8 -*-
"""Logistic_Breast_Cancer
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/18hSIk0waVp_GfrVvbhURVeNo5ql_8b-V
## Logistic Regression without Sklearn
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
data = load_breast_cancer()
data_features = data.data
data_target = data.target
data_table = pd.DataFrame(data=data_features, columns=data.feature_names)
data_table = data_table.join(pd.Series(data_target, name='Result'))
data_table.head()
data_features.shape
pd.notna(data_table).all()
data_table.describe()
data_mean = data_table.mean()[0:30].to_numpy()
data_diff = (data_table.max() - data_table.min())[0:30].to_numpy()
data_scaled = (data_features - data_mean)/data_diff
data_scaled.mean()
data_scaled.shape
X0 = np.ones([569,1])
data_scaled_X0 = np.concatenate((X0, data_scaled), axis=1)
data_scaled_X0[0]
Theta = np.zeros([31])
X_train, X_test, y_train, y_test = train_test_split(data_scaled_X0, data_target, test_size=0.3, random_state=0)
z_theta = np.sum(X_train*Theta, axis=1)
z_theta.shape
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e**-z).

    Accepts scalars or numpy arrays.  Uses np.exp rather than `np.e ** (-z)`
    for clarity and the standard vectorized exponential.
    """
    return 1 / (1 + np.exp(-z))
#(0.01/len(Cost_func))*np.sum((h_theta - y_train)*X_train.transpose(), axis=1)
Theta = np.zeros([31])
#Just setting a high J number so the while loop can start
J=10
J_list = []
i = 0
while J>0.1 and i<10000:
z_theta = np.sum(X_train*Theta, axis=1)
h_theta = sigmoid(z_theta)
Cost_func = -y_train*np.log(h_theta) - (1-y_train)*np.log(1 - h_theta)
J = (1/len(Cost_func))*np.sum(Cost_func)
J_list.append(J)
Theta_temp = (0.09/len(Cost_func))*np.sum((h_theta - y_train)*X_train.transpose(), axis=1)
Theta = Theta - Theta_temp
i = i+1
print(J, i)
plt.plot(range(len(J_list)), J_list);
plt.grid();
plt.ylabel('Cost')
plt.xlabel('Iteration');
def verify(probability):
    """Map each probability to a hard class label: 1 if > 0.5, else 0."""
    return [1 if p > 0.5 else 0 for p in probability]
z_test = np.sum(X_test*Theta, axis=1)
h_test = sigmoid(z_test)
h_test = h_test.tolist()
prediction = verify(h_test)
comparative = np.concatenate((np.array([prediction]).transpose(), np.array([y_test]).transpose()), axis=1)
True_Positives = comparative[(comparative[:,1] == 1) & (comparative[:,0] == 1)].shape[0]
False_Positives = comparative[(comparative[:,1] == 0) & (comparative[:,0] == 1)].shape[0]
True_Negatives = comparative[(comparative[:,1] == 0) & (comparative[:,0] == 0)].shape[0]
False_Negatives = comparative[(comparative[:,1] == 1) & (comparative[:,0] == 0)].shape[0]
Precision = True_Positives/(True_Positives+False_Positives)
Recall = True_Positives/(True_Positives+False_Negatives)
F1Score = 2*(Precision*Recall)/(Precision+Recall)
F1Score
"""## Logistic Regression with SKLearn"""
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train);
model.predict(X_test)
y_test
model.score(X_test, y_test)
comparative = np.concatenate((np.array([model.predict(X_test)]).transpose(), np.array([y_test]).transpose()), axis=1)
True_Positives_sk = comparative[(comparative[:,1] == 1) & (comparative[:,0] == 1)].shape[0]
False_Positives_sk = comparative[(comparative[:,1] == 0) & (comparative[:,0] == 1)].shape[0]
True_Negatives_sk = comparative[(comparative[:,1] == 0) & (comparative[:,0] == 0)].shape[0]
False_Negatives_sk = comparative[(comparative[:,1] == 1) & (comparative[:,0] == 0)].shape[0]
Precision_sk = True_Positives_sk/(True_Positives_sk+False_Positives_sk)
Recall_sk = True_Positives_sk/(True_Positives_sk+False_Negatives_sk)
F1Score_sk = 2*(Precision_sk*Recall_sk)/(Precision_sk+Recall_sk)
print(f' With sklearn (SVM): {round(F1Score_sk, 4)}, with Gradient Descent: {round(F1Score, 4)}')
True_Positives, True_Positives_sk
False_Positives, False_Positives_sk
True_Negatives, True_Negatives_sk
False_Negatives, False_Negatives_sk
|
13,082 | 56fa5ed9068ac18b0b2ed0b7d9dcc63f89dde5ce | """Vehicles Routing Problem (VRP) with Time Windows."""
from __future__ import print_function
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from random import randrange
import numpy as np
"""Random adjacency matrix of given size"""
def random_adjacency_matrix(length, minWeight=1, maxWeight=2):
    """Random integer adjacency (travel-time) matrix with a zero diagonal.

    Off-diagonal entries are drawn uniformly from [minWeight, maxWeight];
    the diagonal is zeroed (no self-loops).
    """
    mat = np.random.randint(minWeight, maxWeight + 1, (length, length))
    # Idiom: zero the diagonal in one vectorized call instead of a row loop.
    np.fill_diagonal(mat, 0)
    return mat
def random_time_window(length, minWeight=0, maxWeight=23):
    """Random (start, end) time windows, one per location.

    The first window is fixed at (0, 5) for the depot; the remaining
    length-1 windows satisfy start < end within [minWeight, maxWeight).
    """
    windows = [(0, 5)]
    for _ in range(length - 1):
        start = randrange(minWeight, maxWeight - 2)
        end = randrange(start + 1, maxWeight)
        windows.append((start, end))
    return windows
def solution_to_array(data, manager, routing, solution):
    """Convert an OR-Tools routing solution into per-vehicle node lists.

    Returns (routes, total_cost): routes[v] is the ordered list of node
    indices visited by vehicle v (depot appears at both ends), total_cost
    is the summed arc cost over all vehicles.
    """
    solut = [None] * data['num_vehicles']
    route_distance = 0
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        solut[vehicle_id] = []
        while not routing.IsEnd(index):
            solut[vehicle_id].append(manager.IndexToNode(index))
            previous_index = index
            index = solution.Value(routing.NextVar(index))
            route_distance += routing.GetArcCostForVehicle(previous_index, index, vehicle_id)
        # Append the final (end) node after the loop exits.
        solut[vehicle_id].append(manager.IndexToNode(index))
    return solut,route_distance
def print_solution(data, manager, routing, solution):
    """Prints solution on console."""
    # Min/Max of each cumulative 'Time' variable give the feasible arrival
    # window at that stop in the found solution.
    time_dimension = routing.GetDimensionOrDie('Time')
    total_time = 0
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
        while not routing.IsEnd(index):
            time_var = time_dimension.CumulVar(index)
            plan_output += '{0} Time({1},{2}) -> '.format(
                manager.IndexToNode(index), solution.Min(time_var),
                solution.Max(time_var))
            index = solution.Value(routing.NextVar(index))
        # Final stop (route end) printed outside the loop.
        time_var = time_dimension.CumulVar(index)
        plan_output += '{0} Time({1},{2})\n'.format(manager.IndexToNode(index),
                                                    solution.Min(time_var),
                                                    solution.Max(time_var))
        plan_output += 'Time of the route: {}min\n'.format(
            solution.Min(time_var))
        print(plan_output)
        total_time += solution.Min(time_var)
    print('Total time of all routes: {}min'.format(total_time))
class VRPTW:
    """Vehicle Routing Problem with Time Windows, solved via OR-Tools CP routing."""
    # Problem definition dict: time_matrix / time_windows / num_vehicles / depot.
    data = None
    def __init__(self, nb_camions,nb_villes, depot=0):
        # nb_camions: number of vehicles; nb_villes: number of locations.
        self.k = nb_camions
        self.towns_nb = nb_villes
        self.depot = depot
    def create_data_model(self):
        """Populate self.data with a random instance (matrix + windows)."""
        data = {}
        data['time_matrix'] = random_adjacency_matrix(self.towns_nb)
        data['time_windows'] = random_time_window(len(data['time_matrix']))
        data['num_vehicles'] = self.k
        data['depot'] = self.depot
        self.data = data
    def pass_matrix(self, matrix, timewindows):
        """Populate self.data from a caller-supplied matrix and time windows."""
        data = {}
        data['time_matrix'] = matrix
        data['time_windows'] = timewindows
        data['num_vehicles'] = self.k
        data['depot'] = self.depot
        self.data = data
    def solve(self, strategy, timeout, useTimeout = False):
        """Build the routing model from self.data and solve it.

        strategy: local-search metaheuristic enum; timeout: a time limit in
        seconds when useTimeout is True, otherwise a solution-count limit.
        Returns (routes, total_cost) from solution_to_array, or None when no
        solution was found.
        """
        # Create the routing index manager.
        manager = pywrapcp.RoutingIndexManager(len(self.data['time_matrix']),
                                               self.data['num_vehicles'], self.data['depot'])
        # Create Routing Model.
        routing = pywrapcp.RoutingModel(manager)
        # Create and register a transit callback.
        def time_callback(from_index, to_index):
            """Returns the travel time between the two nodes."""
            # Convert from routing variable Index to time matrix NodeIndex.
            from_node = manager.IndexToNode(from_index)
            to_node = manager.IndexToNode(to_index)
            return self.data['time_matrix'][from_node][to_node]
        transit_callback_index = routing.RegisterTransitCallback(time_callback)
        # Define cost of each arc.
        routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
        # Add Time Windows constraint.
        time = 'Time'
        routing.AddDimension(
            transit_callback_index,
            30, # allow waiting time
            30, # maximum time per vehicle
            False, # Don't force start cumul to zero.
            time)
        time_dimension = routing.GetDimensionOrDie(time)
        # Add time window constraints for each location except depot.
        for location_idx, time_window in enumerate(self.data['time_windows']):
            if location_idx == 0:
                continue
            index = manager.NodeToIndex(location_idx)
            time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
        # Add time window constraints for each vehicle start node.
        for vehicle_id in range(self.data['num_vehicles']):
            index = routing.Start(vehicle_id)
            time_dimension.CumulVar(index).SetRange(self.data['time_windows'][0][0],
                                                    self.data['time_windows'][0][1])
        # Instantiate route start and end times to produce feasible times.
        for i in range(self.data['num_vehicles']):
            routing.AddVariableMinimizedByFinalizer(
                time_dimension.CumulVar(routing.Start(i)))
            routing.AddVariableMinimizedByFinalizer(
                time_dimension.CumulVar(routing.End(i)))
        # Setting first solution heuristic.
        search_parameters = pywrapcp.DefaultRoutingSearchParameters()
        search_parameters.first_solution_strategy = (
            routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
        search_parameters.local_search_metaheuristic = strategy
        if useTimeout:
            search_parameters.time_limit.seconds = timeout
            #search_parameters.lns_time_limit.seconds = timeout
        else:
            search_parameters.solution_limit = timeout
        search_parameters.log_search = True
        # Solve the problem.
        solution = routing.SolveWithParameters(search_parameters)
        # Print solution on console.
        if solution:
            #print_solution(self.data, manager, routing, solution)
            return solution_to_array(self.data, manager, routing, solution)
        return None
|
13,083 | e68c17932af32f2ee33fa51432fce247aba0b9dd | import json
from chalice import Chalice
from chalicelib import sql_service
from chalicelib import boto_rds_conn
from chalicelib import SqlQueries
app = Chalice(app_name='my_app_aurora')
columns = ["shoetype", "color"]
@app.route('/psycopg')
def index():
    # Fetch all rows via the psycopg-based SQL service module.
    return sql_service.select_all()
@app.route('/boto')
def boto_conn():
    """Query via the RDS Data API and return the rows as a JSON array.

    Each record from the Data API is a list of single-key dicts
    ({type_hint: value}); the values are flattened in column order and
    zipped with the module-level `columns` names.
    """
    cnx = boto_rds_conn.BotoPostgresqlDB()
    response = cnx.execute_query(SqlQueries.SELECT_ALL)
    f_res = []
    for record in response['records']:
        res = []
        for rec in record:
            # Only the values matter; the keys are Data API type hints.
            res.extend(rec.values())
        f_res.append(dict(zip(columns, res)))
    # Fix: print() does not do %-interpolation; the original printed the
    # literal string "Final Response: %s" plus the response as a 2nd arg.
    print("Final Response: %s" % response)
    return json.dumps(f_res)
|
13,084 | 4cce7ff74b7ba14a561b9c8072b782f2db5c91e8 | import os
from time import sleep
import pytest
import requests
from datadog_checks.dev import docker_run, get_docker_hostname, get_here
BOOTSTRAP = {
'action': 'create_cluster',
'cluster': {'name': 'demo.local'},
'node': {'paths': {'persistent_path': '/var/opt/redislabs/persist', 'ephemeral_path': '/var/opt/redislabs/tmp'}},
'credentials': {'username': 'demo@redislabs.com', 'password': '123456'},
}
DATABASE = {
'name': 'db01',
'memory_size': 100000000,
'replication': False,
'eviction_policy': 'volatile-lru',
'sharding': False,
'shards_count': 1,
'port': 12000,
'data_persistence': 'aof',
'aof_policy': 'appendfsync-always',
}
@pytest.fixture(scope='session')
def dd_environment():
    # Session-scoped fixture: start the Redis Enterprise cluster via
    # docker-compose, bootstrap it, poll until the REST API answers
    # (up to 10 attempts), create a test database, then yield.
    compose_file = os.path.join(get_here(), 'docker-compose.yml')
    with docker_run(compose_file, log_patterns='MainThread: Done'):
        # Let the cluster settle first
        sleep(10)
        # Bootstrap the cluster
        url = 'https://{}:9443/v1/bootstrap/create_cluster'.format(get_docker_hostname())
        r = requests.post(url, json=BOOTSTRAP, verify=False)
        if r.status_code != 200:
            print("Error: Unable to bootstrap")
        counter = 0
        # Check to ensure it's running properly
        while True:
            counter += 1
            try:
                j = requests.get(
                    'https://{}:9443/v1/cluster'.format("localhost"),
                    auth=(BOOTSTRAP['credentials']['username'], BOOTSTRAP['credentials']['password']),
                    headers={'Content-Type': 'application/json'},
                    timeout=10,
                    verify=False,
                )
                if j.status_code == 200:
                    sleep(15)
                    print("Boostrap Successful")
                    break
                else:
                    print("Retrying cluster bootstrap:", counter)
                    sleep(5)
            except Exception as e:
                print("Retrying cluster bootstrap:", counter, " Error:", str(e))
                sleep(5)
            # Give up after 10 attempts; DB creation below will then report failure.
            if counter > 9:
                break
        # Create a database
        x = requests.post(
            'https://{}:9443/v1/bdbs'.format("localhost"),
            auth=(BOOTSTRAP['credentials']['username'], BOOTSTRAP['credentials']['password']),
            headers={'Content-Type': 'application/json'},
            timeout=10,
            verify=False,
        )
        if x.status_code != 200:
            print("Error: Unable to create database: HTTP {} : {}".format(x.status_code, x.text))
        print("OK: DB Setup complete")
        yield
@pytest.fixture
def instance():
    # Minimal check config pointing at the dockerised cluster's REST API.
    return {
        'host': get_docker_hostname(),
        'port': 9443,
        'username': BOOTSTRAP['credentials']['username'],
        'password': BOOTSTRAP['credentials']['password'],
    }
|
13,085 | d562e7b1f132bf123de526808408a0c691d092ff | import os
import pygame
#redblobgames
pygame.init()
#Création de la fenetre
largeur = 1024
hauteur = 768
fenetre=pygame.display.set_mode((largeur,hauteur))
# lecture de l'image du perso
#Dico des images---------------------------------------------------------
#boneproj---------------------------------------------------------------
# Bone-projectile animation frames, loaded in display order (the first
# frame has an irregular filename).
imagesbone = {}
imagesbone["proj"] = []
for _frame in ("femur.png", "femur02.png", "femur03.png", "femur04.png"):
    imagesbone["proj"].append(pygame.image.load("boneframes/" + _frame).convert_alpha())
print("here I am")
#blob---------------------------------------------------------------------------
# Idle animation frames for the blob (blob01..blob08), appended in order.
# "right" stays empty, exactly as in the original hand-unrolled version.
imagesBlob = {}
imagesBlob["left"] = []
imagesBlob["right"] = []
for _i in range(1, 9):
    _path = "blobframes/idle/blob%02d.png" % _i
    imagesBlob["left"].append(pygame.image.load(_path).convert_alpha())
#-------------------------------------------------------------------------------
#animations perso---------------------------------------------------------------------
# Player walk cycles: 4 frames per direction.  The first frame of each
# direction has an irregular filename, so the lists are explicit.
imagesPerso = {}
_perso_frames = {
    "right": ["perso1.png", "persoright02.png", "persoright03.png", "persoright04.png"],
    "left": ["persoleft.png", "persoleft02.png", "persoleft03.png", "persoleft04.png"],
    "down": ["persoface.png", "persodown02.png", "persodown03.png", "persodown04.png"],
    "up": ["persoup.png", "persoup02.png", "persoup03.png", "persoup04.png"],
}
for _direction, _names in _perso_frames.items():
    imagesPerso[_direction] = [
        pygame.image.load("persoframes/" + _name).convert_alpha()
        for _name in _names
    ]
#fin du dico --------------------------------------------------------------
imageSword = pygame.image.load("swordpics/sword1.png").convert_alpha()
imagebone = pygame.image.load("boneframes/femur.png").convert_alpha()
imageGrille = pygame.image.load("gridpics/GrilleComplete.png").convert_alpha()
ianimeblob = 0
ianime = 0
ianimebone = 0
imageblob = imagesBlob["left"][ianimeblob]
imagePerso = imagesPerso["up"][ianime]
# creation d'un rectangle pour positioner l'image du personnage
rectBlob = imageblob.get_rect()
rectBlob.x = 240
rectBlob.y = 120
rectPerso = imagePerso.get_rect()
rectPerso.x = hauteur/2
rectPerso.y = largeur/2
rectSword = imageSword.get_rect()
rectbone = imagebone.get_rect()
# rectSword = imageSword.get_rect()
# rectSword.x = 2000
# rectSword.y = 2000
rectGrille = imageGrille.get_rect()
rectGrille.x = 0
rectGrille.y = 0
# lecture de l'image du fond
imageFond = pygame.image.load("fond1.png").convert()
# creation d'un rectangle pour positioner l'image du fond
rectFond = imageFond.get_rect()
rectFond.x = 0
rectFond.y = 0
## Ajoutons un texte fixe dans la fenetre :
# Choix de la police pour le texte
font = pygame.font.Font(None, 34)
# Creation de l'image correspondant au texte
imageText = font.render('<Escape> pour quitter', True, (255, 255, 255))
# creation d'un rectangle pour positioner l'image du texte
rectText = imageText.get_rect()
rectText.x = 10
rectText.y = 10
# servira a regler l'horloge du jeu
horloge = pygame.time.Clock()
# la boucle infinie dans laquelle on reste coince
i=1;
continuer=1
upstairsperso = 0
upstairsblob = 0
vartour = -1
dirbone = -1
while continuer:
horloge.tick(30)
i= i+1;
#print (i)
# on recupere l'etat du clavier
touches = pygame.key.get_pressed();
# si la touche ESC est enfoncee, on sortira
# au debut du prochain tour de boucle
if touches[pygame.K_ESCAPE] :
continuer=0
# rafraichissement
# Affichage du Texte
fenetre.blit(imageText, rectText)
if touches[pygame.K_UP] :
if i%5==0 :
ianime = (ianime+1)%len(imagesPerso["up"])
imagePerso = imagesPerso["up"][ianime]
if rectPerso.y < 1 :
rectPerso.y = 0
else :
rectPerso.y = rectPerso.y - 8
elif touches[pygame.K_DOWN] :
if i%5==0 :
ianime = (ianime+1)%len(imagesPerso["down"])
imagePerso = imagesPerso["down"][ianime]
if rectPerso.y > 616 :
rectPerso.y = 617
else :
rectPerso.y = rectPerso.y + 8
elif touches[pygame.K_LEFT] :
if i%5==0 :
ianime = (ianime+1)%len(imagesPerso["left"])
imagePerso = imagesPerso["left"][ianime]
if rectPerso.x > 700 and rectPerso.x < 710 and rectPerso.y > 5 and rectPerso.y < 200 and upstairsperso==0:
rectPerso.x = 710
if rectPerso.x < 45:
rectPerso.x = 44
else :
rectPerso.x = rectPerso.x - 8
elif touches[pygame.K_RIGHT] :
if i%5==0 :
ianime = (ianime+1)%len(imagesPerso["right"])
imagePerso = imagesPerso["right"][ianime]
if rectPerso.x > 462 and rectPerso.x < 470 and rectPerso.y > 70 and upstairsperso==1:
rectPerso.x = 461
if rectPerso.x > 918:
rectPerso.x = 919
if rectPerso.x > 590 and rectPerso.x < 600 and rectPerso.y > -1 and rectPerso.y < 200 and upstairsperso==0:
rectPerso.x = 591
else :
rectPerso.x = rectPerso.x + 8
elif touches[pygame.K_SPACE] :
#Affichage de l'épée
fenetre.blit(imageSword, rectSword)
rectSword.x = rectPerso.x
rectSword.y = rectPerso.y
fenetre.blit(imageSword, rectSword)
if touches[pygame.K_SPACE] :
rectbone.x = rectPerso.x
rectbone.y = rectPerso.y
if touches[pygame.K_UP] :
dirbone = 0
elif touches[pygame.K_RIGHT] :
dirbone = 1
elif touches[pygame.K_DOWN] :
dirbone = 2
elif touches[pygame.K_LEFT] :
dirbone = 3
else :
dirbone = 4
if dirbone < 4 :
if dirbone == 0:
rectbone.y = rectbone.y - 50
if dirbone == 1:
rectbone.x = rectbone.x + 50
if dirbone == 2:
rectbone.y = rectbone.y+ 50
if dirbone == 3:
rectbone.x = rectbone.x - 50
# else:
# imagePerso = imagesPerso["right"][0]
# Affichage du fond
fenetre.blit(imageFond, rectFond)
#affichage os
#Affichage grille
if ((rectPerso.x >700 and rectPerso.x < 710) and (rectPerso.y >-1 and rectPerso.y < 163) and (vartour+1 < i) and (touches[pygame.K_LEFT] or touches[pygame.K_RIGHT] or touches[pygame.K_UP] or touches[pygame.K_DOWN])):
upstairsperso=(upstairsperso+1)%2
#Pour réguler upstairsperso
vartour = i
#changement upstairsblob
if ((rectBlob.x >700 and rectBlob.x < 710) and (rectBlob.y >-1 and rectBlob.y < 163) and (vartour+1 < i) and (upstairsblob!=upstairsperso)):
upstairsblob=(upstairsblob+1)%2
vartour = i
#DEPLACEMENT BLOB TEST 1________________________________________________________
#________________________________________________________________________________
#TEST UPSTAIRSBLOB
# if i%10==0:
# upstairsblob=(upstairsblob+1)%2
# print("upstairsblob= ", upstairsblob)
# print("upstairsperso= ", upstairsperso)
if upstairsperso==1 and upstairsblob==1 :
fenetre.blit(imageGrille, rectGrille)
fenetre.blit(imagePerso, rectPerso)
fenetre.blit(imageblob, rectBlob)
if dirbone > -1:
fenetre.blit(imagebone, rectbone)
elif upstairsperso==1 and upstairsblob==0:
fenetre.blit(imageblob, rectBlob)
fenetre.blit(imageGrille, rectGrille)
fenetre.blit(imagePerso, rectPerso)
if dirbone > -1:
fenetre.blit(imagebone, rectbone)
elif upstairsperso==0 and upstairsblob==0:
fenetre.blit(imageblob, rectBlob)
fenetre.blit(imagePerso, rectPerso)
if dirbone > -1:
fenetre.blit(imagebone, rectbone)
fenetre.blit(imageGrille, rectGrille)
elif upstairsperso==0 and upstairsblob==1:
fenetre.blit(imagePerso, rectPerso)
if dirbone > -1:
fenetre.blit(imagebone, rectbone)
fenetre.blit(imageGrille, rectGrille)
fenetre.blit(imageblob, rectBlob)
if i%3==0 :
ianimeblob = (ianimeblob+1)%len(imagesBlob["left"])
imageblob = imagesBlob["left"][ianimeblob]
ianimebone = (ianimebone+1)%len(imagesbone["proj"])
imagebone = imagesbone["proj"][ianimebone]
# if upstairsblob==1 :
# fenetre.blit(imageGrille, rectGrille)
# fenetre.blit(imageblob, rectBlob)
# else:
# # Affichage Perso
# fenetre.blit(imageblob, rectBlob)
# fenetre.blit(imageGrille, rectGrille)
# rafraichissement
pygame.display.flip()
#print ( upstairsperso)
# Si on a clique sur le bouton de fermeture on sortira
# au debut du prochain tour de boucle
# Pour cela, on parcours la liste des evenements
# et on cherche un QUIT...
for event in pygame.event.get(): # parcours de la liste des evenements recus
if event.type == pygame.QUIT: # Si un de ces evenements est de type QUIT
continuer = 0
# fin du programme principal.
# On n'y accedera jamais dans le cas de ce programme
pygame.quit()
|
13,086 | 94d1007c5265a448375b89f696bce76ff17bcfe6 | n = int(input())
while(n > 0):
    # For each of the n input words: words longer than 10 characters are
    # abbreviated as <first letter><count of inner letters><last letter>
    # (e.g. "localization" -> "l10n"); shorter words are echoed unchanged.
    w = input()
    l = len(w)
    if l <= 10:
        print(w)
    else:
        # w[0:1] is the first char, w[l-1:l] the last; l-2 inner chars.
        w = w[0:1] + str(l-2) + (w[l-1:l])
        print(w)
    n -= 1
13,087 | fb5ab3bd4f7e7bafd722cbb6493580e8ebfcc693 | import numpy as np
import glob
from keras.preprocessing import image
def get_tensors(file_name):
    """Load an image file and return it as a single normalized 4-D tensor.

    The image is resized to 260x1344 on load, scaled to [0, 1] float32,
    and reshaped to (1, 260, 1344, 3) ready for model input.  (The stale
    comment about "6 slices" and the dead, commented-out slicing code from
    the old variant have been removed.)
    """
    img = image.load_img(file_name, target_size=(260, 1344))
    img_arr = np.array(img).astype('float32') / 255
    return img_arr.reshape(1, 260, 1344, 3)
|
13,088 | d543be6f932b8e8f8e42c97ed7dc6b0b19122a70 | import unittest
from unittest.mock import patch
import sys
from .. import play
from ..utils import config
class Test(unittest.TestCase):
    """Unit tests for the roulette ``play`` module.

    The tests assign module-level state (``play.currentBank``,
    ``play.withColors``, ``play.bets`` ...) directly before exercising
    each function.
    """

    def setUp(self):
        # Get config
        play.conf = config.getConfig()

    def test_showBank(self):
        play.currentBank = 10
        self.assertIsNone(play.showBank())

    def test_updateBank(self):
        play.currentBank = 10
        play.updateBank(10)
        self.assertEqual(play.currentBank, 20)

    def test_checkBankStatus(self):
        play.currentBank = 10
        self.assertIsNone(play.checkBankStatus())

    def test_checkBankStatus_2(self):
        # A negative bank must terminate the game.
        play.currentBank = -10
        self.assertRaises(SystemExit, play.checkBankStatus)

    def test_amountToCurrency(self):
        self.assertEqual(play.amountToCurrency(10), '$10')

    def test_getMaxPossibleBet(self):
        # The maximum bet is capped by the current bank ...
        play.currentBank = 10
        self.assertEqual(play.getMaxPossibleBet(10000), 10)
        # ... and by the table limit.
        play.currentBank = 999999
        self.assertEqual(play.getMaxPossibleBet(10000), 10000)

    def test_wheel(self):
        from ..vars.numbers import french
        from ..vars.bets import addColors
        play.withColors = addColors(french)
        self.assertIsInstance(play.wheel(), list)

    def test_game(self):
        from ..vars.numbers import french
        from ..vars.bets import addColors
        play.withColors = addColors(french)
        play.currentBank = 10
        self.assertIsInstance(play.game(), tuple)

    def test_getOutcome(self):
        self.assertIsNone(play.getOutcome(10, {'name': '19 to 36', 'winningSpaces': [
                          1, 3], 'payout': (1, 1), 'type': 'any'}))

    def test_getOutcome_2(self):
        self.assertIsNone(play.getOutcome(10, {'name': '19 to 36', 'winningSpaces': [
                          1, 3], 'payout': (1, 1), 'type': 'any'},
            specificChoice=5))

    def test_getColorIcon(self):
        if play.isUnicodeSupported():
            self.assertEqual(play.getColorIcon('red'), u"\U0001F534")
            self.assertEqual(play.getColorIcon('black'), u"\u2B24")
            self.assertEqual(play.getColorIcon('green'), u"\U0001F49A")
        else:
            self.assertEqual(play.getColorIcon('red'), 'R')
            self.assertEqual(play.getColorIcon('black'), 'B')
            self.assertEqual(play.getColorIcon('green'), 'G')

    def test_sleep(self):
        self.assertIsNone(play.sleep(1, 100))

    def test_isUnicodeSupported(self):
        self.assertIsInstance(play.isUnicodeSupported(), bool)

    def test_betsTable(self):
        self.assertIsNone(play.betsTable())

    def test_isBetTypeValid(self):
        from ..vars.bets import bets
        play.bets = bets
        self.assertTrue(play.isBetTypeValid(2))
        self.assertFalse(play.isBetTypeValid(44))
        self.assertFalse(play.isBetTypeValid('abc'))

    def test_getBet(self):
        from ..vars.bets import bets
        play.bets = bets
        self.assertIsInstance(play.getBet(2), dict)

    def test_isBetAmountValid(self):
        self.assertTrue(play.isBetAmountValid(10, 5, 100))
        self.assertFalse(play.isBetAmountValid(1, 5, 100))

    def test_isSpecificChoiceValid(self):
        self.assertFalse(play.isSpecificChoiceValid('00', 'french'))
        self.assertTrue(play.isSpecificChoiceValid('00', 'american'))
        self.assertTrue(play.isSpecificChoiceValid(5, 'french'))
        self.assertTrue(play.isSpecificChoiceValid(5, 'american'))
        self.assertFalse(play.isSpecificChoiceValid(55, 'french'))
        self.assertFalse(play.isSpecificChoiceValid(55, 'american'))

    def test_play(self):
        # Use the module-level ``patch`` import (it was previously unused while
        # these two tests reached for ``unittest.mock.patch`` instead).
        with patch('builtins.input', return_value='17'):
            play.currentBank = 20
            play.play(break_=True)

    def test_firstPlay(self):
        with patch('builtins.input', return_value='17'):
            play.firstPlay(bank=20, break_=True)
|
13,089 | b630283137e8be909945e9d4c17ffda25bbaa1f0 | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Strictly Palindromic Number.py
@time: 2022/09/03
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
class Solution:
    def isStrictlyPalindromic(self, n: int) -> bool:
        """Return True if n reads as a palindrome in every base b, 2 <= b <= n - 2.

        The check is performed explicitly base by base. (For any n >= 4 the
        answer is always False, because n is written "12" in base n - 2.)
        """

        def is_palindrome_in_base(value: int, base: int) -> bool:
            # Collect the digits of ``value`` in ``base``,
            # least-significant digit first. (The original code appended
            # every digit twice and carried an unused hex-digit table;
            # the palindrome verdict is unchanged without them.)
            digits = []
            while True:
                quotient, remainder = divmod(value, base)
                digits.append(remainder)
                if quotient == 0:
                    break
                value = quotient
            # Palindrome iff the digit list equals its own reverse.
            return all(digits[i] == digits[~i] for i in range(len(digits) // 2))

        return all(is_palindrome_in_base(n, base) for base in range(2, n - 1))


so = Solution()
print(so.isStrictlyPalindromic(9))
|
13,090 | 507c63e2e7252a015945dcc07d4bc05c27bb7832 | # Problem: https://codeforces.com/problemset/problem/158/B
# Difficulty: 1100
# Trials: 6
# Time: 00:45:14
n = int(input())
r = 0
# d[k] counts the groups of size k (1..3) still waiting; groups of 4 fill a
# taxi by themselves and are counted directly into r.
d = {i: 0 for i in range(1, 4)}
for s in map(int, input().split()):
    if s == 4:
        r += 1
    else:
        d[s] += 1
# Two groups of two share one taxi.
r += d[2] // 2
d[2] %= 2
if d[2] == 1:
    # A leftover pair takes its own taxi, topped up with at most two singles.
    r += 1
    if d[1] >= 2:
        d[1] -= 2
    if d[1] == 1 and d[3] == 0:
        # The lone remaining single rides along with the pair.
        d[1] -= 1
# Each group of three gets a taxi, optionally joined by one single.
r += d[3]
d[1] -= min(d[1], d[3])
# Remaining singles ride four per taxi; any leftovers need one more taxi.
r += d[1] // 4
d[1] %= 4
if d[1] > 0:
    r += 1
print(r) |
13,091 | 0716b4c8ef691d044b0b473903ab841ddfc5dfa8 | # -- coding: UTF-8 --
"""
========================================
Deletion in many to many relationships
========================================
Explaining many to many relationships
"""
from sqlalchemy import Table, Column, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import INTEGER, VARCHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd
Base = declarative_base()
association_table = Table('association', Base.metadata,
Column('parent_id', INTEGER, ForeignKey('parent.parent_id', ondelete="CASCADE")),
Column('child_id', INTEGER, ForeignKey('child.child_id', ondelete="CASCADE")))
class Parent(Base):
    """
    To store details of parent
    :ivar parent_id: Primary key of the table
    :vartype parent_id: :class:`sqlalchemy.dialects.mysql.INTEGER`
    :ivar name: Name of the parent
    :vartype name: :class:`sqlalchemy.dialects.mysql.VARCHAR`
    :ivar family: Name of the family
    :vartype family: :class:`sqlalchemy.dialects.mysql.VARCHAR`
    """
    __tablename__ = 'parent'
    parent_id = Column(INTEGER, primary_key=True, autoincrement=True)
    name = Column(VARCHAR(50))
    family = Column(VARCHAR(50))
    # Many-to-many link to Child through the association table; deleting a
    # Parent cascades to its children and lets the DB remove association rows
    # (passive_deletes relies on the FK ondelete="CASCADE").
    children = relationship(
        "Child",
        secondary=association_table,
        back_populates="parents",
        cascade="all, delete",
        passive_deletes=True
    )
class Child(Base):
    """
    To store details of child
    :ivar child_id: Primary key of the table
    :vartype child_id: :class:`sqlalchemy.dialects.mysql.INTEGER`
    :ivar name: Name of the child
    :vartype name: :class:`sqlalchemy.dialects.mysql.VARCHAR`
    :ivar residence: Name of the residence
    :vartype residence: :class:`sqlalchemy.dialects.mysql.VARCHAR`
    """
    __tablename__ = 'child'
    child_id = Column(INTEGER, primary_key=True, autoincrement=True)
    name = Column(VARCHAR(50))
    residence = Column(VARCHAR(50))
    # Reverse side of Parent.children; no delete cascade is configured on
    # this side (the passive_deletes flag is deliberately left commented out).
    parents = relationship(
        "Parent",
        secondary=association_table,
        back_populates="children"
        # passive_deletes=True
    )
def add_parent(session, df):
    """
    Persist one ``Parent`` row per dataframe record.

    :param session: An sqlalchemy session
    :type session: :class:`sqlalchemy.orm.session.Session`
    :param df: Dataframe containing the values to be stored in the database
    :type df: :class:`pandas.DataFrame`
    :return: None
    """
    try:
        for _, record in df.iterrows():
            entry = Parent()
            entry.name = record['parent_name']
            entry.family = record['family']
            session.add(entry)
    except Exception as ex:
        # Undo the partially staged batch before propagating the failure.
        session.rollback()
        raise ex
    else:
        # Commit only when every row was staged without error.
        session.commit()
def add_child(session, df):
    """
    To add child to database
    :param session: An sqlalchemy session
    :type session: :class:`sqlalchemy.orm.session.Session`
    :param df: Dataframe containing the values to be stored in the database
    :type df: :class:`pandas.DataFrame`
    :return: None
    """
    try:
        for _, row in df.iterrows():
            child = Child()
            child.name = row['child_name']
            child.residence = row['Residence']
            # Look up both parents by name and link them to the child.
            # NOTE(review): ``.first()`` returns None when no parent matches,
            # and that None is appended to ``child.parents`` - confirm the
            # input data always references existing parents.
            father_obj = session.query(Parent).filter_by(name=row['father_name']).first()
            child.parents.append(father_obj)
            mother_obj = session.query(Parent).filter_by(name=row['mother_name']).first()
            child.parents.append(mother_obj)
            session.add(child)
    except Exception as ex:
        # Roll back the partially staged batch before re-raising.
        session.rollback()
        raise ex
    else:
        session.commit()
def main():
    """Main Function"""
    # Creating engine
    # NOTE(review): credentials are hard-coded in the connection string; they
    # should come from configuration or the environment, not source control.
    conn = "mysql+pymysql://saran:SADA2028jaya@localhost/learning"
    engine = create_engine(conn, echo=True)
    # WARNING: destructive - drops the demo tables before recreating them.
    with engine.connect() as conn:
        conn.execute("DROP TABLE learning.association")
        conn.execute("DROP TABLE learning.child")
        conn.execute("DROP TABLE learning.parent")
    # Creating the tables in the DB
    Base.metadata.create_all(engine)
    df = pd.read_excel("parent.xlsx")
    df1 = pd.read_excel("child.xlsx")
    # Creating the session
    session_factory = sessionmaker(bind=engine)
    session = session_factory()
    # Adding the values to DB
    add_parent(session, df)
    add_child(session, df1)
    # Demonstrate deletion through the many-to-many relationship: removing
    # one child also removes its association rows.
    obj = session.query(Child).filter(Child.child_id == 1).one()
    session.delete(obj)
    session.commit()
if __name__ == '__main__':
    main()
|
13,092 | b52a0588f292871b389977e38ed1dcb03d697cb2 | p,q=map(int,raw_input().split())
b= p ^ q
a= p ^ q
p= p ^ q
print p,q
|
13,093 | d732073e4a83582ff85be8fc98f5b0592a5ee097 | import xml.etree.ElementTree as ET
class Configuration:
    """Read-only accessor over the entries stored in ``config.xml``."""

    def __init__(self):
        # The whole configuration file is parsed once at construction time.
        self.tree = ET.parse('config.xml')
        self.root = self.tree.getroot()

    def getUsernamePassword(self, user):
        """Return the (username, password) of the <login> entry named ``user``.

        Returns None implicitly when no entry matches.
        """
        for entry in self.root.findall('login'):
            if entry.get('name') == user:
                return entry.find('username').text, entry.find('password').text

    def getCourseByName(self, name):
        """Return (id, startDate, gotPlace) for the <course> named ``name``.

        Returns None implicitly when no course matches.
        """
        for entry in self.root.findall('course'):
            if entry.get('name') == name:
                # BUGFIX: gotPlace is stored as text, so compare against the
                # string '1' (the old ``.text == 1`` was always False).
                return (int(entry.find('id').text),
                        entry.find('startDate').text,
                        entry.find('gotPlace').text == '1')

    def getCourseByDate(self, date):
        """Return (id, startDate, gotPlace) for the <course> starting on ``date``.

        Returns None implicitly when no course matches.
        """
        for entry in self.root.findall('course'):
            # BUGFIX: compare the element's text, not the Element object
            # itself (``entry.find('startDate') == date`` was never True).
            if entry.find('startDate').text == date:
                return (int(entry.find('id').text),
                        entry.find('startDate').text,
                        entry.find('gotPlace').text == '1')
|
13,094 | 4559ef6eb5f6209341007fea8cacc922a9cc36b8 | def test(str):
    # Length of the shortest substring of ``str`` containing every distinct
    # character of ``str`` at least once (brute-force scan per start index).
    # NOTE(review): the parameter name shadows the builtin ``str``.
    num=len(set(str))
    b=set(list(str))
    res=[]
    temp=0
    copy=[]
    for i in b:
        copy.append(i)
    strr="".join(copy)
    # Try every start index that leaves room for all distinct characters.
    for i in range(len(str)-num+1):
        j=i
        temp=0
        # Extend the window until every distinct character has been seen.
        while(len(b)>0):
            if(j>=len(str)):
                break
            temp+=1
            if(str[j] in b):
                b.remove(str[j])
            j+=1
        # NOTE(review): windows cut off by the end of the string are still
        # appended to res even when incomplete - verify this cannot undercut
        # the true minimum.
        if(temp!=0):
            res.append(temp)
        # Reset the "still missing" collection for the next start index.
        b=list(strr)
    return (min(res))
t=int(input())
res=[]
for i in range(t):
    str=input()
    res.append(test(str))
# NOTE(review): this special case looks like a hard-coded workaround for one
# judge input (it prints 4 then 5 regardless of the computed answers) - verify.
if(res==[4,4]):
    print(4)
    print(5)
else:
    for i in res:
        print(i)
|
13,095 | 1c05fd98d8d447c4d43401bc991bb941c9e34623 | month = int(input("Enter the month as a number: "))
day = int(input("Enter the date: "))
if (month == 3 and day >= 20) or month in (4, 5) or (month == 6 and day < 20):
print("It is spring.")
elif (month == 6 and day >= 20) or month in (7, 8) or (month == 9 and day < 22):
print("It is summer")
elif (month == 9 and day >= 22) or month in (10, 11) or (month == 12 and day < 21):
print("It is fall.")
elif (month == 12 and day >= 21) or month in (1, 2) or (month == 3 and day < 20):
print("It is winter.")
else:
print("Invaild month and/or date. ") |
13,096 | d6b58cfb2cf239545115b3819ac745e69bfc4480 |
class MyIter:
    """Infinite iterable yielding one growing Fibonacci list.

    Every ``yield`` hands back the *same* list object, extended with the
    next Fibonacci number (1, 1, 2, 3, 5, ...), so consumers observe the
    list growing in place between iterations.
    """

    def __iter__(self):
        fib = [1]
        yield fib
        a, b = 0, 1
        while True:
            # Advance the Fibonacci pair and append the new value.
            a, b = b, a + b
            fib.append(b)
            yield fib
for arr in MyIter():
    print(arr)
    # The consumer of an iterator may stop the iteration at any point.
    if sum(arr) > 50:
        break
|
13,097 | 53299575f756187a793d644964e26c35f3eb71ce | import sys
from os.path import join, exists
from css_html_js_minify import css_minify
from reportlib.utils.templating import template_loader
class StyleSheet:
    """Collects CSS files, minifies them and (optionally) shows them inline.

    ``loaded_styles`` holds the minified CSS text of every stylesheet that
    could be resolved against the template directories.
    """

    def __init__(self):
        self.styles = ['base/styles.css']
        self.load()

    def append(self, stylesheet):
        """Register one extra stylesheet path and reload everything."""
        self.styles.append(stylesheet)
        self.load()

    def extend(self, stylesheet):
        """Register several extra stylesheet paths and reload everything."""
        self.styles.extend(stylesheet)
        self.load()

    def load(self, display=True):
        """Resolve, read and minify every registered stylesheet.

        :param display: when True and IPython is active, also render each
            minified stylesheet inline via ``IPython.display``.
        """
        self.loaded_styles = []
        for path in self.styles:
            # Use the first template directory that actually contains the file.
            for folder in template_loader.get_template_dirs():
                _path = join(folder, path)
                if exists(_path):
                    with open(_path, 'r') as f:
                        css = f.read()
                    css = css_minify(css)
                    self.loaded_styles.append(css)
                    if display and 'IPython.display' in sys.modules:
                        # BUGFIX: alias the import so it no longer rebinds and
                        # shadows the ``display`` parameter (the old in-loop
                        # ``from IPython.display import display`` made later
                        # iterations test the function object, not the flag).
                        from IPython.display import display as ipy_display, HTML
                        ipy_display(HTML(f'<style>{css}</style>'))
                    break
|
13,098 | 5d5e33b2b6df173ceb6e39e95cf9d2410b02f77a | from django.urls import re_path
from . import consumers
# Routes websocket connections to their consumers: one endpoint per chat
# room and one per user channel.
websocket_urlpatterns = [
    re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
    re_path(r'ws/users/(?P<user_name>\w+)/$',consumers.UsersConsumer.as_asgi())
] |
13,099 | 6287df2a0696430fd2bd7e2fecc04e8dd171411b | #!/usr/bin/python
# -*- coding: utf-8 -*-
#=============================================================
#=============================================================
# Some imports
#=============================================================
import os
import argparse
import domoWebUser
import oneWireDevice
import webui
import domoWebModule
import domoWebModuleMgt
import gpioDevice
import domoTask
import domoWebDataCache
from domoWebConfigParser import *
import domoWebLogger
import domoWebLogBook
#=============================================================
# Let's go
#=============================================================
#-------------------------------------------------------------
# Parsing args
#-------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config',
dest='configFileName')
args=parser.parse_args()
#-------------------------------------------------------------
# Load configuration from a file. This function builds up the
# domoWebConfigParser.config structure
#-------------------------------------------------------------
configParserInit(args.configFileName)
#-------------------------------------------------------------
# Loggin facility initialization
#-------------------------------------------------------------
domoWebLogger.domoWebLoggerInit(config)
logger = domoWebLogger.logger
debugFlags = domoWebLogger.debugFlags
#-------------------------------------------------------------
# The logBook
#-------------------------------------------------------------
domoWebLogBook.logBookInit(config, logger, debugFlags)
#-------------------------------------------------------------
# User management
#-------------------------------------------------------------
domoWebUser.domoWebUserInit(config, logger, debugFlags)
#-------------------------------------------------------------
# cache initialisation
#-------------------------------------------------------------
domoWebDataCache.domoWebDataCacheInit(config, logger, debugFlags)
#-------------------------------------------------------------
# Task management init
#-------------------------------------------------------------
domoTask.domoTaskInit(logger, debugFlags)
#-------------------------------------------------------------
# 1wire initialization
#-------------------------------------------------------------
oneWireDevice.oneWireInit(config, logger, debugFlags)
#-------------------------------------------------------------
# gpioDevices init
#-------------------------------------------------------------
gpioDevice.gpioDeviceInit(config, logger, debugFlags)
#-------------------------------------------------------------
# domoWebModuleMgt initalization
#-------------------------------------------------------------
domoWebModuleMgt.domoWebModuleManagementInit(config, logger, debugFlags)
#-------------------------------------------------------------
# Everything should have been initialized, we can start asynchronous
# tasks
#-------------------------------------------------------------
domoTask.domoTaskStart()
#-------------------------------------------------------------
# Configuration of displayed tabs
#-------------------------------------------------------------
webui.buildWebui(config, logger, debugFlags)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.