text stringlengths 38 1.54M |
|---|
def igualLista(lista1, lista2):
    """Return True when the two lists hold the same elements in the same order.

    Recursive definition: equal heads and equal tails.
    """
    # BUGFIX: the original only tested len(lista1) == 0, so
    # igualLista([], [1]) wrongly returned True and a longer lista1
    # indexed past the end of lista2.  Guard on both lengths first.
    if len(lista1) != len(lista2):
        return False
    if len(lista1) == 0:
        return True
    return lista1[0] == lista2[0] and igualLista(lista1[1:], lista2[1:])
# Demo: same members in a different order are not "equal" here -> False.
l1 = [1, 2, 3, 4]
l2 = [2, 1, 3, 4]
print(igualLista(l1, l2))
#!/usr/bin/env python3
import random
class Gun:
    """A named weapon dealing a fixed amount of damage per shot."""

    def __init__(self, name, damage):
        self.name = name
        self.damage = damage

    def __repr__(self):
        # example: 'AWP (100 damage)'
        return f"{self.name} ({self.damage:d} damage)"
class CSGOPlayer:
    """A player with 100 hp, a gun, and an alive flag.

    The class attribute ``num_players`` counts every player ever created.
    """

    # class variable: total number of constructed players
    num_players = 0

    def __init__(self, name, gun):
        self.name = name
        self.hp = 100
        self.gun = gun  # object exposing .name and .damage (a Gun instance)
        self.alive = True
        CSGOPlayer.num_players += 1

    def shoots(self, other):
        """Shoot *other* (another CSGOPlayer), subtracting our gun's damage.

        Already-dead targets are ignored; a killing shot is announced with
        a different message.
        """
        if not other.alive:
            return
        other.hp -= self.gun.damage
        if other.hp <= 0:
            other.alive = False
            print('%s shot %s dead for %d damage' % (self.name, other.name, self.gun.damage))
        else:
            print('%s shot %s for %d damage' % (self.name, other.name, self.gun.damage))

    def __repr__(self):
        return self.name
def team_alive(team):
    """Return True if at least one player on *team* is still alive."""
    return any(player.alive for player in team)
def choose_alive_player(team):
    """Pick a random living player from *team* by rejection sampling.

    Loops forever if nobody on the team is alive -- callers must
    guarantee at least one living player (see team_alive).
    """
    candidate = random.choice(team)
    while not candidate.alive:
        candidate = random.choice(team)
    return candidate
# Monte-Carlo simulation: repeatedly pit a 3-player T team against a
# 3-player CT team and estimate each side's win probability.
t_wins = 0
ct_wins = 0
NUM_ROUNDS = 10000
for _ in range(NUM_ROUNDS):
    # re-initialize players each round with a random gun from the pool
    guns = [['AWP', 100], ['AK47', 36], ['P90', 26]]
    a = CSGOPlayer('Player1', Gun(*random.choice(guns)))
    b = CSGOPlayer('Player2', Gun(*random.choice(guns)))
    c = CSGOPlayer('Player3', Gun(*random.choice(guns)))
    d = CSGOPlayer('Player4', Gun(*random.choice(guns)))
    e = CSGOPlayer('Player5', Gun(*random.choice(guns)))
    f = CSGOPlayer('Player6', Gun(*random.choice(guns)))
    teamT = [a, b, c]
    teamCT = [d, e, f]
    # simulate round: a random living player from each side; a coin flip
    # decides who fires this tick; round ends when one team is wiped out
    while True:
        T_player = choose_alive_player(teamT)
        CT_player = choose_alive_player(teamCT)
        if random.choice([True, False]):
            T_player.shoots(CT_player)
        else:
            CT_player.shoots(T_player)
        if team_alive(teamT) and not team_alive(teamCT):
            print('T wins')
            t_wins += 1
            break
        elif team_alive(teamCT) and not team_alive(teamT):
            print('CT wins')
            ct_wins += 1
            break
    print('Round over.')
# every round produces exactly one winner, so t_wins + ct_wins == NUM_ROUNDS
print('Rounds simulated: %d' % (NUM_ROUNDS))
print('probability of T winning: %f' % (t_wins / (t_wins + ct_wins)))
print('probability of CT winning: %f' % (ct_wins / (t_wins + ct_wins)))
|
#Latest Python Feature for Asynchronus Programming
import asyncio
import time
async def waiter(n):
    """Sleep asynchronously for *n* seconds, then print a completion message."""
    await asyncio.sleep(n)
    print(f"Waited for {n} seconds")
async def main():
    """Start two waiter tasks concurrently and await both.

    Because both tasks are created before either is awaited, total wall
    time is ~3 s (the longer task), not 2 + 3 = 5 s.
    """
    # print("HEllo")
    # await asyncio.sleep(1)
    # print("world")
    task1 = asyncio.create_task(waiter(2))
    task2 = asyncio.create_task(waiter(3))
    print(time.strftime('%X'))  # start timestamp
    await task1
    await task2
    print(time.strftime('%X'))  # end timestamp, ~3 s after start
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    asyncio.run(main())
    print("Program ended")
|
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sc = SparkContext.getOrCreate()
# BUGFIX: the original referenced an undefined name `sqlContext`; create it
# from the SparkContext before use.
sqlContext = SQLContext(sc)
'''
1. loads the text datafile to pyspark
2. apply schema and create DataFrame table
'''
financial_suite = sc.textFile("file:///data/staging/final/financial_suite.txt")
# Drop the header row (index 0).  BUGFIX: Python 3 removed tuple-parameter
# lambdas (`lambda (row, index): ...` is a SyntaxError); index the pair instead.
financial_suite_noHeader = financial_suite.zipWithIndex().filter(lambda ri: ri[1] > 0).keys()
# key each raw CSV line by its second column
financial_suite_tuple = financial_suite_noHeader.map(lambda x: (x.split(",")[1], x))
# BUGFIX: column names are space-separated (the string contains no tabs, so
# split("\t") yielded one giant bogus field), and the old backslash
# continuation *inside* one literal inserted no separator between the last
# name of a line and the first of the next.  Use implicit adjacent-literal
# concatenation with explicit trailing spaces, then split on whitespace.
schemaString = (
    "gvkey permno adate qdate public_date "
    "CAPEI BE bm evm pe_op_basic pe_op_dil pe_exi "
    "pe_inc ps pcf dpr npm opmbd opmad gpm ptpm "
    "cfm roa roe roce efftax aftret_eq aftret_invcapx "
    "aftret_equity pretret_noa pretret_earnat GProf "
    "equity_invcap debt_invcap totdebt_invcap capital_ratio "
    "int_debt int_totdebt cash_lt invt_act rect_act "
    "debt_at debt_ebitda short_debt curr_debt lt_debt "
    "profit_lct ocf_lct cash_debt fcf_ocf lt_ppent "
    "dltt_be debt_assets debt_capital de_ratio "
    "intcov intcov_ratio cash_ratio quick_ratio curr_ratio "
    "cash_conversion inv_turn at_turn rect_turn pay_turn "
    "sale_invcap sale_equity sale_nwc rd_sale adv_sale "
    "staff_sale accrual gsector gicdesc sp500 ptb PEG_trailing "
    "DIVYIELD PEG_1yrforward PEG_ltgforward FFI5_desc FFI5 "
    "FFI10_desc FFI10 FFI12_desc FFI12 FFI17_desc FFI17 "
    "FFI30_desc FFI30 FFI38_desc FFI38 FFI48_desc FFI48 FFI49_desc FFI49"
)
# every column ingested as a nullable string
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
# NOTE(review): rows in financial_suite_noHeader are still single CSV strings,
# not parsed tuples -- they likely need splitting to match the schema width;
# verify against the intended downstream use.
financial_suite_schema = sqlContext.createDataFrame(financial_suite_noHeader, schema)
|
from distutils.spawn import find_executable
from gwemlightcurves.KNModels import table
from gwpy.table import EventTable
from gwpy.plotter import EventTablePlot
import astropy.units as u
import astropy.constants as C
# physical constants in SI units; msun = solar mass in kg
G = C.G.value; c = C.c.value; msun = u.M_sun.to(u.kg)
plot = EventTablePlot(figsize=(20.5, 10.5))
# one subplot per equation of state, laid out in a 1x3 grid
EOS = ['ap4', 'H4', 'ms1b']
Color = ['blue', 'green', 'red']
locations = [(1,3,1), (1,3,2), (1,3,3)]
plot_location = dict(zip(EOS, locations))
for eos in EOS:
    ax = plot.add_subplot(plot_location[eos][0], plot_location[eos][1], plot_location[eos][2])
    ax.set_title('EOS: {0}'.format(eos), fontsize='small')
    # three data sources for the same EOS, located on PATH via find_executable
    t_mon = EventTable.read(find_executable(eos+'_mr.dat'), format='ascii')
    t_wk = EventTable.read(find_executable(eos+'.tidal.seq'), format='ascii')
    t_lalsim = EventTable.read(find_executable(eos+'_lalsim_mr.dat'), format='ascii')
    # converts the Wolfgang radii (geometric units, G*Msun/c^2) to km
    wk_conversion = (msun * G / c**2)*10**-3
    # keep only physically sensible radii (< 20 km)
    mask_mon = t_mon['radius'] < 20
    mask_wk = t_wk['Circumferential_radius'] < 20
    mask_lalsim = t_lalsim['radius'] < 20
    # fixed colors per data source: Monica=blue, Wolfgang=green, lalsim=red
    plot.add_scatter(t_mon['radius'][mask_mon], t_mon['mass'][mask_mon], label='Monica '+eos ,color=Color[0], ax=ax)
    plot.add_scatter(t_wk['Circumferential_radius'][mask_wk]*wk_conversion, t_wk['grav_mass'][mask_wk], label='Wolfgang '+eos ,color=Color[1], ax=ax)
    plot.add_scatter(t_lalsim['radius'][mask_lalsim], t_lalsim['mass'][mask_lalsim], label='lalsim '+eos ,color=Color[2], ax=ax)
    # NOTE(review): legend call assumed to be per-subplot (inside the loop) --
    # confirm against the original layout
    plot.add_legend(loc="upper left", fancybox=True, fontsize='small')
# figure-level axis labels and title
plot.text(0.5, 0.04, 'Radius (km)', ha='center', fontsize='x-large')
plot.text(0.04, 0.5, 'Mass ($M_{\odot}$)', va='center', rotation='vertical', fontsize='x-large')
plot.suptitle('Mass-Radius Curve Comparison', fontsize='x-large')
|
# -*- coding: UTF-8 -*-
import os
import xml.etree.ElementTree as ET
import unicodedata
import operator
import re
from nltk.tokenize import sent_tokenize
def get_link(pdfPath):
    """Extract hyperlinks from a PDF.

    Converts the PDF to XML with the bundled pdftoxml.exe, regroups tokens
    into text chunks, then collects links found in footnotes (matched back
    to their in-text superscript markers) and links appearing directly in
    the body text.

    Returns:
        (footnote_link, bodytext_list): two lists of dicts, each with keys
        'pos_flag' (1 = footnote, 0 = body), 'index', 'link', 'context'.
    """
    # --- locate pdftoxml relative to this file and build input/output paths ---
    pwd = os.path.dirname(os.path.abspath(__file__))
    father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
    exePath = os.path.join(father_path, 'tool', 'pdftoxml.exe')
    pdfname = os.path.basename(pdfPath).split('.')[0]
    htmlName = pdfname + '.html'
    htmlPath = os.path.join(father_path, 'html', htmlName)
    # run pdf2xml
    run_exe = exePath + ' ' + '-noImage -noImageInline' + ' ' + pdfPath + ' ' + htmlPath
    os.system(run_exe)
    tree = ET.parse(htmlPath)
    root = tree.getroot()
    # pre_process: histogram the rounded font sizes and the vertical gaps
    # between consecutive TEXT lines
    y_diff = {}
    fsizes = {}
    pre_Y = 0
    for pages in root.findall('PAGE'):
        for texts in pages.findall('TEXT'):
            y_sizes = {}
            for token in texts.findall('TOKEN'):
                fsizes[round(float(token.attrib['font-size']))] = fsizes.get(round(float(token.attrib['font-size'])), 0) + 1
                y_sizes[float(token.attrib['y'])] = y_sizes.get(float(token.attrib['y']), 0) + 1
            # dominant baseline of this TEXT line
            now_Y = max(y_sizes, key=y_sizes.get)
            y_diff[round(abs(now_Y-pre_Y))] = y_diff.get(round(abs(now_Y-pre_Y)), 0) + 1
            pre_Y = now_Y
    # max_fs = most common font size in the document (the body font)
    max_fs = 0
    for size in fsizes.keys():
        if max_fs == 0 or fsizes[size]>fsizes[max_fs]:
            max_fs = size
    # two most frequent line gaps become paragraph-break thresholds
    new_l = sorted(y_diff.items(), key=operator.itemgetter(1), reverse=True)[:7]
    limit1 = max(new_l[1][0],new_l[0][0])
    limit3 = min(new_l[1][0],new_l[0][0])  # NOTE(review): limit3 is never used
    limit2 = 3
    # --- rebuild the document as flat "chunk" elements, starting a new
    # chunk whenever the vertical jump exceeds limit2 ---
    xroot = ET.Element("Document")
    chunk = ET.SubElement(xroot, "chunk")
    pre_Y = 0
    for pages in root.findall('PAGE'):
        for texts in pages.findall('TEXT'):
            is_first = True
            y_sizes = {}
            for token in texts.findall('TOKEN'):
                y_sizes[float(token.attrib['y'])] = y_sizes.get(float(token.attrib['y']), 0) + 1
            now_Y = max(y_sizes, key=y_sizes.get)
            for token in texts.findall('TOKEN'):
                word = token.text
                if word and len(word.replace(' ','')) > 0:
                    if is_first and (pre_Y == 0 or abs(now_Y - pre_Y) >= limit2):
                        chunk = ET.SubElement(xroot, "chunk")
                        p_yloc = float(token.attrib['y'])  # NOTE(review): unused
                    ET.SubElement(chunk, "token", pages=pages.attrib['id'], x=token.attrib['x'], y=token.attrib['y'], font_size=token.attrib['font-size'], bold=token.attrib['bold']).text = word
                    is_first = False
            pre_Y = now_Y
    tree = ET.ElementTree(xroot)
    newxroot = tree.getroot()
    # --- pass 1: links that live in footnotes (non-body font size) ---
    pre_footnote_list = []
    link_list = []
    for chunk_pos,achunk in enumerate(newxroot.findall('chunk')):
        tokens = achunk.findall('token')
        if len(tokens) == 0 :
            continue
        else:
            # sentence = ' '.join([str(i.text) for i in tokens])
            sentence = get_sentence(tokens, False)
        # link_reg = re.compile('http[s]?://(?:[a-zA-Z]*|[0-9]*|[\$-_@.&+]*|[!*\(\),]*|(?:\%[0-9a-fA-F]*[0-9a-fA-F]*))').findall(sentence)
        link_reg = re.compile('[a-zA-Z]+://[^\s]*').findall(sentence)
        if link_reg and round(float(tokens[0].attrib['font_size'])) != max_fs:
            # print(link_reg)
            # the footnote's leading number, or -1 if none found
            footnote_num = -1
            if sentence.split()[0].isdigit():
                footnote_num = sentence.split()[0]
            else:
                # look back at the preceding chunk(s) for the number
                if chunk_pos > 1:
                    pre_tokens = newxroot.findall('chunk')[chunk_pos-1].findall('token')
                    pre_sentence = get_sentence(pre_tokens, False)
                    if round(float(pre_tokens[0].attrib['font_size'])) != max_fs and pre_sentence.split()[0].isdigit():
                        footnote_num = pre_sentence.split()[0]
                elif chunk_pos > 2:
                    # NOTE(review): unreachable -- chunk_pos > 2 implies chunk_pos > 1
                    pre_tokens = newxroot.findall('chunk')[chunk_pos-2].findall('token')
                    pre_sentence = get_sentence(pre_tokens, False)
                    if round(float(pre_tokens[0].attrib['font_size'])) != max_fs and pre_sentence.split()[0].isdigit():
                        footnote_num = pre_sentence.split()[0]
            if footnote_num != -1:
                now_link = link_reg[0]
                now_pos = chunk_pos
                # a link that ends its sentence may continue across chunks;
                # keep appending URL-looking fragments from following chunks
                if sentence.split()[-1] == now_link:
                    while 1 :
                        try :
                            next_token = newxroot.findall('chunk')[now_pos+1].findall('token')
                            next_sentence = get_sentence(next_token, True).split()
                            if ('/' in next_sentence[0] or ('.' in next_sentence[0] and next_sentence[0][-1] != '.')) and not next_sentence[0][0].isdigit():
                                now_link += next_sentence[0]
                                if len(next_sentence) == 1 :
                                    now_pos += 1
                                else:
                                    break
                            else:
                                break
                        except:
                            break
                link_list.append(now_link)
                # print(now_link)
                link_info = {
                    'index': footnote_num,
                    'link': now_link,
                    'page_id': tokens[0].attrib['pages'],
                    'x': None,
                    'y': None
                }
                # locate the in-text footnote marker (a raised, non-body-size
                # number equal to footnote_num) on the same page
                ids = tokens[0].attrib['pages']
                for pages in root.findall('PAGE'):
                    if pages.attrib['id'] == ids :
                        link_find = False
                        for texts in pages.findall('TEXT'):
                            fsizes = {}
                            for token in texts.findall('TOKEN'):
                                fsizes[round(float(token.attrib['font-size']))] = fsizes.get(round(float(token.attrib['font-size'])), 0) + 1
                            now_fs = max(fsizes, key=fsizes.get)
                            tests_list = texts.findall('TOKEN')
                            for pos in range(len(tests_list)):
                                # marker sits higher (smaller y) than both neighbours,
                                # uses a non-body size, inside a body-size line
                                if (pos == 0 or float(tests_list[pos-1].attrib['y']) > float(tests_list[pos].attrib['y']) )and \
                                (pos == len(tests_list) - 1 or float(tests_list[pos + 1].attrib['y']) > float(tests_list[pos].attrib['y'])) and \
                                round(float(tests_list[pos].attrib['font-size'])) != max_fs and str(tests_list[pos].text) == footnote_num and now_fs == max_fs:
                                    link_info['x'] = tests_list[pos].attrib['x']
                                    link_info['y'] = tests_list[pos].attrib['y']
                                    pre_footnote_list.append(link_info)
                                    link_find = True
                        if link_find == False:
                            # fallback: a line consisting solely of the number
                            for texts in pages.findall('TEXT'):
                                tests_list = texts.findall('TOKEN')
                                if len(tests_list) == 1 and str(tests_list[0].text) == footnote_num:
                                    link_info['x'] = tests_list[0].attrib['x']
                                    link_info['y'] = tests_list[0].attrib['y']
                                    pre_footnote_list.append(link_info)
                                    link_find = True
    # de-duplicate while preserving order
    # pre_footnote_list = [dict(t) for t in set([tuple(d.items()) for d in pre_footnote_list])]
    pre_list = []
    for i in pre_footnote_list:
        is_exist = False
        for j in pre_list:
            if i == j :
                is_exist = True
        if is_exist == False:
            pre_list.append(i)
    pre_footnote_list = pre_list
    # print(pre_footnote_list)
    # --- pass 2: reassemble body paragraphs, splicing footnote links in
    # place of their markers; stop at the References heading ---
    ALL_paragraph = []
    content = ""
    pre_Y = 0
    is_done = False
    pre_fs = 0
    for pages in root.findall('PAGE'):
        page_id = re.compile('[1-9]\d*').findall(pages.attrib['id'])[0]
        for texts in pages.findall('TEXT'):
            is_first = True
            fsizes = {}
            y_sizes = {}
            for token in texts.findall('TOKEN'):
                y_sizes[float(token.attrib['y'])] = y_sizes.get(float(token.attrib['y']), 0) + 1
                fsizes[round(float(token.attrib['font-size']))] = fsizes.get(round(float(token.attrib['font-size'])), 0) + 1
            now_fs = max(fsizes, key=fsizes.get)
            now_Y = max(y_sizes, key=y_sizes.get)
            for token in texts.findall('TOKEN'):
                if is_done:
                    break
                if (token.text == 'References' and int(page_id) > 3 and (token.attrib['bold'] == 'yes' or round(float(token.attrib['font-size'])) >= max_fs)):
                    ALL_paragraph.append(content)
                    is_done = True
                    break
                # a token at a recorded marker position becomes "<index>< link >"
                pos = [id for id,x in enumerate(pre_footnote_list) if x['page_id'] == pages.attrib['id'] and x['x'] == token.attrib['x'] and x['y'] == token.attrib['y']]
                if len(pos) == 0:
                    word = token.text
                else:
                    word = pre_footnote_list[pos[0]]['index'] + '< ' + pre_footnote_list[pos[0]]['link'] + ' >'
                if word and len(word.replace(' ','')) > 0:
                    if is_first and (pre_Y == 0 or round(abs(now_Y - pre_Y)) > limit1):
                        # a big vertical gap: flush the accumulated paragraph,
                        # merging hyphenated or lowercase continuations backwards
                        if len(content.split()) > 5 :
                            if len(ALL_paragraph) > 0 and ALL_paragraph[-1][-1] == '-':
                                ALL_paragraph[-1] = ALL_paragraph[-1][0:-1] + content
                            elif content[0].islower():
                                if len(ALL_paragraph) > 0 and ALL_paragraph[-1][0].isdigit() and ALL_paragraph[-1][1] == ' ':
                                    ALL_paragraph[-2] += ' ' + content
                                elif len(ALL_paragraph) > 0:
                                    ALL_paragraph[-1] += ' ' + content
                            elif len(ALL_paragraph) > 0 and (ALL_paragraph[-1][-1].isdigit() or ALL_paragraph[-1][-1].isalpha()):
                                ALL_paragraph[-1] += ' ' + content
                            else:
                                ALL_paragraph.append(content)
                        content = word
                    else:
                        if is_first and word[0].isupper() and content[-1] == '.':
                            # sentence boundary at a line start: new paragraph
                            ALL_paragraph.append(content)
                            content = word
                        elif is_first and content[-1] == '-':
                            # de-hyphenate a word split across lines
                            content = content[0:-1] + word
                        else:
                            content += ' ' + word
                    is_first = False
            pre_fs = now_fs
            pre_Y = now_Y
    # unicode-normalise paragraph words that are not spliced links
    footnote_link = []
    for pos1,i in enumerate(ALL_paragraph):
        for pos2,j in enumerate(i.split()):
            link_flag = False
            for k in pre_footnote_list:
                if k['index'] + '< '+ k['link'] + ' >' == j :
                    link_flag = True
                    break
            if link_flag == False:
                try :
                    # NOTE(review): str does not support item assignment, so this
                    # always raises and is swallowed -- normalisation is a no-op.
                    ALL_paragraph[pos1][pos2] = unicodedata.normalize('NFKD', j)
                except :
                    continue
    # build footnote link records, with the paragraph that cites them as context
    for i in pre_footnote_list:
        link_info = {
            'pos_flag': 1,
            'index': i['index'],
            'link': i['link'],
            'context': None,
        }
        context = [id for id,x in enumerate(ALL_paragraph) if i['index'] + '< '+ i['link'] + ' >' in x]
        if len(context) == 1 :
            context = ALL_paragraph[context[0]].replace(i['index'] + '< '+ i['link'] + ' >', '[ ' + i['link'] + ' ]')
            # strip any other spliced links from the context text
            now_reg = re.compile('<\s([a-zA-Z]+://[^\s]*)\s>').findall(context)
            for j in now_reg:
                context = context.replace(j, '').replace('< >','')
            link_info['context'] = context
        footnote_link.append(link_info)
    # --- pass 3: links that appear directly in the body text (and were not
    # already collected as footnote links) ---
    bodytext_list = []
    body_link_num = 1
    for pos_i,i in enumerate(ALL_paragraph):
        if pos_i == 0:
            continue
        link_reg = re.compile('[a-zA-Z]+://[^\s]*').findall(i)
        if link_reg:
            for link in link_reg:
                flag = 1
                for j in link_list:
                    if link in j:
                        flag = 0
                        break
                if flag:
                    body_text = i.split()
                    # re-join a link split across two whitespace-separated words
                    for pos in range(len(body_text)):
                        if link in body_text[pos] and pos+1 != len(body_text) and \
                        ('/' in body_text[pos+1] or ('.' in body_text[pos+1] and body_text[pos+1][-1] != '.')):
                            link = body_text[pos] + ' ' + body_text[pos+1]
                    # strip wrapping parentheses / trailing punctuation
                    if link[0] == '(' :
                        link = link[1:]
                    if link[-1] == ')':
                        link = link[0:-1]
                    elif (link[-1] == '.' or link[-1] == ',') and link[-2] == ')':
                        link = link[0:-2]
                    link1 = link.replace(' ','')
                    link_info = {
                        'pos_flag': 0,
                        'index': body_link_num,
                        'link': link1,
                        'context': i.replace(link, '[ ' + link1 + ' ]'),
                    }
                    bodytext_list.append(link_info)
                    body_link_num += 1
    return footnote_link, bodytext_list
def get_sentence(tokens, pre_url):
    """Join token texts into one sentence string.

    Tokens are normally separated by a single space.  Once a URL has been
    seen (or when *pre_url* is passed in True), fragments that look like URL
    pieces -- containing '/' or a non-final '.' -- are glued on without a
    space so links split across tokens are re-joined.

    Returns None for an empty token list.
    """
    sentence = None
    for idx, tok in enumerate(tokens):
        text = tok.text
        if idx == 0:
            sentence = text
        elif pre_url:
            if len(text) == 1:
                # a lone punctuation char is glued on only when the next
                # token continues a URL path
                continues = idx + 1 != len(tokens) and '/' in tokens[idx + 1].text
                if not text.isalnum() and continues:
                    sentence += text
                else:
                    sentence += ' ' + text
                    pre_url = False
            elif '/' in text or ('.' in text and text[-1] != '.'):
                sentence += text
            else:
                sentence += ' ' + text
                pre_url = False
        else:
            sentence += ' ' + text
        # any token containing a scheme://... URL arms URL-continuation mode
        if re.compile('[a-zA-Z]+://[^\s]*').findall(text):
            pre_url = True
    return sentence
|
import png
# Text-mode geometry: 40 columns, two bytes per cell (character, attribute).
cols = 40
rowSize = cols * 2
rows = 20


def printText(vmem, px, py, s, fg, bg):
    """Write string *s* into video memory *vmem* starting at cell (px, py).

    Each character occupies two consecutive bytes: the character code and a
    colour attribute (foreground in the high nibble, background in the low).
    Both colours advance by one per character, wrapping at 16, which gives a
    rainbow effect.
    """
    addr = py * rowSize + px * 2
    for offset, ch in enumerate(s):
        vmem[addr + 2 * offset] = ord(ch)
        vmem[addr + 2 * offset + 1] = fg * 16 + bg
        fg += 1
        bg += 1
        if fg == 16:
            fg = 0
        if bg == 16:
            bg = 0
fn = "ega-test2"
r = png.Reader(fn + ".png")
img = r.read()
w = img[0]
h = img[1]
pixels = list(img[2])
params = img[3]
print(w, h, params)
#pal = [0, 8, 7, 6, 15, 4, 12, 2, 5, 9, 1, 3, 14, 13, 11]
tex = [ [0 for x in range(w)] for y in range(h) ]
x = 0
y = 0
for row in pixels:
for p in row:
tex[y][x] = p
x += 1
x = 0
y += 1
vmem = []
for y in range(h):
for x in range(w//2):
vmem.append(tex[y][x*2]*16 + tex[y][x*2+1])
s = "Fridge is a 8-bit computer based on extended Intel 8080 instruction set with graphics acceleration. "\
"Current implementation consists of an emulator (Windows x64 + DirectX 11) and VHDL design for FPGA "\
"development board Terasic DE0-CV (Altera Cyclone V), as well as various tools such as - assembly compiler, "\
"custom simplistic language compiler and an IDE."\
"System specs "\
"CPU "\
"Modified Big-Endian Intel 8080 "\
"Graphical instructions "\
"10 MHz clock frequency "\
"RAM "\
"64 KB (16-bit address) "\
"Video "\
"Display: 240x160 pixels "\
"4-bit pallette (16 colors) from 4096 possible colors "\
"Two framebuffers 240x160x4 "\
"64 KB sprite memory (102 KB total video memory) "\
"40x20 ASCII text mode (6x8 font) "\
"ROM "\
"SD card (16 MB maximum)"
printText(vmem, 0, 0, s, 2, 7)
fb = open(fn + ".vhd", 'w')
fb.write("(\n")
i = 0
#for y in range(h):
# for x in range(w//2):
for i in range(len(vmem)):
fb.write('X"%0.2X"' % vmem[i])#(tex[y][x*2]*16 + tex[y][x*2+1]))
fb.write(", ")
if i % 16 == 15:
fb.write("\n")
#i += 1
fb.write('others => X"00");')
fb.close()
|
import numpy as np
import torch
from os.path import join, split, isdir, isfile, split, abspath, dirname
import os
import torch.nn as nn
from dataload.dataset import rec_chroma
from torch.optim import lr_scheduler
from models.nrcnn import NRCNN, Extened_NRCNN
import torch.nn.functional as F
import argparse
import sys, time
from utils import Logger, Averagvalue, save_checkpoint, psnr
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
import socket
# Command-line configuration for chroma-restoration CNN training.
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--batch_size', default=16, type=int, metavar='BT', help='batch size')
parser.add_argument('--res_block', default=10, type=int, metavar='RB', help='the number of residual block')
# =============== optimizer
parser.add_argument('--lr', '--learning_rate', default=1e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0, type=float, metavar='W', help='default weight decay')
parser.add_argument('--stepsize', default=400, type=int, metavar='SS', help='learning rate step size')
parser.add_argument('--gamma', '--gm', default=0.1, type=float, help='learning rate decay parameter: Gamma')
parser.add_argument('--maxepoch', default=400, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--itersize', default=1, type=int, metavar='IS', help='iter size')
# =============== misc
parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--print_freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 100)')
parser.add_argument('--gpu', default='0', type=str, help='GPU ID')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--save_path', help='path to save checkpoint', default='./checkpoint_chroma')
parser.add_argument('--show_path', help='path to save data for tensorboard', type=str, default='./plot')
# ================ qp
parser.add_argument('--qp_start', help='start qp', default=45, type=int)
parser.add_argument('--qp_end', help='end qp', default=45, type=int)
parser.add_argument('--chroma_idx', default=1, type=int, metavar='RB', help='1: U component, 2: V component')
args = parser.parse_args()
# Pin CUDA device enumeration to the PCI bus order, then select the GPU(s).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# inclusive QP range to train over
qplist = [qp for qp in range(args.qp_start, args.qp_end + 1)]
hostname = socket.gethostname()
# NOTE(review): dataset paths are hard-coded Windows drive locations.
traindataset = rec_chroma(r'L:\Dataset\DIV2K\hpm\patch', r'L:\Dataset\DIV2K\org\patch', qplist, args.chroma_idx, './trainlist.txt')
testdataset = rec_chroma(r'L:\Dataset\DIV2K\hpm\patch', r'L:\Dataset\DIV2K\org\patch', qplist, args.chroma_idx, './testlist.txt')
testloader = DataLoader(testdataset, batch_size = 32, shuffle = False, num_workers = 0)
trainloader = DataLoader(traindataset, batch_size = args.batch_size, shuffle = True, num_workers = 0)
def main():
    """Build the model, optionally resume from a checkpoint, and run training.

    Uses the module-level ``args``, ``trainloader`` and helper functions;
    redirects stdout into a log file under ``args.save_path``.
    """
    # model
    model = Extened_NRCNN(args.res_block, 64)
    model.cuda()
    #model.apply(weights_init)
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'"
                  .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    #tune lr
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    # log
    if not isdir(args.save_path):
        os.makedirs(args.save_path)
    log = Logger(join(args.save_path, '%s-%d-log.txt' %('sgd',args.lr)))
    sys.stdout = log
    for epoch in range(args.start_epoch, args.maxepoch):
        if epoch == 0:
            print("Performing initial testing...")
        train(trainloader, model, optimizer, epoch,
              save_dir = join(args.save_path, 'epoch-%d-training-record' % epoch))
        log.flush() # write log
        scheduler.step() # will adjust learning rate
    # BUGFIX: the original ended with writer.close(), but `writer` is a local
    # of train(), so main() always crashed with NameError after training.
def train(trainloader, model, optimizer, epoch, save_dir):
    """Run one training epoch and save a checkpoint into *save_dir*.

    The network predicts the residual (ground truth minus reconstruction);
    the loss is MSE against that residual.  Every ``args.print_freq``
    batches the running loss and the test PSNR gain are written to
    TensorBoard.
    """
    global_step = epoch * len(trainloader) // args.print_freq
    batch_time = Averagvalue()  # NOTE(review): never updated below, logs 0
    loss_list = Averagvalue()
    model.train()
    end = time.time()
    for i, (luma, chroma_rec, chroma_en, chroma_gd, qpmap) in enumerate(trainloader):
        luma, chroma_rec, chroma_en, chroma_gd, qpmap = luma.cuda(), chroma_rec.cuda(), chroma_en.cuda(), chroma_gd.cuda(), qpmap.cuda()
        outputs = model(torch.cat([chroma_en, qpmap], 1), luma)
        # PSNR of the unprocessed reconstruction vs. of the predicted residual
        psnr_1 = psnr(F.mse_loss(chroma_rec, chroma_gd).item())
        psnr_2 = psnr(F.mse_loss(outputs, chroma_gd - chroma_rec).item())
        loss = F.mse_loss(outputs, chroma_gd - chroma_rec)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_list.update(loss.item(), luma.size(0))
        if i % args.print_freq == args.print_freq - 1:
            info = 'Epoch: [{0}/{1}][{2}/{3}] '.format(epoch, args.maxepoch, i, len(trainloader)) + \
                   'Time {batch_time.val:.3f} (avg:{batch_time.avg:.3f})' .format(batch_time = batch_time) + \
                   'Loss {loss.val:f} (avg:{loss.avg:f})'.format(loss = loss_list) + ' PSNR {:.4f}'.format(psnr_2 - psnr_1)
            print(info)
            global_step += 1
            # NOTE(review): a fresh SummaryWriter is opened and closed at every
            # logging step; creating it once per run would be cheaper.
            writer = SummaryWriter(args.show_path)
            writer.add_scalar('scalar/loss', loss_list.avg, global_step)
            delta_psnr = test_chroma(model)
            writer.add_scalar('scalar/psnr', delta_psnr, global_step)
            loss_list.reset()
            writer.close()
    if not isdir(save_dir):
        os.makedirs(save_dir)
    save_checkpoint({
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }, filename = join(save_dir, "epoch-%d-checkpoint.pth" % epoch))
def test_chroma(model):
    """Evaluate *model* and return the average PSNR gain (after - before).

    PSNR "before" is measured on the unprocessed reconstruction, "after"
    on the predicted residual.
    """
    psnr_before = Averagvalue()
    psnr_after = Averagvalue()
    # Evaluation needs no gradients; saves memory without changing results.
    with torch.no_grad():
        # BUGFIX: the original iterated `trainloader`, although `testloader`
        # exists (and was otherwise unused) -- evaluate on the test split.
        for i, (luma, chroma_rec, chroma_en, chroma_gd, qpmap) in enumerate(testloader):
            luma, chroma_rec, chroma_en, chroma_gd, qpmap = luma.cuda(), chroma_rec.cuda(), chroma_en.cuda(), chroma_gd.cuda(), qpmap.cuda()
            outputs = model(torch.cat([chroma_en, qpmap], 1), luma)
            psnr_1 = psnr(F.mse_loss(chroma_rec, chroma_gd).item())
            psnr_2 = psnr(F.mse_loss(outputs, chroma_gd - chroma_rec).item())
            info = '[{}]'.format(i) + 'PSNR from {:.4f} to {:.4f}'.format(psnr_1, psnr_2) + ' Delta:{:.4f}'.format(psnr_2 - psnr_1)
            # BUGFIX: `info` was built but never printed in the original.
            print(info)
            psnr_before.update(psnr_1)
            psnr_after.update(psnr_2)
    return psnr_after.avg - psnr_before.avg
def weights_init(m):
    """Initialise Conv2d layers: N(0, 0.01) weights and zero bias.

    The special [1, 5, 1, 1]-shaped weight (the fusion / new_score_weight
    layer) is instead set to the constant 0.2, i.e. an equal contribution
    from each of its five inputs.
    """
    if not isinstance(m, nn.Conv2d):
        return
    # xavier(m.weight.data)
    m.weight.data.normal_(0, 0.01)
    if m.weight.data.shape == torch.Size([1, 5, 1, 1]):
        # for new_score_weight
        torch.nn.init.constant_(m.weight, 0.2)
    if m.bias is not None:
        m.bias.data.zero_()
if __name__ == "__main__":
main()
|
from Builder.Relation import *
import json
import os
import datetime # For observation grouping #Delta Time Creation
class Controller:
    """Holds a project's relationships and dependencies, moves items between
    the two lists, and persists/restores them as JSON under 'Project Data/'."""

    def __init__(self):
        self.relationships_main = []  # Relation objects classed as relationships
        self.dependencies_main = []   # Relation objects classed as dependencies
        self.project_name = None

    def update(self, project):
        """Reload every relationship JSON file for *project* from disk."""
        # Clear previous data, if any
        self.relationships_main.clear()
        self.dependencies_main.clear()
        self.project_name = project
        self.eceld_folder_path = self.eceld_file_path()
        """
        # Import relationships into the controller
        """
        relationship_dir = 'Project Data/' + self.project_name + '/CE/Relationships/'
        file_list = []
        for file in os.listdir(relationship_dir):
            file_list.append(file)
        file_list.sort()
        for file_name in file_list:
            with open(relationship_dir + file_name, 'r') as relation:
                self.relationships_main.append(Relation(json.load(relation),None,None,self.eceld_folder_path))
        # convert absolute observation timestamps into deltas
        self.create_delta()

    def move_to_dependency(self, relationship):
        """
        # Removes relationship from relationship list and translates to dependency list
        """
        if relationship in self.relationships_main:
            relationship.name = "Dependency " + str(relationship.number)
            self.dependencies_main.append(self.relationships_main.pop(self.relationships_main.index(relationship)))
            return True
        return False

    def move_to_relationship(self, dependency):
        """
        # Removes dependency from dependency list and translates to relationship list
        """
        if dependency in self.dependencies_main:
            dependency.name = "Relationship " + str(dependency.number)
            self.relationships_main.append(self.dependencies_main.pop(self.dependencies_main.index(dependency)))
            return True
        return False

    def search(self, keyword = '', table = 'relationships'):
        """Return relations from *table* whose observations contain *keyword*.

        An empty/None keyword returns the full list for the chosen table.
        Returns None for an unknown table name.
        """
        search = []
        if table.lower() == 'relationships':
            if keyword == '' or keyword == None:
                return self.relationships_main
            else:
                for relation in self.relationships_main:
                    for item in relation.observation_list:
                        if keyword in item.show():
                            search.append(relation)
                            break
                return search
        elif table.lower() == 'dependencies':
            if keyword == '' or keyword == None:
                return self.dependencies_main
            else:
                for relation in self.dependencies_main:
                    for item in relation.observation_list:
                        if keyword in item.show():
                            search.append(relation)
                            break
                return search

    def search_dep(self, keyword = '', table = 'dependencies'):
        """Keyword search restricted to the dependency list."""
        search = []
        if keyword == '' or keyword == None:
            return self.dependencies_main
        else:
            for relation in self.dependencies_main:
                for item in relation.observation_list:
                    if keyword in item.show():
                        search.append(relation)
                        break
            return search

    #NOTE: start is a str before and after translation to deltaTime
    def create_delta(self):
        """Replace each observation's absolute start time with the delta (in
        seconds, as a string) from the previous observation."""
        for relationship in self.relationships_main:
            #print(relationship.name) #DEBUG
            timeDiff_list = [0.0] #Initial observation will always occur at 0.0 in script
            #Makes DeltaTime Calculations
            for x in range(1, len(relationship.observation_list)):
                currTime = datetime.datetime.strptime(relationship.observation_list[x].start,'%Y-%m-%dT%H:%M:%S')
                pastTime = datetime.datetime.strptime(relationship.observation_list[x-1].start,'%Y-%m-%dT%H:%M:%S')
                timeDiff = currTime-pastTime
                timeDiff_list.append(timeDiff.total_seconds())
                #print("delta:",timeDiff.total_seconds()) #DEBUG
            #Replaces startTime with DeltaTimes, Normalizes times differences that got rounded down to 0
            for y in range(len(relationship.observation_list)):
                if timeDiff_list[y] == 0.0:
                    relationship.observation_list[y].start = str(timeDiff_list[y]+0.1)
                else:
                    relationship.observation_list[y].start = str(timeDiff_list[y])

    def save_object(self):
        """
        Deep copy of project
        """
        # Serialise both lists into one JSON document:
        # [ {"Relationships": {...}}, {"Dependencies": {...}} ]
        print("save object")
        objectToSave = []
        objectToSaveRelations = {"Relationships": {}}
        objectToSaveDependencies = {"Dependencies": {}}
        for relation in self.relationships_main:
            objectToSaveRelations["Relationships"][relation.name] = {}
            print(relation.name)
            for observation in relation.observation_list:
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation] = {}
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation]['start'] = observation.start
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation]['data'] = observation.data
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation]['data_type'] = observation.data_type
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation]['artifact'] = observation.artifact
                objectToSaveRelations["Relationships"][relation.name][observation.index_observation]["select_filters"] = observation.select_filters
        objectToSave.append(objectToSaveRelations)
        # Dependencies
        for dependency in self.dependencies_main:
            objectToSaveDependencies["Dependencies"][dependency.name] = {}
            # objectToSave[dependency.name] = {}
            print(dependency.name)
            for observation in dependency.observation_list:
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation] = {}
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation]['start'] = observation.start
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation]['data'] = observation.data
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation]['data_type'] = observation.data_type
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation]['artifact'] = observation.artifact
                objectToSaveDependencies["Dependencies"][dependency.name][observation.index_observation]["select_filters"] = observation.select_filters
            #objectToSave["dependencies"].append(dependency.name)
        objectToSave.append(objectToSaveDependencies)
        with open("Project Data/"+self.project_name+"/Builder/" + self.project_name + ".json", 'w') as outfile:
            json.dump(objectToSave, outfile, indent=4)

    def load_object(self,project_name):
        """
        Import project
        """
        # Rebuild both lists from the JSON produced by save_object().
        self.project_name = project_name
        observation_list = []
        self.relationships_main.clear()
        self.dependencies_main.clear()
        self.eceld_folder_path = self.eceld_file_path()
        with open("Project Data/" + self.project_name + "/Builder/" + self.project_name + ".json", 'r') as load_file:
            a = json.load(load_file)
        # Relations
        relations_dictionary = a[0]["Relationships"]
        for key in relations_dictionary.keys():
            # key looks like "Relationship <n>"; recover the number
            index = int(key.split()[1])
            for observation in relations_dictionary[key]:
                observation_list.append(relations_dictionary[key][observation])
            # NOTE(review): observation_list is cleared right after being passed
            # to Relation -- this assumes Relation copies the list; verify.
            self.relationships_main.append(Relation(observation_list, index, None, eceld_folder= self.eceld_folder_path))
            observation_list.clear()
        # Dependencies
        observation_list = []
        dependencies_dictionary = a[1]["Dependencies"]
        for key in dependencies_dictionary.keys():
            index = int(key.split()[1])
            for observation in dependencies_dictionary[key]:
                observation_list.append(dependencies_dictionary[key][observation])
            self.dependencies_main.append(Relation(observation_list, index, True, eceld_folder= self.eceld_folder_path))
            observation_list.clear()

    def eceld_file_path(self):
        """Return the eceld project path recorded by the CE logger (first line
        of the log file, newline included)."""
        self.eceld_folder_path = open('Project Data/' + self.project_name + '/CE/CE_logs/eceld_project_path.txt')
        self.lines = self.eceld_folder_path.readlines()
        self.eceld_folder_path.close()
        return self.lines[0]

    def unified_list(self):
        """Collect all non-ignored dependency observations, numbering user
        actions and plain observations with separate counters."""
        uni_list = []
        i = 1
        user_action_counter = 1
        observation_counter = 1
        for dep in self.dependencies_main:
            for obs in dep.observation_list:
                if obs.ignore != 1:
                    if obs.user_action:
                        obs.observation_name = "User Action " + str(user_action_counter)
                        obs.user_action_number = user_action_counter
                        user_action_counter += 1
                    else:
                        obs.observation_name = "Observation " + str(observation_counter)
                        obs.observation_number = observation_counter
                        observation_counter += 1
                    uni_list.append(obs)
                i += 1
        return uni_list
|
from calcular import Calcular
def start(pontos):
    """Entry point: begin a game session with the given starting score.

    Bug fix: the original body immediately reassigned ``pontos`` to 0,
    discarding the caller's argument. The parameter is now honored; the
    existing call site ``start(0)`` behaves identically.
    """
    jogar(pontos)
def jogar(pontos: int):
    """Play one round: ask for a difficulty, pose a question, adjust the
    score, and recurse while the player chooses to continue."""
    dificuldade: int = int(input('Qual o nivel de dificuldade [1, 2, 3 e 4]: '))
    calc: Calcular = Calcular(dificuldade)
    # NOTE(review): bare attribute access — this is a no-op unless
    # Calcular.mostra_calc is a @property with side effects; if it is a
    # plain method it was probably meant to be called. TODO confirm.
    calc.mostra_calc
    resultado: int = int(input('Resultado: '))
    if calc.resposta(resultado):
        print('Resposta correta!')
        pontos += 1
    else:
        print('Resposta incorreta!')
        pontos -= 1
    print(f'Você tem {pontos} Ponto(s)')
    continuar: int = int(input('Deseja continuar? [1 - Sim / 0 - Não]: '))
    # Recursion doubles as the game loop; any non-zero answer continues.
    if continuar:
        jogar(pontos)
    else:
        print('obrigado por participar, Volte sempre!')
# Run only when executed as a script (not on import); start with 0 points.
if __name__ == "__main__":
    start(0)
import base64
import copy
import datetime
import os
from decimal import Decimal
from unittest.mock import patch
import django
import pytest
import pytz
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from rest_framework import serializers
from rest_framework.fields import DecimalField
from drf_extra_fields import compat
from drf_extra_fields.compat import DateRange, DateTimeTZRange, NumericRange
from drf_extra_fields.fields import (
Base64FileField,
Base64ImageField,
DateRangeField,
DateTimeRangeField,
DecimalRangeField,
FloatRangeField,
HybridImageField,
IntegerRangeField,
LowercaseEmailField,
)
from drf_extra_fields.geo_fields import PointField
class UploadedBase64Image:
    """Plain in-memory stand-in for an uploaded image record."""

    def __init__(self, file=None, created=None):
        self.file = file
        # Any falsy ``created`` (None in practice) defaults to "now".
        self.created = created if created else datetime.datetime.now()
class UploadedBase64File(UploadedBase64Image):
    # Same record shape as an uploaded image; the distinct type only
    # documents that the payload is a generic file.
    pass
class DownloadableBase64Image:
    """Stand-in for a model instance that exposes an ``image`` field file."""

    class ImageFieldFile:
        """Minimal file-field shim: remembers a path and opens it as binary."""

        def __init__(self, path):
            self.path = path

        def open(self):
            # Binary mode, matching how the field reads image content.
            return open(self.path, "rb")

    def __init__(self, image_path):
        self.image = self.ImageFieldFile(path=image_path)
class DownloadableBase64File:
    """Stand-in for a model instance that exposes a ``file`` field file."""

    class FieldFile:
        """Minimal file-field shim: remembers a path and opens it as binary."""

        def __init__(self, path):
            self.path = path

        def open(self):
            # Binary mode, matching how the field reads file content.
            return open(self.path, "rb")

    def __init__(self, file_path):
        self.file = self.FieldFile(path=file_path)
class UploadedBase64ImageSerializer(serializers.Serializer):
    """Serializer exercising ``Base64ImageField`` round-trips in the tests."""
    file = Base64ImageField(required=False)
    created = serializers.DateTimeField()

    def update(self, instance, validated_data):
        # Only the image payload is updated; ``created`` stays untouched.
        instance.file = validated_data['file']
        return instance

    def create(self, validated_data):
        return UploadedBase64Image(**validated_data)
class DownloadableBase64ImageSerializer(serializers.Serializer):
    """Read-side serializer: represents the stored image as raw base64."""
    image = Base64ImageField(represent_in_base64=True)
class Base64ImageSerializerTests(TestCase):
    """End-to-end tests for ``Base64ImageField`` upload and download handling."""

    def test_create(self):
        """
        Test for creating Base64 image in the server side
        """
        now = datetime.datetime.now()
        file = 'R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        serializer = UploadedBase64ImageSerializer(data={'created': now, 'file': file})
        uploaded_image = UploadedBase64Image(file=file, created=now)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_image.created)
        self.assertFalse(serializer.validated_data is uploaded_image)

    def test_create_with_base64_prefix(self):
        """
        Test for creating Base64 image in the server side, with the
        data-URI 'data:image/gif;base64,' prefix included
        """
        now = datetime.datetime.now()
        file = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        serializer = UploadedBase64ImageSerializer(data={'created': now, 'file': file})
        uploaded_image = UploadedBase64Image(file=file, created=now)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_image.created)
        self.assertFalse(serializer.validated_data is uploaded_image)

    def test_create_with_invalid_base64(self):
        """
        Test for creating Base64 image with an invalid Base64 string in the server side
        """
        now = datetime.datetime.now()
        file = 'this_is_not_a_base64'
        serializer = UploadedBase64ImageSerializer(data={'created': now, 'file': file})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'file': [Base64ImageField.INVALID_FILE_MESSAGE]})

    def test_validation_error_with_non_file(self):
        """
        Passing non-base64 should raise a validation error.
        """
        now = datetime.datetime.now()
        serializer = UploadedBase64ImageSerializer(data={'created': now,
                                                         'file': 'abc'})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'file': [Base64ImageField.INVALID_FILE_MESSAGE]})

    def test_remove_with_empty_string(self):
        """
        Passing empty string as data should cause image to be removed
        """
        now = datetime.datetime.now()
        file = 'R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        uploaded_image = UploadedBase64Image(file=file, created=now)
        serializer = UploadedBase64ImageSerializer(instance=uploaded_image, data={'created': now, 'file': ''})
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_image.created)
        self.assertIsNone(serializer.validated_data['file'])

    def test_download(self):
        """An image on disk is represented back as its base64 source."""
        encoded_source = 'R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='
        with open('im.jpg', 'wb') as im_file:
            im_file.write(base64.b64decode(encoded_source))
        image = DownloadableBase64Image(os.path.abspath('im.jpg'))
        serializer = DownloadableBase64ImageSerializer(image)
        try:
            self.assertEqual(serializer.data['image'], encoded_source)
        finally:
            # Always clean up the scratch file created above.
            os.remove('im.jpg')

    def test_hybrid_image_field(self):
        """HybridImageField tries base64 decoding first, then falls back to
        plain ImageField parsing when the mixin raises ValidationError."""
        field = HybridImageField()
        with patch('drf_extra_fields.fields.Base64FieldMixin') as mixin_patch:
            field.to_internal_value({})
            self.assertTrue(mixin_patch.to_internal_value.called)
        with patch('drf_extra_fields.fields.Base64FieldMixin') as mixin_patch:
            mixin_patch.to_internal_value.side_effect = ValidationError('foobar')
            with patch('drf_extra_fields.fields.ImageField') as image_patch:
                field.to_internal_value({})
                self.assertTrue(mixin_patch.to_internal_value.called)
                self.assertTrue(image_patch.to_internal_value.called)

    def test_create_with_webp_image(self):
        """
        Test for creating Base64 image with webp format in the server side
        """
        now = datetime.datetime.now()
        file = "data:image/webp;base64,UklGRkAAAABXRUJQVlA4IDQAAADwAQCdASoBAAEAAQAc" \
               "JaACdLoB+AAETAAA/vW4f/6aR40jxpHxcP/ugT90CfugT/3NoAAA"
        serializer = UploadedBase64ImageSerializer(data={'created': now, 'file': file})
        uploaded_image = UploadedBase64Image(file=file, created=now)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_image.created)
        self.assertFalse(serializer.validated_data is uploaded_image)
class PDFBase64FileField(Base64FileField):
    """Base64 file field restricted to PDFs (extension detection is stubbed)."""
    ALLOWED_TYPES = ('pdf',)

    def get_file_extension(self, filename, decoded_file):
        # Content sniffing is irrelevant for these tests; always report pdf.
        return 'pdf'
class UploadedBase64FileSerializer(serializers.Serializer):
    """Serializer exercising ``Base64FileField`` round-trips in the tests."""
    file = PDFBase64FileField(required=False)
    created = serializers.DateTimeField()

    def update(self, instance, validated_data):
        # Only the file payload is updated; ``created`` stays untouched.
        instance.file = validated_data['file']
        return instance

    def create(self, validated_data):
        return UploadedBase64File(**validated_data)
class DownloadableBase64FileSerializer(serializers.Serializer):
    """Read-side serializer: represents the stored file as raw base64."""
    file = PDFBase64FileField(represent_in_base64=True)
class Base64FileSerializerTests(TestCase):
    """End-to-end tests for ``Base64FileField`` upload and download handling."""

    def test_create(self):
        """
        Test for creating Base64 file in the server side
        """
        now = datetime.datetime.now()
        file = 'R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        serializer = UploadedBase64FileSerializer(data={'created': now, 'file': file})
        uploaded_file = UploadedBase64File(file=file, created=now)
        # Fix: the original called serializer.is_valid() twice in a row;
        # DRF caches the validation result, so the first call was redundant.
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_file.created)
        self.assertFalse(serializer.validated_data is uploaded_file)

    def test_create_with_base64_prefix(self):
        """
        Test for creating Base64 file in the server side, with the
        data-URI base64 prefix included
        """
        now = datetime.datetime.now()
        file = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        serializer = UploadedBase64FileSerializer(data={'created': now, 'file': file})
        uploaded_file = UploadedBase64File(file=file, created=now)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_file.created)
        self.assertFalse(serializer.validated_data is uploaded_file)

    def test_validation_error_with_non_file(self):
        """
        Passing non-base64 should raise a validation error.
        """
        now = datetime.datetime.now()
        serializer = UploadedBase64FileSerializer(data={'created': now, 'file': 'abc'})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'file': [Base64FileField.INVALID_FILE_MESSAGE]})

    def test_remove_with_empty_string(self):
        """
        Passing empty string as data should cause file to be removed
        """
        now = datetime.datetime.now()
        file = 'R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        uploaded_file = UploadedBase64File(file=file, created=now)
        serializer = UploadedBase64FileSerializer(instance=uploaded_file, data={'created': now, 'file': ''})
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], uploaded_file.created)
        self.assertIsNone(serializer.validated_data['file'])

    def test_download(self):
        """A file on disk is represented back as its base64 source."""
        encoded_source = 'R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='
        with open('im.jpg', 'wb') as im_file:
            im_file.write(base64.b64decode(encoded_source))
        file = DownloadableBase64File(os.path.abspath('im.jpg'))
        serializer = DownloadableBase64FileSerializer(file)
        try:
            self.assertEqual(serializer.data['file'], encoded_source)
        finally:
            # Always clean up the scratch file created above.
            os.remove('im.jpg')
class SavePoint:
    """Simple record pairing a geographic point with a creation timestamp."""

    def __init__(self, point=None, created=None):
        self.point = point
        # Any falsy ``created`` (None in practice) defaults to "now".
        self.created = created if created else datetime.datetime.now()
class PointSerializer(serializers.Serializer):
    """Serializer exercising ``PointField`` in the tests."""
    point = PointField(required=False)
    created = serializers.DateTimeField()

    def update(self, instance, validated_data):
        # Only the point is updated; ``created`` stays untouched.
        instance.point = validated_data['point']
        return instance

    def create(self, validated_data):
        return SavePoint(**validated_data)
class StringPointSerializer(PointSerializer):
    """PointSerializer variant whose coordinates serialize as strings."""
    point = PointField(required=False, str_points=True)
class SridPointSerializer(PointSerializer):
    """PointSerializer variant that stamps points with SRID 4326 (WGS 84)."""
    point = PointField(required=False, srid=4326)
class PointSerializerTest(TestCase):
    """Tests for ``PointField`` validation and representation."""

    def test_create(self):
        """
        Test for creating Point field in the server side
        """
        now = datetime.datetime.now()
        point = {
            "latitude": 49.8782482189424,
            "longitude": 24.452545489
        }
        serializer = PointSerializer(data={'created': now, 'point': point})
        saved_point = SavePoint(point=point, created=now)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], saved_point.created)
        self.assertFalse(serializer.validated_data is saved_point)
        # No srid configured on the plain PointSerializer.
        self.assertIsNone(serializer.validated_data['point'].srid)

    def test_validation_error_with_non_file(self):
        """
        Passing non-dict contains latitude and longitude should raise a validation error.
        """
        now = datetime.datetime.now()
        serializer = PointSerializer(data={'created': now, 'point': '123'})
        self.assertFalse(serializer.is_valid())

    def test_remove_with_empty_string(self):
        """
        Passing empty string as data should cause point to be removed
        """
        now = datetime.datetime.now()
        point = {
            "latitude": 49.8782482189424,
            "longitude": 24.452545489
        }
        saved_point = SavePoint(point=point, created=now)
        serializer = PointSerializer(data={'created': now, 'point': ''})
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['created'], saved_point.created)
        self.assertIsNone(serializer.validated_data['point'])

    def test_empty_latitude(self):
        # NOTE(review): despite the name, this test supplies an empty
        # *longitude*; consider renaming to test_empty_longitude.
        now = datetime.datetime.now()
        point = {
            "latitude": 49.8782482189424,
            "longitude": ""
        }
        serializer = PointSerializer(data={'created': now, 'point': point})
        self.assertFalse(serializer.is_valid())

    def test_invalid_latitude(self):
        # NOTE(review): despite the name, this test supplies an invalid
        # *longitude*; consider renaming to test_invalid_longitude.
        now = datetime.datetime.now()
        point = {
            "latitude": 49.8782482189424,
            "longitude": "fdff"
        }
        serializer = PointSerializer(data={'created': now, 'point': point})
        self.assertFalse(serializer.is_valid())

    def test_serialization(self):
        """
        Regular JSON serialization should output float values
        """
        from django.contrib.gis.geos import Point
        now = datetime.datetime.now()
        # GEOS Point takes (x, y) == (longitude, latitude).
        point = Point(24.452545489, 49.8782482189424)
        saved_point = SavePoint(point=point, created=now)
        serializer = PointSerializer(saved_point)
        self.assertEqual(serializer.data['point'], {'latitude': 49.8782482189424, 'longitude': 24.452545489})

    def test_str_points_serialization(self):
        """
        PointField with str_points=True should output string values
        """
        from django.contrib.gis.geos import Point
        now = datetime.datetime.now()
        # test input is shortened due to string conversion rounding
        # gps has at max 8 decimals, so it doesn't make a difference
        point = Point(24.452545489, 49.8782482189)
        saved_point = SavePoint(point=point, created=now)
        serializer = StringPointSerializer(saved_point)
        self.assertEqual(serializer.data['point'], {'latitude': '49.8782482189', 'longitude': '24.452545489'})

    def test_srid_point(self):
        """
        PointField with srid should should result in a Point object with srid set
        """
        now = datetime.datetime.now()
        point = {
            "latitude": 49.8782482189424,
            "longitude": 24.452545489
        }
        serializer = SridPointSerializer(data={'created': now, 'point': point})
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['point'].srid, 4326)
# Backported from django_rest_framework/tests/test_fields.py
def get_items(mapping_or_list_of_two_tuples):
    """Normalize test-case inputs to an iterable of (value, expected) pairs.

    Accepts either a dict mapping value -> expected, or a list that is
    already in ``[(value, expected), ...]`` form.
    """
    if not isinstance(mapping_or_list_of_two_tuples, dict):
        # Already [(value, expected), ...]
        return mapping_or_list_of_two_tuples
    # {value: expected} -> view of (value, expected) pairs
    return mapping_or_list_of_two_tuples.items()
class IntegerRangeSerializer(serializers.Serializer):
    """Wraps a bare ``IntegerRangeField`` for the range-field tests."""
    range = IntegerRangeField()
class IntegerRangeChildAllowNullSerializer(serializers.Serializer):
    """``IntegerRangeField`` whose child field accepts null bounds."""
    range = IntegerRangeField(child_attrs={"allow_null": True})
class FloatRangeSerializer(serializers.Serializer):
    """Wraps a bare ``FloatRangeField`` for the range-field tests."""
    range = FloatRangeField()
class DateTimeRangeSerializer(serializers.Serializer):
    """Wraps a bare ``DateTimeRangeField`` for the range-field tests."""
    range = DateTimeRangeField()
class DateRangeSerializer(serializers.Serializer):
    """``DateRangeField`` with an explicit unbounded-open initial value."""
    range = DateRangeField(initial=DateRange(None, None, '()'))
class DateRangeWithAllowEmptyFalseSerializer(serializers.Serializer):
    """``DateRangeField`` that rejects an empty input dictionary."""
    range = DateRangeField(allow_empty=False)
class DateRangeWithAllowEmptyTrueSerializer(serializers.Serializer):
    """``DateRangeField`` that accepts an empty input dictionary."""
    range = DateRangeField(allow_empty=True)
class DecimalRangeSerializer(serializers.Serializer):
    """Wraps a bare ``DecimalRangeField`` for the range-field tests."""
    range = DecimalRangeField()
class DecimalRangeSerializerWithChildAttribute(serializers.Serializer):
    """``DecimalRangeField`` with an explicit child (5 digits, 2 decimals)."""
    range = DecimalRangeField(child=DecimalField(max_digits=5, decimal_places=2))
class FieldValues:
    """
    Base class for testing valid and invalid input values.

    Subclasses supply ``serializer_class``, ``field``, ``valid_inputs``,
    ``invalid_inputs`` and ``outputs``.
    """

    def test_valid_inputs(self):
        """
        Ensure that valid values return the expected validated data.
        """
        for given, expected in get_items(self.valid_inputs):
            snapshot = copy.deepcopy(given)
            serializer = self.serializer_class(data=given)
            serializer.is_valid()
            # Validation must not mutate the caller-supplied input.
            assert serializer.initial_data == snapshot
            assert self.field.run_validation(snapshot) == expected

    def test_invalid_inputs(self):
        """
        Ensure that invalid values raise the expected validation error.
        """
        for given, expected_failure in get_items(self.invalid_inputs):
            with pytest.raises(serializers.ValidationError) as exc_info:
                self.field.run_validation(given)
            assert exc_info.value.detail == expected_failure

    def test_outputs(self):
        """Ensure to_representation produces the expected primitive output."""
        for value, expected in get_items(self.outputs):
            assert self.field.to_representation(value) == expected
# end of backport
class TestIntegerRangeField(FieldValues):
    """
    Valid/invalid/representation cases for ``IntegerRangeField``.
    """
    serializer_class = IntegerRangeSerializer
    valid_inputs = [
        ({'lower': '1', 'upper': 2, 'bounds': '[)'},
         NumericRange(**{'lower': 1, 'upper': 2, 'bounds': '[)'})),
        ({'lower': 1, 'upper': 2},
         NumericRange(**{'lower': 1, 'upper': 2})),
        ({'lower': 1},
         NumericRange(**{'lower': 1})),
        ({'upper': 1},
         NumericRange(**{'upper': 1})),
        ({'empty': True},
         NumericRange(**{'empty': True})),
        ({}, NumericRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['A valid integer is required.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'foo': 'bar'}, ['Extra content not allowed "foo".']),
        ({'lower': 2, 'upper': 1}, ['The start of the range must not exceed the end of the range.']),
        # Nulls are rejected by default (contrast with the allow_null test class).
        ({'lower': 1, 'upper': None, 'bounds': '[)'}, ['This field may not be null.']),
        ({'lower': None, 'upper': 1, 'bounds': '[)'}, ['This field may not be null.']),
    ]
    outputs = [
        (NumericRange(**{'lower': '1', 'upper': '2'}),
         {'lower': 1, 'upper': 2, 'bounds': '[)'}),
        (NumericRange(**{'empty': True}), {'empty': True}),
        (NumericRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': '1', 'upper': 2, 'bounds': '[)'},
         {'lower': 1, 'upper': 2, 'bounds': '[)'}),
        ({'lower': 1, 'upper': 2},
         {'lower': 1, 'upper': 2, 'bounds': None}),
        ({'lower': 1},
         {'lower': 1, 'upper': None, 'bounds': None}),
        ({'upper': 1},
         {'lower': None, 'upper': 1, 'bounds': None}),
        ({}, {}),
    ]
    field = IntegerRangeField()

    def test_no_source_on_child(self):
        """A child field declared with source= must be rejected."""
        with pytest.raises(AssertionError) as exc_info:
            IntegerRangeField(child=serializers.IntegerField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
class TestIntegerRangeChildAllowNullField(FieldValues):
    """``IntegerRangeField`` with allow_null children: null bounds validate."""
    serializer_class = IntegerRangeChildAllowNullSerializer
    valid_inputs = [
        ({'lower': '1', 'upper': 2, 'bounds': '[)'},
         NumericRange(**{'lower': 1, 'upper': 2, 'bounds': '[)'})),
        ({'lower': 1, 'upper': 2},
         NumericRange(**{'lower': 1, 'upper': 2})),
        ({'lower': 1},
         NumericRange(**{'lower': 1})),
        ({'upper': 1},
         NumericRange(**{'upper': 1})),
        ({'empty': True},
         NumericRange(**{'empty': True})),
        ({}, NumericRange()),
        # Explicit nulls are accepted here, unlike the default field.
        ({'lower': 1, 'upper': None, 'bounds': '[)'},
         NumericRange(**{'lower': 1, 'upper': None, 'bounds': '[)'})),
        ({'lower': None, 'upper': 1, 'bounds': '[)'},
         NumericRange(**{'lower': None, 'upper': 1, 'bounds': '[)'})),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['A valid integer is required.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'foo': 'bar'}, ['Extra content not allowed "foo".']),
        ({'lower': 2, 'upper': 1}, ['The start of the range must not exceed the end of the range.']),
    ]
    outputs = [
        (NumericRange(**{'lower': '1', 'upper': '2'}),
         {'lower': 1, 'upper': 2, 'bounds': '[)'}),
        (NumericRange(**{'empty': True}), {'empty': True}),
        (NumericRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': '1', 'upper': 2, 'bounds': '[)'},
         {'lower': 1, 'upper': 2, 'bounds': '[)'}),
        ({'lower': 1, 'upper': 2},
         {'lower': 1, 'upper': 2, 'bounds': None}),
        ({'lower': 1},
         {'lower': 1, 'upper': None, 'bounds': None}),
        ({'upper': 1},
         {'lower': None, 'upper': 1, 'bounds': None}),
        ({}, {}),
    ]
    field = IntegerRangeField(child_attrs={"allow_null": True})
class TestDecimalRangeField(FieldValues):
    """Valid/invalid/representation cases for ``DecimalRangeField``."""
    serializer_class = DecimalRangeSerializer
    valid_inputs = [
        ({'lower': '1', 'upper': 2., 'bounds': '[)'},
         NumericRange(**{'lower': 1., 'upper': 2., 'bounds': '[)'})),
        ({'lower': 1., 'upper': 2.},
         NumericRange(**{'lower': 1, 'upper': 2})),
        ({'lower': 1},
         NumericRange(**{'lower': 1})),
        ({'upper': 1},
         NumericRange(**{'upper': 1})),
        ({'empty': True},
         NumericRange(**{'empty': True})),
        ({}, NumericRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['A valid number is required.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'lower': 2., 'upper': 1.}, ['The start of the range must not exceed the end of the range.']),
    ]
    # Decimals are represented as strings on output.
    outputs = [
        (NumericRange(**{'lower': '1.1', 'upper': '2'}),
         {'lower': '1.1', 'upper': '2', 'bounds': '[)'}),
        (NumericRange(**{'empty': True}), {'empty': True}),
        (NumericRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': Decimal('1.1'), 'upper': "2.3", 'bounds': '[)'},
         {'lower': "1.1", 'upper': "2.3", 'bounds': '[)'}),
        ({'lower': Decimal('1.1'), 'upper': "2.3"},
         {'lower': "1.1", 'upper': "2.3", 'bounds': None}),
        ({'lower': 1},
         {'lower': "1", 'upper': None, 'bounds': None}),
        ({'upper': 1},
         {'lower': None, 'upper': "1", 'bounds': None}),
        ({}, {}),
    ]
    field = DecimalRangeField()

    def test_no_source_on_child(self):
        """A child field declared with source= must be rejected."""
        with pytest.raises(AssertionError) as exc_info:
            DecimalRangeField(child=serializers.DecimalField(source='other', max_digits=None, decimal_places=None))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
class TestDecimalRangeFieldWithChildAttribute(FieldValues):
    """``DecimalRangeField`` with an explicit child: precision is enforced."""
    serializer_class = DecimalRangeSerializerWithChildAttribute
    field = DecimalRangeField(child=DecimalField(max_digits=5, decimal_places=2))
    valid_inputs = [
        ({'lower': '1', 'upper': 2., 'bounds': '[)'},
         NumericRange(**{'lower': 1., 'upper': 2., 'bounds': '[)'})),
        ({'lower': 1., 'upper': 2.},
         NumericRange(**{'lower': 1, 'upper': 2})),
        ({'lower': 1},
         NumericRange(**{'lower': 1})),
        ({'upper': 1},
         NumericRange(**{'upper': 1})),
        ({'empty': True},
         NumericRange(**{'empty': True})),
        ({}, NumericRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['A valid number is required.']),
        # Child precision limits (max_digits=5, decimal_places=2) apply per bound.
        ({'upper': '123456'}, ['Ensure that there are no more than 5 digits in total.']),
        ({'lower': '9.123'}, ['Ensure that there are no more than 2 decimal places.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'lower': 2., 'upper': 1.}, ['The start of the range must not exceed the end of the range.']),
    ]
    # Output values are quantized to two decimal places by the child field.
    outputs = [
        (NumericRange(**{'lower': '1.1', 'upper': '2'}),
         {'lower': '1.10', 'upper': '2.00', 'bounds': '[)'}),
        (NumericRange(**{'empty': True}), {'empty': True}),
        (NumericRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': Decimal('1.1'), 'upper': "2.3", 'bounds': '[)'},
         {'lower': "1.10", 'upper': "2.30", 'bounds': '[)'}),
        ({'lower': Decimal('1.1'), 'upper': "2.3"},
         {'lower': "1.10", 'upper': "2.30", 'bounds': None}),
        ({'lower': 1},
         {'lower': "1.00", 'upper': None, 'bounds': None}),
        ({'upper': 1},
         {'lower': None, 'upper': "1.00", 'bounds': None}),
        ({}, {}),
    ]
# FloatRangeField was removed from django.contrib.postgres in Django 3.1,
# so these tests only run on older Django versions that still provide it.
@pytest.mark.skipif(django.VERSION >= (3, 1) or not hasattr(compat.postgres_fields, "FloatRangeField"),
                    reason='FloatRangeField deprecated on django 3.1 ')
class TestFloatRangeField(FieldValues):
    """
    Valid/invalid/representation cases for ``FloatRangeField``.
    """
    serializer_class = FloatRangeSerializer
    valid_inputs = [
        ({'lower': '1', 'upper': 2., 'bounds': '[)'},
         NumericRange(**{'lower': 1., 'upper': 2., 'bounds': '[)'})),
        ({'lower': 1., 'upper': 2.},
         NumericRange(**{'lower': 1, 'upper': 2})),
        ({'lower': 1},
         NumericRange(**{'lower': 1})),
        ({'upper': 1},
         NumericRange(**{'upper': 1})),
        ({'empty': True},
         NumericRange(**{'empty': True})),
        ({}, NumericRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['A valid number is required.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'lower': 2., 'upper': 1.}, ['The start of the range must not exceed the end of the range.']),
    ]
    outputs = [
        (NumericRange(**{'lower': '1.1', 'upper': '2'}),
         {'lower': 1.1, 'upper': 2, 'bounds': '[)'}),
        (NumericRange(**{'empty': True}), {'empty': True}),
        (NumericRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': '1', 'upper': 2., 'bounds': '[)'},
         {'lower': 1., 'upper': 2., 'bounds': '[)'}),
        ({'lower': 1., 'upper': 2.},
         {'lower': 1, 'upper': 2, 'bounds': None}),
        ({'lower': 1},
         {'lower': 1, 'upper': None, 'bounds': None}),
        ({'upper': 1},
         {'lower': None, 'upper': 1, 'bounds': None}),
        ({}, {}),
    ]
    field = FloatRangeField()

    def test_no_source_on_child(self):
        """A child field declared with source= must be rejected."""
        with pytest.raises(AssertionError) as exc_info:
            FloatRangeField(child=serializers.IntegerField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
# USE_TZ so parsed datetimes come back timezone-aware (UTC).
@override_settings(USE_TZ=True)
class TestDateTimeRangeField(TestCase, FieldValues):
    """
    Valid/invalid/representation cases for ``DateTimeRangeField``.
    """
    serializer_class = DateTimeRangeSerializer
    valid_inputs = [
        ({'lower': '2001-01-01T13:00:00Z',
          'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'},
         DateTimeTZRange(
             **{'lower': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=pytz.utc),
                'upper': datetime.datetime(2001, 2, 2, 13, 00, tzinfo=pytz.utc),
                'bounds': '[)'})),
        ({'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'},
         DateTimeTZRange(
             **{'upper': datetime.datetime(2001, 2, 2, 13, 00, tzinfo=pytz.utc),
                'bounds': '[)'})),
        ({'lower': '2001-01-01T13:00:00Z',
          'bounds': '[)'},
         DateTimeTZRange(
             **{'lower': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=pytz.utc),
                'bounds': '[)'})),
        ({'empty': True},
         DateTimeTZRange(**{'empty': True})),
        ({}, DateTimeTZRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['Datetime has wrong format. Use one of these'
                          ' formats instead: '
                          'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'lower': '2001-02-02T13:00:00Z',
          'upper': '2001-01-01T13:00:00Z'},
         ['The start of the range must not exceed the end of the range.']),
    ]
    outputs = [
        (DateTimeTZRange(
            **{'lower': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=pytz.utc),
               'upper': datetime.datetime(2001, 2, 2, 13, 00, tzinfo=pytz.utc)}),
         {'lower': '2001-01-01T13:00:00Z',
          'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'}),
        (DateTimeTZRange(**{'empty': True}),
         {'empty': True}),
        (DateTimeTZRange(bounds='()'),
         {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': '2001-01-01T13:00:00Z',
          'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'},
         {'lower': '2001-01-01T13:00:00Z',
          'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'}),
        ({'lower': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=pytz.utc),
          'upper': datetime.datetime(2001, 2, 2, 13, 00, tzinfo=pytz.utc),
          'bounds': '[)'},
         {'lower': '2001-01-01T13:00:00Z',
          'upper': '2001-02-02T13:00:00Z',
          'bounds': '[)'}),
        ({'upper': '2001-02-02T13:00:00Z', 'bounds': '[)'},
         {'lower': None, 'upper': '2001-02-02T13:00:00Z', 'bounds': '[)'}),
        ({'lower': '2001-01-01T13:00:00Z', 'bounds': '[)'},
         {'lower': '2001-01-01T13:00:00Z', 'upper': None, 'bounds': '[)'}),
        ({}, {}),
    ]
    field = DateTimeRangeField()

    def test_no_source_on_child(self):
        """A child field declared with source= must be rejected."""
        with pytest.raises(AssertionError) as exc_info:
            DateTimeRangeField(child=serializers.IntegerField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
class TestDateRangeField(FieldValues):
    """
    Valid/invalid/representation cases for ``DateRangeField``.
    """
    serializer_class = DateRangeSerializer
    valid_inputs = [
        ({'lower': '2001-01-01',
          'upper': '2001-02-02',
          'bounds': '[)'},
         DateRange(
             **{'lower': datetime.date(2001, 1, 1),
                'upper': datetime.date(2001, 2, 2),
                'bounds': '[)'})),
        ({'upper': '2001-02-02',
          'bounds': '[)'},
         DateRange(
             **{'upper': datetime.date(2001, 2, 2),
                'bounds': '[)'})),
        ({'lower': '2001-01-01',
          'bounds': '[)'},
         DateRange(
             **{'lower': datetime.date(2001, 1, 1),
                'bounds': '[)'})),
        ({'empty': True},
         DateRange(**{'empty': True})),
        ({}, DateRange()),
    ]
    invalid_inputs = [
        ({'lower': 'a'}, ['Date has wrong format. Use one of these'
                          ' formats instead: '
                          'YYYY-MM-DD.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
        ({'lower': '2001-02-02',
          'upper': '2001-01-01'},
         ['The start of the range must not exceed the end of the range.']),
    ]
    outputs = [
        (DateRange(
            **{'lower': datetime.date(2001, 1, 1),
               'upper': datetime.date(2001, 2, 2)}),
         {'lower': '2001-01-01',
          'upper': '2001-02-02',
          'bounds': '[)'}),
        (DateRange(**{'empty': True}),
         {'empty': True}),
        (DateRange(bounds='()'), {'bounds': '()', 'lower': None, 'upper': None}),
        ({'lower': '2001-01-01',
          'upper': '2001-02-02',
          'bounds': '[)'},
         {'lower': '2001-01-01',
          'upper': '2001-02-02',
          'bounds': '[)'}),
        ({'lower': datetime.date(2001, 1, 1),
          'upper': datetime.date(2001, 2, 2),
          'bounds': '[)'},
         {'lower': '2001-01-01',
          'upper': '2001-02-02',
          'bounds': '[)'}),
        ({'upper': '2001-02-02', 'bounds': '[)'},
         {'lower': None, 'upper': '2001-02-02', 'bounds': '[)'}),
        ({'lower': '2001-01-01', 'bounds': '[)'},
         {'lower': '2001-01-01', 'upper': None, 'bounds': '[)'}),
        ({}, {}),
    ]
    field = DateRangeField()

    def test_no_source_on_child(self):
        """A child field declared with source= must be rejected."""
        with pytest.raises(AssertionError) as exc_info:
            DateRangeField(child=serializers.IntegerField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )

    def test_initial_value_of_field(self):
        """The serializer's initial data reflects DateRangeField(initial=...)."""
        serializer = DateRangeSerializer()
        assert serializer.data['range'] == {'lower': None, 'upper': None, 'bounds': '()'}

    def test_allow_empty(self):
        """allow_empty=False rejects {} while allow_empty=True accepts it."""
        serializer = DateRangeWithAllowEmptyFalseSerializer(data={"range": {}})
        with pytest.raises(serializers.ValidationError) as exc_info:
            serializer.is_valid(raise_exception=True)
        assert exc_info.value.detail == ["This dictionary may not be empty."]
        serializer = DateRangeWithAllowEmptyTrueSerializer(data={"range": {}})
        assert serializer.is_valid()
class EmailSerializer(serializers.Serializer):
    """Wraps ``LowercaseEmailField`` for its normalization test."""
    email = LowercaseEmailField()
class LowercaseEmailFieldTest(TestCase):
    """Tests for ``LowercaseEmailField`` normalization."""

    def test_serialization(self):
        """Mixed-case input must validate and come back lower-cased."""
        email = 'ALL_CAPS@example.com'
        serializer = EmailSerializer(data={'email': email})
        # Fix: the original called serializer.is_valid() twice in a row;
        # DRF caches the validation result, so the first call was redundant.
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['email'], email.lower())
|
# Generated by Django 3.0.7 on 2020-07-01 10:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the ``addapps`` table: per-user registered apps keyed by UUID."""

    dependencies = [
        ('account', '0003_remove_userinfo_age'),
    ]

    operations = [
        migrations.CreateModel(
            name='addapps',
            fields=[
                ('app_id', models.UUIDField(primary_key=True, serialize=False)),
                ('appname', models.CharField(max_length=15)),
                ('webaddress', models.CharField(max_length=15)),
                # Cascade delete: removing the owning user removes their apps.
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.userInfo')),
            ],
        ),
    ]
|
# hashtable
from hashtable import HashTable
from time import time
class CheckPrice(object):
    """Looks up the cheapest known cost for a phone number by longest-prefix match."""

    def __init__(self, file_path):
        # Hash table mapping route prefix -> cheapest cost (kept as the
        # original string so output formatting is unchanged).
        self.number_and_cost = self.store_routes_cost(file_path)

    # store cost of routes into the hash table
    def store_routes_cost(self, file_path):
        """Load "route,cost" lines, keeping the lowest cost per route."""
        hash_cost_route = HashTable(100000)  # O(n) space complexity
        with open(file_path, "r") as file:
            for line in file:  # O(n) time complexity
                # Fix: strip only a trailing newline; the original line[:-1]
                # ate the last digit of a final line with no newline.
                route, cost = line.rstrip("\n").split(",")
                # check if the route is already in our hashtable O(1)
                if hash_cost_route.contains(route):
                    old_cost = hash_cost_route.get(route)
                    # Fix: compare costs numerically; the original compared
                    # raw strings, so e.g. "9" ranked above "10".
                    if float(old_cost) > float(cost):
                        hash_cost_route.set(route, cost)  # O(1)
                else:
                    hash_cost_route.set(route, cost)  # O(1)
        return hash_cost_route

    # find cost of the phone number input
    def find_cost(self, phone_number):
        """Return the cost for the longest stored prefix of phone_number, else 0."""
        # Fix: iterate len(phone_number) times so the single-character prefix
        # is also checked (the original loop stopped one short).
        for _ in range(len(phone_number)):  # O(n) time complexity
            if self.number_and_cost.contains(phone_number):
                # O(1) lookup
                return self.number_and_cost.get(phone_number)
            phone_number = phone_number[:-1]
        return 0

    # write to a new file function
    def write_results(self, phone_number, cost):
        """Append one "NUM, COSTS" result line to call-costs-2.txt."""
        with open("call-costs-2.txt", "a") as file:
            file.write("NUM: {}, COSTS: {}\n".format(phone_number, cost))
if __name__ == "__main__":
    # Phase 1: build the route -> cheapest-cost table from disk, timed.
    start_timer = time()
    print("Working over time, I better get paid...")
    file_path = "route-costs-106000.txt"  # 106000 routes with costs
    check_price = CheckPrice(file_path)
    print("This took {}".format(round(time() - start_timer, 4)))
    # Phase 2: price every phone number and append results to disk, timed.
    start_second_timer = time()
    with open("phone-numbers-10000.txt", "r") as file:  # file with the 10000 numbers
        for phone_number in file:
            phone_number = phone_number[:-1]  # drop the trailing newline
            cost = check_price.find_cost(phone_number)
            check_price.write_results(phone_number, cost)
    print("Finding cost took: {}".format(round(time() - start_second_timer, 4)))
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import func
from sqlalchemy import update
from datetime import datetime
from config import mysql
# from SQLAlchemy.pool import NullPool
# Uses the installed SQLAlchemy extension library
# Build the MySQL DSN from the config dict and create the shared engine
# (echo=True logs every emitted SQL statement).
conn = 'mysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**mysql)
engine = create_engine(conn, echo=True)  # connect to the database without a connection pool
Base = declarative_base(engine)
class Area(Base):
    """A clickable rectangle on a newspaper page image, linked to an article.

    Coordinates and sizes are stored as CSS pixel strings (e.g. "12px");
    Model.insert_area appends the "px" suffix.
    """
    __tablename__ = 'area'
    id = Column(SmallInteger, primary_key=True)
    page_id = Column(Integer)     # owning Page
    paper_id = Column(Integer)    # owning Paper (issue)
    article_id = Column(Integer)  # Article shown when the area is activated
    x = Column(String(11))
    y = Column(String(11))
    width = Column(String(11))
    height = Column(String(11))

    def __repr__(self):
        return "<Area(page_id='%s', paper_id='%s', article_id='%s', x='%s', y='%s', width='%s', height='%s')>" % (
            self.page_id, self.paper_id, self.article_id, self.x, self.y, self.width, self.height)
class Article(Base):
    """A newspaper article, belonging to one Paper issue and one Page."""
    __tablename__ = 'article'
    # __table_args__ = {'autoload': True}
    id = Column(Integer, primary_key=True)
    title = Column(String(11))
    sub_title = Column(String)
    content = Column(Text)
    time = Column(DateTime)        # creation time, set by Model.insert_article
    paper_id = Column(Integer)
    page_id = Column(Integer)
    reply_title = Column(String)
    author = Column(String(64))
    keyword = Column(String)
    show_author = Column(Integer)  # flag; exact semantics not visible here -- TODO confirm
    has_pic = Column(Integer)      # flag; presumably "article has a picture" -- TODO confirm
class KeyInfo(Base):
    """Keyword-to-article mapping row (one keyword per row)."""
    __tablename__ = 'keyinfo'
    id = Column(Integer, primary_key=True)
    keyword = Column(String)
    article_id = Column(Integer)
class Paper(Base):
    """One newspaper issue."""
    __tablename__ = 'paper'
    # __table_args__ = {'autoload': True}
    id = Column(Integer, primary_key=True, nullable=False)
    num = Column(Integer)     # issue number (unique lookup key used by Model)
    issued = Column(Integer)  # 1 = published, 0 = draft (new_paper creates with 0)
    time = Column(DateTime)   # publication time
class Page(Base):
    """One page (image) of a newspaper issue."""
    __tablename__ = 'page'
    # __table_args__ = {'autoload': True}
    id = Column(Integer, primary_key=True, nullable=False)
    paper_id = Column(Integer)
    num = Column(Integer)     # page number within the issue
    pic_url = Column(String)  # URL of the scanned page image
    name = Column(String)
class Connection(object):
    """Thin wrapper exposing the raw Table objects and a Core connection."""

    table_page = Page.__table__
    table_paper = Paper.__table__
    table_article = Article.__table__
    table_area = Area.__table__

    def __init__(self):
        # Inherit explicitly from object: the file targets Python 2 (it uses
        # iterkeys elsewhere), and super() on an old-style `class Connection():`
        # raises "TypeError: must be type, not classobj".
        super(Connection, self).__init__()
        self.conn = engine.connect()
class Model(object):
    """Session-based data-access layer for papers, pages, articles and areas."""

    def __init__(self):
        # One session per Model instance, bound to the module-level engine.
        session = sessionmaker(bind=engine)
        self.session = session()

    def get_article_list(self, page_id):
        """Return (id, title, reply_title, has_pic, sub_title) rows for a page."""
        article_list = self.session.query(Article.id, Article.title,
                                          Article.reply_title,
                                          Article.has_pic,
                                          Article.sub_title)
        return article_list.filter(Article.page_id == page_id).all()

    def get_articles(self, paper_id):
        """Return (id, title) rows for every article in an issue."""
        article_list = self.session.query(Article.id, Article.title)
        return article_list.filter(Article.paper_id == paper_id).all()

    def get_all_paper(self, all_select=False, issued=1):
        """Return issues newest-first; only those matching `issued` unless all_select."""
        query = self.session.query(Paper)
        if not all_select:
            query = query.filter(Paper.issued == issued)
        return query.order_by(desc(Paper.id)).all()

    def get_max_paper(self):
        """Return (num, id) of the most recent published issue, or None."""
        return self.session.query(Paper.num, Paper.id).filter(
            Paper.issued == 1).order_by(desc(Paper.id)).first()

    def get_max_paper_issued(self):
        """Return the most recent issue regardless of publication state."""
        return self.session.query(Paper).order_by(desc(Paper.id)).first()

    def get_area_list(self, page_id):
        """Return all clickable areas on a page."""
        return self.session.query(Area).filter(Area.page_id == page_id).all()

    def get_pic_info(self, paper_id, num=1):
        """Return page `num` of an issue (defaults to the first page)."""
        return self.session.query(Page).filter(
            Page.paper_id == paper_id).filter(Page.num == num).first()

    def get_page_info(self, page_id):
        """Return one page by id, or None."""
        return self.session.query(Page).filter(Page.id == page_id).first()

    def get_article_info(self, article_id):
        """Return one full article by id, or None."""
        return self.session.query(Article).filter(Article.id == article_id).first()

    def get_paper_info(self, paper_id):
        """Return the issue number for a paper id, or None if it does not exist."""
        # .first() instead of .all()[0]: avoids IndexError for a missing id.
        row = self.session.query(Paper.num).filter(Paper.id == paper_id).first()
        return row.num if row is not None else None

    def get_paper(self, paper_num):
        """Return one issue by issue number, or None."""
        return self.session.query(Paper).filter(Paper.num == paper_num).first()

    def get_column_list(self, paper_id):
        """Return all pages of an issue."""
        return self.session.query(Page).filter(Page.paper_id == paper_id).all()

    def get_paper_list(self, offset=0, limit=20):
        """Return a newest-first slice of all issues (for paging)."""
        return self.session.query(Paper).order_by(
            desc(Paper.id)).limit(limit).offset(offset).all()

    def get_paper_count(self):
        """Return the total number of issues."""
        num = self.session.query(func.count(Paper.id)).all()
        return num[0][0]

    def new_paper(self, paper_num, pub_time=None):
        """Create an issue, or refresh its time if one with this number exists.

        pub_time defaults to the time of the CALL.  The original signature
        used `pub_time=datetime.now()`, which is evaluated once at import
        time, so every default-created paper shared the same timestamp.
        """
        if pub_time is None:
            pub_time = datetime.now()
        is_exist = self.session.query(Paper).filter(Paper.num == paper_num).first()
        if is_exist:
            p = update(Paper).where(Paper.num == paper_num).values(time=pub_time)
            self.session.execute(p)
            self.session.commit()
            return is_exist
        paper = Paper()
        paper.num = paper_num
        paper.time = pub_time
        paper.issued = 0  # new issues start unpublished
        self.session.add(paper)
        self.session.commit()
        return paper

    def delete_paper(self, paper_num):
        """Delete an issue by number; True on success, None if it did not exist."""
        is_exist = self.session.query(Paper.id).filter(Paper.num == paper_num).first()
        if is_exist:
            self.session.query(Paper).filter(Paper.num == paper_num).delete()
            self.session.commit()
            return True
        return None

    def insert_article(self, **kwargs):
        """Insert an article from whitelisted keyword fields and return it."""
        args = ["title", "sub_title", "reply_title", "content",
                "paper_id", "page_id", "show_author", "has_pic",
                "author"]
        # Plain iteration instead of dict.iterkeys(): works on Python 2 and 3.
        d = {key: kwargs[key] for key in kwargs if key in args}
        d["time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        article = Article(**d)
        self.session.add(article)
        self.session.commit()
        return article

    def update_article(self, **kwargs):
        """Update an article's columns from kwargs; kwargs['id'] selects the row."""
        # Execute through the session: the original's implicit
        # `stmt.execute()` requires engine-bound metadata and is removed in
        # modern SQLAlchemy.  Commit so the change is actually persisted.
        result = self.session.execute(
            update(Article).where(Article.id == kwargs['id']).values(kwargs))
        self.session.commit()
        return result

    def delete_article(self, article_id):
        """Delete one article by id; return the deleted object, or None."""
        # The original passed the Query object itself to session.delete(),
        # which raises; load the mapped instance first.
        article = self.session.query(Article).filter(Article.id == article_id).first()
        if article is not None:
            self.session.delete(article)
            self.session.commit()
        return article

    def new_page(self, paper_id, num, pic_url, name):
        """Insert one page and return it."""
        page = Page(paper_id=paper_id, num=num, pic_url=pic_url, name=name)
        self.session.add(page)
        self.session.commit()
        return page

    def update_page(self, page_id, paper_id=None, num=None, pic_url=None, name=None):
        """Update any subset of a page's fields (None means leave unchanged)."""
        page = self.session.query(Page).filter(Page.id == page_id).first()
        if paper_id is not None:
            page.paper_id = paper_id
        if pic_url is not None:
            page.pic_url = pic_url
        if name is not None:
            page.name = name
        if num is not None:
            page.num = num
        self.session.add(page)
        self.session.commit()

    def insert_area(self, **kwargs):
        """Insert a clickable area; x/y/width/height get a 'px' suffix."""
        args = ["page_id", "paper_id", "article_id", "x", "y", "width", "height"]
        d = {key: kwargs[key] for key in kwargs if key in args}
        d['x'] = str(d['x']) + "px"
        d['y'] = str(d['y']) + "px"
        d['width'] = str(d['width']) + "px"
        d['height'] = str(d['height']) + "px"
        area = Area(**d)
        self.session.add(area)
        self.session.commit()
        return area

    def paper_issued(self, paper_id, issued):
        """Set an issue's publication flag; return False if the issue is missing."""
        paper = self.session.query(Paper).filter(Paper.id == paper_id).first()
        if not paper:
            return False
        paper.issued = issued
        self.session.add(paper)
        self.session.commit()

    def delete_page(self, page_id):
        """Delete one page by id (no-op when the id does not exist)."""
        page = self.session.query(Page).filter(Page.id == page_id).first()
        if page is not None:
            self.session.delete(page)
            self.session.commit()

    def close_session(self):
        """Release the session and its connection resources."""
        self.session.close()
|
import numpy as np
import tflite_runtime.interpreter as tflite
import open_myo as myo
from kulka import Kulka
import time
# Shared state mutated by the EMG notification callback below.
isReadyToRegisterData = False  # gate: samples are recorded only while True
samplesPerSeconds = 0          # number of EMG samples captured for the current gesture
dataRecollectedPerIteration = list()  # raw EMG readings for the current gesture
ADDR = '68:86:e7:00:ef:40'     # Bluetooth MAC address of the Kulka (Sphero) robot
#interpreter = tf.lite.Interpreter(model_path="myLittleModel.tflite")
interpreter = tflite.Interpreter(model_path="modelXDXD.tflite")
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
STEPS = 10        # number of segments used to approximate a circle
SPEED = 0x30      # roll speed passed to the robot
SLEEP_TIME = 0.3  # seconds each roll step lasts
#def section
#-------------------------------------------------------------------------------
def make_a_step(kulka, current_angle):
    """Roll the robot one step: move at SPEED toward the angle, wait, then stop."""
    kulka.roll(SPEED, current_angle)
    time.sleep(SLEEP_TIME)
    kulka.roll(0, current_angle)  # speed 0 = stop, keeping the heading
def make_a_circle(kulka, steps):
    """Drive the robot through a full 360-degree turn in `steps` equal segments."""
    step_angle = 360 // steps
    heading = 1
    for _ in range(steps):
        make_a_step(kulka, heading % 360)
        heading += step_angle
def process_emg(emg):
    """EMG notification callback: record one sample while recording is enabled."""
    global dataRecollectedPerIteration, samplesPerSeconds
    if not isReadyToRegisterData:
        return
    print("readings-> ", emg)
    #dataRecollectedPerIteration += emg
    dataRecollectedPerIteration.append(emg)
    samplesPerSeconds += 1
def classifySignal():
    """Run the TFLite classifier on the collected EMG window and act on it.

    Assumes dataRecollectedPerIteration holds 150 samples of 8 channels, to
    match the model input shape (1, 150, 8) -- TODO confirm against the
    training code.  If the predicted class is 1, connects to the Sphero and
    drives it in a circle; otherwise does nothing.
    """
    global interpreter
    global input_details
    global output_details
    # Batch of one window shaped (1, 150, 8), float32 as the model expects.
    arrayXD = np.empty((1,150,8),dtype="float32")
    super_inp = np.asarray(dataRecollectedPerIteration,dtype="float32")
    arrayXD[0] = super_inp
    interpreter.set_tensor(input_details[0]['index'], arrayXD)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])
    # Highest-scoring class index; class 1 triggers the circle gesture.
    if(np.argmax(output_data) == 1):
        with Kulka(ADDR) as kulka:
            make_a_circle(kulka, STEPS)
    else:
        print("nothing")
#-------------------------------------------------------------------------------
if __name__ == "__main__":
    # Connect to the Myo armband and configure filtered-EMG streaming only.
    myo_mac_addr = myo.get_myo()
    myo_device = myo.Device()
    myo_device.services.sleep_mode(1) # never sleep
    myo_device.services.vibrate(1) #short vibration
    myo_device.services.emg_filt_notifications()
    print("Battery: %d" % myo_device.services.battery())
    myo_device.services.set_mode(myo.EmgMode.FILT, myo.ImuMode.OFF, myo.ClassifierMode.OFF)
    # NOTE(review): the classifier expects a (1, 150, 8) window, so this
    # should normally be 150 -- confirm.
    samplesPerGesture = int(input("insert number of samplesPerGesture: "))
    myo_device.add_emg_event_handler(process_emg)
    # Record -> classify -> reset, forever.
    while(True):
        myo_device.services.vibrate(1) # short vibration to let user know we are recording
        time.sleep(2) #add some delay to avoid the vibration causing any interference
        isReadyToRegisterData = True
        # Pump BLE notifications until enough samples have been collected.
        while(samplesPerSeconds < samplesPerGesture):
            if myo_device.services.waitForNotifications(1):
                continue #return to the beginning of the while loop
            else:
                print("no data has been received from the peripheral, waiting...")
        isReadyToRegisterData = False
        print("received Info")
        classifySignal()
        dataRecollectedPerIteration.clear()
        print("total number of samples: ", samplesPerSeconds)
        samplesPerSeconds = 0;
|
# -*- coding: utf-8 -*-
import pandas as pd
import random
import operator
class Monster_In_A_Box:
    """One monster-in-a-box chest, hydrated from a row of the data frame.

    Attributes copied from the matching row include (per the source data):
    readable_name, area (Wind Shrine, Karnak, ...), monster_chest_id
    (3-byte address for the reward type), monster_chest_data (enemy byte),
    reward_id (3-byte reward address), reward_data (reward byte).
    """

    def __init__(self, index, data_frame):
        self.idx = index
        self.generate_from_df(data_frame)
        self.processed = False  # flag used to track if this monster has been placed

    def generate_from_df(self, df):
        """Copy every column of the matching row onto this instance as attributes.

        Bug fixes vs. the original: select the row BEFORE calling .iloc[0]
        (on an empty frame .iloc[0] raises IndexError, so the empty check was
        unreachable), and convert idx with str() in the message (the original
        concatenated str + int, raising TypeError).
        """
        matches = df[df['monster_box_id'] == self.idx]
        if matches.empty:
            print("No match on index found for Monster_In_A_Box class " + str(self.idx))
        else:
            row = matches.iloc[0]
            for column in row.index:
                setattr(self, column, row.loc[column])
class MonsterInABoxManager:
    """Owns every Monster_In_A_Box and hands them out per game area."""

    def __init__(self, data_manager, random=None, key_items_in_mib=False):
        mib_df = data_manager.files['monsters_in_boxes']
        self.monsters_in_boxes = [Monster_In_A_Box(idx, mib_df)
                                  for idx in range(0, mib_df.shape[0])]
        self.random = random
        self.key_items_in_mib = key_items_in_mib

    def get_mib_for_area(self, area):
        """Return a random not-yet-placed monster box for the area, or None."""
        candidates = [box for box in self.monsters_in_boxes
                      if box.area == area.area_name and not box.processed]
        if candidates:
            # NOTE(review): picks via the module-level `random`, not
            # self.random -- confirm which RNG is intended.
            return random.choice(candidates)
        return None

    def get_mib_by_address(self, address):
        """Return the box whose monster_chest_id equals `address`, or None."""
        return next((box for box in self.monsters_in_boxes
                     if box.monster_chest_id == address), None)
# wd.py
# WikiData things
import csv
from pprint import pprint
from time import sleep
from qwikidata.entity import WikidataItem, WikidataLexeme, WikidataProperty
from qwikidata.linked_data_interface import get_entity_dict_from_api
from qwikidata.sparql import (get_subclasses_of_item,
return_sparql_query_results)
def mk_isas(isas):
    """Build a SPARQL VALUES restriction limiting results to given classes.

    `isas` may be a list of Wikidata Q-ids, a single Q-id string, or empty
    (including None); returns "" when no restriction is requested.

    Generalization: a bare "Q16917" is now treated as one id.  Previously a
    plain string was iterated character by character, emitting one bogus
    wd:<char> entry per character (test() passes a string).
    """
    if not isas:
        return ""
    if isinstance(isas, str):
        isas = [isas]
    # construct {wd:Q16917 wd:Q4287745}.
    joined = " ".join("wd:" + isa for isa in isas)
    return "wdt:P31 ?kind. VALUES ?kind {{ {isas} }}.".format(isas=joined)
def hospi_ll(lat, long, radius=.5, isas=None):
    """Query Wikidata for US places within `radius` (km) of (lat, long).

    `isas` optionally restricts results to instances of the given Q-ids.
    Returns the raw SPARQL JSON response (10 nearest places).

    The default was changed from the mutable `isas=[]` to None; both are
    falsy, so mk_isas yields the empty restriction either way.
    """
    isas = mk_isas(isas)
    # NOTE(review): there is no space between wikibase:center and the
    # "Point(...)" literal below -- the endpoint appears to accept it, but
    # confirm before relying on it.
    sparql_query = """
    SELECT ?place ?placeLabel ?distance WHERE {{
      ?place wdt:P17 wd:Q30; # In US
      {isas}
      SERVICE wikibase:around {{
        ?place wdt:P625 ?location .
        bd:serviceParam wikibase:center"Point({long} {lat})"^^geo:wktLiteral.
        bd:serviceParam wikibase:radius "{radius}" .
        bd:serviceParam wikibase:distance ?distance .
      }}
      SERVICE wikibase:label {{
        bd:serviceParam wikibase:language "en" .
      }}
    }}
    ORDER BY ?distance
    LIMIT 10""".format(lat=lat, long=long, radius=radius, isas=isas)
    # print(sparql_query)
    sleep(1)  # be polite to the public SPARQL endpoint
    res = return_sparql_query_results(sparql_query)
    # pprint(res['results']['bindings'])
    return res
# print("import sys; sys.exit()"); import code; code.interact(local=locals())
def label(word, isas=None):
    """Search Wikidata entities matching `word`; print label/description/URI.

    `isas` optionally restricts matches to instances of the given Q-ids.
    Returns the raw SPARQL JSON response.

    Fixes: mutable default `isas=[]` replaced with None, and the missing
    itemDescription case is handled -- that variable is OPTIONAL in the
    label service output, so indexing it directly raised KeyError.
    """
    isas = mk_isas(isas)
    # hack cuz I don't know what I am doing
    if isas:
        isas = "?item " + isas
    sparql_query = """
    SELECT ?item ?itemLabel ?itemDescription WHERE {{
      {isas}
      SERVICE wikibase:mwapi {{
        bd:serviceParam wikibase:endpoint "www.wikidata.org";
                        wikibase:api "EntitySearch";
                        mwapi:search "{word}";
                        mwapi:language "en";
                        mwapi:limit "5".
        ?item wikibase:apiOutputItem mwapi:item.
        ?num wikibase:apiOrdinal true.
      }}
      SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en". }}
    }}""".format(word=word, isas=isas)
    # print(sparql_query)
    res = return_sparql_query_results(sparql_query)
    for item in res['results']['bindings']:
        print("{label}: {description} {uri}\n".format(
            label=item['itemLabel']['value'],
            description=item.get('itemDescription', {}).get('value', ''),
            uri=item['item']['value']))
    return res
def tag_qs():
    """Print tags already resolved in tags.csv and list hard-coded tags that are not."""
    tags = """
Doctor office
Dentist office
Food pantry
Post office
"""
    with open("tags.csv") as csvfile:
        rows = list(csv.DictReader(csvfile))
    # Index known tags by their label for O(1) membership checks.
    d = {}
    for row in rows:
        d[row['label']] = row
    for k in d:
        print("{label}: {description} https://www.wikidata.org/wiki/{id}\n".format(**d[k]))
    # Print any hard-coded tag that still lacks a CSV entry.
    for tag in tags.split('\n'):
        if not tag:
            continue
        if tag in d:
            continue
        print(tag)
        # q(tag)
def write_tags(rows):
    """Write tag dicts (label/description/id/url keys) to tags.csv."""
    fieldnames = ['label', 'description', 'id', 'url']
    with open('tags.csv', 'w') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)
def test():
    # random bits of code to test other bits of code.
    # q("Hospital")
    # q("Urgent Care Clinic")
    # q("Nursing Home")
    # NOTE(review): mk_isas iterates its argument, so the bare string
    # "Q16917" is split into per-character ids -- confirm a list was meant.
    res = label("Resurrection Medical Center", "Q16917")
    # tag_qs()
    # hospi_ll(41.89498135,-87.62153624)
    # res = hospi_ll( 41.79027035, -87.60458343, .1, "Q16917" ) # hospital)
    # NOTE(review): this loop reads 'place'/'placeLabel' keys, which only
    # exist in hospi_ll() results; label() binds ?item -- so with the call
    # above this raises KeyError.  Confirm which call was intended.
    for row in res['results']['bindings']:
        print(row['place']['value'])
        print(row['placeLabel']['value'])
        print()
    # print("import sys; sys.exit()"); import code; code.interact(local=locals())
def main():
    # Entry point: currently just runs the ad-hoc experiments in test().
    test()

if __name__ == '__main__':
    main()
|
from packages_model import *
def get_packages( p_id_client, p_id_branch ):
    """Return the packages for a client at a branch.

    Bug fix: the original assigned the result to a local and fell off the
    end, so every caller received None.
    """
    return get_packages_by_id_client( p_id_client, p_id_branch )
def make_response( p_status, p_data ):
    # Stub: ignores both arguments and always returns 0; response assembly
    # is not implemented yet.
    return 0
|
"""This will draw the plant loop for any file """
import pydot
import sys
sys.path.append('../EPlusInputcode')
from EPlusCode.EPlusInterfaceFunctions import readidf
import loops
iddfile = "../iddfiles/Energy+V6_0.idd"
# fname = "/Applications/EnergyPlus-6-0-0/Examples/DualDuctConstVolGasHC.idf"
# fname = "../idffiles/a.idf"
# fname = "/Volumes/Server/Active_Projects/stanttecE+Conssulting2/3_Simulation/2_Energy/EnergyPlus/fromMatt/Proposed110614exp.idf"
# fname = "/Volumes/Server/Active_Projects/stanttecE+Conssulting2/3_Simulation/2_Energy/EnergyPlus/workingfiles/5ZoneAirCooled.idf"
# fname = "/Volumes/Server/Active_Projects/LBNL_UHM/3_Simulation/2_Energy/Energyplus3/airflow/air6.expidf"
# fname = "/Volumes/Server/Staff/Santosh/transfer/asul/05_Baseline_06.idf"
fname = "/Applications/EnergyPlus-6-0-0/Examples/DualDuctConstVolGasHC.idf"
# fname = "../idffiles/HVACTemplate-5ZoneVAVFanPowered.idf"
# outname = "../idffiles/.idf"
fname = "../idffiles/CoolingTower.idf"
# fname = "../idffiles/a.idf"
fname = "../idffiles/HVACTemplate-5ZonePackagedVAV_exp.idf" # for supply mixer
fname = "a.idf"
data, commdct = readidf.readdatacommdct(fname, iddfile=iddfile)
# in plantloop get:
# demand inlet, outlet, branchlist
# supply inlet, outlet, branchlist
plantloops = loops.plantloopfields(data, commdct)
plantloop = plantloops[0]  # the per-branch mapping below uses only the first loop
#
# supply barnchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
sbranchlist = plantloop[3]
if sbranchlist.strip() != "":
    sbranches = loops.branchlist2branches(data, commdct, sbranchlist)
    s_in_out = [loops.branch_inlet_outlet(data, commdct,
        sbranch) for sbranch in sbranches]
    # NOTE(review): sbranchinout / dbranchinout are built but never used
    # later in this script -- confirm whether they can be removed.
    sbranchinout = dict(zip(sbranches, s_in_out))
dbranchlist = plantloop[6]
if dbranchlist.strip() != "":
    dbranches = loops.branchlist2branches(data, commdct, dbranchlist)
    d_in_out = [loops.branch_inlet_outlet(data, commdct,
        dbranch) for dbranch in dbranches]
    dbranchinout = dict(zip(dbranches, d_in_out))
#
# splitters
# inlet
# outlet1
# outlet2
splitters = loops.splitterfields(data, commdct)
#
# mixer
# outlet
# inlet1
# inlet2
mixers = loops.mixerfields(data, commdct)
#
# supply barnchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
#
# CONNET INLET OUTLETS
edges = []
# get all branches
branchkey = "branch".upper()
branches = data.dt[branchkey]
branch_i_o = {}
# Map every branch name to its inlet and outlet node names.
for br in branches:
    br_name = br[1]
    in_out = loops.branch_inlet_outlet(data, commdct, br_name)
    branch_i_o[br_name] = dict(zip(["inlet", "outlet"], in_out))
# Two edges per branch: inlet-node -> branch -> outlet-node.
for br_name, in_out in branch_i_o.items():
    edges.append((in_out["inlet"], br_name))
    edges.append((br_name, in_out["outlet"]))
# connect splitter to nodes
for splitter in splitters:
    # splitter_inlet = inletbranch.node
    splittername = splitter[0]
    inletbranchname = splitter[1]
    splitter_inlet = branch_i_o[inletbranchname]["outlet"]
    # edges = splitter_inlet -> splittername
    edges.append((splitter_inlet, splittername))
    # splitter_outlets = ouletbranches.nodes
    outletbranchnames = [br for br in splitter[2:]]
    splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames]
    # edges = [splittername -> outlet for outlet in splitter_outlets]
    moreedges = [(splittername, outlet) for outlet in splitter_outlets]
    edges = edges + moreedges
# Mixers are the mirror image: many inlet branches into one outlet branch.
for mixer in mixers:
    # mixer_outlet = outletbranch.node
    mixername = mixer[0]
    outletbranchname = mixer[1]
    mixer_outlet = branch_i_o[outletbranchname]["inlet"]
    # edges = mixername -> mixer_outlet
    edges.append((mixername, mixer_outlet))
    # mixer_inlets = inletbranches.nodes
    inletbranchnames = [br for br in mixer[2:]]
    mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames]
    # edges = [mixername -> inlet for inlet in mixer_inlets]
    moreedges = [(inlet, mixername) for inlet in mixer_inlets]
    edges = edges + moreedges
# connect demand and supply side of every plant loop
for plantloop in plantloops:
    supplyinlet = plantloop[1]
    supplyoutlet = plantloop[2]
    demandinlet = plantloop[4]
    demandoutlet = plantloop[5]
    # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
    moreedges = [(supplyoutlet, demandinlet), (demandoutlet, supplyinlet)]
    edges = edges + moreedges
# for edge in edges:
#     print edge
# Render the directed graph to Graphviz dot and PNG files.
g=pydot.graph_from_edges(edges, directed=True)
g.write('a.dot')
g.write_png('a.png')
"""Adding column ex_date to daily_stock_data.
Revision ID: 63c962ea0422
Revises:
Create Date: 2021-06-03 14:35:30.262401
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '63c962ea0422'
down_revision = None  # base migration: nothing comes before it
branch_labels = None
depends_on = None
def upgrade():
    """Drop the legacy 'data' table and add daily_stock_data.ex_date."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping 'data' is unrelated to the ex_date change in the
    # module docstring -- confirm the table is really obsolete (its rows are
    # not restored by downgrade()).
    op.drop_table('data')
    op.add_column('daily_stock_data', sa.Column('ex_date', sa.String(length=100), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Remove ex_date and recreate the 'data' table (schema only, no rows)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('daily_stock_data', 'ex_date')
    op.create_table('data',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('company_name', mysql.VARCHAR(length=200), nullable=True),
    sa.Column('symbol', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('country', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('previous_close', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('open', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('high', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('low', mysql.VARCHAR(length=100), nullable=True),
    sa.Column('close', mysql.VARCHAR(length=100), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset='latin1',
    mysql_engine='InnoDB'
    )
    # ### end Alembic commands ###
|
from django.core.management.base import BaseCommand
from io import BytesIO
import pandas
import requests
import json
import os
from decaptcha.models import Image
class Command(BaseCommand):
    """Load Visual Genome object/metadata JSON dumps and create labeled Images."""

    # Absolute paths to the Visual Genome dumps (developer-machine specific).
    OBJECT_JSON = '/Users/leohentschker/de-captcha/backend/data/objects_test.json'
    METADATA_JSON = '/Users/leohentschker/de-captcha/backend/data/metadata_test.json'
    NUM_TO_LOAD = 1
    DEFAULT_LABEL_WEIGHT = 5  # starting weight for every imported label

    def load_image(self, object_data, metadata):
        """Download one image and save it with its annotated labels."""
        # Collect every object name annotated on this image.
        labels = []
        for obj in object_data['objects']:
            labels += obj['names']
        # Download the image file associated with it.
        req = requests.get(metadata['url'])
        img_file = BytesIO(req.content)
        img = Image.from_image_file(img_file)
        # Seed each Visual Genome label with the same default weight.
        img.labels = {label: self.DEFAULT_LABEL_WEIGHT for label in labels}
        img.save()

    def handle(self, *args, **options):
        """Entry point: load the first NUM_TO_LOAD images into the database."""
        # `with` closes the JSON files deterministically (the original
        # `json.load(open(...))` leaked both file handles).
        with open(self.OBJECT_JSON) as objects_file:
            objects_list = json.load(objects_file)
        with open(self.METADATA_JSON) as metadata_file:
            metadata_list = json.load(metadata_file)
        for i in range(self.NUM_TO_LOAD):
            self.load_image(objects_list[i], metadata_list[i])
# -*- coding: utf-8 -*-
from gnet.protocol import TCPServerFactory, ReconnectingClientFactory, Protocol
from gnet.util import shorten
import gevent
import logging
# Module-level logger; basicConfig makes DEBUG output visible when run directly.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s %(levelname)s:%(module)s] %(message)s')
class EchoServerProtocol(Protocol):
    """Server-side protocol: echoes every received chunk back to the sender."""

    def connection_made(self):
        logger.info('connection made')

    def data_received(self, data):
        # Log a truncated preview, then echo the data straight back.
        logger.debug('data received: %s', shorten(data, 32))
        self.send_data(data)

    def connection_lost(self, reason):
        logger.info('connection lost')
        super(EchoServerProtocol, self).connection_lost(reason)
class EchoServerFactory(TCPServerFactory):
    """Creates an EchoServerProtocol for every accepted connection."""

    def build_protocol(self, sock, addr):
        logger.info('connection handler %s', str(addr))
        p = EchoServerProtocol(sock, addr)
        p.factory = self  # back-reference from protocol to its factory
        return p
# Start listening on localhost:6011 immediately at import time.
sf = EchoServerFactory(('127.0.0.1', 6011))
sf.start()
class EchoClientProtocol(Protocol):
    """Client-side protocol: sends 'ooxx', then echoes replies back forever."""

    def connection_made(self):
        logger.info('connection made')
        # Kick off the ping-pong with an initial payload.
        self.send_data('ooxx')

    def data_received(self, data):
        logger.debug('data received: %s', shorten(data, 32))
        self.send_data(data)
        # Throttle the echo loop; gevent.sleep yields to other greenlets.
        gevent.sleep(2)

    def connection_lost(self, reason):
        logger.info('connection lost')
        super(EchoClientProtocol, self).connection_lost(reason)
class EchoClientFactory(ReconnectingClientFactory):
    """Client factory; retries the connection every reconnect_delay seconds."""

    reconnect_delay = 10  # seconds between reconnect attempts

    def build_protocol(self, sock, addr):
        logger.info('connection handler %s', str(addr))
        p = EchoClientProtocol(sock, addr)
        p.factory = self  # back-reference from protocol to its factory
        return p
# Connect the client to the server started above, then block until all
# greenlets finish (the echo ping-pong runs indefinitely).
cf = EchoClientFactory(('127.0.0.1', 6011))
cf.start()
gevent.wait()
|
#!/usr/bin/env python3
import argparse
import sys
import requests
import tabulate
import json
class Mondo:
    """Minimal HTTP client for a "Mondo"-style internet radio's getData/setData API."""

    # Indices into the row arrays returned by getData/getRows; each position
    # corresponds to the same-position role name in ROLES below.
    INDEX_TITLE = 0       # 0: title
    INDEX_TYPE = 2        # 2: type
    INDEX_AUDIOTYPE = 8   # 8: audioType
    INDEX_PATH = 14       # 14: path
    INDEX_VALUE = 15      # 15: value
    INDEX_EDIT = 17       # 17: edit
    INDEX_MEDIADATA = 18  # 18: mediaData

    # Full comma-separated list of the 52 roles requested from the device.
    ROLES = 'title,icon,type,containerType,containerPlayable,personType,albumType,imageType,audioType,videoType,' \
            'epgType,modifiable,disabled,flags,path,value,valueOperation(),edit,mediaData,query,' \
            'activate,likeIt,rowsOperation,setRoles,timestamp,id,valueUnit,context,description,longDescription,' \
            'search,valueBlob,prePlay,activity,cancel,accept,risky,preferred,httpRequest,encrypted,' \
            'encryptedValue,rating,fillParent,autoCompletePath,busyText,sortKey,renderAsButton,doNotTrack,persistentMetaData,releaseDate,' \
            'audioType,unknownSize'

    VOLUME_BAR_LEN = 20  # characters in the printed volume bar

    def __init__(self, hostname):
        self.hostname = 'http://' + hostname
        # Cache the maximum volume for clamping in set_volume().
        vol, self.max_vol = self.get_volume()

    def get_data(self, path):
        """GET /api/getData for a single node; returns the Response."""
        params = {'path': path, 'roles': self.ROLES}
        return requests.get(self.hostname + '/api/getData', params=params)

    def get_rows(self, path, from_=0, to=20):
        """GET /api/getRows for a list node (rows from_..to); returns the Response."""
        params = {'path': path, 'roles': self.ROLES, 'from': from_, 'to': to}
        return requests.get(self.hostname + '/api/getRows', params=params)

    def set_data(self, path, value, role='activate'):
        """GET /api/setData to write a value or trigger an action on a node."""
        params = {'path': path, 'role': role, 'value': value}
        return requests.get(self.hostname + '/api/setData', params=params)

    def stop_playing(self):
        """Stop playback and report the outcome on stdout."""
        params = {'control': 'stop'}
        resp = self.set_data('player:player/control', value=json.dumps(params))
        if resp.status_code != 200:
            print("Error when stopping")
        else:
            print("Playing stopped")

    def get_current(self):
        """Return the current player state as JSON, or None on HTTP error."""
        current = self.get_data('player:player/data')
        if current.status_code != 200:
            return None
        return current.json()

    def get_presets(self):
        """Return the preset list as JSON, or None on HTTP error."""
        presets = self.get_rows('/app:/presets')
        if presets.status_code != 200:
            return None
        return presets.json()

    def print_current(self):
        """Print what is currently playing, with album/artist/source when known."""
        current = self.get_current()
        if current is None:
            print("Error while getting current")
            return
        if 'trackRoles' in current[self.INDEX_VALUE] and 'title' in current[self.INDEX_VALUE]['trackRoles']:
            track = current[self.INDEX_VALUE]['trackRoles']
            src = ""
            if 'mediaData' in track and 'album' in track['mediaData']['metaData'] and 'artist' in track['mediaData']['metaData']:
                if 'serviceNameOverride' in track['mediaData']['metaData']:
                    src = ", source: {}".format(track['mediaData']['metaData']['serviceNameOverride'])
                print("Currently playing: '{}' (on '{}' by '{}'{})".format(track['title'],
                    track['mediaData']['metaData']['album'], track['mediaData']['metaData']['artist'],
                    src))
            else:
                if 'mediaData' in track and 'serviceNameOverride' in track['mediaData']['metaData']:
                    src = " (source: {})".format(track['mediaData']['metaData']['serviceNameOverride'])
                print("Currently playing: {}{}".format(track['title'], src))
        else:
            print("Nothing is playing")

    def get_volume(self):
        """Return (current, max) volume as ints, or (None, None) on HTTP error.

        Bug fix: the original returned a bare None on error, which crashed
        every caller that unpacks the pair (__init__, print_volume, main).
        """
        volume = self.get_data('player:volume')
        if volume.status_code != 200:
            return None, None
        volume = volume.json()
        return int(volume[self.INDEX_VALUE]['i32_']), int(volume[self.INDEX_EDIT]['max'])

    def print_volume(self):
        """Print a textual volume bar like |████----| 8/20."""
        vol, max_vol = self.get_volume()  # max_vol: avoid shadowing builtins
        if vol is not None:
            filled_length = int(self.VOLUME_BAR_LEN * vol // max_vol)
            bar = '█' * filled_length + '-' * (self.VOLUME_BAR_LEN - filled_length)
            print(f'Volume: |{bar}| {vol}/{max_vol}', end='\n')
        else:
            print("Error when getting volume")

    def set_volume(self, val):
        """Set the absolute volume, clamped to [0, max_vol]."""
        if val > self.max_vol:
            val = self.max_vol
        if val < 0:
            val = 0
        value = {'type': 'i32_', 'i32_': val}
        self.set_data('player:volume', role='value', value=json.dumps(value))

    def print_presets(self):
        """Print a 1-based numbered table of the stored radio presets."""
        presets = self.get_presets()
        if presets is None:
            # Guard added: the original iterated None['rows'] on HTTP error.
            print("Error getting presets")
            return
        presets_array = []
        for cnt, preset in enumerate(presets['rows'], start=1):
            presets_array.append([cnt, preset[self.INDEX_TITLE]])
        print(tabulate.tabulate(presets_array, headers=['#', 'Radio']))

    def set_presets(self, index):
        """Start playing preset `index` (1-based, as shown by print_presets)."""
        presets = self.get_presets()
        if presets is None:
            print("Error getting presets")
            return None
        radio = presets['rows'][index - 1]
        name = radio[self.INDEX_TITLE]
        path = radio[self.INDEX_PATH]
        media_data = radio[self.INDEX_MEDIADATA]
        audio_type = radio[self.INDEX_AUDIOTYPE]
        container_type = radio[self.INDEX_TYPE]
        print('Playing preset #{}: {}'.format(index, name))
        params = {
            'control': 'play',
            'mediaRoles': {
                'title': name,
                'type': container_type,
                'audioType': audio_type,
                'modifiable': True,
                'path': path,
                'mediaData': media_data
            }
        }
        resp = self.set_data('player:player/control', value=json.dumps(params))
        if resp.status_code != 200:
            print("Error while setting preset #{}: {}".format(index, name))
def main(argv):
    """Parse CLI options and run the requested radio commands."""
    parser = argparse.ArgumentParser(description='Control your radio')
    parser.add_argument('-a', '--address', help='hostname or IP address of the radio', required=True)
    parser.add_argument('-p', '--play', help='Preset to play', type=int, default=None)
    parser.add_argument('-l', '--list', help='List presets', action='store_true')
    parser.add_argument('-s', '--stop', help='Stop playing', action='store_true')
    parser.add_argument('-v', '--volume', help='Get or set volume level', nargs='?', const=-1, default=None, type=int)
    parser.add_argument('-d', '--vol-down', help='Lower volume', action='store_true')
    parser.add_argument('-u', '--vol-up', help='Raise volume', action='store_true')
    args = parser.parse_args(argv)

    mondo = Mondo(args.address)

    # One primary action; showing the current track is the default.
    if args.play is not None:
        mondo.set_presets(args.play)
    elif args.list:
        mondo.print_presets()
    elif args.stop:
        mondo.stop_playing()
    else:
        mondo.print_current()

    # Volume handling: -d/-u nudge by 2, -v N sets, bare -v (const -1) just prints.
    if args.volume is not None or args.vol_down or args.vol_up:
        if args.vol_down or args.vol_up:
            current_vol, _ = mondo.get_volume()
            target = current_vol + (-2 if args.vol_down else 2)
        else:
            target = args.volume
        if target >= 0:
            mondo.set_volume(target)
        mondo.print_volume()
# Script entry point: forward the CLI args (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
# AoC 2020 day 10 (Python 2): joltage adapters, one integer per line.
file = [int(l.strip()) for l in open("data.txt")]
file.sort()
# The device's built-in adapter is always 3 jolts above the largest adapter.
file.append(max(file) + 3)
# The charging outlet starts the chain at 0 jolts.
file.insert(0, 0)
counts = [0] * 3
# Tally gaps of size 1, 2, 3 between consecutive adapters.
for x in range(len(file)-1) :
    counts[file[x+1] - file[x] - 1] += 1
print "P1: " + str(counts[0] * counts[2])
def recurseCombinations(x):
    """Count arrangements contributed by a run of x consecutive adapters.

    Recurrence: f(x) = f(x-1) + f(x-2) + 1, with f(2) = 1 and f(x) = 0
    for x <= 1.
    """
    if x == 2:
        return 1
    if x <= 1:
        return 0
    return 1 + recurseCombinations(x - 2) + recurseCombinations(x - 1)
# Part 2, approach 1: split the chain into maximal runs of consecutive
# (gap-1) adapters; runs are independent, so the answer is the product of
# each run's combination count.
p2 = 1
x = 0
while x < len(file) - 1 :
    inARow = 1
    for y in range(x+1, len(file)) :
        if y - x == file[y] - file[x] :
            inARow += 1
        else :
            break
    p2 *= max(recurseCombinations(inARow), 1)
    x += inARow
print "P2: " + str(p2)
# Part 2, approach 2: DP where combos[i] = ways to reach joltage i.
# NOTE(review): for i < 3, combos[i-3] wraps to trailing zeros via negative
# indexing -- works for typical AoC inputs but is fragile.
combos = [1] + [0]*file[-1]
for i in file[1:]:
    combos[i] = combos[i-3] + combos[i-2] + combos[i-1]
print "P2: " + str(combos[-1])
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk
from controller import Controller
from viewer import MainView
from gaphas import Canvas, GtkView
from gaphas.painter import DefaultPainter
def main():
    """Build the GTK UI from Glade, embed a Gaphas canvas view, and run the loop."""
    builder = Gtk.Builder()
    builder.add_from_file("AppWindow.glade")
    # Gaphas diagram view embedded into the Glade-defined "GaphasWindow" container.
    graph_view = GtkView()
    graph_view.painter = DefaultPainter()
    graph_view.canvas = Canvas()
    gaphas_window = builder.get_object("GaphasWindow")
    gaphas_window.add(graph_view)
    # Wire the MVC triad: the controller receives all Glade signals.
    main_view = MainView()
    controller = Controller(builder, main_view, graph_view)
    builder.connect_signals(controller)
    #view.connect("focus-changed", handler.focus_changed, "focus")
    window = builder.get_object("MainWindow")
    main_view.show_main_window(window)
    Gtk.main()
# Run the GUI when executed as a script.
if __name__ == "__main__":
    main()
|
import pymongo
connection = pymongo.MongoClient()
db = connection.drugdb
file_path = "/home/ubuntu/flaskapp/"
# Parse the temporary file produced by parse_xml into a list of dicts:
# a "DefType" line starts a new record; every other line adds one field
# (key followed by a possibly multi-word value) to the current record.
doc = []
# `with` closes the file deterministically (the original leaked the handle).
with open(file_path + "temp_out.txt") as f:
    for line in f:
        temp_list = line.split()  # replaces the manual word-by-word append loop
        if temp_list[0] == "DefType":
            # Start a new record.
            doc.append({temp_list[0]: temp_list[1]})
        else:
            # Add the field to the most recent record.
            doc[-1][temp_list[0]] = " ".join(temp_list[1:])
# Insert every record into the DB collection.
for items in doc:
    db.NDFRT_public.insert(items)
# Parenthesized print works on both Python 2 and 3.
print("Data inserted")
|
# Generated by Django 3.1.7 on 2021-06-25 06:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL for Doctor.mobile.

    NOTE(review): storing phone numbers in a PositiveIntegerField loses
    leading zeros and can overflow on some backends -- a CharField is the
    usual choice; confirm before relying on this column.
    """

    dependencies = [
        ('hospital', '0023_auto_20210625_1006'),
    ]

    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='mobile',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
|
__author__ = 'fernando.ormonde'
def maxsum(bets):
    """Return the maximum sum over all non-empty contiguous slices of *bets*.

    Uses Kadane's algorithm, O(n) instead of the original O(n^3)
    double-loop-plus-sum scan. Also fixes a Python 3 crash: the original
    seeded the running maximum with None and ``max(None, int)`` raises
    TypeError on Python 3. Returns None for an empty list, matching the
    original's behavior.
    """
    if not bets:
        return None
    best = current = bets[0]
    for value in bets[1:]:
        # Either extend the current slice or start a new one at `value`.
        current = max(value, current + value)
        best = max(best, current)
    return best
# Quick smoke run (the real checks live in TestMaxSum below).
# FIX: Python 2 print statement -> print() call.
print(maxsum([20, -10, 3]))
import unittest
class TestMaxSum(unittest.TestCase):
    """Unit tests for maxsum: single bet, all-positive, mixed and
    all-negative sequences."""

    def test_one_bet(self):
        # A single bet is the only possible slice.
        self.assertEqual(maxsum([42]), 42)
        self.assertEqual(maxsum([20]), 20)

    def test_all_won_bets(self):
        # All positive: the whole list is the optimal slice.
        self.assertEqual(maxsum([1, 2]), 3)
        self.assertEqual(maxsum([10, 20]), 30)
        self.assertEqual(maxsum([10, 20, 30, 40]), 100)

    def test_a_won_bet_and_lost_bet(self):
        # Dropping the losing bet is optimal.
        self.assertEqual(maxsum([-7, 5]), 5)

    def test_two_won_bet_and_lost_bet(self):
        self.assertEqual(maxsum([-7, 12, 5]), 17)

    def test_two_won_bet_and_lost_betneg(self):
        # All negative: the best slice is the single largest element.
        self.assertEqual(maxsum([-7, -12, -5]), -5)
unittest.main() |
# The MIT License (MIT)
#
# Copyright (c) 2020 Cian Byrne for Robotics Masters Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`roboticsmasters_mpu9250`
================================================================================
CircuitPython helper library for MPU9250 9-axis IMU
* Author(s): Cian Byrne
Implementation Notes
--------------------
**Hardware:**
.. todo:: Add links to any specific hardware product page(s), or category page(s). Use unordered list & hyperlink rST
inline format: "* `Link Text <url>`_"
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
.. todo:: Uncomment or remove the Bus Device and/or the Register library dependencies based on the library's use of either.
# * Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
# * Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/wallarug/CircuitPython_MPU9250.git"
import time
try:
import struct
except ImportError:
import ustruct as struct
# not required
from adafruit_register.i2c_bit import RWBit
from adafruit_register.i2c_bits import RWBits
from adafruit_register.i2c_struct import UnaryStruct, ROUnaryStruct
from adafruit_register.i2c_struct_array import StructArray
# required
import adafruit_bus_device.i2c_device as i2c_device
import adafruit_bus_device.spi_device as spi_device
from micropython import const
# Internal constants and register values:
# pylint: disable=bad-whitespace
_MPU9250_DEFAULT_ADDRESS = const(0x69) # MPU9250 default i2c address
_MPU9250_DEVICE_ID = const(0x71) # MPU9250 WHO_AM_I value
_MPU9250_INT_PIN_CFG = const(0x37) # I2C Bypass enable configuration
_MPU9250_INT_ENABLE = const(0x38) # Interrupt Enable
_MPU9250_INT_STATUS = const(0x3A) # Interrupt Status
_MPU9250_I2C_MST_CTRL = const(0x24) #
_MPU9250_WHO_AM_I = const(0x75) # Device ID register
_MPU6500_DEFAULT_ADDRESS = const(0x69) # MPU6500 default i2c address
_MPU6500_DEVICE_ID = const(0x71) # MPU9250 WHO_AM_I value
_MPU6500_SELF_TEST_X = const(0x0D) # Self test factory calibrated values register
_MPU6500_SELF_TEST_Y = const(0x0E) # Self test factory calibrated values register
_MPU6500_SELF_TEST_Z = const(0x0F) # Self test factory calibrated values register
_MPU6500_SELF_TEST_A = const(0x10) # Self test factory calibrated values register
_MPU6500_SMPLRT_DIV = const(0x19) # sample rate divisor register
_MPU6500_CONFIG = const(0x1A) # General configuration register
_MPU6500_GYRO_CONFIG = const(0x1B) # Gyro specfic configuration register
_MPU6500_ACCEL_CONFIG = const(0x1C) # Accelerometer specific configration register
_MPU6500_ACCEL_CONFIG2 = const(0x1D) # Accelerometer config register
_MPU6500_INT_PIN_CONFIG = const(0x37) # Interrupt pin configuration register
_MPU6500_INT_PIN_ENABLE = const(0x38) # Interrupt pin enable register
_MPU6500_ACCEL_OUT = const(0x3B) # base address for sensor data reads
_MPU6500_TEMP_OUT = const(0x42) # Temperature data low byte register (low: 0x41)
_MPU6500_GYRO_OUT = const(0x43) # base address for sensor data reads
_MPU6500_SIG_PATH_RESET = const(0x68) # register to reset sensor signal paths
_MPU6500_PWR_MGMT_1 = const(0x6B) # Primary power/sleep control register
_MPU6500_PWR_MGMT_2 = const(0x6C) # Secondary power/sleep control register
_MPU6500_WHO_AM_I = const(0x75) # Device ID register
_MPU6500_USER_CTRL = const(0x6A) # FIFO and I2C Master control register
_MPU6500_I2C_SLV4_CTRL = const(0x34) #
_MPU6500_I2C_MST_CTRL = const(0x24) #
_MPU6500_I2C_SLV0_ADDR = const(0x25) #
_MPU6500_I2C_SLV0_REG = const(0x26) #
_MPU6500_I2C_SLV0_CTRL = const(0x27) #
_MPU6500_I2C_SLV0_DO = const(0x63) #
_MPU6500_I2C_MST_DELAY_CTRL = const(0x67) #
_MPU6500_EXT_SENS_DATA_00 = const(0x49) #
_AK8963_DEFAULT_ADDRESS = const(0x0c) # AK8963 default i2c address
_AK8963_DEVICE_ID = const(0x48) # MPU9250 WHO_AM_I value
_AK8963_WIA = const(0x00) # Device ID register
_AK8963_INFO = const(0x01) # Device Information register
_AK8963_ST1 = const(0x02) # Status register 1
_AK8963_MAG_OUT = const(0x03) # base address for sensor data reads
_AK8963_HXL = const(0x03) #
_AK8963_HXH = const(0x04)
_AK8963_HYL = const(0x05)
_AK8963_HYH = const(0x06)
_AK8963_HZL = const(0x07)
_AK8963_HZH = const(0x08)
_AK8963_ST2 = const(0x09)
_AK8963_CNTL1 = const(0x0A) # control register 1
_AK8963_CNTL2 = const(0x0B) # control register 2
_AK8963_ADJUST = const(0x10) # base address for sensor adjust reads
_AK8963_ASAX = const(0x10)
_AK8963_ASAY = const(0x11)
_AK8963_ASAZ = const(0x12)
_MAGTYPE = True
_XGTYPE = False
STANDARD_GRAVITY = 9.80665
# pylint: enable=bad-whitespace
def _twos_comp(val, bits):
# Convert an unsigned integer in 2's compliment form of the specified bit
# length to its signed integer value and return it.
if val & (1 << (bits - 1)) != 0:
return val - (1 << bits)
return val
class MPU9250:
"""Driver for the MPU9250 9-DoF IMU accelerometer, magnetometer, gyroscope."""
# Class-level buffer for reading and writing data with the sensor.
# This reduces memory allocations but means the code is not re-entrant or
# thread safe!
_BUFFER = bytearray(6)
def __init__(self):
    """Reset and configure the MPU6500 accel/gyro, then bring up the
    AK8963 magnetometer. Subclasses must create the bus device objects
    (`_xg_device` / `_mag_device`) before calling this.

    FIXES: ``MagSensitivity.16BIT`` was a SyntaxError (identifiers cannot
    start with a digit); ``initAK8963`` was called without its required
    (scale, mode) arguments.
    """
    # defaults
    Ascale = AccelRange.RANGE_2_G
    Gscale = GyroRange.RANGE_500_DPS
    Mscale = MagSensitivity.SENSE_16BIT
    Mmode = MagMode.MEASURE_100HZ
    sampleRate = 0x04
    self._filter_bandwidth = Bandwidth.BAND_260_HZ
    self._gyro_range = GyroRange.RANGE_500_DPS
    self._accel_range = AccelRange.RANGE_2_G
    # soft reset & reboot accel/gyro
    self._write_u8(_XGTYPE, _MPU6500_PWR_MGMT_1, 0x00)
    time.sleep(0.01)
    # Check the ID register (the AK8963 check was disabled in the original
    # and is kept disabled here).
    if self._read_u8(_XGTYPE, _MPU6500_WHO_AM_I) != _MPU6500_DEVICE_ID:
        # or self._read_u8(_MAGTYPE, _AK8963_WIA) != _AK8963_DEVICE_ID:
        raise RuntimeError('Could not find MPU9250, check wiring!')
    # Auto select clock source to be PLL gyroscope reference if ready.
    self._write_u8(_XGTYPE, _MPU6500_PWR_MGMT_1, 0x01)
    time.sleep(0.01)
    # Configure gyro/thermometer digital low-pass filter.
    self._write_u8(_XGTYPE, _MPU6500_CONFIG, 0x03)
    # Sample rate = gyroscope output rate / (1 + SMPLRT_DIV).
    self._write_u8(_XGTYPE, _MPU6500_SMPLRT_DIV, sampleRate)
    # Set gyro full-scale range, preserving the remaining config bits.
    c = self._read_u8(_XGTYPE, _MPU6500_GYRO_CONFIG)
    c = c & ~0x02        # Clear Fchoice bits [1:0]
    c = c & ~0x18        # Clear AFS bits [4:3]
    c = c | Gscale << 3  # Set full scale range for the gyro
    self._write_u8(_XGTYPE, _MPU6500_GYRO_CONFIG, c)
    # Set accelerometer full-scale range.
    c = self._read_u8(_XGTYPE, _MPU6500_ACCEL_CONFIG)
    c = c & ~0x18        # Clear AFS bits [4:3]
    c = c | Ascale << 3  # Set full scale range for the accelerometer
    self._write_u8(_XGTYPE, _MPU6500_ACCEL_CONFIG, c)
    # Accelerometer rate 1 kHz, bandwidth 41 Hz.
    c = self._read_u8(_XGTYPE, _MPU6500_ACCEL_CONFIG2)
    c = c & ~0x0F        # Clear accel_fchoice_b (bit 3) and A_DLPFG (bits [2:0])
    c = c | 0x03
    self._write_u8(_XGTYPE, _MPU6500_ACCEL_CONFIG2, c)
    # Magnetometer calibration values (device-specific measurements).
    self._offset = (143.725, 6.00244, -21.6755)
    self._scale = (1.07464, 0.97619, 0.956875)
    self._adjustment = (0, 0, 0)
    # Configure interrupts/bypass and start the magnetometer.
    self.initAK8963(Mscale, Mmode)
def read_temp_raw(self):
    """Read the raw temperature sensor value and return it as a 12-bit
    signed value. If you want the temperature in nice units you probably
    want to use the temperature property!
    """
    # Read temp sensor - TODO: was low bit _MPU6500_TEMP_OUT
    # 0x80 is the read/auto-increment flag understood by _read_bytes.
    self._read_bytes(_XGTYPE, 0x80 | _MPU6500_TEMP_OUT, 2,
                     self._BUFFER)
    # NOTE(review): BUFFER[1] is treated as the high byte here; the MPU6500
    # outputs the high byte first -- confirm byte order against the datasheet.
    temp = ((self._BUFFER[1] << 8) | self._BUFFER[0]) >> 4
    return _twos_comp(temp, 12)
@property
def temperature(self):
    """The current temperature in º C"""
    # Datasheet conversion: degC = raw / 333.87 + 21.
    return self.read_temp_raw() / 333.87 + 21.0
def read_accel_raw(self):
    """Read the raw accelerometer sensor values and return it as a
    3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
    want the acceleration in nice units you probably want to use the
    accelerometer property!
    """
    # Read six bytes starting at ACCEL_OUT; '>hhh' because the MPU6500
    # outputs each axis high byte first.
    self._read_bytes(_XGTYPE, 0x80 | _MPU6500_ACCEL_OUT, 6,
                     self._BUFFER)
    raw_x, raw_y, raw_z = struct.unpack_from('>hhh', self._BUFFER[0:6])
    return (raw_x, raw_y, raw_z)
@property
def acceleration(self):
    """Acceleration X, Y, and Z axis data in m/s^2"""
    raw_x, raw_y, raw_z = self.read_accel_raw()
    # LSB-per-g divisor for the configured full-scale range (falls back to
    # 1 for an unrecognized range, as the original did).
    if self._accel_range == AccelRange.RANGE_16_G:
        lsb_per_g = 2048
    elif self._accel_range == AccelRange.RANGE_8_G:
        lsb_per_g = 4096
    elif self._accel_range == AccelRange.RANGE_4_G:
        lsb_per_g = 8192
    elif self._accel_range == AccelRange.RANGE_2_G:
        lsb_per_g = 16384
    else:
        lsb_per_g = 1
    return tuple((axis / lsb_per_g) * STANDARD_GRAVITY
                 for axis in (raw_x, raw_y, raw_z))
def read_gyro_raw(self):
    """Read the raw gyroscope sensor values and return it as a
    3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
    want the gyroscope in nice units you probably want to use the
    gyroscope property!
    """
    # Read six bytes starting at GYRO_OUT; big-endian per axis.
    self._read_bytes(_XGTYPE, 0x80 | _MPU6500_GYRO_OUT, 6,
                     self._BUFFER)
    raw_x, raw_y, raw_z = struct.unpack_from('>hhh', self._BUFFER[0:6])
    return (raw_x, raw_y, raw_z)
@property
def gyro(self):
    """Gyroscope X, Y, and Z axis data in º/s"""
    raw_x, raw_y, raw_z = self.read_gyro_raw()
    # LSB-per-(º/s) divisor for the configured full-scale range.
    # NOTE(review): the MPU6500 datasheet lists 65.5 LSB/(º/s) for
    # ±500 dps; 62.5 is preserved from the original -- confirm.
    divisors = {
        GyroRange.RANGE_250_DPS: 131,
        GyroRange.RANGE_500_DPS: 62.5,
        GyroRange.RANGE_1000_DPS: 32.8,
        GyroRange.RANGE_2000_DPS: 16.4,
    }
    lsb_per_dps = divisors.get(self._gyro_range, 1)
    return (raw_x / lsb_per_dps, raw_y / lsb_per_dps, raw_z / lsb_per_dps)
@property
def cycle(self):
    """Enable or disable perodic measurement at a rate set by `cycle_rate`.
    If the sensor was in sleep mode, it will be waken up to cycle"""
    # NOTE(review): self._cycle is never initialized in __init__, so reading
    # this property before setting it raises AttributeError -- confirm intent.
    return self._cycle

@cycle.setter
def cycle(self, value):
    # Wake the sensor when cycling is enabled.
    # NOTE(review): no `sleep` property is defined in this file; this just
    # sets a plain attribute -- presumably a leftover from the MPU6050
    # driver this was derived from; verify.
    self.sleep = not value
    self._cycle = value
@property
def gyro_range(self):
    """The measurement range of all gyroscope axes. Must be a `GyroRange`"""
    return self._gyro_range

@gyro_range.setter
def gyro_range(self, value):
    """Set the gyro range (a GyroRange constant, 0-3).

    FIX: the original called the bare name ``sleep``, which is undefined
    (only the ``time`` module is imported) and raised NameError.
    NOTE(review): this only records the value; it does not rewrite
    _MPU6500_GYRO_CONFIG, so the hardware range is unchanged -- confirm.
    """
    if (value < 0) or (value > 3):
        raise ValueError("gyro_range must be a GyroRange")
    self._gyro_range = value
    time.sleep(0.01)
@property
def accelerometer_range(self):
    """The measurement range of all accelerometer axes. Must be a `Range`"""
    return self._accel_range

@accelerometer_range.setter
def accelerometer_range(self, value):
    """Set the accelerometer range (an AccelRange constant, 0-3).

    FIX: bare ``sleep`` raised NameError; use time.sleep.
    NOTE(review): does not rewrite _MPU6500_ACCEL_CONFIG -- confirm.
    """
    if (value < 0) or (value > 3):
        raise ValueError("accelerometer_range must be a Range")
    self._accel_range = value
    time.sleep(0.01)
@property
def filter_bandwidth(self):
    """The bandwidth of the gyroscope Digital Low Pass Filter. Must be a `Bandwidth`"""
    return self._filter_bandwidth

@filter_bandwidth.setter
def filter_bandwidth(self, value):
    """Set the DLPF bandwidth (a Bandwidth constant, 0-6).

    FIX: bare ``sleep`` raised NameError; use time.sleep.
    NOTE(review): does not rewrite _MPU6500_CONFIG -- confirm.
    """
    if (value < 0) or (value > 6):
        raise ValueError("filter_bandwidth must be a Bandwidth")
    self._filter_bandwidth = value
    time.sleep(0.01)
@property
def cycle_rate(self):
    """The rate that measurements are taken while in `cycle` mode. Must be a `Rate`"""
    return self._cycle_rate

@cycle_rate.setter
def cycle_rate(self, value):
    """Set the cycle-mode measurement rate (a Rate constant, 0-3).

    FIX: bare ``sleep`` raised NameError; use time.sleep.
    """
    if (value < 0) or (value > 3):
        raise ValueError("cycle_rate must be a Rate")
    self._cycle_rate = value
    time.sleep(0.01)
## MAG
def read_mag_raw(self):
    """Read the raw magnetometer sensor values and return it as a
    3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
    want the magnetometer in nice units you probably want to use the
    magnetometer property!
    """
    # AK8963 data registers are little-endian (hence '<hhh'), unlike the
    # big-endian MPU6500 accel/gyro registers.
    self._read_bytes(_MAGTYPE, 0x80 | _AK8963_MAG_OUT, 6,
                     self._BUFFER)
    raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
    return (raw_x, raw_y, raw_z)
@property
def magnetic(self):
    """The magnetometer X, Y, Z axis values as a 3-tuple of
    gauss values.

    FIX: the Z component subtracted ``self._offset[1]`` (the Y offset), an
    obvious copy-paste slip; it now uses ``self._offset[2]``.
    NOTE(review): ``self._raw_magnet_data``, ``self._status``,
    ``self._mag_range`` and the ``Sensitivity`` class are not defined
    anywhere in this file -- they must be provided elsewhere or this
    property cannot run; confirm.
    """
    raw_data = self._raw_magnet_data
    raw_x = raw_data[0][0]
    raw_y = raw_data[1][0]
    raw_z = raw_data[2][0]
    print(raw_x, raw_y, raw_z)  # debug output -- consider removing
    self._status  # Enable updating readings again
    # Output scale for the resolution chosen in the constructor.
    mag_range = self._mag_range
    mag_scale = 1
    if mag_range == Sensitivity.SENSE_16BIT:
        # 0.15 for uT (micro-tesla); mG (milliGauss): 10.*4912./32760.0
        mag_scale = 1.499389499
    if mag_range == Sensitivity.SENSE_14BIT:
        # 0.6 for uT (micro-tesla); mG (milliGauss): 10.*4912./8190.0
        mag_scale = 5.997557998
    # Range-dependent scaling with soft-iron scale and hard-iron offset.
    mag_x = (raw_x * mag_scale * self._scale[0]) - self._offset[0]
    mag_y = (raw_y * mag_scale * self._scale[1]) - self._offset[1]
    mag_z = (raw_z * mag_scale * self._scale[2]) - self._offset[2]
    return (mag_x, mag_y, mag_z)
def read_gyro_calibration_raw(self):
    """Read the raw gyroscope calibration values and return it as a
    3-tuple of X, Y, Z axis values that are 16-bit unsigned values.

    NOTE(review): despite the name, this reads the AK8963 *magnetometer*
    sensitivity-adjustment fuse ROM (ASAX/ASAY/ASAZ) as three unsigned
    bytes; kept under the old name because initAK8963 calls it.
    """
    # Read the three ASA bytes starting at _AK8963_ASAX.
    self._read_bytes(_MAGTYPE, 0x80 | _AK8963_ASAX, 3,
                     self._BUFFER)
    raw_x, raw_y, raw_z = struct.unpack_from('<BBB', self._BUFFER[0:3])
    return (raw_x, raw_y, raw_z)
def initAK8963(self, scale=1, mode=6):
    """Set up the AK8963 for direct (bypass) I2C access.

    :param scale: CNTL1 output resolution bit; 1 = 16-bit, 0 = 14-bit.
    :param mode: CNTL1 mode bits; 6 (0b0110) = 100 Hz continuous mode 2.

    FIXES: the register name ``_AK8963_CNTL`` does not exist (the constant
    defined above is ``_AK8963_CNTL1``, a NameError at runtime), and
    ``__init__`` called this method with no arguments while it had no
    defaults (TypeError) -- sensible defaults added.
    """
    # Enable I2C bypass so the AK8963 appears on the primary bus.
    self._write_u8(_XGTYPE, _MPU6500_INT_PIN_CONFIG, 0x12)
    self._write_u8(_XGTYPE, _MPU6500_INT_PIN_ENABLE, 0x01)
    time.sleep(0.1)
    # soft reset & reboot magnetometer
    self._write_u8(_MAGTYPE, _AK8963_CNTL1, 0x00)  # power down magnetometer
    time.sleep(0.01)
    self._write_u8(_MAGTYPE, _AK8963_CNTL1, 0x0F)  # enter fuse rom mode
    time.sleep(0.01)
    # Read the factory sensitivity adjustment (ASA) values from fuse ROM.
    raw_adjustment = self.read_gyro_calibration_raw()
    asax = _twos_comp(raw_adjustment[0], 8)
    asay = _twos_comp(raw_adjustment[1], 8)
    asaz = _twos_comp(raw_adjustment[2], 8)
    print(asax, asay, asaz)
    # Datasheet formula: adjustment = (ASA - 128) / 256 + 1.
    self._adjustment = (
        ((asax - 128.0) / 256.0) + 1.0,
        ((asay - 128.0) / 256.0) + 1.0,
        ((asaz - 128.0) / 256.0) + 1.0
    )
    print(self._adjustment)
    # Power down again, then select resolution and sample mode.
    self._write_u8(_MAGTYPE, _AK8963_CNTL1, 0x00)
    time.sleep(0.01)
    self._write_u8(_MAGTYPE, _AK8963_CNTL1, scale << 4 | mode)
    time.sleep(0.01)
def initAK8963slave(self):
    """setup the AK8963 to be used in slave mode (read through the
    MPU6500's auxiliary I2C master instead of bus bypass)."""
    # Configure Interrupts and Bypass Enable
    self._write_u8(_XGTYPE, _MPU6500_INT_PIN_CONFIG, 0x10) # INT is 50 microsecond pulse and any read to clear
    self._write_u8(_XGTYPE, _MPU6500_INT_PIN_ENABLE, 0x01) # Enable data ready (bit 0) interrupt
    time.sleep(0.01)
    self._write_u8(_XGTYPE, _MPU6500_USER_CTRL, 0x20) # Enable I2C Master mode
    self._write_u8(_XGTYPE, _MPU6500_I2C_MST_CTRL, 0x1D) # I2C configuration STOP after each transaction, master I2C bus at 400 KHz
    self._write_u8(_XGTYPE, _MPU6500_I2C_MST_DELAY_CTRL, 0x81) # Use blocking data retreival and enable delay for mag sample rate mismatch
    self._write_u8(_XGTYPE, _MPU6500_I2C_SLV4_CTRL, 0x01) # Delay mag data retrieval to once every other accel/gyro data sample
def i2c_slave(self, addr, reg, data, size=1, read=False):
    """Access a register on an auxiliary I2C slave through the MPU6500's
    I2C master (slave-mode setup; see initAK8963slave).

    FIXES vs. original: a stray comma in the parameter list was a
    SyntaxError; the read/write branches were inverted (bit 7 of
    I2C_SLV0_ADDR selects *read* transfers on the MPU6500, but the
    original set it on the write path); ``_MPU6500_EXT_SEN_DATA_00`` was
    a typo for ``_MPU6500_EXT_SENS_DATA_00`` (NameError).
    NOTE(review): the '<BBB' unpack assumes size == 3 -- confirm callers.
    """
    if read:
        # Read: set the read flag on the slave address, enable the
        # transfer with the byte count, then fetch from EXT_SENS_DATA_00.
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_ADDR, addr | 0x80)
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_REG, reg)
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_CTRL, 0x80 + size)
        time.sleep(0.05)
        self._read_bytes(_XGTYPE, 0x80 | _MPU6500_EXT_SENS_DATA_00, size,
                         self._BUFFER)
        raw_x, raw_y, raw_z = struct.unpack_from('<BBB', self._BUFFER[0:size])
        return (raw_x, raw_y, raw_z)
    else:
        # Write: load the data byte into SLV0_DO and trigger the transfer.
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_ADDR, addr)
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_REG, reg)
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_DO, data)
        self._write_u8(_XGTYPE, _MPU6500_I2C_SLV0_CTRL, 0x80 + size)
        time.sleep(0.05)
def calibrate(self, count=256, delay=0.200):
    """
    Calibrate the magnetometer.

    The magnetometer needs to be turned in all possible directions
    during the calibration process. Ideally each axis would once
    line up with the magnetic field.

    count: int
        Number of magnetometer readings that are taken for the calibration.
    delay: float
        Delay between the magnetometer readings in seconds.

    FIXES: bare ``sleep(delay)`` raised NameError (only the ``time``
    module is imported); the progress print ran *before* the new sample
    was unpacked, so it always showed the previous reading.
    NOTE(review): ``_raw_magnet_data`` / ``_status`` are not defined in
    this file, and an axis that never varies divides by zero in the
    soft-iron step -- confirm before relying on this.
    """
    print("Starting Calibration.")
    print("The magnetometer needs to be turned in all possible directions \
during the callibration process. Ideally each axis would once \
line up with the magnetic field.")
    # Disable any previous calibration while sampling.
    self._offset = (0, 0, 0)
    self._scale = (1, 1, 1)
    raw_data = self._raw_magnet_data
    raw_x = raw_data[0][0]
    raw_y = raw_data[1][0]
    raw_z = raw_data[2][0]
    self._status  # Enable updating readings again
    minx = maxx = raw_x
    miny = maxy = raw_y
    minz = maxz = raw_z
    while count:
        time.sleep(delay)
        raw_data = self._raw_magnet_data
        raw_x = raw_data[0][0]
        raw_y = raw_data[1][0]
        raw_z = raw_data[2][0]
        print(raw_x, raw_y, raw_z)
        self._status  # Enable updating readings again
        minx = min(minx, raw_x)
        maxx = max(maxx, raw_x)
        miny = min(miny, raw_y)
        maxy = max(maxy, raw_y)
        minz = min(minz, raw_z)
        maxz = max(maxz, raw_z)
        count -= 1
    # Hard iron correction: center of the observed extremes.
    offset_x = (maxx + minx) / 2
    offset_y = (maxy + miny) / 2
    offset_z = (maxz + minz) / 2
    self._offset = (offset_x, offset_y, offset_z)
    print("+++++++++++")
    print("Hard Iron Offset Values:")
    print(self._offset)
    # Soft iron correction: normalize each axis' spread to the mean spread.
    avg_delta_x = (maxx - minx) / 2
    avg_delta_y = (maxy - miny) / 2
    avg_delta_z = (maxz - minz) / 2
    avg_delta = (avg_delta_x + avg_delta_y + avg_delta_z) / 3
    scale_x = avg_delta / avg_delta_x
    scale_y = avg_delta / avg_delta_y
    scale_z = avg_delta / avg_delta_z
    self._scale = (scale_x, scale_y, scale_z)
    print("Soft iron values")
    print(self._scale)
## DEFAULT FROM LSM DRIVER
def _read_u8(self, sensor_type, address):
    """Read an 8-bit unsigned value from the specified 8-bit address.

    The sensor_type boolean should be _MAGTYPE when talking to the
    magnetometer, or _XGTYPE when talking to the accel or gyro.
    MUST be implemented by subclasses!
    """
    raise NotImplementedError()

def _read_bytes(self, sensor_type, address, count, buf):
    """Read `count` bytes into `buf` from the provided 8-bit register
    address. Same sensor_type convention as _read_u8.
    MUST be implemented by subclasses!
    """
    raise NotImplementedError()

def _write_u8(self, sensor_type, address, val):
    """Write an 8-bit unsigned value to the specified 8-bit address.
    Same sensor_type convention as _read_u8.
    MUST be implemented by subclasses!
    """
    raise NotImplementedError()
class MPU9250_I2C(MPU9250):
    """Driver for the MPU9250 connect over I2C.
    :param ~busio.I2C i2c: The I2C bus object used to connect to the MPU9250.
    .. note:: This object should be shared among other driver classes that use the
        same I2C bus (SDA & SCL pins) to connect to different I2C devices.
    :param int mag_address: A 8-bit integer that represents the i2c address of the
        MPU9250's magnetometer. Options are limited to ``0x0c``.
        Defaults to ``0x0c``.
    :param int xg_address: A 8-bit integer that represents the i2c address of the
        MPU9250's accelerometer and gyroscope. Options are limited to ``0x40`` or ``0x41``.
        Defaults to ``0x41``.
    """
    # NOTE(review): the docstring's 0x40/0x41 claim disagrees with the code
    # below, which accepts 0x68/0x69 and defaults to 0x69.

    def __init__(self, i2c, mag_address=_AK8963_DEFAULT_ADDRESS,
                 xg_address=_MPU6500_DEFAULT_ADDRESS):
        if xg_address in (0x68, 0x69): #and mag_address in (0x0c, 0x0b):
            # NOTE(review): the magnetometer device is commented out, so any
            # _MAGTYPE access below raises AttributeError -- presumably the
            # AK8963 is reached via bus bypass instead; confirm.
            #self._mag_device = i2c_device.I2CDevice(i2c, mag_address)
            self._xg_device = i2c_device.I2CDevice(i2c, xg_address)
            super().__init__()
        else:
            raise ValueError('address parmeters are incorrect. Read the docs at '
                             'circuitpython.rtfd.io/projects/lsm9ds1/en/latest'
                             '/api.html#adafruit_lsm9ds1.LSM9DS1_I2C')

    def _read_u8(self, sensor_type, address):
        # Pick the bus device for the requested sensor half.
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as i2c:
            self._BUFFER[0] = address & 0xFF
            # Write the register address, then read one byte back into
            # BUFFER[1].
            i2c.write_then_readinto(self._BUFFER, self._BUFFER, out_end=1, in_start=1, in_end=2)
        return self._BUFFER[1]

    def _read_bytes(self, sensor_type, address, count, buf):
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as i2c:
            buf[0] = address & 0xFF
            # Register address out, `count` bytes back into buf.
            i2c.write_then_readinto(buf, buf, out_end=1, in_end=count)

    def _write_u8(self, sensor_type, address, val):
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as i2c:
            self._BUFFER[0] = address & 0xFF
            self._BUFFER[1] = val & 0xFF
            i2c.write(self._BUFFER, end=2)
class MPU9250_SPI(MPU9250):
    """Driver for the MPU9250 connect over SPI.
    :param ~busio.SPI spi: The SPI bus object used to connect to the MPU9250.
    .. note:: This object should be shared among other driver classes that use the
        same SPI bus (SCK, MISO, MOSI pins) to connect to different SPI devices.
    :param ~digitalio.DigitalInOut mcs: The digital output pin connected to the
        LSM9DS1's CSM (Chip Select Magnetometer) pin.
    :param ~digitalio.DigitalInOut xgcs: The digital output pin connected to the
        LSM9DS1's CSAG (Chip Select Accelerometer/Gyroscope) pin.
    """
    # pylint: disable=no-member
    def __init__(self, spi, xgcs, mcs):
        # SPI mode 3 (phase=1, polarity=1) at 200 kHz for both chip selects.
        self._mag_device = spi_device.SPIDevice(spi, mcs, baudrate=200000, phase=1, polarity=1)
        self._xg_device = spi_device.SPIDevice(spi, xgcs, baudrate=200000, phase=1, polarity=1)
        super().__init__()

    def _read_u8(self, sensor_type, address):
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as spi:
            # Bit 7 set marks a read transaction on the SPI protocol.
            self._BUFFER[0] = (address | 0x80) & 0xFF
            spi.write(self._BUFFER, end=1)
            spi.readinto(self._BUFFER, end=1)
        return self._BUFFER[0]

    def _read_bytes(self, sensor_type, address, count, buf):
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as spi:
            buf[0] = (address | 0x80) & 0xFF
            spi.write(buf, end=1)
            spi.readinto(buf, end=count)

    def _write_u8(self, sensor_type, address, val):
        if sensor_type == _MAGTYPE:
            device = self._mag_device
        else:
            device = self._xg_device
        with device as spi:
            # Bit 7 clear marks a write transaction.
            self._BUFFER[0] = (address & 0x7F) & 0xFF
            self._BUFFER[1] = val & 0xFF
            spi.write(self._BUFFER, end=2)
class AccelRange: # pylint: disable=too-few-public-methods
    """Allowed values for `accelerometer_range`.
    - ``AccelRange.RANGE_2_G``
    - ``AccelRange.RANGE_4_G``
    - ``AccelRange.RANGE_8_G``
    - ``AccelRange.RANGE_16_G``
    """
    # Values are written to AFS bits [4:3] of ACCEL_CONFIG.
    RANGE_2_G = 0 # +/- 2g (default value)
    RANGE_4_G = 1 # +/- 4g
    RANGE_8_G = 2 # +/- 8g
    RANGE_16_G = 3 # +/- 16g
class GyroRange: # pylint: disable=too-few-public-methods
    """Allowed values for `gyro_range`.
    - ``GyroRange.RANGE_250_DPS``
    - ``GyroRange.RANGE_500_DPS``
    - ``GyroRange.RANGE_1000_DPS``
    - ``GyroRange.RANGE_2000_DPS``
    """
    # Values are written to FS bits [4:3] of GYRO_CONFIG.
    RANGE_250_DPS = 0 # +/- 250 deg/s (default value)
    RANGE_500_DPS = 1 # +/- 500 deg/s
    RANGE_1000_DPS = 2 # +/- 1000 deg/s
    RANGE_2000_DPS = 3 # +/- 2000 deg/s
class Bandwidth: # pylint: disable=too-few-public-methods
    """Allowed values for `filter_bandwidth`.
    - ``Bandwidth.BAND_260_HZ``
    - ``Bandwidth.BAND_184_HZ``
    - ``Bandwidth.BAND_94_HZ``
    - ``Bandwidth.BAND_44_HZ``
    - ``Bandwidth.BAND_21_HZ``
    - ``Bandwidth.BAND_10_HZ``
    - ``Bandwidth.BAND_5_HZ``
    """
    # Digital low-pass filter selections (DLPF_CFG values).
    BAND_260_HZ = 0 # Docs imply this disables the filter
    BAND_184_HZ = 1 # 184 Hz
    BAND_94_HZ = 2 # 94 Hz
    BAND_44_HZ = 3 # 44 Hz
    BAND_21_HZ = 4 # 21 Hz
    BAND_10_HZ = 5 # 10 Hz
    BAND_5_HZ = 6 # 5 Hz
class Rate: # pylint: disable=too-few-public-methods
    """Allowed values for `cycle_rate`.
    - ``Rate.CYCLE_1_25_HZ``
    - ``Rate.CYCLE_5_HZ``
    - ``Rate.CYCLE_20_HZ``
    - ``Rate.CYCLE_40_HZ``
    """
    # Wake-up frequencies used while in low-power cycle mode.
    CYCLE_1_25_HZ = 0 # 1.25 Hz
    CYCLE_5_HZ = 1 # 5 Hz
    CYCLE_20_HZ = 2 # 20 Hz
    CYCLE_40_HZ = 3 # 40 Hz
class MagSensitivity: # pylint: disable=too-few-public-methods
    """Allowed values for the magnetometer output resolution (CNTL1 BIT).

    - ``MagSensitivity.SENSE_14BIT``
    - ``MagSensitivity.SENSE_16BIT``

    FIXES: the original attribute names ``14BIT`` / ``16BIT`` begin with a
    digit, which is a SyntaxError in Python; renamed to the ``SENSE_*``
    names the rest of the file (the ``magnetic`` property) already uses.
    The original docstring also listed ``Rate`` values by mistake.
    """
    SENSE_14BIT = 0  # 0.6 uT per LSB
    SENSE_16BIT = 1  # 0.15 uT per LSB
class MagMode:
    """Allowed values for the AK8963 CNTL1 operating-mode bits.

    - ``MagMode.POWERDOWN`` / ``MEASURE_SINGLE`` / ``MEASURE_8HZ``
    - ``MagMode.EXT_TRIG`` / ``MEASURE_100HZ`` / ``SELFTEST`` / ``FUSE``

    FIX: ``MEASURE_100HZ`` was 5, but the AK8963 datasheet defines
    continuous measurement mode 2 (100 Hz) as 0b0110 = 6 -- the original
    inline comment already said 0b0110. Docstring names updated to match
    the actual attributes.
    """
    POWERDOWN = 0       # 0b0000
    MEASURE_SINGLE = 1  # 0b0001
    MEASURE_8HZ = 2     # 0b0010  continuous mode 1 (8 Hz)
    EXT_TRIG = 4        # 0b0100  external trigger measurement
    MEASURE_100HZ = 6   # 0b0110  continuous mode 2 (100 Hz)
    SELFTEST = 8        # 0b1000
    FUSE = 15           # 0b1111  fuse ROM access
|
#coding=utf-8
'''
test flask framework
by yan-bin-lin
'''
import os
from linebot.models import TextSendMessage,ImageSendMessage
from linebot import LineBotApi
from linebot.exceptions import LineBotApiError
from flask import Flask, request, Blueprint, url_for
from blueprint_pf.pf import PF,count_Chain
from help.helper import helper,help_all
from mission.mission import mission
from find_card.find_card import search_card
# Flask application and blueprint wiring: PF handles /pf/*, helper /help/*.
app = Flask(__name__)
app.register_blueprint(PF, url_prefix='/pf')
app.register_blueprint(helper, url_prefix='/help')
@app.route('/')
def website_test():
    """Health-check route confirming the Flask app is serving requests."""
    return '<p>This is a flask test</p>'
@app.route('/callback', methods=['POST'])
# first step: Line platform POSTs webhook events here
def callback():
    """Line-bot webhook: parse the incoming event, dispatch on the "lb."
    command prefix and reply through the Line Messaging API.

    NOTE(review): the channel access token below is hard-coded -- it is a
    leaked credential; rotate it and load it from configuration.
    NOTE(review): `out_text` is only bound inside the command branches; a
    message that is not "lb. <method> ..." (or contains only "EDDY")
    reaches the reply call with `out_text` unbound (NameError).
    NOTE(review): the encode()/decode() juggling is Python 2 style; under
    Python 3 several branches assign a str and the later .decode() call
    would raise AttributeError -- confirm the target interpreter.
    """
    # get json input
    decode = request.get_json()
    # start line_bot_api
    channel_token = "w5LR/GWzxSwwVP910AG4AOaDBv0Ys7bEW2yJM9qYdyBfHhgnh1mRJlXKLOpncI/f5iEJLb38bLWriV9AoZ72p45BODmeil/Ux7iWSbYqgcFx9E1uLwf1kCWk6luXUQUH0ZN5WxhRITHYZjx5balb0AdB04t89/1O/w1cDnyilFU="
    line_bot_api = LineBotApi(channel_token)
    # get reply token for answering this specific event
    reply_token = decode['events'][0]["replyToken"]
    # get message text (assumes a text-message event -- TODO confirm)
    text = decode['events'][0]['message']['text']
    if(text.upper().find("EDDY") != -1):
        line_bot_api.reply_message(reply_token, TextSendMessage(text='Eddy Green!'))
    # example message: "lb. news 1" -> judge="lb.", method="news", args...
    tlist = text.split(' ')
    if len(tlist) >= 2:
        judge = tlist[0] #lb.
        method = tlist[1] #news ,control = tlis[2]
        if judge == "lb.":
            # help method
            if method == "help":
                if len(tlist) == 2:
                    tlist.append('')
                out_text = help_all(tlist[2])
            # pf method (chain counter)
            elif method == "pf":
                out_text = count_Chain(tlist[2:])
            # mission method
            elif method == "ms":
                if len(tlist) == 3:
                    tlist.append('')
                out_text = mission(tlist[2:])
            # find card method: reply with an image when a URL is returned
            elif method == 'card':
                out_text = search_card(tlist[2:])
                if out_text.find('http') != -1:
                    line_bot_api.reply_message(reply_token, ImageSendMessage(
                        original_content_url=out_text,
                        preview_image_url=out_text))
                else:
                    out_text = out_text.encode('utf-8')
            else:
                # unknown method -> "invalid command" message
                out_text = u'無效指令: '.encode('utf-8') + method.encode('utf_8')
    try:
        line_bot_api.reply_message(reply_token, TextSendMessage(text = out_text.decode('utf-8')))
    except LineBotApiError as e:
        # Build and send a diagnostic reply with the API error details.
        out_text = '回覆錯誤!!\n錯誤資訊: ' + e.error.message + '\n錯誤詳情: ' + str(e.error.details)
        print('error!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
        print(e.status_code)
        print(e.error.message)
        print(e.error.details)
        line_bot_api.reply_message(reply_token, TextSendMessage(text = out_text))
    return "<p>hello world</p>"
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
from django.urls import path
from apps.cart.views import CartListView, CartAddView, CartDeleteView
# URL namespace for reversing, e.g. reverse('cart:list').
app_name = 'cart'

urlpatterns = [
    path('list/', CartListView.as_view(), name='list'),       # show cart contents
    path('add/', CartAddView.as_view(), name='add'),          # add an item
    path('delete/', CartDeleteView.as_view(), name='delete'), # remove an item
]
from datetime import datetime
from ems.datasets.case.case_set import CaseSet
from ems.generators.duration.duration import DurationGenerator
from ems.generators.location.location import LocationGenerator
from ems.generators.event.event_generator import EventGenerator
from ems.models.cases.random_case import RandomCase
from random import randint
from numpy.random import choice
# Implementation of a case set that randomly generates cases while iterating
class RandomCaseSet(CaseSet):
    """Case set that lazily generates random emergency cases.

    FIX: ``__len__`` returned ``self.num_cases``, an attribute that is
    never defined anywhere (AttributeError); the number of cases this set
    produces is ``quantity`` (None means unbounded -- ``len()`` will then
    raise TypeError, which at least names the real problem).
    """

    def __init__(self,
                 time: datetime,
                 case_time_generator: DurationGenerator,
                 case_location_generator: LocationGenerator,
                 event_generator: EventGenerator,
                 quantity: int = None):
        super().__init__(time)
        self.time = time
        self.case_time_generator = case_time_generator
        self.location_generator = case_location_generator
        self.event_generator = event_generator
        self.quantity = quantity

    def iterator(self):
        """Yield randomly generated RandomCase objects, advancing
        self.time by a generated inter-arrival duration for each."""
        k = 1
        while self.quantity is None or k <= self.quantity:
            # Compute time and location of next case via the generators.
            self.time = self.time + self.case_time_generator.generate(timestamp=self.time)['duration']
            point = self.location_generator.generate(self.time)
            lmda = self.case_time_generator.lmda
            # A high arrival rate (lambda) signifies a disaster scenario,
            # in which higher-severity cases are more likely.
            # TODO(review, kept from original): verify the severity logic
            # interacts correctly with the time/generator code above.
            disaster = lmda > 0.3
            if disaster:
                severity = choice(
                    [1, 2, 3, 4],
                    p=[0.6, 0.2, 0.1, 0.1]
                )
            else:
                severity = choice(
                    [1, 2, 3, 4],
                    p=[0.03397097625329815, 0.03781882145998241,
                       0.1994283201407212, 0.7287818821459983]
                )
            # Create and yield the case.
            case = RandomCase(id=k,
                              date_recorded=self.time,
                              incident_location=point,
                              event_generator=self.event_generator,
                              priority=severity
                              )
            k += 1
            yield case

    def __len__(self):
        return self.quantity
|
from django import forms
# Gender choices for the patient forms (value, label).
GENERO_CHOICES = (
    ('masculino', 'Masculino',),
    ('femenino', 'Femenino',),
)
#FORMATS = ['%Y-%m-%d', # '2006-10-25'
#           '%m/%d/%Y', # '10/25/2006'
#           '%m/%d/%y'] # '10/25/06'
# Accepted date input formats (day/month/year).
FORMATS = ['%d/%m/%Y']
# Patient registration form.
class FormularioPaciente(forms.Form):
    """Form capturing a new patient's personal and clinical data."""
    numero = forms.IntegerField(required=True)
    nombre = forms.CharField(label='nombre del paciente',
                             max_length=25,
                             required=True)
    apellido_paterno = forms.CharField(label='apellido paterno del paciente',
                                       max_length=25,
                                       required=True)
    apellido_materno = forms.CharField(label='apellido materno del paciente',
                                       max_length=25,
                                       required=True)
    curp = forms.CharField(label='curp del paciente',
                           max_length=25,
                           required=True)
    # BUG FIX: DateInput's ``format`` argument must be a single strftime
    # string, not a list of formats (lists are only valid for the field's
    # ``input_formats``); passing the list broke rendering of initial values.
    fecha_nacimiento = forms.DateField(required=True,
                                       input_formats=FORMATS,
                                       widget=forms.DateInput(format=FORMATS[0]),)
    fecha_ingreso = forms.DateField(required=True,
                                    input_formats=FORMATS,
                                    widget=forms.DateInput(format=FORMATS[0]),)
    edad = forms.IntegerField(required=True)
    sexo = forms.ChoiceField(required=True,
                             widget=forms.RadioSelect,
                             choices=GENERO_CHOICES)
    direccion = forms.CharField(label='direccion',
                                max_length=40,
                                required=True,)
    peso = forms.DecimalField(label='Kg',
                              required=True,
                              max_digits=5,
                              decimal_places=2,)
    estatura = forms.DecimalField(label='Mts',
                                  required=True,
                                  max_digits=5,
                                  decimal_places=2,)
    diagnostico = forms.CharField(label='diagnostico',
                                  max_length=100,
                                  required=True,)
# Form to update an existing patient's record (all fields optional).
class ActualizarPaciente(forms.Form):
    """Optional-field form for updating a patient's clinical data."""
    mejorias = forms.CharField(label='Mejorias',
                               max_length=50,
                               required=False,)
    diagnostico = forms.CharField(label='Diagnostico',
                                  max_length=100,
                                  required=False,)
    enfermedad = forms.CharField(label='Enfermedad',
                                 max_length=30,
                                 required=False,)
    tratamiento = forms.CharField(label='Tratamiento',
                                  max_length=40,
                                  required=False,)
    direccion = forms.CharField(label='direccion',
                                max_length=40,
                                required=False)
    peso = forms.DecimalField(label='Kg',
                              required=False,
                              max_digits=5,
                              decimal_places=2,)
    estatura = forms.DecimalField(label='Mts',
                                  required=False,
                                  max_digits=5,
                                  decimal_places=2,)
# Form to look a patient up by registration number.
class BuscarMatricula(forms.Form):
    matricula = forms.IntegerField(required=True)
# Menu options for the general-reports screen (value, label).
OPCIONES_CHOICES = (
    ('1','Tercera Edad',),
    ('2','Sexo',),
    ('3','Adultos',),
    ('4','Ninos',),
)
# Reports-menu form: pick one report type via radio buttons.
class MenuReportesGenerales(forms.Form):
    opcion = forms.ChoiceField(required=True,
                               widget=forms.RadioSelect,
                               choices=OPCIONES_CHOICES,)
|
import json

# --- serializing JSON ---
data = {
    "president": {
        "name": "Zaphod Beeblebrox",
        "species": "Betelgeusian"
    }
}
# BUG FIX: write and read back the same file; the script previously wrote
# "data_file_.json" but then tried to read "data_file.json" (FileNotFoundError).
with open("data_file.json", "w") as write_file:
    json.dump(data, write_file)
json_string = json.dumps(data)
json_string_2 = json.dumps(data)

# --- deserializing JSON ---
# JSON has no tuple type, so a tuple round-trips as a list.
blackjack_hand = (8, "Q")
encoded_hand = json.dumps(blackjack_hand)
decoded_hand = json.loads(encoded_hand)
blackjack_hand == decoded_hand         # False: (8, "Q") != [8, "Q"]
blackjack_hand == tuple(decoded_hand)  # True after converting back
with open("data_file.json", "r") as read_file:
    data = json.load(read_file)
# BUG FIX: the string below had a '.' instead of ',' after the relative's
# name, so json.loads raised JSONDecodeError.
json_string3 = """
{
    "researcher": {
        "name": "Ford Prefect",
        "species": "Betelgusian",
        "relatives": [
            {
                "name": "Xaapacsc asfsm",
                "species": "baerfef"
            }
        ]
    }
}
"""
data = json.loads(json_string3)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# Load the two-class dataset (first column = label, rest = features).
# NOTE(review): fetches data over the network at runtime.
url = 'https://raw.githubusercontent.com/mathav95raj/Learning-Content/master/Phase%203%20-%202020%20(Summer)/Week%201%20(Mar%2028%20-%20Apr%204)/assignment/data.txt'
data = np.loadtxt(url, skiprows = 1)
df = pd.DataFrame(data)
df=df.rename(columns={0: "Label"})
# Map numeric class ids to letters for plotting.
df["Label"].replace({1: "A", 2: "B"}, inplace=True)
# Pairwise scatter plots of all feature columns, colored by class.
g = sns.PairGrid(df, hue="Label", vars = df.iloc[:,1:], palette = ['r', 'b'], height = 1, aspect = 1)
g = g.map(plt.scatter, s=1)
g = g.add_legend()
x = df.iloc[:,1:]  # feature columns
y = df.iloc[:,0]   # class labels
# Standardize features before PCA (PCA is scale-sensitive).
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
             , columns = ['principal component 1', 'principal component 2'])
finalDf = pd.concat([principalDf, y], axis = 1)
# Scatter of the samples in the 2-D principal-component space.
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title('2 component PCA', fontsize = 20)
targets = ['A', 'B']
colors = ['r', 'b']
for target, color in zip(targets,colors):
    indicesToKeep = finalDf['Label'] == target
    ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']
               , finalDf.loc[indicesToKeep, 'principal component 2']
               , c = color
               , s = 50)
ax.legend(targets)
ax.grid()
# Dot products of each original feature with the two components.
# NOTE(review): range(1, 11) assumes exactly 10 feature columns, and
# component 2 (iloc[:,1]) is printed before component 1 — confirm intended.
for i in range(1,11):
    print(np.dot(principalDf.iloc[:,1],df.iloc[:,i]))
    print(np.dot(principalDf.iloc[:,0],df.iloc[:,i]))
def pancake_flips(s):
    """Return the number of flips needed to turn every pancake in ``s`` to '+'.

    Counts runs of equal characters in ``s``; the final run needs no flip
    when the stack already ends in '+'. Assumes ``s`` is non-empty (as the
    original script did — an empty line would raise IndexError).
    """
    runs = 0
    prev = '#'  # sentinel that differs from both '+' and '-'
    for c in s:
        if c != prev:
            prev = c
            runs += 1
    if s[-1] == '+':
        runs -= 1
    return runs


def main():
    """Read T test cases from stdin and print one 'Case #i: answer' per case."""
    # Ported from Python 2: raw_input() -> input(), print statement -> function.
    t = int(input())
    for case_num in range(1, t + 1):
        print("Case #{}: {}".format(case_num, pancake_flips(input())))


if __name__ == "__main__":
    main()
|
from shape import IShape
class Circle(IShape):
    """Concrete IShape implementation representing a circle."""
    def draw(self):
        # Demo implementation: just report which draw() method ran.
        print("Inside Circle::draw() method.")
|
from twisted.internet import reactor as reactor
from twisted.internet.protocol import ClientFactory, Protocol
from twisted.internet.task import LoopingCall
from twisted.internet.error import CannotListenError
from twisted.internet.defer import DeferredQueue
import pickle
import pygame
import sys
from maps import Maps
from menu import Menu
from ship import Ship
from gamespace import GameSpace
host = "student02.cse.nd.edu"  # game server hostname
gs_queue = DeferredQueue()  # outgoing-message queue shared with the GameSpace
gameOver = False  # module-level flag: set once an END message is handled
class ClientConnection(Protocol):
def __init__(self, port, gs):
self.delimiter = "\r\n"
self.port = port
self.gs = gs
def dataReceived(self, data):
print "received data:", data
strings = data.split()
# Let the users know what the other ship is
if strings[0] == "START":
self.gs.otherShip = Ship(self.gs, strings[1].lower(), strings[2])
self.gs.myShip.weapon.enemy_ship = self.gs.otherShip
self.gs.otherShip.weapon.enemy_ship = self.gs.myShip
self.gs.otherShip.tick()
# Share coordinates of fired shots
elif strings[0] == "FIRE":
self.gs.otherShip.weapon.target = (float(strings[2]), float(strings[3]))
self.gs.otherShip.weapon.firing_enabled = True
print "firing on coordinates", strings[2], strings[3]
# End the game
elif strings[0] == "END" and not gameOver:
self.gs.gameOver = int(strings[1])
global gameOver
gameOver = True
def connectionMade(self):
print "now connected to", host, "port", self.port
self.gs.queue.get().addCallback(self.sendData)
def sendData(self, data):
self.transport.write(data + self.delimiter)
self.gs.queue.get().addCallback(self.sendData)
def connectionLost(self, reason):
print "lost connection to", host, "port", self.port
reactor.stop()
class ClientConnectionFactory(ClientFactory):
    """Factory producing ClientConnection protocols for one port/gamespace."""
    def __init__(self, port, gs):
        self.port = port
        self.gs = gs
        # Removed dead code: `protocol = ClientConnection` only bound an
        # unused local (not Twisted's `self.protocol` attribute); the
        # protocol is constructed directly in buildProtocol below.
    def buildProtocol(self, addr):
        return ClientConnection(self.port, self.gs)
class GameConnection:
    """Builds the GameSpace, starts the 60 Hz game loop, and runs the reactor."""
    def __init__(self, port):
        gs = GameSpace(gs_queue, port)
        lc = LoopingCall(gs.update)
        # BUG FIX: on Python 2, 1/60 is integer division and equals 0, which
        # made the loop run with no delay; 1.0/60 gives the intended 60 Hz tick.
        lc.start(1.0 / 60)
        reactor.connectTCP(host, port, ClientConnectionFactory(port, gs))
        reactor.run()
|
import sys
import librosa as lbs
import numpy as np
from matplotlib import pyplot as plt
from scipy.io import wavfile
from scipy.signal import butter, lfilter
np.set_printoptions(threshold=sys.maxsize)
class NoiseReductionNew:
    """Noise-reduction helpers built on scipy Butterworth filters."""

    def bandpass(self, lowCut, highCut, fs, order=5):
        """Design a Butterworth band-pass filter.

        Returns the (b, a) coefficient pair from scipy.signal.butter.
        (The previous single-name return hid that this is a tuple.)
        """
        nyq = 0.5 * fs  # Nyquist frequency
        low = lowCut / nyq
        high = highCut / nyq
        return butter(order, [low, high], btype='band')

    def lowpass(self, cutoffFrequency, fs, order=5):
        """Design a Butterworth low-pass filter; returns (b, a) coefficients."""
        nyq = 0.5 * fs
        return butter(order, cutoffFrequency / nyq, btype='low')

    def bandpass_filter(self, data, lowCut, highCut, fs, order=5):
        """Band-pass filter ``data`` and return the filtered signal."""
        b, a = self.bandpass(lowCut, highCut, fs, order=order)
        return lfilter(b, a, data)

    def lowpass_filter(self, data, cutoffFrequency, fs, order=5):
        """Low-pass filter ``data`` and return the filtered signal."""
        b, a = self.lowpass(cutoffFrequency, fs, order=order)
        return lfilter(b, a, data)

    def noiseRemover(self, data, cutoffValue):
        """Return a 1-d array of samples strictly greater than ``cutoffValue``.

        Vectorized with a boolean mask instead of a Python-level list
        comprehension; output is identical for 1-d input.
        """
        arr = np.asarray(data)
        return arr[arr > cutoffValue]
if __name__ == '__main__':
    nrn = NoiseReductionNew()
    # Load the sample clip and a background-noise clip (librosa resamples to 22050 Hz by default).
    sampleData, sampleRate = lbs.load('Wav/sound_clip.wav')
    # NOTE(review): 'bgnosie.wav' looks like a typo for 'bgnoise.wav' — confirm the file name on disk.
    noiseData, noiseRate = lbs.load('Wav/bgnosie.wav')
    filteredAudio = nrn.bandpass_filter(sampleData, 95, 1500, sampleRate, 1)
    wavfile.write('Wav/filtered_angry1.wav', 22050, filteredAudio)
    length = sampleData.shape[0] / sampleRate
    time = np.linspace(0., length, sampleData.shape[0])  # one time stamp per sample, spanning the clip duration
    dt = time[4] - time[3]  # sample spacing in seconds
    n = len(time)  # number of samples
    f = sampleData  # Signal
    fhat = np.fft.fft(f, n)  # Fourier-transformed signal
    PSD = fhat * np.conj(fhat) / n  # power spectrum of the signal
    L = np.arange(1, np.floor(n / 2), dtype="int")  # indices of the first half of the freqs (second half is the mirror)
    indices = PSD > 2  # freqs with large power
    PSDclean = PSD * indices  # zero out all others
    # NOTE(review): PSDclean/indices are computed but never applied —
    # this inverts the *unfiltered* fhat, so the spectral filtering is a
    # no-op; confirm whether np.fft.ifft(fhat * indices) was intended.
    ffilt = np.fft.ifft(fhat)
    absFfilt = ffilt.real
    freq = (1 / (dt * n) * np.arange(n))  # frequency axis values
    fig, axis = plt.subplots(3, 1)
    plt.sca(axis[0])
    # NOTE(review): capitalized 'LineWidth' kwarg is deprecated in newer
    # matplotlib releases ('linewidth' is canonical).
    plt.plot(time, f, color='c', LineWidth=1.5, label="Sample")
    plt.xlim(time[0], time[-1])
    plt.ylabel("Amplitude [?]")
    plt.xlabel("Seconds [s]")
    plt.legend()
    plt.sca(axis[1])
    plt.plot(time, ffilt, color="royalblue", label="Filtered")
    plt.xlim(time[0], time[-1])
    plt.ylabel("Amplitude [?]")
    plt.xlabel("Seconds [s]")
    plt.legend()
    plt.sca(axis[2])
    plt.plot(time, absFfilt, color="mediumslateblue", label="Sample - Filtered")
    plt.xlim(time[0], time[-1])
    plt.ylabel("Amplitude [?]")
    plt.xlabel("Seconds [s]")
    plt.legend()
    plt.tight_layout()
    plt.show()
|
class Solution:
    def sumSubseqWidths(self, a):
        """
        :type a: List[int]
        :rtype: int

        Sum of (max - min) over all non-empty subsequences, mod 1e9+7.
        After sorting, a[i] is the maximum of 2^i subsequences and the
        minimum of 2^(l-1-i), so the answer is
            sum((2^i - 2^(l-1-i)) * a[i]) = sum((a[i] - a[l-1-i]) * 2^i).
        Powers of two are maintained mod p incrementally, avoiding the
        huge integers that 1 << i produced for large inputs.
        Note: sorts ``a`` in place (as the original implementation did).
        """
        p = (10**9) + 7
        l = len(a)
        a.sort()
        ans = 0
        pow2 = 1  # 2^i mod p
        for i, v in enumerate(a):
            ans = (ans + (v - a[l - 1 - i]) * pow2) % p
            pow2 = pow2 * 2 % p
        return ans % p
# Quick demo of the solver.
s = Solution()
print(s.sumSubseqWidths([4,5]))
# Removed dead code: a bare `[1,2,3]` list literal that was evaluated and
# discarded with no effect.
from django.shortcuts import render, redirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from repository import models
import json, hashlib, time
# Create your views here.
def login(request):
    """Log a user in and cache their menu/permission data in the session.

    GET renders the login page; POST validates credentials, collects the
    menu leaves and permitted URLs reachable through the user's roles,
    stores them in the session, and redirects to the backend index.
    """
    if request.method == "GET":
        return render(request, 'web/login.html')
    elif request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        # print(password)
        # NOTE(review): the password is compared in plaintext against the
        # database — confirm whether hashed storage was intended.
        obj = models.User.objects.filter(username=username, password=password).first()
        if obj:
            role_list = models.Role.objects.filter(user2role__u__username=username)
            # Menu leaf entries (caption/url/icon) granted via any role.
            menu_leaf_list = models.Permission2Role.objects.filter \
                (r__in=role_list).values('p__url', 'p__caption', 'p__menu_id', 'p__icon').exclude(p__menu__isnull=True)
            action_info = models.Permission2Role.objects.filter(r__in=role_list).values('p__url')
            action_list = []
            permission_list = {}
            for i in menu_leaf_list:
                permission_list[i['p__menu_id']] = [i['p__caption'], i['p__url'], i['p__icon']]
            for item in action_info:
                action_list.append(item['p__url'])
            request.session['user_info'] = {
                'nid': obj.id,
                'username': obj.username,
                "permission_list": permission_list,
                'action_list': action_list
            }
            return redirect('/backend/index.html')
        else:
            return render(request, 'web/login.html', {'msg': "用户名或密码错误"})
    else:
        return render(request, 'web/login.html')
""" api demo 代码 """
def hash_key(key):
    """Return the hex MD5 digest of *key* (UTF-8 encoded)."""
    digest = hashlib.md5()
    digest.update(key.encode('utf-8'))
    return digest.hexdigest()
# Shared API secret. NOTE(review): this looks like md5('123456') — presumably
# a placeholder; confirm before deploying.
ck = "e10adc3949ba59abbe56e057f20f883e"
# Used auth keys; TODO: keep this in redis with an expiry time instead.
auth_list = []
@csrf_exempt  # POST requests to this endpoint are exempt from CSRF protection
def api(request):
    """Host-report endpoint authenticated via a timestamped MD5 key.

    The client sends ``md5(secret|timestamp)|timestamp`` in the AUTHKEY
    header; requests older than 10 seconds, replayed keys, or keys that
    do not match the server-side recomputation are rejected.
    """
    auth_key_time = request.META['HTTP_AUTHKEY']
    client_auth_key, client_time = auth_key_time.split('|')
    server_time = time.time()
    # Time window: reject keys older than 10 seconds.
    if server_time - 10 > float(client_time):
        return HttpResponse('授权失败')
    # Replay protection: reject keys that were already used.
    if auth_key_time in auth_list:
        return HttpResponse('授权失败')
    # Recompute the MD5 key server-side for comparison.
    key_time = "%s|%s" % (ck, client_time)
    server_auth_key = hash_key(key_time)
    # Key mismatch: authorization failed.
    if client_auth_key != server_auth_key:
        return HttpResponse('授权失败')
    # Record the used key (should be a redis key with an expiry).
    auth_list.append(auth_key_time)
    if request.method == "POST":
        host_info = json.loads(str(request.body, encoding='utf-8'))
        if host_info['status'] is True:
            hostname = host_info['data']['hostname']
            host_status = models.Host.objects.filter(hostname=hostname).count()
            if host_status == 1:
                # Host exists: update its record (not implemented here).
                return HttpResponse('success')
            else:
                # Host missing: could create it here (not implemented).
                return HttpResponse('主机不存在')
        else:
            return HttpResponse('JSON数据异常')
|
from typing import Callable, Dict, List, Optional, Sequence, Union
import numpy as np
from assertpy import assert_that
from bokeh import plotting
from bokeh.models import (
BasicTicker,
ColorBar,
ColumnDataSource,
HoverTool,
LinearColorMapper,
PrintfTickFormatter,
)
from bokeh.plotting import Figure
from sklearn import metrics as sklmetrics
from sklearn.metrics import accuracy_score
from metriculous.evaluators._bokeh_utils import (
DARK_BLUE,
FONT_SIZE,
HISTOGRAM_ALPHA,
SCATTER_CIRCLES_FILL_ALPHA,
SCATTER_CIRCLES_LINE_ALPHA,
TOOLBAR_LOCATION,
TOOLS,
add_title_rows,
apply_default_style,
scatter_plot_circle_size,
)
from metriculous.evaluators._classification_utils import check_normalization
def _bokeh_output_histogram(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    class_names: Sequence[str],
    title_rows: Sequence[str],
    sample_weights: Optional[np.ndarray] = None,
    x_label_rotation: Union[str, float] = "horizontal",
) -> Callable[[], Figure]:
    """
    Creates a histogram comparing the class distributions of the predictions
    and the ground truth.
    Args:
        y_true:
            1d integer array indicating the reference labels.
        y_pred:
            1d integer array indicating the predictions.
        class_names:
            Sequence of strings corresponding to the classes.
        title_rows:
            Sequence of strings to be used for the chart title.
        sample_weights:
            Sequence of floats to modify the influence of individual samples.
        x_label_rotation:
            Rotation of the class name labels.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    n = len(class_names)
    assert_that(np.shape(y_true)).is_equal_to(np.shape(y_pred))
    if sample_weights is None:
        sample_weights = np.ones_like(y_true)
    assert_that(np.shape(y_true)).is_equal_to(np.shape(sample_weights))
    # One histogram bin per class: edges 0..n.
    bins = np.arange(0, n + 1, 1)
    # Only normalize to fractions when non-trivial sample weights are used.
    normalize = not np.allclose(sample_weights, 1.0)
    def figure() -> Figure:
        p = plotting.figure(
            x_range=class_names,
            plot_height=350,
            plot_width=350,
            tools=TOOLS,
            toolbar_location=TOOLBAR_LOCATION,
        )
        # class distribution in prediction
        p.vbar(
            x=class_names,
            top=np.histogram(
                y_pred, bins=bins, weights=sample_weights, density=normalize
            )[0],
            width=0.85,
            color=DARK_BLUE,
            alpha=HISTOGRAM_ALPHA,
            legend_label="Prediction",
        )
        # class distribution in ground truth (outline only, drawn on top)
        p.vbar(
            x=class_names,
            top=np.histogram(
                y_true, bins=bins, weights=sample_weights, density=normalize
            )[0],
            width=0.85,
            alpha=0.6,
            legend_label="Ground Truth",
            fill_color=None,
            line_color="black",
            line_width=2.5,
        )
        add_title_rows(p, title_rows)
        apply_default_style(p)
        p.yaxis.axis_label = (
            "Fraction of Instances" if normalize else "Number of Instances"
        )
        p.xaxis.major_label_orientation = x_label_rotation
        p.xgrid.grid_line_color = None
        # prevent panning to empty regions
        # NOTE(review): upper bound is 0.5 + n here, while the scatter chart
        # uses -0.5 + n — confirm which is intended.
        p.x_range.bounds = (-0.5, 0.5 + len(class_names))
        return p
    return figure
def _bokeh_confusion_matrix(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    class_names: Sequence[str],
    title_rows: Sequence[str],
    x_label_rotation: Union[str, float] = "horizontal",
    y_label_rotation: Union[str, float] = "vertical",
) -> Callable[[], Figure]:
    """
    Creates a confusion matrix heatmap.
    Args:
        y_true:
            1d integer array indicating the reference labels.
        y_pred:
            1d integer array indicating the predictions.
        class_names:
            Sequence of strings corresponding to the classes.
        title_rows:
            Sequence of strings to be used for the chart title.
        x_label_rotation:
            Rotation of the x-axis class name labels.
        y_label_rotation:
            Rotation of the y-axis class name labels.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    cm = sklmetrics.confusion_matrix(y_true, y_pred)
    # NOTE(review): a class absent from y_pred (or y_true) makes the
    # corresponding column (row) sum zero and produces NaN below — confirm
    # upstream guarantees every class occurs.
    cm_normalized = cm.astype("float") / cm.sum()
    cm_normalized_by_pred = cm.astype("float") / cm.sum(axis=0, keepdims=True)
    cm_normalized_by_true = cm.astype("float") / cm.sum(axis=1, keepdims=True)
    # Flatten the matrix into parallel per-cell columns for ColumnDataSource.
    predicted = list()
    actual = list()
    count = list()
    normalized = list()
    normalized_by_pred = list()
    normalized_by_true = list()
    for i, i_class in enumerate(class_names):
        for j, j_class in enumerate(class_names):
            predicted.append(j_class)
            actual.append(i_class)
            count.append(cm[i, j])
            normalized.append(cm_normalized[i, j])
            normalized_by_pred.append(cm_normalized_by_pred[i, j])
            normalized_by_true.append(cm_normalized_by_true[i, j])
    def figure() -> Figure:
        source = ColumnDataSource(
            data={
                "predicted": predicted,
                "actual": actual,
                "count": count,
                "normalized": normalized,
                "normalized_by_true": normalized_by_true,
                "normalized_by_pred": normalized_by_pred,
            }
        )
        p = plotting.figure(tools=TOOLS, x_range=class_names, y_range=class_names)
        mapper = LinearColorMapper(palette="Viridis256", low=0.0, high=1.0)
        p.rect(
            x="actual",
            y="predicted",
            width=0.95,
            height=0.95,
            source=source,
            fill_color={"field": "normalized_by_true", "transform": mapper},
            line_width=0,
            line_color="black",
        )
        p.xaxis.axis_label = "Ground Truth"
        p.yaxis.axis_label = "Prediction"
        p.xaxis.major_label_orientation = x_label_rotation
        p.yaxis.major_label_orientation = y_label_rotation
        p.add_tools(
            HoverTool(
                tooltips=[
                    ("Predicted", "@predicted"),
                    ("Ground truth", "@actual"),
                    ("Count", "@count"),
                    ("Normalized", "@normalized"),
                    ("Normalized by prediction", "@normalized_by_pred"),
                    # BUG FIX: tooltip label read "Normalize by ground truth".
                    ("Normalized by ground truth", "@normalized_by_true"),
                ]
            )
        )
        color_bar = ColorBar(
            color_mapper=mapper,
            major_label_text_font_size=FONT_SIZE,
            ticker=BasicTicker(desired_num_ticks=10),
            formatter=PrintfTickFormatter(format="%.1f"),
            label_standoff=5,
            border_line_color=None,
            location=(0, 0),
        )
        p.add_layout(color_bar, "right")
        add_title_rows(p, title_rows)
        apply_default_style(p)
        return p
    return figure
def _bokeh_confusion_scatter(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    class_names: Sequence[str],
    title_rows: Sequence[str],
    x_label_rotation: Union[str, float] = "horizontal",
    y_label_rotation: Union[str, float] = "vertical",
) -> Callable[[], Figure]:
    """
    Creates a scatter plot that contains the same information as a confusion matrix.
    Args:
        y_true:
            1d integer array indicating the reference labels.
        y_pred:
            1d integer array indicating the predictions.
        class_names:
            Sequence of strings corresponding to the classes.
        title_rows:
            Sequence of strings to be used for the chart title.
        x_label_rotation:
            Rotation of the x-axis class name labels.
        y_label_rotation:
            Rotation of the y-axis class name labels.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    if len(y_true) != len(y_pred):
        raise ValueError("y_true and y_pred must have the same length!")
    def figure() -> Figure:
        p = plotting.figure(
            x_range=(-0.5, -0.5 + len(class_names)),
            y_range=(-0.5, -0.5 + len(class_names)),
            plot_height=350,
            plot_width=350,
            tools=TOOLS,
            toolbar_location=TOOLBAR_LOCATION,
            match_aspect=True,
        )
        def noise() -> np.ndarray:
            # Random jitter in (-0.3, 0.3) so samples with identical
            # (truth, prediction) pairs do not overlap completely.
            return (np.random.beta(1, 1, size=len(y_true)) - 0.5) * 0.6
        p.scatter(
            x=y_true + noise(),
            y=y_pred + noise(),
            size=scatter_plot_circle_size(
                num_points=len(y_true),
                biggest=4.0,
                smallest=1.0,
                use_smallest_when_num_points_at_least=5000,
            ),
            color=DARK_BLUE,
            fill_alpha=SCATTER_CIRCLES_FILL_ALPHA,
            line_alpha=SCATTER_CIRCLES_LINE_ALPHA,
        )
        add_title_rows(p, title_rows)
        apply_default_style(p)
        p.xaxis.axis_label = "Ground Truth"
        p.yaxis.axis_label = "Prediction"
        arange = np.arange(len(class_names))
        p.xaxis.ticker = arange
        p.yaxis.ticker = arange
        p.xaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}
        p.yaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}
        p.xaxis.major_label_orientation = x_label_rotation
        p.yaxis.major_label_orientation = y_label_rotation
        # grid between classes, not at classes
        p.xgrid.ticker = arange[0:-1] + 0.5
        p.ygrid.ticker = arange[0:-1] + 0.5
        p.xgrid.grid_line_width = 3
        p.ygrid.grid_line_width = 3
        # prevent panning to empty regions
        p.x_range.bounds = (-0.5, -0.5 + len(class_names))
        p.y_range.bounds = (-0.5, -0.5 + len(class_names))
        return p
    return figure
def _bokeh_roc_curve(
    y_true_binary: np.ndarray,
    y_pred_score: np.ndarray,
    title_rows: Sequence[str],
    sample_weights: Optional[np.ndarray],
) -> Callable[[], Figure]:
    """Plots an interactive receiver operator characteristic (ROC) curve.
    Args:
        y_true_binary:
            An array of zeros and ones.
        y_pred_score:
            A continuous value, such as a probability estimate for the positive class.
        title_rows:
            Sequence of strings to be used for the chart title.
        sample_weights:
            Sequence of floats to modify the influence of individual samples.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    assert y_true_binary.shape == y_pred_score.shape
    assert set(y_true_binary).issubset({0, 1}) or set(y_true_binary).issubset(
        {False, True}
    )
    assert np.ndim(y_true_binary) == 1
    # One (FPR, TPR) point per decision threshold, computed by sklearn.
    fpr, tpr, thresholds = sklmetrics.roc_curve(
        y_true=y_true_binary, y_score=y_pred_score, sample_weight=sample_weights
    )
    def figure() -> Figure:
        source = ColumnDataSource(
            data={
                "FPR": fpr,
                "TPR": tpr,
                "threshold": thresholds,
                "specificity": 1.0 - fpr,
            }
        )
        p = plotting.figure(
            plot_height=400,
            plot_width=350,
            tools=TOOLS,
            toolbar_location=TOOLBAR_LOCATION,
            # toolbar_location=None, # hides entire toolbar
            match_aspect=True,
        )
        p.xaxis.axis_label = "FPR"
        p.yaxis.axis_label = "TPR"
        add_title_rows(p, title_rows)
        apply_default_style(p)
        curve = p.line(x="FPR", y="TPR", line_width=2, color=DARK_BLUE, source=source)
        # Chance-level diagonal for visual reference.
        p.line(
            x=[0.0, 1.0],
            y=[0.0, 1.0],
            line_alpha=0.75,
            color="grey",
            line_dash="dotted",
        )
        p.add_tools(
            HoverTool(
                # make sure there is no tool tip for the diagonal baseline
                renderers=[curve],
                tooltips=[
                    ("TPR", "@TPR"),
                    ("FPR", "@FPR"),
                    ("Sensitivity", "@TPR"),
                    ("Specificity", "@specificity"),
                    ("Threshold", "@threshold"),
                ],
                # display a tooltip whenever the cursor is vertically in line with a glyph
                mode="vline",
            )
        )
        return p
    return figure
def _bokeh_precision_recall_curve(
    y_true_binary: np.ndarray,
    y_pred_score: np.ndarray,
    title_rows: Sequence[str],
    sample_weights: Optional[np.ndarray],
) -> Callable[[], Figure]:
    """
    Plots an interactive precision recall curve.
    Args:
        y_true_binary:
            An array of zeros and ones.
        y_pred_score:
            A continuous value, such as a probability estimate for the positive class.
        title_rows:
            Sequence of strings to be used for the chart title.
        sample_weights:
            Sequence of floats to modify the influence of individual samples.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    assert y_true_binary.shape == y_pred_score.shape
    assert set(y_true_binary).issubset({0, 1}) or set(y_true_binary).issubset(
        {False, True}
    )
    assert np.ndim(y_true_binary) == 1
    # Note: len(thresholds) == len(precision) - 1
    # The last precision recall pair does not have a corresponding threshold.
    precision, recall, thresholds = sklmetrics.precision_recall_curve(
        y_true=y_true_binary, probas_pred=y_pred_score, sample_weight=sample_weights
    )
    # Drop the final threshold-less point so all three columns align.
    precision = precision[:-1]
    recall = recall[:-1]
    def figure() -> Figure:
        p = plotting.figure(
            plot_height=400,
            plot_width=350,
            x_range=(-0.05, 1.05),
            y_range=(-0.05, 1.05),
            tools=TOOLS,
            toolbar_location=TOOLBAR_LOCATION,
            # match_aspect=True,
        )
        source = ColumnDataSource(
            data={"precision": precision, "recall": recall, "threshold": thresholds}
        )
        # reminder: tpr == recall == sensitivity
        p.line(x="recall", y="precision", line_width=2, source=source)
        add_title_rows(p, title_rows)
        apply_default_style(p)
        p.xaxis.axis_label = "Recall"
        p.yaxis.axis_label = "Precision"
        p.add_tools(
            HoverTool(
                tooltips=[
                    ("Precision", "@precision"),
                    ("Recall", "@recall"),
                    ("Threshold", "@threshold"),
                ],
                # display a tooltip whenever the cursor is vertically in line with a glyph
                mode="vline",
            )
        )
        return p
    return figure
def _bokeh_automation_rate_analysis(
    y_target_one_hot: np.ndarray,
    y_pred_proba: np.ndarray,
    title_rows: Sequence[str],
    sample_weights: Optional[np.ndarray],
) -> Callable[[], Figure]:
    """
    Plots various quantities over automation rate, where a single probability threshold
    is used for all classes to decide if we are confident enough to automate the
    classification.
    Args:
        y_target_one_hot:
            Array with one-hot encoded ground truth, shape(n_samples, n_classes).
        y_pred_proba:
            Array with estimated probability distributions, shape(n_samples, n_classes).
        title_rows:
            Sequence of strings to be used for the chart title.
        sample_weights:
            Sequence of floats to modify the influence of individual samples.
    Returns:
        A callable that returns a fresh bokeh figure each time it is called
    """
    # ----- check input -----
    assert y_target_one_hot.ndim == 2
    assert y_pred_proba.ndim == 2
    assert (
        y_target_one_hot.shape == y_pred_proba.shape
    ), f"{y_target_one_hot.shape} != {y_pred_proba.shape}"
    check_normalization(y_target_one_hot, axis=1)
    check_normalization(y_pred_proba, axis=1)
    assert set(y_target_one_hot.ravel()) == {0, 1}, set(y_target_one_hot.ravel())
    if sample_weights is None:
        sample_weights = np.ones(len(y_target_one_hot))
    assert_that(sample_weights.shape).is_equal_to((len(y_target_one_hot),))
    # ----- compute chart data -----
    y_target = y_target_one_hot.argmax(axis=1)
    argmaxes = y_pred_proba.argmax(axis=1)
    maxes = y_pred_proba.max(axis=1)
    assert isinstance(maxes, np.ndarray)  # making IntelliJ's type checker happy
    chart_data: Dict[str, List[float]] = {
        "automation_rate": [],
        "threshold": [],
        "accuracy": [],
    }
    # Sweep the confidence threshold over every observed max-probability.
    # Since thresholds come from `maxes`, at least one sample is always
    # automated, so the accuracy below is computed on a non-empty subset.
    for threshold in sorted(maxes):
        automated = maxes >= threshold
        chart_data["automation_rate"].append(
            np.average(automated, weights=sample_weights)
        )
        chart_data["threshold"].append(threshold)
        chart_data["accuracy"].append(
            accuracy_score(
                y_true=y_target[automated],
                y_pred=argmaxes[automated],
                sample_weight=sample_weights[automated],
            )
        )
    def figure() -> Figure:
        # ----- bokeh plot -----
        p = plotting.figure(
            plot_height=400,
            plot_width=350,
            x_range=(-0.05, 1.05),
            y_range=(-0.05, 1.05),
            tools=TOOLS,
            toolbar_location=TOOLBAR_LOCATION,
            # match_aspect=True,
        )
        source = ColumnDataSource(
            data={key: np.array(lst) for key, lst in chart_data.items()}
        )
        accuracy_line = p.line(
            x="automation_rate",
            y="accuracy",
            line_width=2,
            color=DARK_BLUE,
            source=source,
            legend_label="Accuracy",
        )
        p.line(
            x="automation_rate",
            y="threshold",
            line_width=2,
            color="grey",
            source=source,
            legend_label="Threshold",
        )
        # make sure something is visible if lines consist of just a single point
        p.scatter(
            x=source.data["automation_rate"][[0, -1]],
            y=source.data["accuracy"][[0, -1]],
        )
        p.scatter(
            x=source.data["automation_rate"][[0, -1]],
            y=source.data["threshold"][[0, -1]],
            color="grey",
        )
        add_title_rows(p, title_rows)
        apply_default_style(p)
        p.xaxis.axis_label = "Automation Rate"
        p.legend.location = "bottom_left"
        p.add_tools(
            HoverTool(
                renderers=[accuracy_line],
                tooltips=[
                    ("Accuracy", "@accuracy"),
                    ("Threshold", "@threshold"),
                    ("Automation Rate", "@automation_rate"),
                ],
                # display a tooltip whenever the cursor is vertically in line with a glyph
                mode="vline",
            )
        )
        return p
    return figure
|
# DF model used for non-defended dataset
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, BatchNormalization
from keras.layers.core import Activation, Flatten, Dense, Dropout
from keras.layers.advanced_activations import ELU
from keras.initializers import glorot_uniform
class DFNet:
    """Deep Fingerprinting CNN used for the non-defended dataset."""
    @staticmethod
    def build(input_shape, classes):
        """Build the DF model.

        Args:
            input_shape: Shape of a single input sample.
            classes: Number of output classes.

        Returns:
            An uncompiled keras ``Sequential`` model ending in a softmax layer.
        """
        model = Sequential()
        #Block1
        # Index 0 is a 'None' placeholder so the hyperparameters can be
        # addressed by 1-based block number below.
        filter_num = ['None',32,32]
        kernel_size = ['None',8,8]
        conv_stride_size = ['None',1,1, 1]
        pool_stride_size = ['None',1, 1]
        pool_size = ['None',2,2]
        model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1], input_shape=input_shape,
                         strides=conv_stride_size[1], padding='same',
                         name='block1_conv1'))
        model.add(BatchNormalization(axis=-1))
        model.add(ELU(alpha=1.0, name='block1_adv_act1'))
        model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1],
                         strides=conv_stride_size[1], padding='same',
                         name='block1_conv2'))
        model.add(BatchNormalization(axis=-1))
        model.add(ELU(alpha=1.0, name='block1_adv_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[1], strides=pool_stride_size[1],
                               padding='same', name='block1_pool'))
        model.add(Dropout(0.2, name='block1_dropout'))
        # Block 2: same structure as block 1.
        # NOTE(review): layer names switch from 'blockN_adv_actN' to
        # 'blockN_actN' here — renaming would break saved weights, so the
        # inconsistency is only flagged.
        model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2],
                         strides=conv_stride_size[2], padding='same',
                         name='block2_conv1'))
        model.add(BatchNormalization())
        model.add(ELU(alpha=1.0, name='block2_act1'))
        model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2],
                         strides=conv_stride_size[2], padding='same',
                         name='block2_conv2'))
        model.add(BatchNormalization())
        model.add(ELU(alpha=1.0, name='block2_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[2], strides=pool_stride_size[2],
                               padding='same', name='block2_pool'))
        model.add(Dropout(0.2, name='block2_dropout'))
        # Classifier head: two dense+ReLU blocks, then the softmax output.
        model.add(Flatten(name='flatten'))
        model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc1'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='fc1_act'))
        model.add(Dropout(0.2, name='fc1_dropout'))
        model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc2'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='fc2_act'))
        model.add(Dropout(0.2, name='fc2_dropout'))
        model.add(Dense(classes, kernel_initializer=glorot_uniform(seed=0), name='fc3'))
        model.add(Activation('softmax', name="softmax"))
        return model
from plotly.offline import plot
from plotly.graph_objs import *
from django.shortcuts import render
from django.http import HttpResponse
from django.utils.translation import gettext as _
no_margin = Margin(l=0, r=0, b=0, t=0, pad=0)  # borderless layout (maps)
no_margin_with_padding = Margin(l=60, r=0, b=0, t=30, pad=0)  # room for y-axis labels and title
def plot_web(xys, title, variable, unit, names=None, xaxis_title=_('time'), mode="lines+markers"):
    '''Plot time series and return an embeddable HTML div.

    Args:
        xys: Iterable of (x, y) sequence pairs, one Scatter trace each.
        title: Chart title.
        variable, unit: Combined into the y-axis label "variable (unit)".
        names: Optional trace names, parallel to ``xys``.
        xaxis_title: X-axis label (defaults to the translated 'time').
        mode: Plotly scatter mode.
    '''
    data = [Scatter(x=xy[0], y=xy[1], mode=mode) for xy in xys]
    # Idiom fix: was `if not names is None` (PEP 8 / E714).
    if names is not None:
        for trace, name in zip(data, names):
            trace.name = name
    return plot({
        'data': data,
        'layout': Layout(
            title=title,
            margin=no_margin_with_padding,
            legend=dict(orientation="h"),
            xaxis={'title':xaxis_title},
            yaxis={'title': "%s (%s)"%(str(variable), str(unit))}
        )}, auto_open=False, output_type='div')
def box_plot(xys, title, variable, unit, names=None, xaxis_title=_('time')):
    '''Plot the first (x, y) pair as markers and the remaining pairs as box plots.

    Fix: replaced the mutable default argument ``names=[]`` with ``None``
    (shared-mutable-default pitfall); the ``if names:`` check behaves the same.
    '''
    data = [Scatter(x=xys[0][0], y=xys[0][1], mode='markers')]
    data.extend([Box(x=xy[0], y=xy[1], whiskerwidth=50, marker=dict(size=2)) for xy in xys[1:]])
    if names:
        for trace, name in zip(data, names):
            trace.name = name
    return plot({
        'data': data,
        'layout': Layout(
            title=title,
            margin=no_margin_with_padding,
            legend=dict(orientation="h"),
            xaxis={'title':xaxis_title},
            yaxis={'title': "%s (%s)"%(str(variable), str(unit))}
        )}, auto_open=False, output_type='div')
def plot_polar(xys, title, variable, unit, names=None):
    '''Polar plot with one trace per (dates, values) pair; angles are the years.

    Fix: replaced the mutable default argument ``names=[]`` with ``None``
    (shared-mutable-default pitfall); behavior is unchanged.
    '''
    data = [Scatter(t=[d.strftime("%Y") for d in xy[0]], r=xy[1],mode='lines+markers',marker=dict(opacity=0.7)) for xy in xys]
    if names:
        for trace, name in zip(data, names):
            trace.name = name
    return plot({
        'data':data,
        'layout':Layout(title=title,orientation=-90,xaxis={'title':_('time')},)
        },auto_open=False, output_type='div')
def plot_map(lat, lon, text):
    '''Plot map of stations

    Renders a Mapbox scatter of station coordinates, centered on the
    first station, and returns an embeddable HTML div.
    '''
    # NOTE(review/security): Mapbox access token is hard-coded in source —
    # consider moving it to settings/environment configuration.
    mpt = 'pk.eyJ1IjoiYWRlbHNvbmpyIiwiYSI6ImNqNTV0czRkejBnMnkzMnBtdXdsbmRlbDcifQ.Ox8xbLTD_cD7h3uEz13avQ'
    data = Data([
        Scattermapbox(
            lat=lat,
            lon=lon,
            mode='markers',
            marker=Marker(size=14, color='rgb(0, 50, 40)'),
            text=text
        )
    ])
    layout = Layout(
        autosize=True,
        margin=no_margin,
        hovermode='closest',
        mapbox=dict(
            accesstoken=mpt,
            bearing=0,
            # Center the map on the first station.
            center=dict(lat=float(lat[0]), lon=float(lon[0])),
            pitch=0,
            zoom=7
        )
    )
    fig = dict(data=data, layout=layout)
    return plot(fig, auto_open=False, output_type='div')
|
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
# https://app.codility.com/demo/results/trainingUHZUU8-3XB/
def solution(A):
    """Count the equi-leader split points of A (Codility EquiLeader).

    An index S is an equi-leader if the same value is the strict-majority
    leader of both A[0..S] and A[S+1..N-1].  Runs in O(N) time using two
    running multisets (prefix and suffix counts).
    """
    # Multiset of the elements still to the right of the split point.
    suffix_counts = {}
    for value in A:
        suffix_counts[value] = suffix_counts.get(value, 0) + 1
    prefix_counts = {}
    leader, leader_count = 0, 0
    equi_leaders = 0
    for position, value in enumerate(A):
        # Move `value` from the suffix side to the prefix side.
        suffix_counts[value] -= 1
        prefix_counts[value] = prefix_counts.get(value, 0) + 1
        # The prefix leader can only change to the element just added.
        if prefix_counts[value] > leader_count:
            leader, leader_count = value, prefix_counts[value]
        prefix_len = position + 1
        suffix_len = len(A) - prefix_len
        # The suffix leader, if any, must equal the prefix leader.
        if leader_count > prefix_len // 2 and suffix_counts[leader] > suffix_len // 2:
            equi_leaders += 1
    return equi_leaders
|
# Parse the numeric confidence out of a mail-header-style line.
# Fix: the original bound the line to `str`, shadowing the builtin.
line = 'X-DSPAM-Confidence: 0.8475'
colon_pos = line.find(':')
piece = line[colon_pos + 2:]  # skip the colon and the following space
value = float(piece)
print(value)
|
#! /usr/bin/env python
"""
Usage:
report_slow.py [options]
report_slow.py [options] <query_tag> ...
report_slow.py [options] --days=<D> [<query_tag> ...]
report_slow.py [options] --start-date=<start_date> [<query_tag> ...]
report_slow.py [options] --start-date=<start_date> --end-date=<end_date> [<query_tag> ...]
report_slow.py (-h | --help)
report_slow.py (-v | --version)
Options:
-h, --help Show the help
-t <N>, --top=<N>    Report on the recent top N queries
-v, --version Show the current version
--no-sql Don't show sql for the query tag
Others:
-d=<D>, --days=<D> Report for <D> days from today
--start-date=<start_date> Report starting from <start_date>
--end-date=<end_date> Report ending at <end_date>
Examples:
report_slow.py
With no options the script will report on top level statistics.
report_slow.py 0x71091F7BB15C9A47
Adding a query tag will reports on a specific query.
report_slow.py --top=5 --days=10
Report on the latest top 5 queries and show 10 days worth of data
report_slow.py --top=5 --start-date=2016-01-10
Report on the latest top 5 queries from start-date to the current date.
The date formatting is very flexible; you can enter dates like
1/1/16 1/01/2016
"""
"""
need to add a date range query option
"""
import os
import glob
import re
from datetime import datetime, timedelta
from dateutil.parser import parse
import itertools
import traceback
from docopt import docopt
import sqlparse
from multimethod import multimethod
__version__ = 0.11
def translate_unit(unit):
    """Return the numeric multiplier for a pt-query-digest unit suffix.

    'G'/'M'/'k' scale counts and byte sizes; 's'/'ms'/'us' scale times to
    seconds; the empty suffix means no scaling.

    :raises AssertionError: for any unrecognised suffix (as before).
    """
    # Table lookup replaces the original if/elif ladder.
    scale = {
        'G': 1000000000,
        'M': 1000000,
        'k': 1000,
        's': 1,
        'ms': .001,
        'us': .000001,
        '': 1,
    }
    assert unit in scale, 'bad unit passed'
    return scale[unit]
def getnumber(s):
    """Parse a pt-query-digest figure like '23.5k', '330ms' or '176us'
    into a float scaled by its unit suffix (see translate_unit).

    Fix: the original used the Python 2 builtin `reduce`, a NameError on
    Python 3; the "first non-empty piece" selection is now a generator
    with the same empty-string fallback (so float('') still raises
    ValueError on a value-less input, as before).
    """
    # numeric part: strip the unit suffix off and take the first non-empty token
    num = float(next((tok for tok in re.split(r'G|M|k|ms|s|us', s) if tok), ''))
    # unit part: split on the digit/dot runs and take the first non-empty token
    unit = next((tok for tok in re.split(r'[0-9.]*', s) if tok), '')
    return num * translate_unit(unit)
def get_report_date(f):
    """Return (date, time) parsed from the '# Time range' line in the
    first ten lines of the already-open report file `f`, then rewind `f`
    so callers can re-read it.

    pt-query-digest emits two header layouts; the 8-token layout
    ('... START_DATE START_TIME to END_DATE END_TIME', tokens 6:8) is
    validated first and the shorter layout (tokens 3:4) is the fallback.

    Fix: Python 2 `except ValueError, e` syntax; the unused binding is
    dropped.  islice stops reading after ten lines instead of scanning
    the whole file.
    """
    f.seek(0, 0)
    ltimerange = [l for l in itertools.islice(f, 10) if 'Time range' in l]
    timerange = ltimerange[0].split()
    try:
        # Validate the combined end date+time; raises ValueError on the short layout.
        time = ' '.join(timerange[6:8])
        datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        date = datetime.strptime(timerange[6], '%Y-%m-%d')
        time = datetime.strptime(timerange[7], '%H:%M:%S')
    except ValueError:
        date = datetime.strptime(timerange[3], '%Y-%m-%d')
        time = datetime.strptime(timerange[4], '%H:%M:%S')
    f.seek(0)
    return date, time
def get_top_n_tags(n):
    '''
    Return the query tags of the top-N rows in the '# Profile' table of the
    newest non-trivial slow report, e.g.:

    # Rank Query ID           Response time    Calls  R/Call  V/M  Item
    # ==== ================== ================ ====== ====== ==== =====
    #    1 0x71091F7BB15C9A47 23497.2507 12.1% 109369 0.2148 0.53 SELECT ...

    Fixes: `files is not []` compared identity against a fresh list and was
    therefore always true, so an empty glob crashed on the unbound buffer;
    a missing '# Profile' section now returns None instead of raising on
    `m.group`.

    :param n: top N tags
    :return: list of tags, or None when no usable report was found
    '''
    tags = None
    log_path = 'log' if os.uname()[1].split('.')[0] == 'SUN-IT608L' else ''
    files = glob.glob(os.path.join(log_path, 'slow_201?-*log'))
    p = re.compile(r'# Profile.*# Query 1:', re.DOTALL)
    buff = ''
    if files:
        # Newest first by name; skip stub files (< 512 bytes) and read the
        # first real report.
        for file in sorted(files, reverse=True):
            if os.path.getsize(file) < 512:
                continue
            with open(file, 'r') as f:
                buff = f.read()
            break
        m = p.search(buff)
        if m is not None:
            n = int(n) + 3
            # Skip the three table-header lines, keep the next n ranks.
            plist = m.group(0).splitlines()[3:+n]
            plist = [e.split() for e in plist]
            # Token 2 of each profile row is the query tag (0x...).
            tags = [e[2] for e in plist]
    return tags
def get_top_level_data(data):
    '''
    Parse the report's top-level summary lines into a dict of totals.

    `data` is the pre-filtered list of header lines (see top_head); an
    attribute row looks like:
    # Exec time    152222s    6us   436s    5ms    3ms  209ms  176us
    and its numeric columns start at token 3 ('total' first).

    Fix: Python 2 `except ValueError, e` syntax (the binding was unused).
    '''
    def stat(label):
        # First line containing `label`; columns 3+ are total/min/max/avg/...
        return [getnumber(tok) for tok in [l for l in data if label in l][0].split()[3:]]
    top_data = {}
    overall = [l for l in data if 'Overall:' in l][0].split()
    timerange = [l for l in data if 'Time range:' in l][0].split()
    top_data['tot_count'] = getnumber(overall[2])
    top_data['unique_queries'] = getnumber(overall[4])
    # Only the 'total' column (index 0) of each attribute row is reported.
    top_data['tot_exectime'] = stat('Exec time')[0]
    top_data['tot_locktime'] = stat('Lock time')[0]
    top_data['tot_rowssent'] = stat('Rows sent')[0]
    top_data['tot_bytessent'] = stat('Bytes sent')[0]
    top_data['tot_querysize'] = stat('Query size')[0]
    top_data['tot_rowsaffecte'] = stat('Rows affecte')[0]
    try:
        # Newer layout: '# Time range: START_D START_T to END_D END_T' (tokens 6:8).
        time = ' '.join(timerange[6:8])
        datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        top_data['date'] = timerange[6]
        top_data['time'] = timerange[7]
    except ValueError:
        # Older/shorter layout: date/time are tokens 3 and 4.
        top_data['date'] = timerange[3]
        top_data['time'] = timerange[4]
    return top_data
def format_query_tag(date, time, qdata, hdata):
    '''
    Print one history row for a query tag: the tag's own count/exec/lock
    totals, its average response time, its share of the host-wide totals,
    and the host totals from the same report.  A per-query section of the
    report looks like:

    # Query 1: 0.04 QPS, 0.12x concurrency, ID 0x0344CA752955E058 ...
    # Attribute    pct   total     min     max     avg     95%  stddev  median
    # Count          0    3690
    # Exec time     12  10054s   330ms      7s      3s      5s      2s      1s
    '''
    # Guard every ratio against a zero denominator (tag absent from report).
    aveqtime = qdata['exectime']/qdata['count'] if qdata['count']>0 else 0
    taveqtime = hdata['tot_exectime']/hdata['tot_count'] if hdata['tot_count']>0 else 0
    pct_exectime = qdata['exectime']/hdata['tot_exectime']*100 if hdata['tot_exectime']>0 else 0
    pct_count = qdata['count']/hdata['tot_count']*100 if hdata['tot_count']>0 else 0
    sdate = datetime.strftime(date, '%Y-%m-%d')
    stime = datetime.strftime(time, '%H:%M:%S')
    #
    #print('{0:10} {1:8} {2:>10} {3:>9} {4:>9} {5:>12} {6:>11s} {7:>6s} {8:>12s} {9:>11s} {10:11s}'.format(
    # Columns match the header printed by do_query_tag_report; the host-wide
    # average response time is reported in milliseconds (taveqtime*1000).
    print('{0:10} {1:8} {2[count]:>10.0f} {2[exectime]:>9.0f} {2[locktime]:>9.0f} '
          '{3:>12.5f} {4:>11.2f} {5:>6.2f} '
          '{6[tot_count]:>12.0f} {6[tot_exectime]:>11.0f} {7:>11.2f}'.format(
        sdate,
        stime,
        qdata,
        aveqtime,
        pct_exectime,
        pct_count,
        hdata,
        taveqtime*1000))
def format(data):
    """Print one top-level summary row (see main_top's header).

    NOTE: the name shadows the builtin `format`; kept for compatibility
    with existing callers.
    """
    # Average response time, reported in milliseconds.
    avg_response_ms = data['tot_exectime'] / data['tot_count'] * 1000
    row = ('{0[date]:10} {0[time]:8} {0[tot_count]:>12.0f} {0[unique_queries]:>6.0f} '
           '{0[tot_exectime]:>9.0f} {0[tot_locktime]:>9.0f} {1:>12.5f}').format(data, avg_response_ms)
    print(row)
def top_head(f, N):
    """
    Extract and parse the top-level header of a report.

    Rewinds `f`, scans its first `N` lines for the summary rows of
    interest, and hands the stripped matches to get_top_level_data.

    :param f: open report file
    :param N: number of leading lines to scan
    :return: dict of top-level statistics
    """
    wanted = re.compile(r'Exec time|Current date:|Time range:|Overall|Lock time|Rows sent|Rows examined|Rows affecte|Bytes sent|Query size')
    f.seek(0,0)
    summary = [line.strip() for line in itertools.islice(f, N) if wanted.search(line)]
    return get_top_level_data(summary)
def get_query_data(data):
    """
    Parse one query's pre-filtered attribute lines into a dict of totals.

    A per-query attribute row looks like:
    # Attribute    pct   total     min     max     avg     95%  stddev  median
    # Count          0    3690
    # Exec time     12  10054s   330ms      7s      3s      5s      2s      1s
    so the 'total' column is token 3 for Count and token 4 for the rest.
    Returns an all-zero dict when the expected lines are missing (tag not
    present in this report).

    Fix: Python 2 `except IndexError, e` syntax (the binding was unused).

    :param data: list of '# <Attribute> ...' lines
    :return: dict with count/exectime/locktime/rowssent/bytessent/
             querysize/rowsaffecte
    """
    def total(label):
        # token 4 is the 'total' column on per-query attribute rows
        return getnumber([l for l in data if label in l][0].split()[4])
    qdata = {}
    try:
        qdata['count'] = getnumber([l for l in data if 'Count' in l][0].split()[3])
        qdata['exectime'] = total('Exec time')
        qdata['locktime'] = total('Lock time')
        qdata['rowssent'] = total('Rows sent')
        qdata['bytessent'] = total('Bytes sent')
        qdata['querysize'] = total('Query size')
        qdata['rowsaffecte'] = total('Rows affecte')
    except IndexError:
        # Tag absent from this report: emit an all-zero row instead of failing.
        for key in ('count', 'exectime', 'locktime', 'rowssent',
                    'bytessent', 'querysize', 'rowsaffecte'):
            qdata[key] = 0
    return qdata
def head_match(f, match, N):
    '''
    Find a query tag's section in the report and return its parsed stats
    plus the SQL text (with \\G terminators replaced by ';').

    The tag appears exactly twice in a report: once in the profile table
    and once at the head of its own section; the section runs from the
    second occurrence to the first blank line.

    Fixes: `filter()` is lazy on Python 3 so `len()` on it failed — a list
    comprehension is used instead; the Python 2 `except X, e` syntax; and
    the missing-SQL case actually raises IndexError (from the `[0]`
    subscript), not ValueError, so that is what is caught now.
    '''
    p = re.compile(r'# Count|# Exec time|# Lock time|# Rows |# Bytes sent|# Query size|# Databases|# Hosts|#Users')
    sqlp = re.compile(r'# EXPLAIN \/\*!50100 PARTITIONS\*\/|call |COMMIT|update|insert')
    head = None
    sql = None
    matches = [pair for pair in enumerate(f) if match in pair[1]]
    if len(matches) == 2:
        f.seek(0, 0)
        S = matches[1][0]            # line number of the per-query section
        buff = f.readlines()
        E = buff[S:].index('\n')     # section ends at the first blank line
        head = [l.strip() for l in buff[S:S+E]]
        try:
            s = [i for i, l in enumerate(head) if sqlp.search(l)][0]
            sql = [re.sub(r'\\G', ';', l) for l in head[s:]]
        except IndexError:
            sql = ''
    qdata = [l for l in head if p.search(l)] if head else []
    qdata = get_query_data(qdata)
    return qdata, sql
def gfiles(days=None, start_date=None, end_date=None):
    """
    Generator yielding the report files (newest first) that meet the date
    criteria.  Exactly one mode is used: `days` back from today, or
    `start_date` (optionally bounded by `end_date`).

    NOTE(review): when neither `days` nor `start_date` is given the
    generator yields nothing — presumably callers always pass one of
    them; confirm.  Also note `if days:` treats days=0 like "not given".

    :param days: int number of days from now
    :param start_date: datetime / parseable date string
    :param end_date: datetime / parseable date string
    :return: iterable of file paths
    """
    log_path = 'log' if os.uname()[1].split('.')[0] == 'SUN-IT608L' else ''
    files = glob.glob(os.path.join(log_path, 'slow_201?-*log'))
    # Normalise CLI string values to int / datetime.
    days = days if isinstance(days, int) or days is None else int(days)
    start = parse(start_date) if start_date else start_date
    end = parse(end_date) if end_date else end_date
    if days:
        compare = lambda d:d > datetime.now().date() - timedelta(days=int(days))
        for file in sorted(files, reverse=True):
            # Skip stub reports (< 512 bytes).
            if os.path.getsize(file) < 512:
                continue
            with open(file, 'r') as f:
                date, time = get_report_date(f)
                if compare(date.date()):
                    yield file
                else:
                    # Files sort newest-first by name, so the first report
                    # older than the window ends the scan.
                    break
    elif start_date:
        if end_date:
            compare = lambda d:d >= start.date() and d <= end.date()
        else:
            compare = lambda d:d >= start.date()
        for file in sorted(files, reverse=True):
            with open(file, 'r') as f:
                date, time = get_report_date(f)
                if compare(date.date()):
                    yield file
def do_query_tag_report(query_tag, i, days, start, end, no_show_sql):
    """Print the history for one query tag: its SQL (once, unless
    suppressed), a header row, then one data row per report file in the
    requested date range.

    Fix: Python 2 `except IndexError, e` syntax (binding unused).
    """
    first = True
    print('\nSlow report for tag #{} {}\n'.format(i+1,query_tag))
    for file in gfiles(days, start, end):
        with open(file, 'r') as f:
            date, time = get_report_date(f)
            qdata, sql = head_match(f, query_tag, 70)
            hdata = top_head(f, 70)
        if first:
            # Only on the first file: show the SQL and the column header.
            first = False
            if not no_show_sql:
                try:
                    print(sqlparse.format('\n'.join(sql), reindent=True, keyword_case='upper'))
                except IndexError:
                    # sqlparse could not re-indent this statement; print it plain.
                    print(sqlparse.format('\n'.join(sql), keyword_case='upper'))
                print('')
            print('{0:10} {1:8} {2:>10} {3:>9} {4:>9} {5:>12} {6:>11s} '
                  '{7:>6s} {8:>12s} {9:>11s} {10:11s}'.format(
                'Date',
                'Time',
                'Count',
                'Exec_Time',
                'Lock_Time',
                'QAve_Resp',
                '%Qexec_time',
                '%Count',
                'Tot_Q_Count',
                'Tot_time',
                'Tot_ave_resp'
            ))
        format_query_tag(date, time, qdata, hdata)
#@multimethod(dict)
def main_tags(query_tag, days=None, start=None, end=None, no_show_sql=False):
    """Run the per-tag slow-query report for every requested tag, numbered from 1."""
    index = 0
    for tag in query_tag:
        do_query_tag_report(tag, index, days, start, end, no_show_sql)
        index += 1
#@multimethod()
def main_top(days=None, start=None, end=None):
    """Print one top-level summary row per report file in the date range.

    Fixes: Python 2 `except Exception, e` syntax and `e.message`, which no
    longer exists on Python 3 exceptions (`str(e)` carries the same text).
    """
    print('{0:10} {1:8} {2:12} {3:6} {4:9} {5:9} {6:12}'.format(
        'Date', 'Time', 'Total_Queries', 'Unique', 'Exec_Time', 'Lock_Time', 'Ave_Response'))
    for file in gfiles(days, start, end):
        with open(file, 'r') as f:
            data = top_head(f, 70)
        try:
            format(data)
        except Exception as e:
            # Best-effort: report the failure for this file and keep going.
            print(e.__doc__)
            print(str(e))
            print(traceback.format_exc())
def main(**args):
    """Dispatch to the right report based on the parsed docopt arguments.

    Fix: Python 2 `print type(args)` statement.
    """
    print(type(args))
    if args['<query_tag>'] == [] and args['--top'] is None:
        # No tags and no --top: top-level summary report.
        main_top(args['--days'], args['--start-date'], args['--end-date'])
    elif len(args['<query_tag>'])>0:
        main_tags(args['<query_tag>'], args['--days'], args['--start-date'], args['--end-date'], args['--no-sql'])
    else:
        # --top N given: resolve the tags from the newest report first.
        tags = get_top_n_tags(args['--top'])
        main_tags(tags, args['--days'], args['--start-date'], args['--end-date'], args['--no-sql'])
if __name__ == '__main__':
    args = docopt(__doc__, version=__version__)
    # Debug dump of the parsed arguments (fix: Python 2 `print args` statement).
    print(args)
    """
    {
        "--days": null,
        "--end-date": null,
        "--help": false,
        "--start-date": null,
        "--top": null,
        "--version": true,
        "<query_tag>": []
    }
    """
    main(**args)
    """
    print [d for d in gfiles(start_date=datetime.strptime('2016-01-03', '%Y-%m-%d'),
                             end_date=datetime.strptime('2016-01-07', '%Y-%m-%d'))]
    print '\n'.join([d for d in gfiles(
        start_date=datetime.strptime('2016-01-10', '%Y-%m-%d'))
        ])
    """
|
import numpy as np
import pickle
import keras
from keras.models import Model , Sequential
from keras.layers import Dense, Input, Reshape, Lambda, Concatenate
from keras import backend as K
import tensorflow as tf
from keras import objectives , optimizers, callbacks
import matplotlib.pyplot as plt
import h5py
import os
import Kalman_tools
from Kalman_tools import expectation, maximization, EM_step
# Dimensions: w = encoded observation, z = latent LDS state, v = mapped
# action, x = flattened 40x40 image, u = raw action.
default_act_func = 'relu'
w_dim, z_dim, v_dim, x_dim, u_dim = 2, 2, 2, 40*40, 2
# Initial LDS parameters: prior (mu_0, Sig_0), transition (A, b, H, Q),
# emission (C, d, R); small uniform noise breaks symmetry.
mu_0, Sig_0 = np.zeros([z_dim,1]), np.eye(z_dim)
A,b,H,Q = np.eye(z_dim) + np.random.uniform(-0.1,0.1,z_dim*z_dim).reshape([z_dim,z_dim]), np.zeros([z_dim,1]), np.ones([z_dim, v_dim])/v_dim, np.eye(z_dim)
C,d,R = np.ones([w_dim, z_dim])/z_dim + np.random.uniform(-0.1,0.1,w_dim*z_dim).reshape([w_dim,z_dim]), np.zeros([w_dim,1]), np.eye(w_dim)
# Encoder: x -> w.
x = Input(shape=(x_dim,))
h = Dense(256, activation='relu')(x)
w = Dense(w_dim)(h)
enc = Model([x],[w])
# Decoder layers are shared between the autoencoder and the stand-alone decoder.
decoder_h = Dense(256, activation='relu')
decoder_mean = Dense(x_dim, activation='sigmoid')
h_decoded = decoder_h(w)
x_bar = decoder_mean(h_decoded)
decoder_input = Input(shape=(w_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
dec = Model(decoder_input, _x_decoded_mean)
# Autoencoder with two outputs: reconstruction x_bar and latent code w.
AE = Model([x],[x_bar,w])
# Action map: raw control u -> LDS control input v.
act_map = Sequential()
act_map.add(Dense(5, input_shape=(u_dim,), activation=default_act_func))
act_map.add(Dense(5, activation=default_act_func))
act_map.add(Dense(v_dim, activation='sigmoid'))
def AE_recons_loss(x_true, x_bar):
    # Per-pixel binary cross-entropy scaled by the image size, so the
    # reconstruction term is a sum (not a mean) over the x_dim pixels.
    return x_dim * keras.metrics.binary_crossentropy(x_true, x_bar)#keras.losses.mean_squared_error(x_true, x_bar) ## might be better to be changed to binary_cross_entropy
def AE_TRAIN(net_in, net_out, LDS_loss, lr, loss_weights, epochs):
    # (Re)compile the autoencoder with the two-term loss (reconstruction +
    # LDS consistency, weighted by loss_weights) and fit it.
    # NOTE: reads the module-level `batch_size` defined further down the
    # script, so this must only be called after that assignment.
    AE_adam = optimizers.Adam(lr=lr, beta_1=0.1)
    AE.compile(optimizer=AE_adam, loss=[AE_recons_loss, LDS_loss], \
               loss_weights=loss_weights)
    hist = AE.fit( net_in, net_out,
                   shuffle=True,
                   epochs= epochs,
                   batch_size=batch_size,
                   verbose=1)
    return hist
# ---- training data ------------------------------------------------------
print('data load start')
[tmp_X, tmp_U, _] = pickle.load(open('plane_random_trajectory_train', 'rb'))
print('data load finish')
# Per-trajectory lists: images flattened to (T, x_dim); the last action of
# each trajectory is dropped (it has no following state).
x_all = [None] * len(tmp_X)
u_all = [None] * len(tmp_U)
for i in range(len(tmp_X)):
    x_all[i] = tmp_X[i].reshape([tmp_X[i].shape[0],-1])
    u_all[i] = tmp_U[i][:-1,:].reshape([tmp_U[i].shape[0]-1,-1])
n_train = 0;
for i in range(len(x_all)):
    n_train = n_train + x_all[i].shape[0]
# Stack all trajectories into one (n_train, x_dim) matrix, preallocated.
x_train = np.zeros([n_train,x_dim])
i_start, i_finish = 0, -1
for i in range(len(x_all)):
    i_finish = i_start + len(x_all[i])
    x_train[i_start:i_finish,:] = x_all[i]
    i_start = i_finish
'''
x_train = x_all[0]
for i in range(1,len(x_all)):
    x_train = np.concatenate([x_train, x_all[i]])
'''
print('train data is ready.')
# ---- test data ----------------------------------------------------------
print('data load start')
[tmp_X, _, _] = pickle.load(open('plane_random_trajectory_test', 'rb'))
print('data load finish')
x_test_all = [None] * len(tmp_X)
for i in range(len(tmp_X)):
    x_test_all[i] = tmp_X[i].reshape([tmp_X[i].shape[0],-1])
x_test = x_test_all[0]
for i in range(1,len(x_test_all)):
    x_test = np.concatenate([x_test, x_test_all[i]])
print('test data is ready.')
# Buffers for the regression targets fed to the AE / action-map losses.
EzT_CT_Rinv_minus_dT_Rinv = np.zeros([x_train.shape[0],w_dim])
u_train = u_all[0]
for i in range(1,len(u_all)):
    u_train = np.concatenate([u_train, u_all[i]])
EztT_minus_Ezt_1TAT_bT_alltimes_QinvH = np.zeros([u_train.shape[0],v_dim])
w_all = [None] * len(x_all)
v_all = [None] * len(u_all)
# EM schedule and bookkeeping.
IterNum_EM = 50
IterNum_CoordAsc = 5
recons_error = []
#IterNum_DeepTrain = 1000
batch_size = 100
loglik = []
# Outer EM loop: E-step (latent-state smoothing) then a coordinate-ascent
# M-step over the LDS parameters and the networks.
for iter_EM in range(IterNum_EM):
    if not(os.path.isdir('./tuned_params')):
        os.mkdir('./tuned_params')
    if not(os.path.isdir('./tuned_params/' + str(iter_EM))):
        os.mkdir('./tuned_params/' + str(iter_EM))
    # Refresh latent observations w and mapped actions v from the current nets.
    for i in range(len(x_all)):
        w_all[i] = enc.predict(x_all[i])
    for i in range(len(u_all)):
        v_all[i] = act_map.predict(u_all[i])
    if iter_EM == 0:
        # Baseline log-likelihood / reconstruction error before any training.
        loglik.append(Kalman_tools.log_likelihood(w_all,A,b,H,v_all,C,d,Q,R,mu_0,Sig_0))
        print('')
        print('loglik = ')
        print(loglik)
        [x_bar, _] = AE.predict(x_test)
        tmp = np.mean((x_bar - x_test) ** 2)
        recons_error.append(tmp)
        print('recons_error = ')
        print(recons_error)
    # E-step: smoothed first/second moments of the latent state.
    [Ezt, EztztT, Ezt_1ztT] = expectation(w_all,A,b,H,v_all,C,d,Q,R,mu_0,Sig_0)
    # M-step by coordinate ascent: LDS params, then encoder/decoder, then act_map.
    for iter_CoorAsc in range(IterNum_CoordAsc):
        for i in range(len(x_all)):
            w_all[i] = enc.predict(x_all[i])
        for i in range(len(u_all)):
            v_all[i] = act_map.predict(u_all[i])
        [A,b,H,C,d,Q,R,mu_0,Sig_0] = maximization(Ezt, EztztT, Ezt_1ztT, w_all, v_all, b, d)
        Rinv = np.linalg.inv(R)
        Qinv = np.linalg.inv(Q)
        # Regression target for the encoder loss: E[z]^T C^T R^-1 - d^T R^-1, per frame.
        i_start, i_end = 0, -1
        for i in range(len(w_all)):
            i_end = i_start + w_all[i].shape[0]
            EzT_CT = np.matmul(Ezt[i].T, C.T)
            EzT_CT_minus_dT = EzT_CT - np.tile(d.reshape([1,-1]),[EzT_CT.shape[0],1])
            EzT_CT_Rinv_minus_dT_Rinv[i_start:i_end,:] = np.matmul(EzT_CT_minus_dT, Rinv)
            i_start = i_end
        Rinv = tf.constant(Rinv, dtype='float32')
        N = x_train.shape[0]
        def LDS_loss(EzT_CT_Rinv_minus_dT_Rinv, w):
            # Keras loss(y_true, y_pred): y_true is the precomputed target
            # above; this is the w-dependent part of -E[log p(w|z)].
            sh = K.shape(w)
            return -tf.matmul(tf.reshape(EzT_CT_Rinv_minus_dT_Rinv,[sh[0],1,sh[1]]), tf.reshape(w,[sh[0],sh[1],1])) \
                + 0.5 * tf.matmul(
                    tf.reshape(tf.matmul(w,Rinv),[sh[0],1,-1])
                    ,tf.reshape(w,[sh[0],sh[1],1])
                    )
        if (iter_EM == 0) and (iter_CoorAsc == 0):
            # One-off long pretraining pass with the LDS term switched off.
            hist = AE_TRAIN(net_in=x_train, net_out=[x_train, EzT_CT_Rinv_minus_dT_Rinv], LDS_loss = LDS_loss, lr=0.001, loss_weights=[1., 0.], epochs=1000)
            print('-------------------')
            print(np.mean(hist.history[list(hist.history.keys())[1]][-10:]))
            print(np.mean(hist.history[list(hist.history.keys())[3]][-10:]))
            print('-------------------')
        # Anneal: decreasing learning rate, gradually enabling the LDS term.
        hist = AE_TRAIN(net_in=x_train, net_out=[x_train, EzT_CT_Rinv_minus_dT_Rinv], LDS_loss = LDS_loss, lr=0.0001, loss_weights=[1., 0.], epochs=50)
        print('-------------------')
        print(np.mean(hist.history[list(hist.history.keys())[1]][-10:]))
        print(np.mean(hist.history[list(hist.history.keys())[3]][-10:]))
        print('-------------------')
        hist = AE_TRAIN(net_in=x_train, net_out=[x_train, EzT_CT_Rinv_minus_dT_Rinv], LDS_loss = LDS_loss, lr=0.0001, loss_weights=[1., .00001], epochs=50)
        print('-------------------')
        print(np.mean(hist.history[list(hist.history.keys())[1]][-10:]))
        print(np.mean(hist.history[list(hist.history.keys())[3]][-10:]))
        print('-------------------')
        hist = AE_TRAIN(net_in=x_train, net_out=[x_train, EzT_CT_Rinv_minus_dT_Rinv], LDS_loss = LDS_loss, lr=0.00001, loss_weights=[1., .00001], epochs=50)
        print('-------------------')
        print(np.mean(hist.history[list(hist.history.keys())[1]][-10:]))
        print(np.mean(hist.history[list(hist.history.keys())[3]][-10:]))
        print('-------------------')
        # NOTE(review): three loss_weights for a two-output model — Keras
        # will reject this call; presumably [1., .00001] was intended.
        hist = AE_TRAIN(net_in=x_train, net_out=[x_train, EzT_CT_Rinv_minus_dT_Rinv], LDS_loss = LDS_loss, lr=0.000001, loss_weights=[1., 1., .00001], epochs=50)
        print('-------------------')
        print(np.mean(hist.history[list(hist.history.keys())[1]][-10:]))
        print(np.mean(hist.history[list(hist.history.keys())[3]][-10:]))
        print('-------------------')
i_start, i_end = 0, -1
for i in range(len(v_all)):
i_end = i_start + v_all[i].shape[0]
EztT_minus_Ezt_1TAT_bT = Ezt[i][:,1:].T - np.matmul(Ezt[i][:,:-1].T,A.T) - np.tile(b.T,[Ezt[i].shape[1]-1,1])
EztT_minus_Ezt_1TAT_bT_alltimes_QinvH[i_start:i_end,:] = np.matmul(np.matmul(EztT_minus_Ezt_1TAT_bT, Qinv),H)
HTQinvH = np.matmul(np.matmul(H.T, Qinv),H)
HTQinvH = tf.constant(HTQinvH, dtype='float32')
def act_map_loss(EztT_minus_Ezt_1TAT_bT_alltimes_QinvH, v):
sh = K.shape(v)
return -tf.matmul(tf.reshape(EztT_minus_Ezt_1TAT_bT_alltimes_QinvH, [sh[0],1,sh[1]]), tf.reshape(v,[sh[0],sh[1],1]))\
+ 0.5 * tf.matmul(tf.reshape(tf.matmul(v, HTQinvH),[sh[0],1,sh[1]]), tf.reshape(v,[sh[0],sh[1],1]))
act_map_learning_rate = .0005
act_map_adam = optimizers.Adam(lr=act_map_learning_rate, beta_1=0.1)
act_map.compile(optimizer=act_map_adam, loss=act_map_loss)
u_tr_len = u_train.shape[0]-np.mod(u_train.shape[0],batch_size)
hist = act_map.fit( u_train[:u_tr_len,:] , EztT_minus_Ezt_1TAT_bT_alltimes_QinvH[:u_tr_len,:],
shuffle=True,
epochs= 100,
batch_size=batch_size,
verbose=0)
print(np.mean(hist.history['loss'][-10:]))
loglik.append(Kalman_tools.log_likelihood(w_all,A,b,H,v_all,C,d,Q,R,mu_0,Sig_0))
print('')
print('loglik = ')
print(loglik)
[x_bar,_] = AE.predict(x_test)
tmp = np.mean((x_bar - x_test) ** 2)
recons_error.append(tmp)
print('recons_error = ')
print(recons_error)
AE.save_weights('./tuned_params/' + str(iter_EM) + '/' + str(iter_CoorAsc) + '_AE_params.h5')
act_map.save_weights('./tuned_params/' + str(iter_EM) + '/' + str(iter_CoorAsc) + '_act_map_params.h5')
pickle.dump([A,b,H,C,d,Q,R,mu_0,Sig_0], open('./tuned_params/' + str(iter_EM) + '/' + str(iter_CoorAsc) + 'LDS_params.pkl', 'wb'))
pickle.dump([loglik,recons_error], open('./results.pkl','wb'))
AE.save_weights('./tuned_params/' + str(iter_EM) + '_AE_params.h5')
act_map.save_weights('./tuned_params/' + str(iter_EM) + '_act_map_params.h5')
pickle.dump([A,b,H,C,d,Q,R,mu_0,Sig_0], open('./tuned_params/' + str(iter_EM) + '_LDS_params.pkl', 'wb'))
pickle.dump(loglik, open('./loglikelihood.pkl', 'wb'))
##################### TEST
# Reload tuned weights / LDS parameters and run Kalman filtering on the
# encoded test observations.
AE.load_weights('./tuned_params/2/0_AE_params.h5')
act_map.load_weights('./tuned_params/0/0_act_map_params.h5')
[A,b,H,C,d,Q,R,mu_0,Sig_0] = pickle.load(open('./tuned_params/2/0LDS_params.pkl','rb'))
from pykalman import KalmanFilter
kf = KalmanFilter(initial_state_mean = mu_0.reshape([-1]),
                  initial_state_covariance = Sig_0,
                  transition_matrices = A,
                  transition_offsets = b.reshape([-1]),
                  transition_covariance = Q,
                  observation_matrices = C,
                  observation_offsets = d.reshape([-1]),
                  observation_covariance = R)
# NOTE(review): x_test is reshaped directly here — this assumes the test
# pickle holds a single trajectory array (unlike the list-of-trajectories
# handling earlier in the script); confirm.
[x_test, u_test, _] = pickle.load(open('plane_random_trajectory_test', 'rb'))
x_test = x_test.reshape([x_test.shape[0],-1])
u_test = u_test[:-1,:]
w_test = enc.predict(x_test)
# Filtered latent states, mapped back through the emission model and decoder.
[z_est, z_est_var] = kf.filter(w_test)
w_est = np.matmul(z_est, C.T) + np.tile(d.reshape([1,-1]),[z_est.shape[0],1])
x_est = dec.predict(w_est)
# Side-by-side: observed frame vs. filtered reconstruction.
ii = 90
plt.figure()
plt.subplot(2,1,1)
plt.imshow(x_test[ii].reshape(40,40), cmap='Greys')
plt.subplot(2,1,2)
plt.imshow(x_est[ii].reshape(40,40), cmap='Greys')
#################
# Plain autoencoder reconstruction quality on the test set.
[x_bar,_] = AE.predict(x_test)
np.mean((x_bar - x_test) ** 2)
ii = 40
plt.figure()
plt.subplot(2,1,1)
plt.imshow(x_test[ii].reshape(40,40), cmap='Greys')
plt.subplot(2,1,2)
plt.imshow(x_bar[ii].reshape(40,40), cmap='Greys')
#################
# Same check on the training set.
[x_bar,_] = AE.predict(x_train)
np.mean((x_bar - x_train) ** 2)
ii = 40
plt.figure()
plt.subplot(2,1,1)
plt.imshow(x_train[ii].reshape(40,40), cmap='Greys')
plt.subplot(2,1,2)
plt.imshow(x_bar[ii].reshape(40,40), cmap='Greys')
#################
#################
def nearest_w(w, w_train):
    """Return the row of `w_train` closest to `w` in Euclidean distance.

    Vectorized replacement for the original per-row Python loop; argmin
    returns the first minimum, matching the original's strict '<'
    tie-breaking (earliest row wins).
    """
    distances = np.linalg.norm(w_train - w, axis=1)
    return w_train[int(np.argmin(distances)), :]
# Latent-space interpolation: walk in 10 steps from the code of test frame
# 10 to the code of test frame 70 and decode each intermediate point.
w_test = enc.predict(x_test)
w_train = enc.predict(x_train)
w_0 = w_test[10]
w_1 = w_test[70]
delta_w = (w_1 - w_0) / 9
plt.figure()
plt.subplot(2,6,1)
plt.imshow(x_test[10].reshape(40,40), cmap='Greys')
for i in range(10):
    # No-op expression left over from interactive debugging.
    i
    #w_t = nearest_w(w_0 + i*delta_w, w_train)
    w_t = w_0 + i*delta_w
    x_t = dec.predict(w_t.reshape([1,-1]))
    plt.subplot(2,6,i+2)
    plt.imshow(x_t.reshape(40,40), cmap='Greys')
plt.subplot(2,6,12)
plt.imshow(x_test[70].reshape(40,40), cmap='Greys')
|
# [SublimeLinter @python:3]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import threading
import win32api
import win32con
import win32gui
class drag_accept_files(object):
    """Enable Windows drag-and-drop of files onto a Tk toplevel window.

    Subclasses the window procedure of the Tk frame's HWND; when files are
    dropped, `callback` is invoked on a fresh thread with the list of
    dropped paths.
    """
    def __init__(self, wnd, callback):
        super(drag_accept_files, self).__init__()
        self.callback = callback
        # Tk's wm_frame() returns the window handle as a hex string.
        self.hwnd = int(wnd.wm_frame(), 16)
        # Install our window procedure, keeping the old one for chaining.
        self._old_wnd_proc = win32gui.SetWindowLong(
            self.hwnd, win32con.GWL_WNDPROC, self._new_wnd_proc)
        self.accept_files = True
    @property
    def accept_files(self):
        # Write-only property: the current state is not tracked.
        raise NotImplementedError()
    @accept_files.setter
    def accept_files(self, value):
        win32gui.DragAcceptFiles(self.hwnd, bool(value))
    def _new_wnd_proc(self, hwnd, msg, wparam, lparam):
        # Replacement WNDPROC: intercept WM_DROPFILES, forward everything.
        assert self.hwnd == hwnd
        if msg == win32con.WM_DROPFILES:
            files = []
            # DragQueryFile(hdrop) without an index returns the file count.
            for i in range(win32api.DragQueryFile(wparam)):
                files.append(win32api.DragQueryFile(wparam, i))
            if files:
                # Run the callback off the message pump so it may block.
                threading.Thread(target=self.callback, args=(files, )).start()
        if msg == win32con.WM_DESTROY:
            # Restore the original procedure before the window goes away.
            # NOTE(review): installed via win32gui.SetWindowLong but restored
            # via win32api.SetWindowLong — presumably equivalent; confirm.
            win32api.SetWindowLong(hwnd, win32con.GWL_WNDPROC, self._old_wnd_proc)
        return win32gui.CallWindowProc(self._old_wnd_proc, hwnd, msg, wparam, lparam)
|
import numpy as np
import cv2 as cv
# Blank BGR canvas and a named window to attach the trackbars to.
img = np.zeros((300,512,3), np.uint8)
cv.namedWindow('image')
def output(x):
    # Trackbar callback: OpenCV passes the new slider position; just log it.
    print(x)
# to create a switch for enabling or disabling the trackbar alternator
switch = '0:OFF\n 1:ON'
cv.createTrackbar(switch, 'image', 0, 1, output)
# to create a track bar
cv.createTrackbar('B', 'image', 0, 255, output) # Trackbar name, the image in connection, start varying point, end point, function to execute
cv.createTrackbar('G', 'image', 0, 255, output)
cv.createTrackbar('R', 'image', 0, 255, output)
while(1):
    cv.imshow('image', img)
    # Poll every 1 ms; Esc (key code 27) exits the loop.
    k = cv.waitKey(1)
    if k == 27:
        break
    # to get the color channels (RGB) from the trackbar
    b = cv.getTrackbarPos('B', 'image')
    g = cv.getTrackbarPos('G', 'image')
    r = cv.getTrackbarPos('R', 'image')
    S = cv.getTrackbarPos(switch, 'image')
    if S == 0:
        # Switch OFF: solid blue (BGR order), ignoring the sliders.
        img[:] = [255, 0, 0]
    else:
        img[:] = [b, g, r]
# to change the color of the image simultaneously with shifting the slider of trackbar
cv.destroyAllWindows()
|
# -*- coding: utf-8 -*-
# @TIME : 2021/3/11 10:25
# @AUTHOR : Xu Bai
# @FILE : 5-1.创建并测试一个函数,读取它的__doc__属性,再检查类型
# @DESCRIPTION :
def factorial(n):
    '''
    :returns n!, the factorial of a non-negative integer n
    :raises ValueError: if n is negative (the original silently returned 1)
    '''
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    return 1 if n < 2 else n * factorial(n - 1)
# Demo: call the function, then inspect its __doc__ attribute and its type.
print(factorial(42))
print(factorial.__doc__)
print(type(factorial))
|
import os, cv2
import numpy as np
from .utils import normalize
from typing import Tuple, Dict
def dividir_dataset_em_treinamento_e_teste(dataset: np.ndarray, divisao=(80,20)):
    """Split `dataset` into (train, test) along axis 0.

    `divisao` gives the (train %, test %) pair — e.g. (80, 20) keeps the
    first 80% of the rows for training and the rest for testing.

    Raises:
        ValueError: if `divisao` does not have exactly two entries or the
            percentages do not sum to 100.  (The original used `assert`,
            which is stripped under ``python -O``.)
    """
    if len(divisao) != 2:
        raise ValueError('Divisão deve ser: % de conj. de treinamento e % de conj. de teste.')
    n_treino, n_teste = divisao
    if n_treino + n_teste != 100:
        raise ValueError('A soma da divisão deve ser igual a 100.')
    # Compute the cut index once instead of twice.
    corte = int(n_treino / 100 * dataset.shape[0])
    return dataset[:corte], dataset[corte:]
def carrega_dataset(caminho_diretorio: str, divisao: Tuple[int, int], embaralhar=True) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Load `noisy.npy` and `original.npy` from `caminho_diretorio` (or the
    current directory when it is '') and return the train/test split.

    Args:
        caminho_diretorio (str): directory path.
        divisao (Tuple[int, int]): (train %, test %) split.
        embaralhar (bool, optional): shuffle the dataset. Defaults to True.

    Returns:
        Tuple: (x_train, y_train, x_test, y_test)

    Fix: the original seeded the RNG twice and shuffled x and y
    independently; a single permutation applied to both keeps the
    noisy/original pairs aligned by construction.  With the same seed
    the resulting order is identical to before.
    """
    if caminho_diretorio != '':
        x = np.load(os.path.join(caminho_diretorio, 'noisy.npy'))
        y = np.load(os.path.join(caminho_diretorio, 'original.npy'))
    else:
        x = np.load('noisy.npy')
        y = np.load('original.npy')
    if embaralhar:
        np.random.seed(42)
        perm = np.random.permutation(x.shape[0])
        x = x[perm]
        y = y[perm]
    x_train, x_test = dividir_dataset_em_treinamento_e_teste(x, divisao=divisao)
    y_train, y_test = dividir_dataset_em_treinamento_e_teste(y, divisao=divisao)
    return (x_train, y_train, x_test, y_test)
def adiciona_a_dimensao_das_cores(array:np.ndarray) -> np.ndarray:
    """
    Append a trailing channel axis to `array`, treating the image as
    grayscale (shape (..., 1)).
    """
    return array[..., np.newaxis]
def pre_processing(caminho_dataset: str, tamanho_patch: Dict[int,int]) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Run the pre-processing stage.

    Steps performed:
        1 - load the dataset;
        2 - shuffle it;
        3 - split it 80/20 (train/test);
        4 - merge exams, projections and patches into a single axis;
        5 - normalize values into [0, 1];
        6 - append the (grayscale) color axis.

    Args:
        caminho_dataset (str): directory holding the dataset.
        tamanho_patch (Dict[int,int]): patch size, e.g. 50x50 or 100x100.

    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
            x_train, y_train, x_test, y_test
    """
    conjuntos = carrega_dataset(
        caminho_diretorio=caminho_dataset,
        divisao=(80,20),
        embaralhar=True
    )
    # Apply the same flatten -> normalize -> add-channel pipeline to each split.
    processados = []
    for conjunto in conjuntos:
        achatado = np.reshape(conjunto, (-1, tamanho_patch[0], tamanho_patch[1]))
        normalizado = normalize(achatado, interval=(0,1))
        processados.append(adiciona_a_dimensao_das_cores(normalizado))
    x_train, y_train, x_test, y_test = processados
    return (x_train, y_train, x_test, y_test)
|
'''
Sanjidah Wahid
PHYS 39906: Computational Physics Summer 2020
Assignment 4 - #401
Numerical Convergence of an Integral in Our Solar System
'''
#part (a)
import numpy as np
import matplotlib.pyplot as plt
def f(s):
    """Integrand sqrt((1 - s) / s) for part (a)."""
    ratio = (1 - s) / s
    return ratio ** 0.5
# Sample counts at which both rules are evaluated (doubling each time).
N_list = [100,200,400,800,1600,3200,6400,12800]
N_array = np.array([100,200,400,800,1600,3200,6400,12800],dtype=int)
N_array = 1/N_array  # reciprocals 1/N, used as the x-axis of the plots
a = 0.5  # lower integration limit
b = 1.   # upper integration limit
I_exact = (np.pi - 2)/4  # analytic value of the integral over [0.5, 1]
#Trapezoidal rule evalation for values in list
def trap_eval(lst, fun, a=0.5, b=1.):
    """Composite trapezoidal rule for ``fun`` over [a, b].

    Improvement: the integration limits were hard-coded module globals;
    they are now parameters whose defaults match the original globals, so
    existing calls ``trap_eval(N_list, f)`` behave identically.

    Args:
        lst: iterable of subinterval counts N to evaluate at.
        fun: integrand, called as fun(x).
        a, b: integration limits (default 0.5 and 1.0 as in part (a)).

    Returns:
        list of the trapezoid estimates, one per N in ``lst``.
    """
    # endpoint term f(a)/2 + f(b)/2, shared by every N
    endpoints = 0.5 * (fun(a) + fun(b))
    results = []
    for N in lst:
        h = (b - a) / N
        total = 0.0
        for n in range(1, N):      # interior nodes
            total += fun(a + n * h)
        total += endpoints
        results.append(total * h)
    return results
# Trapezoid estimates for part (a) and their absolute error vs the exact value.
print(trap_eval(N_list,f))
I_trap_diff = np.array(trap_eval(N_list,f))
I_trap_diff -= I_exact
I_trap_diff = np.abs(I_trap_diff)
print(I_trap_diff)
#Simpson's rule evaluation
def simp_eval(lst, fun, a=0.5, b=1.):
    """Composite Simpson's rule for ``fun`` over [a, b].

    Bug fix: the endpoint term previously called the module-level ``f``
    instead of the ``fun`` argument, so passing any other integrand gave a
    wrong endpoint contribution. The limits a, b were also hard-coded
    globals; they are now parameters with the original values as defaults.

    Args:
        lst: iterable of (even) subinterval counts N.
        fun: integrand, called as fun(x).
        a, b: integration limits (default 0.5 and 1.0 as in part (a)).

    Returns:
        list of the Simpson estimates, one per N in ``lst``.
    """
    # endpoint term (f(a) + f(b)) / 3 — was erroneously f(...) not fun(...)
    endpoints = 1/3 * (fun(a) + fun(b))
    results = []
    for N in lst:
        h = (b - a) / N
        total = 0.0
        for n in range(1, N):
            # Simpson weights: 4/3 at odd nodes, 2/3 at even interior nodes
            weight = 2/3 if n % 2 == 0 else 4/3
            total += weight * fun(a + n * h)
        total += endpoints
        results.append(total * h)
    return results
# Simpson estimates for part (a) and their absolute error vs the exact value.
print(simp_eval(N_list,f))
I_simp_diff = np.array(simp_eval(N_list,f))
I_simp_diff -= I_exact
I_simp_diff = np.abs(I_simp_diff)
print(I_simp_diff)
print(str(I_exact) + " this is exact")
#Plotting for part (a): x-axis carries 1/N, y-axis carries the error Delta
fig, (plot1, plot2) = plt.subplots(1,2, figsize=(12,6))
plot1.plot(N_array,I_trap_diff, label = "Trapezoidal rule difference")
plot1.plot(N_array,I_simp_diff, label = "Simpson's rule difference")
plot1.set_title(r"Log scale of $\Delta$ versus $\frac{1}{N}$ for Part (a)")
# Bug fix: the axis labels were swapped — the data plotted on x is 1/N
# (N_array) and on y is the error Delta.
plot1.set_xlabel(r"$\frac{1}{N}$", fontsize = 14)
plot1.set_ylabel(r"$\Delta$", fontsize = 14)
plot1.loglog()
plot1.legend()
##############################################################################
#part (b)
# Boundary term [s^(-1/2) * (1-s)^(3/2)] evaluated at b minus at a,
# produced by the part-(b) integration by parts.
const = (b**(-.5)*(1-b)**(3/2)) - (a**(-.5)*(1-a)**(3/2))
def alt_f(s):
    """Transformed integrand ((1 - s) / s)^(3/2) used in part (b)."""
    ratio = (1 - s) / s
    return ratio ** 1.5
#Trapezoidal rule evalation for values in list, modified for alt_f
def trap_eval_alt(lst):
    """Trapezoid evaluation of the part-(b) integrand ``alt_f`` over the
    module-level limits [a, b], then the integration-by-parts transform:
    result = -2/3 * (const + trapezoid sum * h/2).

    NOTE(review): the sum is scaled by h/2 although the endpoint term was
    already halved — ``trap_eval`` scales by h. Confirm the factor against
    the part-(b) derivation.
    """
    endpoints = 1/2 * (alt_f(a) + alt_f(b))
    results = []
    for N in lst:
        h = (b - a) / N
        total = 0.0
        for n in range(1, N):
            total += alt_f(a + n * h)
        total += endpoints
        total *= h / 2
        total += const
        total *= -2 / 3
        results.append(total)
    return results
# Absolute error of the transformed trapezoid evaluation for part (b).
I_trap_diff_alt = np.array(trap_eval_alt(N_list))
I_trap_diff_alt -= I_exact
I_trap_diff_alt = np.abs(I_trap_diff_alt)
print(I_trap_diff_alt)
#Simpson's rule evaluation, modified for alt_f
def simp_eval_alt(lst):
    """Simpson evaluation of the part-(b) integrand ``alt_f`` over the
    module-level limits [a, b], then the integration-by-parts transform:
    result = -2/3 * (const + Simpson sum * h/2).

    NOTE(review): the sum is scaled by h/2 while ``simp_eval`` scales by h —
    confirm the factor against the part-(b) derivation.
    """
    endpoints = 1/3 * (alt_f(a) + alt_f(b))
    results = []
    for N in lst:
        h = (b - a) / N
        total = 0.0
        for n in range(1, N):
            weight = 2/3 if n % 2 == 0 else 4/3
            total += weight * alt_f(a + n * h)
        total += endpoints
        total *= h / 2
        total += const
        total *= -2 / 3
        results.append(total)
    return results
# Absolute error of the transformed Simpson evaluation for part (b).
I_simp_diff_alt = np.array(simp_eval_alt(N_list))
I_simp_diff_alt -= I_exact
I_simp_diff_alt = np.abs(I_simp_diff_alt)
print(I_simp_diff_alt)
#Plotting for part (b): x-axis carries 1/N, y-axis carries the error Delta
plot2.plot(N_array,I_trap_diff_alt, label = "Trapezoidal rule difference")
plot2.plot(N_array,I_simp_diff_alt, label = "Simpson's rule difference")
plot2.set_title(r"Log scale of $\Delta$ versus $\frac{1}{N}$ for Part (b)")
# Bug fix: the axis labels were swapped — x is 1/N, y is the error Delta.
plot2.set_xlabel(r"$\frac{1}{N}$", fontsize = 14)
plot2.set_ylabel(r"$\Delta$", fontsize = 14)
plot2.loglog()
plot2.legend()
fig.show()
##############################################################################
#part (c)
|
import os
import shutil
sdir = './tmp_cut/'#source directory
ddir = './tmp_classed/'#destination root
# Sort every file into a subdirectory named after the single character
# that follows the first underscore in its filename (its class label).
for filename in os.listdir(sdir):
    source_file = sdir + filename
    target_dir = ddir + filename[filename.find('_') + 1] + '/'
    target_file = target_dir + filename
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    shutil.copyfile(source_file, target_file)
import re
import queue
from difflib import SequenceMatcher
from requester import requester
from colors import red,green,end
from log import factory_logger,time
from urllib3.exceptions import ConnectTimeoutError
def chambering(url,strike,payload = None,type = None):
    """Split *url* into (base_url, params) and optionally inject *payload*.

    Bug fix: for an unrecognized ``type`` the injected dict ``incursive``
    was never assigned, so the return raised NameError; it now falls back
    to the untouched params.

    Args:
        url: target URL, possibly with a ?query=string.
        strike: when True (and payload given), build an injected param dict.
        payload: attack payload to inject.
        type: one of "SQLi", "XSS", "file_inclusion", "command_injection",
            "ssrf" (note: shadows the builtin, kept for caller compatibility).

    Returns:
        (base_url, dict) when the URL has a query string, else (url, None).
    """
    if "=" in url and "?" in url:
        params = {}
        for item in url.split("?")[1].split("&"):
            pieces = item.split("=")
            # mirrors the original: only the first value segment is kept
            params[pieces[0]] = pieces[1]
        url = url.split("?")[0]
        if strike and payload is not None:
            # default: unknown vuln type leaves the parameters untouched
            incursive = dict(params)
            if type == "SQLi":
                # append the payload to each existing value
                incursive = {key: params[key] + payload for key in params}
            elif type in ["XSS", "file_inclusion", "command_injection", "ssrf"]:
                # replace each value entirely with the payload
                incursive = {key: payload for key in params}
            return (url, incursive)
        return (url, params)
    return (url, None)
def receive_check(original,payloaded,type,payload = None):
    """Decide whether the payloaded response indicates a vulnerability.

    For injection-style checks the payloaded page is diffed against the
    baseline; for XSS the payload is searched for (as a case-insensitive
    regex) in the response body.
    """
    lower_limit = 0.95
    if type in ("SQLi", "file_inclusion", "command_injection"):
        matcher = SequenceMatcher(None)
        matcher.set_seq1(original)
        matcher.set_seq2(payloaded)
        # a large enough deviation from the baseline page suggests success
        return matcher.quick_ratio() < lower_limit
    if type == "XSS":
        return re.search(payload, payloaded, re.I) is not None
def check_live(proxy):
    """Return True when the (host, port) proxy answers and echoes its own
    IP back from httpbin; False on timeout or mismatch."""
    check_ip = "http://httpbin.org/ip"
    address = f"{proxy[0]}:{proxy[1]}"
    try:
        response = requester(check_ip, data=None, timeout=3, GET=True,
                             proxy=address)
        if response is not None and proxy[0] in response.text:
            return True
        return False
    except ConnectTimeoutError:
        return False
def get_proxy(proxy_queue):
    """Pop proxies from *proxy_queue* until a live one is found.

    Bug fix: the queue's emptiness was checked before testing the proxy
    already pulled out, so the last proxy in the queue was never tested and
    an initially-empty queue blocked forever. Every queued proxy is now
    tested exactly once, without blocking.

    Returns:
        The host string of the first live proxy, or None when exhausted.
    """
    while True:
        try:
            proxy = proxy_queue.get_nowait()
        except queue.Empty:
            break
        if check_live(proxy):
            print(f"{red}[!][{time}]{proxy[0]} is alive and testing with it !{end}")
            return proxy[0]
        print(f"{green}[!][{time}]{proxy[0]} is dead !{end}")
    # fixed garbled message ("No more No available proxy")
    print(f"{red}[!][{time}] No available proxy{end}")
    return None
def vul_message(vul,url,payload):
    """Build the boxed report printed when a vulnerability is confirmed."""
    message = {
        "SQLi" : "SQL injection vulnerability has already been detected",
        "file_inclusion" : "File Inclusion vulnerability has already been detected",
        "command_injection" : "Command Injection vulnerability has already been detected",
        "ssrf" : "SSRF vulnerability has already been detected"
    }
    box_lines = [
        "-------------------------------------------",
        f"url : {url}",
        f"payload : {payload}",
        message[vul],
        "--------------------------------------------",
        "",  # keep the trailing newline of the original box
    ]
    return "\n".join(box_lines)
def convert_target(url):
    """Normalize *url* so it always carries an http scheme."""
    lowered = url.lower()
    if lowered.startswith("http"):
        return url
    if lowered.startswith("/"):
        # "/host" -> "http:/" + "/host" == "http://host"
        return "http:/" + url
    return "http://" + url
def extract_domain(target):
    """Return the second dot-separated label of *target* (its first element
    when given a list); None for None input."""
    if target is None:
        return None
    value = target[0] if isinstance(target, list) else target
    return value.split(".")[1]
def file_handler(file):
    """Read targets from *file* (one per line) into a FIFO queue of
    normalized http URLs.

    Bug fix: lines are stripped before normalization — previously the
    trailing newline became part of the URL (e.g. "http://host\\n").
    Blank lines are skipped instead of producing "http://".
    """
    domains = queue.Queue()
    with open(file, 'r', buffering=1024) as handler:
        for line in handler:
            line = line.strip()
            if not line:
                continue
            domains.put(convert_target(line))
    return domains
def error_check(page):
    """Return False when the page text mentions '404', True otherwise."""
    return re.search("404", page) is None
def load_queue(subdomain):
    """Wrap each subdomain in an http:// URL and load them into a queue."""
    subdomain_queue = queue.Queue()
    for host in subdomain:
        subdomain_queue.put("http://" + host)
    return subdomain_queue
if __name__ == '__main__':
    # chambering(url, strike, payload=None, type=None):
    # Smoke test: parse a real URL's query string without injecting anything.
    url = "https://plus.jd.com/indexf?low_system=appicon&flow_entrance=appicon11&flow_channel=pc"
    chambering(url,strike=False)
|
import hparams
from dataprocess.cleaners import englishStopWords
class XmlModel(object):
    """Marker base class for entities built from XML attribute dicts."""
    pass
class Comment(XmlModel):
    """A comment record built from an XML attribute dict.

    NOTE(review): the attribute-name/key pairing below looks shifted by one
    position (e.g. ``PostTypeId`` stores the 'PostId' value and ``Score``
    stores 'CreationDate') — confirm against the XML schema before relying
    on the field names.
    """
    def __init__(self, attributes):
        self.id = attributes.get('Id')
        self.PostTypeId = attributes.get('PostId')       # holds the 'PostId' value
        self.AcceptedAnswerId = attributes.get('Score')  # holds the 'Score' value
        self.CreationDate = attributes.get('Text')       # holds the 'Text' value
        self.Score = attributes.get('CreationDate')      # holds the 'CreationDate' value

    def __str__(self) -> str:
        return "Comment( id = " + str(self.id) + ")"
class Post(XmlModel):
    """A question post built from an XML attribute dict, optionally with
    its answers (``answersAttributees``) and comments attached."""
    def __init__(self, attributes, answersAttributees = None):
        self.comments = list()
        self.id = attributes.get('Id')
        self.acceptedAnswerId = attributes.get('AcceptedAnswerId')
        self.creationDate = attributes.get('CreationDate')
        self.score = attributes.get('Score')
        self.viewCount = attributes.get('ViewCount')
        self.body : str = attributes.get('Body')
        self.tags = attributes.get('Tags')
        self.answerCount = attributes.get('AnswerCount')
        self.commentCount = attributes.get('CommentCount')
        self.favoriteCount = attributes.get('FavoriteCount')
        self.title = attributes.get('Title')
        # None (not []) when built without answers, so __str__ shows None
        self.answers = [Answer(attr) for attr in answersAttributees] if answersAttributees else None

    def __str__(self) -> str:
        return "Post( id = {id}, answers = {answers} )".format(id=self.id, answers = self.answers)

    def __repr__(self) -> str:
        return self.__str__()

    def addComment(self, comment: Comment):
        """Attach a Comment to this post."""
        self.comments.append(comment)

    def addAnswer(self, answer):
        """Attach an Answer to this post.

        Bug fix: when the post was constructed without answers,
        ``self.answers`` is None and appending raised AttributeError; the
        list is now created lazily on first use.
        """
        if self.answers is None:
            self.answers = []
        self.answers.append(answer)

    def getAcceptedAnswer(self):
        """Return the Answer matching ``acceptedAnswerId``, or None."""
        if self.answers:
            for ans in self.answers:
                if ans.id == self.acceptedAnswerId:
                    return ans
        return None

    def toWordsArray(self, limit = hparams.HParams.MAX_SENTENCE_DIM):
        """Return up to *limit* non-stopword tokens from the title."""
        words = list()
        for w in self.title.split():
            if w not in englishStopWords:
                if len(words) == limit:
                    break
                words.append(w)
        return words
class Answer(Post):
    """An answer post; carries all Post fields for the answer row."""
    def __init__(self, attributes):
        super().__init__(attributes)

    def __str__(self) -> str:
        return f"Answer( id = {self.id})"

    def __repr__(self) -> str:
        return str(self)
"""Define and implement the callback interface."""
from .base import Callback, CallbackList, CallbackListFactory
from .checkpoint import ModelCheckpoint
from .logger import TxtLogger, StdoutLogger
|
import cv2
import pandas as pd
import numpy as np
import os
#get the image path
img_path = input("Enter the image file path including extension: ")
if(img_path == ""):
    # default to the bundled sample image when the user just hits Enter
    img_path = os.path.join(os.getcwd(), 'bg.jpg')
#read the image
img = cv2.imread(img_path)
clicked = False
# last-clicked pixel's color channels and position (set by draw_function)
r = g = b = xpos = ypos = 0
index=["color","color_name","hex","R","G","B"]
# read the csv that downloaded from https://github.com/codebrainz/color-names
colors_df = pd.read_csv('colors.csv', names=index, header=None)
def getColorName(R, G, B):
    """
    if r, g, b is given find the nearest color name possible
    (smallest Manhattan distance over the three channels in colors.csv)
    """
    minimum = 10000  # larger than the max possible distance (3 * 255)
    for i in range(len(colors_df)):
        #following is the formula to find the nearest name possible
        #d = abs(Red – ithRedColor) + (Green – ithGreenColor) + (Blue – ithBlueColor)
        d = abs(R- int(colors_df.loc[i,"R"])) + abs(G- int(colors_df.loc[i,"G"]))+ abs(B- int(colors_df.loc[i,"B"]))
        if(d <= minimum):
            minimum = d
            cname = colors_df.loc[i,"color_name"]
    # debug trace of the chosen name — presumably leftover; TODO confirm
    print(cname)
    return cname
def draw_function(event, x, y, flags, param):
    """
    from the place of mouse click extract the required data and return bgr colors of that location
    """
    if event == cv2.EVENT_LBUTTONDBLCLK:
        global b, g, r, xpos, ypos, clicked
        clicked = True
        xpos = x
        ypos = y
        # OpenCV images are indexed [row, col] and store channels as BGR;
        # cast the numpy scalars to plain ints for later formatting
        b,g,r = img[y,x]
        b = int(b)
        g = int(g)
        r = int(r)
        return r, g, b
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_function)
# Event loop: on each double-click (clicked flag set by draw_function),
# draw a swatch of the picked color with its name and RGB values.
while(1):
    cv2.imshow("image", img)
    if (clicked):
        # draw the rectange on top left
        # cv2.rectangle(image, startpoint, endpoint, color, thickness) -1 thickness fills rectangle.
        cv2.rectangle(img,(20,20), (750,60), (b,g,r), -1)
        # get the color name and rgb values
        # Creating text string to display ( Color name and RGB values )
        text = getColorName(r,g,b) + ' R='+ str(r) + ' G='+ str(g) + ' B='+ str(b)
        #put text is used to display the text
        #cv2.putText(img,text,start,font(0-7), fontScale, color, thickness, lineType, (optional bottomLeft bool) )
        cv2.putText(img, text,(50,50),2,0.8,(255,255,255),2,cv2.LINE_AA)
        #For very light colours we will display text in black colour because light will not display properly in light color bg
        if(r+g+b>=600):
            cv2.putText(img, text,(50,50),2,0.8,(0,0,0),2,cv2.LINE_AA)
        clicked=False
    #Break the loop when user hits 'esc' key
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.destroyAllWindows()
|
from typing import List
class Solution:
    """Reverse the characters of each word in a string while keeping the
    word order and whitespace positions intact."""

    def reverseWords(self, s: str) -> str:
        pieces = []
        idx, length = 0, len(s)
        while idx < length:
            if s[idx].isspace():
                # every whitespace character is emitted as a single space
                pieces.append(' ')
                idx += 1
                continue
            start = idx
            while idx < length and not s[idx].isspace():
                idx += 1
            # slice-reverse the word in one step
            pieces.append(s[start:idx][::-1])
        return ''.join(pieces)

    def _reverse_word(self, s: List[str]):
        """Reverse a list of characters in place (two-pointer swap) and
        return the same list."""
        lo, hi = 0, len(s) - 1
        while lo < hi:
            s[lo], s[hi] = s[hi], s[lo]
            lo += 1
            hi -= 1
        return s
|
from __future__ import print_function, division
import numpy as np
import dqrsl
from sklearn.utils import check_array
from numpy.linalg import matrix_rank
from numpy.linalg.linalg import LinAlgError
# WARNING: there is little-to-no validation of input in these functions,
# and crashes may be caused by inappropriate usage. Use with care.
# Explicit public API; everything else in this module is internal.
__all__ = [
    'qr_decomposition',
    'QRDecomposition'
]
def _validate_matrix_size(n, p):
if n * p > 2147483647:
raise ValueError('too many elements for Fortran LINPACK routine')
def _safecall(fun, name, *args, **kwargs):
"""A method to call a LAPACK or LINPACK subroutine internally"""
ret = fun(*args, **kwargs)
# since we're operating on arrays in place, we don't need this
#if ret[-1] < 0:
# raise ValueError("illegal value in %d-th argument of internal %s"
# % (-ret[-1], name))
def qr_decomposition(X, job=1):
    """Performs the QR decomposition using LINPACK, BLAS and LAPACK
    Fortran subroutines.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        The matrix to decompose

    job : int, optional (default=1)
        Whether to perform pivoting. 0 is False, any other value
        will be coerced to 1 (True).

    Returns
    -------
    tuple
        ``(qr, rank, qraux, pivot)`` where ``qr`` is the decomposed matrix
        (overwritten in place by the Fortran routine), ``rank`` its rank,
        ``qraux`` the auxiliary data needed to recover Q, and ``pivot`` the
        0-based pivot indices (None when pivoting is disabled).
    """
    X = check_array(X, dtype='numeric', order='F', copy=True)
    n, p = X.shape

    # check on size
    _validate_matrix_size(n, p)
    rank = matrix_rank(X)

    # validate job:
    job_ = 0 if not job else 1

    # Bug fix: np.int (a deprecated alias of the builtin int) was removed
    # in NumPy >= 1.24; np.int_ is the platform default integer it mapped
    # to. The pivots start 1-based for Fortran; np.arange cannot be used
    # directly because it lacks an 'order' keyword.
    qraux, pivot, work = (np.zeros(p, dtype=np.double, order='F'),
                          np.array(list(range(1, p + 1)), dtype=np.int_, order='F'),
                          np.zeros(p, dtype=np.double, order='F'))

    # sanity checks
    assert qraux.shape[0] == p, 'expected qraux to be of length %i' % p
    assert pivot.shape[0] == p, 'expected pivot to be of length %i' % p
    assert work.shape[0] == p, 'expected work to be of length %i' % p

    # call the fortran module IN PLACE
    _safecall(dqrsl.dqrdc, 'dqrdc', X, n, n, p, qraux, pivot, work, job_)

    # do returns
    return (X,
            rank,
            qraux,
            (pivot-1) if job_ else None)  # subtract one because pivot started at 1 for the fortran
def _qr_R(qr):
"""Extract the R matrix from a QR decomposition"""
min_dim = min(qr.shape)
return qr[:min_dim+1,:]
class QRDecomposition():
    """Performs the QR decomposition using LINPACK, BLAS and LAPACK
    Fortran subroutines, and provides an interface for other useful
    QR utility methods.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        The matrix to decompose

    pivot : int, optional (default=1)
        Whether to perform pivoting. 0 is False, any other value
        will be coerced to 1 (True).

    Attributes
    ----------
    qr : array_like, shape (n_samples, n_features)
        The decomposed matrix

    qraux : array_like, shape (n_features,)
        qraux contains further information required to recover
        the orthogonal part of the decomposition.

    pivot : array_like, shape (n_features,)
        The pivots, if pivot was set to 1, else None

    rank : int
        The rank of the input matrix
    """
    def __init__(self, X, pivot=1):
        self.job_ = 0 if not pivot else 1
        self._decompose(X)

    def _decompose(self, X):
        """Decomposes the matrix"""
        # perform the decomposition
        self.qr, self.rank, self.qraux, self.pivot = qr_decomposition(X, self.job_)

    def get_coef(self, X):
        """Solve for the coefficients of *X* against this decomposition
        via the LINPACK ``dqrcf`` routine, honoring pivoting and rank."""
        qr, qraux = self.qr, self.qraux
        n, p = qr.shape

        # sanity check
        assert isinstance(qr, np.ndarray), 'internal error: QR should be a np.ndarray but got %s' % type(qr)
        assert isinstance(qraux, np.ndarray), 'internal error: qraux should be a np.ndarray but got %s' % type(qraux)

        # validate input array
        X = check_array(X, dtype='numeric', copy=True, order='F')
        nx, ny = X.shape
        if nx != n:
            raise ValueError('qr and X must have same number of rows')

        # check on size
        _validate_matrix_size(n, p)

        # get the rank of the decomposition
        k = self.rank

        # set up the structures to alter.
        # Bug fix: np.int was removed in NumPy >= 1.24; np.int_ is the
        # platform default integer it used to alias.
        coef, info = (np.zeros((k, ny), dtype=np.double, order='F'),
                      np.zeros(1, dtype=np.int_, order='F'))

        # call the fortran module IN PLACE
        _safecall(dqrsl.dqrcf, 'dqrcf', qr, n, k, qraux, X, ny, coef, 0)

        # post-processing: for rank-deficient fits, scatter the solved
        # coefficients back through the pivot order
        return coef if not k < p else coef[self.pivot[np.arange(k)], :]

    def get_rank(self):
        """Get the rank of the decomposition"""
        return self.rank

    def get_R(self):
        """Get the R matrix from the decomposition"""
        return _qr_R(self.qr)

    def get_R_rank(self):
        """Get the rank of the R matrix"""
        return matrix_rank(self.get_R())
|
from kinyu.rimport.api import RemoteImporter
from subprocess import Popen, PIPE
import pytest
import json
# File-backed source database shared by every test in this module.
SRCDB = 'files://tmp/unittests/test_main'
@pytest.fixture
def remote_importer():
    """Fresh RemoteImporter bound to the shared test source database."""
    return RemoteImporter(SRCDB)
def _run_test(args):
    """Run ``python -m kinyu.main`` against SRCDB with *args*; return stdout.

    Bug fix: ``proc.returncode`` was asserted before the process had been
    waited on, so it was still None and the assertion always failed (or
    raced). The process is now waited on first.
    """
    command = ["python", "-m", "kinyu.main", "--srcdb=" + SRCDB] + args
    with Popen(command, stdout=PIPE) as proc:
        output = proc.stdout.read()
        returncode = proc.wait()
    assert returncode == 0
    return output
def test_main(remote_importer):
    # Default entry point: running the module should invoke its main().
    remote_importer.add_script("unittest/main_module.py", '''
def main():
    print("Hello World")
''')
    assert _run_test(["unittest.main_module"]) == b'Hello World\n'
def test_entry(remote_importer):
    # A custom --entry flag should invoke the named function instead of main().
    remote_importer.add_script("unittest/test_entry.py", '''
def test_entry():
    print("Welcome")
''')
    assert _run_test(
        ["unittest.test_entry", "--entry=test_entry"]) == b'Welcome\n'
def test_args(remote_importer):
    # JSON --args should be forwarded as keyword arguments to main();
    # the remote script asserts it received them (c falls back to default).
    remote_importer.add_script("unittest/test_args.py", '''
def main(a, b, c=3):
    assert a == 1
    assert b == 2
    assert c == 3
''')
    args = {'a': 1, 'b': 2}
    _run_test(["unittest.test_args", '--args=' + json.dumps(args)])
|
"""Funtions for logging.
Copyright (C) 2022-2023 C-PAC Developers
This file is part of C-PAC.
C-PAC is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
C-PAC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public
License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
import logging
import os
import subprocess
from sys import exc_info as sys_exc_info
from traceback import print_exception
from nipype import logging as nipype_logging
from CPAC.utils.docs import docstring_parameter
from CPAC.utils.monitoring.config import MOCK_LOGGERS
def failed_to_start(log_dir, exception):
    """Launch a failed-to-start logger for a run that failed to start.

    Must be called from within an ``except`` block.

    Parameters
    ----------
    log_dir : str
        path to logging directory

    exception : Exception
    """
    error_logger = set_up_logger('failedToStart', 'failedToStart.log',
                                 'error', log_dir, True)
    # two records: a fixed banner plus the specific exception
    error_logger.exception('C-PAC failed to start')
    error_logger.exception(exception)
def getLogger(name):  # pylint: disable=invalid-name
    """Function to get a mock logger if one exists, falling back on
    real loggers.

    Parameters
    ----------
    name : str

    Returns
    -------
    logger : CPAC.utils.monitoring.custom_logging.MockLogger or logging.Logger
    """
    if name in MOCK_LOGGERS:
        return MOCK_LOGGERS[name]
    # prefer nipype's logger registry; fall back to the stdlib registry
    nipype_logger = nipype_logging.getLogger(name)
    if nipype_logger is None:
        return logging.getLogger(name)
    return nipype_logger
def log_failed_subprocess(cpe):
    """Pass STDERR from a subprocess to the interface's logger

    Parameters
    ----------
    cpe : subprocess.CalledProcessError
    """
    getLogger('nipype.interface').error("%s\nExit code %s",
                                        cpe.output, cpe.returncode)
def log_subprocess(cmd, *args, raise_error=True, **kwargs):
    """Pass STDERR and STDOUT from subprocess to interface's logger.

    This function is nearly a drop-in replacement for
    `subprocess.check_output`.

    Caveat: if you're assigning to a variable (like
    >>> output = subprocess.check_output(cmd)  # doctest: +SKIP
    ), the new function also returns the command's exit code, so you can just
    assign that to a throwaway variable if you don't want it

    >>> output, _ = log_subprocess(cmd)  # doctest: +SKIP

    or subscript the command like

    >>> output = log_subprocess(cmd)[0]  # doctest: +SKIP

    . If you're not assigning to a variable, it doesn't matter and just

    >>> log_subprocess(cmd)  # doctest: +SKIP

    should work just like

    >>> subprocess.check_output(cmd)  # doctest: +SKIP

    Parameters
    ----------
    cmd : str
        command to run with `subprocess.check_output`

    raise_error : boolean
        raise any exception after logging

    args, kwargs : any
        pass-through arguments for subprocess.check_output

    Returns
    -------
    output : str

    exit_code : int
    """
    logger = getLogger('nipype.interface')
    try:
        # merge the child's stderr into stdout so both end up in the log
        output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT,
                                         universal_newlines=True, **kwargs)
        logger.info(output)
    except subprocess.CalledProcessError as cpe:
        log_failed_subprocess(cpe)
        if raise_error:
            raise
        # best-effort mode: surface the failing output and real exit code
        return cpe.output, cpe.returncode
    return output, 0
# pylint: disable=too-few-public-methods
class MockHandler:
    """Handler for MockLogger."""
    def __init__(self, filename):
        # mirrors logging.FileHandler's attribute name so callers can read
        # handlers[0].baseFilename identically for real and mock loggers
        self.baseFilename = filename  # pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
class MockLogger:
    """Mock logging.Logger to provide the same API without keeping the
    logger in memory."""
    def __init__(self, name, filename, level, log_dir):
        self.name = name
        self.level = level
        # a single mock handler so callers can read handlers[0].baseFilename
        self.handlers = [MockHandler(os.path.join(log_dir, filename))]
        # register globally so getLogger() resolves this mock by name
        MOCK_LOGGERS[name] = self
        for loglevel in ['debug', 'info', 'warning', 'error', 'critical']:
            # set up log methods for all built-in levels
            setattr(self, loglevel, self._factory_log(loglevel))

    def exception(self, msg, *args, exc_info=True, **kwargs):
        # pylint: disable=missing-function-docstring,no-member
        return self.error(msg, *args, exc_info=exc_info, **kwargs)
    exception.__doc__ = logging.exception.__doc__

    def _factory_log(self, level):
        r"""Generate a log method like `self.log(message)` for a given
        built-in level."""
        @docstring_parameter(level=level)
        def _log(message, *items, exc_info=False):
            """Log a message if logging level >= {level}. See `Logging Levels <https://docs.python.org/3/library/logging.html#levels>`_ for a list of levels."""
            # NOTE(review): this logs when the LOGGER level is >= the message
            # level, which looks inverted relative to logging.Logger
            # semantics (message level >= logger level) — confirm intended.
            if self.level == 0 or self.level >= getattr(logging, level.upper(),
                                                        logging.NOTSET):
                # open/append/close per call instead of holding a file handle
                with open(self.handlers[0].baseFilename, 'a',
                          encoding='utf-8') as log_file:
                    if exc_info and isinstance(message, Exception):
                        value, traceback = sys_exc_info()[1:]
                        print_exception(_lazy_sub(message, *items), value=value,
                                        tb=traceback, file=log_file)
                    else:
                        print(_lazy_sub(message, *items), file=log_file)
        return _log

    def delete(self):
        """Delete the mock logger from memory."""
        del MOCK_LOGGERS[self.name]
def _lazy_sub(message, *items):
"""Given lazy-logging syntax, return string with substitutions
Parameters
----------
message : str
items : tuple
Returns
-------
str
Examples
--------
>>> _lazy_sub('no substitution')
'no substitution'
>>> _lazy_sub('%s substitution', 'yes')
'yes substitution'
>>> _lazy_sub('%s substitution %s', 'yes', 'again')
'yes substitution again'
"""
try:
return str(message) % items
except (AttributeError, TypeError):
return str([message, *items])
def set_up_logger(name, filename=None, level=None, log_dir=None, mock=False,
                  overwrite_existing=False):
    r"""Function to initialize a logger

    Parameters
    ----------
    name : str
        logger name (for subsequent calls to ``logging.getLogger``) to
        write to the same log file)

    filename : str, optional
        filename to write log to. If not specified, filename will be
        the same as ``name`` with the extension ``log``

    level : str, optional
        one of ``{critical, error, warning, info, debug, notset}``,
        case-insensitive

    log_dir : str, optional
        defaults to the current working directory

    mock : bool, optional
        if ``True``, return a ``CPAC.utils.monitoring.MockLogger``
        instead of a ``logging.Logger``

    overwrite_existing : bool, optional
        if ``True``, truncate an existing log file of the same path
        before logging begins

    Returns
    -------
    logger : logging.Handler
        initialized logging Handler

    Examples
    --------
    >>> lg = set_up_logger('test')
    >>> lg.handlers[0].baseFilename.split('/')[-1]
    'test.log'
    >>> lg.level
    0
    >>> lg = set_up_logger('second_test', 'specific_filename.custom', 'debug')
    >>> lg.handlers[0].baseFilename.split('/')[-1]
    'specific_filename.custom'
    >>> lg.level
    10
    >>> lg = set_up_logger('third_test', mock=True)
    >>> getLogger('third_test') == lg
    True
    >>> 'third_test' in MOCK_LOGGERS
    True
    >>> lg.delete()
    >>> 'third_test' in MOCK_LOGGERS
    False
    """
    if filename is None:
        filename = f'{name}.log'
    try:
        level = getattr(logging, level.upper())
    except AttributeError:
        # level was None or not a recognized level name
        level = logging.NOTSET
    if log_dir is None:
        log_dir = os.getcwd()
    filepath = os.path.join(log_dir, filename)
    if overwrite_existing and os.path.exists(filepath):
        # Fix: truncate with an explicit utf-8 encoding, matching the
        # encoding MockLogger uses when appending to the same file.
        with open(filepath, 'w', encoding='utf-8') as log_file:
            log_file.write('')
    if mock:
        return MockLogger(name, filename, level, log_dir)
    logger = getLogger(name)
    logger.setLevel(level)
    handler = logging.FileHandler(filepath)
    logger.addHandler(handler)
    return logger
|
{
'name': "incomingorder_tracking",
'summary': """
Module is for tracking incoming order to generate GRN
""",
'description': """
Module is for tracking incoming order
""",
'author': "Creator",
'website': "",
'category': 'Uncategorized',
'version': '0.1',
'depends': ['base'],
'data': [
'incomingorder_view.xml',
'checkorder_view.xml', 'incomingorder_workflow.xml',
],
'demo': [
],
}
|
from page.base_page import Page
from selenium.webdriver.common.by import By
class Header(Page):
    """Page object for the site-wide top navigation bar."""
    # Top-menu entries, addressed by their 1-based <li> position.
    home_page = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[1]")
    material_resource = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[2]")
    design_center = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[3]")
    clothing_company = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[4]")
    customer_center = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[5]")
    design_tool = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[6]")
    solution_project = (By.XPATH, "(//ul[@class='yz-navmenu'])//li[7]")
    # Logged-in avatar area and its dropdown actions (console / log out).
    home_list = (By.XPATH, "//div[@class='login-info']//div[@class='avatar']")
    control_button = (By.XPATH, "//a[text()='管理控制台']")
    cancel_button = (By.XPATH, "//a[text()='退出登录']")

    def click_material_resource(self):
        # open the material-resource section from the top menu
        self.click(self.material_resource)

    def hover_home_list(self):
        # hover the avatar to reveal the account dropdown
        self.move_to_element(self.home_list)

    def click_control(self):
        # click the admin-console entry in the account dropdown
        self.click(self.control_button)
# Print every (i, j) pair of values from lst whose sum equals the input.
lst=[1,2,3,4,5,6,7,8,9]
element=int(input("enter a number"))
# NOTE(review): iterating the full list twice prints both (a, b) and (b, a),
# and pairs a value with itself (e.g. input 2 prints "1 1") — confirm that
# duplicate/self pairs are intended.
for i in lst:
    for j in lst:
        if(i+j==element):
            print(i,j)
import math
from functools import reduce
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites import requests
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from blog.forms import PostCreateForm, PostUpdateForm, CommentForm
from blog.models import Category, Post, Rating
def main_page(request):
    """Render the landing page with every category listed."""
    context = {'categories': Category.objects.all()}
    return render(request, 'blog/index.html', context)
@login_required()
def user_posts(request):
    """Paginated list (3 per page) of the logged-in user's own posts."""
    own_posts = Post.objects.filter(author=request.user)
    paginator = Paginator(own_posts, 3)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'blog/user_posts.html', {'posts': page_obj})
@login_required
def create_post(request):
    """Create a post owned by the current user, then re-render the form.

    NOTE(review): a fresh unbound form is rendered after every request, so
    validation errors from an invalid POST are discarded, and a successful
    POST re-renders instead of redirecting (no Post/Redirect/Get) — confirm
    both are intended.
    """
    if request.method == 'POST':
        form = PostCreateForm(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            # the author is taken from the session, never from form data
            post.author = request.user
            post.save()
    form = PostCreateForm()
    return render(request, 'blog/post_create.html',
                  context={'form': form})
def category_posts(request, pk):
    """Paginated (9 per page) published posts belonging to one category."""
    category = Category.objects.filter(id=pk).first()
    published = Post.objects.filter(category=category, published=True)
    page_obj = Paginator(published, 9).get_page(request.GET.get('page'))
    return render(request, 'blog/category_posts.html',
                  context={'category': category, 'posts': page_obj})
def search(request):
    """Search published posts by title substring.

    Bug fix: a request without a ``q`` parameter used to pass None to
    ``title__icontains``; it now falls back to the empty string, which
    matches every published post instead of raising.
    """
    query = request.GET.get('q', '')
    posts = Post.objects.filter(title__icontains=query, published=True)
    return render(request, 'blog/search.html',
                  context={'posts': posts})
def post_detail(request, pk):
    """Show one post with prev/next navigation, a comment form, and a
    view counter that skips the author's own visits."""
    owner = None
    post = Post.objects.filter(id=pk).first()
    # NOTE(review): post is None for an unknown pk — the attribute accesses
    # below would raise AttributeError; confirm 404 handling happens upstream.
    prev_post = Post.objects.filter(id=pk-1).first()
    next_post = Post.objects.filter(id=pk+1).first()
    not_anonymous = not (isinstance(request.user, AnonymousUser))
    if not_anonymous and request.user == post.author:
        # authors do not inflate their own view count
        pass
    else:
        post.seen_amount += 1
        post.save()
    if request.user == post.author:
        owner = True
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            comment.save()
    # always render a fresh form (validation errors are discarded)
    form = CommentForm()
    context = {
        'post': post,
        'owner': owner,
        'form': form,
        'prev_post': prev_post,
        'next_post': next_post
    }
    return render(request, 'blog/post_detail.html',
                  context)
def post_update(request, pk):
    """Edit an existing post; a valid POST saves and returns to 'my_posts'."""
    post = Post.objects.filter(id=pk).first()
    if request.method == 'POST':
        bound = PostUpdateForm(request.POST, request.FILES, instance=post)
        if bound.is_valid():
            bound.save()
            return redirect('my_posts')
    # GET, or invalid POST: show an unbound form for the current post
    form = PostUpdateForm(instance=post)
    return render(request, 'blog/post_update.html',
                  {'post': post, 'form': form})
def post_delete(request, pk):
    """Delete the post with id *pk* when it belongs to the current user.

    Bug fix: the queryset previously selected ALL of the user's posts
    (``author=request.user`` with *pk* ignored), so deleting one post wiped
    every post the user had written. It now targets the single post and
    keeps the ownership constraint.
    """
    Post.objects.filter(id=pk, author=request.user).delete()
    return redirect('my_posts')
def rate_post(request, pk, rate):
    """Store a one-time rating for a post, then refresh its ceiling-average.

    Anonymous users are sent to login; repeat raters get an error message.
    """
    post = Post.objects.filter(id=pk).first()
    is_authenticated = not isinstance(request.user, AnonymousUser)
    try:
        already_rated = Rating.objects.filter(post=post, profile=request.user)
    except TypeError:
        # an AnonymousUser cannot be used in the profile lookup
        messages.error(request, 'You should login first')
        return redirect('login')
    if is_authenticated and not already_rated:
        Rating(post=post, profile=request.user,
               rating=rate, rated=True).save()
        messages.success(request, 'Your rating have been saved')
    else:
        messages.error(request, 'You have rated before')
        return redirect('main_page')
    # recompute the post's rating as the ceiling of the mean of all stars
    stars = Rating.objects.filter(post=post)
    points = reduce(lambda x, y: x + y, [star.rating for star in stars])
    post.rating = math.ceil(points / len(stars))
    post.save()
    return redirect('main_page')
|
#Sydney Howard, showard
#Lab partners: Kasdan Bakos (kbakos), Jack Sampiere (jsampier)
#Also got help from: siqingy; approved by Kosbie
from tkinter import *
import random
import decimal
import string
####################################
# customize these functions
####################################
### CONTROL
def init(data):
    """Set up a fresh Tetris game: board geometry, piece definitions,
    the first falling piece, and the game-state flags."""
    data.rows = 15
    data.cols = 10
    data.margin = 20
    data.emptyColor = "blue"
    data.board = make2dList(data.rows, data.cols, data.emptyColor)
    getTetrisPiece(data)
    newFallingPiece(data)
    data.isGameOver = False
    data.isPaused = False
    data.score = 0
def mousePressed(event, data):
    # use event.x and event.y
    # Mouse input is intentionally unused in this Tetris implementation.
    pass
def keyPressed(event, data):
    """Map arrow keys to piece movement/rotation; 'r' restarts the game."""
    key = event.keysym
    if key == "Right":
        moveFallingPiece(data, 0, 1)
    elif key == "Left":
        moveFallingPiece(data, 0, -1)
    elif key == "Up":
        rotateFallingPiece(data)
    elif key == "Down":
        moveFallingPiece(data, 1, 0)
    elif key == "r":
        # restart: wipe the board and resume play
        data.board = make2dList(data.rows, data.cols, data.emptyColor)
        data.isGameOver = False
        data.isPaused = False
def timerFired(data):
    """One gravity tick: drop the piece one row; when it can no longer
    fall, lock it in, clear full rows, spawn the next piece, and detect
    game over (which also zeroes the score)."""
    if data.isPaused:
        return
    if moveFallingPiece(data, 1, 0) == False:
        placeFallingPiece(data)
        newFallingPiece(data)
        removeFullRows(data)
        if fallingPieceIsLegal(data) == False:
            data.isGameOver = True
            data.score = 0
### MODEL
def roundHalfUp(d):
    """Round *d* to the nearest int with ties going away from zero
    (unlike Python's built-in banker's rounding).

    See other rounding options here:
    https://docs.python.org/3/library/decimal.html#rounding-modes
    """
    quantized = decimal.Decimal(d).to_integral_value(
        rounding=decimal.ROUND_HALF_UP)
    return int(quantized)
def make2dList(rows, cols, n):
    """Return a rows x cols grid (list of independent row lists), every
    cell initialized to the value n."""
    return [[n] * cols for _ in range(rows)]
#from: http://www.cs.cmu.edu/~112/notes/notes-2d-lists.html
def getTetrisPiece(data):
    """Define the seven standard tetromino shapes and their draw colors.

    Each piece is a 2-D boolean grid; True marks a filled cell.  The
    colors list is parallel-indexed by convention but pieces and colors
    are chosen independently in newFallingPiece.
    """
    data.iPiece = [[ True, True, True, True]]
    data.jPiece = [[ True, False, False ],[ True, True, True]]
    data.lPiece = [[ False, False, True],[ True, True, True]]
    data.oPiece = [[ True, True],[ True, True]]
    data.sPiece = [[ False, True, True],[ True, True, False ]]
    data.tPiece = [[ False, True, False ],[ True, True, True]]
    data.zPiece = [[ True, True, False ],[ False, True, True]]
    data.tetrisPieces= ([data.iPiece, data.jPiece, data.lPiece,
                        data.oPiece, data.sPiece, data.tPiece, data.zPiece])
    data.tetrisPieceColors = ([ "red", "yellow", "magenta", "pink",
                        "cyan", "green", "orange"])
def getCellBounds(row,col,data): #finds coordinates for each cell
gridWidth= data.width - 2*data.margin
gridHeight= data.height - 2*data.margin
x0= data.margin + gridWidth * col / data.cols
x1= data.margin + gridWidth * (col+1) / data.cols
y0= data.margin + gridHeight * row / data.rows
y1= data.margin + gridHeight * (row+1) / data.rows
return x0, y0, x1, y1
def newFallingPiece(data): #chooses new piece and places it on board
data.fallingPiece= random.choice(data.tetrisPieces)
data.fallingPieceColor= random.choice(data.tetrisPieceColors)
data.fallingPieceRow=0
data.pieceCols= len(data.fallingPiece[0])//2
data.fallingPieceCol=data.cols//2- data.pieceCols
def fallingPieceIsLegal(data):
    """Return True iff every filled cell of the falling piece lies inside
    the board and over an empty board cell (no overlap with placed blocks)."""
    rows,cols=len(data.fallingPiece), len(data.fallingPiece[0])
    for row in range(rows):
        for col in range(cols):
            if data.fallingPiece[row][col] == True: #if a colored cell
                # reject if the piece's bounding box leaves the board on any
                # side, or this cell lands on a non-empty board cell
                if data.fallingPieceRow<0 or\
                data.fallingPieceCol>=data.cols-cols+1 or\
                data.fallingPieceCol<=-1 or\
                data.fallingPieceRow>=data.rows-rows+1 or\
                (data.board[data.fallingPieceRow+row][data.fallingPieceCol+col]
                !=data.emptyColor): #checks piece is in bounds
                    return False
    return True
def moveFallingPiece(data, drow, dcol):
    """Try to shift the falling piece by (drow, dcol).

    Returns True when the move is legal; otherwise undoes the shift and
    returns False.
    """
    data.fallingPieceRow += drow
    data.fallingPieceCol += dcol
    if not fallingPieceIsLegal(data):
        # revert the tentative move
        data.fallingPieceRow -= drow
        data.fallingPieceCol -= dcol
        return False
    return True
def rotateFallingPiece(data):
    """Rotate the falling piece 90 degrees counterclockwise about its center.

    If the rotated piece would be out of bounds or overlap placed blocks,
    the rotation is fully undone: the piece keeps its previous shape AND
    position.  (The original code kept the rotated shape and teleported
    the piece back to the top row instead, which visibly broke play.)
    """
    oldPiece = data.fallingPiece
    oldRow, oldCol = data.fallingPieceRow, data.fallingPieceCol
    oldRows, oldCols = len(oldPiece), len(oldPiece[0])
    newRows, newCols = oldCols, oldRows
    # keep the piece visually centered: its center cell should not move
    data.fallingPieceRow = oldRow + oldRows//2 - newRows//2
    data.fallingPieceCol = oldCol + oldCols//2 - newCols//2
    # build the rotated grid: old column `col` becomes new row (oldCols-1-col)
    newPiece = [[0]*oldRows for _ in range(oldCols)]
    for row in range(oldRows):
        for col in range(oldCols):
            if oldPiece[row][col] == True:
                newPiece[oldCols-1-col][row] = True
    data.fallingPiece = newPiece
    if fallingPieceIsLegal(data) != True:
        # illegal rotation: restore the previous shape and position
        data.fallingPiece = oldPiece
        data.fallingPieceRow = oldRow
        data.fallingPieceCol = oldCol
def placeFallingPiece(data):
rows,cols=len(data.fallingPiece), len(data.fallingPiece[0])
for row in range(rows):
for col in range(cols): #places piece colors on board
if data.fallingPiece[row][col]==True:
data.board[data.fallingPieceRow+row][data.fallingPieceCol+col]=\
(data.fallingPieceColor)
def removeFullRows(data):
newBoard=[]
for row in range(data.rows):
if "blue" not in data.board[row]: #checks for full rows
newBoard.insert(0,[data.emptyColor]*data.cols)
data.score+=1
if "blue" in data.board[row]: #saves rows that aren't full
newBoard.append(data.board[row])
data.board=newBoard
### DRAW
def drawGame(canvas, data):
    """Draw one frame of the game: background, board, then the falling piece."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill="orange")
    drawBoard(canvas, data)
    drawFallingPiece(canvas, data)
def drawBoard(canvas, data):
    """Draw every board cell in the color stored on the board."""
    for r in range(data.rows):
        for c in range(data.cols):
            drawCell(canvas, data, r, c, data.board[r][c])
def drawFallingPiece(canvas, data):
    """Draw the falling piece, or the game-over banner once the game ends.

    NOTE(review): this draw routine also mutates state -- it sets
    data.isPaused = True when the game is over, which is what stops
    timerFired from advancing the game.
    """
    if data.isGameOver==False:
        for row in range(len(data.fallingPiece)):
            for col in range(len(data.fallingPiece[0])):
                if data.fallingPiece[row][col] == True:#draws new piece
                    (drawCell(canvas,data,data.fallingPieceRow+
                    row,data.fallingPieceCol+col,data.fallingPieceColor))
    else:
        data.isPaused=True #is called when game is over
        (canvas.create_text(data.width//2, data.height//2,
        text= "GAME OVER. Hit 'r' to restart game."))
def drawCell(canvas, data, row, col, color):
    """Draw one cell: a black backing rectangle with the colored cell
    inset by 1px so a grid line shows between cells."""
    gap = 1
    x0, y0, x1, y1 = getCellBounds(row, col, data)
    canvas.create_rectangle(x0, y0, x1, y1, fill="black")
    canvas.create_rectangle(x0 + gap, y0 + gap, x1 - gap, y1 - gap, fill=color)
def drawScore(canvas, data):
    """Draw the current score in the top-left margin of the window."""
    label = "Score=" + str(data.score)
    canvas.create_text(data.width//4, data.margin//2, text=label)
def redrawAll(canvas, data):
    """Redraw the whole frame: the game field first, then the score on top."""
    drawGame(canvas, data)
    drawScore(canvas, data)
####################################
# use the run function as-is
####################################
def run(width=300, height=300):
    """Standard 15-112 tkinter animation framework: creates the window,
    wires mouse/key/timer events to the module-level handlers, and blocks
    in the tkinter main loop until the window is closed."""
    def redrawAllWrapper(canvas, data):
        # clear and repaint the whole canvas every frame
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 200 # milliseconds
    init(data)
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events
    root.bind("<Button-1>", lambda event:
                            mousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
def playTetris():
    """Compute the window size from the grid dimensions and start the game."""
    rows, cols = 15, 10
    margin, cellSize = 20, 40
    width = 2*margin + cols*cellSize
    height = 2*margin + rows*cellSize
    run(width, height)
##### TEST FCNS
def testMoveFallingPiece():
    """Smoke test for moveFallingPiece.

    NOTE(review): references a global `data` that is never defined at
    module level, so calling this raises NameError; the call at the bottom
    of the file is commented out accordingly.
    """
    print("testing moveFallingPiece")
    assert(moveFallingPiece(data,1,0) == True)
    assert(moveFallingPiece(data,0,-1) == True)
    assert(moveFallingPiece(data,-1,0) == False)
    print("passed")
playTetris()  # launch the game window (blocks until it is closed)
#testMoveFallingPiece()
import tortoise
class Music(tortoise.Model):
    """A music track stored on disk.

    NOTE(review): Tortoise ORM models conventionally subclass
    ``tortoise.models.Model``; confirm that ``tortoise.Model`` and
    ``tortoise.fields.SET_NULL`` resolve in the installed tortoise version.
    """
    # unique display name of the track
    name = tortoise.fields.CharField(max_length=63, unique=True)
    # track duration (presumably in seconds -- TODO confirm unit)
    duration = tortoise.fields.FloatField()
    # unique filesystem path to the audio file
    path = tortoise.fields.CharField(max_length=127, unique=True)
    # optional genre; the FK is nulled out when the Genre row is deleted
    genre = tortoise.fields.ForeignKeyField("app.Genre", null=True,
                                            on_delete=tortoise.fields.SET_NULL)
class Genre(tortoise.Model):
    """A music genre; referenced by Music.genre."""
    name = tortoise.fields.CharField(max_length=200)
|
def record(result):
    """Append each result entry to the log as "type,message,flag\\r\\n".

    `result` is a list of dicts with 'type', 'message' and 'flag' keys;
    missing keys are written as the string "None".
    """
    for entry in result:
        line = "%s,%s,%s\r\n" % (entry.get('type'), entry.get('message'),
                                 entry.get('flag'))
        read_file(line)
def read_file(read):
    """Append the given text to result_log.txt.

    Fix: the original opened the file and never closed it, leaking a file
    handle per call; the ``with`` block guarantees the handle is flushed
    and closed even if the write raises.
    """
    with open("result_log.txt", "a") as f:
        f.write(read)
|
# A. Winner
# n rounds of "name score" on stdin; a player's final total is their score
# after all rounds.  The winner is the player with the maximum final total
# who FIRST reached at least that total during the game.
n = int(input())
scores = dict()   # name -> running total
chrono = list()   # [total-after-this-round, name], in input order
for _ in range(n):
    inp = input().split()
    name = inp[0]
    score = int(inp[1])
    try:
        scores[name] += score
    except KeyError:
        # first time this player appears
        scores[name] = score
    chrono.append([scores[name], name])
mx = max(scores.values())
# players whose FINAL total equals the maximum
candidate_winners = [name for name, score in scores.items() if score == mx]
ans = ""
# replay the rounds: the first candidate to reach mx points wins
for x in chrono:
    if x[1] in candidate_winners and x[0] >= mx:
        ans = x[1]
        break
print(ans)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 12:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: rewrites the `render` choices on
    PageBlock and switches the default to 'extended_html'.

    NOTE: applied migrations are historical records -- do not edit the
    operations list after this has run against any database.
    """
    dependencies = [
        ('pages', '0013_auto_20170714_0541'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pageblock',
            name='render',
            field=models.CharField(choices=[('markdown', 'Markdown'), ('template', 'Django Template'), ('plain_text', 'Plain Text'), ('sanitized_html', 'Sanitized HTML'), ('bbcode', 'BBcode'), ('html', 'HTML'), ('magic_html', 'Magic HTML'), ('extended_html', 'Extended HTML')], default='extended_html', max_length=30),
        ),
    ]
|
# Import system modules
import arcpy, sys, math, os
from datetime import datetime
import time
from xml.dom.minidom import parse, parseString
#########################Variables################################
# Enter the location to your feature class that contains the features
# you wish to cache by.
# cacheFeatures = "C:/serverblog/blogCacheTest/personal.mdb/county"
# cacheFeatures = "C:/serverblog/blogCacheTest/file.gdb/county"
# cacheFeatures = "C:/serverblog/blogCacheTest/county.shp"
cacheFeatures = "D:/DemoDatas/CantonMap/Shp/admin.gdb/boder_p"
# Enter the cache configuration file (conf.xml) for your cached map service.
# This will give you an estimate of how many tiles will be generated for each extent.
# cacheConfig = parse('C:/arcgisserver/arcgiscache/mapservice/Layers/conf.xml')
cacheConfig = parse('C:/Program Files/ArcGIS/Server/TilingSchemes/ArcGIS_Online_Bing_Maps_Google_Maps.xml')
# Enter the name of your ArcGIS Server
server_name = "seanpc"
# Enter the name of your predefined Cached Service
# If your service is in a folder then the syntax is: foldername/servicename
# If a service USAMap was in the basemap folder the syntax would be:
# service_name = "basemap/USAMap"
service_name = "SampleWorldCities"
# Enter the name of your predefined cached map dataframe
data_frame = "Layers"
# Enter the layers you wish to cache, leave blank to cache all layers
layers = ""
# layers = "ushigh;counties;states"
# Enter scales you wish to cache. These should be similar to the scales
# that you have already cached your service at.
scales = "16000000;8000000;4000000;2000000;1000000;500000;250000;125000;64000;32000;16000;5000;2500;1200;600;300;150"
# Enter update mode. Recreate All Tiles, replaces all tiles.
# Recreate Empty Tiles, replaces only empty tiles.
update_mode = "Recreate All Tiles"
# Enter the number of SOCs that you wish to cache with
# This can be no more than the max set for the service.
thread_count = "2"
# Whether you want to use Antialiasing or not.
antialiasing = "ANTIALIASING"
# antialiasing = "NONE"
#########################Variables################################
# Use to get text from XMLDom of cache configuration file
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
# function that parsed cache configuration xml file and an extent and computes the
# number of rows and columns that should have been computed for each scale within
# the update extent.
def handleTileCacheInfo(tilecacheinfo, extent):
    """(Python 2) For each level of detail in the parsed tiling-scheme XML,
    compute how many tile columns and rows intersect `extent`, print a
    per-level breakdown, and return the total tile count over all levels."""
    MinX = extent.XMin
    MinY = extent.YMin
    MaxX = extent.XMax
    MaxY = extent.YMax
    # tile dimensions (pixels) and the scheme's tiling origin
    TileRows = int(getText(tilecacheinfo.getElementsByTagName("TileRows")[0].childNodes))
    TileCols = int(getText(tilecacheinfo.getElementsByTagName("TileCols")[0].childNodes))
    TileOrigin = tilecacheinfo.getElementsByTagName("TileOrigin")[0]
    TileOriginX = float(getText(TileOrigin.getElementsByTagName("X")[0].childNodes))
    TileOriginY = float(getText(TileOrigin.getElementsByTagName("Y")[0].childNodes))
    DPI = int(getText(tilecacheinfo.getElementsByTagName("DPI")[0].childNodes))
    LODInfos = tilecacheinfo.getElementsByTagName("LODInfos")[0]
    totalTiles = 0
    for lodinfo in LODInfos.childNodes:
        LevelID = getText(lodinfo.getElementsByTagName("LevelID")[0].childNodes)
        Scale = getText(lodinfo.getElementsByTagName("Scale")[0].childNodes)
        Resolution = float(getText(lodinfo.getElementsByTagName("Resolution")[0].childNodes))
        ## Compute number of tiles wide.
        # tile width in map units; first/last tile column covered by extent
        tileWidth = TileCols * Resolution
        tileColumn = math.floor((MinX - TileOriginX) / tileWidth)
        tileColumn_End = math.floor((MaxX - TileOriginX) / tileWidth)
        numTilesWideFromTile = tileColumn_End - tileColumn +1
        print "LevelID= " + str(LevelID)
        print "Scale= " + str(Scale)
        print "Resolution= " + str(Resolution)
        print "numTilesWideFromTile = " + str(numTilesWideFromTile)
        ## Compute number of tiles high.
        # note: tile rows count downward from the origin's Y
        tileHeight = TileRows * Resolution
        tileRow = math.floor((TileOriginY - MinY) / tileHeight)
        tileRow_End = math.floor((TileOriginY - MaxY) / tileHeight)
        numTilesHighFromTile = tileRow - tileRow_End +1
        print "numTilesHighFromTile = " + str(numTilesHighFromTile)
        print "totalTiles= " + str(numTilesWideFromTile * numTilesHighFromTile)
        totalTiles += numTilesWideFromTile * numTilesHighFromTile
        print ""
    return totalTiles
# check input featureclass to see if the "Cached" field exists. If it exists then
# proceed. If it does not then add "Cached" field to the feature class
# if arcpy.ListFields(cacheFeatures,"CACHED").Next():
# print "field Cached exists"
# else:
# print "field does not exist"
# arcpy.addfield (cacheFeatures, "CACHED", "TEXT")
# Describe featureClass and get the shape field name.
# (Python 2) Describe the feature class, then estimate the tile count for
# every feature's extent using the parsed tiling scheme.
desc = arcpy.Describe(cacheFeatures)
shapefieldname = desc.ShapeFieldName
# Create an update cursor on the featureclass for all features where the Cached field
# has not been set to 'yes'.
# if desc.DataType == "ShapeFile":
#     qString = '"CACHED" <> ' + "'yes'"
# else:
#     if cacheFeatures.find('.mdb') > 0:
#         qString = '[CACHED] IS NULL'
#     else:
#         qString = '"CACHED" IS NULL'
with arcpy.da.SearchCursor(cacheFeatures, "SHAPE@") as rows:
    # While row is not empty (until all features have been processed) get the feature for
    # the current row and use the getFeatureExtent function to get the feature extent. Use
    # the feature extent to call updateMapServerCache for the predefined scales and
    # cache parameters. If the cache finishes successfully then update the featureclass
    # so that the Cached field reads 'true'. If it does not finish successfully then the
    # script will stop. You can rerun the script with the current featureclass and it
    # pick up at the row it left off at.
    for row in rows:
        feat = row[0]
        constraining_extent = feat.extent
        #print "Updating envelope: " + str(row.GetValue(desc.OIDFieldName))
        print constraining_extent
        try:
            startTime = datetime.now()
#            arcpy.UpdateMapServerCache(server_name, service_name, data_frame, layers, constraining_extent, scales, update_mode, thread_count, antialiasing)
#            arcpy.UpdateMapServerCache_server(server_name, object_name, data_frame, layers, constraining_extent, scales, update_mode, thread_count, antialiasing)
#            print arcpy.GetMessages(1)
#            row.CACHED = "yes"
#            rows.UpdateRow(row)
#            endTime = datetime.now()
#            time_difference = endTime-startTime
            # dry-run: only estimate the tile count for this extent
            tileCount = handleTileCacheInfo(cacheConfig, constraining_extent)
            print "Total tiles generated for this extent: " + str(tileCount)
#            print 'elapsed', time_difference
#            print "Tiles generated per minute: " + str(tileCount/(time_difference.seconds / 60.0))
            print "Finished env update."
            print ""
        # NOTE(review): bare except hides the real error type; it reports
        # arcpy's message and aborts the whole script on any failure
        except:
            arcpy.AddMessage(arcpy.GetMessages(2))
            print arcpy.GetMessages(2)
            print "update failed, stop processing"
            del rows
            del arcpy
            sys.exit(1)
print "Update Complete"
#Programa
# Exercise: grow `valor` seven times, printing the value when it is even
# and its floor-half when it is odd.
valor = 0
contador = 0
while contador < 7:
    # NOTE(review): in Python 3 `valor / 2` is true division, so `valor`
    # becomes a float after the first iteration; the expected answers below
    # appear to assume integer arithmetic (Python 2 semantics) -- verify
    # which interpreter this exercise targets.
    valor = valor + (valor / 2) + 4
    if valor % 2 == 0:
        print (valor)
    else:
        print (valor//2)
    contador = contador + 1
#Resposta
# (the expected output is listed as bare integer literals; they are
# harmless no-op expression statements if this file is executed)
4
10
9
16
26
41
64
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 14:51:02 2019
@author: Suraj Pawar
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from scipy.interpolate import UnivariateSpline
from scipy.stats import norm
from numpy.random import seed
seed(1)
# from tensorflow import set_random_seed
# set_random_seed(2)
from utils import *
import os
import time as tm
import csv
import os
#import pydot
#%%
#Class of problem to solve 2D decaying homogeneous isotrpic turbulence
class DHIT:
    """Dataset builder for 2D decaying homogeneous isotropic turbulence.

    Loads coarse-grid snapshot files from disk, scales every field to
    (-1, 1), and assembles CNN training/testing tensors whose channel
    layout depends on `ifeatures` and `ilabel`.

    NOTE(review): when ilabel == 2, gen_train_data/gen_test_data read
    self.t11/self.t12/self.t22, which are never loaded in __init__, so
    that path raises AttributeError -- confirm only ilabel == 1 is used.
    """
    def __init__(self,nx,ny,nxf,nyf,re,freq,n_snapshots,n_snapshots_train,n_snapshots_test,
                 istencil,ifeatures,ilabel):
        '''
        initialize the DHIT class

        Inputs
        ------
        n_snapshots : number of snapshots available
        nx,ny : dimension of the snapshot
        nxf,nyf : fine (DNS) grid dimension, used only in the data path
        re : Reynolds number, used only in the data path
        ifeatures : 1 = (wc, sc); 2 = adds (kwc, ksc); 3 = adds derivatives
        ilabel : 1 = SGS source term pi; 2 = stress components (see class NOTE)
        '''
        self.nx = nx
        self.ny = ny
        self.nxf = nxf
        self.nyf = nyf
        self.re = re
        self.freq = freq
        self.n_snapshots = n_snapshots
        self.n_snapshots_train = n_snapshots_train
        self.n_snapshots_test = n_snapshots_test
        self.istencil = istencil
        self.ifeatures = ifeatures
        self.ilabel = ilabel
        # per-field (max, min) pairs recorded by scale_data for unscaling later
        self.max_min = np.zeros(shape=(15, 2), dtype='double')
        self.wc = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
        self.sc = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
        self.pi = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
        if self.ifeatures >= 2:
            self.kwc = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.ksc = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
        if self.ifeatures == 3:
            self.wcx = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.wcy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.wcxx = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.wcyy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.wcxy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.scx = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.scy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.scxx = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.scyy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
            self.scxy = np.zeros(shape=(self.n_snapshots, self.nx+1, self.ny+1), dtype='double')
        directory = f'../KT_DNS/solution_{nxf}_{nx}_{re:0.2e}/apriori'
        # snapshot files are 1-indexed on disk; arrays are 0-indexed
        for m in range(1,self.n_snapshots+1):
            file_input = os.path.join(directory, f'ws_{m}.npz')
            data_input = np.load(file_input)
            self.wc[m-1,:,:] = data_input['wc']
            self.sc[m-1,:,:] = data_input['sc']
            self.pi[m-1,:,:] = data_input['pi']
            if self.ifeatures >= 2:
                self.kwc[m-1,:,:] = data_input['kw']
                self.ksc[m-1,:,:] = data_input['ks']
            if self.ifeatures == 3:
                self.wcx[m-1,:,:] = data_input['wcx']
                self.wcy[m-1,:,:] = data_input['wcy']
                self.wcxx[m-1,:,:] = data_input['wcxx']
                self.wcyy[m-1,:,:] = data_input['wcyy']
                self.wcxy[m-1,:,:] = data_input['wcxy']
                self.scx[m-1,:,:] = data_input['scx']
                self.scy[m-1,:,:] = data_input['scy']
                self.scxx[m-1,:,:] = data_input['scxx']
                self.scyy[m-1,:,:] = data_input['scyy']
                self.scxy[m-1,:,:] = data_input['scxy']
        self.scale_data()
        self.x_train,self.y_train = self.gen_train_data()
        self.x_test,self.y_test = self.gen_test_data()
    def scale_data(self):
        '''
        scaling the data between (-1,1) using (2x-(xmax+xmin))/(xmax-xmin)

        The per-field (max, min) pairs are stored in self.max_min (row 14
        holds pi) so predictions can be unscaled later.
        '''
        self.max_min[0,0], self.max_min[0,1] = np.max(self.wc), np.min(self.wc)
        self.max_min[1,0], self.max_min[1,1] = np.max(self.sc), np.min(self.sc)
        if self.ifeatures >= 2:
            self.max_min[2,0], self.max_min[2,1] = np.max(self.kwc), np.min(self.kwc)
            self.max_min[3,0], self.max_min[3,1] = np.max(self.ksc), np.min(self.ksc)
        if self.ifeatures == 3:
            self.max_min[4,0], self.max_min[4,1] = np.max(self.wcx), np.min(self.wcx)
            self.max_min[5,0], self.max_min[5,1] = np.max(self.wcy), np.min(self.wcy)
            self.max_min[6,0], self.max_min[6,1] = np.max(self.wcxx), np.min(self.wcxx)
            self.max_min[7,0], self.max_min[7,1] = np.max(self.wcyy), np.min(self.wcyy)
            self.max_min[8,0], self.max_min[8,1] = np.max(self.wcxy), np.min(self.wcxy)
            self.max_min[9,0], self.max_min[9,1] = np.max(self.scx), np.min(self.scx)
            self.max_min[10,0], self.max_min[10,1] = np.max(self.scy), np.min(self.scy)
            self.max_min[11,0], self.max_min[11,1] = np.max(self.scxx), np.min(self.scxx)
            self.max_min[12,0], self.max_min[12,1] = np.max(self.scyy), np.min(self.scyy)
            self.max_min[13,0], self.max_min[13,1] = np.max(self.scxy), np.min(self.scxy)
        self.max_min[14,0], self.max_min[14,1] = np.max(self.pi), np.min(self.pi)
        self.wc = (2.0*self.wc - (np.max(self.wc) + np.min(self.wc)))/(np.max(self.wc) - np.min(self.wc))
        self.sc = (2.0*self.sc - (np.max(self.sc) + np.min(self.sc)))/(np.max(self.sc) - np.min(self.sc))
        if self.ifeatures >= 2:
            self.kwc = (2.0*self.kwc - (np.max(self.kwc) + np.min(self.kwc)))/(np.max(self.kwc) - np.min(self.kwc))
            self.ksc = (2.0*self.ksc - (np.max(self.ksc) + np.min(self.ksc)))/(np.max(self.ksc) - np.min(self.ksc))
        if self.ifeatures == 3:
            self.wcx = (2.0*self.wcx - (np.max(self.wcx) + np.min(self.wcx)))/(np.max(self.wcx) - np.min(self.wcx))
            self.wcy = (2.0*self.wcy - (np.max(self.wcy) + np.min(self.wcy)))/(np.max(self.wcy) - np.min(self.wcy))
            self.wcxx = (2.0*self.wcxx - (np.max(self.wcxx) + np.min(self.wcxx)))/(np.max(self.wcxx) - np.min(self.wcxx))
            self.wcyy = (2.0*self.wcyy - (np.max(self.wcyy) + np.min(self.wcyy)))/(np.max(self.wcyy) - np.min(self.wcyy))
            self.wcxy = (2.0*self.wcxy - (np.max(self.wcxy) + np.min(self.wcxy)))/(np.max(self.wcxy) - np.min(self.wcxy))
            self.scx = (2.0*self.scx - (np.max(self.scx) + np.min(self.scx)))/(np.max(self.scx) - np.min(self.scx))
            self.scy = (2.0*self.scy - (np.max(self.scy) + np.min(self.scy)))/(np.max(self.scy) - np.min(self.scy))
            self.scxx = (2.0*self.scxx - (np.max(self.scxx) + np.min(self.scxx)))/(np.max(self.scxx) - np.min(self.scxx))
            self.scyy = (2.0*self.scyy - (np.max(self.scyy) + np.min(self.scyy)))/(np.max(self.scyy) - np.min(self.scyy))
            self.scxy = (2.0*self.scxy - (np.max(self.scxy) + np.min(self.scxy)))/(np.max(self.scxy) - np.min(self.scxy))
        self.pi = (2.0*self.pi - (np.max(self.pi) + np.min(self.pi)))/(np.max(self.pi) - np.min(self.pi))
    def gen_train_data(self):
        '''
        data generation for training and testing CNN model

        Returns (x_train, y_train) with a channel count determined by
        self.ifeatures (2, 4 or 12) and self.ilabel (1 or 3).
        '''
        if self.ifeatures == 1:
            x_train = np.zeros(shape=(self.n_snapshots_train, self.nx+1, self.ny+1, 2), dtype='double')
        elif self.ifeatures == 2:
            x_train = np.zeros(shape=(self.n_snapshots_train, self.nx+1, self.ny+1, 4), dtype='double')
        elif self.ifeatures == 3:
            x_train = np.zeros(shape=(self.n_snapshots_train, self.nx+1, self.ny+1, 12), dtype='double')
        if self.ilabel == 1:
            y_train = np.zeros(shape=(self.n_snapshots_train, self.nx+1, self.ny+1, 1), dtype='double')
        elif self.ilabel == 2:
            y_train = np.zeros(shape=(self.n_snapshots_train, self.nx+1, self.ny+1, 3), dtype='double')
        for m in range(1,self.n_snapshots_train+1):
            #m = p*self.freq
            if self.ifeatures == 1:
                x_train[m-1,:,:,0] = self.wc[m-1]
                x_train[m-1,:,:,1] = self.sc[m-1]
            if self.ifeatures == 2:
                x_train[m-1,:,:,0] = self.wc[m-1]
                x_train[m-1,:,:,1] = self.sc[m-1]
                x_train[m-1,:,:,2] = self.kwc[m-1]
                x_train[m-1,:,:,3] = self.ksc[m-1]
            if self.ifeatures == 3:
                x_train[m-1,:,:,0] = self.wc[m-1]
                x_train[m-1,:,:,1] = self.sc[m-1]
                x_train[m-1,:,:,2] = self.wcx[m-1]
                x_train[m-1,:,:,3] = self.wcy[m-1]
                x_train[m-1,:,:,4] = self.wcxx[m-1]
                x_train[m-1,:,:,5] = self.wcyy[m-1]
                x_train[m-1,:,:,6] = self.wcxy[m-1]
                x_train[m-1,:,:,7] = self.scx[m-1]
                x_train[m-1,:,:,8] = self.scy[m-1]
                x_train[m-1,:,:,9] = self.scxx[m-1]
                x_train[m-1,:,:,10] = self.scyy[m-1]
                x_train[m-1,:,:,11] = self.scxy[m-1]
            if self.ilabel == 1:
                y_train[m-1,:,:,0] = self.pi[m-1]
            elif self.ilabel == 2:
                # NOTE(review): t11/t12/t22 are never loaded -- see class docstring
                y_train[m-1,:,:,0] = self.t11[m-1]
                y_train[m-1,:,:,1] = self.t12[m-1]
                y_train[m-1,:,:,2] = self.t22[m-1]
        return x_train, y_train
    def gen_test_data(self):
        '''Build a single-snapshot test pair from snapshot n_snapshots_test,
        with the same channel layout as gen_train_data.'''
        if self.ifeatures == 1:
            x_test = np.zeros(shape=(1, self.nx+1, self.ny+1, 2), dtype='double')
        elif self.ifeatures == 2:
            x_test = np.zeros(shape=(1, self.nx+1, self.ny+1, 4), dtype='double')
        elif self.ifeatures == 3:
            x_test = np.zeros(shape=(1, self.nx+1, self.ny+1, 12), dtype='double')
        if self.ilabel == 1:
            y_test = np.zeros(shape=(1, self.nx+1, self.ny+1, 1), dtype='double')
        elif self.ilabel == 2:
            y_test = np.zeros(shape=(1, self.nx+1, self.ny+1, 3), dtype='double')
        m = self.n_snapshots_test
        if self.ifeatures == 1:
            x_test[0,:,:,0] = self.wc[m-1]
            x_test[0,:,:,1] = self.sc[m-1]
        if self.ifeatures == 2:
            x_test[0,:,:,0] = self.wc[m-1]
            x_test[0,:,:,1] = self.sc[m-1]
            x_test[0,:,:,2] = self.kwc[m-1]
            x_test[0,:,:,3] = self.ksc[m-1]
        if self.ifeatures == 3:
            x_test[0,:,:,0] = self.wc[m-1]
            x_test[0,:,:,1] = self.sc[m-1]
            x_test[0,:,:,2] = self.wcx[m-1]
            x_test[0,:,:,3] = self.wcy[m-1]
            x_test[0,:,:,4] = self.wcxx[m-1]
            x_test[0,:,:,5] = self.wcyy[m-1]
            x_test[0,:,:,6] = self.wcxy[m-1]
            x_test[0,:,:,7] = self.scx[m-1]
            x_test[0,:,:,8] = self.scy[m-1]
            x_test[0,:,:,9] = self.scxx[m-1]
            x_test[0,:,:,10] = self.scyy[m-1]
            x_test[0,:,:,11] = self.scxy[m-1]
        if self.ilabel == 1:
            y_test[0,:,:,0] = self.pi[m-1]
        elif self.ilabel == 2:
            # NOTE(review): t11/t12/t22 are never loaded -- see class docstring
            y_test[0,:,:,0] = self.t11[m-1]
            y_test[0,:,:,1] = self.t12[m-1]
            y_test[0,:,:,2] = self.t22[m-1]
        return x_test, y_test
#%%
#A Convolutional Neural Network class
class CNN:
    """CNN mapping coarse-field snapshots (plus kernel feature channels)
    to the sub-grid-scale source term.

    The active architecture is the two-input physics-guided model built by
    CNN_PGML; the single-input CNN builder is kept for reference.

    Fix: the output Conv2D layers in CNN and CNN_PGML used a bare ``nco``,
    which silently resolved to a module-level global at call time; they
    now use ``self.nco``.  The dead ``model = Sequential()`` line in CNN
    (immediately shadowed) was removed.
    """
    def __init__(self,x_train_f,x_train_k,y_train,nx,ny,ncf,nck,nco):
        '''
        initialize the CNN class

        Inputs
        ------
        x_train_f : field input features (snapshot images)
        x_train_k : kernel input features, concatenated mid-network
        y_train : output labels
        nx,ny : dimension of the snapshot
        ncf : number of field input channels
        nck : number of kernel input channels
        nco : number of output labels
        '''
        self.x_train_f = x_train_f
        self.x_train_k = x_train_k
        self.y_train = y_train
        self.nx = nx
        self.ny = ny
        self.ncf = ncf
        self.nck = nck
        self.nco = nco
        # self.model = self.CNN()
        self.model = self.CNN_PGML()
    def coeff_determination(self,y_true, y_pred):
        '''R^2 (coefficient of determination) metric; 1.0 means a perfect fit.'''
        SS_res = K.sum(K.square( y_true-y_pred ))
        SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
        return ( 1 - SS_res/(SS_tot + K.epsilon()) )
    def CNN(self):
        '''
        define the single-input CNN model (currently unused)

        Output
        ------
        model: CNN model with defined activation function, number of layers
        '''
        input_img = Input(shape=(self.nx,self.ny,self.ncf))
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(input_img)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        encoded = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(encoded)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        # linear output layer, one channel per label (was a bare `nco` global)
        decoded = Conv2D(self.nco, (4, 4), activation='linear', padding='same')(x)
        model = Model(input_img, decoded)
        return model
    def CNN_PGML(self):
        '''
        define the physics-guided two-input CNN model

        The kernel feature channels are injected (concatenated) after four
        convolutional layers over the field input.

        Output
        ------
        model: CNN model with defined activation function, number of layers
        '''
        field = Input(shape=(self.nx,self.ny,self.ncf))
        kernels = Input(shape=(self.nx,self.ny,self.nck))
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(field)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = concatenate(inputs=[x, kernels])
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
        # linear output layer, one channel per label (was a bare `nco` global)
        sgs = Conv2D(self.nco, (4, 4), activation='linear', padding='same')(x)
        model = Model(inputs=[field, kernels], outputs=sgs)
        return model
    def CNN_compile(self,optimizer):
        '''
        compile the CNN model with MSE loss and the R^2 metric

        Inputs
        ------
        optimizer: optimizer of the CNN
        '''
        self.model.compile(loss='mean_squared_error', optimizer=optimizer,metrics=[self.coeff_determination])
    def CNN_train(self,epochs,batch_size):
        '''
        train the CNN model (20% of the data held out for validation)

        Inputs
        ------
        epochs: number of epochs of the training
        batch_size: batch size of the training

        Output
        ------
        history_callback: return the loss history of CNN model training
        '''
        history_callback = self.model.fit(x = [self.x_train_f,self.x_train_k],
                                          y = self.y_train,
                                          epochs=epochs,batch_size=batch_size,
                                          validation_split= 0.2,)
        return history_callback
    def CNN_history(self, history_callback):
        '''
        get the training and validation loss/metric history

        Inputs
        ------
        history_callback: loss history of CNN model training

        Output
        ------
        loss, val_loss: training/validation loss histories
        mse, val_mse: training/validation R^2 metric histories
        '''
        loss = history_callback.history["loss"]
        val_loss = history_callback.history["val_loss"]
        mse = history_callback.history['coeff_determination']
        val_mse = history_callback.history['val_coeff_determination']
        return loss, val_loss, mse, val_mse
    def CNN_predict(self,x_test_sc_f,x_test_sc_k):
        '''
        predict the label for input features, timing three repeated runs

        Inputs
        ------
        x_test_sc_f, x_test_sc_k: scaled test inputs (same shapes as training)

        Output
        ------
        y_test: predicted output (same shape as the training labels)
        t1, t2: wall time of the first and second single predictions
        t3: wall time of two back-to-back predictions
        '''
        testing_time_init1 = tm.time()
        y_test = self.model.predict(x=[x_test_sc_f,x_test_sc_k])
        t1 = tm.time() - testing_time_init1
        testing_time_init2 = tm.time()
        y_test = self.model.predict(x=[x_test_sc_f,x_test_sc_k])
        t2 = tm.time() - testing_time_init2
        testing_time_init3 = tm.time()
        y_test = self.model.predict(x=[x_test_sc_f,x_test_sc_k])
        y_test = self.model.predict(x=[x_test_sc_f,x_test_sc_k])
        t3 = tm.time() - testing_time_init3
        return y_test,t1,t2,t3
    def CNN_predict1(self,ftest,ist,ift,nsm):
        '''
        load a previously saved model from disk and predict, with timings

        Inputs
        ------
        ftest: test data (has same shape as input features used for training)
        ist, ift, nsm: stencil/feature/snapshot identifiers naming the saved model

        Output
        ------
        y_test: prediction from the loaded model (from the second timed run)
        t1, t2: wall time of the first and second single predictions
        t3: wall time of two back-to-back predictions
        '''
        filepath = 'tcfd_paper_data/new_data/cnn_'+str(ist)+'_'+str(ift)+'_'+str(nsm)
        custom_model = load_model(filepath+'/CNN_model.hd5',
                                  custom_objects={'coeff_determination': self.coeff_determination})
        testing_time_init1 = tm.time()
        y_test = custom_model.predict(ftest)
        t1 = tm.time() - testing_time_init1
        testing_time_init2 = tm.time()
        y_test = custom_model.predict(ftest)
        t2 = tm.time() - testing_time_init2
        testing_time_init3 = tm.time()
        y_test1 = custom_model.predict(ftest)
        y_test2 = custom_model.predict(ftest)
        t3 = tm.time() - testing_time_init3
        return y_test,t1,t2,t3
    def CNN_info(self):
        '''
        print the CNN model summary
        '''
        self.model.summary()
        # plot_model(self.model, to_file='cnn_model.png', show_shapes=True)
    def CNN_save(self,model_name):
        '''
        save the learned parameters (weights and bias)

        Inputs
        ------
        model_name: name of the file to be saved (.hd5 file)
        '''
        self.model.save(model_name)
#%%
# generate training and testing data for CNN
# cnn.txt holds one tab-separated config value per line, in the fixed
# order parsed below.
l1 = []
with open('cnn.txt') as f:
    for l in f:
        l1.append((l.strip()).split("\t"))
nxf, nyf = np.int64(l1[0][0]), np.int64(l1[0][0])
nx, ny = np.int64(l1[1][0]), np.int64(l1[1][0])
n_snapshots = np.int64(l1[2][0])
n_snapshots_train = np.int64(l1[3][0])
n_snapshots_test = np.int64(l1[4][0])
freq = np.int64(l1[5][0])
istencil = np.int64(l1[6][0])      # 1: nine point, 2: single point
ifeatures = np.int64(l1[7][0])     # 1: 6 features, 2: 2 features
ilabel = np.int64(l1[8][0])        # 1: SGS (tau), 2: eddy-viscosity (nu)
re = np.float64(l1[9][0])
# Build the dataset, split the inputs into field channels (first two) and
# kernel channels (the rest), then construct and compile the CNN.
obj = DHIT(nx=nx,ny=ny,nxf=nxf,nyf=nyf,re=re,freq=freq,n_snapshots=n_snapshots,n_snapshots_train=n_snapshots_train,
           n_snapshots_test=n_snapshots_test,istencil=istencil,ifeatures=ifeatures,ilabel=ilabel)
max_min = obj.max_min
x_train_sc,y_train_sc = obj.x_train,obj.y_train
x_test_sc,y_test_sc = obj.x_test,obj.y_test
# channels 0-1 are the (wc, sc) fields; remaining channels feed the
# PGML concatenation branch
x_train_sc_f = x_train_sc[:,:,:,:2]
x_train_sc_k = x_train_sc[:,:,:,2:]
x_test_sc_f = x_test_sc[:,:,:,:2]
x_test_sc_k = x_test_sc[:,:,:,2:]
nt, nx_train, ny_train, ncf = x_train_sc_f.shape
_, _, _, nck = x_train_sc_k.shape
_, _, _, nco = y_train_sc.shape
#%%
# train the CNN model and predict for the test data
model = CNN(x_train_sc_f,x_train_sc_k,y_train_sc,nx_train,ny_train,ncf,nck,nco)
model.CNN_info()
model.CNN_compile(optimizer='adam')
#%%
# Train the model, persist the loss history, the trained weights and the
# scaling table, then time test-set predictions and log the CPU times.
training_time_init = tm.time()
history_callback = model.CNN_train(epochs=5,batch_size=512)
total_training_time = tm.time() - training_time_init
loss, val_loss, mse, val_mse = model.CNN_history(history_callback)
#%%
directory = f'nn_history/TF2_{nx}/'
if not os.path.exists(directory):
    os.makedirs(directory)
nn_history(loss, val_loss, mse, val_mse, istencil, ifeatures, n_snapshots_train, directory)
#%%
filename = os.path.join(directory, f'CNN_model_{ifeatures}')
model.CNN_save(filename)
# per-field (max, min) pairs needed to unscale predictions later
filename = os.path.join(directory, f'scaling.npy')
np.save(filename,max_min)
#testing_time_init = tm.time()
y_pred_sc, t1, t2, t3 = model.CNN_predict(x_test_sc_f,x_test_sc_k)
#total_testing_time = tm.time() - testing_time_init
filename = os.path.join(directory, 'cpu_time.csv')
with open(filename, 'a', newline='') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerow(['CNN',istencil, ifeatures, n_snapshots_train, total_training_time, t1, t2, t3])
#%% unscale the predicted data
# Buffers for the unscaled true/predicted fields (3 output channels).
y_test = np.zeros(shape=(1, nx+1, ny+1, 3), dtype='double')
y_pred = np.zeros(shape=(1, nx+1, ny+1, 3), dtype='double')
#%%
# Invert the min-max scaling: x = 0.5*(x_sc*(max - min) + (max + min)).
# NOTE(review): for ilabel == 1 only channel 0 is unscaled (range(1)) while
# ilabel == 2 unscales all 3; given the config comment (1: tau, which has
# three components; 2: nu, which has one) this looks swapped -- confirm
# against DHIT's scaling layout before relying on it.
if ilabel == 1:
    for i in range(1):
        y_pred[0,:,:,i] = 0.5*(y_pred_sc[0,:,:,i]*(max_min[-1,0] - max_min[-1,1]) + (max_min[-1,0] + max_min[-1,1]))
        y_test[0,:,:,i] = 0.5*(y_test_sc[0,:,:,i]*(max_min[-1,0] - max_min[-1,1]) + (max_min[-1,0] + max_min[-1,1]))
elif ilabel == 2:
    # rows i+13 of max_min hold the scaling extrema of the output channels
    for i in range(3):
        y_pred[0,:,:,i] = 0.5*(y_pred_sc[0,:,:,i]*(max_min[i+13,0] - max_min[i+13,1]) + (max_min[i+13,0] + max_min[i+13,1]))
        y_test[0,:,:,i] = 0.5*(y_test_sc[0,:,:,i]*(max_min[i+13,0] - max_min[i+13,1]) + (max_min[i+13,0] + max_min[i+13,1]))
nn = 2
export_results(y_test[0], y_pred[0], ilabel, istencil, ifeatures, n_snapshots_train, nxf, nx, nn, directory)
#%%
# PDF (log-scale histogram) of the single output channel for ilabel == 1,
# comparing the true field against the CNN prediction.
if ilabel == 1:
    num_bins = 64
    fig, axs = plt.subplots(1,1,figsize=(6,4))
    axs.set_yscale('log')
    # histograms clipped to +/- 4 standard deviations of the true field
    axs.hist(y_test[0,:,:,0].flatten(), num_bins, histtype='step', alpha=1, color='r',zorder=5,
             linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label="True")
    axs.hist(y_pred[0,:,:,0].flatten(), num_bins, histtype='step', alpha=1,color='b',zorder=10,
             linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label="CNN")
    # x ticks at whole multiples of sigma, labelled -4*sigma .. 4*sigma
    x_ticks = np.arange(-4.1*np.std(y_test[0,:,:,0]), 4.1*np.std(y_test[0,:,:,0]), np.std(y_test[0,:,:,0]))
    x_labels = [r"${} \sigma$".format(i) for i in range(-4,5)]
    axs.set_xlabel(r"$\tau_{11}$")
    axs.set_ylabel("PDF")
    axs.set_xticks(x_ticks)
    axs.set_xticklabels(x_labels)
    fig.tight_layout()
    fig.subplots_adjust(hspace=0.5, bottom=0.25)
    line_labels = ["True", "CNN"]
    plt.figlegend( line_labels, loc = 'lower center', borderaxespad=0.3, ncol=3, labelspacing=0., prop={'size': 13} )
    plt.show()
    filename = os.path.join(directory, f'ts_cnn_{istencil}_{ifeatures}_{n_snapshots_train}.png')
    fig.savefig(filename, bbox_inches = 'tight', dpi=200)
elif ilabel == 2:
num_bins = 64
fig, axs = plt.subplots(1,2,figsize=(6,5))
axs[0].set_yscale('log')
axs[1].set_yscale('log')
axs[2].set_yscale('log')
# the histogram of the data
axs[0].hist(y_test[0,:,:,0].flatten(), num_bins, histtype='step', alpha=1, color='r',zorder=5,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label="True")
axs[0].hist(t11s.flatten(), num_bins, histtype='step', alpha=1,color='g',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label=r"Dynamic")
axs[0].hist(y_pred[0,:,:,0].flatten(), num_bins, histtype='step', alpha=1,color='b',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label="CNN")
#axs[0].hist(t11st.flatten(), num_bins, histtype='step', alpha=1,color='k',zorder=10,
# linewidth=2.0,range=(-4*np.std(y_test[0,:,:,0]),4*np.std(y_test[0,:,:,0])),density=True,label=r"$C_s=0.18$")
x_ticks = np.arange(-4*np.std(y_test[0,:,:,0]), 4.1*np.std(y_test[0,:,:,0]), np.std(y_test[0,:,:,0]))
x_labels = [r"${} \sigma$".format(i) for i in range(-4,5)]
axs[0].set_xlabel(r"$\tau_{11}$")
axs[0].set_ylabel("PDF")
axs[0].set_xticks(x_ticks)
axs[0].set_xticklabels(x_labels)
#------#
axs[1].hist(y_test[0,:,:,1].flatten(), num_bins, histtype='step', alpha=1, color='r',zorder=5,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,1]),4*np.std(y_test[0,:,:,1])),density=True,label="True")
axs[1].hist(t12s.flatten(), num_bins, histtype='step', alpha=1,color='g',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,1]),4*np.std(y_test[0,:,:,1])),density=True,label=r"Dynamic")
axs[1].hist(y_pred[0,:,:,1].flatten(), num_bins, histtype='step', alpha=1,color='b',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,1]),4*np.std(y_test[0,:,:,1])),density=True,label="CNN")
#axs[1].hist(t12st.flatten(), num_bins, histtype='step', alpha=1,color='k',zorder=10,
# linewidth=2.0,range=(-4*np.std(y_test[0,:,:,1]),4*np.std(y_test[0,:,:,1])),density=True,label=r"$C_s=0.18$")
x_ticks = np.arange(-4*np.std(y_test[0,:,:,1]), 4.1*np.std(y_test[0,:,:,1]), np.std(y_test[0,:,:,1]))
x_labels = [r"${} \sigma$".format(i) for i in range(-4,5)]
axs[1].set_xlabel(r"$\tau_{12}$")
#axs[1].set_ylabel("PDF")
axs[1].set_xticks(x_ticks)
axs[1].set_xticklabels(x_labels)
#------#
axs[2].hist(y_test[0,:,:,2].flatten(), num_bins, histtype='step', alpha=1, color='r',zorder=5,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,2]),4*np.std(y_test[0,:,:,2])),density=True,label="True")
axs[2].hist(t22s.flatten(), num_bins, histtype='step', alpha=1,color='g',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,2]),4*np.std(y_test[0,:,:,2])),density=True,label=r"Dynamic")
axs[2].hist(y_pred[0,:,:,2].flatten(), num_bins, histtype='step', alpha=1,color='b',zorder=10,
linewidth=2.0,range=(-4*np.std(y_test[0,:,:,2]),4*np.std(y_test[0,:,:,2])),density=True,label="CNN")
#axs[2].hist(t22st.flatten(), num_bins, histtype='step', alpha=1,color='k',zorder=10,
# linewidth=2.0,range=(-4*np.std(y_test[0,:,:,2]),4*np.std(y_test[0,:,:,2])),density=True,label=r"$C_s=0.18$")
x_ticks = np.arange(-4*np.std(y_test[0,:,:,2]), 4.1*np.std(y_test[0,:,:,2]), np.std(y_test[0,:,:,2]))
x_labels = [r"${} \sigma$".format(i) for i in range(-4,5)]
axs[2].set_xlabel(r"$\tau_{22}$")
#axs[2].set_ylabel("PDF")
axs[2].set_xticks(x_ticks)
axs[2].set_xticklabels(x_labels)
fig.tight_layout()
fig.subplots_adjust(hspace=0.5, bottom=0.25)
line_labels = ["True", "DSM", "CNN"]
plt.figlegend( line_labels, loc = 'lower center', borderaxespad=0.3, ncol=3, labelspacing=0., prop={'size': 13} )
plt.show()
filename = os.path.join(directory, f'ts_cnn_{istencil}_{ifeatures}_{n_snapshots_train}.png')
fig.savefig(filename, bbox_inches = 'tight', dpi=200)
#%%
# Contour plot of the first shear-stress component: true vs CNN side by side,
# sharing one horizontal colorbar below the panels.
fig, axs = plt.subplots(1,2,sharey=True,figsize=(11,5))
cbarticks = np.arange(-50,60,10)
cs = axs[0].contourf(y_test[0,:,:,0].T, cbarticks, cmap = 'jet', )
axs[0].text(0.4, -0.1, 'True', transform=axs[0].transAxes, fontsize=14, va='top')
cs = axs[1].contourf(y_pred[0,:,:,0].T, cbarticks, cmap = 'jet', )
axs[1].text(0.4, -0.1, 'CNN', transform=axs[1].transAxes, fontsize=14, va='top')
fig.tight_layout()
fig.subplots_adjust(bottom=0.15)
# dedicated axes for the shared colorbar, placed below the figure
cbar_ax = fig.add_axes([0.22, -0.05, 0.6, 0.04])
fig.colorbar(cs, cax=cbar_ax, ticks=cbarticks, orientation='horizontal')
plt.show()
# (fixed the duplicated "filename = filename = ..." assignment)
filename = os.path.join(directory, f'contour_{istencil}_{ifeatures}_{n_snapshots_train}.png')
fig.savefig(filename, bbox_inches = 'tight', dpi=200)
#%%
def coeff_determination(y_true, y_pred):
    """Coefficient of determination (R^2) expressed in Keras backend ops.

    Returns 1 - SS_res/SS_tot, with K.epsilon() guarding against a zero
    total sum of squares.
    """
    residual = K.sum(K.square(y_true - y_pred))
    total = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual / (total + K.epsilon())
#%%
# nx = 128
# ny = 128
# n_field = 2
# n_kernels = 2
# nco = 1
# model = Sequential()
# field = Input(shape=(nx,ny,n_field))
# kernels = Input(shape=(nx,ny,n_kernels))
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(field)
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
# x = concatenate(inputs=[x, kernels])
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
# x = Conv2D(16, (4, 4), activation='relu', padding='same')(x)
# sgs = Conv2D(nco, (4, 4), activation='linear', padding='same')(x)
# model = Model(inputs=[field, kernels], outputs=sgs) |
# THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume the the input is square and contains at
# least one row and column.
# Valid 3x3 square: every row and column holds 1..3 exactly once.
correct = [[1, 2, 3],
           [2, 3, 1],
           [3, 1, 2]]
# Invalid: repeated values within rows and columns.
incorrect = [[1, 2, 3, 4],
             [2, 3, 1, 3],
             [3, 1, 2, 3],
             [4, 4, 4, 4]]
# Invalid: rows are permutations of 1..4 but columns repeat values.
incorrect2 = [[1, 2, 3, 4],
              [2, 3, 1, 4],
              [4, 1, 2, 3],
              [3, 4, 1, 2]]
# Invalid: contains a value (6) outside 1..5.
incorrect3 = [[1, 2, 3, 4, 5],
              [2, 3, 1, 5, 6],
              [4, 5, 2, 1, 3],
              [3, 4, 5, 2, 1],
              [5, 6, 4, 3, 2]]
# Invalid: entries must be whole numbers 1..n, not letters.
incorrect4 = [['a', 'b', 'c'],
              ['b', 'c', 'a'],
              ['c', 'a', 'b']]
# Invalid: entries must be whole numbers, not floats.
incorrect5 = [[1, 1.5],
              [1.5, 1]]
# Idea: every number from 1 to p appears in each row and each column exactly once
# iterate row and col, check occurance
def check_sudoku(p):
    """Return True iff p is a valid n x n sudoku solution.

    Valid means every row and every column contains each of the whole
    numbers 1..n exactly once, where n == len(p). The input is assumed
    to be square with at least one row.

    Uses set comparison per row/column (O(n^2) overall) instead of the
    original triple loop (O(n^3)): a row of n entries whose set equals
    {1, .., n} necessarily contains each value exactly once.
    """
    expected = set(range(1, len(p) + 1))
    for row in p:
        if set(row) != expected:
            return False
    # zip(*p) iterates the columns of a square list-of-lists
    for col in zip(*p):
        if set(col) != expected:
            return False
    return True
# test cases: exercise check_sudoku on the fixtures above; the expected
# result for each call is shown in the ">>>" comment that follows it
print(check_sudoku(incorrect))
# >>> False
print(check_sudoku(correct))
# >>> True
print(check_sudoku(incorrect2))
# >>> False
print(check_sudoku(incorrect3))
# >>> False
print(check_sudoku(incorrect4))
# >>> False
print(check_sudoku(incorrect5))
# >>> False
|
import time
from lists import command_list
from lists import math_commands
print('Hello my name is AIM. How may I help you?')
# Main read-answer loop: respond to known questions until 'shutdown'.
while True:
    Question = input('')
    if Question == 'what does aim mean':
        print('Aim means: Artificial Information Machine')
    elif Question == 'hi':
        print('Greetings earthling! How may I assist you?')
    elif Question == 'hello':
        print('Live long and prosper sentient being! What can I assist you with?')
    elif Question == "what can you do":
        print('Everything')
        print('Anything')
        print('What do you want me to do?')
    elif Question == 'what is the answer to life':
        print(42)
    elif Question == 'math':
        print('enter equation one variable at a time, starting with the numbers:')
        x = float(input())
        y = float(input())
        numbers = input()
        # BUG FIX: the operation was only matched against the misspelling
        # 'muiltiply'; accept the correct spelling too (kept for compatibility).
        if numbers in ('multiply', 'muiltiply'):
            print(x*y)
        elif numbers == 'divide':
            # guard against a ZeroDivisionError crash on y == 0
            if y == 0:
                print('Cannot divide by zero')
            else:
                print(x/y)
        elif numbers == 'minus':
            print(x-y)
        elif numbers == 'add':
            print(x+y)
    elif Question == 'thanks':
        print('You are very welcome bag filled with dirty water')
    # BUG FIX: this was a bare `if`, which started a second chain and made the
    # final `else` print "Unrecognized command" after every handled question.
    elif Question == 'what did you say?':
        print('Star Trek')
    elif Question == 'shutdown':
        print('Shutting down in')
        print(3)
        # BUG FIX: `sleep` was undefined (only `import time` at the top of
        # the file), so shutdown crashed with NameError; use time.sleep.
        time.sleep(1)
        print(2)
        time.sleep(1)
        print(1)
        time.sleep(1)
        print('Powered Off')
        time.sleep(1)
        break
    elif Question == '-help':
        print('Here is a list of known commands:')
        for elem in command_list:
            print(elem)
        time.sleep(5)
        print('Not finding what you are looking for?')
        time.sleep(2)
        print('Maybe it is in a sub folder of one of the catagories.')
        time.sleep(2)
        print ('Try entering one of the catagories by inputing it')
    elif Question == 'math operations':
        for elem in math_commands:
            print(elem)
    else:
        print('Unrecognized command. Type -help for a list of commands.')
|
# File: Dice.py
# Description: A game of war simulator
# Student's Name: Trace Tschida
# Student's UT EID: TRT729
# Course Name: CS 313E
# Unique Number: 51465
#
# Date Created: 9/24/2017
# Date Last Modified: 9/26/2017
import random
class Deck():
    """A standard 52-card deck built from 13 ranks x 4 suits (C, D, H, S)."""
    # initialize
    def __init__(self):
        # hold the list of cards; index 0 is the top of the deck
        self.cardList = []
        # list of rank and suits
        lst_rank = ["2","3","4","5","6","7","8","9","10","J","Q","K","A"]
        lst_suit = ["C", "D", "H", "S"]
        # loop through each rank and suit and create a card
        for suit in lst_suit:
            for rank in lst_rank:
                # create a card and append to the list
                self.cardList.append(Card(suit, rank))
    def shuffle(self):
        """Shuffle the deck in place."""
        # shuffle the cards
        random.shuffle(self.cardList)
    def dealOne(self, player):
        """Remove the top card of the deck and give it to *player*."""
        # remove the top card
        card_to_deal = self.cardList.pop(0)
        # give the player a card and keep their count in sync
        player.hand.append(card_to_deal)
        player.handTotal += 1
    def __str__(self):
        """Render the deck as rows of 13 cards."""
        str_deck = ""
        # print method in rows of 13
        count = 1
        for card in self.cardList:
            # spacing for the prints
            # NOTE(review): both branches assign the same pad as transcribed;
            # presumably the default pad was wider so that 3-character cards
            # (e.g. "10C") stay column-aligned -- confirm against the original.
            space = " "
            # print the space out front
            if len(str(card)) == 3:
                space = " "
            str_deck += space + str(card)
            # newline after every 13th card
            if count % 13 == 0 and count != 0:
                str_deck += "\n"
            count += 1
        return str_deck
class Card():
    """A single playing card with a suit, a rank, and a numeric value.

    Numeric ranks use their face value; J/Q/K/A map to 11/12/13/14.
    """

    # value lookup for the face cards; every other rank is numeric
    _FACE_VALUES = {"J": 11, "Q": 12, "K": 13, "A": 14}

    def __init__(self, _suit, _rank):
        self.suit = _suit
        self.rank = _rank
        face = Card._FACE_VALUES.get(self.rank)
        self.value = face if face is not None else int(self.rank)

    def __str__(self):
        """Return e.g. 'QH' -- rank followed by the upper-cased suit."""
        return self.rank + self.suit.upper()
class Player():
    """A war player holding an ordered hand of cards (index 0 = top)."""
    def __init__(self):
        self.hand = []      # cards held; index 0 is played first
        self.handTotal = 0  # running count of cards in hand
    def handNotEmpty(self):
        """Return True if the player still holds cards, else False.

        BUG FIX: the original fell through and returned None for an empty
        hand; callers only test truthiness, so returning an explicit bool
        is backward-compatible and less surprising.
        """
        return len(self.hand) != 0
    def playCard(self):
        """Remove and return the top card of the hand."""
        # get the top card
        playCard = self.hand.pop(0)
        self.handTotal -= 1
        # return the top card of the deck
        return playCard
    def __str__(self):
        """Render the hand as rows of 13 cards."""
        # hold the string to be returned
        str_hand = ""
        # count used to break the printout into rows of 13
        count = 1
        for i in range(len(self.hand)):
            # spacing for the prints
            space = " "
            # print the space out front
            if len(str(self.hand[i])) == 3:
                space = " "
            str_hand += space + str(self.hand[i])
            # newline after every 13th card
            if count % 13 == 0 and count != 0:
                str_hand += "\n"
            count += 1
        return str_hand
def playGame(cardDeck, player1, player2):
    """Play a full game of war between player1 and player2, printing each round.

    Rounds continue until one player cannot produce a card (the resulting
    IndexError counts as a loss for that player). Equal face-up ranks
    trigger a "war": each player puts three cards face down and one face
    up, repeating while the face-up ranks keep matching. The round winner
    collects every card played that round.
    """
    # display the initial cards
    print()
    print("Initial Hands:")
    print("Player 1:")
    print(player1)
    print()
    print("Player 2:")
    print(player2)
    print()
    # counter for number of rounds
    int_rounds = 1
    while True:
        try: # to draw a card, if a player has no more = auto loss
            # hold the cards played this round; the winner takes them all
            player1RoundCards = []
            player2RoundCards = []
            # print the round
            print("ROUND " + str(int_rounds))
            # each player plays a card and add to round cards
            player1Card = player1.playCard()
            player1RoundCards.append(player1Card)
            print("Player 1 plays: " + str(player1Card))
            player2Card = player2.playCard()
            player2RoundCards.append(player2Card)
            print("Player 2 plays: " + str(player2Card))
            print()
            # check to see if the values are the same (the while also
            # handles back-to-back wars on repeated ties)
            while player1Card.rank == player2Card.rank:
                # print the war
                print("War starts: " + str(player1Card) + " = " + str(player2Card))
                # each player puts down three cards
                for i in range(0,3):
                    # player 1 and 2 play cards
                    player1DownCard = player1.playCard()
                    player1RoundCards.append(player1DownCard)
                    player2DownCard = player2.playCard()
                    player2RoundCards.append(player2DownCard)
                    # spacing alignment
                    space1 = " "
                    space2 = " "
                    if len(str(player1DownCard)) == 3:
                        space1 = " "
                    if len(str(player2DownCard)) == 3:
                        space2 = " "
                    # print the results
                    print("Player 1 puts" + space1 + str(player1DownCard) + " face down")
                    print("Player 2 puts" + space2 + str(player2DownCard) + " face down")
                # reset the player card to a new card
                # allows the loop to iterate
                player1Card = player1.playCard()
                player2Card = player2.playCard()
                # add the cards to the round cards
                player1RoundCards.append(player1Card)
                player2RoundCards.append(player2Card)
                # spacing alignment
                space1 = " "
                space2 = " "
                if len(str(player1Card)) == 3:
                    space1 = " "
                if len(str(player2Card)) == 3:
                    space2 = " "
                # print the face up cards
                print("Player 1 puts" + space1 + str(player1Card) + " face up")
                print("Player 2 puts" + space2 + str(player2Card) + " face up")
                print()
            # determine who won; the face-up cards now differ in rank
            if (player1Card.value > player2Card.value):
                # print the result
                print("Player 1 wins round " + str(int_rounds) + ": " + str(player1Card) + " > " + str(player2Card))
                print()
                # winner collects every card played this round
                # loop through the played cards
                for i in range(len(player1RoundCards)):
                    # take the first card out for player 1
                    player1.hand.append(player1RoundCards[i])
                    # update the number of cards in the hand
                    player1.handTotal += 1
                for i in range(len(player2RoundCards)):
                    # take the first card out for player 2
                    player1.hand.append(player2RoundCards[i])
                    # update the number of cards in the hand
                    player1.handTotal += 1
            else: # player 2 has won
                # print the result
                print("Player 2 wins round " + str(int_rounds) + ": " + str(player2Card) + " > " + str(player1Card))
                print()
                # winner collects every card played this round
                # loop through the played cards
                for i in range(len(player1RoundCards)):
                    # take the first card out for player 1
                    player2.hand.append(player1RoundCards[i])
                    # update the number of cards in the hand
                    player2.handTotal += 1
                # loop through the played cards for the other player
                for i in range(len(player2RoundCards)):
                    # take the first card out for player 2
                    player2.hand.append(player2RoundCards[i])
                    # update the number of cards in the hand
                    player2.handTotal += 1
            # print the hands remaining after the round
            print("Player 1 now has " + str(player1.handTotal) + " card(s) in hand:")
            print(player1)
            print()
            print("Player 2 now has " + str(player2.handTotal) + " card(s) in hand:")
            print(player2)
            print()
            if (player1.handNotEmpty() and player2.handNotEmpty()):
                # increase the number of rounds
                int_rounds += 1
                print()
            else:
                # end the loop
                break
        except IndexError as ide:
            # a playCard() on an empty hand raises IndexError: that player
            # has run out of cards and loses the game
            # blank line
            print()
            if player1.handNotEmpty():
                print("Player 2 has run out of cards.")
            else:
                print("Player 1 has run our of cards.")
            # break out of the loop
            break
def main():
    """Build, shuffle and deal the deck, play the game, and report the winner."""
    cardDeck = Deck() # create a deck of 52 cards called "cardDeck"
    print("Initial deck:")
    print(cardDeck) # print the deck so we can see that you built it correctly
    random.seed(15) # leave this in for grading purposes
    cardDeck.shuffle() # shuffle the deck
    print("Shuffled deck:")
    print(cardDeck) # print the deck so we can see that your shuffle worked
    player1 = Player() # create a player
    player2 = Player() # create another player
    for i in range(26): # deal 26 cards to each player, one at
        cardDeck.dealOne(player1) # a time, alternating between players
        cardDeck.dealOne(player2)
    playGame(cardDeck,player1,player2)
    # whoever still holds cards after playGame returns is the winner
    if player1.handNotEmpty():
        print("\n\nGame over. Player 1 wins!")
    else:
        print("\n\nGame over. Player 2 wins!")
    print ("\n\nFinal hands:")
    print ("Player 1: ")
    print (player1) # printing a player object should print that player's hand
    print ("\nPlayer 2:")
    print (player2) # one of these players will have all of the cards, the other none
main()
# -*- test-case-name: vumi.transports.mtech_ussd.tests.test_mtech_ussd -*-
from xml.etree import ElementTree as ET
from twisted.internet.defer import inlineCallbacks
from vumi import log
from vumi.message import TransportUserMessage
from vumi.transports.httprpc import HttpRpcTransport
from vumi.components.session import SessionManager
class MtechUssdTransport(HttpRpcTransport):
    """MTECH USSD transport.
    Configuration parameters:
    :param str transport_name:
        The name this transport instance will use to create its queues
    :param int ussd_session_timeout:
        Number of seconds before USSD session information stored in
        Redis expires. Default is 600s.
    :param str web_path:
        The HTTP path to listen on.
    :param int web_port:
        The HTTP port to listen on.
    NOTE: We currently only support free-text USSD, not menus.
    At the time of writing, vumi has no suitable message format for
    specifying USSD menus. This may change in the future.
    """
    # NOTE: this is Python 2 code (it relies on the builtin `unicode`).
    ENCODING = 'utf-8'
    @inlineCallbacks
    def setup_transport(self):
        """Start the transport and attach a Redis-backed session store."""
        super(MtechUssdTransport, self).setup_transport()
        r_config = self.config.get('redis_manager', {})
        # session keys are namespaced per transport instance
        r_prefix = "mtech_ussd:%s" % self.transport_name
        session_timeout = int(self.config.get("ussd_session_timeout", 600))
        self.session_manager = yield SessionManager.from_redis_config(
            r_config, r_prefix, max_session_length=session_timeout)
    @inlineCallbacks
    def teardown_transport(self):
        """Stop the session store before tearing down the HTTP transport."""
        yield self.session_manager.stop()
        yield super(MtechUssdTransport, self).teardown_transport()
    def save_session(self, session_id, from_addr, to_addr):
        """Create and persist a new session record; returns a Deferred."""
        return self.session_manager.create_session(
            session_id, from_addr=from_addr, to_addr=to_addr)
    def handle_status_message(self, msgid, session_id):
        """Reply to a session status (cancel-type) message with an empty page."""
        mur = MtechUssdResponse(session_id)
        response_body = unicode(mur).encode(self.ENCODING)
        log.msg("Outbound message: %r" % (response_body,))
        return self.finish_request(msgid, response_body)
    @inlineCallbacks
    def handle_raw_inbound_message(self, msgid, request):
        """Parse an inbound HTTP request and publish it as a user message.

        Replies with HTTP 400 on unparseable XML or a missing/broken
        session; status messages are answered immediately without
        publishing.
        """
        request_body = request.content.read()
        log.msg("Inbound message: %r" % (request_body,))
        try:
            body = ET.fromstring(request_body)
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # `except ET.ParseError:` would be safer -- confirm and narrow.
        except:
            log.warning("Error parsing request XML: %s" % (request_body,))
            yield self.finish_request(msgid, "", code=400)
            return
        # We always get this.
        session_id = body.find('session_id').text
        status_elem = body.find('status')
        if status_elem is not None:
            # We have a status message. These are all variations on "cancel".
            yield self.handle_status_message(msgid, session_id)
            return
        page_id = body.find('page_id').text
        # They sometimes send us page_id=0 in the middle of a session.
        if page_id == '0' and body.find('mobile_number') is not None:
            # This is a new session.
            session = yield self.save_session(
                session_id,
                from_addr=body.find('mobile_number').text,
                to_addr=body.find('gate').text) # ???
            session_event = TransportUserMessage.SESSION_NEW
        else:
            # This is an existing session.
            session = yield self.session_manager.load_session(session_id)
            if 'from_addr' not in session:
                # We have a missing or broken session.
                yield self.finish_request(msgid, "", code=400)
                return
            session_event = TransportUserMessage.SESSION_RESUME
        content = body.find('data').text
        transport_metadata = {'session_id': session_id}
        self.publish_message(
            message_id=msgid,
            content=content,
            to_addr=session['to_addr'],
            from_addr=session['from_addr'],
            session_event=session_event,
            transport_name=self.transport_name,
            transport_type=self.config.get('transport_type'),
            transport_metadata=transport_metadata,
        )
    def handle_outbound_message(self, message):
        """Render an outbound reply as a USSD page and complete the request.

        Nacks the message if in_reply_to, content or the session_id is
        missing; otherwise finishes the pending HTTP request and acks.
        """
        in_reply_to = message['in_reply_to']
        session_id = message['transport_metadata'].get('session_id')
        content = message['content']
        if not (in_reply_to and session_id and content):
            return self.publish_nack(user_message_id=message['message_id'],
                sent_message_id=message['message_id'],
                reason='Missing in_reply_to, content or session_id')
        mur = MtechUssdResponse(message['transport_metadata']['session_id'])
        mur.add_text(message['content'])
        # unless the session is closing, offer the free-text input link
        if message['session_event'] != TransportUserMessage.SESSION_CLOSE:
            mur.add_freetext_option()
        response_body = unicode(mur).encode(self.ENCODING)
        log.msg("Outbound message: %r" % (response_body,))
        self.finish_request(message['in_reply_to'], response_body)
        return self.publish_ack(user_message_id=message['message_id'],
                                sent_message_id=message['message_id'])
class MtechUssdResponse(object):
    """Builder for an MTECH USSD XML response page."""

    def __init__(self, session_id):
        self.session_id = session_id
        self.title = None
        self.text = []
        self.nav = []

    def add_title(self, title):
        """Set the page title."""
        self.title = title

    def add_text(self, text):
        """Append a block of body text (rendered as a <div>)."""
        self.text.append(text)

    def add_menu_item(self, text, option):
        """Append a navigation link for menu option *option*."""
        self.nav.append({
            'text': text,
            'pageId': 'index%s' % (option,),
            'accesskey': option,
        })

    def add_freetext_option(self):
        """Append the wildcard link that accepts free-text input."""
        self.nav.append({'text': None, 'pageId': 'indexX', 'accesskey': '*'})

    def to_xml(self):
        """Serialize the page to XML with every newline stripped out."""
        page = ET.fromstring('<page version="2.0" />')
        ET.SubElement(page, "session_id").text = self.session_id
        if self.title is not None:
            ET.SubElement(page, "title").text = self.title
        for chunk in self.text:
            lines = chunk.split('\n')
            div = ET.SubElement(page, "div")
            div.text = lines[0]
            # subsequent lines hang off <br/> tails inside the same div
            for extra in lines[1:]:
                ET.SubElement(div, "br").tail = extra
        if self.nav:
            nav = ET.SubElement(page, "navigation")
            for link in self.nav:
                ET.SubElement(
                    nav, "link", pageId=link['pageId'],
                    accesskey=link['accesskey']).text = link['text']
        # We can't have "\n" in the output at all, it seems.
        return ET.tostring(page, encoding="UTF-8").replace("\n", "")

    def __str__(self):
        return self.to_xml()
|
import numpy as np
import math
import matplotlib.pyplot as plt
import csv
import sys
import scipy
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
from scipy.spatial import ConvexHull
from mpl_toolkits.mplot3d.axes3d import Axes3D
import random
from sklearn.ensemble import IsolationForest
'''
Use: To read a file with x,y,z coordinates, and store the data for each dimension in a separate array.
params: filename - File with x,y,z cooridnates
returns: 3 arrays with x's, y's and z's
'''
def getPoints(filename):
    """Read a CSV of x,y,z rows and return the columns as three float arrays.

    params: filename - file with x,y,z coordinates, one point per row
    returns: tuple (x, y, z) of numpy float arrays
    """
    xs, ys, zs = [], [], []
    with open(filename, 'r') as csv_file:
        for row in csv.reader(csv_file):
            xs.append(row[0])
            ys.append(row[1])
            zs.append(row[2])
    return (np.array(xs, dtype=float),
            np.array(ys, dtype=float),
            np.array(zs, dtype=float))
'''
Use: To read a file with one dimensional data and store it in an array
'''
def getList(filename):
    """Read a one-column CSV file and return its values as a float array."""
    values = []
    with open(filename, 'r') as csv_file:
        for row in csv.reader(csv_file):
            values.append(row[0])
    return np.array(values, dtype=float)
'''
Use: Generate a random list of colors and assign colors to coordinates based on which cluster it belongs to.
'''
def generateColors(numParticles, labels):
    """Assign one random RGBA color per cluster label.

    Noise points (label == -1) are colored (0, 0, 0, 0); every other label
    indexes into a freshly generated random palette of numParticles colors.
    Returns an array of shape (len(labels), 4).
    """
    random.seed()  # re-seed the random number generator
    palette = [tuple(random.uniform(0, 1) for _ in range(4))
               for _ in range(numParticles)]
    colors = []
    for label in labels:
        if label == -1:
            colors.append((0, 0, 0, 0))  # black/transparent for noise
        else:
            colors.append(palette[label])
    return np.array(colors, dtype=float)
#Main
#Getting pixel coordinates
coordinates = getPoints('3DCoordinates.csv')
#Scaling point cloud to biological size in microns
#Voxel size for A_nos_embryo7_488_cmle-19-29 is 0.056 x 0.056 x 0.15 micron^3
#x = np.array(coordinates[0]*0.056, dtype = float); y = np.array(coordinates[1]*0.056, dtype = float); z = np.array(coordinates[2]*0.15, dtype = float)
# Coordinates stay in pixel units here; scaling to microns happens later,
# just before each convex-hull computation.
x = np.array(coordinates[0], dtype = float); y = np.array(coordinates[1], dtype = float); z = np.array(coordinates[2], dtype = float)
#Getting labels (HDBSCAN cluster ids; -1 marks noise)
labels = getList('hdbscanLabels.csv')
numParticles = int(max(labels) + 1)  # cluster labels run 0..numParticles-1
#Making tuples of the form (x,y,z,label)
data = np.vstack((x,y,z,labels)).T
#Removing noise points (label -1)
denoisedData = [i for i in data if i[3] > -1]
#Sorting by label
sortedData = sorted(denoisedData, key=lambda tup: tup[3])
s = [x[:-1] for x in sortedData] #Remove after checking for plotting
s = np.stack( s, axis=0 )
#Removing noise from labels and sorting - for plotting
denoisedLabels = [i for i in labels if i > -1]
denoisedLabels.sort() #sorting labels to match sorted Data for plotting
sortedLabels = [int(i) for i in denoisedLabels]
'''
#Checking if sorting data and labels worked
colors = generateColors(numParticles, sortedLabels)
#Creating a widget for 3D plotting
app = QtGui.QApplication([])
w = gl.GLViewWidget()
#w.show()
sp1 = gl.GLScatterPlotItem(pos=s, color = colors, pxMode=True, size = 0.0000001)
sp1.setGLOptions('opaque')
w.addItem(sp1)
'''
'''
Data:
data - 4D data with x,y,z,label
sortedData - data sorted by label (has been denoised)
s - sorted list of points for plotting
sortedLabels - sorted labels, corresponds with sortedData and s
'''
dv = list();xForVolPlot = list() #Keeping track of change in volume
clusterCounter = -1; #i is a counter for labels
rem = 0
#Finding the convex hull for every cluster
#for i in range(0, int(numParticles),1):
for i in range(0,20,1):
print('Cluster ' + str(i))
cluster = [j for j in sortedData if j[3] == i] #Accessing the points of every cluster
c = [x[:-1] for x in cluster] #removing labels from cluster coordinates
c = np.array(c, dtype = float)
clusterCounter = clusterCounter+1
#Removing very small clusters here
if(len(c) <= 3): print('Removed cluster with' + str(len(c)) + 'points'); continue;
'''
Finding the convex hull of clusters without removing outliers
'''
#Checking if we have a 2D case or 3D case by checking min and max in each dimension
cx,cy,cz = zip(*c)
dx = max(cx) - min(cx);dy = max(cy) - min(cy);dz = max(cz) - min(cz)#Getting the difference between the min and max element in each dimension
if(dx == 0 or dy == 0 or dz == 0): print('Flat cluster');continue
#scaling hull input after removing outliers
scx,scy,scz = zip(*c)
scx = np.array(scx, dtype = float); scy = np.array(scy, dtype = float); scz = np.array(scz, dtype = float)
scx = scx*0.056;scy = scy*0.056;scz = scz*0.15
#scx = np.array(scx*0.056, dtype = float); scy = np.array(scy*0.056, dtype = float); scz = np.array(scz*0.15, dtype = float)
scaledHullInput = np.vstack((scx,scy,scz)).T
convexHull = ConvexHull(scaledHullInput)
volBefore = convexHull.volume
'''
Finding the convex hull of clusters after removing outliers
'''
#Removing anomalous points from the cluster
model = IsolationForest(behaviour="new",max_samples=len(c),contamination='auto',n_estimators=1000)
model.fit(c)
sklearn_score_anomalies = model.decision_function(c)
original_paper_score = [-1*s + 0.5 for s in sklearn_score_anomalies] #(Source: https://stats.stackexchange.com/questions/335274/scikit-learn-isolationforest-anomaly-score)
#print('Anomaly scores')
#print(original_paper_score)
meanScore = np.mean(original_paper_score)
#print('Mean score: ' + str(meanScore))
sdScore = np.std(original_paper_score)
#print('Standard deviation: ' + str(sdScore))
#print('Mean + 1*sd: '+ str(meanScore+sdScore))
#print('Mean + 2*sd: '+ str(meanScore+2*sdScore))
cleanCluster = list()
for i in range(0, len(c), 1):
if(original_paper_score[i]>(meanScore+2*sdScore)): continue
else: cleanCluster.append(c[i])
cleanCluster = np.array(cleanCluster, dtype = float)
ccx,ccy,ccz = zip(*cleanCluster)
cdx = max(ccx) - min(ccx);cdy = max(ccy) - min(ccy);cdz = max(ccz) - min(ccz)#Getting the difference between the min and max element in each dimension
if(cdx == 0 or cdy == 0 or cdz == 0): print('Flat cluster');continue
'''
Set up for 3d plotting
'''
fig = plt.figure( )
plt.style.use('dark_background')
#plotting before here
ax = fig.add_subplot(1,2,1, projection = '3d')
ax.grid(False)
#Visualizing the cluster
ax.scatter (scx,scy,scz, c = 'g', marker='o', s=10, linewidths=2)
ax.set_title('Before')
ax.set_xlabel ('x, axis')
ax.set_ylabel ('y axis')
ax.set_zlabel ('z axis')
#plotting simplices (Source: https://stackoverflow.com/questions/27270477/3d-convex-hull-from-point-cloud)
for s in convexHull.simplices:
s = np.append(s, s[0]) # Here we cycle back to the first coordinate
ax.plot(scaledHullInput[s, 0], scaledHullInput[s, 1], scaledHullInput[s, 2], "r-")
print('Number of points removed: ' + str(len(c) - len(cleanCluster)))
if(len(c) - len(cleanCluster) != 0): rem = rem+1
#Scaling input
sccx,sccy,sccz = zip(*cleanCluster)
sccx = np.array(sccx, dtype = float); sccy = np.array(sccy, dtype = float); sccz = np.array(sccz, dtype = float)
sccx = sccx*0.056;sccy = sccy*0.056;sccz = sccz*0.15
#sccx = np.array(sccx*0.056, dtype = float); sccy = np.array(sccy*0.056, dtype = float); sccz = np.array(sccz*0.15, dtype = float)
scaledHullInputClean = np.vstack((sccx,sccy,sccz)).T
convexHull = ConvexHull(scaledHullInputClean)
volAfter = convexHull.volume
if((volBefore - volAfter) != 0):
print('Change in volume: ' + str(volBefore - volAfter))
xForVolPlot.append(clusterCounter);dv.append(volBefore - volAfter)
ax = fig.add_subplot(1,2,2, projection = '3d')
ax.grid(False)
#Visualizing the cluster
ax.scatter (sccx,sccy,sccz, c = 'g', marker='o', s=10, linewidths=2)
ax.set_title('After')
ax.set_xlabel ('x, axis')
ax.set_ylabel ('y axis')
ax.set_zlabel ('z axis')
#plotting simplices (Source: https://stackoverflow.com/questions/27270477/3d-convex-hull-from-point-cloud)
for s in convexHull.simplices:
s = np.append(s, s[0]) # Here we cycle back to the first coordinate
ax.plot(scaledHullInputClean[s, 0], scaledHullInputClean[s, 1], scaledHullInputClean[s, 2], "r-")
plt.show()
# Summary: report how many clusters lost points, then scatter-plot the
# per-cluster change in hull volume, highlighting statistical outliers.
print(' ')
print('Points removed from ' + str(rem) + ' clusters')
print(xForVolPlot)
dvMean = np.mean(dv)
dvSd = np.std(dv)
print('Mean change in volume: ' + str(dvMean))
print('Standard deviation: ' + str(dvSd))
# Idiom fix: the original iterated `for i in range(0, len(dv), 1)` and indexed
# two parallel lists; zip the lists directly instead. Green marks clusters
# whose volume change exceeds mean + 2*std.
for cluster_nr, delta_v in zip(xForVolPlot, dv):
    colour = 'g' if delta_v > (dvMean + 2 * dvSd) else 'b'
    plt.scatter(cluster_nr, delta_v, c=colour)
plt.xlabel('Cluster number')
plt.ylabel('Change in volume')
plt.show()
'''
# Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    QtGui.QApplication.instance().exec_()
'''
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
from settings import SAVE_PATH
# Fixed artifact names written inside each experiment's output directory.
RESULTS_FILENAME = 'results.pkl'  # pickled results dict (see save_results/open_results)
STATUS_FILENAME = 'status.txt'  # human-readable "last trained" marker
def save_results(out_dir, results_dict, status=None):
    """Pickle *results_dict* under ``SAVE_PATH/out_dir`` and optionally record status.

    :param out_dir: directory name (relative to SAVE_PATH) to write into
    :param results_dict: picklable object dumped to RESULTS_FILENAME
    :param status: if given, a "last trained: ..." line is written to
        STATUS_FILENAME in the same directory
    """
    results_path = os.path.join(SAVE_PATH, out_dir)
    ensure_dir_path_exists(results_path)
    with open(os.path.join(results_path, RESULTS_FILENAME), "wb") as f:
        pickle.dump(results_dict, f)
    if status is not None:
        # Fix: the original recomputed the identical path and re-created the
        # directory a second time; the status file lives next to the results.
        with open(os.path.join(results_path, STATUS_FILENAME), "w") as f:
            f.write("last trained: {}\n".format(status))
def open_results(out_dir):
    """Load and return the pickled results object stored under ``SAVE_PATH/out_dir``."""
    results_file = os.path.join(SAVE_PATH, out_dir, RESULTS_FILENAME)
    with open(results_file, 'rb') as handle:
        return pickle.load(handle)
def load_data(filename):
    """Unpickle *filename* and return its (train, val, test) splits.

    The pickle is read with latin-1 encoding (Python-2-era pickle compatibility).
    """
    with open(filename, "rb") as handle:
        splits = pickle.load(handle, encoding="latin1")
    train_data, val_data, test_data = splits
    return train_data, val_data, test_data
def to_one_hot(labels, n_values=10):
    """Return the one-hot encoding of *labels* as a (len(labels), n_values) array.

    :param labels: sequence/array of integer class indices in [0, n_values)
    :param n_values: number of classes (generalized; defaults to the original
        hard-coded 10, so existing callers are unaffected)
    """
    return np.eye(n_values)[labels]
def check_class_distribution(y_labels):
    """Map each distinct label in *y_labels* to its number of occurrences."""
    labels, occurrences = np.unique(y_labels, return_counts=True)
    return {label: count for label, count in zip(labels, occurrences)}
def ensure_dir_path_exists(path):
    """Create directory *path* (and any missing parents) if it does not exist.

    Fix: the previous ``os.path.isdir`` pre-check was redundant and race-prone
    (LBYL); ``os.makedirs(..., exist_ok=True)`` already handles an existing
    directory atomically.
    """
    os.makedirs(path, exist_ok=True)
def plot_bar(y_data, labels, x_label='', y_label='', filename=None, dirname=None):
    """
    Draw a bar chart, optionally save it under SAVE_PATH, then show it.

    :param y_data: list of scalar values
    :param labels: list of labels for each bar
    :param x_label: name of x axis
    :param y_label: name of y axis
    :param filename: name of file to save (no extension; '.png' is appended)
    :param dirname: directory (relative to SAVE_PATH) to save into
    """
    plt.figure(figsize=(10, 5))
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    x = np.arange(len(labels))
    plt.bar(x, y_data)
    plt.xticks(x, labels, fontsize=8, rotation=15)
    # Bug fix: tight_layout() was previously called BEFORE plt.figure(), so it
    # adjusted whatever figure was active beforehand. It must run after the
    # new figure is drawn and before saving.
    plt.tight_layout()
    if filename is not None:
        if dirname is not None:
            ensure_dir_path_exists(os.path.join(SAVE_PATH, dirname))
            plt.savefig(os.path.join(SAVE_PATH, dirname, filename) + '.png')
        else:
            plt.savefig(os.path.join(SAVE_PATH, filename) + '.png')
    plt.show()
def plot_data(data, legend_labels, x_label='', y_label='', filename=None, dirname=None,
              leg_loc='best'):
    """
    Plot one line per series in *data* (x = epoch index starting at 1),
    optionally save the figure under SAVE_PATH, then show it.

    :param data: iterable of y-value sequences
    :param legend_labels: legend label per series
    :param x_label: name of x axis
    :param y_label: name of y axis
    :param filename: name of file to save (no extension; '.png' is appended)
    :param dirname: directory (relative to SAVE_PATH) to save into
    :param leg_loc: matplotlib legend location
    """
    plt.figure(figsize=(10, 5))
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    for y_data, label in zip(data, legend_labels):
        x = np.arange(1, len(y_data) + 1)  # epoch amount
        plt.plot(x, y_data, label=label)
    plt.legend(loc=leg_loc)
    # Bug fix: tight_layout() was previously called BEFORE plt.figure(), so it
    # adjusted the previously active figure; run it after plotting instead.
    plt.tight_layout()
    if filename is not None:
        if dirname is not None:
            ensure_dir_path_exists(os.path.join(SAVE_PATH, dirname))
            plt.savefig(os.path.join(SAVE_PATH, dirname, filename) + '.png')
        else:
            plt.savefig(os.path.join(SAVE_PATH, filename) + '.png')
    plt.show()
def plot_val_loss(results, dirname=None):
    """Plot the per-epoch validation loss of every experiment in *results*.

    :param results: dict with 'log_data' (one log per experiment, each holding
        log['loss_epoch']['val']) and 'label' (legend labels)
    :param dirname: optional save directory, forwarded to plot_data
    """
    # Idiom fix: build the series list with a comprehension instead of an
    # explicit append loop.
    val_loss_data = [log_data['loss_epoch']['val'] for log_data in results['log_data']]
    plot_data(val_loss_data, results['label'], x_label='epoka', y_label='funkcja kosztu',
              filename='loss_val', dirname=dirname, leg_loc='upper right')
def plot_val_accuracy(results, dirname=None):
    """Plot the per-epoch validation accuracy of every experiment in *results*.

    :param results: dict with 'log_data' (each log holds log['accuracy']['val'])
        and 'label' (legend labels)
    :param dirname: optional save directory, forwarded to plot_data
    """
    # Idiom fix: build the series list with a comprehension instead of an
    # explicit append loop.
    val_acc_data = [log_data['accuracy']['val'] for log_data in results['log_data']]
    plot_data(val_acc_data, results['label'], x_label='epoka', y_label='skuteczność',
              filename='acc_val', dirname=dirname, leg_loc='lower right')
def plot_cross_mse_val_loss(results, dirname=None):
    """Plot the cross-entropy (run 0) and MSE (run 1) validation-loss curves,
    each as its own single-series figure."""
    output_names = ('cross_entropy_loss_val', 'mse_loss_val')
    for run_index, out_name in enumerate(output_names):
        series = results['log_data'][run_index]['loss_epoch']['val']
        label = results['label'][run_index]
        plot_data([series], [label], x_label='epoka', y_label='funkcja kosztu',
                  filename=out_name, dirname=dirname, leg_loc='upper right')
def plot_val_vs_train_acc(results, dirname):
    """For each experiment, plot training vs validation accuracy on one figure."""
    legend_labels = ['dane treningowe', 'dane walidacyjne']
    for log_data, exp_label in zip(results['log_data'], results['label']):
        accuracy = log_data['accuracy']
        plot_data([accuracy['train'], accuracy['val']], legend_labels,
                  x_label='epoka', y_label='skuteczność',
                  filename=exp_label + '_val_train_acc', dirname=dirname)
def plot_time_bar(results, dirname, time_unit='s'):
    """Bar chart of the training duration of each experiment in *results*."""
    axis_label = 'czas trwania uczenia [{}]'.format(time_unit)
    plot_bar(results['time'], results['label'], x_label='', y_label=axis_label,
             filename='time', dirname=dirname)
def plot_val_loss_per_batch(data, filename, dirname):
    """Plot a single per-batch loss series and save it as *filename*."""
    plot_data(x_label='batch', y_label='funkcja kosztu', data=[data],
              legend_labels=['dane treningowe'], dirname=dirname, filename=filename)
|
import os
from formatConverter import *
import timeSeriesFrame
# Map of supported input-file extensions to the numeric reader id used by __doConv.
extmap = {'.csv':1, '.txt':2, '.xls':3, '.sql':4}
# NOTE(review): 'dir' shadows the builtin of the same name and appears unused
# in this module — confirm nothing imports it before renaming.
dir = os.curdir
"""dir refers to current directory by default
recommended to change directory to point to a folder with only files desired to be read"""
def __doConv(file, id):
    """Read *file* with the reader selected by *id* and emit conversions.

    :param file: path of the data file to convert
    :param id: numeric format id (see ``extmap``): 1=CSV, 2=TXT, 3=XLS, 4=SQL
    Side effects: writes an ``.xls`` copy of the data and prints the
    TimeSeriesFrame produced by ``toTSF()``.
    """
    f = FormatConverter(file)  # creates a FormatConverter object for the file
    # dispatch to the reader matching the numeric format id
    if id == 1:
        f.readCSV()
    elif id == 2:
        f.readTXT()
    elif id == 3:
        f.readXLS()
    elif id == 4:
        f.readSQL()
    # converts to all other file types
    # commented out if not implemented
##    f.toCSV()
##    f.toTXT()
    f.toXLS()
##    f.toSQL()
    # Bug fix: 'print f.toTSF()' was a Python 2 print statement and is a
    # syntax error under Python 3; the call form works on both versions.
    print(f.toTSF())
def main():
    """Convert the sample data file in the working directory.

    Reads ``csvDC.csv`` with the CSV reader (format id 1 — see ``extmap``),
    writes an ``.xls`` copy alongside it, and prints the resulting
    TimeSeriesFrame (formatted by timeSeriesFrame.TimeSeriesFrame.__str__).
    Tested with csvDC.csv and txtDC.txt; each run first echoes the file name,
    then a "Cannot modify an empty cell" notice, then the date-indexed table
    of fund returns from 1986-01-01 through 2009-05-01.
    """
    sample_file = "csvDC.csv"
    __doConv(sample_file, extmap['.csv'])


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def solve(cipher, N=1000000):
idx = 0
buf = [0]*10
cipher = int(cipher)
lastnum = ""
while 10 != buf.count(1):
idx += 1
if N < idx:
return "INSOMNIA"
lastnum = str(cipher*idx)
for ch in lastnum:
buf[int(ch)] = 1
return lastnum
if __name__ == "__main__":
testcases = input()
for caseNr in xrange(1, testcases+1):
cipher = raw_input()
print("Case #%i: %s" % (caseNr, solve(cipher)))
|
# -*- coding: utf-8 -*-
import serial, threading, math, time, pygame, copy, numpy, itertools, binascii, struct
from sys import exit
from pygame.locals import *
from rdp import rdp
data_list_buffer = [] # shared item between window thread and lidar thread. Lidar stores data in the list and window thread uses it as initial data
state_mode_IMU = []  # latest IMU error/status messages (guarded by state_lock_IMU)
state_mode = []  # latest radar error/status messages (guarded by state_lock)
thread_queue = {}  # threadID -> thread object; used for restarting threads
lidar_angle_buffer = [] # shared item between window thread and IMU thread. Length of the buffer is defined by LIDAR_BUFFER_LEN
data_buffer_lock = threading.Lock()  # protects data_list_buffer
IMU_buffer_lock = threading.Lock()  # protects lidar_angle_buffer
state_lock = threading.Lock()  # protects state_mode
state_lock_IMU = threading.Lock()  # protects state_mode_IMU
# Tuning constants. Distances below are presumably in millimetres and angles
# in degrees — TODO confirm against the sensor setup.
DOUGLAS_EPSILON = 15  # epsilon for Ramer-Douglas-Peucker point simplification
ANGLE_CHANGE_TOL = 8  # max segment-angle change still treated as the same segment
ANGLE_TOL = 40  # max |segment angle| for a segment to qualify as a foothold
ANGLE_TOL2 = 20  # angle tolerance used in the descent (hidden-corner) case
FOOTHOLD_MIN_LEN = 50  # minimum usable foothold length
FOOTHOLD_MAX_LEN = 500  # maximum gap allowed between consecutive footholds
OPTIMUM_STEP_LEN = 700  # preferred step length when a segment is long enough
STEP_OFFSET = 200  # offset from a segment start when placing a step
MIN_PTS_DENSITY = 0.008  # min points-per-unit-length for a valid foothold segment
STEPS_PREDICT_NUM = 5  # number of future steps to predict
PAUSE = False  # when True, wait for Enter after each recognition pass
LIDAR_OFFSET = 4.4  # fixed angular offset added to the IMU euler angle
LIDAR_BUFFER_LEN = 50  # max entries kept in lidar_angle_buffer
DRAWBOLN = True  # enable the pygame visualisation
class serial_read_error(Exception):
    """Raised when a serial frame does not conform to the radar protocol."""
class serial_read_time_out(Exception):
    """Raised when the search for a frame header exceeds its time budget."""
class radar_serial_thread(serial.Serial, threading.Thread):
    """Reader thread for the radar (lidar) serial stream.

    Continuously locates frame headers, decodes measurement frames into
    ``(angle_deg, distance, timestamp)`` tuples and appends each decoded frame
    to the shared ``data_list_buffer`` (guarded by ``data_buffer_lock``).
    Device error states are published through ``state_mode`` (guarded by
    ``state_lock``).
    """
    # NOTE(review): 'global' at class scope has no effect; kept as-is.
    global data_list_buffer, state_mode

    def __init__(self, threadID, name, S, B = 115200):
        """Open serial port *S* at baud *B* and register this thread.

        Publishes a "device missing" state message and re-raises
        serial.serialutil.SerialException when the port cannot be opened.
        """
        threading.Thread.__init__(self)
        thread_queue[threadID] = self
        # serial.Serial.__init__(self, serial_port, Baud)
        self.serial_port = S
        self.baud = B
        self.threadID = threadID
        self.name = name
        self.start_time = 0  # timestamp anchoring the 5 s header-search timeout
        self.points_data = []  # points decoded from the most recent frame
        # Device error code -> user-facing message. These strings are rendered
        # in the UI, so they are runtime data and left untranslated.
        self.state_map = {\
        b'\x01':"雷达CCD故障",\
        b'\x02':"电机转动速度不稳定:::检查电机",\
        b'\x03':"雷达配置值丢失",\
        b'\x04':"激光发射管故障"\
        }
        # init the serial
        try:
            serial.Serial.__init__(self, self.serial_port, self.baud)
        except serial.serialutil.SerialException:
            state_lock.acquire()
            try:
                state_mode.clear()
                state_mode.append("串口设备 "+self.serial_port+" 不存在")
            finally:
                state_lock.release()
            raise serial.serialutil.SerialException

    def restart(self):
        """Re-open the serial port after a disconnect (same error handling as __init__)."""
        try:
            serial.Serial.__init__(self, self.serial_port, self.baud)
        except serial.serialutil.SerialException:
            state_lock.acquire()
            try:
                state_mode.clear()
                state_mode.append("串口设备 "+self.serial_port+" 不存在")
            finally:
                state_lock.release()
            raise serial.serialutil.SerialException

    def Head_check(self):
        # IO intensive
        '''Block (up to 5 s) until the start byte b'\xAA' is seen; validate the
        address/type bytes; read the frame length and command byte; return
        (isData, payload_length). Returns None (implicitly) on a protocol
        error; re-raises serial_read_time_out on timeout.'''
        try:
            self.start_time = time.time()
            #start code check
            # print("check start")
            # while True:
            #     print(self.read())
            while self.read() != b'\xAA':
                if (time.time() - self.start_time) > 5:
                    raise serial_read_time_out
            length = self.twobyte2int()-3
            # print("1")
            #address and type code check
            if self.read(2) != b'\x00\x61':
                raise serial_read_error
            length -= 2
            #command code read
            r = self.read()
            length -= 1
            # print("2")
            #length check
            length_buffer = self.twobyte2int()
            length -= 2
            if length != length_buffer:
                raise serial_read_error
            #command code check
            # b'\xA9' marks a measurement frame, b'\xAB' a state frame.
            # NOTE(review): any other command byte leaves isData unbound and
            # would raise NameError here — confirm the protocol allows only
            # these two command codes.
            if r == b'\xA9':
                isData = True
            elif r == b'\xAB':
                isData = False
            return (isData, length)
        except serial_read_error:
            print("Serial data don't accord with the protocol ...... Data ignored")
        except serial_read_time_out:
            print("Head_check time out, Please comfirm the device is legal")
            raise serial_read_time_out

    def twobyte2int(self):
        # IO intensive
        # Read two bytes from the port and combine them big-endian (high*256+low).
        buffer = self.read()[0]
        int_number = buffer*256 + self.read()[0]
        return int_number

    def data_decode(self, L):  # decode one frame of measurement data
        # IO intensive
        # A successfully decoded data frame clears any pending error state.
        state_lock.acquire()
        try:
            if len(state_mode)>0:
                state_mode.clear()
        finally:
            state_lock.release()
        angle = self.twobyte2int()*0.01  # start angle, hundredths of a degree
        self.points_data = []
        L -= 2
        N = int(L/2)  # number of distance samples in this frame
        # print("N is ", N)
        for i in range(N):
            distance = self.twobyte2int()*0.25  # distance LSB = 0.25 units
            if distance != 0:  # 0 means "no echo" — skip
                self.points_data.append((angle, distance, time.time()))
            # each frame spans 22.5 degrees, spread evenly across N samples
            angle += 22.5/N
            angle = round(angle,2)
        self.read(2)  # discard trailing two bytes (presumably checksum — TODO confirm)

    def state_decode(self, L):
        '''
        Decode a device-state frame and publish it via state_mode.

        Device fault codes:
        0x00: no fault
        0x01: CCD fault
        0x02: unstable radar rotation speed
        0x03: radar configuration values lost
        0x04: laser emitter fault
        '''
        error_code = self.read()
        print(error_code)
        print(state_mode)
        self.read(2)  # discard trailing two bytes of the frame
        state_lock.acquire()
        try:
            if error_code == b'\x00':
                state_mode.clear()
            else:
                # only publish the first error seen; later ones are ignored
                if len(state_mode) == 0:
                    state_mode.append(self.state_map[error_code])
        finally:
            state_lock.release()

    def first_angle_print(self):
        # test method
        # Prints the start angle of the next data frame and discards its payload.
        buffer_h = self.Head_check()
        if buffer_h:
            isData, L = buffer_h
            if isData:
                print(self.twobyte2int()*0.01)
                L -= 2
                N = int(L/2)
                self.read(N)
            else:
                print("error")

    def run(self):
        """Main loop: keep reading frames and route them to data/state decoding."""
        while True:
            try:
                # skip work while the port is known to be missing
                if (not state_mode) or state_mode[0] != "串口设备 "+self.serial_port+" 不存在":
                    buffer_h = self.Head_check()
                    if buffer_h:
                        # print('\nhead check...')
                        isData, Length = buffer_h
                        if isData:
                            # print('Is data')
                            # print(self.in_waiting)
                            self.data_decode(Length)
                            data_buffer_lock.acquire()
                            try:
                                # append the current frame's points to the shared buffer
                                data_list_buffer.append(self.points_data)
                            finally:
                                data_buffer_lock.release()
                        else:
                            self.state_decode(Length)
            except serial_read_time_out:
                if len(state_mode) == 0:
                    state_lock.acquire()
                    try:
                        state_mode.append("寻找帧开头超时:::请确认已正确连接合法设备并设置串口")
                    finally:
                        state_lock.release()
            except serial.serialutil.SerialException:
                state_lock.acquire()
                try:
                    state_mode.clear()
                    state_mode.append("串口设备 "+self.serial_port+" 丢失 按下command+R重连")
                finally:
                    state_lock.release()
class window_thread(threading.Thread):
    '''
    Thread that conducts data analysis and recognition algorithm. This thread also generates a visualisation window to plot the data.

    Pipeline per iteration of run(): collect ~16 lidar frames from
    data_list_buffer -> projection() (IMU-corrected Cartesian points) ->
    Analyse() (RDP simplification + foothold segment detection) ->
    mode_recognition() (step placement) -> draw_points() (pygame rendering,
    only when drawBoln is True).
    '''
    global data_list_buffer, state_mode

    def __init__(self, threadID, name, drawBoln):
        """Set up analysis state; when *drawBoln* is True also open the pygame window."""
        threading.Thread.__init__(self)
        thread_queue[threadID] = self
        self.threadID = threadID
        self.name = name
        self.drawBoln = drawBoln
        self.polar_points = []  # raw (angle, distance, time) tuples being analysed
        self.polar_points_new = []  # IMU-corrected (angle, distance) tuples
        self.K = math.pi/180  # degrees -> radians factor
        self.points_analyse = []  # Cartesian points selected for analysis
        self.points_reduced = []  # RDP-simplified points
        self.steps_coord = {}  # step index -> [foothold index, (x, y)]
        self.points_reduced_index = []  # index of each reduced point in the full list
        self.seg_angle_list = []  # angle of each segment between reduced points
        self.redundant_cluster = []  # trailing points past the last foothold
        self.data_select = []  # points up to (and including) the last foothold
        self.hidden_corners = []  # predicted occluded corner positions
        self.lidar_angle = 0  # latest IMU-derived lidar tilt angle
        self.footholds = []  # [segment index, length, angle] per foothold
        if drawBoln:
            self.SCREEN_SIZE = (2560, 1400)
            # pygame window init
            pygame.init()
            self.screen = pygame.display.set_mode(self.SCREEN_SIZE, RESIZABLE, 32)
            pygame.display.set_caption("Radar Points")
            # load sprite images
            self.point = pygame.image.load('point.png').convert_alpha()
            self.warning_image = pygame.image.load('warning.png').convert_alpha()
            # font setup
            self.font = pygame.font.Font("PingFang.ttf", 70)
            # half width/height of the point sprite, used for centring
            self.W,self.H = self.point.get_width()/2 , self.point.get_height()/2
            self.X,self.Y = self.SCREEN_SIZE[0]/10 , self.SCREEN_SIZE[0]/10
            self.k = self.SCREEN_SIZE[1]/2000  # world -> screen scale factor
            self.Shift_T = 50  # text Y-axis offset
            self.Shift_W = 200  # warning-icon Y-axis offset
            self.D = 2  # mouse-drag speed multiplier

    def projection(self):
        # CPU intensive
        '''
        Takes one set of data and calculates the average timestamp of the set;
        the IMU angle closest to that time is selected via get_lidar_angle().
        Each point's angle is offset by that IMU angle, points in the 270-360
        degree sector are converted to Cartesian coordinates and loaded into
        self.points_analyse, then Analyse() is called.
        '''
        self.points_analyse.clear()
        average_time = (self.polar_points[0][2] + self.polar_points[-1][2])/2
        self.get_lidar_angle(average_time)
        for n in self.polar_points:
            adjusted_angle = n[0] + self.lidar_angle
            # wrap the corrected angle back into [0, 360)
            if adjusted_angle > 360:
                adjusted_angle -= 360
            elif adjusted_angle < 0:
                adjusted_angle += 360
            self.polar_points_new.append((adjusted_angle, n[1]))
        self.polar_points_new = sorted(self.polar_points_new)
        for m in self.polar_points_new:
            # keep only the forward-facing 270-360 degree sector
            if m[0] <= 360 and m[0] >= 270:
                xx, yy = (-(m[1] * math.sin(self.K * m[0])), (m[1] * math.cos(self.K * m[0])))
                self.points_analyse.append((xx,-yy))
        if self.points_analyse:
            self.Analyse()

    def get_lidar_angle(self, lidar_time):
        """Pick from lidar_angle_buffer the IMU angle timestamped closest to *lidar_time*."""
        IMU_buffer_lock.acquire()
        try:
            if lidar_angle_buffer:
                self.lidar_angle = min(lidar_angle_buffer, key=lambda x:abs(x[0]-lidar_time))[1]
            else:
                print('No IMU Data')
        finally:
            IMU_buffer_lock.release()

    def Analyse(self):
        """Simplify the point cloud with RDP and extract foothold segments.

        Fills self.points_reduced / self.points_reduced_index /
        self.seg_angle_list / self.footholds / self.redundant_cluster and
        finishes by calling mode_recognition().
        """
        remove_index = []
        seg_angle_list = []
        redundant_data = []
        self.footholds.clear()
        changed_flag = False
        outlier_flag = False
        self.redundant_cluster.clear()
        self.points_analyse.reverse()
        self.data_select = self.points_analyse
        n = len(self.points_analyse)
        mask = rdp(self.points_analyse, epsilon = DOUGLAS_EPSILON, return_mask = True) # RDP algorithm that simplifies points
        self.points_reduced_index = numpy.ndarray.tolist(numpy.where(mask)[0]) # a list that records the index of each points_reduced selected in the original long data list
        pts_reduced = list(itertools.compress(self.points_analyse, mask))
        if pts_reduced and pts_reduced[0]:
            # sort the reduced points by their polar angle
            ptList_reduced_polar= self.coord_to_polar(pts_reduced)
            pts_reduced = [y for (x,y) in sorted(zip(list(zip(*ptList_reduced_polar))[0], pts_reduced))]
            #Discard point in pts_reduced if no points in between a segment
            for p in range(len(pts_reduced) - 1):
                index_change = self.points_reduced_index[p+1] - self.points_reduced_index[p]
                if outlier_flag:
                    if index_change == 1:
                        remove_index.append(p)
                        changed_flag = True
                    else:
                        outlier_flag = False
                elif index_change == 1:
                    outlier_flag = True
                if not changed_flag:
                    deltax = pts_reduced[p+1][0] - pts_reduced[p][0]
                    deltay = pts_reduced[p+1][1] - pts_reduced[p][1]
                    seg_angle = math.atan2(deltay, deltax)*180/math.pi
                    # normalise the angle into (-180, 180]
                    if seg_angle < -180:
                        seg_angle += 360.0
                    elif seg_angle > 180:
                        seg_angle -= 360
                    seg_angle_list.append(seg_angle)
            #if not enough angle change, discard the midpoint
            for q in range(len(seg_angle_list) - 1):
                if abs(seg_angle_list[q+1] - seg_angle_list[q]) <= ANGLE_CHANGE_TOL:
                    if self.points_reduced_index[q+2] - self.points_reduced_index[q+1] > 1:
                        remove_index.append(q+1)
                        changed_flag = True
            #remove points and remake seg_angle_list
            if changed_flag:
                # pop from highest index down so earlier removals don't shift later ones
                remove_index = sorted(list(set(remove_index)))
                remove_index.reverse()
                for item in remove_index:
                    self.points_reduced_index.pop(item)
                    pts_reduced.pop(item)
                seg_angle_list.clear()
                for t in range(len(pts_reduced) - 1):
                    deltax = pts_reduced[t+1][0] - pts_reduced[t][0]
                    deltay = pts_reduced[t+1][1] - pts_reduced[t][1]
                    seg_angle = math.atan2(deltay, deltax)*180/math.pi
                    if seg_angle < -180:
                        seg_angle += 360.0
                    elif seg_angle > 180:
                        seg_angle -= 360
                    seg_angle_list.append(seg_angle)
            #check for the requirements of a foothold segment and generate the footholds list
            for index in range(len(pts_reduced) - 1):
                deltadist = self.calc_distance(pts_reduced[index+1], pts_reduced[index])
                pts_in_btw = self.points_reduced_index[index+1] - self.points_reduced_index[index]
                pts_density = pts_in_btw/deltadist
                if pts_in_btw > 1:
                    if pts_density > MIN_PTS_DENSITY:
                        if abs(seg_angle_list[index]) < ANGLE_TOL:
                            if self.footholds:
                                # reject footholds too far from the previous one
                                distance = self.calc_distance(pts_reduced[self.footholds[-1][0]+1], pts_reduced[index])
                                if distance < FOOTHOLD_MAX_LEN:
                                    self.footholds.append([index, deltadist, seg_angle_list[index]])
                            else:
                                self.footholds.append([index, deltadist, seg_angle_list[index]])
            #mark the first point of the remaining data that doesn't contain a foothold as redundant_data_start
            if self.footholds:
                if self.footholds[-1][0] == len(self.points_reduced_index) - 2:
                    redundant_data_start = None
                else:
                    redundant_data_start = self.points_reduced_index[self.footholds[-1][0]+1] + 1
                    pts_reduced = pts_reduced[0:self.footholds[-1][0]+2]
                    self.points_reduced_index = self.points_reduced_index[0:self.footholds[-1][0]+2]
                    self.data_select = self.points_analyse[0:redundant_data_start]
            else:
                redundant_data_start = None
            if redundant_data_start:
                self.redundant_cluster = self.points_analyse[redundant_data_start:]
            self.seg_angle_list = seg_angle_list
            self.points_reduced = pts_reduced
            del pts_reduced, seg_angle_list, redundant_data
            self.mode_recognition()

    def coord_to_polar(self, ptList):
        """Convert Cartesian (x, y) tuples to (angle_deg, distance) tuples."""
        ptList_polar = []
        for pt in ptList:
            new_angle = math.atan2(pt[1], pt[0]) * 180/math.pi
            dist = math.sqrt(pt[0]**2 + pt[1]**2)
            ptList_polar.append((new_angle, dist))
        return ptList_polar

    def make_list_from_tuples(self, *args):
        """Collect the positional arguments into a plain list."""
        empty_list = []
        for item in args:
            empty_list.append(item)
        return empty_list

    def mode_recognition(self):
        """Place up to STEPS_PREDICT_NUM future steps onto the detected footholds.

        Walks the foothold list, handling three cases: a segment long enough
        to take an OPTIMUM_STEP_LEN stride on, an ascent onto the next
        foothold, and a descent past a (predicted) hidden corner.
        Results land in self.steps_coord and self.hidden_corners.
        """
        current_foothold_num = 0
        self.hidden_corners = []
        self.steps_coord = {}
        steps_coord = {0: [0, self.points_reduced[0]]}  # step 0 = current position
        step_coord = self.points_reduced[0]
        j = 0
        if len(self.footholds) > 0:
            while j <= STEPS_PREDICT_NUM:
                if len(self.footholds) > current_foothold_num:
                    #if one foothold segment is longer than the OPTIMUM_STEP_LEN, then choose the point on the current segment
                    if (self.calc_distance(self.points_reduced[self.footholds[current_foothold_num][0] + 1], step_coord) > OPTIMUM_STEP_LEN):
                        step_coord = self.find_point(step_coord, OPTIMUM_STEP_LEN, self.footholds[current_foothold_num][2], self.footholds[current_foothold_num][0])
                        steps_coord[j+1] = [self.footholds[current_foothold_num][0], step_coord]
                        j += 1
                    else:
                        current_foothold_num += 1
                        if len(self.footholds) > current_foothold_num:
                            if self.points_reduced[self.footholds[current_foothold_num][0]][1] - self.points_reduced[self.footholds[current_foothold_num-1][0]][1] >= 0:
                                #Ascent case
                                if self.footholds[current_foothold_num][1] > FOOTHOLD_MIN_LEN:
                                    step_coord = self.find_point(self.points_reduced[self.footholds[current_foothold_num][0]], STEP_OFFSET, self.footholds[current_foothold_num][2], self.footholds[current_foothold_num][0])
                                    if self.calc_distance(step_coord, steps_coord[j][1]) > FOOTHOLD_MIN_LEN:
                                        steps_coord[j+1] = [self.footholds[current_foothold_num][0], step_coord]
                                        j += 1
                            else:
                                #Descent case
                                perpendicular_angle = self.seg_angle_list[self.footholds[current_foothold_num][0]-2] - 90
                                if abs(self.seg_angle_list[self.footholds[current_foothold_num][0]] - self.seg_angle_list[self.footholds[current_foothold_num][0]-2]) < ANGLE_TOL2:
                                    create_point = (self.points_reduced[self.footholds[current_foothold_num][0]-1][0] + 10 * math.cos(perpendicular_angle * math.pi/180), self.points_reduced[self.footholds[current_foothold_num][0]-1][1] + 10 * math.sin(perpendicular_angle * math.pi/180))
                                    (cornerx, cornery, t, r, s ) = self.intersectLines(self.points_reduced[self.footholds[current_foothold_num][0]-1], create_point, self.points_reduced[self.footholds[current_foothold_num][0]+1], self.points_reduced[self.footholds[current_foothold_num][0]])
                                    self.hidden_corners.append((cornerx, cornery))
                                    #hidden corners are predicted based on the assumption that the corner is a perpendicular one
                                    if self.calc_distance((cornerx, cornery), self.points_reduced[self.footholds[current_foothold_num][0]+1]) > FOOTHOLD_MIN_LEN:
                                        step_coord = (cornerx + FOOTHOLD_MIN_LEN * math.cos(math.pi/180 * self.seg_angle_list[self.footholds[current_foothold_num][0]]), cornery + FOOTHOLD_MIN_LEN * math.sin(math.pi/180 * self.seg_angle_list[self.footholds[current_foothold_num][0]]))
                                        if self.calc_distance(step_coord, steps_coord[j][1]) > FOOTHOLD_MIN_LEN:
                                            steps_coord[j+1] = [self.footholds[current_foothold_num][0], step_coord]
                                            j += 1
                        else:
                            break
                else:
                    break
        self.steps_coord = steps_coord
        if PAUSE:
            input("Press Enter to continue...")

    def find_point(self, seg_start_coord, offset, angle, range_index_low):
        #find the point in the data on a segment given that is an offset away from a starting point
        find_x_estimate = seg_start_coord[0] + offset * math.cos(angle/180.0*math.pi)
        # x-coordinates of the raw points belonging to the chosen segment
        a = list(zip(*self.data_select[self.points_reduced_index[range_index_low] : self.points_reduced_index[range_index_low + 1]]))[0]
        b = a.index(min(a, key=lambda x:abs(x - find_x_estimate)))
        return self.data_select[b + self.points_reduced_index[range_index_low]]

    def rotate_angle(self, original_coord, angle):
        """Rotate the given coordinate array by *angle* degrees about the origin."""
        angle = angle*math.pi/180.
        rotation_matrix = numpy.array([(math.cos(angle), -math.sin(angle)), (math.sin(angle), math.cos(angle))])
        point_matrix = numpy.array(original_coord).transpose()
        result_matrix = numpy.dot(rotation_matrix, point_matrix).transpose()
        result = numpy.ndarray.tolist(result_matrix)
        result = [tuple(l) for l in result]
        return result

    def calc_distance(self, point1, point2):
        """Euclidean distance between two (x, y) points."""
        deltax = point1[0] - point2[0]
        deltay = point1[1] - point2[1]
        return math.sqrt(deltax**2 + deltay**2)

    def intersectLines(self, pt1, pt2, ptA, ptB ):
        """ this returns the intersection of Line(pt1,pt2) and Line(ptA,ptB)
            returns a tuple: (xi, yi, valid, r, s), where
            (xi, yi) is the intersection
            r is the scalar multiple such that (xi,yi) = pt1 + r*(pt2-pt1)
            s is the scalar multiple such that (xi,yi) = pt1 + s*(ptB-ptA)
            valid == 0 if there are 0 or inf. intersections (invalid)
            valid == 1 if it has a unique intersection ON the segment """
        DET_TOLERANCE = 0.00000001
        # the first line is pt1 + r*(pt2-pt1)
        # in component form:
        x1, y1 = pt1; x2, y2 = pt2
        dx1 = x2 - x1; dy1 = y2 - y1
        # the second line is ptA + s*(ptB-ptA)
        x, y = ptA; xB, yB = ptB;
        dx = xB - x; dy = yB - y;
        # we need to find the (typically unique) values of r and s
        # that will satisfy
        #
        # (x1, y1) + r(dx1, dy1) = (x, y) + s(dx, dy)
        #
        # which is the same as
        #
        #    [ dx1  -dx ][ r ] = [ x-x1 ]
        #    [ dy1  -dy ][ s ] = [ y-y1 ]
        #
        # whose solution is
        #
        #    [ r ] = _1_  [  -dy   dx ] [ x-x1 ]
        #    [ s ] = DET  [ -dy1  dx1 ] [ y-y1 ]
        #
        # where DET = (-dx1 * dy + dy1 * dx)
        #
        # if DET is too small, they're parallel
        #
        DET = (-dx1 * dy + dy1 * dx)
        if math.fabs(DET) < DET_TOLERANCE: return (0,0,0,0,0)
        # now, the determinant should be OK
        DETinv = 1.0/DET
        # find the scalar amount along the "self" segment
        r = DETinv * (-dy  * (x-x1) +  dx * (y-y1))
        # find the scalar amount along the input line
        s = DETinv * (-dy1 * (x-x1) + dx1 * (y-y1))
        # return the average of the two descriptions
        xi = (x1 + r*dx1 + x + s*dx)/2.0
        yi = (y1 + r*dy1 + y + s*dy)/2.0
        return ( xi, yi, 1, r, s )

    def points_360degrees(self):
        #function that shares lidar data between Lidar thread and window thread
        # Returns True when 16 buffered frames were moved into self.polar_points.
        data_buffer_lock.acquire()
        try:
            if len(data_list_buffer) > 15:
                # print(len(data_list_buffer))
                self.polar_points.clear()
                self.polar_points_new.clear()
                for i in data_list_buffer[0:16]:
                    self.polar_points[len(self.polar_points):len(i)] = i
                data_list_buffer[0:16] = []
                return True
            else:
                return False
        finally:
            data_buffer_lock.release()

    def draw_points(self):
        """Render raw points, simplified segments, footholds, predicted steps
        and hidden corners onto the pygame surface."""
        self.X,self.Y = self.SCREEN_SIZE[0]/10 , self.SCREEN_SIZE[0]/10
        self.k = self.SCREEN_SIZE[1]/2000
        line_list = []
        foothold_index_list = [x[0] for x in self.footholds]
        self.screen.fill((84,106,124))
        # raw selected points in white
        for p in self.data_select:
            x,y = p
            x,y = x*self.k+self.X,-y*self.k+self.Y
            pygame.draw.circle(self.screen, (255, 255, 255), (int(x), int(y)), 4)
        # simplified points in green; segments green if foothold, red otherwise
        for index in range(len(self.points_reduced)):
            (xr, yr) = self.points_reduced[index]
            xr, yr = xr*self.k+self.X, -yr*self.k+self.Y
            pygame.draw.circle(self.screen, 0x00ff00, (int(xr), int(yr)), 4)
            if index != len(self.points_reduced) - 1:
                (x1, y1) = self.points_reduced[index]
                x1, y1 = x1*self.k+self.X, -y1*self.k+self.Y
                (x2, y2) = self.points_reduced[index+1]
                x2, y2 = x2*self.k+self.X, -y2*self.k+self.Y
                if index in foothold_index_list:
                    pygame.draw.line(self.screen, (0, 255, 0), [x1, y1], [x2, y2], 2)
                else:
                    pygame.draw.line(self.screen, (255, 0, 0), [x1, y1], [x2, y2], 2)
        if self.redundant_cluster:
            for point_index in self.redundant_cluster:
                (xd, yd) = point_index
                xd, yd = xd*self.k+self.X, -yd*self.k+self.Y
                pygame.draw.circle(self.screen, 0xCD5437, (int(xd), int(yd)), 4)
        if self.steps_coord:
            # predicted steps in orange, plus the first step's displacement text
            for g in self.steps_coord.values():
                (xs, ys) = g[1]
                xs, ys = xs*self.k+self.X, -ys*self.k+self.Y
                pygame.draw.circle(self.screen, 0xFFA233, (int(xs), int(ys)), 8)
            if len(self.steps_coord.values()) > 1:
                a = round(self.steps_coord[1][1][0] - self.steps_coord[0][1][0], 2)
                b = round(self.steps_coord[1][1][1] - self.steps_coord[0][1][1], 2)
                S2 = str(a) + "cm " + str(b) + "cm"
                text = self.font.render(S2, True, (255,255,255))
                self.screen.blit(text, (600,1200))
                S1 = "横坐标变化:    纵坐标变化:"
                text = self.font.render(S1, True, (255,255,255))
                self.screen.blit(text, (200,1200))
        if self.hidden_corners:
            for (xh, yh) in self.hidden_corners:
                xh, yh = xh*self.k+self.X, -yh*self.k+self.Y
                pygame.draw.circle(self.screen, 0xAAF904, (int(xh), int(yh)), 4)
        # sensor origin marker and heading indicator
        # NOTE(review): self.lidar_angle is in degrees elsewhere but fed to
        # sin/cos here without conversion — confirm intended.
        pygame.draw.circle(self.screen, 0xff00ff, (int(self.X), int(self.Y)), 50)
        pygame.draw.line(self.screen, (0, 0, 0), [self.X, self.Y], [self.X - 50*math.sin(self.lidar_angle), self.Y - 50*math.cos(self.lidar_angle)], 2)

    def warning(self, warning_str):
        """Show *warning_str* with the warning icon centred on screen."""
        self.X,self.Y = self.SCREEN_SIZE[0]/2 , self.SCREEN_SIZE[1]/2
        self.screen.fill((84,106,124))
        warning_text = self.font.render(warning_str, True, (255,255,255))
        width = warning_text.get_width()
        height = warning_text.get_height()
        self.screen.blit(self.warning_image, (self.X-self.warning_image.get_width()/2, self.Y-self.warning_image.get_height()/2 - self.Shift_W))
        self.screen.blit(warning_text, (self.X-width/2, self.Y-height/2 + self.Shift_T))

    def Screen_size_change(self, s):
        """Handle a window resize: rebuild surface, assets and scale factors."""
        self.SCREEN_SIZE = s
        self.X,self.Y = self.SCREEN_SIZE[0]/2 , self.SCREEN_SIZE[1]/2
        self.k = self.SCREEN_SIZE[1]/12000
        self.point = pygame.image.load('point_white_2.png').convert_alpha()
        self.W,self.H = self.point.get_width()/2 , self.point.get_height()/2
        self.warning_image = pygame.image.load('warning_2.png').convert_alpha()
        self.Shift_T = 25
        self.Shift_W = 100
        self.font = pygame.font.Font("PingFang.ttf", int(35*self.SCREEN_SIZE[0]/1400))
        self.screen = pygame.display.set_mode(self.SCREEN_SIZE, RESIZABLE, 32)
        self.D = 1

    def XYK_change(self):
        """Reset view origin to screen centre and scale to the default."""
        self.X,self.Y = self.SCREEN_SIZE[0]/2 , self.SCREEN_SIZE[1]/2
        self.k = self.SCREEN_SIZE[1]/12000

    def XY_move(self, move_tion):
        """Pan the view by a mouse-drag delta, scaled by self.D."""
        self.X, self.Y = self.X + self.D * move_tion[0] , self.Y + self.D * move_tion[1]

    def Y_change(self, C):
        # pan vertically by C pixels
        self.Y += C

    def X_change(self, C):
        # pan horizontally by C pixels
        self.X += C

    def k_change(self, C):
        """Zoom: C == 0 resets the scale, otherwise scale by (10 + C)/10."""
        if C == 0:
            self.k = self.SCREEN_SIZE[1]/12000
        else:
            a = 10
            self.k = self.k * (a + C)/a

    def run(self):
        """Main loop: ingest lidar data, analyse, and render either the points
        or the pending warning message."""
        while True:
            # print("projection")
            if self.points_360degrees():
                self.projection()
            #self.projection()
            state_lock.acquire()
            try:
                # NOTE(review): this aliases the shared list rather than
                # copying it, so it is read later without the lock — confirm
                # that is acceptable here.
                state_buffer = state_mode
            finally:
                state_lock.release()
            if not state_buffer:
                if self.drawBoln:
                    self.draw_points()
            else:
                # NOTE(review): warning() touches self.screen, which only
                # exists when drawBoln is True — confirm this branch cannot be
                # reached with drawBoln False.
                self.warning(state_buffer[0])
            if self.drawBoln:
                pygame.display.flip()
class IMU_data_get(serial.Serial, threading.Thread):
global lidar_angle_buffer
def __init__(self, threadID, name, S, B = 115200):
threading.Thread.__init__(self)
thread_queue[threadID] = self
# serial.Serial.__init__(self, serial_port, Baud)
self.K = 180 / math.pi
self.serial_port = S
self.baud = B
self.threadID = threadID
self.name = name
self.start_time = 0
self.lidar_angle = 0
# init the serial
try:
serial.Serial.__init__(self, self.serial_port, self.baud, timeout = 0.5)
except serial.serialutil.SerialException:
state_lock_IMU.acquire()
try:
state_mode_IMU.clear()
state_mode_IMU.append("串口设备 "+self.serial_port+" 连接失败")
finally:
state_lock_IMU.release()
raise serial.serialutil.SerialException
def restart(self):
try:
serial.Serial.__init__(self, self.serial_port, self.baud)
except serial.serialutil.SerialException:
state_lock_IMU.acquire()
try:
state_mode_IMU.clear()
state_mode_IMU.append("串口设备 "+self.serial_port+" 连接失败")
finally:
state_lock_IMU.release()
raise serial.serialutil.SerialException
def Head_check(self):
response = self.read()
if response == b'\xfa':
response = self.read()
if response == b'\xff':
MID = self.read()
if MID == b'\x36':
is_data = True
else:
is_data = False
else:
is_data = False
else:
is_data = False
return is_data
def get_data(self):
# IO intensive
'''
read data from imu. The function assumes the IMU data form with start code: 1020, 2038, and 8020. Only Euler angle data is used for calculating lidar angle.
Other 2 data packets are not used.
'''
data_length = self.read()
data_length = binascii.hexlify(data_length)
data_length = data_length.decode(errors = 'ignore')
data_length = int(data_length, 16)
data = self.read(data_length)
data = binascii.hexlify(data)
count_data = 0
while count_data < data_length:
data_ID = self.read(2)
print_ID = binascii.hexlify(data_ID)
count_data += 2
if data_ID == b'\x10\x20':
packet_count_length = self.read()
count_data += 1
packet_count_length = binascii.hexlify(packet_count_length)
packet_count_length = packet_count_length.decode(errors = 'ignore')
packet_count_length = int(packet_count_length, 16)
packet_count = self.read(packet_count_length)
count_data += packet_count_length
if data_ID == b'\x20\x38':
data_size = self.read()
count_data += 1
data_size = binascii.hexlify(data_size)
data_size = data_size.decode(errors = 'ignore')
data_size = int(data_size, 16)
euler = []
for i in range(math.floor(data_size/4)):
item = self.read(4)
count_data += 4
item = str(binascii.hexlify(item))[2:-1]
item = struct.unpack('!f', bytes.fromhex(item))[0]
euler.append(item)
self.lidar_angle = (time.time(), euler[1] + LIDAR_OFFSET)
if data_ID == b'\x80\x20':
data_size = self.read()
count_data += 1
data_size = binascii.hexlify(data_size)
data_size = data_size.decode(errors = 'ignore')
data_size = int(data_size, 16)
rate_of_turn = []
for i in range(math.floor(data_size/4)):
item = self.read(4)
count_data += 4
item = str(binascii.hexlify(item))[2:-1]
item = struct.unpack('!f', bytes.fromhex(item))[0]
rate_of_turn.append(item)
if data_ID == b'\x40\x20':
data_size = self.read()
count_data += 1
data_size = binascii.hexlify(data_size)
data_size = data_size.decode(errors = 'ignore')
data_size = int(data_size, 16)
acceleration = []
for i in range(math.floor(data_size/4)):
item = self.read(4)
count_data += 4
item = str(binascii.hexlify(item))[2:-1]
item = struct.unpack('!f', bytes.fromhex(item))[0]
acceleration.append(item)
check_sum = self.read()
check_sum = binascii.hexlify(check_sum)
    def run(self):
        """Thread body: continuously read IMU packets and publish lidar angles.

        While the shared error list ``state_mode_IMU`` is empty, waits for a
        packet header and parses one packet (``get_data`` refreshes
        ``self.lidar_angle``), then appends that angle to the global
        ``lidar_angle_buffer`` under ``IMU_buffer_lock``, trimming the buffer
        to ``LIDAR_BUFFER_LEN`` entries.  A lost serial port is reported via
        the global ``state_mode`` list instead of crashing the thread.
        """
        # IMU_read_lock_2.acquire()
        while True:
            try:
                # Only poll while no IMU error state is recorded.
                if (not state_mode_IMU):
                    # print("angle")
                    # IMU_read_lock_1.acquire()
                    # try:
                    # self.Gravity_angle()
                    # IMU_read_lock_2.release()
                    # finally:
                    # IMU_read_lock_1.release()
                    # IMU_read_lock_2.acquire()
                    # while Flag.get():
                    # pass
                    is_data = self.Head_check()
                    if is_data:
                        self.get_data()
                        # Publish the newest angle; the lock guards the shared buffer.
                        IMU_buffer_lock.acquire()
                        try:
                            lidar_angle_buffer.append(self.lidar_angle)
                            if len(lidar_angle_buffer) > LIDAR_BUFFER_LEN:
                                lidar_angle_buffer.pop(0)
                        finally:
                            IMU_buffer_lock.release()
            except serial.serialutil.SerialException:
                # Serial device vanished: surface a reconnect hint to the UI.
                state_lock.acquire()
                try:
                    state_mode.clear()
                    state_mode.append("串口设备 "+self.serial_port+" 丢失 按下command+R重连")
                finally:
                    state_lock.release()
def Event_handle(obj):
    """Main-thread pygame event loop for the visualization window.

    Dispatches window, keyboard and mouse events to *obj* (the window
    thread): Cmd+Q quits, Cmd+R restarts the serial thread, Cmd+C toggles
    XYK mode, Cmd with +/-/0 adjusts zoom, arrow keys pan, the scroll
    wheel zooms, and left-drag moves the view.  Blocks forever on
    pygame.event.wait().
    """
    while True:
        event = pygame.event.wait()
        if event.type == QUIT:
            exit()
        if event.type == VIDEORESIZE:
            obj.Screen_size_change(event.size)
        if event.type == KEYDOWN:
            # KMOD_META is the Command key on macOS.
            if event.key == K_q:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    exit()
            elif event.key == K_r:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    # Cmd+R: reconnect the lost serial device in the background.
                    th = Thread_restart()
                    th.setDaemon(True)
                    th.start()
            elif event.key == K_c:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    obj.XYK_change()
            elif event.key == K_UP:
                obj.Y_change(-10)
            elif event.key == K_DOWN:
                obj.Y_change(10)
            elif event.key == K_LEFT:
                obj.X_change(-10)
            elif event.key == K_RIGHT:
                obj.X_change(10)
            elif event.key == K_PLUS:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    obj.k_change(1)
            elif event.key == K_MINUS:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    obj.k_change(-1)
            elif event.key == K_0:
                if pygame.key.get_mods() & pygame.KMOD_META:
                    obj.k_change(0)
        if event.type == MOUSEBUTTONDOWN:
            # Buttons 4/5 are the scroll wheel: zoom in/out.
            if event.button == 5:
                obj.k_change(-1)
            elif event.button == 4:
                obj.k_change(1)
        if event.type == MOUSEMOTION:
            # Left-button drag pans the view by the relative motion.
            if event.buttons[0]:
                obj.XY_move(event.rel)
class Thread_start(threading.Thread):
    """Bootstrap thread: retries until the radar serial thread is running.

    Loops on SerialException (device not yet attached); once the radar
    thread starts, clears the global status-message list and exits.
    """

    def run(self):
        not_started = True
        while not_started:
            try:
                radar_serial_thread_1 = radar_serial_thread(1, 'Thread-1', '/dev/tty.SLAB_USBtoUART')
                # .daemon replaces the deprecated setDaemon() call; behavior
                # is identical (thread won't block interpreter exit).
                radar_serial_thread_1.daemon = True
                radar_serial_thread_1.start()
                state_lock.acquire()
                try:
                    state_mode.clear()
                finally:
                    state_lock.release()
                not_started = False
            except serial.serialutil.SerialException:
                # Port not available yet -- keep retrying.
                pass
        print("start thread end")
class Thread_IMU_start(threading.Thread):
    """Bootstrap thread: retries until the IMU reader thread is running.

    Mirrors Thread_start but for the IMU serial device; clears the IMU
    status-message list once the reader starts.
    """

    def run(self):
        not_started = True
        while not_started:
            try:
                IMU = IMU_data_get(3, 'Thread-3', '/dev/tty.SLAB_USBtoUART140')
                # .daemon replaces the deprecated setDaemon() call; behavior
                # is identical (thread won't block interpreter exit).
                IMU.daemon = True
                IMU.start()
                state_lock_IMU.acquire()
                try:
                    state_mode_IMU.clear()
                finally:
                    state_lock_IMU.release()
                not_started = False
            except serial.serialutil.SerialException:
                # Port not available yet -- keep retrying.
                pass
        print("start Thread-3 end")
class Thread_restart(threading.Thread):
    """Retries thread_queue[1].restart() until the serial device reappears."""

    def run(self):
        pending = True
        while pending:
            try:
                thread_queue[1].restart()
                state_lock.acquire()
                try:
                    state_mode.clear()
                finally:
                    state_lock.release()
                pending = False
            except serial.serialutil.SerialException:
                # Device still missing; loop and try again.
                pass
        print("restart done")
# Wire up and launch the worker threads: the UI window first, then the
# radar serial reader, then the IMU reader -- staggered by ~1s so the
# window is up before data sources start publishing.
window_thread_1 = window_thread(2, 'Thread-2', DRAWBOLN)
window_thread_1.setDaemon(True)
start2thread = Thread_start()
start2thread.setDaemon(True)
start_IMU_thread = Thread_IMU_start()
start_IMU_thread.setDaemon(True)
window_thread_1.start()
time.sleep(0.9)
start2thread.start()
time.sleep(0.9)
start_IMU_thread.start()
if __name__ == '__main__':
    if DRAWBOLN:
        # Handle pygame events on the main thread (window thread draws).
        Event_handle(window_thread_1)
    else:
        pass
from steemit.steemit import get_last_hash_comment, send_new_hash_comment
import json
from io import BytesIO
class RootHolder(object):
    """Base class for storing/retrieving an IPFS root object.

    Knows only how to serialize a root dict into IPFS and pin it; where
    the latest root hash is remembered is left to subclasses (get/post).
    """

    def __init__(self, api):
        self.api = api

    def get(self):
        # Subclasses return the current root hash.
        pass

    def post(self, root):
        # Subclasses publish a new root object.
        pass

    def create_empty(self):
        """Create and store a fresh, minimal root object; return its hash."""
        empty_root = {'Data': "{'version':0.0.0}"}
        return self.update_root(empty_root)

    def update_root(self, root):
        """Serialize *root* as JSON into IPFS, pin it, and return the hash."""
        stored = self.api.object_put(BytesIO(json.dumps(root).encode()))
        self.api.pin_add(stored['Hash'])
        return stored['Hash']
class SteemitRootHolder(RootHolder):
    """Root holder that persists the latest root hash in a Steemit comment."""

    def __init__(self, api):
        self.root_hash = None
        super().__init__(api)

    def get(self):
        """Fetch the last published hash, bootstrapping an empty root if none exists."""
        published = get_last_hash_comment()
        if published is not None:
            self.api.pin_add(published)
            self.root_hash = published
        else:
            self.root_hash = self.create_empty()
            send_new_hash_comment(self.root_hash)
        return self.root_hash

    def post(self, root):
        """Store *root* and announce the new hash, chained to the previous one."""
        fresh = self.update_root(root)
        send_new_hash_comment(fresh, prev_hash=self.root_hash)
        self.root_hash = fresh
class DebugRootHolder(RootHolder):
    """Root holder for local debugging: keeps the hash only in memory."""

    def __init__(self, api):
        self.hash_path = None
        super().__init__(api)

    def get(self):
        """Return the current root hash, creating an empty root on first use."""
        if self.hash_path is None:
            self.hash_path = self.create_empty()
        return self.hash_path

    def post(self, root):
        """Store *root* and report the freshly created hash.

        Fix: the original printed self.hash_path *before* updating it, so
        the message labelled the previous root as "New root".
        """
        self.hash_path = self.update_root(root)
        print("New root {}".format(self.hash_path))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: repoints both image fields' upload_to paths.

    Kept byte-for-byte as generated -- editing already-applied migrations
    desynchronizes Django's recorded migration state.
    """

    dependencies = [
        ('textureDB', '0002_auto_20150727_1136'),
    ]

    operations = [
        migrations.AlterField(
            model_name='realimage',
            name='image',
            field=models.ImageField(upload_to='textureDB/RealImage/%Y/%m/%d'),
        ),
        migrations.AlterField(
            model_name='renderedimage',
            name='image',
            field=models.ImageField(upload_to='textureDB/RenderedImage/%Y/%m/%d'),
        ),
    ]
|
#!/usr/bin/python
# Remote health check: sample vmstat/uptime over ssh and report the likely
# bottleneck (memory, CPU, or other).
import os
import subprocess
import sys # package to accept input hostname/ipaddress
# NOTE(review): sys.argv[1] is interpolated into a shell command unescaped --
# only safe for trusted input.
command= "ssh " + sys.argv[1] + " vmstat"
vm = subprocess.check_output("%s" %command, shell = True) # getting command output from remote host
# Third output line is the data row; by vmstat column order, fields 6..9
# are si/so (swap in/out) and bi/bo (blocks in/out).
vm1 = vm.splitlines()
vm2 = vm1[2].split()
sw_in = vm2[6]
sw_ot = vm2[7]
bi = vm2[8]
bo = vm2[9]
command1= "ssh " + sys.argv[1]+ " uptime"
vm = subprocess.check_output("%s" %command1, shell = True) # getting command output from remote host
def upti():
    """Print the load average normalized by the number of CPU cores.

    Re-runs `uptime` on the remote host, takes field 9 as a load-average
    value (presumably the 5-minute figure -- TODO confirm; uptime's field
    count varies with how long the host has been up), and divides by the
    'cpu cores' count from /proc/cpuinfo.
    """
    ut = subprocess.check_output("%s" %command1, shell = True)
    ut1 = ut.splitlines()
    ut2 = ut1[0].split()
    ut3 = float(ut2[9])
    com_cpu = "cat /proc/cpuinfo | grep 'cpu cores' | awk '{print $4 }'"
    cpcors = subprocess.check_output("%s" %com_cpu, shell=True)
    # NOTE(review): on multi-core hosts grep prints one line per processor,
    # so float() on the raw output may fail -- verify.
    total_cpu = 100*ut3/float(cpcors)
    print("cpu load is at %s" %total_cpu) # calculating the load average per cpu cores
    print("done")
def mem_usage():
    """Heuristic bottleneck report from the vmstat sample captured above.

    Swap-in/swap-out activity indicates memory pressure; the run-queue vs
    blocked-process counts then distinguish CPU load from process load.
    Otherwise points at network/memory/storage.
    """
    # Fix: the original condition `sw_in or sw_ot < 0` always took this
    # branch -- by operator precedence a non-empty sw_in token is truthy.
    # si/so are non-negative counters; any positive value means swapping.
    if int(sw_in) > 0 or int(sw_ot) > 0:
        print( " you may need to increase memory as swap_in and out is more %s and %s" %(sw_in,sw_ot) )
        # Fix: compare run-queue (r) vs blocked (b) numerically, not as strings.
        if int(vm2[0]) < int(vm2[1]):
            print("issue could be because of CPU as queue values are %s and %s " %(vm2[0],vm2[1]))
            upt1 = upti()
            print(upt1)
            # NOTE(review): vm2[9]/vm2[10] are bo/in(terrupts); compared
            # numerically now, but the heuristic itself is unverified.
            if int(vm2[9]) < int(vm2[10]):
                print("issue could be because of more system process ")
            else:
                print("issue could be because of high user process")
    else:
        print("issue could be becase of network or memory or storage")
# Run the report and remind the operator of the manual follow-up.
mem_det = mem_usage()
print("check top command to find the process with high cpu or memorary usage")
|
import itertools
import logging
from typing import List, Dict, Any
from ray.remote_function import DEFAULT_REMOTE_FUNCTION_CPUS
import ray.ray_constants as ray_constants
# Module logger.
logger = logging.getLogger(__name__)
# Oldest pyarrow release supported by Ray Datasets.
MIN_PYARROW_VERSION = (4, 0, 1)
# Process-wide flag: the pyarrow version check runs at most once.
_VERSION_VALIDATED = False
def _check_pyarrow_version():
    """Validate, once per process, that an adequate pyarrow is installed.

    Raises ImportError when the detected version is older than
    MIN_PYARROW_VERSION; logs a warning when the version cannot be
    determined at all (and will then re-check on the next call).
    """
    global _VERSION_VALIDATED
    if _VERSION_VALIDATED:
        return
    import pkg_resources
    try:
        dist = pkg_resources.require("pyarrow")[0]
    except pkg_resources.DistributionNotFound:
        logger.warning("You are using the 'pyarrow' module, but "
                       "the exact version is unknown (possibly carried as "
                       "an internal component by another module). Please "
                       "make sure you are using pyarrow >= "
                       f"{'.'.join(str(n) for n in MIN_PYARROW_VERSION)} "
                       "to ensure compatibility with Ray Datasets.")
    else:
        version_str = dist.version
        # "dev" segments are dropped so pre-release builds still parse.
        version = tuple(
            int(n) for n in version_str.split(".") if "dev" not in n)
        if version < MIN_PYARROW_VERSION:
            raise ImportError(
                "Datasets requires pyarrow >= "
                f"{'.'.join(str(n) for n in MIN_PYARROW_VERSION)}, "
                f"but {version_str} is installed. Upgrade with "
                "`pip install -U pyarrow`.")
        _VERSION_VALIDATED = True
def _get_spread_resources_iter(nodes: List[Dict[str, Any]],
                               spread_resource_prefix: str,
                               ray_remote_args: Dict[str, Any]):
    """Build a round-robin iterator over spreadable resource labels.

    A label qualifies when it starts with *spread_resource_prefix* and
    lives on a node that also holds every resource the remote args request
    (including the task resource-request defaults).

    Raises:
        ValueError: when no label satisfies both constraints.
    """
    requested = _get_resource_request_labels(ray_remote_args)
    candidates = _filtered_resources(
        nodes,
        include_prefix=spread_resource_prefix,
        include_colocated_with=requested)
    if not candidates:
        raise ValueError(
            "No resources both match the provided prefix "
            f"{spread_resource_prefix} and are colocated with resources "
            f"{requested}.")
    # Tiny fractional requests spread tasks without consuming real capacity.
    return itertools.cycle([{label: 0.001} for label in candidates])
def _get_resource_request_labels(ray_remote_args: Dict[str, Any]):
    """Collect the resource labels implied by *ray_remote_args*.

    Includes custom resources, CPU (when the task default requests CPUs),
    GPU when num_gpus is present, and the accelerator-type constraint
    resource when requested.  Returns a set of label strings.
    """
    labels = set(ray_remote_args.get("resources", {}).keys())
    if DEFAULT_REMOTE_FUNCTION_CPUS > 0:
        labels.add("CPU")
    if "num_gpus" in ray_remote_args:
        labels.add("GPU")
    if "accelerator_type" in ray_remote_args:
        labels.add(
            f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}"
            f"{ray_remote_args['accelerator_type']}")
    return labels
def _filtered_resources(nodes: List[Dict[str, Any]], include_prefix: str,
include_colocated_with: List[str]):
"""Filters cluster resource labels based on the given prefix and the
given resource colocation constraints.
Returns a list of unique, sorted resource labels.
"""
resources = [
resource for node in nodes
if set(include_colocated_with) <= set(node["Resources"].keys())
for resource in node["Resources"].keys()
if resource.startswith(include_prefix)
]
# Ensure stable ordering of unique resources.
return sorted(set(resources))
|
from socketserver import ThreadingMixIn
from http.server import HTTPServer
from http.server import CGIHTTPRequestHandler
if __name__ == '__main__':
    # HTTPServer handles one request at a time; mixing in ThreadingMixIn
    # gives a thread per request.
    class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
        pass
    # Serve CGI scripts from ./bin on all interfaces, port 8000; blocks forever.
    server_address = ('', 8000)
    CGIHTTPRequestHandler.cgi_directories.append('/bin')
    httpd = ThreadingHTTPServer(server_address, CGIHTTPRequestHandler)
    httpd.serve_forever()
|
import os
import flask
from flask import Flask, request, redirect, url_for
from flask import jsonify
import threading
import requests
import json
import uuid
# very coarse grained lock for atomic operation
writeLock = threading.Lock()
# NOTE(review): cacheLock is never used in the code visible here.
cacheLock = threading.Lock()
# directory mapping (nested dict tree, see the example sketch below)
directory = {}
# local fs
UPLOAD_FOLDER = '/fs'
# file name mapping
'''
FILE_NAME_MAP is mapping bewteen pathtofilename and file uuid
'''
# NOTE(review): add()/find() actually key this by directory path (aggdir),
# not by full file path -- a second file in the same directory overwrites
# the first's uuid.  Verify intended keying.
FILE_NAME_MAP = {}
'''
/dir1/ f0
/dir1/dir2 f1, f2
/dir1/dir2/dir3 f3, f4, f5
{
'dir1':{
'files':['f0'],
'dir2':{
'files':['f1','f2'],
'dir3':{
'files':['f3','f4','f5']
}
}
}
}
'''
app = Flask(__name__)
# create the file under path
@app.route("/add", methods=['GET', 'POST'])
def add():
if request.method == 'POST':
payload = request.get_json(force=True)
try:
fpath = payload['fpath']
filename = payload['fname']
fcontent = payload['fcontent']
if not fpath.startswith('/'):
raise Exception('invalid path: '+path)
# extract all the sub dirs
subdirs = [x for x in fpath.split('/') if len(x.strip())>0]
curdir = directory
aggdir = ''
# lock the directory update to eliminate race condition
with writeLock:
print(threading.current_thread())
print(str(writeLock))
while subdirs:
thisdir = subdirs.pop(0)
if thisdir not in curdir:
curdir[thisdir]={}
aggdir = aggdir+ '/' +thisdir
curdir = curdir[thisdir]
# end of the path
if not subdirs:
if 'files' not in curdir:
curdir['files'] = []
if filename in curdir['files']:
return jsonify({'response':'FileExists', 'response_code':400, 'filename':filename, 'path':aggdir, 'directory':directory})
# append into directory
curdir['files'].append(filename)
# write into disk
fileid = str(uuid.uuid4())
with open(os.path.join(UPLOAD_FOLDER, fileid), 'w') as the_file:
the_file.write(fcontent)
# update the local mapping
FILE_NAME_MAP[aggdir] = fileid
# update in the cache server
fileinfo = {'fcontent': fcontent, 'filepath': aggdir+'/'+filename}
response = requests.post('http://cache:5002/put', data=json.dumps(fileinfo))
# response = (response.json())
return jsonify({"response":"OK", "response_code": 200, 'filepath':aggdir+'/'+filename})
except Exception as e:
return jsonify({"response": str(e)+", error in directory service", "response_code": 500, 'directory':directory})
return jsonify({"response":"Error", "response_code": 200})
# create the file under path
@app.route("/find", methods=['GET', 'POST'])
def find():
    """Look up fname under fpath; serve its content from cache or disk.

    POST JSON body: {'fpath': ..., 'fname': ...}.  Returns 404 when the
    directory or file is unknown, 500 on any internal error.  On a cache
    miss the file is read from disk and pushed back into the cache.
    """
    if request.method == 'POST':
        payload = request.get_json(force=True)
        try:
            fpath = payload['fpath']
            filename = payload['fname']
            if not fpath.startswith('/'):
                # Fix: the original raised with the undefined name `path`,
                # masking this validation failure behind a NameError.
                raise Exception('invalid path: '+fpath)
            # extract all the sub dirs
            subdirs = [x for x in fpath.split('/') if len(x.strip())>0]
            curdir = directory
            aggdir = ''
            while subdirs:
                thisdir = subdirs.pop(0)
                aggdir = aggdir+ '/' +thisdir
                # dir not exist
                if thisdir not in curdir:
                    return jsonify({'response':'DirNotExists', 'response_code':404, 'filename':filename, 'path':aggdir, 'directory':directory})
                curdir = curdir[thisdir]
                # end of the path
                if not subdirs:
                    # Robustness fix: a directory node created without files
                    # has no 'files' key -- report "file not found" instead
                    # of a 500 from the KeyError.
                    if filename not in curdir.get('files', []):
                        return jsonify({'response':'FileNotExists', 'response_code':404, 'filename':filename, 'path':aggdir, 'directory':directory})
                    # load in from cache
                    fileinfo = {'filepath': aggdir+'/'+filename}
                    response = requests.post('http://cache:5002/fetch', data=json.dumps(fileinfo))
                    response = response.json()
                    if response['response']=='InCache':
                        return jsonify(response)
                    elif response['response']=='NotInCache':
                        # read from disk
                        fileid = FILE_NAME_MAP[aggdir]
                        with open(os.path.join(UPLOAD_FOLDER, fileid), 'r') as the_file:
                            fileContent = the_file.read()
                        # load into cache
                        fileinfo['fcontent'] = fileContent
                        response = requests.post('http://cache:5002/put', data=json.dumps(fileinfo))
                        return jsonify({'response':'LoadInDisk','response_code':200, 'filename':filename, 'path':aggdir, 'fileContent': fileContent})
        except Exception as e:
            return jsonify({"response": str(e)+", error in directory service", "response_code": 500, 'directory':directory})
    return jsonify({"response":"Error", "response_code": 200})
@app.route("/", methods=['GET', 'POST'])
def hello():
    # Expose the whole in-memory directory tree as JSON (debug aid).
    return jsonify(directory)
@app.route('/fs')
def fs():
    """List the flat file store (raw uuid file names) for debugging."""
    from os import listdir
    from os.path import isfile, join
    entries = []
    for name in listdir(UPLOAD_FOLDER):
        if isfile(join(UPLOAD_FOLDER, name)):
            entries.append(name)
    return str(entries)
if __name__ == "__main__":
    # enable multi-threading for flask
    # NOTE(review): debug=True enables the Werkzeug debugger -- not for
    # production exposure.
    app.run(host='0.0.0.0', port=5001, threaded=True, debug=True)
# -*- coding: utf-8 -*-
import json
import os
import re
import requests
# File name (relative to this module) caching the full stock-code list.
STOCK_CODE_PATH = 'symbol.json'
def round_price_by_code(price, code):
    """Format a security price with the precision its code type implies.

    Funds are quoted to 3 decimal places, stocks to 2; string prices are
    returned unchanged.

    :param price: security price (number, or an already-formatted string)
    :param code: security code used to decide the precision
    :return: str representation of the truncated price
    """
    if isinstance(price, str):
        return price
    template = '{:.3f}' if get_code_type(code) == 'fund' else '{:.2f}'
    return template.format(price)
def get_ipo_info(only_today=False):
    """Fetch the Sina new-stock-issue (IPO) table as a pandas DataFrame.

    :param only_today: keep only rows whose online issue date is today
    :return: DataFrame parsed from the listing page (GBK encoded)
    """
    import pyquery
    response = requests.get('http://vip.stock.finance.sina.com.cn/corp/go.php/vRPD_NewStockIssue/page/1.phtml', headers={'accept-encoding': 'gzip, deflate, sdch'})
    html = response.content.decode('gbk')
    html_obj = pyquery.PyQuery(html)
    table_html = html_obj('#con02-0').html()
    import io        # fix: io was referenced below but never imported
    import datetime  # fix: datetime was referenced below but never imported
    import pandas as pd
    df = pd.read_html(io.StringIO(table_html), skiprows=3,
                      converters={
                          '证券代码': str,
                          '申购代码': str
                      }
                      )[0]
    if only_today:
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        df = df[df['上网发行日期↓'] == today]
    return df
def update_stock_codes():
    """Refresh the local stock-code cache file from shdjt.com."""
    source_url = 'http://www.shdjt.com/js/lib/astock.js'
    code_pattern = re.compile('~(\d+)`')
    body = requests.get(source_url).text
    codes = code_pattern.findall(body)
    with open(stock_code_path(), 'w') as f:
        f.write(json.dumps(dict(stock=codes)))
def get_all_stock_codes(realtime=False):
    """Return every known stock code, optionally refreshing from the network.

    Fix: the flag was previously passed positionally, so it landed in
    get_stock_codes' *symbol_path* parameter -- realtime=True then tried
    open(True) instead of scraping.
    """
    return get_stock_codes(realtime=realtime)
def get_stock_codes(symbol_path=None, realtime=False):
    """Return the cached stock codes, refreshing the cache when realtime.

    :param symbol_path: cache file to write/read; defaults to stock_code_path()
    :param realtime: when True, re-scrape the code list and rewrite the cache
    :return: list of stock-code strings
    """
    # Fix: the read branch used to call open(None) when no symbol_path was
    # given; both branches now fall back to the default cache location.
    path = symbol_path if symbol_path else stock_code_path()
    if realtime:
        all_stock_codes_url = 'http://www.shdjt.com/js/lib/astock.js'
        grep_stock_codes = re.compile('~(\d+)`')
        response = requests.get(all_stock_codes_url)
        symbol_code = grep_stock_codes.findall(response.text)
        with open(path, 'w') as f:
            json.dump(symbol_code, f)
        return symbol_code
    else:
        with open(path) as f:
            return json.load(f)
def get_stock_type(stock_code):
    """Infer the exchange ('sh' or 'sz') for a stock code.

    Codes already prefixed with 'sh'/'sz' keep that prefix.  Otherwise
    known numeric prefixes decide, with the Shanghai set checked first;
    single digits 5/6/9 also map to Shanghai and everything else defaults
    to Shenzhen.

    :param stock_code: code string, optionally prefixed with 'sh'/'sz'
    :return: 'sh' or 'sz'
    """
    assert type(stock_code) is str, 'stock code need str type'
    if stock_code.startswith(('sh', 'sz')):
        return stock_code[:2]
    shanghai = ('50', '51', '60', '90', '110', '113', '132', '204')
    shenzhen = ('00', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')
    if stock_code.startswith(shanghai):
        return 'sh'
    if stock_code.startswith(shenzhen):
        return 'sz'
    return 'sh' if stock_code.startswith(('5', '6', '9')) else 'sz'
def get_code_type(code):
    """Classify a security code as 'stock' or 'fund' by its leading digits.

    Codes starting with 00/30/60 are treated as stocks; everything else
    is assumed to be a fund.

    :param code: security code string
    :return: 'stock' or 'fund'
    """
    return 'stock' if code.startswith(('00', '30', '60')) else 'fund'
def stock_code_path():
    # Absolute path of the cached symbol list, stored next to this module.
    return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)
#-*- coding:UTF-8 -*-
import urllib2
import re
import threading
class MyThread(threading.Thread):
    """Crawler worker (Python 2 code: print statements, urllib2).

    Claims page numbers from the shared counter until it exceeds 1000,
    scrapes each page, and appends the collected items to the shared
    output file under g_mutex_2.
    """
    def __init__(self):
        super(MyThread, self).__init__()
    def run(self):
        global file_handle
        global g_mutex
        global count
        result = []
        while True:
            # Claim the next page number under the counter lock.
            g_mutex.acquire()
            count += 1
            g_mutex.release()
            # NOTE(review): count is re-read outside the lock here, so the
            # exit test races with other workers' increments -- verify.
            if count > 1000: break;
            data = self.catch_data_from_page(count)
            result.extend(data)
        # Flush everything this worker collected in one locked write.
        g_mutex_2.acquire()
        file_handle.writelines(result)
        print '%s catch %s data>>>>>>>>>>>>'%(self.name, len(result))
        g_mutex_2.release()
    def catch_data_from_page(self, page):
        """Scrape one listing page; return the matching content snippets.

        Filters by the global `words` keyword list (empty list = keep all).
        Returns [] on any URL error.
        """
        global words
        url = 'http://6our.com/best?&p=' + str(page)
        contents = []
        try:
            user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36'
            headers = {'User-Agent':user_agent}
            request = urllib2.Request(url, headers = headers)
            response = urllib2.urlopen(request, timeout = 3)
            body = response.read()
            response.close()
            del response
            pattern = re.compile('<div.*?class="content".*?title=".*?".*?id=".*?">(.*?)</div>', re.S)
            items = re.findall(pattern, body)
            # Keep an item when no keywords are configured or any keyword matches.
            def f(x):
                if not words:return True
                for w in words:
                    if w in x:
                        return True
                return False
            contents = filter(lambda x: f(x), items)
            print '%s catch page %s data success'%(self.name, page)
        except urllib2.URLError, e:
            if hasattr(e, 'code'):
                print e.code
            if hasattr(e, 'reason'):
                print e.reason
            print '%s catch page %s data fail'%(self.name, page)
        finally:
            return contents
# Shared crawler state: output file, optional keyword filter, two locks
# (page counter / file writes) and the page counter itself.
file_handle = open('shudong.txt', 'w')
words = []
g_mutex = threading.Lock()
g_mutex_2 = threading.Lock()
count = 0
def find_hot():
    """Spawn 50 crawler threads, wait up to 30s each, then close the file."""
    global count
    pool = []
    for i in xrange(50):
        t = MyThread()
        t.start()
        pool.append(t)
    for t in pool:
        # NOTE(review): join() with a timeout may return while a thread is
        # still writing; file_handle is closed immediately after -- verify.
        t.join(30)
    file_handle.close()
    print 'finish crawl %s data got'%count
# Script entry point.
if __name__ == '__main__':
    find_hot()
|
class Solution:
    """LeetCode 15: find all unique triplets that sum to zero."""

    def threeSum(self, nums):
        """Return unique [a, b, c] triplets from *nums* with a + b + c == 0.

        Sorts the input in place, then for each anchor element scans the
        remainder with two converging pointers, skipping duplicates at all
        three positions.  Runs in O(n^2) time.
        """
        nums.sort()
        triplets = []
        last = len(nums) - 1
        for anchor in range(last - 1):
            # Skip repeated anchors to avoid duplicate triplets.
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            lo, hi = anchor + 1, last
            while lo < hi:
                total = nums[anchor] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    triplets.append([nums[anchor], nums[lo], nums[hi]])
                    # Step both pointers past any duplicate values.
                    while lo < hi and nums[lo] == nums[lo + 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return triplets
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 10:34:22 2020
@author: yadav
"""
# code to generate two point correlation function, Kernels K,L and sampling from DPP (see section 2.4 page number 145) in review 'random matrix theory of quantum transport' by Beenakker
# The code uses eq.(50) in review 'random matrix theory of quantum transport' by Beenakker to compute two point correlation function.
import math
import cmath
import numpy
import contour_integral
import matplotlib #pylab is submodule in matplotlib
import random
import timeit
import sys
# Model / potential parameters (see the Beenakker review, section 2.4).
theta = 1.0001
rho = 2.0 # V(x)=rho*x. for \theta=1 and V(x)=rho*x, V_eff(x)=2*rho*x/(1+gamma)=rho_eff*x (See our beta ensembles paper)
gamma = 0.4
beta = 1.0
iteration = 39.0
# Load the mapping output (x grid) and the renormalized density sigma.
data1 = numpy.loadtxt("input_output_files/theta_"+str(theta)+"/linear_pot/rho_"+str(rho)+"/gamma_"+str(gamma)+"/mapping/mapping_output_nu2_gamma="+str(gamma)+"_theta="+str(theta)+"_18000points_iter"+str(iteration)+".txt",float)
data2 = numpy.loadtxt("input_output_files/theta_"+str(theta)+"/linear_pot/rho_"+str(rho)+"/gamma_"+str(gamma)+"/density/renormalized_density_psi_method_2_epsi=1e-4_gamma="+str(gamma)+"_theta="+str(theta)+"_rho="+str(rho)+"_18000points_corrected4_iter"+str(iteration)+".txt",float)
# Fix: the original used the Python-2-only builtin file(); open() behaves
# the same and works on both Python 2 and 3.
f_out=open("input_output_files/theta_"+str(theta)+"/linear_pot/rho_"+str(rho)+"/gamma_"+str(gamma)+"/density/Y_sampling_DPP_theta="+str(theta)+"_gamma="+str(gamma)+"_18000points.txt","w")
x = data1[:,0]
sigma = data2[:,1]
sigma = sigma.reshape(-1, 1) # converts shape (n,) to a column vector (n,1)
sigma=sigma[::-1] # reverse the order of the elements of the array
# Build the pairwise interaction matrix u (log potential) and its
# grid-weighted version u_delta; the diagonal is set to the largest float
# to approximate the divergence at coinciding points.
u = numpy.empty([len(x),len(x)],float)
u_delta = numpy.empty([len(x),len(x)],float)
for i in range(len(x)): # function len() on array gives no. of rows of array
    for j in range(len(x)): # function len() on array gives no. of rows of array
        if(i==j):
            u[i,j] = sys.float_info.max # maximum possible float value in python
        else:
            u[i,j] = -math.log(abs(x[i]-x[j]))-gamma*math.log(abs(x[i]**theta-x[j]**theta))
        # Forward grid spacing (backward difference at the last point).
        if(i==len(x)-1):
            delta_x = x[i]-x[i-1]
        else:
            delta_x = x[i+1]-x[i]
        u_delta[i,j] = delta_x*u[i,j]
print('Matrix u_delta has been computed')
R = (1.0/beta)*numpy.linalg.inv(u_delta) # See eq.(50) in review 'random matrix theory of quantum transport' by Beenakker
print('Two point correlation function R has been computed')
# NOTE(review): **(0.5) is an ELEMENT-WISE square root of the matrix, not a
# matrix square root -- confirm this matches eq.(50)/(43) of the review.
K = (numpy.dot(sigma,sigma.T)+R)**(0.5) # See eq.(50) and eq.(43) in review 'random matrix theory of quantum transport' by Beenakker
print('Kernel K has been computed')
Lambda_K, v_K = numpy.linalg.eig(K) # to find out eigenvalues and eigenvectors of real symmetric matrix.
#Lambda_K, v_K = numpy.linalg.eigh(K) # to find out eigenvalues and eigenvectors of real symmetric matrix.
Lambda_L = Lambda_K/(Lambda_K-1.0) # relation between eigenvalues of kernel K and eigenvalues of kernel L.
v_L=v_K # eigenvectors of kernel K and kernel L are same.
print('eigenvalues and eigenvectors of Kernel L has been computed')
J = numpy.zeros(N,int)
# Following is the loop 1 for sampling of a DPP algorithm 1 in Kulesza and Taskar. See notes.
for i in range(N): # function len() on array gives no. of rows of array
p = random.random() # generates random floating number from [0.0,1.0)
if((abs(Lambda_L[i])/(1.0+abs(Lambda_L[i])))>=p):
J[i]=i
J_masked = numpy.ma.masked_equal(J,0) # to mask all the zeros of array J
J = J_masked[~J_masked.mask] # to remove all the masked values of array J_masked
N0 = len(J)
Y = numpy.empty(N0,int)
print('Loop 1 of sampling algorithm has been computed')
print('size of the sample is',N0)
# Following is the loop 2 for sampling of a DPP, algorithm 1 in Kulesza and
# Taskar: repeatedly rejection-sample an item index m_prime proportional to
# the squared projections onto the selected eigenvectors, record it, and
# project the eigenvectors orthogonal to the chosen basis vector.
for j in range(N0):
    mod_V = N0-j
    print('iterations remaining are',N0-j)
    while True:
        m_prime = random.randint(0,N-1) # generates random integer from 0 to N-1
        # m_prime = random.randrange(N) # generates random integer from 0 to N-1
        e_m_prime = numpy.zeros(N,float)
        e_m_prime[m_prime] = 1.0
        p_prime = random.random() # generates random floating number from [0.0,1.0)
        summation = 0
        for k in range(N0):
            volume = (numpy.dot(v_L[J[k]].T,e_m_prime))**2.0 # .T denotes transpose
            summation = summation + volume
        # Accept m_prime with probability (1/|V|) * sum of squared projections.
        if((1.0/mod_V)*summation>=p_prime):
            Y[j]=m_prime
            f_out.write(str(Y[j])+'\n')
            # Project every eigenvector orthogonal to the accepted basis vector.
            for l in range(N0):
                v_L[l] = v_L[l]-(numpy.dot(v_L[l],e_m_prime))*e_m_prime
            break
f_out.close() # () at the end is necessary to close the file
#numpy.savetxt("input_output_files/theta_"+str(theta)+"/V_eff/linear_pot/rho_"+str(rho)+"/gamma_"+str(gamma)+"/density/Y_sampling_DPP_c="+str(c_short_delta)+"_theta="+str(theta)+"_gamma="+str(gamma)+"_9000points.txt", Y, newline='n')
|
from django import forms
from .models import Comment
class CommentForm(forms.Form):
    """Plain form capturing an author and a message for a Comment."""

    author = forms.CharField()
    message = forms.CharField()

    def save(self, commit=True):
        """Build a Comment from the cleaned data; persist it unless commit=False."""
        comment = Comment(
            author=self.cleaned_data['author'],
            message=self.cleaned_data['message'],
        )
        if commit:
            comment.save()
        return comment
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
ORM module (Python 2).

Maps model classes to database tables via Field descriptors and a
metaclass; see the Model base class below.  Original note: 2016-09-21.
"""
import db
import time
from db import next_id
class Field(object):
    """Describes one table column: name, default value, and DDL flags."""

    def __init__(self, **kw):
        self.name = kw.get('name', None)
        # May be a plain value or a zero-argument factory callable.
        self._default = kw.get('default', None)
        self.primary_key = kw.get('primary_key', False)
        self.nullable = kw.get('nullable', False)
        self.updateable = kw.get('updateable', True)
        self.insertable = kw.get('insertable', False)
        self.ddl = kw.get('ddl', '')

    @property
    def default(self):
        """Resolve the default, invoking it when it is a factory."""
        value = self._default
        return value() if callable(value) else value

    def __str__(self):
        """Compact debug form, e.g. '<StringField:name,varchar(50),default()U>'."""
        parts = ['<%s:%s,%s,default(%s)' % (
            self.__class__.__name__, self.name, self.ddl, self._default)]
        for enabled, mark in ((self.nullable, 'N'),
                              (self.updateable, 'U'),
                              (self.insertable, 'I')):
            if enabled:
                parts.append(mark)
        parts.append('>')
        return ''.join(parts)
class StringField(Field):
    """varchar column; defaults to '' with ddl 'varchar(255)'."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'varchar(255)')
        super(StringField, self).__init__(**kw)
class IntegerField(Field):
    """bigint column; defaults to 0."""

    def __init__(self, **kw):
        kw.setdefault('default', 0)
        kw.setdefault('ddl', 'bigint')
        super(IntegerField, self).__init__(**kw)
class FloatField(Field):
    """real column; defaults to 0.0."""

    def __init__(self, **kw):
        kw.setdefault('default', 0.0)
        kw.setdefault('ddl', 'real')
        super(FloatField, self).__init__(**kw)
class BooleanField(Field):
    """bool column; defaults to False."""

    def __init__(self, **kw):
        kw.setdefault('default', False)
        kw.setdefault('ddl', 'bool')
        super(BooleanField, self).__init__(**kw)
class TextField(Field):
    """blob column; defaults to ''."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'blob')
        super(TextField, self).__init__(**kw)
class VersionField(Field):
    def __init__(self, name=None):
        # bigint counter starting at 0; used for optimistic-lock versioning.
        super(VersionField, self).__init__(name=name, default=0, ddl='bigint')
class ModelMetaclass(type):
    """Collects Field attributes of a Model subclass into __mappings__.

    (Python 2 only: relies on dict.iteritems/iterkeys.)  Enforces exactly
    one primary key, forces it non-updateable and non-nullable, strips the
    Field descriptors from the class namespace, and records the table
    name, column mappings and primary key on the class.
    """
    def __new__(cls, name, bases, attrs):
        # The abstract Model base itself gets no mapping treatment.
        if name=='Model':
            return type.__new__(cls, name, bases, attrs)
        mappings = dict()
        primary_key = None
        for k, v in attrs.iteritems():
            if isinstance(v, Field):
                # Fields default their column name to the attribute name.
                if not v.name:
                    v.name = k
                # print('Found mapping: %s==>%s' % (k, v))
                if v.primary_key:
                    if primary_key:
                        raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
                    # Primary keys must never be updated or NULL.
                    if v.updateable:
                        v.updateable = False
                    if v.nullable:
                        v.nullable = False
                    primary_key = v
                mappings[k] = v
        if not primary_key:
            raise TypeError('Primary key not defined in class: %s' % name)
        # Remove Field objects so instance attribute access goes through
        # Model's dict-backed __getattr__/__setattr__.
        for k in mappings.iterkeys():
            attrs.pop(k)
        if not '__table__' in attrs:
            attrs['__table__'] = name.lower()
        attrs['__mappings__'] = mappings
        attrs['__primary_key__'] = primary_key
        return type.__new__(cls, name, bases, attrs)
class Model(dict):
    """Base class for ORM models: a dict with attribute access and CRUD.

    Subclasses declare Field attributes; ModelMetaclass turns them into
    __mappings__/__primary_key__/__table__.  (Python 2 only code.)
    """
    __metaclass__=ModelMetaclass
    def __init__(self, **kw):
        super(Model, self).__init__(**kw)
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
    def __setattr__(self, key, value):
        self[key] = value
    @classmethod
    def get(cls, pk):
        """
        Get by primary key.
        """
        d = db.select_one('select * from %s where %s=?' % (cls.__table__, cls.__primary_key__.name), pk)
        return cls(**d) if d else None
    @classmethod
    def find_first(cls, where, *args):
        """
        Query with a WHERE clause; return the first result or None.
        """
        d = db.select_one('select * from %s %s' % (cls.__table__, where), *args)
        return cls(**d) if d else None
    @classmethod
    def find_by(cls, where, *args):
        # Query with a WHERE clause; return all matching rows as instances.
        L = db.select('select * from %s %s' % (cls.__table__, where), *args)
        return [cls(**d) for d in L]
    @classmethod
    def find_all(cls):
        # Return every row of the table as model instances.
        L = db.select('select * from %s' % cls.__table__)
        return [cls(**d) for d in L]
    @classmethod
    def count_all(cls):
        # NOTE(review): returns db.select's raw result (a row list), not an
        # int -- callers must unwrap it.
        return db.select('select count(%s) from %s' % (cls.__primary_key__.name, cls.__table__))
    @classmethod
    def count_by(cls, where, *args):
        # Same raw-result caveat as count_all.
        return db.select('select count(%s) from %s %s' % (cls.__primary_key__.name, cls.__table__, where), *args)
    def insert(self):
        """Insert this instance, filling unset columns with field defaults."""
        fields = []
        params = []
        args = []
        for k, v in self.__mappings__.iteritems():
            tmp = getattr(self, k, None)
            # NOTE(review): falsy values (0, '', False) are replaced by the
            # field default here -- confirm that is intended.
            if not tmp:
                tmp = self.__mappings__[k].default
                self[k]=tmp
            fields.append(v.name)
            params.append('?')
            args.append(tmp)
        sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
        return db.update(sql, *args)
    def delete(self):
        # Delete the row matching this instance's primary key.
        pk = self.__primary_key__.name
        args = (getattr(self, pk), )
        sql = 'delete from %s where %s=?' % (self.__table__, pk)
        return db.update(sql, *args)
    def update(self):
        """Rewrite every updateable column from this instance's values.

        NOTE(review): values are concatenated into the SQL string with
        naive quoting -- SQL-injection / escaping hazard; should use bound
        parameters like the other methods.
        """
        pk = self.__primary_key__.name
        args = (getattr(self, pk), )
        key_value = []
        for k, v in self.__mappings__.iteritems():
            if self.__mappings__[k].updateable:
                key_value.append(v.name+'=\''+str(getattr(self, k, None))+'\'')
        sql = 'update %s set %s where %s=?' % (self.__table__, ','.join(key_value), pk)
        return db.update(sql, *args)
class User(Model):
    """Blog user model mapped to the 'users' table.

    id defaults to db.next_id(); email and created_at are frozen after
    insert (updateable=False).
    """
    __table__ = 'users'
    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    email = StringField(updateable=False, ddl='varchar(50)')
    password = StringField(ddl='varchar(50)')
    admin = BooleanField()
    name = StringField(ddl='varchar(50)')
    image = StringField(ddl='varchar(500)')
    created_at = FloatField(updateable=False, default=time.time)
if __name__=='__main__':
    # Manual smoke test: connect and exercise the CRUD examples below.
    db.create_engine('www-data', 'www-data', 'myblog')
    # u = User(name='Test', email='test@example.com', password='1234567890', image='about:blank')
    # print u.insert()
    # print User.find_all()
    # u = User.find_first('where email=?', 'test@example.com')
    # print u.delete()
    # u = User.find_first('where email=?', 'test@example.com')
    # u.email = 'change@example.com'
    # u.name = 'change'
    # print u.update()
|
import img_similarity_comparison
if __name__ == '__main__':
    # Python 2 script (print statement): compare a remote image against
    # the local file '1.jpg' and print the similarity result.
    print img_similarity_comparison.compare('http://image1.mop.com/vcms/project1/2012/9/9/10/55/201209091349748014080186572403.jpg', '1.jpg')
def onehotnames():
    """Return the fixed, ordered list of one-hot-encoded feature names."""
    numeric = ['age', 'balance', 'month', 'duration', 'campaign', 'previous']
    day_bins = ['day_bin_(0, 7]', 'day_bin_(7, 14]', 'day_bin_(14, 21]',
                'day_bin_(21, 28]', 'day_bin_(28, 31]']
    pdays_bins = ['pdays_bin_(-2, -1]', 'pdays_bin_(-1, 0]',
                  'pdays_bin_(0, 400]', 'pdays_bin_(400, 900]']
    jobs = ['job_' + j for j in
            ['admin.', 'blue-collar', 'entrepreneur', 'housemaid',
             'management', 'other', 'retired', 'self-employed',
             'services', 'student', 'technician', 'unemployed']]
    marital = ['marital_' + m for m in ['divorced', 'married', 'single']]
    education = ['education_' + e for e in
                 ['other', 'primary', 'secondary', 'tertiary']]
    yes_no = ['default_no', 'default_yes', 'housing_no', 'housing_yes',
              'loan_no', 'loan_yes']
    contact = ['contact_' + c for c in ['cellular', 'other', 'telephone']]
    poutcome = ['poutcome_' + p for p in ['DNE', 'failure', 'other', 'success']]
    return (numeric + day_bins + pdays_bins + jobs + marital +
            education + yes_no + contact + poutcome)
#!/usr/bin/env python3
# RyanWaltersDev Jun 15 2021 -- TIY 7-5
# Interactive ticket-pricing loop for Runway Theaters.
# Prices: under 3 free; 3-12 -> $10; 13-64 -> $15; 65-119 -> $12;
# 'veteran' -> $8. A 7% sales tax is added to each paid ticket.

# Initial prompt
prompt = "\n\nThank you for choosing Runway Theaters!"
prompt += "\n(Type 'veteran' to see our discount options for service members.)"
prompt += "\n(If you are finished, please enter 'quit')"
prompt += "\n\nEnter the age of the person that this ticket is for: "

# while loop
active = True
ticket_list = []
while active:
    age = input(prompt)
    ticket = 0
    # BUG FIX: removed `answer = PriceCalculator(age)` — PriceCalculator is
    # never defined, so that line raised NameError on every iteration, and
    # `answer` was never used.
    # integer input
    if age.isnumeric():
        age = int(age)
        # younger than 3
        if age < 3:
            print(f"\n\tYour {age} year old child gets free admission!")
        # ages 3 to 12
        elif age >= 3 and age <= 12:
            print(f"\tThe ticket price for a {age} year old is $10!")
            ticket += 10
            ticket += ticket * 0.07  # 7% sales tax
            ticket_list.append(ticket)
        # ages 12 to 64
        elif age > 12 and age < 65:
            print(f"\tThe ticket price for {age} year olds is $15!")
            ticket += 15
            ticket += ticket * 0.07
            ticket_list.append(ticket)
        # 65 and up
        elif age >= 65 and age < 120:
            print(f"\tThe price for our senior citizens is $12!")
            ticket += 12
            ticket += ticket * 0.07
            ticket_list.append(ticket)
        # Invalid age
        else:
            print("\tPlease enter a valid age.")
    # string input
    else:
        # quit break
        if age.title() == 'Quit' or age.title() == "'Quit'":
            break
        # veteran pricing
        elif age.title() == 'Veteran' or age.title() == "'Veteran'":
            print(f"\tThank you for your service. Your ticket price is $8!")
            ticket += 8
            ticket += ticket * 0.07
            ticket_list.append(ticket)
        # invalid response
        else:
            print("\tPlease enter a valid response.")
            continue
    # calculate ticket total (sum() replaces the index-based loop)
    total = sum(ticket_list)
    print(f"\nYour current total is ${total}.")
    # ask to continue
    more_ticket = input("Would you like to buy more tickets? [Y/N] ")
    if more_ticket.title() == 'Y' or more_ticket.title() == 'Yes':
        continue
    else:
        print(f"\n\nThank you for choosing Runway Theaters! Enjoy the movie!")
        active = False
# END OF PROGRAM
|
import datetime
import os
from datetime import timedelta
from airflow import DAG
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.sensors.python import PythonSensor
from airflow.models import Variable
# Defaults applied to every task in this DAG.
default_args = {
    "owner": "airflow",
    'email_on_failure': True,  # requires SMTP to be configured in Airflow
    "retries": 1,
    "retry_delay": timedelta(minutes=1),
}
def _wait_for_data():
return os.path.exists(os.path.join(os.getcwd(), "data/raw/2021-06-18/data.csv")) \
and os.path.exists(os.path.join(os.getcwd(), "data/raw/2021-06-18/target.csv"))
def _wait_for_model():
    # Sensor poke callable: True once the serialized model file (path
    # taken from the Airflow Variable PATH_TO_MODEL) appears under cwd.
    model_path = os.path.join(os.getcwd(), Variable.get("PATH_TO_MODEL"))
    return os.path.exists(model_path)
with DAG(
    dag_id="Predict",
    default_args=default_args,
    schedule_interval="@daily",
    # NOTE(review): dynamic start_date (now()) is discouraged by Airflow —
    # the DAG's schedule shifts on every parse; confirm this is intended.
    start_date=datetime.datetime.now(),
) as dag:
    # Block until the day's input CSVs are on disk.
    wait_for_data = PythonSensor(
        task_id="wait_for_data",
        python_callable=_wait_for_data,
        timeout=6000,        # seconds before the sensor fails
        poke_interval=10,    # seconds between checks
        retries=10,
        mode="poke",
    )
    # Block until the trained model file (Variable PATH_TO_MODEL) exists.
    wait_for_model = PythonSensor(
        task_id="wait_for_model",
        python_callable=_wait_for_model,
        timeout=6000,
        poke_interval=10,
        retries=10,
        mode="poke",
    )
    # Run batch prediction in a container; {{ ds }} is the execution date.
    predict = DockerOperator(
        image="airflow-predict",
        command="--input-dir /data/raw/{{ ds }} --output-dir /data/predictions/{{ ds }} "
        "--path-to-model /data/models/{{ ds }}/random_forest_classifier.pickle",
        task_id="docker-airflow-predict",
        do_xcom_push=False,
        # !!! HOST folder(NOT IN CONTAINER) replace with yours !!!
        # volumes=["/tmp:/data"]
        volumes=["/home/agysar/made_2/ml_in_prod/airflow_ml_dags/data:/data"]
    )
    # Both sensors must succeed before prediction starts.
    [wait_for_data, wait_for_model] >> predict
|
import shutil
import os

# Print the working directory, then extract the instructions archive
# into it ("" extract_dir means the current directory).
working_dir = os.getcwd()
print(working_dir)
shutil.unpack_archive("unzip_me_for_instructions.zip", "", "zip")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.